Update Linux to v5.4.2

Block layer highlights of this update: drop the LBDAF option and the
legacy noop, deadline, and CFQ I/O schedulers; add the cost-model
based cgroup IO controller (BLK_CGROUP_IOCOST) and CONFIG_BLK_PM;
replace GPL license boilerplate with SPDX identifiers; and update BFQ
with per-device weights (bfq.weight_device), waker-queue detection,
and its own CONFIG_BFQ_CGROUP_DEBUG stat infrastructure.

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/block/Kconfig b/block/Kconfig
index 1f2469a..41c0917 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -26,29 +26,8 @@
if BLOCK
-config LBDAF
- bool "Support for large (2TB+) block devices and files"
- depends on !64BIT
- default y
- help
- Enable block devices or files of size 2TB and larger.
-
- This option is required to support the full capacity of large
- (2TB+) block devices, including RAID, disk, Network Block Device,
- Logical Volume Manager (LVM) and loopback.
-
- This option also enables support for single files larger than
- 2TB.
-
- The ext4 filesystem requires that this feature be enabled in
- order to support filesystems that have the huge_file feature
- enabled. Otherwise, it will refuse to mount in the read-write
- mode any filesystems that use the huge_file feature, which is
- enabled by default by mke2fs.ext4.
-
- The GFS2 filesystem also requires this feature.
-
- If unsure, say Y.
+config BLK_RQ_ALLOC_TIME
+ bool
config BLK_SCSI_REQUEST
bool
@@ -74,7 +53,6 @@
config BLK_DEV_BSGLIB
bool "Block layer SG support v4 helper lib"
- default n
select BLK_DEV_BSG
select BLK_SCSI_REQUEST
help
@@ -98,6 +76,7 @@
config BLK_DEV_ZONED
bool "Zoned block device support"
+ select MQ_IOSCHED_DEADLINE
---help---
Block layer zoned block device support. This option enables
support for ZAC/ZBC host-managed and host-aware zoned block devices.
@@ -107,19 +86,17 @@
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
depends on BLK_CGROUP=y
- default n
---help---
Block layer bio throttling support. It can be used to limit
the IO rate to a device. IO rate policies are per cgroup and
one needs to mount and use blkio cgroup controller for creating
cgroups and specifying per device IO rate policies.
- See Documentation/cgroup-v1/blkio-controller.txt for more information.
+ See Documentation/admin-guide/cgroup-v1/blkio-controller.rst for more information.
config BLK_DEV_THROTTLING_LOW
bool "Block throttling .low limit interface support (EXPERIMENTAL)"
depends on BLK_DEV_THROTTLING
- default n
---help---
Add .low limit interface for block throttling. The low limit is a best
effort limit to prioritize cgroups. Depending on the setting, the limit
@@ -130,18 +107,16 @@
config BLK_CMDLINE_PARSER
bool "Block device command line partition parser"
- default n
---help---
Enabling this option allows you to specify the partition layout from
the kernel boot args. This is typically of use for embedded devices
which don't otherwise have any standardized method for listing the
partitions on a block device.
- See Documentation/block/cmdline-partition.txt for more information.
+ See Documentation/block/cmdline-partition.rst for more information.
config BLK_WBT
bool "Enable support for block device writeback throttling"
- default n
---help---
Enabling this option enables the block layer to throttle buffered
background writeback from the VM, making it more smooth and having
@@ -152,7 +127,6 @@
config BLK_CGROUP_IOLATENCY
bool "Enable support for latency based cgroup IO protection"
depends on BLK_CGROUP=y
- default n
---help---
Enabling this option enables the .latency interface for IO throttling.
The IO controller will attempt to maintain average IO latencies below
@@ -161,12 +135,15 @@
Note, this is an experimental interface and could be changed someday.
-config BLK_WBT_SQ
- bool "Single queue writeback throttling"
- default n
- depends on BLK_WBT
+config BLK_CGROUP_IOCOST
+ bool "Enable support for cost model based cgroup IO controller"
+ depends on BLK_CGROUP=y
+ select BLK_RQ_ALLOC_TIME
---help---
- Enable writeback throttling by default on legacy single queue devices
+ Enabling this option enables the .weight interface for cost
+ model based proportional IO control. The IO controller
+ distributes IO capacity between different groups based on
+ their share of the overall weight distribution.
config BLK_WBT_MQ
bool "Multiqueue writeback throttling"
@@ -228,4 +205,7 @@
depends on BLOCK && INFINIBAND
default y
-source block/Kconfig.iosched
+config BLK_PM
+ def_bool BLOCK && PM
+
+source "block/Kconfig.iosched"
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index a4a8914..b89310a 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -3,68 +3,6 @@
menu "IO Schedulers"
-config IOSCHED_NOOP
- bool
- default y
- ---help---
- The no-op I/O scheduler is a minimal scheduler that does basic merging
- and sorting. Its main uses include non-disk based block devices like
- memory devices, and specialised software or hardware environments
- that do their own scheduling and require only minimal assistance from
- the kernel.
-
-config IOSCHED_DEADLINE
- tristate "Deadline I/O scheduler"
- default y
- ---help---
- The deadline I/O scheduler is simple and compact. It will provide
- CSCAN service with FIFO expiration of requests, switching to
- a new point in the service tree and doing a batch of IO from there
- in case of expiry.
-
-config IOSCHED_CFQ
- tristate "CFQ I/O scheduler"
- default y
- ---help---
- The CFQ I/O scheduler tries to distribute bandwidth equally
- among all processes in the system. It should provide a fair
- and low latency working environment, suitable for both desktop
- and server systems.
-
- This is the default I/O scheduler.
-
-config CFQ_GROUP_IOSCHED
- bool "CFQ Group Scheduling support"
- depends on IOSCHED_CFQ && BLK_CGROUP
- default n
- ---help---
- Enable group IO scheduling in CFQ.
-
-choice
-
- prompt "Default I/O scheduler"
- default DEFAULT_CFQ
- help
- Select the I/O scheduler which will be used by default for all
- block devices.
-
- config DEFAULT_DEADLINE
- bool "Deadline" if IOSCHED_DEADLINE=y
-
- config DEFAULT_CFQ
- bool "CFQ" if IOSCHED_CFQ=y
-
- config DEFAULT_NOOP
- bool "No-op"
-
-endchoice
-
-config DEFAULT_IOSCHED
- string
- default "deadline" if DEFAULT_DEADLINE
- default "cfq" if DEFAULT_CFQ
- default "noop" if DEFAULT_NOOP
-
config MQ_IOSCHED_DEADLINE
tristate "MQ deadline I/O scheduler"
default y
@@ -82,24 +20,29 @@
config IOSCHED_BFQ
tristate "BFQ I/O scheduler"
- default n
---help---
BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
of the device among all processes according to their weights,
regardless of the device parameters and with any workload. It
also guarantees a low latency to interactive and soft
real-time applications. Details in
- Documentation/block/bfq-iosched.txt
+ Documentation/block/bfq-iosched.rst
config BFQ_GROUP_IOSCHED
bool "BFQ hierarchical scheduling support"
depends on IOSCHED_BFQ && BLK_CGROUP
- default n
---help---
Enable hierarchical scheduling in BFQ, using the blkio
(cgroups-v1) or io (cgroups-v2) controller.
+config BFQ_CGROUP_DEBUG
+ bool "BFQ IO controller debugging"
+ depends on BFQ_GROUP_IOSCHED
+ ---help---
+ Enable some debugging help. Currently it exports additional stat
+ files in a cgroup which can be useful for debugging.
+
endmenu
endif
diff --git a/block/Makefile b/block/Makefile
index 572b33f..9ef57ac 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -3,7 +3,7 @@
# Makefile for the kernel block layer
#
-obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
+obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
@@ -18,9 +18,7 @@
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o
-obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
-obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
-obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
+obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o
obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o
obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
@@ -37,3 +35,4 @@
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
+obj-$(CONFIG_BLK_PM) += blk-pm.o
diff --git a/block/badblocks.c b/block/badblocks.c
index 91f7bcf..2e5f569 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Bad block management
*
* - Heavily based on MD badblocks code from Neil Brown
*
* Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/badblocks.h>
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 9fe5952..86a607c 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* cgroups support for the BFQ I/O scheduler.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/module.h>
#include <linux/slab.h>
@@ -24,7 +15,83 @@
#include "bfq-iosched.h"
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
+static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
+{
+ int ret;
+
+ ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
+ if (ret)
+ return ret;
+
+ atomic64_set(&stat->aux_cnt, 0);
+ return 0;
+}
+
+static void bfq_stat_exit(struct bfq_stat *stat)
+{
+ percpu_counter_destroy(&stat->cpu_cnt);
+}
+
+/**
+ * bfq_stat_add - add a value to a bfq_stat
+ * @stat: target bfq_stat
+ * @val: value to add
+ *
+ * Add @val to @stat. The caller must ensure that IRQs on the same CPU
+ * don't re-enter this function for the same counter.
+ */
+static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
+{
+ percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
+}
+
+/**
+ * bfq_stat_read - read the current value of a bfq_stat
+ * @stat: bfq_stat to read
+ */
+static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
+{
+ return percpu_counter_sum_positive(&stat->cpu_cnt);
+}
+
+/**
+ * bfq_stat_reset - reset a bfq_stat
+ * @stat: bfq_stat to reset
+ */
+static inline void bfq_stat_reset(struct bfq_stat *stat)
+{
+ percpu_counter_set(&stat->cpu_cnt, 0);
+ atomic64_set(&stat->aux_cnt, 0);
+}
+
+/**
+ * bfq_stat_add_aux - add a bfq_stat into another's aux count
+ * @to: the destination bfq_stat
+ * @from: the source
+ *
+ * Add @from's count including the aux one to @to's aux count.
+ */
+static inline void bfq_stat_add_aux(struct bfq_stat *to,
+ struct bfq_stat *from)
+{
+ atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
+ &to->aux_cnt);
+}
+
+/**
+ * blkg_prfill_stat - prfill callback for bfq_stat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the bfq_stat in @pd
+ *
+ * prfill callback for printing a bfq_stat.
+ */
+static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
+{
+ return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
+}
/* bfqg stats flags */
enum bfqg_stats_flags {
@@ -62,7 +129,7 @@
now = ktime_get_ns();
if (now > stats->start_group_wait_time)
- blkg_stat_add(&stats->group_wait_time,
+ bfq_stat_add(&stats->group_wait_time,
now - stats->start_group_wait_time);
bfqg_stats_clear_waiting(stats);
}
@@ -91,14 +158,14 @@
now = ktime_get_ns();
if (now > stats->start_empty_time)
- blkg_stat_add(&stats->empty_time,
+ bfq_stat_add(&stats->empty_time,
now - stats->start_empty_time);
bfqg_stats_clear_empty(stats);
}
void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
- blkg_stat_add(&bfqg->stats.dequeue, 1);
+ bfq_stat_add(&bfqg->stats.dequeue, 1);
}
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
@@ -128,7 +195,7 @@
u64 now = ktime_get_ns();
if (now > stats->start_idle_time)
- blkg_stat_add(&stats->idle_time,
+ bfq_stat_add(&stats->idle_time,
now - stats->start_idle_time);
bfqg_stats_clear_idling(stats);
}
@@ -146,9 +213,9 @@
{
struct bfqg_stats *stats = &bfqg->stats;
- blkg_stat_add(&stats->avg_queue_size_sum,
+ bfq_stat_add(&stats->avg_queue_size_sum,
blkg_rwstat_total(&stats->queued));
- blkg_stat_add(&stats->avg_queue_size_samples, 1);
+ bfq_stat_add(&stats->avg_queue_size_samples, 1);
bfqg_stats_update_group_wait_time(stats);
}
@@ -185,7 +252,7 @@
io_start_time_ns - start_time_ns);
}
-#else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+#else /* CONFIG_BFQ_CGROUP_DEBUG */
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
unsigned int op) { }
@@ -199,7 +266,7 @@
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
-#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_BFQ_CGROUP_DEBUG */
#ifdef CONFIG_BFQ_GROUP_IOSCHED
@@ -283,18 +350,18 @@
/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
/* queued stats shouldn't be cleared */
blkg_rwstat_reset(&stats->merged);
blkg_rwstat_reset(&stats->service_time);
blkg_rwstat_reset(&stats->wait_time);
- blkg_stat_reset(&stats->time);
- blkg_stat_reset(&stats->avg_queue_size_sum);
- blkg_stat_reset(&stats->avg_queue_size_samples);
- blkg_stat_reset(&stats->dequeue);
- blkg_stat_reset(&stats->group_wait_time);
- blkg_stat_reset(&stats->idle_time);
- blkg_stat_reset(&stats->empty_time);
+ bfq_stat_reset(&stats->time);
+ bfq_stat_reset(&stats->avg_queue_size_sum);
+ bfq_stat_reset(&stats->avg_queue_size_samples);
+ bfq_stat_reset(&stats->dequeue);
+ bfq_stat_reset(&stats->group_wait_time);
+ bfq_stat_reset(&stats->idle_time);
+ bfq_stat_reset(&stats->empty_time);
#endif
}
@@ -304,19 +371,19 @@
if (!to || !from)
return;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
/* queued stats shouldn't be cleared */
blkg_rwstat_add_aux(&to->merged, &from->merged);
blkg_rwstat_add_aux(&to->service_time, &from->service_time);
blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
- blkg_stat_add_aux(&from->time, &from->time);
- blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
- blkg_stat_add_aux(&to->avg_queue_size_samples,
+ bfq_stat_add_aux(&from->time, &from->time);
+ bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
+ bfq_stat_add_aux(&to->avg_queue_size_samples,
&from->avg_queue_size_samples);
- blkg_stat_add_aux(&to->dequeue, &from->dequeue);
- blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
- blkg_stat_add_aux(&to->idle_time, &from->idle_time);
- blkg_stat_add_aux(&to->empty_time, &from->empty_time);
+ bfq_stat_add_aux(&to->dequeue, &from->dequeue);
+ bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
+ bfq_stat_add_aux(&to->idle_time, &from->idle_time);
+ bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}
@@ -334,7 +401,7 @@
parent = bfqg_parent(bfqg);
- lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
+ lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
if (unlikely(!parent))
return;
@@ -364,35 +431,35 @@
static void bfqg_stats_exit(struct bfqg_stats *stats)
{
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
blkg_rwstat_exit(&stats->merged);
blkg_rwstat_exit(&stats->service_time);
blkg_rwstat_exit(&stats->wait_time);
blkg_rwstat_exit(&stats->queued);
- blkg_stat_exit(&stats->time);
- blkg_stat_exit(&stats->avg_queue_size_sum);
- blkg_stat_exit(&stats->avg_queue_size_samples);
- blkg_stat_exit(&stats->dequeue);
- blkg_stat_exit(&stats->group_wait_time);
- blkg_stat_exit(&stats->idle_time);
- blkg_stat_exit(&stats->empty_time);
+ bfq_stat_exit(&stats->time);
+ bfq_stat_exit(&stats->avg_queue_size_sum);
+ bfq_stat_exit(&stats->avg_queue_size_samples);
+ bfq_stat_exit(&stats->dequeue);
+ bfq_stat_exit(&stats->group_wait_time);
+ bfq_stat_exit(&stats->idle_time);
+ bfq_stat_exit(&stats->empty_time);
#endif
}
static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
if (blkg_rwstat_init(&stats->merged, gfp) ||
blkg_rwstat_init(&stats->service_time, gfp) ||
blkg_rwstat_init(&stats->wait_time, gfp) ||
blkg_rwstat_init(&stats->queued, gfp) ||
- blkg_stat_init(&stats->time, gfp) ||
- blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
- blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
- blkg_stat_init(&stats->dequeue, gfp) ||
- blkg_stat_init(&stats->group_wait_time, gfp) ||
- blkg_stat_init(&stats->idle_time, gfp) ||
- blkg_stat_init(&stats->empty_time, gfp)) {
+ bfq_stat_init(&stats->time, gfp) ||
+ bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
+ bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
+ bfq_stat_init(&stats->dequeue, gfp) ||
+ bfq_stat_init(&stats->group_wait_time, gfp) ||
+ bfq_stat_init(&stats->idle_time, gfp) ||
+ bfq_stat_init(&stats->empty_time, gfp)) {
bfqg_stats_exit(stats);
return -ENOMEM;
}
@@ -434,11 +501,12 @@
kfree(cpd_to_bfqgd(cpd));
}
-static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
+static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
+ struct blkcg *blkcg)
{
struct bfq_group *bfqg;
- bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
+ bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
if (!bfqg)
return NULL;
@@ -578,7 +646,8 @@
bfqg_and_blkg_get(bfqg);
if (bfq_bfqq_busy(bfqq)) {
- bfq_pos_tree_add_move(bfqd, bfqq);
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
bfq_activate_bfqq(bfqd, bfqq);
}
@@ -642,7 +711,7 @@
uint64_t serial_nr;
rcu_read_lock();
- serial_nr = bio_blkcg(bio)->css.serial_nr;
+ serial_nr = __bio_blkcg(bio)->css.serial_nr;
/*
* Check whether blkcg has changed. The condition may trigger
@@ -651,7 +720,7 @@
if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
goto out;
- bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+ bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
/*
* Update blkg_path for bfq_log_* functions. We cache this
* path, and update it here, for the following
@@ -836,7 +905,7 @@
bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}
-static int bfq_io_show_weight(struct seq_file *sf, void *v)
+static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
@@ -850,6 +919,60 @@
return 0;
}
+static u64 bfqg_prfill_weight_device(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ struct bfq_group *bfqg = pd_to_bfqg(pd);
+
+ if (!bfqg->entity.dev_weight)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
+}
+
+static int bfq_io_show_weight(struct seq_file *sf, void *v)
+{
+ struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+ struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
+
+ seq_printf(sf, "default %u\n", bfqgd->weight);
+ blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
+ &blkcg_policy_bfq, 0, false);
+ return 0;
+}
+
+static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
+{
+ weight = dev_weight ?: weight;
+
+ bfqg->entity.dev_weight = dev_weight;
+ /*
+ * Setting the prio_changed flag of the entity
+ * to 1 with new_weight == weight would re-set
+ * the value of the weight to its ioprio mapping.
+ * Set the flag only if necessary.
+ */
+ if ((unsigned short)weight != bfqg->entity.new_weight) {
+ bfqg->entity.new_weight = (unsigned short)weight;
+ /*
+ * Make sure that the above new value has been
+ * stored in bfqg->entity.new_weight before
+ * setting the prio_changed flag. In fact,
+ * this flag may be read asynchronously (in
+ * critical sections protected by a different
+ * lock than that held here), and finding this
+ * flag set may cause the execution of the code
+ * for updating parameters whose value may
+ * depend also on bfqg->entity.new_weight (in
+ * __bfq_entity_update_weight_prio).
+ * This barrier makes sure that the new value
+ * of bfqg->entity.new_weight is correctly
+ * seen in that code.
+ */
+ smp_wmb();
+ bfqg->entity.prio_changed = 1;
+ }
+}
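The smp_wmb() above is the write side of a publish/consume pattern: new_weight must be visible before prio_changed is observed set. Schematically, the pairing the comment describes looks like this (a sketch of the concept, not the exact BFQ read-side code):

/* writer (bfq_group_set_weight, above) */
entity->new_weight = w;
smp_wmb();			/* publish new_weight before the flag */
entity->prio_changed = 1;

/* reader (conceptual, in the weight-update path) */
if (entity->prio_changed) {
	smp_rmb();		/* pairs with the smp_wmb() above */
	w = entity->new_weight;
}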
+
static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
struct cftype *cftype,
u64 val)
@@ -868,56 +991,73 @@
hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
struct bfq_group *bfqg = blkg_to_bfqg(blkg);
- if (!bfqg)
- continue;
- /*
- * Setting the prio_changed flag of the entity
- * to 1 with new_weight == weight would re-set
- * the value of the weight to its ioprio mapping.
- * Set the flag only if necessary.
- */
- if ((unsigned short)val != bfqg->entity.new_weight) {
- bfqg->entity.new_weight = (unsigned short)val;
- /*
- * Make sure that the above new value has been
- * stored in bfqg->entity.new_weight before
- * setting the prio_changed flag. In fact,
- * this flag may be read asynchronously (in
- * critical sections protected by a different
- * lock than that held here), and finding this
- * flag set may cause the execution of the code
- * for updating parameters whose value may
- * depend also on bfqg->entity.new_weight (in
- * __bfq_entity_update_weight_prio).
- * This barrier makes sure that the new value
- * of bfqg->entity.new_weight is correctly
- * seen in that code.
- */
- smp_wmb();
- bfqg->entity.prio_changed = 1;
- }
+ if (bfqg)
+ bfq_group_set_weight(bfqg, val, 0);
}
spin_unlock_irq(&blkcg->lock);
return ret;
}
+static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
+ char *buf, size_t nbytes,
+ loff_t off)
+{
+ int ret;
+ struct blkg_conf_ctx ctx;
+ struct blkcg *blkcg = css_to_blkcg(of_css(of));
+ struct bfq_group *bfqg;
+ u64 v;
+
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
+ if (ret)
+ return ret;
+
+ if (sscanf(ctx.body, "%llu", &v) == 1) {
+ /* require "default" on dfl */
+ ret = -ERANGE;
+ if (!v)
+ goto out;
+ } else if (!strcmp(strim(ctx.body), "default")) {
+ v = 0;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ bfqg = blkg_to_bfqg(ctx.blkg);
+
+ ret = -ERANGE;
+ if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
+ bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
+ ret = 0;
+ }
+out:
+ blkg_conf_finish(&ctx);
+ return ret ?: nbytes;
+}
+
static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
- u64 weight;
- /* First unsigned long found in the file is used */
- int ret = kstrtoull(strim(buf), 0, &weight);
+ char *endp;
+ int ret;
+ u64 v;
- if (ret)
- return ret;
+ buf = strim(buf);
- ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
- return ret ?: nbytes;
+ /* "WEIGHT" or "default WEIGHT" sets the default weight */
+ v = simple_strtoull(buf, &endp, 0);
+ if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
+ ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
+ return ret ?: nbytes;
+ }
+
+ return bfq_io_set_device_weight(of, buf, nbytes, off);
}
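The dispatch above hinges on simple_strtoull() leaving *endp at the first unparsed character: a bare number takes the legacy (default-weight) path, anything else falls through to the per-device parser. A small sketch of that contract, with illustrative inputs:

char *endp;
u64 v;

v = simple_strtoull("300", &endp, 0);		/* v == 300, *endp == '\0': legacy path */
v = simple_strtoull("default 300", &endp, 0);	/* v == 0, *endp == 'd': sscanf("default %llu") matches */
v = simple_strtoull("8:16 300", &endp, 0);	/* v == 8, *endp == ':': per-device parser runs */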
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
@@ -935,17 +1075,34 @@
static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
{
- u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
- &blkcg_policy_bfq, off);
+ struct blkcg_gq *blkg = pd_to_blkg(pd);
+ struct blkcg_gq *pos_blkg;
+ struct cgroup_subsys_state *pos_css;
+ u64 sum = 0;
+
+ lockdep_assert_held(&blkg->q->queue_lock);
+
+ rcu_read_lock();
+ blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
+ struct bfq_stat *stat;
+
+ if (!pos_blkg->online)
+ continue;
+
+ stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
+ sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
+ }
+ rcu_read_unlock();
+
return __blkg_prfill_u64(sf, pd, sum);
}
static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
{
- struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
- &blkcg_policy_bfq,
- off);
+ struct blkg_rwstat_sample sum;
+
+ blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
return __blkg_prfill_rwstat(sf, pd, &sum);
}
@@ -983,12 +1140,13 @@
static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
{
- struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
- offsetof(struct blkcg_gq, stat_bytes));
- u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
- atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
+ struct blkg_rwstat_sample tmp;
- return __blkg_prfill_u64(sf, pd, sum >> 9);
+ blkg_rwstat_recursive_sum(pd->blkg, NULL,
+ offsetof(struct blkcg_gq, stat_bytes), &tmp);
+
+ return __blkg_prfill_u64(sf, pd,
+ (tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}
static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
@@ -1003,11 +1161,11 @@
struct blkg_policy_data *pd, int off)
{
struct bfq_group *bfqg = pd_to_bfqg(pd);
- u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
+ u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
u64 v = 0;
if (samples) {
- v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
+ v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
v = div64_u64(v, samples);
}
__blkg_prfill_u64(sf, pd, v);
@@ -1022,7 +1180,7 @@
0, false);
return 0;
}
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_BFQ_CGROUP_DEBUG */
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
@@ -1055,9 +1213,15 @@
{
.name = "bfq.weight",
.flags = CFTYPE_NOT_ON_ROOT,
- .seq_show = bfq_io_show_weight,
+ .seq_show = bfq_io_show_weight_legacy,
.write_u64 = bfq_io_set_weight_legacy,
},
+ {
+ .name = "bfq.weight_device",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = bfq_io_show_weight,
+ .write = bfq_io_set_weight,
+ },
/* statistics, covers only the tasks in the bfqg */
{
@@ -1070,7 +1234,7 @@
.private = (unsigned long)&blkcg_policy_bfq,
.seq_show = blkg_print_stat_ios,
},
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
{
.name = "bfq.time",
.private = offsetof(struct bfq_group, stats.time),
@@ -1100,9 +1264,9 @@
.private = offsetof(struct bfq_group, stats.queued),
.seq_show = bfqg_print_rwstat,
},
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_BFQ_CGROUP_DEBUG */
- /* the same statictics which cover the bfqg and its descendants */
+ /* the same statistics which cover the bfqg and its descendants */
{
.name = "bfq.io_service_bytes_recursive",
.private = (unsigned long)&blkcg_policy_bfq,
@@ -1113,7 +1277,7 @@
.private = (unsigned long)&blkcg_policy_bfq,
.seq_show = blkg_print_stat_ios_recursive,
},
-#ifdef CONFIG_DEBUG_BLK_CGROUP
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
{
.name = "bfq.time_recursive",
.private = offsetof(struct bfq_group, stats.time),
@@ -1167,7 +1331,7 @@
.private = offsetof(struct bfq_group, stats.dequeue),
.seq_show = bfqg_print_stat,
},
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_BFQ_CGROUP_DEBUG */
{ } /* terminate */
};
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 653100f..0c62144 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Budget Fair Queueing (BFQ) I/O scheduler.
*
@@ -12,21 +13,11 @@
*
* Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* BFQ is a proportional-share I/O scheduler, with some extra
* low-latency capabilities. BFQ also supports full hierarchical
* scheduling through cgroups. Next paragraphs provide an introduction
* on BFQ inner workings. Details on BFQ benefits, usage and
- * limitations can be found in Documentation/block/bfq-iosched.txt.
+ * limitations can be found in Documentation/block/bfq-iosched.rst.
*
* BFQ is a proportional-share storage-I/O scheduling algorithm based
* on the slice-by-slice service scheme of CFQ. But BFQ assigns
@@ -166,6 +157,7 @@
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
+BFQ_BFQQ_FNS(has_waker);
#undef BFQ_BFQQ_FNS \
/* Expiration time of sync (0) and async (1) requests, in ns. */
@@ -189,7 +181,7 @@
/*
* When a sync request is dispatched, the queue that contains that
* request, and all the ancestor entities of that queue, are charged
- * with the number of sectors of the request. In constrast, if the
+ * with the number of sectors of the request. In contrast, if the
* request is async, then the queue and its ancestor entities are
* charged with the number of sectors of the request, multiplied by
* the factor below. This throttles the bandwidth for async I/O,
@@ -217,7 +209,7 @@
* queue merging.
*
* As can be deduced from the low time limit below, queue merging, if
- * successful, happens at the very beggining of the I/O of the involved
+ * successful, happens at the very beginning of the I/O of the involved
* cooperating processes, as a consequence of the arrival of the very
* first requests from each cooperator. After that, there is very
* little chance to find cooperators.
@@ -230,13 +222,26 @@
#define BFQ_MIN_TT (2 * NSEC_PER_MSEC)
/* hw_tag detection: parallel requests threshold and min samples needed. */
-#define BFQ_HW_QUEUE_THRESHOLD 4
+#define BFQ_HW_QUEUE_THRESHOLD 3
#define BFQ_HW_QUEUE_SAMPLES 32
#define BFQQ_SEEK_THR (sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
+#define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
+ (get_sdist(last_pos, rq) > \
+ BFQQ_SEEK_THR && \
+ (!blk_queue_nonrot(bfqd->queue) || \
+ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
+/*
+ * Sync random I/O is likely to be confused with soft real-time I/O,
+ * because it is characterized by limited throughput and apparently
+ * isochronous arrival pattern. To avoid false positives, queues
+ * containing only random (seeky) I/O are prevented from being tagged
+ * as soft real-time.
+ */
+#define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1)
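seek_history is a 32-bit shift register of per-request seekiness samples. Elsewhere in this file it is updated roughly as follows, so a value of -1 (all bits set) means the last 32 samples were all seeky:

/* sketch of the update site (bfq_update_io_seektime) */
bfqq->seek_history <<= 1;
bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);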
/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES 32
@@ -399,9 +404,9 @@
unsigned long flags;
struct bfq_io_cq *icq;
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&q->queue_lock, flags);
icq = icq_to_bic(ioc_lookup_icq(ioc, q));
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&q->queue_lock, flags);
return icq;
}
@@ -428,7 +433,7 @@
/*
* Lifted from AS - choose which of rq1 and rq2 that is best served now.
- * We choose the request that is closesr to the head right now. Distance
+ * We choose the request that is closer to the head right now. Distance
* behind the head is penalized and only allowed to a certain extent.
*/
static struct request *bfq_choose_req(struct bfq_data *bfqd,
@@ -590,7 +595,16 @@
bfq_merge_time_limit);
}
-void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+/*
+ * The following function is not marked as __cold because it is
+ * actually cold, but for the same performance goal described in the
+ * comments on the likely() at the beginning of
+ * bfq_setup_cooperator(). Unexpectedly, to reach an even lower
+ * execution time for the case where this function is not invoked, we
+ * had to add an unlikely() in each involved if().
+ */
+void __cold
+bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
struct rb_node **p, *parent;
struct bfq_queue *__bfqq;
@@ -624,59 +638,73 @@
}
/*
- * Tell whether there are active queues or groups with differentiated weights.
- */
-static bool bfq_differentiated_weights(struct bfq_data *bfqd)
-{
- /*
- * For weights to differ, at least one of the trees must contain
- * at least two nodes.
- */
- return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
- (bfqd->queue_weights_tree.rb_node->rb_left ||
- bfqd->queue_weights_tree.rb_node->rb_right)
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
- ) ||
- (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
- (bfqd->group_weights_tree.rb_node->rb_left ||
- bfqd->group_weights_tree.rb_node->rb_right)
-#endif
- );
-}
-
-/*
- * The following function returns true if every queue must receive the
- * same share of the throughput (this condition is used when deciding
- * whether idling may be disabled, see the comments in the function
- * bfq_better_to_idle()).
+ * The following function returns false either if every active queue
+ * must receive the same share of the throughput (symmetric scenario),
+ * or, as a special case, if bfqq must receive a share of the
+ * throughput lower than or equal to the share that every other active
+ * queue must receive. If bfqq does sync I/O, then these are the only
+ * two cases where bfqq happens to be guaranteed its share of the
+ * throughput even if I/O dispatching is not plugged when bfqq remains
+ * temporarily empty (for more details, see the comments in the
+ * function bfq_better_to_idle()). For this reason, the return value
+ * of this function is used to check whether I/O-dispatch plugging can
+ * be avoided.
*
- * Such a scenario occurs when:
+ * The above first case (symmetric scenario) occurs when:
* 1) all active queues have the same weight,
- * 2) all active groups at the same level in the groups tree have the same
- * weight,
+ * 2) all active queues belong to the same I/O-priority class,
* 3) all active groups at the same level in the groups tree have the same
+ * weight,
+ * 4) all active groups at the same level in the groups tree have the same
* number of children.
*
- * Unfortunately, keeping the necessary state for evaluating exactly the
- * above symmetry conditions would be quite complex and time-consuming.
- * Therefore this function evaluates, instead, the following stronger
- * sub-conditions, for which it is much easier to maintain the needed
- * state:
+ * Unfortunately, keeping the necessary state for evaluating exactly
+ * the last two symmetry sub-conditions above would be quite complex
+ * and time consuming. Therefore this function evaluates, instead,
+ * only the following stronger three sub-conditions, for which it is
+ * much easier to maintain the needed state:
* 1) all active queues have the same weight,
- * 2) all active groups have the same weight,
- * 3) all active groups have at most one active child each.
- * In particular, the last two conditions are always true if hierarchical
- * support and the cgroups interface are not enabled, thus no state needs
- * to be maintained in this case.
+ * 2) all active queues belong to the same I/O-priority class,
+ * 3) there are no active groups.
+ * In particular, the last condition is always true if hierarchical
+ * support or the cgroups interface are not enabled, thus no state
+ * needs to be maintained in this case.
*/
-static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
+static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
{
- return !bfq_differentiated_weights(bfqd);
+ bool smallest_weight = bfqq &&
+ bfqq->weight_counter &&
+ bfqq->weight_counter ==
+ container_of(
+ rb_first_cached(&bfqd->queue_weights_tree),
+ struct bfq_weight_counter,
+ weights_node);
+
+ /*
+ * For queue weights to differ, queue_weights_tree must contain
+ * at least two nodes.
+ */
+ bool varied_queue_weights = !smallest_weight &&
+ !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
+ (bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
+ bfqd->queue_weights_tree.rb_root.rb_node->rb_right);
+
+ bool multiple_classes_busy =
+ (bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
+ (bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
+ (bfqd->busy_queues[1] && bfqd->busy_queues[2]);
+
+ return varied_queue_weights || multiple_classes_busy
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ || bfqd->num_groups_with_pending_reqs > 0
+#endif
+ ;
}
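Since busy_queues[] is indexed by I/O-priority class, the three pairwise tests above simply ask whether more than one class currently has busy queues; an equivalent formulation, as a sketch:

bool multiple_classes_busy =
	(!!bfqd->busy_queues[0] + !!bfqd->busy_queues[1] +
	 !!bfqd->busy_queues[2]) > 1;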
/*
* If the weight-counter tree passed as input contains no counter for
- * the weight of the input entity, then add that counter; otherwise just
+ * the weight of the input queue, then add that counter; otherwise just
* increment the existing counter.
*
* Note that weight-counter trees contain few nodes in mostly symmetric
@@ -687,25 +715,26 @@
* In most scenarios, the rate at which nodes are created/destroyed
* should be low too.
*/
-void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
- struct rb_root *root)
+void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ struct rb_root_cached *root)
{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct bfq_entity *entity = &bfqq->entity;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
+ bool leftmost = true;
/*
- * Do not insert if the entity is already associated with a
+ * Do not insert if the queue is already associated with a
* counter, which happens if:
- * 1) the entity is associated with a queue,
- * 2) a request arrival has caused the queue to become both
+ * 1) a request arrival has caused the queue to become both
* non-weight-raised, and hence change its weight, and
* backlogged; in this respect, each of the two events
* causes an invocation of this function,
- * 3) this is the invocation of this function caused by the
+ * 2) this is the invocation of this function caused by the
* second event. This second invocation is actually useless,
* and we handle this fact by exiting immediately. More
* efficient or clearer solutions might possibly be adopted.
*/
- if (entity->weight_counter)
+ if (bfqq->weight_counter)
return;
while (*new) {
@@ -715,77 +744,79 @@
parent = *new;
if (entity->weight == __counter->weight) {
- entity->weight_counter = __counter;
+ bfqq->weight_counter = __counter;
goto inc_counter;
}
if (entity->weight < __counter->weight)
new = &((*new)->rb_left);
- else
+ else {
new = &((*new)->rb_right);
+ leftmost = false;
+ }
}
- entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
- GFP_ATOMIC);
+ bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
+ GFP_ATOMIC);
/*
* In the unlucky event of an allocation failure, we just
- * exit. This will cause the weight of entity to not be
- * considered in bfq_differentiated_weights, which, in its
- * turn, causes the scenario to be deemed wrongly symmetric in
- * case entity's weight would have been the only weight making
- * the scenario asymmetric. On the bright side, no unbalance
- * will however occur when entity becomes inactive again (the
+ * exit. This will cause the weight of queue to not be
+ * considered in bfq_asymmetric_scenario, which, in its turn,
+ * causes the scenario to be deemed wrongly symmetric in case
+ * bfqq's weight would have been the only weight making the
+ * scenario asymmetric. On the bright side, no unbalance will
+ * however occur when bfqq becomes inactive again (the
* invocation of this function is triggered by an activation
- * of entity). In fact, bfq_weights_tree_remove does nothing
- * if !entity->weight_counter.
+ * of queue). In fact, bfq_weights_tree_remove does nothing
+ * if !bfqq->weight_counter.
*/
- if (unlikely(!entity->weight_counter))
+ if (unlikely(!bfqq->weight_counter))
return;
- entity->weight_counter->weight = entity->weight;
- rb_link_node(&entity->weight_counter->weights_node, parent, new);
- rb_insert_color(&entity->weight_counter->weights_node, root);
+ bfqq->weight_counter->weight = entity->weight;
+ rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
+ rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
+ leftmost);
inc_counter:
- entity->weight_counter->num_active++;
+ bfqq->weight_counter->num_active++;
+ bfqq->ref++;
}
/*
- * Decrement the weight counter associated with the entity, and, if the
+ * Decrement the weight counter associated with the queue, and, if the
* counter reaches 0, remove the counter from the tree.
* See the comments to the function bfq_weights_tree_add() for considerations
* about overhead.
*/
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
- struct bfq_entity *entity,
- struct rb_root *root)
+ struct bfq_queue *bfqq,
+ struct rb_root_cached *root)
{
- if (!entity->weight_counter)
+ if (!bfqq->weight_counter)
return;
- entity->weight_counter->num_active--;
- if (entity->weight_counter->num_active > 0)
+ bfqq->weight_counter->num_active--;
+ if (bfqq->weight_counter->num_active > 0)
goto reset_entity_pointer;
- rb_erase(&entity->weight_counter->weights_node, root);
- kfree(entity->weight_counter);
+ rb_erase_cached(&bfqq->weight_counter->weights_node, root);
+ kfree(bfqq->weight_counter);
reset_entity_pointer:
- entity->weight_counter = NULL;
+ bfqq->weight_counter = NULL;
+ bfq_put_queue(bfqq);
}
/*
- * Invoke __bfq_weights_tree_remove on bfqq and all its inactive
- * parent entities.
+ * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
+ * of active groups for each queue's inactive parent entity.
*/
void bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
struct bfq_entity *entity = bfqq->entity.parent;
- __bfq_weights_tree_remove(bfqd, &bfqq->entity,
- &bfqd->queue_weights_tree);
-
for_each_entity(entity) {
struct bfq_sched_data *sd = entity->my_sched_data;
@@ -797,18 +828,37 @@
* next_in_service for details on why
* in_service_entity must be checked too).
*
- * As a consequence, the weight of entity is
- * not to be removed. In addition, if entity
- * is active, then its parent entities are
- * active as well, and thus their weights are
- * not to be removed either. In the end, this
- * loop must stop here.
+ * As a consequence, its parent entities are
+ * active as well, and thus this loop must
+ * stop here.
*/
break;
}
- __bfq_weights_tree_remove(bfqd, entity,
- &bfqd->group_weights_tree);
+
+ /*
+ * The decrement of num_groups_with_pending_reqs is
+ * not performed immediately upon the deactivation of
+ * entity, but it is delayed to when it also happens
+ * that the first leaf descendant bfqq of entity gets
+ * all its pending requests completed. The following
+ * instructions perform this delayed decrement, if
+ * needed. See the comments on
+ * num_groups_with_pending_reqs for details.
+ */
+ if (entity->in_groups_with_pending_reqs) {
+ entity->in_groups_with_pending_reqs = false;
+ bfqd->num_groups_with_pending_reqs--;
+ }
}
+
+ /*
+ * Next function is invoked last, because it causes bfqq to be
+ * freed if the following holds: bfqq is not in service and
+ * has no dispatched request. DO NOT use bfqq after the next
+ * function invocation.
+ */
+ __bfq_weights_tree_remove(bfqd, bfqq,
+ &bfqd->queue_weights_tree);
}
/*
@@ -864,7 +914,8 @@
static unsigned long bfq_serv_to_charge(struct request *rq,
struct bfq_queue *bfqq)
{
- if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
+ if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
+ bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
return blk_rq_sectors(rq);
return blk_rq_sectors(rq) * bfq_async_charge_factor;
@@ -898,8 +949,10 @@
*/
return;
- new_budget = max_t(unsigned long, bfqq->max_budget,
- bfq_serv_to_charge(next_rq, bfqq));
+ new_budget = max_t(unsigned long,
+ max_t(unsigned long, bfqq->max_budget,
+ bfq_serv_to_charge(next_rq, bfqq)),
+ entity->service);
if (entity->budget != new_budget) {
entity->budget = new_budget;
bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
@@ -928,7 +981,7 @@
* of several files
* mplayer took 23 seconds to start, if constantly weight-raised.
*
- * As for higher values than that accomodating the above bad
+ * As for higher values than that accommodating the above bad
* scenario, tests show that higher values would often yield
* the opposite of the desired result, i.e., would worsen
* responsiveness by allowing non-interactive applications to
@@ -967,6 +1020,7 @@
else
bfq_clear_bfqq_IO_bound(bfqq);
+ bfqq->entity.new_weight = bic->saved_weight;
bfqq->ttime = bic->saved_ttime;
bfqq->wr_coeff = bic->saved_wr_coeff;
bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
@@ -1002,7 +1056,8 @@
static int bfqq_process_refs(struct bfq_queue *bfqq)
{
- return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
+ return bfqq->ref - bfqq->allocated - bfqq->entity.on_st -
+ (bfqq->weight_counter != NULL);
}
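The new (bfqq->weight_counter != NULL) term mirrors a reference introduced elsewhere in this patch: bfq_weights_tree_add() now takes bfqq->ref++ when it attaches a weight counter, and __bfq_weights_tree_remove() drops that reference with bfq_put_queue(). Schematically:

/* reference pairing added by this patch (sketch) */
bfq_weights_tree_add(bfqd, bfqq, &bfqd->queue_weights_tree);	/* bfqq->ref++ */
__bfq_weights_tree_remove(bfqd, bfqq, &bfqd->queue_weights_tree); /* bfq_put_queue(bfqq) */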
/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
@@ -1013,8 +1068,18 @@
hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
hlist_del_init(&item->burst_list_node);
- hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
- bfqd->burst_size = 1;
+
+ /*
+ * Start the creation of a new burst list only if there is no
+ * active queue. See comments on the conditional invocation of
+ * bfq_handle_burst().
+ */
+ if (bfq_tot_busy_queues(bfqd) == 0) {
+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
+ bfqd->burst_size = 1;
+ } else
+ bfqd->burst_size = 0;
+
bfqd->burst_parent_entity = bfqq->entity.parent;
}
@@ -1070,7 +1135,8 @@
* many parallel threads/processes. Examples are systemd during boot,
* or git grep. To help these processes get their job done as soon as
* possible, it is usually better to not grant either weight-raising
- * or device idling to their queues.
+ * or device idling to their queues, unless these queues must be
+ * protected from the I/O flowing through other active queues.
*
* In this comment we describe, firstly, the reasons why this fact
* holds, and, secondly, the next function, which implements the main
@@ -1082,7 +1148,10 @@
* cumulatively served, the sooner the target job of these queues gets
* completed. As a consequence, weight-raising any of these queues,
* which also implies idling the device for it, is almost always
- * counterproductive. In most cases it just lowers throughput.
+ * counterproductive, unless there are other active queues to isolate
+ * these new queues from. If there are no other active queues, then
+ * weight-raising these new queues just lowers throughput in most
+ * cases.
*
* On the other hand, a burst of queue creations may be caused also by
* the start of an application that does not consist of a lot of
@@ -1116,14 +1185,16 @@
* are very rare. They typically occur if some service happens to
* start doing I/O exactly when the interactive task starts.
*
- * Turning back to the next function, it implements all the steps
- * needed to detect the occurrence of a large burst and to properly
- * mark all the queues belonging to it (so that they can then be
- * treated in a different way). This goal is achieved by maintaining a
- * "burst list" that holds, temporarily, the queues that belong to the
- * burst in progress. The list is then used to mark these queues as
- * belonging to a large burst if the burst does become large. The main
- * steps are the following.
+ * Turning back to the next function, it is invoked only if there are
+ * no active queues (apart from active queues that would belong to the
+ * same, possible burst bfqq would belong to), and it implements all
+ * the steps needed to detect the occurrence of a large burst and to
+ * properly mark all the queues belonging to it (so that they can then
+ * be treated in a different way). This goal is achieved by
+ * maintaining a "burst list" that holds, temporarily, the queues that
+ * belong to the burst in progress. The list is then used to mark
+ * these queues as belonging to a large burst if the burst does become
+ * large. The main steps are the following.
*
* . when the very first queue is created, the queue is inserted into the
* list (as it could be the first queue in a possible burst)
@@ -1357,21 +1428,31 @@
* mechanism may be re-designed in such a way to make it possible to
* know whether preemption is needed without needing to update service
* trees). In addition, queue preemptions almost always cause random
- * I/O, and thus loss of throughput. Because of these facts, the next
- * function adopts the following simple scheme to avoid both costly
- * operations and too frequent preemptions: it requests the expiration
- * of the in-service queue (unconditionally) only for queues that need
- * to recover a hole, or that either are weight-raised or deserve to
- * be weight-raised.
+ * I/O, which may in turn cause loss of throughput. Finally, there may
+ * even be no in-service queue when the next function is invoked (so,
+ * no queue to compare timestamps with). Because of these facts, the
+ * next function adopts the following simple scheme to avoid costly
+ * operations, too frequent preemptions and too many dependencies on
+ * the state of the scheduler: it requests the expiration of the
+ * in-service queue (unconditionally) only for queues that need to
+ * recover a hole. Then it delegates to other parts of the code the
+ * responsibility of handling the above case 2.
*/
static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
- bool arrived_in_time,
- bool wr_or_deserves_wr)
+ bool arrived_in_time)
{
struct bfq_entity *entity = &bfqq->entity;
- if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
+ /*
+ * In the next compound condition, we check also whether there
+ * is some budget left, because otherwise there is no point in
+ * trying to go on serving bfqq with this same budget: bfqq
+ * would be expired immediately after being selected for
+ * service. This would only cause useless overhead.
+ */
+ if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time &&
+ bfq_bfqq_budget_left(bfqq) > 0) {
/*
* We do not clear the flag non_blocking_wait_rq here, as
* the latter is used in bfq_activate_bfqq to signal
@@ -1414,7 +1495,7 @@
entity->budget = max_t(unsigned long, bfqq->max_budget,
bfq_serv_to_charge(bfqq->next_rq, bfqq));
bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
- return wr_or_deserves_wr;
+ return false;
}
/*
@@ -1532,6 +1613,36 @@
bfqd->bfq_wr_min_idle_time);
}
+
+/*
+ * Return true if bfqq is in a higher priority class, or has a higher
+ * weight than the in-service queue.
+ */
+static bool bfq_bfqq_higher_class_or_weight(struct bfq_queue *bfqq,
+ struct bfq_queue *in_serv_bfqq)
+{
+ int bfqq_weight, in_serv_weight;
+
+ if (bfqq->ioprio_class < in_serv_bfqq->ioprio_class)
+ return true;
+
+ if (in_serv_bfqq->entity.parent == bfqq->entity.parent) {
+ bfqq_weight = bfqq->entity.weight;
+ in_serv_weight = in_serv_bfqq->entity.weight;
+ } else {
+ if (bfqq->entity.parent)
+ bfqq_weight = bfqq->entity.parent->weight;
+ else
+ bfqq_weight = bfqq->entity.weight;
+ if (in_serv_bfqq->entity.parent)
+ in_serv_weight = in_serv_bfqq->entity.parent->weight;
+ else
+ in_serv_weight = in_serv_bfqq->entity.weight;
+ }
+
+ return bfqq_weight > in_serv_weight;
+}
+
static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
int old_wr_coeff,
@@ -1560,6 +1671,7 @@
*/
in_burst = bfq_bfqq_in_large_burst(bfqq);
soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
+ !BFQQ_TOTALLY_SEEKY(bfqq) &&
!in_burst &&
time_is_before_jiffies(bfqq->soft_rt_next_start) &&
bfqq->dispatched == 0;
@@ -1575,8 +1687,7 @@
*/
bfqq_wants_to_preempt =
bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
- arrived_in_time,
- wr_or_deserves_wr);
+ arrived_in_time);
/*
* If bfqq happened to be activated in a burst, but has been
@@ -1641,21 +1752,111 @@
/*
* Expire in-service queue only if preemption may be needed
- * for guarantees. In this respect, the function
- * next_queue_may_preempt just checks a simple, necessary
- * condition, and not a sufficient condition based on
- * timestamps. In fact, for the latter condition to be
- * evaluated, timestamps would need first to be updated, and
- * this operation is quite costly (see the comments on the
- * function bfq_bfqq_update_budg_for_activation).
+ * for guarantees. In particular, we care only about two
+ * cases. The first is that bfqq has to recover a service
+ * hole, as explained in the comments on
+ * bfq_bfqq_update_budg_for_activation(), i.e., that
+ * bfqq_wants_to_preempt is true. However, if bfqq does not
+ * carry time-critical I/O, then bfqq's bandwidth is less
+ * important than that of queues that carry time-critical I/O.
+ * So, as a further constraint, we consider this case only if
+ * bfqq is at least as weight-raised, i.e., at least as time
+ * critical, as the in-service queue.
+ *
+ * The second case is that bfqq is in a higher priority class,
+ * or has a higher weight than the in-service queue. If this
+ * condition does not hold, we don't care because, even if
+ * bfqq does not start to be served immediately, the resulting
+ * delay for bfqq's I/O is however lower or much lower than
+ * the ideal completion time to be guaranteed to bfqq's I/O.
+ *
+ * In both cases, preemption is needed only if, according to
+ * the timestamps of both bfqq and of the in-service queue,
+ * bfqq actually is the next queue to serve. So, to reduce
+ * useless preemptions, the return value of
+ * next_queue_may_preempt() is considered in the next compound
+ * condition too. Yet next_queue_may_preempt() just checks a
+ * simple, necessary condition for bfqq to be the next queue
+ * to serve. In fact, to evaluate a sufficient condition, the
+ * timestamps of the in-service queue would need to be
+ * updated, and this operation is quite costly (see the
+ * comments on bfq_bfqq_update_budg_for_activation()).
*/
- if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
- bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
+ if (bfqd->in_service_queue &&
+ ((bfqq_wants_to_preempt &&
+ bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
+ bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue)) &&
next_queue_may_preempt(bfqd))
bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
false, BFQQE_PREEMPTED);
}
+static void bfq_reset_inject_limit(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+{
+ /* invalidate baseline total service time */
+ bfqq->last_serv_time_ns = 0;
+
+ /*
+ * Reset pointer in case we are waiting for
+ * some request completion.
+ */
+ bfqd->waited_rq = NULL;
+
+ /*
+ * If bfqq has a short think time, then start by setting the
+ * inject limit to 0 prudentially, because the service time of
+ * an injected I/O request may be higher than the think time
+ * of bfqq, and therefore, if one request was injected when
+ * bfqq remains empty, this injected request might delay the
+ * service of the next I/O request for bfqq significantly. In
+ * case bfqq can actually tolerate some injection, then the
+ * adaptive update will however raise the limit soon. This
+ * lucky circumstance holds exactly because bfqq has a short
+ * think time, and thus, after remaining empty, is likely to
+ * get new I/O enqueued---and then completed---before being
+ * expired. This is the very pattern that gives the
+ * limit-update algorithm the chance to measure the effect of
+ * injection on request service times, and then to update the
+ * limit accordingly.
+ *
+ * However, in the following special case, the inject limit is
+ * left to 1 even if the think time is short: bfqq's I/O is
+ * synchronized with that of some other queue, i.e., bfqq may
+ * receive new I/O only after the I/O of the other queue is
+ * completed. Keeping the inject limit to 1 allows the
+ * blocking I/O to be served while bfqq is in service. And
+ * this is very convenient both for bfqq and for overall
+ * throughput, as explained in detail in the comments in
+ * bfq_update_has_short_ttime().
+ *
+ * On the opposite end, if bfqq has a long think time, then
+ * start directly at 1, because:
+ * a) on the bright side, keeping at most one request in
+ * service in the drive is unlikely to cause any harm to the
+ * latency of bfqq's requests, as the service time of a single
+ * request is likely to be lower than the think time of bfqq;
+ * b) on the downside, after becoming empty, bfqq is likely to
+ * expire before getting its next request. With this request
+ * arrival pattern, it is very hard to sample total service
+ * times and update the inject limit accordingly (see comments
+ * on bfq_update_inject_limit()). So the limit is likely to be
+ * never, or at least seldom, updated. As a consequence, by
+	 * setting the limit to 1, we make sure that injection is never
+	 * ruled out entirely for bfqq. On the downside, this proactive step
+ * further reduces chances to actually compute the baseline
+ * total service time. Thus it reduces chances to execute the
+ * limit-update algorithm and possibly raise the limit to more
+ * than 1.
+ */
+ if (bfq_bfqq_has_short_ttime(bfqq))
+ bfqq->inject_limit = 0;
+ else
+ bfqq->inject_limit = 1;
+
+ bfqq->decrease_time_jif = jiffies;
+}
+
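As a stand-alone illustration (a sketch, not part of the patch; the function name is hypothetical), the initial-limit policy above reduces to:

    #include <stdbool.h>

    /* Hypothetical model of the initial inject-limit choice described
     * above: short think time -> 0, long think time -> 1. */
    unsigned int initial_inject_limit(bool has_short_ttime)
    {
            return has_short_ttime ? 0 : 1;
    }
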
static void bfq_add_request(struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
@@ -1668,6 +1869,180 @@
bfqq->queued[rq_is_sync(rq)]++;
bfqd->queued++;
+ if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) {
+ /*
+ * Detect whether bfqq's I/O seems synchronized with
+ * that of some other queue, i.e., whether bfqq, after
+ * remaining empty, happens to receive new I/O only
+ * right after some I/O request of the other queue has
+ * been completed. We call waker queue the other
+ * queue, and we assume, for simplicity, that bfqq may
+ * have at most one waker queue.
+ *
+ * A remarkable throughput boost can be reached by
+ * unconditionally injecting the I/O of the waker
+ * queue, every time a new bfq_dispatch_request
+ * happens to be invoked while I/O is being plugged
+ * for bfqq. In addition to boosting throughput, this
+ * unblocks bfqq's I/O, thereby improving bandwidth
+ * and latency for bfqq. Note that these same results
+ * may be achieved with the general injection
+ * mechanism, but less effectively. For details on
+ * this aspect, see the comments on the choice of the
+ * queue for injection in bfq_select_queue().
+ *
+ * Turning back to the detection of a waker queue, a
+ * queue Q is deemed as a waker queue for bfqq if, for
+ * two consecutive times, bfqq happens to become non
+ * empty right after a request of Q has been
+ * completed. In particular, on the first time, Q is
+ * tentatively set as a candidate waker queue, while
+ * on the second time, the flag
+ * bfq_bfqq_has_waker(bfqq) is set to confirm that Q
+ * is a waker queue for bfqq. These detection steps
+ * are performed only if bfqq has a long think time,
+ * so as to make it more likely that bfqq's I/O is
+ * actually being blocked by a synchronization. This
+ * last filter, plus the above two-times requirement,
+ * make false positives less likely.
+ *
+ * NOTE
+ *
+ * The sooner a waker queue is detected, the sooner
+ * throughput can be boosted by injecting I/O from the
+ * waker queue. Fortunately, detection is likely to be
+ * actually fast, for the following reasons. While
+ * blocked by synchronization, bfqq has a long think
+ * time. This implies that bfqq's inject limit is at
+ * least equal to 1 (see the comments in
+ * bfq_update_inject_limit()). So, thanks to
+ * injection, the waker queue is likely to be served
+ * during the very first I/O-plugging time interval
+ * for bfqq. This triggers the first step of the
+ * detection mechanism. Thanks again to injection, the
+ * candidate waker queue is then likely to be
+ * confirmed no later than during the next
+ * I/O-plugging interval for bfqq.
+ */
+ if (bfqd->last_completed_rq_bfqq &&
+ !bfq_bfqq_has_short_ttime(bfqq) &&
+ ktime_get_ns() - bfqd->last_completion <
+ 200 * NSEC_PER_USEC) {
+ if (bfqd->last_completed_rq_bfqq != bfqq &&
+ bfqd->last_completed_rq_bfqq !=
+ bfqq->waker_bfqq) {
+ /*
+ * First synchronization detected with
+ * a candidate waker queue, or with a
+ * different candidate waker queue
+ * from the current one.
+ */
+ bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
+
+ /*
+ * If the waker queue disappears, then
+ * bfqq->waker_bfqq must be reset. To
+ * this goal, we maintain in each
+ * waker queue a list, woken_list, of
+ * all the queues that reference the
+ * waker queue through their
+ * waker_bfqq pointer. When the waker
+ * queue exits, the waker_bfqq pointer
+ * of all the queues in the woken_list
+ * is reset.
+ *
+ * In addition, if bfqq is already in
+ * the woken_list of a waker queue,
+ * then, before being inserted into
+ * the woken_list of a new waker
+ * queue, bfqq must be removed from
+ * the woken_list of the old waker
+ * queue.
+ */
+ if (!hlist_unhashed(&bfqq->woken_list_node))
+ hlist_del_init(&bfqq->woken_list_node);
+ hlist_add_head(&bfqq->woken_list_node,
+ &bfqd->last_completed_rq_bfqq->woken_list);
+
+ bfq_clear_bfqq_has_waker(bfqq);
+ } else if (bfqd->last_completed_rq_bfqq ==
+ bfqq->waker_bfqq &&
+ !bfq_bfqq_has_waker(bfqq)) {
+ /*
+ * synchronization with waker_bfqq
+ * seen for the second time
+ */
+ bfq_mark_bfqq_has_waker(bfqq);
+ }
+ }
+
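In isolation, the two-step detection just implemented behaves like the following self-contained sketch (hypothetical toy names, not the patch's code; it assumes the caller has already applied the long-think-time filter and the 200 us completion-recency window):

    #include <stdbool.h>

    struct toy_queue {
            struct toy_queue *waker; /* candidate or confirmed waker */
            bool has_waker;          /* set only on the 2nd detection */
    };

    /* Called when q turns non-empty right after a completion of
     * last_completed. */
    void toy_note_new_io(struct toy_queue *q, struct toy_queue *last_completed)
    {
            if (!last_completed || last_completed == q)
                    return;
            if (last_completed != q->waker) {
                    /* first match: remember the candidate waker */
                    q->waker = last_completed;
                    q->has_waker = false;
            } else if (!q->has_waker) {
                    /* same queue matched twice in a row: confirm it */
                    q->has_waker = true;
            }
    }
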
+ /*
+ * Periodically reset inject limit, to make sure that
+ * the latter eventually drops in case workload
+ * changes, see step (3) in the comments on
+ * bfq_update_inject_limit().
+ */
+ if (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
+ msecs_to_jiffies(1000)))
+ bfq_reset_inject_limit(bfqd, bfqq);
+
+ /*
+ * The following conditions must hold to setup a new
+ * sampling of total service time, and then a new
+ * update of the inject limit:
+ * - bfqq is in service, because the total service
+ * time is evaluated only for the I/O requests of
+ * the queues in service;
+ * - this is the right occasion to compute or to
+ * lower the baseline total service time, because
+ * there are actually no requests in the drive,
+ * or
+ * the baseline total service time is available, and
+ * this is the right occasion to compute the other
+ * quantity needed to update the inject limit, i.e.,
+ * the total service time caused by the amount of
+ * injection allowed by the current value of the
+ * limit. It is the right occasion because injection
+ * has actually been performed during the service
+ * hole, and there are still in-flight requests,
+ * which are very likely to be exactly the injected
+ * requests, or part of them;
+ * - the minimum interval for sampling the total
+ * service time and updating the inject limit has
+ * elapsed.
+ */
+ if (bfqq == bfqd->in_service_queue &&
+ (bfqd->rq_in_driver == 0 ||
+ (bfqq->last_serv_time_ns > 0 &&
+ bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
+ time_is_before_eq_jiffies(bfqq->decrease_time_jif +
+ msecs_to_jiffies(10))) {
+ bfqd->last_empty_occupied_ns = ktime_get_ns();
+ /*
+ * Start the state machine for measuring the
+ * total service time of rq: setting
+ * wait_dispatch will cause bfqd->waited_rq to
+ * be set when rq will be dispatched.
+ */
+ bfqd->wait_dispatch = true;
+ /*
+ * If there is no I/O in service in the drive,
+ * then possible injection occurred before the
+ * arrival of rq will not affect the total
+ * service time of rq. So the injection limit
+ * must not be updated as a function of such
+ * total service time, unless new injection
+ * occurs before rq is completed. To have the
+ * injection limit updated only in the latter
+ * case, reset rqs_injected here (rqs_injected
+ * will be set in case injection is performed
+ * on bfqq before rq is completed).
+ */
+ if (bfqd->rq_in_driver == 0)
+ bfqd->rqs_injected = false;
+ }
+ }
+
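Outside any kernel context, the compound trigger above can be restated with stand-in parameters as follows (a sketch, not the patch's code):

    #include <stdbool.h>

    /* Toy restatement of the conditions for starting a new sampling
     * of the total service time (all names are stand-ins). */
    bool toy_start_sampling(bool bfqq_in_service, unsigned int rq_in_driver,
                            unsigned long long last_serv_time_ns,
                            bool rqs_injected, bool min_interval_elapsed)
    {
            bool right_occasion =
                    rq_in_driver == 0 || /* drive empty: (re)compute baseline */
                    (last_serv_time_ns > 0 && rqs_injected && rq_in_driver > 0);

            return bfqq_in_service && right_occasion && min_interval_elapsed;
    }
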
elv_rb_add(&bfqq->sort_list, rq);
/*
@@ -1679,8 +2054,9 @@
/*
* Adjust priority tree position, if next_rq changes.
+ * See comments on bfq_pos_tree_add_move() for the unlikely().
*/
- if (prev != bfqq->next_rq)
+ if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
bfq_pos_tree_add_move(bfqd, bfqq);
if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
@@ -1820,7 +2196,9 @@
bfqq->pos_root = NULL;
}
} else {
- bfq_pos_tree_add_move(bfqd, bfqq);
+ /* see comments on bfq_pos_tree_add_move() for the unlikely() */
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
if (rq->cmd_flags & REQ_META)
@@ -1828,7 +2206,8 @@
}
-static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+ unsigned int nr_segs)
{
struct request_queue *q = hctx->queue;
struct bfq_data *bfqd = q->elevator->elevator_data;
@@ -1851,7 +2230,7 @@
bfqd->bio_bfqq = NULL;
bfqd->bio_bic = bic;
- ret = blk_mq_sched_try_merge(q, bio, &free);
+ ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
if (free)
blk_mq_free_request(free);
@@ -1886,9 +2265,14 @@
blk_rq_pos(container_of(rb_prev(&req->rb_node),
struct request, rb_node))) {
struct bfq_queue *bfqq = bfq_init_rq(req);
- struct bfq_data *bfqd = bfqq->bfqd;
+ struct bfq_data *bfqd;
struct request *prev, *next_rq;
+ if (!bfqq)
+ return;
+
+ bfqd = bfqq->bfqd;
+
/* Reposition request in its sort_list */
elv_rb_del(&bfqq->sort_list, req);
elv_rb_add(&bfqq->sort_list, req);
@@ -1905,7 +2289,12 @@
*/
if (prev != bfqq->next_rq) {
bfq_updated_next_req(bfqd, bfqq);
- bfq_pos_tree_add_move(bfqd, bfqq);
+ /*
+ * See comments on bfq_pos_tree_add_move() for
+ * the unlikely().
+ */
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
}
}
@@ -1930,6 +2319,9 @@
struct bfq_queue *bfqq = bfq_init_rq(rq),
*next_bfqq = bfq_init_rq(next);
+ if (!bfqq)
+ return;
+
/*
* If next and rq belong to the same bfq_queue and next is older
* than rq, then reposition rq in the fifo (by substituting next
@@ -2188,6 +2580,46 @@
struct bfq_queue *in_service_bfqq, *new_bfqq;
/*
+ * Do not perform queue merging if the device is non
+ * rotational and performs internal queueing. In fact, such a
+ * device reaches a high speed through internal parallelism
+ * and pipelining. This means that, to reach a high
+ * throughput, it must have many requests enqueued at the same
+ * time. But, in this configuration, the internal scheduling
+ * algorithm of the device does exactly the job of queue
+ * merging: it reorders requests so as to obtain as much as
+ * possible a sequential I/O pattern. As a consequence, with
+ * the workload generated by processes doing interleaved I/O,
+ * the throughput reached by the device is likely to be the
+ * same, with and without queue merging.
+ *
+ * Disabling merging also provides a remarkable benefit in
+ * terms of throughput. Merging tends to make many workloads
+ * artificially more uneven, because of shared queues
+ * remaining non empty for incomparably more time than
+ * non-merged queues. This may accentuate workload
+ * asymmetries. For example, if one of the queues in a set of
+ * merged queues has a higher weight than a normal queue, then
+ * the shared queue may inherit such a high weight and, by
+ * staying almost always active, may force BFQ to perform I/O
+ * plugging most of the time. This evidently makes it harder
+ * for BFQ to let the device reach a high throughput.
+ *
+ * Finally, the likely() macro below is not used because one
+ * of the two branches is more likely than the other, but to
+ * have the code path after the following if() executed as
+ * fast as possible for the case of a non rotational device
+ * with queueing. We want it because this is the fastest kind
+ * of device. On the opposite end, the likely() may lengthen
+ * the execution time of BFQ for the case of slower devices
+ * (rotational or at least without queueing). But in this case
+ * the execution time of BFQ matters very little, if not at
+ * all.
+ */
+ if (likely(bfqd->nonrot_with_queueing))
+ return NULL;
+
+ /*
* Prevent bfqq from being merged if it has been created too
* long ago. The idea is that true cooperating processes, and
* thus their associated bfq_queues, are supposed to be
@@ -2208,14 +2640,15 @@
return NULL;
/* If there is only one backlogged queue, don't search. */
- if (bfqd->busy_queues == 1)
+ if (bfq_tot_busy_queues(bfqd) == 1)
return NULL;
in_service_bfqq = bfqd->in_service_queue;
if (in_service_bfqq && in_service_bfqq != bfqq &&
likely(in_service_bfqq != &bfqd->oom_bfqq) &&
- bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
+ bfq_rq_close_to_sector(io_struct, request,
+ bfqd->in_serv_last_pos) &&
bfqq->entity.parent == in_service_bfqq->entity.parent &&
bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
@@ -2249,6 +2682,7 @@
if (!bic)
return;
+ bic->saved_weight = bfqq->entity.orig_weight;
bic->saved_ttime = bfqq->ttime;
bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
@@ -2267,6 +2701,7 @@
* to enjoy weight raising if split soon.
*/
bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
+ bic->saved_wr_start_at_switch_to_srt = bfq_smallest_from_now();
bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
bic->saved_last_wr_start_finish = jiffies;
} else {
@@ -2278,6 +2713,28 @@
}
}
+
+static
+void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{
+ /*
+ * To prevent bfqq's service guarantees from being violated,
+ * bfqq may be left busy, i.e., queued for service, even if
+ * empty (see comments in __bfq_bfqq_expire() for
+ * details). But, if no process will send requests to bfqq any
+ * longer, then there is no point in keeping bfqq queued for
+ * service. In addition, keeping bfqq queued for service, but
+ * with no process ref any longer, may have caused bfqq to be
+ * freed when dequeued from service. But this is assumed to
+ * never happen.
+ */
+ if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
+ bfqq != bfqd->in_service_queue)
+ bfq_del_bfqq_busy(bfqd, bfqq, false);
+
+ bfq_put_queue(bfqq);
+}
+
static void
bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
@@ -2337,9 +2794,18 @@
* assignment causes no harm).
*/
new_bfqq->bic = NULL;
+ /*
+ * If the queue is shared, the pid is the pid of one of the associated
+ * processes. Which pid depends on the exact sequence of merge events
+ * the queue underwent. So printing such a pid is useless and confusing
+ * because it reports a random pid between those of the associated
+ * processes.
+ * We mark such a queue with a pid -1, and then print SHARED instead of
+ * a pid in logging messages.
+ */
+ new_bfqq->pid = -1;
bfqq->bic = NULL;
- /* release process reference to bfqq */
- bfq_put_queue(bfqq);
+ bfq_release_process_ref(bfqd, bfqq);
}
static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -2371,8 +2837,8 @@
/*
* bic still points to bfqq, then it has not yet been
* redirected to some other bfq_queue, and a queue
- * merge beween bfqq and new_bfqq can be safely
- * fulfillled, i.e., bic can be redirected to new_bfqq
+ * merge between bfqq and new_bfqq can be safely
+ * fulfilled, i.e., bic can be redirected to new_bfqq
* and bfqq can be put.
*/
bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
@@ -2506,10 +2972,14 @@
* queue).
*/
if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
- bfq_symmetric_scenario(bfqd))
+ !bfq_asymmetric_scenario(bfqd, bfqq))
sl = min_t(u64, sl, BFQ_MIN_TT);
+ else if (bfqq->wr_coeff > 1)
+ sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
bfqd->last_idling_start = ktime_get();
+ bfqd->last_idling_start_jiffies = jiffies;
+
hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
HRTIMER_MODE_REL);
bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
@@ -2733,7 +3203,7 @@
if ((bfqd->rq_in_driver > 0 ||
now_ns - bfqd->last_completion < BFQ_MIN_TT)
- && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
+ && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
bfqd->sequential_samples++;
bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
@@ -2755,6 +3225,8 @@
bfq_update_rate_reset(bfqd, rq);
update_last_values:
bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
+ if (RQ_BFQQ(rq) == bfqd->in_service_queue)
+ bfqd->in_serv_last_pos = bfqd->last_position;
bfqd->last_dispatch = now_ns;
}
@@ -2783,7 +3255,205 @@
bfq_remove_request(q, rq);
}
-static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+/*
+ * There is a case where idling does not have to be performed for
+ * throughput concerns, but to preserve the throughput share of
+ * the process associated with bfqq.
+ *
+ * To introduce this case, we can note that allowing the drive
+ * to enqueue more than one request at a time, and hence
+ * delegating de facto final scheduling decisions to the
+ * drive's internal scheduler, entails loss of control on the
+ * actual request service order. In particular, the critical
+ * situation is when requests from different processes happen
+ * to be present, at the same time, in the internal queue(s)
+ * of the drive. In such a situation, the drive, by deciding
+ * the service order of the internally-queued requests, does
+ * determine also the actual throughput distribution among
+ * these processes. But the drive typically has no notion or
+ * concern about per-process throughput distribution, and
+ * makes its decisions only on a per-request basis. Therefore,
+ * the service distribution enforced by the drive's internal
+ * scheduler is likely to coincide with the desired throughput
+ * distribution only in a completely symmetric, or favorably
+ * skewed scenario where:
+ * (i-a) each of these processes must get the same throughput as
+ * the others,
+ * (i-b) in case (i-a) does not hold, the process associated with
+ * bfqq must receive a throughput lower than or equal to that of
+ * any of the other processes;
+ * (ii) the I/O of each process has the same properties, in
+ * terms of locality (sequential or random), direction
+ * (reads or writes), request sizes, greediness
+ * (from I/O-bound to sporadic), and so on.
+ *
+ * In fact, in such a scenario, the drive tends to treat the requests
+ * of each process in about the same way as the requests of the
+ * others, and thus to provide each of these processes with about the
+ * same throughput. This is exactly the desired throughput
+ * distribution if (i-a) holds, or, if (i-b) holds instead, this is an
+ * even more convenient distribution for (the process associated with)
+ * bfqq.
+ *
+ * In contrast, in any asymmetric or unfavorable scenario, device
+ * idling (I/O-dispatch plugging) is certainly needed to guarantee
+ * that bfqq receives its assigned fraction of the device throughput
+ * (see [1] for details).
+ *
+ * The problem is that idling may significantly reduce throughput with
+ * certain combinations of types of I/O and devices. An important
+ * example is sync random I/O on flash storage with command
+ * queueing. So, unless bfqq falls in cases where idling also boosts
+ * throughput, it is important to check conditions (i-a), (i-b) and
+ * (ii) accurately, so as to avoid idling when not strictly needed for
+ * service guarantees.
+ *
+ * Unfortunately, it is extremely difficult to thoroughly check
+ * condition (ii). And, in case there are active groups, it becomes
+ * very difficult to check conditions (i-a) and (i-b) too. In fact,
+ * if there are active groups, then, for conditions (i-a) or (i-b) to
+ * become false 'indirectly', it is enough that an active group
+ * contains more active processes or sub-groups than some other active
+ * group. More precisely, for conditions (i-a) or (i-b) to become
+ * false because of such a group, it is not even necessary that the
+ * group is (still) active: it is sufficient that, even if the group
+ * has become inactive, some of its descendant processes still have
+ * some request already dispatched but still waiting for
+ * completion. In fact, requests have still to be guaranteed their
+ * share of the throughput even after being dispatched. In this
+ * respect, it is easy to show that, if a group frequently becomes
+ * inactive while still having in-flight requests, and if, when this
+ * happens, the group is not considered in the calculation of whether
+ * the scenario is asymmetric, then the group may fail to be
+ * guaranteed its fair share of the throughput (basically because
+ * idling may not be performed for the descendant processes of the
+ * group, but it had to be). We address this issue with the following
+ * bi-modal behavior, implemented in the function
+ * bfq_asymmetric_scenario().
+ *
+ * If there are groups with requests waiting for completion
+ * (as commented above, some of these groups may even be
+ * already inactive), then the scenario is tagged as
+ * asymmetric, conservatively, without checking any of the
+ * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
+ * This behavior matches also the fact that groups are created
+ * exactly if controlling I/O is a primary concern (to
+ * preserve bandwidth and latency guarantees).
+ *
+ * On the opposite end, if there are no groups with requests waiting
+ * for completion, then only conditions (i-a) and (i-b) are actually
+ * controlled, i.e., provided that condition (i-a) or (i-b) holds,
+ * idling is not performed, regardless of whether condition (ii)
+ * holds. In other words, only if conditions (i-a) and (i-b) do not
+ * hold, then idling is allowed, and the device tends to be prevented
+ * from queueing many requests, possibly of several processes. Since
+ * there are no groups with requests waiting for completion, then, to
+ * control conditions (i-a) and (i-b) it is enough to check just
+ * whether all the queues with requests waiting for completion also
+ * have the same weight.
+ *
+ * Not checking condition (ii) evidently exposes bfqq to the
+ * risk of getting less throughput than its fair share.
+ * However, for queues with the same weight, a further
+ * mechanism, preemption, mitigates or even eliminates this
+ * problem. And it does so without consequences on overall
+ * throughput. This mechanism and its benefits are explained
+ * in the next three paragraphs.
+ *
+ * Even if a queue, say Q, is expired when it remains idle, Q
+ * can still preempt the new in-service queue if the next
+ * request of Q arrives soon (see the comments on
+ * bfq_bfqq_update_budg_for_activation). If all queues and
+ * groups have the same weight, this form of preemption,
+ * combined with the hole-recovery heuristic described in the
+ * comments on function bfq_bfqq_update_budg_for_activation,
+ * are enough to preserve a correct bandwidth distribution in
+ * the mid term, even without idling. In fact, even if not
+ * idling allows the internal queues of the device to contain
+ * many requests, and thus to reorder requests, we can rather
+ * safely assume that the internal scheduler still preserves a
+ * minimum of mid-term fairness.
+ *
+ * More precisely, this preemption-based, idleless approach
+ * provides fairness in terms of IOPS, and not sectors per
+ * second. This can be seen with a simple example. Suppose
+ * that there are two queues with the same weight, but that
+ * the first queue receives requests of 8 sectors, while the
+ * second queue receives requests of 1024 sectors. In
+ * addition, suppose that each of the two queues contains at
+ * most one request at a time, which implies that each queue
+ * always remains idle after it is served. Finally, after
+ * remaining idle, each queue receives very quickly a new
+ * request. It follows that the two queues are served
+ * alternatively, preempting each other if needed. This
+ * implies that, although both queues have the same weight,
+ * the queue with large requests receives a service that is
+ * 1024/8 times as high as the service received by the other
+ * queue.
+ *
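+ * (Worked numbers for the example above: in each service round the
+ * two queues complete one request each, so the second queue moves
+ * 1024 sectors for every 8 sectors of the first, i.e., it receives
+ * 1024/8 = 128 times as much sector-domain service at equal IOPS.)
+ *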
+ * The motivation for using preemption instead of idling (for
+ * queues with the same weight) is that, by not idling,
+ * service guarantees are preserved (completely or at least in
+ * part) without minimally sacrificing throughput. And, if
+ * there is no active group, then the primary expectation for
+ * this device is probably a high throughput.
+ *
+ * We are now left only with explaining the two sub-conditions in the
+ * additional compound condition that is checked below for deciding
+ * whether the scenario is asymmetric. To explain the first
+ * sub-condition, we need to add that the function
+ * bfq_asymmetric_scenario checks the weights of only
+ * non-weight-raised queues, for efficiency reasons (see comments on
+ * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised
+ * is checked explicitly here. More precisely, the compound condition
+ * below takes into account also the fact that, even if bfqq is being
+ * weight-raised, the scenario is still symmetric if all queues with
+ * requests waiting for completion happen to be
+ * weight-raised. Actually, we should be even more precise here, and
+ * differentiate between interactive weight raising and soft real-time
+ * weight raising.
+ *
+ * The second sub-condition checked in the compound condition is
+ * whether there is a fair amount of already in-flight I/O not
+ * belonging to bfqq. If so, I/O dispatching is to be plugged, for the
+ * following reason. The drive may decide to serve in-flight
+ * non-bfqq's I/O requests before bfqq's ones, thereby delaying the
+ * arrival of new I/O requests for bfqq (recall that bfqq is sync). If
+ * I/O-dispatching is not plugged, then, while bfqq remains empty, a
+ * basically uncontrolled amount of I/O from other queues may be
+ * dispatched too, possibly causing the service of bfqq's I/O to be
+ * delayed even longer in the drive. This problem gets more and more
+ * serious as the speed and the queue depth of the drive grow,
+ * because, as these two quantities grow, the probability to find no
+ * queue busy but many requests in flight grows too. By contrast,
+ * plugging I/O dispatching minimizes the delay induced by already
+ * in-flight I/O, and enables bfqq to recover the bandwidth it may
+ * lose because of this delay.
+ *
+ * As a side note, it is worth considering that the above
+ * device-idling countermeasures may however fail in the following
+ * unlucky scenario: if I/O-dispatch plugging is (correctly) disabled
+ * in a time period during which all symmetry sub-conditions hold, and
+ * therefore the device is allowed to enqueue many requests, but at
+ * some later point in time some sub-condition ceases to hold, then it
+ * may become impossible to make requests be served in the desired
+ * order until all the requests already queued in the device have been
+ * served. The last sub-condition commented above somewhat mitigates
+ * this problem for weight-raised queues.
+ */
+static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+{
+ return (bfqq->wr_coeff > 1 &&
+ (bfqd->wr_busy_queues <
+ bfq_tot_busy_queues(bfqd) ||
+ bfqd->rq_in_driver >=
+ bfqq->dispatched + 4)) ||
+ bfq_asymmetric_scenario(bfqd, bfqq);
+}
+
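Plugging made-up numbers into the condition above shows how either sub-condition of the weight-raised branch can fire on its own (a sketch with stand-in values, not kernel code):

    #include <stdbool.h>

    bool toy_idling_needed(void)
    {
            unsigned int wr_coeff = 30;             /* bfqq is weight-raised */
            unsigned int wr_busy = 3, tot_busy = 5; /* not all busy queues raised */
            unsigned int rq_in_driver = 9, dispatched = 4;
            bool asymmetric = false;                /* assume symmetric weights */

            return (wr_coeff > 1 &&
                    (wr_busy < tot_busy ||               /* 3 < 5 -> true */
                     rq_in_driver >= dispatched + 4)) || /* 9 >= 8 -> true */
                    asymmetric;
    }
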
+static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ enum bfqq_expiration reason)
{
/*
* If this bfqq is shared between multiple processes, check
@@ -2794,7 +3464,22 @@
if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
bfq_mark_bfqq_split_coop(bfqq);
- if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
+ /*
+ * Consider queues with a higher finish virtual time than
+ * bfqq. If idling_needed_for_service_guarantees(bfqq) returns
+ * true, then bfqq's bandwidth would be violated if an
+ * uncontrolled amount of I/O from these queues were
+ * dispatched while bfqq is waiting for its new I/O to
+ * arrive. This is exactly what may happen if this is a forced
+ * expiration caused by a preemption attempt, and if bfqq is
+ * not re-scheduled. To prevent this from happening, re-queue
+ * bfqq if it needs I/O-dispatch plugging, even if it is
+	 * empty. By doing so, bfqq is guaranteed to be served before the
+ * above queues (provided that bfqq is of course eligible).
+ */
+ if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
+ !(reason == BFQQE_PREEMPTED &&
+ idling_needed_for_service_guarantees(bfqd, bfqq))) {
if (bfqq->dispatched == 0)
/*
* Overloading budget_timeout field to store
@@ -2809,16 +3494,21 @@
bfq_requeue_bfqq(bfqd, bfqq, true);
/*
* Resort priority tree of potential close cooperators.
+ * See comments on bfq_pos_tree_add_move() for the unlikely().
*/
- bfq_pos_tree_add_move(bfqd, bfqq);
+ if (unlikely(!bfqd->nonrot_with_queueing &&
+ !RB_EMPTY_ROOT(&bfqq->sort_list)))
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
/*
* All in-service entities must have been properly deactivated
* or requeued before executing the next function, which
- * resets all in-service entites as no more in service.
+ * resets all in-service entities as no more in service. This
+ * may cause bfqq to be freed. If this happens, the next
+ * function returns true.
*/
- __bfq_bfqd_reset_in_service(bfqd);
+ return __bfq_bfqd_reset_in_service(bfqd);
}
/**
@@ -3216,7 +3906,6 @@
bool slow;
unsigned long delta = 0;
struct bfq_entity *entity = &bfqq->entity;
- int ref;
/*
* Check whether the process is slow (see bfq_bfqq_is_slow).
@@ -3258,16 +3947,32 @@
* requests, then the request pattern is isochronous
* (see the comments on the function
* bfq_bfqq_softrt_next_start()). Thus we can compute
- * soft_rt_next_start. If, instead, the queue still
- * has outstanding requests, then we have to wait for
- * the completion of all the outstanding requests to
- * discover whether the request pattern is actually
- * isochronous.
+ * soft_rt_next_start. And we do it, unless bfqq is in
+ * interactive weight raising. We do not do it in the
+ * latter subcase, for the following reason. bfqq may
+ * be conveying the I/O needed to load a soft
+ * real-time application. Such an application will
+ * actually exhibit a soft real-time I/O pattern after
+ * it finally starts doing its job. But, if
+ * soft_rt_next_start is computed here for an
+ * interactive bfqq, and bfqq had received a lot of
+ * service before remaining with no outstanding
+ * request (likely to happen on a fast device), then
+ * soft_rt_next_start would be assigned such a high
+ * value that, for a very long time, bfqq would be
+ * prevented from being possibly considered as soft
+ * real time.
+ *
+ * If, instead, the queue still has outstanding
+ * requests, then we have to wait for the completion
+ * of all the outstanding requests to discover whether
+ * the request pattern is actually isochronous.
*/
- if (bfqq->dispatched == 0)
+ if (bfqq->dispatched == 0 &&
+ bfqq->wr_coeff != bfqd->bfq_wr_coeff)
bfqq->soft_rt_next_start =
bfq_bfqq_softrt_next_start(bfqd, bfqq);
- else {
+ else if (bfqq->dispatched > 0) {
/*
* Schedule an update of soft_rt_next_start to when
* the task may be discovered to be isochronous.
@@ -3281,14 +3986,20 @@
slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
/*
+ * bfqq expired, so no total service time needs to be computed
+ * any longer: reset state machine for measuring total service
+ * times.
+ */
+ bfqd->rqs_injected = bfqd->wait_dispatch = false;
+ bfqd->waited_rq = NULL;
+
+ /*
* Increase, decrease or leave budget unchanged according to
* reason.
*/
__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
- ref = bfqq->ref;
- __bfq_bfqq_expire(bfqd, bfqq);
-
- if (ref == 1) /* bfqq is gone, no more actions on it */
+ if (__bfq_bfqq_expire(bfqd, bfqq, reason))
+ /* bfqq is gone, no more actions on it */
return;
/* mark bfqq as waiting a request only if a bic still points to it */
@@ -3358,53 +4069,13 @@
bfq_bfqq_budget_timeout(bfqq);
}
-/*
- * For a queue that becomes empty, device idling is allowed only if
- * this function returns true for the queue. As a consequence, since
- * device idling plays a critical role in both throughput boosting and
- * service guarantees, the return value of this function plays a
- * critical role in both these aspects as well.
- *
- * In a nutshell, this function returns true only if idling is
- * beneficial for throughput or, even if detrimental for throughput,
- * idling is however necessary to preserve service guarantees (low
- * latency, desired throughput distribution, ...). In particular, on
- * NCQ-capable devices, this function tries to return false, so as to
- * help keep the drives' internal queues full, whenever this helps the
- * device boost the throughput without causing any service-guarantee
- * issue.
- *
- * In more detail, the return value of this function is obtained by,
- * first, computing a number of boolean variables that take into
- * account throughput and service-guarantee issues, and, then,
- * combining these variables in a logical expression. Most of the
- * issues taken into account are not trivial. We discuss these issues
- * individually while introducing the variables.
- */
-static bool bfq_better_to_idle(struct bfq_queue *bfqq)
+static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
{
- struct bfq_data *bfqd = bfqq->bfqd;
bool rot_without_queueing =
!blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
bfqq_sequential_and_IO_bound,
- idling_boosts_thr, idling_boosts_thr_without_issues,
- idling_needed_for_service_guarantees,
- asymmetric_scenario;
-
- if (bfqd->strict_guarantees)
- return true;
-
- /*
- * Idling is performed only if slice_idle > 0. In addition, we
- * do not idle if
- * (a) bfqq is async
- * (b) bfqq is in the idle io prio class: in this case we do
- * not idle because we want to minimize the bandwidth that
- * queues in this class can steal to higher-priority queues
- */
- if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
- bfq_class_idle(bfqq))
- return false;
+ idling_boosts_thr;
bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
@@ -3436,8 +4107,7 @@
bfqq_sequential_and_IO_bound);
/*
- * The value of the next variable,
- * idling_boosts_thr_without_issues, is equal to that of
+ * The return value of this function is equal to that of
* idling_boosts_thr, unless a special case holds. In this
* special case, described below, idling may cause problems to
* weight-raised queues.
@@ -3454,163 +4124,81 @@
* which enqueue several requests in advance, and further
* reorder internally-queued requests.
*
- * For this reason, we force to false the value of
- * idling_boosts_thr_without_issues if there are weight-raised
- * busy queues. In this case, and if bfqq is not weight-raised,
- * this guarantees that the device is not idled for bfqq (if,
- * instead, bfqq is weight-raised, then idling will be
- * guaranteed by another variable, see below). Combined with
- * the timestamping rules of BFQ (see [1] for details), this
- * behavior causes bfqq, and hence any sync non-weight-raised
- * queue, to get a lower number of requests served, and thus
- * to ask for a lower number of requests from the request
- * pool, before the busy weight-raised queues get served
- * again. This often mitigates starvation problems in the
- * presence of heavy write workloads and NCQ, thereby
- * guaranteeing a higher application and system responsiveness
- * in these hostile scenarios.
+ * For this reason, we force to false the return value if
+ * there are weight-raised busy queues. In this case, and if
+ * bfqq is not weight-raised, this guarantees that the device
+ * is not idled for bfqq (if, instead, bfqq is weight-raised,
+ * then idling will be guaranteed by another variable, see
+ * below). Combined with the timestamping rules of BFQ (see
+ * [1] for details), this behavior causes bfqq, and hence any
+ * sync non-weight-raised queue, to get a lower number of
+ * requests served, and thus to ask for a lower number of
+ * requests from the request pool, before the busy
+ * weight-raised queues get served again. This often mitigates
+ * starvation problems in the presence of heavy write
+ * workloads and NCQ, thereby guaranteeing a higher
+ * application and system responsiveness in these hostile
+ * scenarios.
*/
- idling_boosts_thr_without_issues = idling_boosts_thr &&
+ return idling_boosts_thr &&
bfqd->wr_busy_queues == 0;
+}
+
+/*
+ * For a queue that becomes empty, device idling is allowed only if
+ * this function returns true for that queue. As a consequence, since
+ * device idling plays a critical role for both throughput boosting
+ * and service guarantees, the return value of this function plays a
+ * critical role as well.
+ *
+ * In a nutshell, this function returns true only if idling is
+ * beneficial for throughput or, even if detrimental for throughput,
+ * idling is however necessary to preserve service guarantees (low
+ * latency, desired throughput distribution, ...). In particular, on
+ * NCQ-capable devices, this function tries to return false, so as to
+ * help keep the drives' internal queues full, whenever this helps the
+ * device boost the throughput without causing any service-guarantee
+ * issue.
+ *
+ * Most of the issues taken into account to get the return value of
+ * this function are not trivial. We discuss these issues in the two
+ * functions providing the main pieces of information needed by this
+ * function.
+ */
+static bool bfq_better_to_idle(struct bfq_queue *bfqq)
+{
+ struct bfq_data *bfqd = bfqq->bfqd;
+ bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
+
+ if (unlikely(bfqd->strict_guarantees))
+ return true;
/*
- * There is then a case where idling must be performed not
- * for throughput concerns, but to preserve service
- * guarantees.
- *
- * To introduce this case, we can note that allowing the drive
- * to enqueue more than one request at a time, and hence
- * delegating de facto final scheduling decisions to the
- * drive's internal scheduler, entails loss of control on the
- * actual request service order. In particular, the critical
- * situation is when requests from different processes happen
- * to be present, at the same time, in the internal queue(s)
- * of the drive. In such a situation, the drive, by deciding
- * the service order of the internally-queued requests, does
- * determine also the actual throughput distribution among
- * these processes. But the drive typically has no notion or
- * concern about per-process throughput distribution, and
- * makes its decisions only on a per-request basis. Therefore,
- * the service distribution enforced by the drive's internal
- * scheduler is likely to coincide with the desired
- * device-throughput distribution only in a completely
- * symmetric scenario where:
- * (i) each of these processes must get the same throughput as
- * the others;
- * (ii) all these processes have the same I/O pattern
- (either sequential or random).
- * In fact, in such a scenario, the drive will tend to treat
- * the requests of each of these processes in about the same
- * way as the requests of the others, and thus to provide
- * each of these processes with about the same throughput
- * (which is exactly the desired throughput distribution). In
- * contrast, in any asymmetric scenario, device idling is
- * certainly needed to guarantee that bfqq receives its
- * assigned fraction of the device throughput (see [1] for
- * details).
- *
- * We address this issue by controlling, actually, only the
- * symmetry sub-condition (i), i.e., provided that
- * sub-condition (i) holds, idling is not performed,
- * regardless of whether sub-condition (ii) holds. In other
- * words, only if sub-condition (i) holds, then idling is
- * allowed, and the device tends to be prevented from queueing
- * many requests, possibly of several processes. The reason
- * for not controlling also sub-condition (ii) is that we
- * exploit preemption to preserve guarantees in case of
- * symmetric scenarios, even if (ii) does not hold, as
- * explained in the next two paragraphs.
- *
- * Even if a queue, say Q, is expired when it remains idle, Q
- * can still preempt the new in-service queue if the next
- * request of Q arrives soon (see the comments on
- * bfq_bfqq_update_budg_for_activation). If all queues and
- * groups have the same weight, this form of preemption,
- * combined with the hole-recovery heuristic described in the
- * comments on function bfq_bfqq_update_budg_for_activation,
- * are enough to preserve a correct bandwidth distribution in
- * the mid term, even without idling. In fact, even if not
- * idling allows the internal queues of the device to contain
- * many requests, and thus to reorder requests, we can rather
- * safely assume that the internal scheduler still preserves a
- * minimum of mid-term fairness. The motivation for using
- * preemption instead of idling is that, by not idling,
- * service guarantees are preserved without minimally
- * sacrificing throughput. In other words, both a high
- * throughput and its desired distribution are obtained.
- *
- * More precisely, this preemption-based, idleless approach
- * provides fairness in terms of IOPS, and not sectors per
- * second. This can be seen with a simple example. Suppose
- * that there are two queues with the same weight, but that
- * the first queue receives requests of 8 sectors, while the
- * second queue receives requests of 1024 sectors. In
- * addition, suppose that each of the two queues contains at
- * most one request at a time, which implies that each queue
- * always remains idle after it is served. Finally, after
- * remaining idle, each queue receives very quickly a new
- * request. It follows that the two queues are served
- * alternatively, preempting each other if needed. This
- * implies that, although both queues have the same weight,
- * the queue with large requests receives a service that is
- * 1024/8 times as high as the service received by the other
- * queue.
- *
- * On the other hand, device idling is performed, and thus
- * pure sector-domain guarantees are provided, for the
- * following queues, which are likely to need stronger
- * throughput guarantees: weight-raised queues, and queues
- * with a higher weight than other queues. When such queues
- * are active, sub-condition (i) is false, which triggers
- * device idling.
- *
- * According to the above considerations, the next variable is
- * true (only) if sub-condition (i) holds. To compute the
- * value of this variable, we not only use the return value of
- * the function bfq_symmetric_scenario(), but also check
- * whether bfqq is being weight-raised, because
- * bfq_symmetric_scenario() does not take into account also
- * weight-raised queues (see comments on
- * bfq_weights_tree_add()).
- *
- * As a side note, it is worth considering that the above
- * device-idling countermeasures may however fail in the
- * following unlucky scenario: if idling is (correctly)
- * disabled in a time period during which all symmetry
- * sub-conditions hold, and hence the device is allowed to
- * enqueue many requests, but at some later point in time some
- * sub-condition stops to hold, then it may become impossible
- * to let requests be served in the desired order until all
- * the requests already queued in the device have been served.
+ * Idling is performed only if slice_idle > 0. In addition, we
+ * do not idle if
+ * (a) bfqq is async
+ * (b) bfqq is in the idle io prio class: in this case we do
+ * not idle because we want to minimize the bandwidth that
+	 *     queues in this class can steal from higher-priority queues
*/
- asymmetric_scenario = bfqq->wr_coeff > 1 ||
- !bfq_symmetric_scenario(bfqd);
+ if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
+ bfq_class_idle(bfqq))
+ return false;
+
+ idling_boosts_thr_with_no_issue =
+ idling_boosts_thr_without_issues(bfqd, bfqq);
+
+ idling_needed_for_service_guar =
+ idling_needed_for_service_guarantees(bfqd, bfqq);
/*
- * Finally, there is a case where maximizing throughput is the
- * best choice even if it may cause unfairness toward
- * bfqq. Such a case is when bfqq became active in a burst of
- * queue activations. Queues that became active during a large
- * burst benefit only from throughput, as discussed in the
- * comments on bfq_handle_burst. Thus, if bfqq became active
- * in a burst and not idling the device maximizes throughput,
- * then the device must no be idled, because not idling the
- * device provides bfqq and all other queues in the burst with
- * maximum benefit. Combining this and the above case, we can
- * now establish when idling is actually needed to preserve
- * service guarantees.
- */
- idling_needed_for_service_guarantees =
- asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
-
- /*
- * We have now all the components we need to compute the
+	 * We now have the two components we need to compute the
* return value of the function, which is true only if idling
* either boosts the throughput (without issues), or is
* necessary to preserve service guarantees.
*/
- return idling_boosts_thr_without_issues ||
- idling_needed_for_service_guarantees;
+ return idling_boosts_thr_with_no_issue ||
+ idling_needed_for_service_guar;
}
/*
@@ -3630,6 +4218,102 @@
}
/*
+ * This function chooses the queue from which to pick the next extra
+ * I/O request to inject, if it finds a compatible queue. See the
+ * comments on bfq_update_inject_limit() for details on the injection
+ * mechanism, and for the definitions of the quantities mentioned
+ * below.
+ */
+static struct bfq_queue *
+bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
+{
+ struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
+ unsigned int limit = in_serv_bfqq->inject_limit;
+ /*
+ * If
+ * - bfqq is not weight-raised and therefore does not carry
+ * time-critical I/O,
+ * or
+ * - regardless of whether bfqq is weight-raised, bfqq has
+ * however a long think time, during which it can absorb the
+ * effect of an appropriate number of extra I/O requests
+ * from other queues (see bfq_update_inject_limit for
+ * details on the computation of this number);
+ * then injection can be performed without restrictions.
+ */
+ bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 ||
+ !bfq_bfqq_has_short_ttime(in_serv_bfqq);
+
+ /*
+ * If
+ * - the baseline total service time could not be sampled yet,
+ * so the inject limit happens to be still 0, and
+ * - a lot of time has elapsed since the plugging of I/O
+ * dispatching started, so drive speed is being wasted
+ * significantly;
+ * then temporarily raise inject limit to one request.
+ */
+ if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 &&
+ bfq_bfqq_wait_request(in_serv_bfqq) &&
+ time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
+ bfqd->bfq_slice_idle)
+ )
+ limit = 1;
+
+ if (bfqd->rq_in_driver >= limit)
+ return NULL;
+
+ /*
+ * Linear search of the source queue for injection; but, with
+ * a high probability, very few steps are needed to find a
+ * candidate queue, i.e., a queue with enough budget left for
+ * its next request. In fact:
+ * - BFQ dynamically updates the budget of every queue so as
+ * to accommodate the expected backlog of the queue;
+ * - if a queue gets all its requests dispatched as injected
+ * service, then the queue is removed from the active list
+ * (and re-added only if it gets new requests, but then it
+ * is assigned again enough budget for its new backlog).
+ */
+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
+ if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
+ (in_serv_always_inject || bfqq->wr_coeff > 1) &&
+ bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
+ bfq_bfqq_budget_left(bfqq)) {
+ /*
+ * Allow for only one large in-flight request
+ * on non-rotational devices, for the
+			 * following reason. On non-rotational drives,
+ * large requests take much longer than
+ * smaller requests to be served. In addition,
+ * the drive prefers to serve large requests
+			 * w.r.t. small ones, if it can choose. So,
+			 * having more than one large request queued
+			 * in the drive may easily make the next
+			 * request of the in-service queue wait so
+			 * long as to break bfqq's service guarantees. On
+ * the bright side, large requests let the
+ * drive reach a very high throughput, even if
+ * there is only one in-flight large request
+ * at a time.
+ */
+ if (blk_queue_nonrot(bfqd->queue) &&
+ blk_rq_sectors(bfqq->next_rq) >=
+ BFQQ_SECT_THR_NONROT)
+ limit = min_t(unsigned int, 1, limit);
+ else
+ limit = in_serv_bfqq->inject_limit;
+
+ if (bfqd->rq_in_driver < limit) {
+ bfqd->rqs_injected = true;
+ return bfqq;
+ }
+ }
+
+ return NULL;
+}
+
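The per-candidate cap applied above for large requests on non-rotational drives amounts to the following hypothetical helper (mirroring the min_t() call; note that a limit of 0 stays 0):

    #include <stdbool.h>

    unsigned int toy_effective_limit(bool nonrot, bool large_rq,
                                     unsigned int inject_limit)
    {
            /* large requests on flash: at most one in flight */
            if (nonrot && large_rq)
                    return inject_limit < 1 ? inject_limit : 1;
            return inject_limit;
    }
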
+/*
* Select a queue for service. If we have a current queue in service,
* check whether to continue servicing it, or retrieve and set a new one.
*/
@@ -3710,10 +4394,110 @@
* No requests pending. However, if the in-service queue is idling
* for a new request, or has requests waiting for a completion and
* may idle after their completion, then keep it anyway.
+ *
+ * Yet, inject service from other queues if it boosts
+ * throughput and is possible.
*/
if (bfq_bfqq_wait_request(bfqq) ||
(bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
- bfqq = NULL;
+ struct bfq_queue *async_bfqq =
+ bfqq->bic && bfqq->bic->bfqq[0] &&
+ bfq_bfqq_busy(bfqq->bic->bfqq[0]) &&
+ bfqq->bic->bfqq[0]->next_rq ?
+ bfqq->bic->bfqq[0] : NULL;
+
+ /*
+ * The next three mutually-exclusive ifs decide
+ * whether to try injection, and choose the queue to
+ * pick an I/O request from.
+ *
+ * The first if checks whether the process associated
+ * with bfqq has also async I/O pending. If so, it
+ * injects such I/O unconditionally. Injecting async
+ * I/O from the same process can cause no harm to the
+ * process. On the contrary, it can only increase
+ * bandwidth and reduce latency for the process.
+ *
+ * The second if checks whether there happens to be a
+ * non-empty waker queue for bfqq, i.e., a queue whose
+ * I/O needs to be completed for bfqq to receive new
+ * I/O. This happens, e.g., if bfqq is associated with
+ * a process that does some sync. A sync generates
+ * extra blocking I/O, which must be completed before
+ * the process associated with bfqq can go on with its
+ * I/O. If the I/O of the waker queue is not served,
+ * then bfqq remains empty, and no I/O is dispatched,
+ * until the idle timeout fires for bfqq. This is
+ * likely to result in lower bandwidth and higher
+ * latencies for bfqq, and in a severe loss of total
+ * throughput. The best action to take is therefore to
+ * serve the waker queue as soon as possible. So do it
+ * (without relying on the third alternative below for
+ * eventually serving waker_bfqq's I/O; see the last
+ * paragraph for further details). This systematic
+ * injection of I/O from the waker queue does not
+ * cause any delay to bfqq's I/O. On the contrary,
+		 * bfqq's next I/O is brought forward dramatically,
+ * for it is not blocked for milliseconds.
+ *
+ * The third if checks whether bfqq is a queue for
+ * which it is better to avoid injection. It is so if
+ * bfqq delivers more throughput when served without
+ * any further I/O from other queues in the middle, or
+ * if the service times of bfqq's I/O requests both
+ * count more than overall throughput, and may be
+ * easily increased by injection (this happens if bfqq
+ * has a short think time). If none of these
+ * conditions holds, then a candidate queue for
+ * injection is looked for through
+ * bfq_choose_bfqq_for_injection(). Note that the
+ * latter may return NULL (for example if the inject
+ * limit for bfqq is currently 0).
+ *
+ * NOTE: motivation for the second alternative
+ *
+ * Thanks to the way the inject limit is updated in
+ * bfq_update_has_short_ttime(), it is rather likely
+ * that, if I/O is being plugged for bfqq and the
+ * waker queue has pending I/O requests that are
+ * blocking bfqq's I/O, then the third alternative
+ * above lets the waker queue get served before the
+ * I/O-plugging timeout fires. So one may deem the
+ * second alternative superfluous. It is not, because
+ * the third alternative may be way less effective in
+ * case of a synchronization. For two main
+ * reasons. First, throughput may be low because the
+ * inject limit may be too low to guarantee the same
+ * amount of injected I/O, from the waker queue or
+ * other queues, that the second alternative
+ * guarantees (the second alternative unconditionally
+ * injects a pending I/O request of the waker queue
+ * for each bfq_dispatch_request()). Second, with the
+ * third alternative, the duration of the plugging,
+ * i.e., the time before bfqq finally receives new I/O,
+ * may not be minimized, because the waker queue may
+ * happen to be served only after other queues.
+ */
+ if (async_bfqq &&
+ icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
+ bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <=
+ bfq_bfqq_budget_left(async_bfqq))
+ bfqq = bfqq->bic->bfqq[0];
+ else if (bfq_bfqq_has_waker(bfqq) &&
+ bfq_bfqq_busy(bfqq->waker_bfqq) &&
+ bfqq->next_rq &&
+ bfq_serv_to_charge(bfqq->waker_bfqq->next_rq,
+ bfqq->waker_bfqq) <=
+ bfq_bfqq_budget_left(bfqq->waker_bfqq)
+ )
+ bfqq = bfqq->waker_bfqq;
+ else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
+ (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
+ !bfq_bfqq_has_short_ttime(bfqq)))
+ bfqq = bfq_choose_bfqq_for_injection(bfqd);
+ else
+ bfqq = NULL;
+
goto keep_queue;
}
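Collapsing the three mutually-exclusive branches above into pre-computed predicates gives this compact model (hypothetical; each boolean stands for the full condition tested in the corresponding if):

    #include <stdbool.h>

    enum inject_pick { PICK_ASYNC, PICK_WAKER, PICK_ANY, PICK_NONE };

    enum inject_pick toy_pick(bool async_ok, bool waker_ok,
                              bool injection_harmless)
    {
            if (async_ok)
                    return PICK_ASYNC;  /* same-process async I/O: always safe */
            if (waker_ok)
                    return PICK_WAKER;  /* serve the waker to unblock bfqq */
            if (injection_harmless)
                    return PICK_ANY;    /* general injection mechanism */
            return PICK_NONE;           /* keep idling on bfqq alone */
    }
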
@@ -3801,8 +4585,16 @@
bfq_bfqq_served(bfqq, service_to_charge);
+ if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
+ bfqd->wait_dispatch = false;
+ bfqd->waited_rq = rq;
+ }
+
bfq_dispatch_remove(bfqd->queue, rq);
+ if (bfqq != bfqd->in_service_queue)
+ goto return_rq;
+
/*
* If weight raising has to terminate for bfqq, then next
* function causes an immediate update of bfqq's weight,
@@ -3821,13 +4613,12 @@
* belongs to CLASS_IDLE and other queues are waiting for
* service.
*/
- if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
- goto expire;
+ if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)))
+ goto return_rq;
- return rq;
-
-expire:
bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
+
+return_rq:
return rq;
}
@@ -3840,7 +4631,7 @@
* most a call to dispatch for nothing
*/
return !list_empty_careful(&bfqd->dispatch) ||
- bfqd->busy_queues > 0;
+ bfq_tot_busy_queues(bfqd) > 0;
}
static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
@@ -3894,9 +4685,10 @@
goto start_rq;
}
- bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
+ bfq_log(bfqd, "dispatch requests: %d busy queues",
+ bfq_tot_busy_queues(bfqd));
- if (bfqd->busy_queues == 0)
+ if (bfq_tot_busy_queues(bfqd) == 0)
goto exit;
/*
@@ -3930,7 +4722,7 @@
return rq;
}
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
static void bfq_update_dispatch_stats(struct request_queue *q,
struct request *rq,
struct bfq_queue *in_serv_queue,
@@ -3954,7 +4746,7 @@
* In addition, the following queue lock guarantees that
* bfqq_group(bfqq) exists as well.
*/
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (idle_timer_disabled)
/*
* Since the idle timer has been disabled,
@@ -3973,14 +4765,14 @@
bfqg_stats_set_start_empty_time(bfqg);
bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
}
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
#else
static inline void bfq_update_dispatch_stats(struct request_queue *q,
struct request *rq,
struct bfq_queue *in_serv_queue,
bool idle_timer_disabled) {}
-#endif
+#endif /* CONFIG_BFQ_CGROUP_DEBUG */
static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
@@ -4016,6 +4808,8 @@
*/
void bfq_put_queue(struct bfq_queue *bfqq)
{
+ struct bfq_queue *item;
+ struct hlist_node *n;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_group *bfqg = bfqq_group(bfqq);
#endif
@@ -4060,6 +4854,36 @@
bfqq->bfqd->burst_size--;
}
+ /*
+ * bfqq does not exist any longer, so it cannot be woken by
+ * any other queue, and cannot wake any other queue. Then bfqq
+ * must be removed from the woken list of its possible waker
+ * queue, and all queues in the woken list of bfqq must stop
+ * having a waker queue. Strictly speaking, these updates
+ * should be performed when bfqq remains with no I/O source
+ * attached to it, which happens before bfqq gets freed. In
+ * particular, this happens when the last process associated
+ * with bfqq exits or gets associated with a different
+ * queue. However, both events lead to bfqq being freed soon,
+ * and dangling references would come out only after bfqq gets
+ * freed. So these updates are done here, as a simple and safe
+ * way to handle all cases.
+ */
+ /* remove bfqq from woken list */
+ if (!hlist_unhashed(&bfqq->woken_list_node))
+ hlist_del_init(&bfqq->woken_list_node);
+
+ /* reset waker for all queues in woken list */
+ hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
+ woken_list_node) {
+ item->waker_bfqq = NULL;
+ bfq_clear_bfqq_has_waker(item);
+ hlist_del_init(&item->woken_list_node);
+ }
+
+ if (bfqq->bfqd && bfqq->bfqd->last_completed_rq_bfqq == bfqq)
+ bfqq->bfqd->last_completed_rq_bfqq = NULL;
+
kmem_cache_free(bfq_pool, bfqq);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
bfqg_and_blkg_put(bfqg);
@@ -4088,7 +4912,7 @@
static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
if (bfqq == bfqd->in_service_queue) {
- __bfq_bfqq_expire(bfqd, bfqq);
+ __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
bfq_schedule_dispatch(bfqd);
}
@@ -4096,7 +4920,7 @@
bfq_put_cooperator(bfqq);
- bfq_put_queue(bfqq); /* release process reference */
+ bfq_release_process_ref(bfqd, bfqq);
}
static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
@@ -4111,6 +4935,7 @@
unsigned long flags;
spin_lock_irqsave(&bfqd->lock, flags);
+ bfqq->bic = NULL;
bfq_exit_bfqq(bfqd, bfqq);
bic_set_bfqq(bic, NULL, is_sync);
spin_unlock_irqrestore(&bfqd->lock, flags);
@@ -4197,8 +5022,7 @@
bfqq = bic_to_bfqq(bic, false);
if (bfqq) {
- /* release process reference on this queue */
- bfq_put_queue(bfqq);
+ bfq_release_process_ref(bfqd, bfqq);
bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
bic_set_bfqq(bic, bfqq, false);
}
@@ -4214,6 +5038,8 @@
RB_CLEAR_NODE(&bfqq->entity.rb_node);
INIT_LIST_HEAD(&bfqq->fifo);
INIT_HLIST_NODE(&bfqq->burst_list_node);
+ INIT_HLIST_NODE(&bfqq->woken_list_node);
+ INIT_HLIST_HEAD(&bfqq->woken_list);
bfqq->ref = 0;
bfqq->bfqd = bfqd;
@@ -4297,7 +5123,7 @@
rcu_read_lock();
- bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
+ bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
if (!bfqg) {
bfqq = &bfqd->oom_bfqq;
goto out;
@@ -4369,17 +5195,19 @@
struct request *rq)
{
bfqq->seek_history <<= 1;
- bfqq->seek_history |=
- get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
- (!blk_queue_nonrot(bfqd->queue) ||
- blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
+ bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
+
+ if (bfqq->wr_coeff > 1 &&
+ bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
+ BFQQ_TOTALLY_SEEKY(bfqq))
+ bfq_bfqq_end_wr(bfqq);
}
static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
struct bfq_io_cq *bic)
{
- bool has_short_ttime = true;
+ bool has_short_ttime = true, state_changed;
/*
* No need to update has_short_ttime if bfqq is async or in
@@ -4404,13 +5232,102 @@
bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
has_short_ttime = false;
- bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
- has_short_ttime);
+ state_changed = has_short_ttime != bfq_bfqq_has_short_ttime(bfqq);
if (has_short_ttime)
bfq_mark_bfqq_has_short_ttime(bfqq);
else
bfq_clear_bfqq_has_short_ttime(bfqq);
+
+ /*
+ * Until the base value for the total service time gets
+ * finally computed for bfqq, the inject limit does depend on
+ * the think-time state (short|long). In particular, the limit
+ * is 0 or 1 if the think time is deemed, respectively, as
+ * short or long (details in the comments in
+ * bfq_update_inject_limit()). Accordingly, the next
+ * instructions reset the inject limit if the think-time state
+ * has changed and the above base value is still to be
+ * computed.
+ *
+ * However, the reset is performed only if more than 100 ms
+ * have elapsed since the last update of the inject limit, or
+ * (inclusive) if the change is from short to long think
+ * time. The reason for this waiting is as follows.
+ *
+ * bfqq may have a long think time because of a
+ * synchronization with some other queue, i.e., because the
+ * I/O of some other queue may need to be completed for bfqq
+ * to receive new I/O. Details in the comments on the choice
+ * of the queue for injection in bfq_select_queue().
+ *
+ * As stressed in those comments, if such a synchronization is
+ * actually in place, then, without injection on bfqq, the
+ * blocking I/O cannot be served while bfqq is in
+ * service. As a consequence, if bfqq is granted
+ * I/O-dispatch-plugging, then bfqq remains empty, and no I/O
+ * is dispatched, until the idle timeout fires. This is likely
+ * to result in lower bandwidth and higher latencies for bfqq,
+ * and in a severe loss of total throughput.
+ *
+ * On the opposite end, a non-zero inject limit may allow the
+ * I/O that blocks bfqq to be executed soon, and therefore
+ * bfqq to receive new I/O soon.
+ *
+ * But, if the blocking gets actually eliminated, then the
+ * next think-time sample for bfqq may be very low. This in
+ * turn may cause bfqq's think time to be deemed
+ * short. Without the 100 ms barrier, this new state change
+ * would cause the body of the next if to be executed
+ * immediately. But this would set the inject limit to 0.
+ * Without injection, the blocking I/O would cause the
+ * think time of bfqq to become long again, and therefore the
+ * inject limit to be raised again, and so on. The only effect
+ * of such a steady oscillation between the two think-time
+ * states would be to prevent effective injection on bfqq.
+ *
+ * In contrast, if the inject limit is not reset for an interval
+ * as long as 100 ms, then the number of short
+ * think time samples can grow significantly before the reset
+ * is performed. As a consequence, the think time state can
+ * become stable before the reset. Therefore there will be no
+ * state change when the 100 ms elapse, and no reset of the
+ * inject limit. The inject limit remains steadily equal to 1
+ * both during and after the 100 ms. So injection can be
+ * performed at all times, and throughput gets boosted.
+ *
+ * An inject limit equal to 1 is however in conflict, in
+ * general, with the fact that the think time of bfqq is
+ * short, because injection may be likely to delay bfqq's I/O
+ * (as explained in the comments in
+ * bfq_update_inject_limit()). But this does not happen in
+ * this special case, because bfqq's low think time is due to
+ * an effective handling of a synchronization, through
+ * injection. In this special case, bfqq's I/O does not get
+ * delayed by injection; on the contrary, bfqq's I/O is
+ * brought forward, because it is not blocked for
+ * milliseconds.
+ *
+ * In addition, serving the blocking I/O much sooner, and much
+ * more frequently than once per I/O-plugging timeout, makes
+ * it much quicker to detect a waker queue (the concept of
+ * waker queue is defined in the comments in
+ * bfq_add_request()). This makes it possible to start sooner
+ * to boost throughput more effectively, by injecting the I/O
+ * of the waker queue unconditionally on every
+ * bfq_dispatch_request().
+ *
+ * One last, important benefit of not resetting the inject
+ * limit before 100 ms is that, during this time interval, the
+ * base value for the total service time is likely to get
+ * finally computed for bfqq, freeing the inject limit from
+ * its relation with the think time.
+ */
+ if (state_changed && bfqq->last_serv_time_ns == 0 &&
+ (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
+ msecs_to_jiffies(100)) ||
+ !has_short_ttime))
+ bfq_reset_inject_limit(bfqd, bfqq);
}
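Editorial note: the condition just added compresses the 100 ms discussion above into a single predicate. A stand-alone restatement follows, as a sketch only (the helper name is invented; time_is_before_eq_jiffies() and msecs_to_jiffies() are the kernel's own macros):

static bool should_reset_inject_limit(bool state_changed,
				      u64 last_serv_time_ns,
				      unsigned long decrease_time_jif,
				      bool has_short_ttime)
{
	/* only while the base total service time is still unknown */
	if (!state_changed || last_serv_time_ns != 0)
		return false;
	/* short->long transitions reset at once; long->short only
	 * after the limit has been left alone for at least 100 ms */
	return !has_short_ttime ||
	       time_is_before_eq_jiffies(decrease_time_jif +
					 msecs_to_jiffies(100));
}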
/*
@@ -4420,19 +5337,9 @@
static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct request *rq)
{
- struct bfq_io_cq *bic = RQ_BIC(rq);
-
if (rq->cmd_flags & REQ_META)
bfqq->meta_pending++;
- bfq_update_io_thinktime(bfqd, bfqq);
- bfq_update_has_short_ttime(bfqd, bfqq, bic);
- bfq_update_io_seektime(bfqd, bfqq, rq);
-
- bfq_log_bfqq(bfqd, bfqq,
- "rq_enqueued: has_short_ttime=%d (seeky %d)",
- bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
-
bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
@@ -4441,28 +5348,31 @@
bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
/*
- * There is just this request queued: if the request
- * is small and the queue is not to be expired, then
- * just exit.
+ * There is just this request queued: if
+ * - the request is small, and
+ * - we are idling to boost throughput, and
+ * - the queue is not to be expired,
+ * then just exit.
*
* In this way, if the device is being idled to wait
* for a new request from the in-service queue, we
* avoid unplugging the device and committing the
- * device to serve just a small request. On the
- * contrary, we wait for the block layer to decide
- * when to unplug the device: hopefully, new requests
- * will be merged to this one quickly, then the device
- * will be unplugged and larger requests will be
- * dispatched.
+ * device to serve just a small request. In contrast
+ * we wait for the block layer to decide when to
+ * unplug the device: hopefully, new requests will be
+ * merged to this one quickly, then the device will be
+ * unplugged and larger requests will be dispatched.
*/
- if (small_req && !budget_timeout)
+ if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) &&
+ !budget_timeout)
return;
/*
- * A large enough request arrived, or the queue is to
- * be expired: in both cases disk idling is to be
- * stopped, so clear wait_request flag and reset
- * timer.
+ * A large enough request arrived, or idling is being
+ * performed to preserve service guarantees, or
+ * finally the queue is to be expired: in all these
+ * cases disk idling is to be stopped, so clear
+ * wait_request flag and reset timer.
*/
bfq_clear_bfqq_wait_request(bfqq);
hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
@@ -4488,8 +5398,6 @@
bool waiting, idle_timer_disabled = false;
if (new_bfqq) {
- if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
- new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
/*
* Release the request's reference to the old bfqq
* and make sure one is taken to the shared queue.
@@ -4519,6 +5427,10 @@
bfqq = new_bfqq;
}
+ bfq_update_io_thinktime(bfqd, bfqq);
+ bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
+ bfq_update_io_seektime(bfqd, bfqq, rq);
+
waiting = bfqq && bfq_bfqq_wait_request(bfqq);
bfq_add_request(rq);
idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
@@ -4531,7 +5443,7 @@
return idle_timer_disabled;
}
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
static void bfq_update_insert_stats(struct request_queue *q,
struct bfq_queue *bfqq,
bool idle_timer_disabled,
@@ -4550,18 +5462,18 @@
* In addition, the following queue lock guarantees that
* bfqq_group(bfqq) exists as well.
*/
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
if (idle_timer_disabled)
bfqg_stats_update_idle_time(bfqq_group(bfqq));
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
#else
static inline void bfq_update_insert_stats(struct request_queue *q,
struct bfq_queue *bfqq,
bool idle_timer_disabled,
unsigned int cmd_flags) {}
-#endif
+#endif /* CONFIG_BFQ_CGROUP_DEBUG */
static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool at_head)
@@ -4584,12 +5496,12 @@
spin_lock_irq(&bfqd->lock);
bfqq = bfq_init_rq(rq);
- if (at_head || blk_rq_is_passthrough(rq)) {
+ if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
if (at_head)
list_add(&rq->queuelist, &bfqd->dispatch);
else
list_add_tail(&rq->queuelist, &bfqd->dispatch);
- } else { /* bfqq is assumed to be non null here */
+ } else {
idle_timer_disabled = __bfq_insert_request(bfqd, rq);
/*
* Update bfqq, because, if a queue merge has occurred
@@ -4632,6 +5544,8 @@
static void bfq_update_hw_tag(struct bfq_data *bfqd)
{
+ struct bfq_queue *bfqq = bfqd->in_service_queue;
+
bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
bfqd->rq_in_driver);
@@ -4644,7 +5558,18 @@
* sum is not exact, as it's not taking into account deactivated
* requests.
*/
- if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
+ if (bfqd->rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD)
+ return;
+
+ /*
+ * If the active queue hasn't enough requests and can idle, bfq might
+ * not dispatch sufficient requests to the hardware. Don't zero
+ * hw_tag in this case.
+ */
+ if (bfqq && bfq_bfqq_has_short_ttime(bfqq) &&
+ bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] <
+ BFQ_HW_QUEUE_THRESHOLD &&
+ bfqd->rq_in_driver < BFQ_HW_QUEUE_THRESHOLD)
return;
if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
@@ -4653,6 +5578,9 @@
bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
bfqd->max_rq_in_driver = 0;
bfqd->hw_tag_samples = 0;
+
+ bfqd->nonrot_with_queueing =
+ blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
}
static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
@@ -4708,6 +5636,7 @@
1UL<<(BFQ_RATE_SHIFT - 10))
bfq_update_rate_reset(bfqd, NULL);
bfqd->last_completion = now_ns;
+ bfqd->last_completed_rq_bfqq = bfqq;
/*
* If we are waiting to discover whether the request pattern
@@ -4715,11 +5644,14 @@
* isochronous, and both requisites for this condition to hold
* are now satisfied, then compute soft_rt_next_start (see the
* comments on the function bfq_bfqq_softrt_next_start()). We
- * schedule this delayed check when bfqq expires, if it still
- * has in-flight requests.
+ * do not compute soft_rt_next_start if bfqq is in interactive
+ * weight raising (see the comments in bfq_bfqq_expire() for
+ * an explanation). We schedule this delayed update when bfqq
+ * expires, if it still has in-flight requests.
*/
if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
- RB_EMPTY_ROOT(&bfqq->sort_list))
+ RB_EMPTY_ROOT(&bfqq->sort_list) &&
+ bfqq->wr_coeff != bfqd->bfq_wr_coeff)
bfqq->soft_rt_next_start =
bfq_bfqq_softrt_next_start(bfqd, bfqq);
@@ -4777,6 +5709,167 @@
}
/*
+ * The processes associated with bfqq may happen to generate their
+ * cumulative I/O at a lower rate than the rate at which the device
+ * could serve the same I/O. This is rather probable, e.g., if only
+ * one process is associated with bfqq and the device is an SSD. It
+ * results in bfqq becoming often empty while in service. In this
+ * respect, if BFQ is allowed to switch to another queue when bfqq
+ * remains empty, then the device goes on being fed with I/O requests,
+ * and the throughput is not affected. In contrast, if BFQ is not
+ * allowed to switch to another queue---because bfqq is sync and
+ * I/O-dispatch needs to be plugged while bfqq is temporarily
+ * empty---then, during the service of bfqq, there will be frequent
+ * "service holes", i.e., time intervals during which bfqq gets empty
+ * and the device can only consume the I/O already queued in its
+ * hardware queues. During service holes, the device may even end up
+ * idle. In the end, during the service of bfqq, the device
+ * is driven at a lower speed than the one it can reach with the kind
+ * of I/O flowing through bfqq.
+ *
+ * To counter this loss of throughput, BFQ implements a "request
+ * injection mechanism", which tries to fill the above service holes
+ * with I/O requests taken from other queues. The hard part in this
+ * mechanism is finding the right amount of I/O to inject, so as to
+ * both boost throughput and not break bfqq's bandwidth and latency
+ * guarantees. In this respect, the mechanism maintains a per-queue
+ * inject limit, computed as below. While bfqq is empty, the injection
+ * mechanism dispatches extra I/O requests only until the total number
+ * of I/O requests in flight---i.e., already dispatched but not yet
+ * completed---remains lower than this limit.
+ *
+ * A first definition comes in handy to introduce the algorithm by
+ * which the inject limit is computed. We define as first request for
+ * bfqq, an I/O request for bfqq that arrives while bfqq is in
+ * service, and causes bfqq to switch from empty to non-empty. The
+ * algorithm updates the limit as a function of the effect of
+ * injection on the service times of only the first requests of
+ * bfqq. The reason for this restriction is that these are the
+ * requests whose service time is affected most, because they are the
+ * first to arrive after injection possibly occurred.
+ *
+ * To evaluate the effect of injection, the algorithm measures the
+ * "total service time" of first requests. We define as total service
+ * time of an I/O request, the time that elapses since when the
+ * request is enqueued into bfqq, to when it is completed. This
+ * quantity allows the whole effect of injection to be measured. It is
+ * easy to see why. Suppose that some requests of other queues are
+ * actually injected while bfqq is empty, and that a new request R
+ * then arrives for bfqq. If the device does start to serve all or
+ * part of the injected requests during the service hole, then,
+ * because of this extra service, it may delay the next invocation of
+ * the dispatch hook of BFQ. Then, even after R gets eventually
+ * dispatched, the device may delay the actual service of R if it is
+ * still busy serving the extra requests, or if it decides to serve,
+ * before R, some extra request still present in its queues. As a
+ * conclusion, the cumulative extra delay caused by injection can be
+ * easily evaluated by just comparing the total service time of first
+ * requests with and without injection.
+ *
+ * The limit-update algorithm works as follows. On the arrival of a
+ * first request of bfqq, the algorithm measures the total service time of the
+ * request only if one of the three cases below holds, and, for each
+ * case, it updates the limit as described below:
+ *
+ * (1) If there is no in-flight request. This gives a baseline for the
+ * total service time of the requests of bfqq. If the baseline has
+ * not been computed yet, then, after computing it, the limit is
+ * set to 1, to start boosting throughput, and to prepare the
+ * ground for the next case. If the baseline has already been
+ * computed, then it is updated, in case it turns out to be lower
+ * than the previous value.
+ *
+ * (2) If the limit is higher than 0 and there are in-flight
+ * requests. By comparing the total service time in this case with
+ * the above baseline, it is possible to know to what extent the
+ * current value of the limit is inflating the total service
+ * time. If the inflation is below a certain threshold, then bfqq
+ * is assumed to be suffering from no perceivable loss of its
+ * service guarantees, and the limit is even tentatively
+ * increased. If the inflation is above the threshold, then the
+ * limit is decreased. Due to the lack of any hysteresis, this
+ * logic makes the limit oscillate even in steady workload
+ * conditions. Yet we opted for it, because it is fast in reaching
+ * the best value for the limit, as a function of the current I/O
+ * workload. To reduce oscillations, this step is disabled for a
+ * short time interval after the limit happens to be decreased.
+ *
+ * (3) Periodically, after resetting the limit, to make sure that the
+ * limit eventually drops in case the workload changes. This is
+ * needed because, after the limit has gone safely up for a
+ * certain workload, it is impossible to guess whether the
+ * baseline total service time may have changed, without measuring
+ * it again without injection. A more effective version of this
+ * step might be to just sample the baseline, by interrupting
+ * injection only once, and then to reset/lower the limit only if
+ * the total service time with the current limit does happen to be
+ * too large.
+ *
+ * More details on each step are provided in the comments on the
+ * pieces of code that implement these steps: the branch handling the
+ * transition from empty to non-empty in bfq_add_request(), the branch
+ * handling injection in bfq_select_queue(), and the function
+ * bfq_choose_bfqq_for_injection(). These comments also explain some
+ * exceptions, made by the injection mechanism in some special cases.
+ */
+static void bfq_update_inject_limit(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+{
+ u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
+ unsigned int old_limit = bfqq->inject_limit;
+
+ if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
+ u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
+
+ if (tot_time_ns >= threshold && old_limit > 0) {
+ bfqq->inject_limit--;
+ bfqq->decrease_time_jif = jiffies;
+ } else if (tot_time_ns < threshold &&
+ old_limit <= bfqd->max_rq_in_driver)
+ bfqq->inject_limit++;
+ }
+
+ /*
+ * Either we still have to compute the base value for the
+ * total service time, and there seem to be the right
+ * conditions to do it, or we can lower the last base value
+ * computed.
+ *
+ * NOTE: (bfqd->rq_in_driver == 1) means that there is no I/O
+ * request in flight, because this function is in the code
+ * path that handles the completion of a request of bfqq, and,
+ * in particular, this function is executed before
+ * bfqd->rq_in_driver is decremented in such a code path.
+ */
+ if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
+ tot_time_ns < bfqq->last_serv_time_ns) {
+ if (bfqq->last_serv_time_ns == 0) {
+ /*
+ * Now we certainly have a base value: make sure we
+ * start trying injection.
+ */
+ bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
+ }
+ bfqq->last_serv_time_ns = tot_time_ns;
+ } else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1)
+ /*
+ * No I/O injected and no request still in service in
+ * the drive: these are the exact conditions for
+ * computing the base value of the total service time
+ * for bfqq. So let's update this value, because it is
+ * rather variable. For example, it varies if the size
+ * or the spatial locality of the I/O requests in bfqq
+ * change.
+ */
+ bfqq->last_serv_time_ns = tot_time_ns;
+
+ /* update complete, not waiting for any request completion any longer */
+ bfqd->waited_rq = NULL;
+ bfqd->rqs_injected = false;
+}
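Editorial note: to make the update rule above concrete, here is a worked example in plain user-space C (all values invented): with a base total service time of 2 ms the threshold is 3 ms, so a 3.5 ms sample decrements the limit, while a 2.5 ms sample would have incremented it.

#include <stdio.h>

int main(void)
{
	unsigned long long last_serv_time_ns = 2000000ULL; /* 2 ms base  */
	unsigned long long tot_time_ns = 3500000ULL;       /* 3.5 ms now */
	unsigned int inject_limit = 2, max_rq_in_driver = 8;

	/* the >>1 shift makes the threshold 1.5 * base service time */
	unsigned long long threshold = (last_serv_time_ns * 3) >> 1;

	if (tot_time_ns >= threshold && inject_limit > 0)
		inject_limit--;    /* injection inflated service: back off */
	else if (tot_time_ns < threshold && inject_limit <= max_rq_in_driver)
		inject_limit++;    /* inflation tolerable: probe higher */

	printf("threshold %llu ns, new limit %u\n", threshold, inject_limit);
	return 0;
}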
+
+/*
* Handle either a requeue or a finish for rq. The things to do are
* the same in both cases: all references to rq are to be dropped. In
* particular, rq is considered completed from the point of view of
@@ -4820,6 +5913,9 @@
spin_lock_irqsave(&bfqd->lock, flags);
+ if (rq == bfqd->waited_rq)
+ bfq_update_inject_limit(bfqd, bfqq);
+
bfq_completed_request(bfqq, bfqd);
bfq_finish_requeue_request_body(bfqq);
@@ -4887,7 +5983,7 @@
bfq_put_cooperator(bfqq);
- bfq_put_queue(bfqq);
+ bfq_release_process_ref(bfqq->bfqd, bfqq);
return NULL;
}
@@ -4983,7 +6079,7 @@
* preparation is that, after the prepare_request hook is invoked for
* rq, rq may still be transformed into a request with no icq, i.e., a
* request not associated with any queue. No bfq hook is invoked to
- * signal this tranformation. As a consequence, should these
+ * signal this transformation. As a consequence, should these
* preparation operations be performed when the prepare_request hook
* is invoked, and should rq be transformed one moment later, bfq
* would end up in an inconsistent state, because it would have
@@ -5074,7 +6170,29 @@
}
}
- if (unlikely(bfq_bfqq_just_created(bfqq)))
+ /*
+ * Consider bfqq as possibly belonging to a burst of newly
+ * created queues only if:
+ * 1) A burst is actually happening (bfqd->burst_size > 0)
+ * or
+ * 2) There is no other active queue. In fact, if, in
+ * contrast, there are active queues not belonging to the
+ * possible burst bfqq may belong to, then there is no gain
+ * in considering bfqq as belonging to a burst, and
+ * therefore in not weight-raising bfqq. See comments on
+ * bfq_handle_burst().
+ *
+ * This filtering also helps eliminating false positives,
+ * occurring when bfqq does not belong to an actual large
+ * burst, but some background task (e.g., a service) happens
+ * to trigger the creation of new queues very close to when
+ * bfqq and its possible companion queues are created. See
+ * comments on bfq_handle_burst() for further details also on
+ * this issue.
+ */
+ if (unlikely(bfq_bfqq_just_created(bfqq) &&
+ (bfqd->burst_size > 0 ||
+ bfq_tot_busy_queues(bfqd) == 0)))
bfq_handle_burst(bfqd, bfqq);
return bfqq;
@@ -5223,7 +6341,7 @@
return min_shallow;
}
-static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
{
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
struct blk_mq_tags *tags = hctx->sched_tags;
@@ -5231,6 +6349,11 @@
min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
+}
+
+static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+{
+ bfq_depth_updated(hctx);
return 0;
}
@@ -5295,9 +6418,9 @@
}
eq->elevator_data = bfqd;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
q->elevator = eq;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
/*
* Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
@@ -5329,14 +6452,15 @@
HRTIMER_MODE_REL);
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
- bfqd->queue_weights_tree = RB_ROOT;
- bfqd->group_weights_tree = RB_ROOT;
+ bfqd->queue_weights_tree = RB_ROOT_CACHED;
+ bfqd->num_groups_with_pending_reqs = 0;
INIT_LIST_HEAD(&bfqd->active_list);
INIT_LIST_HEAD(&bfqd->idle_list);
INIT_HLIST_HEAD(&bfqd->burst_list);
bfqd->hw_tag = -1;
+ bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
bfqd->bfq_max_budget = bfq_default_max_budget;
@@ -5637,7 +6761,7 @@
};
static struct elevator_type iosched_bfq_mq = {
- .ops.mq = {
+ .ops = {
.limit_depth = bfq_limit_depth,
.prepare_request = bfq_prepare_request,
.requeue_request = bfq_finish_requeue_request,
@@ -5653,12 +6777,12 @@
.requests_merged = bfq_requests_merged,
.request_merged = bfq_request_merged,
.has_work = bfq_has_work,
+ .depth_updated = bfq_depth_updated,
.init_hctx = bfq_init_hctx,
.init_sched = bfq_init_queue,
.exit_sched = bfq_exit_queue,
},
- .uses_mq = true,
.icq_size = sizeof(struct bfq_io_cq),
.icq_align = __alignof__(struct bfq_io_cq),
.elevator_attrs = bfq_attrs,
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index a8a2e5a..5d1a519 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Header file for the BFQ I/O scheduler: data structures and
* prototypes of interface functions among BFQ components.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _BFQ_H
#define _BFQ_H
@@ -32,6 +23,8 @@
#define BFQ_DEFAULT_GRP_IOPRIO 0
#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
+#define MAX_PID_STR_LENGTH 12
+
/*
* Soft real-time applications are extremely more latency sensitive
* than interactive ones. Over-raise the weight of the former to
@@ -89,7 +82,7 @@
* expiration. This peculiar definition allows for the following
* optimization, not yet exploited: while a given entity is still in
* service, we already know which is the best candidate for next
- * service among the other active entitities in the same parent
+ * service among the other active entities in the same parent
* entity. We can then quickly compare the timestamps of the
* in-service entity with those of such best candidate.
*
@@ -108,15 +101,14 @@
};
/**
- * struct bfq_weight_counter - counter of the number of all active entities
+ * struct bfq_weight_counter - counter of the number of all active queues
* with a given weight.
*/
struct bfq_weight_counter {
- unsigned int weight; /* weight of the entities this counter refers to */
- unsigned int num_active; /* nr of active entities with this weight */
+ unsigned int weight; /* weight of the queues this counter refers to */
+ unsigned int num_active; /* nr of active queues with this weight */
/*
- * Weights tree member (see bfq_data's @queue_weights_tree and
- * @group_weights_tree)
+ * Weights tree member (see bfq_data's @queue_weights_tree)
*/
struct rb_node weights_node;
};
@@ -141,7 +133,7 @@
*
* Unless cgroups are used, the weight value is calculated from the
* ioprio to export the same interface as CFQ. When dealing with
- * ``well-behaved'' queues (i.e., queues that do not spend too much
+ * "well-behaved" queues (i.e., queues that do not spend too much
* time to consume their budget and have true sequential behavior, and
* when there are no external factors breaking anticipation) the
* relative weights at each level of the cgroups hierarchy should be
@@ -151,8 +143,6 @@
struct bfq_entity {
/* service_tree member */
struct rb_node rb_node;
- /* pointer to the weight counter associated with this entity */
- struct bfq_weight_counter *weight_counter;
/*
* Flag, true if the entity is on a tree (either the active or
@@ -178,6 +168,9 @@
/* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
int budget;
+ /* device weight; if non-zero, it overrides the default weight of
+ * the bfq_group_data */
+ int dev_weight;
/* weight of the queue */
int weight;
/* next weight if a change is in progress */
@@ -199,6 +192,9 @@
/* flag, set to request a weight, ioprio or ioprio_class change */
int prio_changed;
+
+ /* flag, set if the entity is counted in groups_with_pending_reqs */
+ bool in_groups_with_pending_reqs;
};
struct bfq_group;
@@ -240,6 +236,13 @@
/* next ioprio and ioprio class if a change is in progress */
unsigned short new_ioprio, new_ioprio_class;
+ /* last total-service-time sample, see bfq_update_inject_limit() */
+ u64 last_serv_time_ns;
+ /* limit for request injection */
+ unsigned int inject_limit;
+ /* last time the inject limit has been decreased, in jiffies */
+ unsigned long decrease_time_jif;
+
/*
* Shared bfq_queue if queue is cooperating with one or more
* other queues.
@@ -266,6 +269,9 @@
/* entity representing this queue in the scheduler */
struct bfq_entity entity;
+ /* pointer to the weight counter associated with this entity */
+ struct bfq_weight_counter *weight_counter;
+
/* maximum budget allowed from the feedback mechanism */
int max_budget;
/* budget expiration (in jiffies) */
@@ -351,6 +357,27 @@
unsigned long split_time; /* time of last split */
unsigned long first_IO_time; /* time of first I/O for this queue */
+
+ /* max service rate measured so far */
+ u32 max_service_rate;
+
+ /*
+ * Pointer to the waker queue for this queue, i.e., to the
+ * queue Q such that this queue happens to get new I/O right
+ * after some I/O request of Q is completed. For details, see
+ * the comments on the choice of the queue for injection in
+ * bfq_select_queue().
+ */
+ struct bfq_queue *waker_bfqq;
+ /* node for woken_list, see below */
+ struct hlist_node woken_list_node;
+ /*
+ * Head of the list of the woken queues for this queue, i.e.,
+ * of the list of the queues for which this queue is a waker
+ * queue. This list is used to reset the waker_bfqq pointer in
+ * the woken queues when this queue exits.
+ */
+ struct hlist_head woken_list;
};
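Editorial note: a minimal sketch of how the two waker fields above pair up, assuming the kernel's hlist helpers; the function name is invented for illustration (the real linking happens in bfq_add_request(), outside this hunk, and the teardown counterpart is the loop added to bfq_put_queue() earlier in this patch):

static void bfq_link_waker(struct bfq_queue *bfqq, struct bfq_queue *waker)
{
	/* drop any previous waker relation first */
	if (!hlist_unhashed(&bfqq->woken_list_node))
		hlist_del_init(&bfqq->woken_list_node);

	bfqq->waker_bfqq = waker;
	hlist_add_head(&bfqq->woken_list_node, &waker->woken_list);
}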
/**
@@ -390,6 +417,15 @@
bool was_in_burst_list;
/*
+ * Save the weight when a merge occurs, to be able
+ * to restore it in case of split. If the weight is not
+ * correctly resumed when the queue is recycled,
+ * then the weight of the recycled queue could differ
+ * from the weight of the original queue.
+ */
+ unsigned int saved_weight;
+
+ /*
* Similar to previous fields: save wr information.
*/
unsigned long saved_wr_coeff;
@@ -421,22 +457,62 @@
* weight-raised @bfq_queue (see the comments to the functions
* bfq_weights_tree_[add|remove] for further details).
*/
- struct rb_root queue_weights_tree;
- /*
- * rbtree of non-queue @bfq_entity weight counters, sorted by
- * weight. Used to keep track of whether all @bfq_groups have
- * the same weight. The tree contains one counter for each
- * distinct weight associated to some active @bfq_group (see
- * the comments to the functions bfq_weights_tree_[add|remove]
- * for further details).
- */
- struct rb_root group_weights_tree;
+ struct rb_root_cached queue_weights_tree;
/*
- * Number of bfq_queues containing requests (including the
- * queue in service, even if it is idling).
+ * Number of groups with at least one descendant process that
+ * has at least one request waiting for completion. Note that
+ * this also accounts for requests already dispatched, but not
+ * yet completed. Therefore this number of groups may differ
+ * (be larger) than the number of active groups, as a group is
+ * considered active only if its corresponding entity has
+ * descendant queues with at least one request queued. This
+ * number is used to decide whether a scenario is symmetric.
+ * For a detailed explanation see comments on the computation
+ * of the variable asymmetric_scenario in the function
+ * bfq_better_to_idle().
+ *
+ * However, it is hard to compute this number exactly, for
+ * groups with multiple descendant processes. Consider a group
+ * that is inactive, i.e., that has no descendant process with
+ * pending I/O inside BFQ queues. Then suppose that
+ * num_groups_with_pending_reqs is still accounting for this
+ * group, because the group has descendant processes with some
+ * I/O request still in flight. num_groups_with_pending_reqs
+ * should be decremented when the in-flight request of the
+ * last descendant process is finally completed (assuming that
+ * nothing else has changed for the group in the meantime, in
+ * terms of composition of the group and active/inactive state of child
+ * groups and processes). To accomplish this, an additional
+ * pending-request counter must be added to entities, and must
+ * be updated correctly. To avoid this additional field and operations,
+ * we resort to the following tradeoff between simplicity and
+ * accuracy: for an inactive group that is still counted in
+ * num_groups_with_pending_reqs, we decrement
+ * num_groups_with_pending_reqs when the first descendant
+ * process of the group remains with no request waiting for
+ * completion.
+ *
+ * Even this simpler decrement strategy requires a little
+ * care: to avoid multiple decrements, we flag a group,
+ * more precisely an entity representing a group, as still
+ * counted in num_groups_with_pending_reqs when it becomes
+ * inactive. Then, when the first descendant queue of the
+ * entity remains with no request waiting for completion,
+ * num_groups_with_pending_reqs is decremented, and this flag
+ * is reset. After this flag is reset for the entity,
+ * num_groups_with_pending_reqs won't be decremented any
+ * longer in case a new descendant queue of the entity remains
+ * with no request waiting for completion.
*/
- int busy_queues;
+ unsigned int num_groups_with_pending_reqs;
+
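Editorial note: the increment half of this tradeoff appears later in this patch, in __bfq_activate_entity() in bfq-wf2q.c; below is a condensed sketch of both halves, with the per-entity flag guarding against double counting. The function names are invented, and the decrement really lives in bfq_weights_tree_remove(), which is not shown in this hunk.

static void bfq_group_became_active(struct bfq_data *bfqd,
				    struct bfq_entity *entity)
{
	if (!entity->in_groups_with_pending_reqs) {
		entity->in_groups_with_pending_reqs = true;
		bfqd->num_groups_with_pending_reqs++;
	}
}

static void bfq_group_quiesced(struct bfq_data *bfqd,
			       struct bfq_entity *entity)
{
	if (entity->in_groups_with_pending_reqs) {
		entity->in_groups_with_pending_reqs = false;
		bfqd->num_groups_with_pending_reqs--;
	}
}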
+ /*
+ * Per-class (RT, BE, IDLE) number of bfq_queues containing
+ * requests (including the queue in service, even if it is
+ * idling).
+ */
+ unsigned int busy_queues[3];
/* number of weight-raised busy @bfq_queues */
int wr_busy_queues;
/* number of queued requests */
@@ -444,6 +520,9 @@
/* number of requests dispatched and waiting for completion */
int rq_in_driver;
+ /* true if the device is non rotational and performs queueing */
+ bool nonrot_with_queueing;
+
/*
* Maximum number of requests in driver in the last
* @hw_tag_samples completed requests.
@@ -469,9 +548,35 @@
/* on-disk position of the last served request */
sector_t last_position;
+ /* position of the last served request for the in-service queue */
+ sector_t in_serv_last_pos;
+
/* time of last request completion (ns) */
u64 last_completion;
+ /* bfqq owning the last completed rq */
+ struct bfq_queue *last_completed_rq_bfqq;
+
+ /* time of last transition from empty to non-empty (ns) */
+ u64 last_empty_occupied_ns;
+
+ /*
+ * Flag set to activate the sampling of the total service time
+ * of a just-arrived first I/O request (see
+ * bfq_update_inject_limit()). This will cause the setting of
+ * waited_rq when the request is finally dispatched.
+ */
+ bool wait_dispatch;
+ /*
+ * If set, then bfq_update_inject_limit() is invoked when
+ * waited_rq is eventually completed.
+ */
+ struct request *waited_rq;
+ /*
+ * True if some request has been injected during the last service hole.
+ */
+ bool rqs_injected;
+
/* time of first rq dispatch in current observation interval (ns) */
u64 first_dispatch;
/* time of last rq dispatch in current observation interval (ns) */
@@ -481,6 +586,7 @@
ktime_t last_budget_start;
/* beginning of the last idle slice */
ktime_t last_idling_start;
+ unsigned long last_idling_start_jiffies;
/* number of samples in current observation interval */
int peak_rate_samples;
@@ -661,7 +767,8 @@
* update
*/
BFQQF_coop, /* bfqq is shared */
- BFQQF_split_coop /* shared bfqq will be split */
+ BFQQF_split_coop, /* shared bfqq will be split */
+ BFQQF_has_waker /* bfqq has a waker queue */
};
#define BFQ_BFQQ_FNS(name) \
@@ -681,6 +788,7 @@
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
+BFQ_BFQQ_FNS(has_waker);
#undef BFQ_BFQQ_FNS
/* Expiration reasons. */
@@ -695,8 +803,13 @@
BFQQE_PREEMPTED /* preemption in progress */
};
+struct bfq_stat {
+ struct percpu_counter cpu_cnt;
+ atomic64_t aux_cnt;
+};
+
struct bfqg_stats {
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
/* number of ios merged */
struct blkg_rwstat merged;
/* total time spent on device in ns, may not be accurate w/ queueing */
@@ -706,25 +819,25 @@
/* number of IOs queued up */
struct blkg_rwstat queued;
/* total disk time and nr sectors dispatched by this group */
- struct blkg_stat time;
+ struct bfq_stat time;
/* sum of number of ios queued across all samples */
- struct blkg_stat avg_queue_size_sum;
+ struct bfq_stat avg_queue_size_sum;
/* count of samples taken for average */
- struct blkg_stat avg_queue_size_samples;
+ struct bfq_stat avg_queue_size_samples;
/* how many times this group has been removed from service tree */
- struct blkg_stat dequeue;
+ struct bfq_stat dequeue;
/* total time spent waiting for it to be assigned a timeslice. */
- struct blkg_stat group_wait_time;
+ struct bfq_stat group_wait_time;
/* time spent idling for this blkcg_gq */
- struct blkg_stat idle_time;
+ struct bfq_stat idle_time;
/* total time with empty current active q with other requests queued */
- struct blkg_stat empty_time;
+ struct bfq_stat empty_time;
/* fields after this shouldn't be cleared on stat reset */
u64 start_group_wait_time;
u64 start_idle_time;
u64 start_empty_time;
uint16_t flags;
-#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_BFQ_CGROUP_DEBUG */
};
#ifdef CONFIG_BFQ_GROUP_IOSCHED
@@ -825,11 +938,11 @@
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
-void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
- struct rb_root *root);
+void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ struct rb_root_cached *root);
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
- struct bfq_entity *entity,
- struct rb_root *root);
+ struct bfq_queue *bfqq,
+ struct rb_root_cached *root);
void bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq);
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -906,6 +1019,7 @@
struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq);
struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
+unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd);
struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity);
struct bfq_entity *bfq_entity_of(struct rb_node *node);
unsigned short bfq_ioprio_to_weight(int ioprio);
@@ -922,7 +1036,7 @@
bool ins_into_idle_tree);
bool next_queue_may_preempt(struct bfq_data *bfqd);
struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bool ins_into_idle_tree, bool expiration);
void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
@@ -935,13 +1049,23 @@
/* --------------- end of interface of B-WF2Q+ ---------------- */
/* Logging facilities. */
+static inline void bfq_pid_to_str(int pid, char *str, int len)
+{
+ if (pid != -1)
+ snprintf(str, len, "%d", pid);
+ else
+ snprintf(str, len, "SHARED-");
+}
+
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+ char pid_str[MAX_PID_STR_LENGTH]; \
+ bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
blk_add_cgroup_trace_msg((bfqd)->queue, \
bfqg_to_blkg(bfqq_group(bfqq))->blkcg, \
- "bfq%d%c " fmt, (bfqq)->pid, \
+ "bfq%s%c " fmt, pid_str, \
bfq_bfqq_sync((bfqq)) ? 'S' : 'A', ##args); \
} while (0)
@@ -952,10 +1076,13 @@
#else /* CONFIG_BFQ_GROUP_IOSCHED */
-#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
- blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+ char pid_str[MAX_PID_STR_LENGTH]; \
+ bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
+ blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str, \
bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
- ##args)
+ ##args); \
+} while (0)
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
#endif /* CONFIG_BFQ_GROUP_IOSCHED */
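Editorial note: the practical effect of the pid_str indirection is only visible in the trace output; roughly (sample values invented):

	char pid_str[MAX_PID_STR_LENGTH];

	bfq_pid_to_str(1234, pid_str, MAX_PID_STR_LENGTH); /* "1234"    */
	bfq_pid_to_str(-1, pid_str, MAX_PID_STR_LENGTH);   /* "SHARED-" */
	/* trace lines then read "bfq1234S ..." for a per-process sync
	 * queue and "bfqSHARED-S ..." for a merged (shared) queue */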
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index ff7c2d4..05f0bf4 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Hierarchical Budget Worst-case Fair Weighted Fair Queueing
* (B-WF2Q+): hierarchical scheduling algorithm by which the BFQ I/O
* scheduler schedules generic entities. The latter can represent
* either single bfq queues (associated with processes) or groups of
* bfq queues (associated with cgroups).
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include "bfq-iosched.h"
@@ -44,6 +35,12 @@
BFQ_DEFAULT_GRP_CLASS - 1;
}
+unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd)
+{
+ return bfqd->busy_queues[0] + bfqd->busy_queues[1] +
+ bfqd->busy_queues[2];
+}
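Editorial note: bfqd->busy_queues is now a per-class array; the three slots are indexed by bfqq->ioprio_class - 1 (with the usual IOPRIO_CLASS_RT=1, BE=2, IDLE=3 values, so RT maps to 0, BE to 1, IDLE to 2). The sketch below mirrors the bfq_add/del_bfqq_busy hunks later in this patch:

	bfqd->busy_queues[bfqq->ioprio_class - 1]++; /* bfq_add_bfqq_busy() */
	bfqd->busy_queues[bfqq->ioprio_class - 1]--; /* bfq_del_bfqq_busy() */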
+
static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
bool expiration);
@@ -53,7 +50,7 @@
* bfq_update_next_in_service - update sd->next_in_service
* @sd: sched_data for which to perform the update.
* @new_entity: if not NULL, pointer to the entity whose activation,
- * requeueing or repositionig triggered the invocation of
+ * requeueing or repositioning triggered the invocation of
* this function.
* @expiration: if true, this function is being invoked after the
* expiration of the in-service entity
@@ -84,7 +81,7 @@
/*
* If this update is triggered by the activation, requeueing
- * or repositiong of an entity that does not coincide with
+ * or repositioning of an entity that does not coincide with
* sd->next_in_service, then a full lookup in the active tree
* can be avoided. In fact, it is enough to check whether the
* just-modified entity has the same priority as
@@ -731,7 +728,7 @@
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
unsigned int prev_weight, new_weight;
struct bfq_data *bfqd = NULL;
- struct rb_root *root;
+ struct rb_root_cached *root;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_sched_data *sd;
struct bfq_group *bfqg;
@@ -747,6 +744,8 @@
}
#endif
+ /* Matches the smp_wmb() in bfq_group_set_weight. */
+ smp_rmb();
old_st->wsum -= entity->weight;
if (entity->new_weight != entity->orig_weight) {
@@ -788,25 +787,23 @@
new_weight = entity->orig_weight *
(bfqq ? bfqq->wr_coeff : 1);
/*
- * If the weight of the entity changes, remove the entity
- * from its old weight counter (if there is a counter
- * associated with the entity), and add it to the counter
- * associated with its new weight.
+ * If the weight of the entity changes, and the entity is a
+ * queue, remove the entity from its old weight counter (if
+ * there is a counter associated with the entity).
*/
- if (prev_weight != new_weight) {
- root = bfqq ? &bfqd->queue_weights_tree :
- &bfqd->group_weights_tree;
- __bfq_weights_tree_remove(bfqd, entity, root);
+ if (prev_weight != new_weight && bfqq) {
+ root = &bfqd->queue_weights_tree;
+ __bfq_weights_tree_remove(bfqd, bfqq, root);
}
entity->weight = new_weight;
/*
- * Add the entity to its weights tree only if it is
- * not associated with a weight-raised queue.
+ * Add the entity, if it is not a weight-raised queue,
+ * to the counter associated with its new weight.
*/
- if (prev_weight != new_weight &&
- (bfqq ? bfqq->wr_coeff == 1 : 1))
+ if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) {
/* If we get here, root has been initialized. */
- bfq_weights_tree_add(bfqd, entity, root);
+ bfq_weights_tree_add(bfqd, bfqq, root);
+ }
new_st->wsum += entity->weight;
@@ -1008,13 +1005,16 @@
entity->on_st = true;
}
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
struct bfq_group *bfqg =
container_of(entity, struct bfq_group, entity);
+ struct bfq_data *bfqd = bfqg->bfqd;
- bfq_weights_tree_add(bfqg->bfqd, entity,
- &bfqd->group_weights_tree);
+ if (!entity->in_groups_with_pending_reqs) {
+ entity->in_groups_with_pending_reqs = true;
+ bfqd->num_groups_with_pending_reqs++;
+ }
}
#endif
@@ -1153,15 +1153,14 @@
}
/**
- * __bfq_deactivate_entity - deactivate an entity from its service tree.
- * @entity: the entity to deactivate.
+ * __bfq_deactivate_entity - update sched_data and service trees for
+ * entity, so as to represent entity as inactive
+ * @entity: the entity being deactivated.
* @ins_into_idle_tree: if false, the entity will not be put into the
* idle tree.
*
- * Deactivates an entity, independently of its previous state. Must
- * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it into the idle
- * tree.
+ * If necessary and allowed, puts entity into the idle tree. NOTE:
+ * entity may be on no tree if in service.
*/
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
{
@@ -1390,7 +1389,7 @@
* In this first case, update the virtual time in @st too (see the
* comments on this update inside the function).
*
- * In constrast, if there is an in-service entity, then return the
+ * In contrast, if there is an in-service entity, then return the
* entity that would be set in service if not only the above
* conditions, but also the next one held true: the currently
* in-service entity, on expiration,
@@ -1473,12 +1472,12 @@
* is being invoked as a part of the expiration path
* of the in-service queue. In this case, even if
* sd->in_service_entity is not NULL,
- * sd->in_service_entiy at this point is actually not
+ * sd->in_service_entity at this point is actually not
* in service any more, and, if needed, has already
* been properly queued or requeued into the right
* tree. The reason why sd->in_service_entity is still
* not NULL here, even if expiration is true, is that
- * sd->in_service_entiy is reset as a last step in the
+ * sd->in_service_entity is reset as a last step in the
* expiration path. So, if expiration is true, tell
* __bfq_lookup_next_entity that there is no
* sd->in_service_entity.
@@ -1513,7 +1512,7 @@
struct bfq_sched_data *sd;
struct bfq_queue *bfqq;
- if (bfqd->busy_queues == 0)
+ if (bfq_tot_busy_queues(bfqd) == 0)
return NULL;
/*
@@ -1599,7 +1598,8 @@
return bfqq;
}
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+/* returns true if the in-service queue gets freed */
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
{
struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
@@ -1623,8 +1623,20 @@
* service tree either, then release the service reference to
* the queue it represents (taken with bfq_get_entity).
*/
- if (!in_serv_entity->on_st)
+ if (!in_serv_entity->on_st) {
+ /*
+ * If no process is referencing in_serv_bfqq any
+ * longer, then the service reference may be the only
+ * reference to the queue. If this is the case, then
+ * bfqq gets freed here.
+ */
+ int ref = in_serv_bfqq->ref;
bfq_put_queue(in_serv_bfqq);
+ if (ref == 1)
+ return true;
+ }
+
+ return false;
}
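Editorial note: a sketch of how a caller is expected to consume the new return value (the real consumer is __bfq_bfqq_expire(), which is outside this hunk; treat this as illustrative only):

	/* after deactivating/requeueing all in-service entities: */
	if (__bfq_bfqd_reset_in_service(bfqd))
		return;	/* in-service bfqq was freed: do not touch it again */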
void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -1665,10 +1677,7 @@
bfq_clear_bfqq_busy(bfqq);
- bfqd->busy_queues--;
-
- if (!bfqq->dispatched)
- bfq_weights_tree_remove(bfqd, bfqq);
+ bfqd->busy_queues[bfqq->ioprio_class - 1]--;
if (bfqq->wr_coeff > 1)
bfqd->wr_busy_queues--;
@@ -1676,6 +1685,9 @@
bfqg_stats_update_dequeue(bfqq_group(bfqq));
bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
+
+ if (!bfqq->dispatched)
+ bfq_weights_tree_remove(bfqd, bfqq);
}
/*
@@ -1688,11 +1700,11 @@
bfq_activate_bfqq(bfqd, bfqq);
bfq_mark_bfqq_busy(bfqq);
- bfqd->busy_queues++;
+ bfqd->busy_queues[bfqq->ioprio_class - 1]++;
if (!bfqq->dispatched)
if (bfqq->wr_coeff == 1)
- bfq_weights_tree_add(bfqd, &bfqq->entity,
+ bfq_weights_tree_add(bfqd, bfqq,
&bfqd->queue_weights_tree);
if (bfqq->wr_coeff > 1)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 67b5fb8..fb95dbb 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -1,23 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bio-integrity.c - bio data integrity extensions
*
* Copyright (C) 2007, 2008, 2009 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
*/
#include <linux/blkdev.h>
@@ -57,8 +43,7 @@
unsigned inline_vecs;
if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
- bip = kmalloc(sizeof(struct bio_integrity_payload) +
- sizeof(struct bio_vec) * nr_vecs, gfp_mask);
+ bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
inline_vecs = nr_vecs;
} else {
bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
@@ -291,8 +276,12 @@
ret = bio_integrity_add_page(bio, virt_to_page(buf),
bytes, offset);
- if (ret == 0)
- return false;
+ if (ret == 0) {
+ printk(KERN_ERR "could not attach integrity payload\n");
+ kfree(buf);
+ status = BLK_STS_RESOURCE;
+ goto err_end_io;
+ }
if (ret < bytes)
break;
@@ -306,6 +295,8 @@
if (bio_data_dir(bio) == WRITE) {
bio_integrity_process(bio, &bio->bi_iter,
bi->profile->generate_fn);
+ } else {
+ bip->bio_iter = bio->bi_iter;
}
return true;
@@ -331,20 +322,14 @@
container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio;
struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
- struct bvec_iter iter = bio->bi_iter;
/*
* At the moment verify is called, the bio's iterator has been
* advanced during split and completion; we need to rewind the
* iterator to its original position.
*/
- if (bio_rewind_iter(bio, &iter, iter.bi_done)) {
- bio->bi_status = bio_integrity_process(bio, &iter,
- bi->profile->verify_fn);
- } else {
- bio->bi_status = BLK_STS_IOERR;
- }
-
+ bio->bi_status = bio_integrity_process(bio, &bip->bio_iter,
+ bi->profile->verify_fn);
bio_integrity_free(bio);
bio_endio(bio);
}
@@ -394,7 +379,6 @@
bip->bip_iter.bi_sector += bytes_done >> 9;
bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}
-EXPORT_SYMBOL(bio_integrity_advance);
/**
* bio_integrity_trim - Trim integrity vector
@@ -464,7 +448,6 @@
mempool_exit(&bs->bio_integrity_pool);
mempool_exit(&bs->bvec_integrity_pool);
}
-EXPORT_SYMBOL(bioset_integrity_free);
void __init bio_integrity_init(void)
{
diff --git a/block/bio.c b/block/bio.c
index 55a5386..b1170ec 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
- *
*/
#include <linux/mm.h>
#include <linux/swap.h>
@@ -29,6 +16,7 @@
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>
+#include <linux/highmem.h>
#include <trace/events/block.h>
#include "blk.h"
@@ -244,7 +232,7 @@
void bio_uninit(struct bio *bio)
{
- bio_disassociate_task(bio);
+ bio_disassociate_blkg(bio);
}
EXPORT_SYMBOL(bio_uninit);
@@ -571,15 +559,6 @@
}
EXPORT_SYMBOL(bio_put);
-inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
-{
- if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
- blk_recount_segments(q, bio);
-
- return bio->bi_phys_segments;
-}
-EXPORT_SYMBOL(bio_phys_segments);
-
/**
* __bio_clone_fast - clone a bio that shares the original bio's biovec
* @bio: destination bio
@@ -610,7 +589,8 @@
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
- bio_clone_blkcg_association(bio, bio_src);
+ bio_clone_blkg_association(bio, bio_src);
+ blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);
@@ -647,25 +627,61 @@
}
EXPORT_SYMBOL(bio_clone_fast);
+static inline bool page_is_mergeable(const struct bio_vec *bv,
+ struct page *page, unsigned int len, unsigned int off,
+ bool *same_page)
+{
+ phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
+ bv->bv_offset + bv->bv_len - 1;
+ phys_addr_t page_addr = page_to_phys(page);
+
+ if (vec_end_addr + 1 != page_addr + off)
+ return false;
+ if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
+ return false;
+
+ *same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
+ if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
+ return false;
+ return true;
+}
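Editorial note: numerically, the first test above is plain physical contiguity; a toy user-space rendering (addresses invented):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* existing bvec covers physical bytes [0x1000, 0x1fff] */
	unsigned long long vec_end_addr = 0x1000ULL + 0 + 0x1000 - 1;
	/* candidate data starts at physical address 0x2000 */
	unsigned long long page_addr = 0x2000ULL, off = 0;

	bool contiguous = (vec_end_addr + 1 == page_addr + off);
	printf("contiguous: %d\n", contiguous);	/* prints 1 */
	return 0;
}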
+
+static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned len, unsigned offset,
+ bool *same_page)
+{
+ struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
+ unsigned long mask = queue_segment_boundary(q);
+ phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
+ phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
+
+ if ((addr1 | mask) != (addr2 | mask))
+ return false;
+ if (bv->bv_len + len > queue_max_segment_size(q))
+ return false;
+ return __bio_try_merge_page(bio, page, len, offset, same_page);
+}
+
/**
- * bio_add_pc_page - attempt to add page to bio
+ * __bio_add_pc_page - attempt to add page to passthrough bio
* @q: the target queue
* @bio: destination bio
* @page: page to add
* @len: vec entry length
* @offset: vec entry offset
+ * @same_page: return if the merge happened inside the same page
*
* Attempt to add a page to the bio_vec maplist. This can fail for a
* number of reasons, such as the bio being full or target block device
* limitations. The target block device must allow bio's up to PAGE_SIZE,
* so it is always possible to add a single page to an empty bio.
*
- * This should only be used by REQ_PC bios.
+ * This should only be used by passthrough bios.
*/
-int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
- *page, unsigned int len, unsigned int offset)
+static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset,
+ bool *same_page)
{
- int retried_segments = 0;
struct bio_vec *bvec;
/*
@@ -677,99 +693,68 @@
if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
return 0;
- /*
- * For filesystems with a blocksize smaller than the pagesize
- * we will often be called with the same page as last time and
- * a consecutive offset. Optimize this special case.
- */
if (bio->bi_vcnt > 0) {
- struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
-
- if (page == prev->bv_page &&
- offset == prev->bv_offset + prev->bv_len) {
- prev->bv_len += len;
- bio->bi_iter.bi_size += len;
- goto done;
- }
+ if (bio_try_merge_pc_page(q, bio, page, len, offset, same_page))
+ return len;
/*
- * If the queue doesn't support SG gaps and adding this
- * offset would create a gap, disallow it.
+ * If the queue doesn't support SG gaps and adding this segment
+ * would create a gap, disallow it.
*/
- if (bvec_gap_to_prev(q, prev, offset))
+ bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
+ if (bvec_gap_to_prev(q, bvec, offset))
return 0;
}
- if (bio_full(bio))
+ if (bio_full(bio, len))
return 0;
- /*
- * setup the new entry, we might clear it again later if we
- * cannot add the page
- */
+ if (bio->bi_vcnt >= queue_max_segments(q))
+ return 0;
+
bvec = &bio->bi_io_vec[bio->bi_vcnt];
bvec->bv_page = page;
bvec->bv_len = len;
bvec->bv_offset = offset;
bio->bi_vcnt++;
- bio->bi_phys_segments++;
bio->bi_iter.bi_size += len;
-
- /*
- * Perform a recount if the number of segments is greater
- * than queue_max_segments(q).
- */
-
- while (bio->bi_phys_segments > queue_max_segments(q)) {
-
- if (retried_segments)
- goto failed;
-
- retried_segments = 1;
- blk_recount_segments(q, bio);
- }
-
- /* If we may be able to merge these biovecs, force a recount */
- if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
- bio_clear_flag(bio, BIO_SEG_VALID);
-
- done:
return len;
+}
- failed:
- bvec->bv_page = NULL;
- bvec->bv_len = 0;
- bvec->bv_offset = 0;
- bio->bi_vcnt--;
- bio->bi_iter.bi_size -= len;
- blk_recount_segments(q, bio);
- return 0;
+int bio_add_pc_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset)
+{
+ bool same_page = false;
+ return __bio_add_pc_page(q, bio, page, len, offset, &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);
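A hedged usage sketch for the exported wrapper (not part of this patch; q, bio, page, len and offset are assumed caller state): a short return means the bio is full or a queue limit was hit, and the caller should submit and start a new bio.

	if (bio_add_pc_page(q, bio, page, len, offset) < len) {
		/* bio full or queue limit hit: submit and retry with a new bio */
	}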
/**
* __bio_try_merge_page - try appending data to an existing bvec.
* @bio: destination bio
- * @page: page to add
+ * @page: start page to add
* @len: length of the data to add
- * @off: offset of the data in @page
+ * @off: offset of the data relative to @page
+ * @same_page: return if the segment has been merged inside the same page
*
* Try to add the data at @page + @off to the last bvec of @bio. This is
* a useful optimisation for file systems with a block size smaller than the
* page size.
*
+ * Warn if (@len, @off) crosses pages when @same_page is true.
+ *
* Return %true on success or %false on failure.
*/
bool __bio_try_merge_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int off)
+ unsigned int len, unsigned int off, bool *same_page)
{
if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
return false;
- if (bio->bi_vcnt > 0) {
+ if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
- if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
+ if (page_is_mergeable(bv, page, len, off, same_page)) {
bv->bv_len += len;
bio->bi_iter.bi_size += len;
return true;
@@ -780,11 +765,11 @@
EXPORT_SYMBOL_GPL(__bio_try_merge_page);
/**
- * __bio_add_page - add page to a bio in a new segment
+ * __bio_add_page - add page(s) to a bio in a new segment
* @bio: destination bio
- * @page: page to add
- * @len: length of the data to add
- * @off: offset of the data in @page
+ * @page: start page to add
+ * @len: length of the data to add, may cross pages
+ * @off: offset of the data relative to @page, may cross pages
*
* Add the data at @page + @off to @bio as a new bvec. The caller must ensure
* that @bio has space for another bvec.
@@ -795,7 +780,7 @@
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
- WARN_ON_ONCE(bio_full(bio));
+ WARN_ON_ONCE(bio_full(bio, len));
bv->bv_page = page;
bv->bv_offset = off;
@@ -803,24 +788,29 @@
bio->bi_iter.bi_size += len;
bio->bi_vcnt++;
+
+ if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
+ bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);
/**
- * bio_add_page - attempt to add page to bio
+ * bio_add_page - attempt to add page(s) to bio
* @bio: destination bio
- * @page: page to add
- * @len: vec entry length
- * @offset: vec entry offset
+ * @page: start page to add
+ * @len: vec entry length, may cross pages
+ * @offset: vec entry offset relative to @page, may cross pages
*
- * Attempt to add a page to the bio_vec maplist. This will only fail
+ * Attempt to add page(s) to the bio_vec maplist. This will only fail
* if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
*/
int bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
- if (!__bio_try_merge_page(bio, page, len, offset)) {
- if (bio_full(bio))
+ bool same_page = false;
+
+ if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
+ if (bio_full(bio, len))
return 0;
__bio_add_page(bio, page, len, offset);
}
@@ -828,6 +818,41 @@
}
EXPORT_SYMBOL(bio_add_page);
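The common caller pattern for bio_add_page() — try to merge, and when the bio is full submit it and continue with a fresh one — looks roughly like the sketch below (hedged; pages, nr_pages, i and the re-initialisation step are assumed caller state):

	while (i < nr_pages) {
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE) {
			submit_bio(bio);	/* bio full: send it on its way */
			bio = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);
			/* caller re-initialises bi_disk, bi_iter.bi_sector, bi_opf */
			continue;
		}
		i++;
	}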
+void bio_release_pages(struct bio *bio, bool mark_dirty)
+{
+ struct bvec_iter_all iter_all;
+ struct bio_vec *bvec;
+
+ if (bio_flagged(bio, BIO_NO_PAGE_REF))
+ return;
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+ if (mark_dirty && !PageCompound(bvec->bv_page))
+ set_page_dirty_lock(bvec->bv_page);
+ put_page(bvec->bv_page);
+ }
+}
+
+static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
+{
+ const struct bio_vec *bv = iter->bvec;
+ unsigned int len;
+ size_t size;
+
+ if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
+ return -EINVAL;
+
+ len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
+ size = bio_add_page(bio, bv->bv_page, len,
+ bv->bv_offset + iter->iov_offset);
+ if (unlikely(size != len))
+ return -EINVAL;
+ iov_iter_advance(iter, size);
+ return 0;
+}
+
+#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
+
/**
* __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
* @bio: bio to add pages to
@@ -840,69 +865,86 @@
*/
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
- unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
+ unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+ unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
+ bool same_page = false;
+ ssize_t size, left;
+ unsigned len, i;
size_t offset;
- ssize_t size;
+
+ /*
+ * Move page array up in the allocated memory for the bio vecs as far as
+ * possible so that we can start filling biovecs from the beginning
+ * without overwriting the temporary page array.
+ */
+ BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
+ pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
- idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
- /*
- * Deep magic below: We need to walk the pinned pages backwards
- * because we are abusing the space allocated for the bio_vecs
- * for the page array. Because the bio_vecs are larger than the
- * page pointers by definition this will always work. But it also
- * means we can't use bio_add_page, so any changes to it's semantics
- * need to be reflected here as well.
- */
- bio->bi_iter.bi_size += size;
- bio->bi_vcnt += nr_pages;
+ for (left = size, i = 0; left > 0; left -= len, i++) {
+ struct page *page = pages[i];
- while (idx--) {
- bv[idx].bv_page = pages[idx];
- bv[idx].bv_len = PAGE_SIZE;
- bv[idx].bv_offset = 0;
+ len = min_t(size_t, PAGE_SIZE - offset, left);
+
+ if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
+ if (same_page)
+ put_page(page);
+ } else {
+ if (WARN_ON_ONCE(bio_full(bio, len)))
+ return -EINVAL;
+ __bio_add_page(bio, page, len, offset);
+ }
+ offset = 0;
}
- bv[0].bv_offset += offset;
- bv[0].bv_len -= offset;
- bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
-
iov_iter_advance(iter, size);
return 0;
}
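The pointer gymnastics above rely on sizeof(struct bio_vec) being at least twice sizeof(struct page *), so parking the temporary page array at the tail of the bvec array lets bvecs be filled from the front without clobbering unconsumed page pointers. A runnable userspace model of the layout arithmetic (the struct mirrors a 64-bit bio_vec; real sizes are config-dependent):

#include <stdio.h>

struct page;				/* opaque, pointer-only */

struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};

#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))

int main(void)
{
	struct bio_vec bvecs[8];
	struct page **pages = (struct page **)bvecs;
	unsigned int entries_left = 8;

	/* move the page array as far up as possible: filling bvecs[0],
	 * bvecs[1], ... then never overwrites pages[i] before it is read */
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
	printf("ratio=%zu, pages[] starts at byte %zu of %zu\n",
	       PAGE_PTRS_PER_BVEC,
	       (size_t)((char *)pages - (char *)bvecs), sizeof(bvecs));
	return 0;
}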
/**
- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * bio_iov_iter_get_pages - add user or kernel pages to a bio
* @bio: bio to add pages to
- * @iter: iov iterator describing the region to be mapped
+ * @iter: iov iterator describing the region to be added
*
- * Pins pages from *iter and appends them to @bio's bvec array. The
- * pages will have to be released using put_page() when done.
+ * This takes either an iterator pointing to user memory, or one pointing to
+ * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
+ * map them into the kernel. On IO completion, the caller should put those
+ * pages. If we're adding kernel pages, and the caller told us it's safe to
+ * do so, we just have to add the pages to the bio directly. We don't grab an
+ * extra reference to those pages (the user should already have that), and we
+ * don't put the page on IO completion. The caller needs to check if the bio is
+ * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
+ * released.
+ *
* The function tries, but does not guarantee, to pin as many pages as
- * fit into the bio, or are requested in *iter, whatever is smaller.
- * If MM encounters an error pinning the requested pages, it stops.
- * Error is returned only if 0 pages could be pinned.
+ * fit into the bio, or are requested in *iter, whatever is smaller. If
+ * MM encounters an error pinning the requested pages, it stops. Error
+ * is returned only if 0 pages could be pinned.
*/
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
- unsigned short orig_vcnt = bio->bi_vcnt;
+ const bool is_bvec = iov_iter_is_bvec(iter);
+ int ret;
+
+ if (WARN_ON_ONCE(bio->bi_vcnt))
+ return -EINVAL;
do {
- int ret = __bio_iov_iter_get_pages(bio, iter);
+ if (is_bvec)
+ ret = __bio_iov_bvec_add_pages(bio, iter);
+ else
+ ret = __bio_iov_iter_get_pages(bio, iter);
+ } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
- if (unlikely(ret))
- return bio->bi_vcnt > orig_vcnt ? 0 : ret;
-
- } while (iov_iter_count(iter) && !bio_full(bio));
-
- return 0;
+ if (is_bvec)
+ bio_set_flag(bio, BIO_NO_PAGE_REF);
+ return bio->bi_vcnt ? 0 : ret;
}
-EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
static void submit_bio_wait_endio(struct bio *bio)
{
@@ -1052,8 +1094,7 @@
if (data->nr_segs > UIO_MAXIOV)
return NULL;
- bmd = kmalloc(sizeof(struct bio_map_data) +
- sizeof(struct iovec) * data->nr_segs, gfp_mask);
+ bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
if (!bmd)
return NULL;
memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
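struct_size(bmd, iov, n) computes sizeof(*bmd) plus n trailing iov elements, saturating on overflow. A plain-C illustration of the flexible-array sizing it replaces (the kernel macro's overflow saturation is not modelled here, and the struct is a simplified stand-in):

#include <stdio.h>
#include <stdlib.h>

struct bmd_demo {
	int	is_our_pages;
	struct iov_demo { void *base; size_t len; } iov[];
};

int main(void)
{
	size_t n = 4;
	size_t bytes = sizeof(struct bmd_demo) + n * sizeof(struct iov_demo);
	struct bmd_demo *bmd = malloc(bytes);

	printf("allocating %zu bytes for %zu iovecs\n", bytes, n);
	free(bmd);
	return 0;
}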
@@ -1072,10 +1113,10 @@
*/
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
- int i;
struct bio_vec *bvec;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
ssize_t ret;
ret = copy_page_from_iter(bvec->bv_page,
@@ -1103,10 +1144,10 @@
*/
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
- int i;
struct bio_vec *bvec;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
ssize_t ret;
ret = copy_page_to_iter(bvec->bv_page,
@@ -1127,9 +1168,9 @@
void bio_free_pages(struct bio *bio)
{
struct bio_vec *bvec;
- int i;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i)
+ bio_for_each_segment_all(bvec, bio, iter_all)
__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);
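These hunks all apply the same mechanical conversion: bio_for_each_segment_all() now takes an on-stack struct bvec_iter_all instead of an int index, which lets the iterator walk multi-page bvecs one page at a time. The new calling convention, sketched for a completion path (bio assumed completed):

	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		put_page(bvec->bv_page);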
@@ -1240,8 +1281,11 @@
}
}
- if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
+ if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+ if (!map_data)
+ __free_page(page);
break;
+ }
len -= bytes;
offset = 0;
@@ -1256,7 +1300,7 @@
/*
* success
*/
- if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
+ if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
(map_data && map_data->from_user)) {
ret = bio_copy_from_iter(bio, iter);
if (ret)
@@ -1296,7 +1340,6 @@
int j;
struct bio *bio;
int ret;
- struct bio_vec *bvec;
if (!iov_iter_count(iter))
return ERR_PTR(-EINVAL);
@@ -1326,20 +1369,17 @@
for (j = 0; j < npages; j++) {
struct page *page = pages[j];
unsigned int n = PAGE_SIZE - offs;
- unsigned short prev_bi_vcnt = bio->bi_vcnt;
+ bool same_page = false;
if (n > bytes)
n = bytes;
- if (!bio_add_pc_page(q, bio, page, n, offs))
+ if (!__bio_add_pc_page(q, bio, page, n, offs,
+ &same_page)) {
+ if (same_page)
+ put_page(page);
break;
-
- /*
- * check if vector was merged with previous
- * drop page reference if needed
- */
- if (bio->bi_vcnt == prev_bi_vcnt)
- put_page(page);
+ }
added += n;
bytes -= n;
@@ -1370,31 +1410,11 @@
return bio;
out_unmap:
- bio_for_each_segment_all(bvec, bio, j) {
- put_page(bvec->bv_page);
- }
+ bio_release_pages(bio, false);
bio_put(bio);
return ERR_PTR(ret);
}
-static void __bio_unmap_user(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i;
-
- /*
- * make sure we dirty pages we wrote to
- */
- bio_for_each_segment_all(bvec, bio, i) {
- if (bio_data_dir(bio) == READ)
- set_page_dirty_lock(bvec->bv_page);
-
- put_page(bvec->bv_page);
- }
-
- bio_put(bio);
-}
-
/**
* bio_unmap_user - unmap a bio
* @bio: the bio being unmapped
@@ -1406,12 +1426,27 @@
*/
void bio_unmap_user(struct bio *bio)
{
- __bio_unmap_user(bio);
+ bio_release_pages(bio, bio_data_dir(bio) == READ);
bio_put(bio);
+ bio_put(bio);
+}
+
+static void bio_invalidate_vmalloc_pages(struct bio *bio)
+{
+#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+ if (bio->bi_private && !op_is_write(bio_op(bio))) {
+ unsigned long i, len = 0;
+
+ for (i = 0; i < bio->bi_vcnt; i++)
+ len += bio->bi_io_vec[i].bv_len;
+ invalidate_kernel_vmap_range(bio->bi_private, len);
+ }
+#endif
}
static void bio_map_kern_endio(struct bio *bio)
{
+ bio_invalidate_vmalloc_pages(bio);
bio_put(bio);
}
@@ -1432,6 +1467,8 @@
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = kaddr >> PAGE_SHIFT;
const int nr_pages = end - start;
+ bool is_vmalloc = is_vmalloc_addr(data);
+ struct page *page;
int offset, i;
struct bio *bio;
@@ -1439,6 +1476,11 @@
if (!bio)
return ERR_PTR(-ENOMEM);
+ if (is_vmalloc) {
+ flush_kernel_vmap_range(data, len);
+ bio->bi_private = data;
+ }
+
offset = offset_in_page(kaddr);
for (i = 0; i < nr_pages; i++) {
unsigned int bytes = PAGE_SIZE - offset;
@@ -1449,7 +1491,11 @@
if (bytes > len)
bytes = len;
- if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
+ if (!is_vmalloc)
+ page = virt_to_page(data);
+ else
+ page = vmalloc_to_page(data);
+ if (bio_add_pc_page(q, bio, page, bytes,
offset) < bytes) {
/* we don't support partial mappings */
bio_put(bio);
@@ -1464,7 +1510,6 @@
bio->bi_end_io = bio_map_kern_endio;
return bio;
}
-EXPORT_SYMBOL(bio_map_kern);
static void bio_copy_kern_endio(struct bio *bio)
{
@@ -1476,9 +1521,9 @@
{
char *p = bio->bi_private;
struct bio_vec *bvec;
- int i;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
p += bvec->bv_len;
}
@@ -1586,23 +1631,13 @@
void bio_set_pages_dirty(struct bio *bio)
{
struct bio_vec *bvec;
- int i;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
if (!PageCompound(bvec->bv_page))
set_page_dirty_lock(bvec->bv_page);
}
}
-EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
-
-static void bio_release_pages(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i;
-
- bio_for_each_segment_all(bvec, bio, i)
- put_page(bvec->bv_page);
-}
/*
* bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
@@ -1636,8 +1671,7 @@
while ((bio = next) != NULL) {
next = bio->bi_private;
- bio_set_pages_dirty(bio);
- bio_release_pages(bio);
+ bio_release_pages(bio, true);
bio_put(bio);
}
}
@@ -1646,14 +1680,14 @@
{
struct bio_vec *bvec;
unsigned long flags;
- int i;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
goto defer;
}
- bio_release_pages(bio);
+ bio_release_pages(bio, false);
bio_put(bio);
return;
defer:
@@ -1663,17 +1697,33 @@
spin_unlock_irqrestore(&bio_dirty_lock, flags);
schedule_work(&bio_dirty_work);
}
-EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
+
+void update_io_ticks(struct hd_struct *part, unsigned long now)
+{
+ unsigned long stamp;
+again:
+ stamp = READ_ONCE(part->stamp);
+ if (unlikely(stamp != now)) {
+ if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
+ __part_stat_add(part, io_ticks, 1);
+ }
+ }
+ if (part->partno) {
+ part = &part_to_disk(part)->part0;
+ goto again;
+ }
+}
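update_io_ticks() charges at most one io_ticks unit per jiffy no matter how many CPUs race, because only the winner of the cmpxchg on the stamp performs the add. A runnable userspace model of that pattern (C11 atomics standing in for the kernel's cmpxchg):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long stamp;
static _Atomic unsigned long io_ticks;

static void update_io_ticks_demo(unsigned long now)
{
	unsigned long old = atomic_load(&stamp);

	/* only the thread that swaps the stamp adds the tick */
	if (old != now && atomic_compare_exchange_strong(&stamp, &old, now))
		atomic_fetch_add(&io_ticks, 1);
}

int main(void)
{
	update_io_ticks_demo(100);
	update_io_ticks_demo(100);	/* same jiffy: no extra tick */
	update_io_ticks_demo(101);
	printf("io_ticks=%lu\n", atomic_load(&io_ticks));	/* prints 2 */
	return 0;
}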
void generic_start_io_acct(struct request_queue *q, int op,
unsigned long sectors, struct hd_struct *part)
{
const int sgrp = op_stat_group(op);
- int cpu = part_stat_lock();
- part_round_stats(q, cpu, part);
- part_stat_inc(cpu, part, ios[sgrp]);
- part_stat_add(cpu, part, sectors[sgrp], sectors);
+ part_stat_lock();
+
+ update_io_ticks(part, jiffies);
+ part_stat_inc(part, ios[sgrp]);
+ part_stat_add(part, sectors[sgrp], sectors);
part_inc_in_flight(q, part, op_is_write(op));
part_stat_unlock();
@@ -1683,30 +1733,21 @@
void generic_end_io_acct(struct request_queue *q, int req_op,
struct hd_struct *part, unsigned long start_time)
{
- unsigned long duration = jiffies - start_time;
+ unsigned long now = jiffies;
+ unsigned long duration = now - start_time;
const int sgrp = op_stat_group(req_op);
- int cpu = part_stat_lock();
- part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration));
- part_round_stats(q, cpu, part);
+ part_stat_lock();
+
+ update_io_ticks(part, now);
+ part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
+ part_stat_add(part, time_in_queue, duration);
part_dec_in_flight(q, part, op_is_write(req_op));
part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);
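With part_round_stats() gone, bio-based drivers still pair the two helpers around an IO; a hedged sketch of the calling convention (q, bio and disk are assumed, and start must be the jiffies value captured at submission):

	unsigned long start = jiffies;

	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &disk->part0);
	/* ... perform the IO ... */
	generic_end_io_acct(q, bio_op(bio), &disk->part0, start);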
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-void bio_flush_dcache_pages(struct bio *bi)
-{
- struct bio_vec bvec;
- struct bvec_iter iter;
-
- bio_for_each_segment(bvec, bi, iter)
- flush_dcache_page(bvec.bv_page);
-}
-EXPORT_SYMBOL(bio_flush_dcache_pages);
-#endif
-
static inline bool bio_remaining_done(struct bio *bio)
{
/*
@@ -1789,8 +1830,8 @@
* @bio, and updates @bio to represent the remaining sectors.
*
* Unless this is a discard request the newly allocated bio will point
- * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
- * @bio is not freed before the split.
+ * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
+ * neither @bio nor @bs are freed before the split bio.
*/
struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs)
@@ -1810,7 +1851,6 @@
bio_integrity_trim(split);
bio_advance(bio, split->bi_iter.bi_size);
- bio->bi_iter.bi_done = 0;
if (bio_flagged(bio, BIO_TRACE_COMPLETION))
bio_set_flag(split, BIO_TRACE_COMPLETION);
@@ -1835,10 +1875,7 @@
if (offset == 0 && size == bio->bi_iter.bi_size)
return;
- bio_clear_flag(bio, BIO_SEG_VALID);
-
bio_advance(bio, offset << 9);
-
bio->bi_iter.bi_size = size;
if (bio_integrity(bio))
@@ -1959,102 +1996,137 @@
#ifdef CONFIG_BLK_CGROUP
-#ifdef CONFIG_MEMCG
/**
- * bio_associate_blkcg_from_page - associate a bio with the page's blkcg
+ * bio_disassociate_blkg - puts back the blkg reference if associated
* @bio: target bio
- * @page: the page to lookup the blkcg from
*
- * Associate @bio with the blkcg from @page's owning memcg. This works like
- * every other associate function wrt references.
+ * Helper to disassociate the blkg from @bio if a blkg is associated.
*/
-int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
+void bio_disassociate_blkg(struct bio *bio)
{
- struct cgroup_subsys_state *blkcg_css;
-
- if (unlikely(bio->bi_css))
- return -EBUSY;
- if (!page->mem_cgroup)
- return 0;
- blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
- &io_cgrp_subsys);
- bio->bi_css = blkcg_css;
- return 0;
-}
-#endif /* CONFIG_MEMCG */
-
-/**
- * bio_associate_blkcg - associate a bio with the specified blkcg
- * @bio: target bio
- * @blkcg_css: css of the blkcg to associate
- *
- * Associate @bio with the blkcg specified by @blkcg_css. Block layer will
- * treat @bio as if it were issued by a task which belongs to the blkcg.
- *
- * This function takes an extra reference of @blkcg_css which will be put
- * when @bio is released. The caller must own @bio and is responsible for
- * synchronizing calls to this function.
- */
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
-{
- if (unlikely(bio->bi_css))
- return -EBUSY;
- css_get(blkcg_css);
- bio->bi_css = blkcg_css;
- return 0;
-}
-EXPORT_SYMBOL_GPL(bio_associate_blkcg);
-
-/**
- * bio_associate_blkg - associate a bio with the specified blkg
- * @bio: target bio
- * @blkg: the blkg to associate
- *
- * Associate @bio with the blkg specified by @blkg. This is the queue specific
- * blkcg information associated with the @bio, a reference will be taken on the
- * @blkg and will be freed when the bio is freed.
- */
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
-{
- if (unlikely(bio->bi_blkg))
- return -EBUSY;
- if (!blkg_try_get(blkg))
- return -ENODEV;
- bio->bi_blkg = blkg;
- return 0;
-}
-
-/**
- * bio_disassociate_task - undo bio_associate_current()
- * @bio: target bio
- */
-void bio_disassociate_task(struct bio *bio)
-{
- if (bio->bi_ioc) {
- put_io_context(bio->bi_ioc);
- bio->bi_ioc = NULL;
- }
- if (bio->bi_css) {
- css_put(bio->bi_css);
- bio->bi_css = NULL;
- }
if (bio->bi_blkg) {
blkg_put(bio->bi_blkg);
bio->bi_blkg = NULL;
}
}
+EXPORT_SYMBOL_GPL(bio_disassociate_blkg);
/**
- * bio_clone_blkcg_association - clone blkcg association from src to dst bio
+ * __bio_associate_blkg - associate a bio with a blkg
+ * @bio: target bio
+ * @blkg: the blkg to associate
+ *
+ * This tries to associate @bio with the specified @blkg. Association failure
+ * is handled by walking up the blkg tree. Therefore, the blkg associated can
+ * be anything between @blkg and the root_blkg. This situation only happens
+ * when a cgroup is dying, in which case the remaining bios spill to the
+ * closest live blkg.
+ *
+ * A reference will be taken on the @blkg and will be released when @bio is
+ * freed.
+ */
+static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
+{
+ bio_disassociate_blkg(bio);
+
+ bio->bi_blkg = blkg_tryget_closest(blkg);
+}
+
+/**
+ * bio_associate_blkg_from_css - associate a bio with a specified css
+ * @bio: target bio
+ * @css: target css
+ *
+ * Associate @bio with the blkg found by combining the css's blkg and the
+ * request_queue of the @bio. This falls back to the queue's root_blkg if
+ * the association fails with the css.
+ */
+void bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
+{
+ struct request_queue *q = bio->bi_disk->queue;
+ struct blkcg_gq *blkg;
+
+ rcu_read_lock();
+
+ if (!css || !css->parent)
+ blkg = q->root_blkg;
+ else
+ blkg = blkg_lookup_create(css_to_blkcg(css), q);
+
+ __bio_associate_blkg(bio, blkg);
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
+
+#ifdef CONFIG_MEMCG
+/**
+ * bio_associate_blkg_from_page - associate a bio with the page's blkg
+ * @bio: target bio
+ * @page: the page to lookup the blkcg from
+ *
+ * Associate @bio with the blkg from @page's owning memcg and the respective
+ * request_queue. If cgroup_e_css returns %NULL, fall back to the queue's
+ * root_blkg.
+ */
+void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
+{
+ struct cgroup_subsys_state *css;
+
+ if (!page->mem_cgroup)
+ return;
+
+ rcu_read_lock();
+
+ css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
+ bio_associate_blkg_from_css(bio, css);
+
+ rcu_read_unlock();
+}
+#endif /* CONFIG_MEMCG */
+
+/**
+ * bio_associate_blkg - associate a bio with a blkg
+ * @bio: target bio
+ *
+ * Associate @bio with the blkg found from the bio's css and request_queue.
+ * If one is not found, bio_lookup_blkg() creates the blkg. If a blkg is
+ * already associated, the css is reused and association redone as the
+ * request_queue may have changed.
+ */
+void bio_associate_blkg(struct bio *bio)
+{
+ struct cgroup_subsys_state *css;
+
+ rcu_read_lock();
+
+ if (bio->bi_blkg)
+ css = &bio_blkcg(bio)->css;
+ else
+ css = blkcg_css();
+
+ bio_associate_blkg_from_css(bio, css);
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(bio_associate_blkg);
+
+/**
+ * bio_clone_blkg_association - clone blkg association from src to dst bio
* @dst: destination bio
* @src: source bio
*/
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
+void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
- if (src->bi_css)
- WARN_ON(bio_associate_blkcg(dst, src->bi_css));
+ rcu_read_lock();
+
+ if (src->bi_blkg)
+ __bio_associate_blkg(dst, src->bi_blkg);
+
+ rcu_read_unlock();
}
-EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
+EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
@@ -2082,6 +2154,9 @@
bio_slab_nr = 0;
bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
GFP_KERNEL);
+
+ BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
+
if (!bio_slabs)
panic("bio: can't allocate bios\n");
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c630e02..1eb8895 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Common Block IO controller cgroup interface
*
@@ -28,6 +29,7 @@
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include <linux/tracehook.h>
+#include <linux/psi.h>
#include "blk.h"
#define MAX_KEY_LEN 100
@@ -46,12 +48,14 @@
EXPORT_SYMBOL_GPL(blkcg_root);
struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
+EXPORT_SYMBOL_GPL(blkcg_root_css);
static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
-static bool blkcg_debug_stats = false;
+bool blkcg_debug_stats = false;
+static struct workqueue_struct *blkcg_punt_bio_wq;
static bool blkcg_policy_enabled(struct request_queue *q,
const struct blkcg_policy *pol)
@@ -76,14 +80,60 @@
if (blkg->pd[i])
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
- if (blkg->blkcg != &blkcg_root)
- blk_exit_rl(blkg->q, &blkg->rl);
-
blkg_rwstat_exit(&blkg->stat_ios);
blkg_rwstat_exit(&blkg->stat_bytes);
+ percpu_ref_exit(&blkg->refcnt);
kfree(blkg);
}
+static void __blkg_release(struct rcu_head *rcu)
+{
+ struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
+
+ WARN_ON(!bio_list_empty(&blkg->async_bios));
+
+ /* release the blkcg and parent blkg refs this blkg has been holding */
+ css_put(&blkg->blkcg->css);
+ if (blkg->parent)
+ blkg_put(blkg->parent);
+
+ wb_congested_put(blkg->wb_congested);
+
+ blkg_free(blkg);
+}
+
+/*
+ * A group is RCU protected, but having an rcu lock does not mean that one
+ * can access all the fields of blkg and assume these are valid. For
+ * example, don't try to follow throtl_data and request queue links.
+ *
+ * Having a reference to blkg under an rcu allows accesses to only values
+ * local to groups like group stats and group rate limits.
+ */
+static void blkg_release(struct percpu_ref *ref)
+{
+ struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
+
+ call_rcu(&blkg->rcu_head, __blkg_release);
+}
+
+static void blkg_async_bio_workfn(struct work_struct *work)
+{
+ struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+ async_bio_work);
+ struct bio_list bios = BIO_EMPTY_LIST;
+ struct bio *bio;
+
+ /* as long as there are pending bios, @blkg can't go away */
+ spin_lock_bh(&blkg->async_bio_lock);
+ bio_list_merge(&bios, &blkg->async_bios);
+ bio_list_init(&blkg->async_bios);
+ spin_unlock_bh(&blkg->async_bio_lock);
+
+ while ((bio = bio_list_pop(&bios)))
+ submit_bio(bio);
+}
+
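The switch from an atomic_t to a percpu_ref makes blkg get/put cheap on the hot path while keeping a release callback for teardown. The lifecycle, in outline (a sketch of the calls this patch wires up, not new API):

	percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask);
	/* hot path: */
	percpu_ref_get(&blkg->refcnt);		/* e.g. via blkg_get() */
	percpu_ref_put(&blkg->refcnt);		/* e.g. via blkg_put() */
	/* teardown: drop the base reference; blkg_release() runs once the
	 * count drains, and __blkg_release() then frees via RCU */
	percpu_ref_kill(&blkg->refcnt);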
/**
* blkg_alloc - allocate a blkg
* @blkcg: block cgroup the new blkg is associated with
@@ -103,21 +153,19 @@
if (!blkg)
return NULL;
+ if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
+ goto err_free;
+
if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
goto err_free;
blkg->q = q;
INIT_LIST_HEAD(&blkg->q_node);
+ spin_lock_init(&blkg->async_bio_lock);
+ bio_list_init(&blkg->async_bios);
+ INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
blkg->blkcg = blkcg;
- atomic_set(&blkg->refcnt, 1);
-
- /* root blkg uses @q->root_rl, init rl only for !root blkgs */
- if (blkcg != &blkcg_root) {
- if (blk_init_rl(&blkg->rl, q, gfp_mask))
- goto err_free;
- blkg->rl.blkg = blkg;
- }
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -127,7 +175,7 @@
continue;
/* alloc per-policy data and attach it to blkg */
- pd = pol->pd_alloc_fn(gfp_mask, q->node);
+ pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
if (!pd)
goto err_free;
@@ -157,7 +205,7 @@
blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
if (blkg && blkg->q == q) {
if (update_hint) {
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
rcu_assign_pointer(blkcg->blkg_hint, blkg);
}
return blkg;
@@ -180,7 +228,13 @@
int i, ret;
WARN_ON_ONCE(!rcu_read_lock_held());
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
+
+ /* request_queue is dying, do not create/recreate a blkg */
+ if (blk_queue_dying(q)) {
+ ret = -ENODEV;
+ goto err_free_blkg;
+ }
/* blkg holds a reference to blkcg */
if (!css_tryget_online(&blkcg->css)) {
@@ -259,7 +313,7 @@
}
/**
- * blkg_lookup_create - lookup blkg, try to create one if not there
+ * __blkg_lookup_create - lookup blkg, try to create one if not there
* @blkcg: blkcg of interest
* @q: request_queue of interest
*
@@ -268,24 +322,16 @@
* that all non-root blkg's have access to the parent blkg. This function
* should be called under RCU read lock and @q->queue_lock.
*
- * Returns pointer to the looked up or created blkg on success, ERR_PTR()
- * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
- * dead and bypassing, returns ERR_PTR(-EBUSY).
+ * Returns the blkg or the closest blkg if blkg_create() fails as it walks
+ * down from root.
*/
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q)
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
{
struct blkcg_gq *blkg;
WARN_ON_ONCE(!rcu_read_lock_held());
- lockdep_assert_held(q->queue_lock);
-
- /*
- * This could be the first entry point of blkcg implementation and
- * we shouldn't allow anything to go through for a bypassing queue.
- */
- if (unlikely(blk_queue_bypass(q)))
- return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
+ lockdep_assert_held(&q->queue_lock);
blkg = __blkg_lookup(blkcg, q, true);
if (blkg)
@@ -293,30 +339,64 @@
/*
* Create blkgs walking down from blkcg_root to @blkcg, so that all
- * non-root blkgs have access to their parents.
+ * non-root blkgs have access to their parents. Returns the closest
+ * blkg to the intended blkg should blkg_create() fail.
*/
while (true) {
struct blkcg *pos = blkcg;
struct blkcg *parent = blkcg_parent(blkcg);
+ struct blkcg_gq *ret_blkg = q->root_blkg;
- while (parent && !__blkg_lookup(parent, q, false)) {
+ while (parent) {
+ blkg = __blkg_lookup(parent, q, false);
+ if (blkg) {
+ /* remember closest blkg */
+ ret_blkg = blkg;
+ break;
+ }
pos = parent;
parent = blkcg_parent(parent);
}
blkg = blkg_create(pos, q, NULL);
- if (pos == blkcg || IS_ERR(blkg))
+ if (IS_ERR(blkg))
+ return ret_blkg;
+ if (pos == blkcg)
return blkg;
}
}
+/**
+ * blkg_lookup_create - find or create a blkg
+ * @blkcg: target block cgroup
+ * @q: target request_queue
+ *
+ * This looks up or creates the blkg representing the unique pair
+ * of the blkcg and the request_queue.
+ */
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
+{
+ struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
+
+ if (unlikely(!blkg)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->queue_lock, flags);
+ blkg = __blkg_lookup_create(blkcg, q);
+ spin_unlock_irqrestore(&q->queue_lock, flags);
+ }
+
+ return blkg;
+}
+
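Callers of the new blkg_lookup_create() only need the RCU read lock; the helper takes the queue_lock itself on the slow (miss) path. A hedged caller sketch (css and q assumed):

	rcu_read_lock();
	blkg = blkg_lookup_create(css_to_blkcg(css), q);
	/* blkg may be an ancestor up to root_blkg if creation failed */
	rcu_read_unlock();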
static void blkg_destroy(struct blkcg_gq *blkg)
{
struct blkcg *blkcg = blkg->blkcg;
struct blkcg_gq *parent = blkg->parent;
int i;
- lockdep_assert_held(blkg->q->queue_lock);
+ lockdep_assert_held(&blkg->q->queue_lock);
lockdep_assert_held(&blkcg->lock);
/* Something wrong if we are trying to remove same group twice */
@@ -353,7 +433,7 @@
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
*/
- blkg_put(blkg);
+ percpu_ref_kill(&blkg->refcnt);
}
/**
@@ -366,8 +446,7 @@
{
struct blkcg_gq *blkg, *n;
- lockdep_assert_held(q->queue_lock);
-
+ spin_lock_irq(&q->queue_lock);
list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;
@@ -377,65 +456,7 @@
}
q->root_blkg = NULL;
- q->root_rl.blkg = NULL;
-}
-
-/*
- * A group is RCU protected, but having an rcu lock does not mean that one
- * can access all the fields of blkg and assume these are valid. For
- * example, don't try to follow throtl_data and request queue links.
- *
- * Having a reference to blkg under an rcu allows accesses to only values
- * local to groups like group stats and group rate limits.
- */
-void __blkg_release_rcu(struct rcu_head *rcu_head)
-{
- struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
-
- /* release the blkcg and parent blkg refs this blkg has been holding */
- css_put(&blkg->blkcg->css);
- if (blkg->parent)
- blkg_put(blkg->parent);
-
- wb_congested_put(blkg->wb_congested);
-
- blkg_free(blkg);
-}
-EXPORT_SYMBOL_GPL(__blkg_release_rcu);
-
-/*
- * The next function used by blk_queue_for_each_rl(). It's a bit tricky
- * because the root blkg uses @q->root_rl instead of its own rl.
- */
-struct request_list *__blk_queue_next_rl(struct request_list *rl,
- struct request_queue *q)
-{
- struct list_head *ent;
- struct blkcg_gq *blkg;
-
- /*
- * Determine the current blkg list_head. The first entry is
- * root_rl which is off @q->blkg_list and mapped to the head.
- */
- if (rl == &q->root_rl) {
- ent = &q->blkg_list;
- /* There are no more block groups, hence no request lists */
- if (list_empty(ent))
- return NULL;
- } else {
- blkg = container_of(rl, struct blkcg_gq, rl);
- ent = &blkg->q_node;
- }
-
- /* walk to the next list_head, skip root blkcg */
- ent = ent->next;
- if (ent == &q->root_blkg->q_node)
- ent = ent->next;
- if (ent == &q->blkg_list)
- return NULL;
-
- blkg = container_of(ent, struct blkcg_gq, q_node);
- return &blkg->rl;
+ spin_unlock_irq(&q->queue_lock);
}
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
@@ -477,7 +498,6 @@
return dev_name(blkg->q->backing_dev_info->dev);
return NULL;
}
-EXPORT_SYMBOL_GPL(blkg_dev_name);
/**
* blkcg_print_blkgs - helper for printing per-blkg data
@@ -508,10 +528,10 @@
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
- spin_lock_irq(blkg->q->queue_lock);
+ spin_lock_irq(&blkg->q->queue_lock);
if (blkcg_policy_enabled(blkg->q, pol))
total += prfill(sf, blkg->pd[pol->plid], data);
- spin_unlock_irq(blkg->q->queue_lock);
+ spin_unlock_irq(&blkg->q->queue_lock);
}
rcu_read_unlock();
@@ -549,7 +569,7 @@
* Print @rwstat to @sf for the device associated with @pd.
*/
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- const struct blkg_rwstat *rwstat)
+ const struct blkg_rwstat_sample *rwstat)
{
static const char *rwstr[] = {
[BLKG_RWSTAT_READ] = "Read",
@@ -567,31 +587,17 @@
for (i = 0; i < BLKG_RWSTAT_NR; i++)
seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
- (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
+ rwstat->cnt[i]);
- v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
- atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]) +
- atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_DISCARD]);
- seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
+ v = rwstat->cnt[BLKG_RWSTAT_READ] +
+ rwstat->cnt[BLKG_RWSTAT_WRITE] +
+ rwstat->cnt[BLKG_RWSTAT_DISCARD];
+ seq_printf(sf, "%s Total %llu\n", dname, v);
return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
/**
- * blkg_prfill_stat - prfill callback for blkg_stat
- * @sf: seq_file to print to
- * @pd: policy private data of interest
- * @off: offset to the blkg_stat in @pd
- *
- * prfill callback for printing a blkg_stat.
- */
-u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
-{
- return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
-}
-EXPORT_SYMBOL_GPL(blkg_prfill_stat);
-
-/**
* blkg_prfill_rwstat - prfill callback for blkg_rwstat
* @sf: seq_file to print to
* @pd: policy private data of interest
@@ -602,8 +608,9 @@
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
int off)
{
- struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
+ struct blkg_rwstat_sample rwstat = { };
+ blkg_rwstat_read((void *)pd + off, &rwstat);
return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
@@ -611,8 +618,9 @@
static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
struct blkg_policy_data *pd, int off)
{
- struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);
+ struct blkg_rwstat_sample rwstat = { };
+ blkg_rwstat_read((void *)pd->blkg + off, &rwstat);
return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
@@ -654,8 +662,9 @@
struct blkg_policy_data *pd,
int off)
{
- struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
- NULL, off);
+ struct blkg_rwstat_sample rwstat;
+
+ blkg_rwstat_recursive_sum(pd->blkg, NULL, off, &rwstat);
return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
@@ -690,52 +699,11 @@
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
/**
- * blkg_stat_recursive_sum - collect hierarchical blkg_stat
- * @blkg: blkg of interest
- * @pol: blkcg_policy which contains the blkg_stat
- * @off: offset to the blkg_stat in blkg_policy_data or @blkg
- *
- * Collect the blkg_stat specified by @blkg, @pol and @off and all its
- * online descendants and their aux counts. The caller must be holding the
- * queue lock for online tests.
- *
- * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
- * at @off bytes into @blkg's blkg_policy_data of the policy.
- */
-u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
- struct blkcg_policy *pol, int off)
-{
- struct blkcg_gq *pos_blkg;
- struct cgroup_subsys_state *pos_css;
- u64 sum = 0;
-
- lockdep_assert_held(blkg->q->queue_lock);
-
- rcu_read_lock();
- blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
- struct blkg_stat *stat;
-
- if (!pos_blkg->online)
- continue;
-
- if (pol)
- stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
- else
- stat = (void *)blkg + off;
-
- sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
- }
- rcu_read_unlock();
-
- return sum;
-}
-EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
-
-/**
* blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
* @blkg: blkg of interest
* @pol: blkcg_policy which contains the blkg_rwstat
* @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
+ * @sum: blkg_rwstat_sample structure containing the results
*
* Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
* online descendants and their aux counts. The caller must be holding the
@@ -744,15 +712,14 @@
* If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
* is at @off bytes into @blkg's blkg_policy_data of the policy.
*/
-struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
- struct blkcg_policy *pol, int off)
+void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
+ int off, struct blkg_rwstat_sample *sum)
{
struct blkcg_gq *pos_blkg;
struct cgroup_subsys_state *pos_css;
- struct blkg_rwstat sum = { };
- int i;
+ unsigned int i;
- lockdep_assert_held(blkg->q->queue_lock);
+ lockdep_assert_held(&blkg->q->queue_lock);
rcu_read_lock();
blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -767,13 +734,9 @@
rwstat = (void *)pos_blkg + off;
for (i = 0; i < BLKG_RWSTAT_NR; i++)
- atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
- percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
- &sum.aux_cnt[i]);
+ sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
}
rcu_read_unlock();
-
- return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
@@ -783,23 +746,53 @@
struct request_queue *q)
{
WARN_ON_ONCE(!rcu_read_lock_held());
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
if (!blkcg_policy_enabled(q, pol))
return ERR_PTR(-EOPNOTSUPP);
-
- /*
- * This could be the first entry point of blkcg implementation and
- * we shouldn't allow anything to go through for a bypassing queue.
- */
- if (unlikely(blk_queue_bypass(q)))
- return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
-
return __blkg_lookup(blkcg, q, true /* update_hint */);
}
/**
* blkg_conf_prep - parse and prepare for per-blkg config update
+ * @inputp: input string pointer
+ *
+ * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
+ * from @input and get and return the matching gendisk. *@inputp is
+ * updated to point past the device node prefix. Returns an ERR_PTR()
+ * value on error.
+ *
+ * Use this function iff blkg_conf_prep() can't be used for some reason.
+ */
+struct gendisk *blkcg_conf_get_disk(char **inputp)
+{
+ char *input = *inputp;
+ unsigned int major, minor;
+ struct gendisk *disk;
+ int key_len, part;
+
+ if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
+ return ERR_PTR(-EINVAL);
+
+ input += key_len;
+ if (!isspace(*input))
+ return ERR_PTR(-EINVAL);
+ input = skip_spaces(input);
+
+ disk = get_gendisk(MKDEV(major, minor), &part);
+ if (!disk)
+ return ERR_PTR(-ENODEV);
+ if (part) {
+ put_disk_and_module(disk);
+ return ERR_PTR(-ENODEV);
+ }
+
+ *inputp = input;
+ return disk;
+}
+
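The "%u:%u%n" parse consumes the MAJ:MIN prefix and reports how many characters it ate, so the body pointer can be advanced past the device node. A runnable userspace model of that prefix parsing (the sample input string is made up):

#include <ctype.h>
#include <stdio.h>

int main(void)
{
	char input[] = "8:16 rbps=1048576";
	unsigned int major, minor;
	int key_len;
	char *body;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return 1;
	body = input + key_len;
	if (!isspace((unsigned char)*body))
		return 1;
	while (isspace((unsigned char)*body))
		body++;			/* skip_spaces() equivalent */
	printf("dev %u:%u, body \"%s\"\n", major, minor, body);
	return 0;
}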
+/**
+ * blkg_conf_prep - parse and prepare for per-blkg config update
* @blkcg: target block cgroup
* @pol: target policy
* @input: input string
@@ -812,35 +805,21 @@
*/
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
char *input, struct blkg_conf_ctx *ctx)
- __acquires(rcu) __acquires(disk->queue->queue_lock)
+ __acquires(rcu) __acquires(&disk->queue->queue_lock)
{
struct gendisk *disk;
struct request_queue *q;
struct blkcg_gq *blkg;
- unsigned int major, minor;
- int key_len, part, ret;
- char *body;
+ int ret;
- if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
- return -EINVAL;
-
- body = input + key_len;
- if (!isspace(*body))
- return -EINVAL;
- body = skip_spaces(body);
-
- disk = get_gendisk(MKDEV(major, minor), &part);
- if (!disk)
- return -ENODEV;
- if (part) {
- ret = -ENODEV;
- goto fail;
- }
+ disk = blkcg_conf_get_disk(&input);
+ if (IS_ERR(disk))
+ return PTR_ERR(disk);
q = disk->queue;
rcu_read_lock();
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
blkg = blkg_lookup_check(blkcg, pol, q);
if (IS_ERR(blkg)) {
@@ -867,7 +846,7 @@
}
/* Drop locks to do new blkg allocation with GFP_KERNEL. */
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
@@ -877,7 +856,7 @@
}
rcu_read_lock();
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
blkg = blkg_lookup_check(pos, pol, q);
if (IS_ERR(blkg)) {
@@ -889,7 +868,7 @@
blkg_free(new_blkg);
} else {
blkg = blkg_create(pos, q, new_blkg);
- if (unlikely(IS_ERR(blkg))) {
+ if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
goto fail_unlock;
}
@@ -901,11 +880,11 @@
success:
ctx->disk = disk;
ctx->blkg = blkg;
- ctx->body = body;
+ ctx->body = input;
return 0;
fail_unlock:
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
fail:
put_disk_and_module(disk);
@@ -931,9 +910,9 @@
* with blkg_conf_prep().
*/
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
- __releases(ctx->disk->queue->queue_lock) __releases(rcu)
+ __releases(&ctx->disk->queue->queue_lock) __releases(rcu)
{
- spin_unlock_irq(ctx->disk->queue->queue_lock);
+ spin_unlock_irq(&ctx->disk->queue->queue_lock);
rcu_read_unlock();
put_disk_and_module(ctx->disk);
}
@@ -949,15 +928,20 @@
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
const char *dname;
char *buf;
- struct blkg_rwstat rwstat;
+ struct blkg_rwstat_sample rwstat;
u64 rbytes, wbytes, rios, wios, dbytes, dios;
size_t size = seq_get_buf(sf, &buf), off = 0;
int i;
bool has_stats = false;
+ spin_lock_irq(&blkg->q->queue_lock);
+
+ if (!blkg->online)
+ goto skip;
+
dname = blkg_dev_name(blkg);
if (!dname)
- continue;
+ goto skip;
/*
* Hooray string manipulation, count is the size written NOT
@@ -967,21 +951,17 @@
*/
off += scnprintf(buf+off, size-off, "%s ", dname);
- spin_lock_irq(blkg->q->queue_lock);
+ blkg_rwstat_recursive_sum(blkg, NULL,
+ offsetof(struct blkcg_gq, stat_bytes), &rwstat);
+ rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
+ wbytes = rwstat.cnt[BLKG_RWSTAT_WRITE];
+ dbytes = rwstat.cnt[BLKG_RWSTAT_DISCARD];
- rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
- offsetof(struct blkcg_gq, stat_bytes));
- rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
- wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
- dbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
-
- rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
- offsetof(struct blkcg_gq, stat_ios));
- rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
- wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
- dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
-
- spin_unlock_irq(blkg->q->queue_lock);
+ blkg_rwstat_recursive_sum(blkg, NULL,
+ offsetof(struct blkcg_gq, stat_ios), &rwstat);
+ rios = rwstat.cnt[BLKG_RWSTAT_READ];
+ wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
+ dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];
if (rbytes || wbytes || rios || wios) {
has_stats = true;
@@ -991,10 +971,7 @@
dbytes, dios);
}
- if (!blkcg_debug_stats)
- goto next;
-
- if (atomic_read(&blkg->use_delay)) {
+ if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
has_stats = true;
off += scnprintf(buf+off, size-off,
" use_delay=%d delay_nsec=%llu",
@@ -1014,11 +991,17 @@
has_stats = true;
off += written;
}
-next:
+
if (has_stats) {
- off += scnprintf(buf+off, size-off, "\n");
- seq_commit(sf, off);
+ if (off < size - 1) {
+ off += scnprintf(buf+off, size-off, "\n");
+ seq_commit(sf, off);
+ } else {
+ seq_commit(sf, -1);
+ }
}
+ skip:
+ spin_unlock_irq(&blkg->q->queue_lock);
}
rcu_read_unlock();
@@ -1102,9 +1085,9 @@
struct blkcg_gq, blkcg_node);
struct request_queue *q = blkg->q;
- if (spin_trylock(q->queue_lock)) {
+ if (spin_trylock(&q->queue_lock)) {
blkg_destroy(blkg);
- spin_unlock(q->queue_lock);
+ spin_unlock(&q->queue_lock);
} else {
spin_unlock_irq(&blkcg->lock);
cpu_relax();
@@ -1225,36 +1208,31 @@
/* Make sure the root blkg exists. */
rcu_read_lock();
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
blkg = blkg_create(&blkcg_root, q, new_blkg);
if (IS_ERR(blkg))
goto err_unlock;
q->root_blkg = blkg;
- q->root_rl.blkg = blkg;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
if (preloaded)
radix_tree_preload_end();
ret = blk_iolatency_init(q);
- if (ret) {
- spin_lock_irq(q->queue_lock);
- blkg_destroy_all(q);
- spin_unlock_irq(q->queue_lock);
- return ret;
- }
+ if (ret)
+ goto err_destroy_all;
ret = blk_throtl_init(q);
- if (ret) {
- spin_lock_irq(q->queue_lock);
- blkg_destroy_all(q);
- spin_unlock_irq(q->queue_lock);
- }
- return ret;
+ if (ret)
+ goto err_destroy_all;
+ return 0;
+err_destroy_all:
+ blkg_destroy_all(q);
+ return ret;
err_unlock:
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
if (preloaded)
radix_tree_preload_end();
@@ -1269,7 +1247,7 @@
*/
void blkcg_drain_queue(struct request_queue *q)
{
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
/*
* @q could be exiting and already have destroyed all blkgs as
@@ -1285,14 +1263,11 @@
* blkcg_exit_queue - exit and release blkcg part of request_queue
* @q: request_queue being released
*
- * Called from blk_release_queue(). Responsible for exiting blkcg part.
+ * Called from blk_exit_queue(). Responsible for exiting blkcg part.
*/
void blkcg_exit_queue(struct request_queue *q)
{
- spin_lock_irq(q->queue_lock);
blkg_destroy_all(q);
- spin_unlock_irq(q->queue_lock);
-
blk_throtl_exit(q);
}
@@ -1390,60 +1365,90 @@
const struct blkcg_policy *pol)
{
struct blkg_policy_data *pd_prealloc = NULL;
- struct blkcg_gq *blkg;
+ struct blkcg_gq *blkg, *pinned_blkg = NULL;
int ret;
if (blkcg_policy_enabled(q, pol))
return 0;
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_freeze_queue(q);
- else
- blk_queue_bypass_start(q);
-pd_prealloc:
- if (!pd_prealloc) {
- pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
- if (!pd_prealloc) {
- ret = -ENOMEM;
- goto out_bypass_end;
- }
- }
+retry:
+ spin_lock_irq(&q->queue_lock);
- spin_lock_irq(q->queue_lock);
-
- list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ /* blkg_list is pushed at the head, reverse walk to allocate parents first */
+ list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
struct blkg_policy_data *pd;
if (blkg->pd[pol->plid])
continue;
- pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
- if (!pd)
- swap(pd, pd_prealloc);
+ /* If prealloc matches, use it; otherwise try GFP_NOWAIT */
+ if (blkg == pinned_blkg) {
+ pd = pd_prealloc;
+ pd_prealloc = NULL;
+ } else {
+ pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
+ blkg->blkcg);
+ }
+
if (!pd) {
- spin_unlock_irq(q->queue_lock);
- goto pd_prealloc;
+ /*
+ * GFP_NOWAIT failed. Free the existing one and
+ * prealloc for @blkg w/ GFP_KERNEL.
+ */
+ if (pinned_blkg)
+ blkg_put(pinned_blkg);
+ blkg_get(blkg);
+ pinned_blkg = blkg;
+
+ spin_unlock_irq(&q->queue_lock);
+
+ if (pd_prealloc)
+ pol->pd_free_fn(pd_prealloc);
+ pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
+ blkg->blkcg);
+ if (pd_prealloc)
+ goto retry;
+ else
+ goto enomem;
}
blkg->pd[pol->plid] = pd;
pd->blkg = blkg;
pd->plid = pol->plid;
- if (pol->pd_init_fn)
- pol->pd_init_fn(pd);
}
+ /* all allocated, init in the same order */
+ if (pol->pd_init_fn)
+ list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+ pol->pd_init_fn(blkg->pd[pol->plid]);
+
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;
- spin_unlock_irq(q->queue_lock);
-out_bypass_end:
- if (q->mq_ops)
+ spin_unlock_irq(&q->queue_lock);
+out:
+ if (queue_is_mq(q))
blk_mq_unfreeze_queue(q);
- else
- blk_queue_bypass_end(q);
+ if (pinned_blkg)
+ blkg_put(pinned_blkg);
if (pd_prealloc)
pol->pd_free_fn(pd_prealloc);
return ret;
+
+enomem:
+ /* alloc failed, nothing's initialized yet, free everything */
+ spin_lock_irq(&q->queue_lock);
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ if (blkg->pd[pol->plid]) {
+ pol->pd_free_fn(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
+ }
+ }
+ spin_unlock_irq(&q->queue_lock);
+ ret = -ENOMEM;
+ goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
@@ -1463,12 +1468,10 @@
if (!blkcg_policy_enabled(q, pol))
return;
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_freeze_queue(q);
- else
- blk_queue_bypass_start(q);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
__clear_bit(pol->plid, q->blkcg_pols);
@@ -1481,12 +1484,10 @@
}
}
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_unfreeze_queue(q);
- else
- blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
@@ -1536,7 +1537,8 @@
blkcg->cpd[pol->plid] = cpd;
cpd->blkcg = blkcg;
cpd->plid = pol->plid;
- pol->cpd_init_fn(cpd);
+ if (pol->cpd_init_fn)
+ pol->cpd_init_fn(cpd);
}
}
@@ -1609,6 +1611,25 @@
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
+bool __blkcg_punt_bio_submit(struct bio *bio)
+{
+ struct blkcg_gq *blkg = bio->bi_blkg;
+
+ /* consume the flag first */
+ bio->bi_opf &= ~REQ_CGROUP_PUNT;
+
+ /* never bounce for the root cgroup */
+ if (!blkg->parent)
+ return false;
+
+ spin_lock_bh(&blkg->async_bio_lock);
+ bio_list_add(&blkg->async_bios, bio);
+ spin_unlock_bh(&blkg->async_bio_lock);
+
+ queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
+ return true;
+}
+
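The producer side of this mechanism is a single flag on the bio: a submitter in the wrong cgroup context sets REQ_CGROUP_PUNT, and the flag is consumed early in the submission path, bouncing the bio to the blkg's worker. A hedged sketch (the exact hook placement in the submission path is not shown in this hunk):

	bio->bi_opf |= REQ_CGROUP_PUNT;
	submit_bio(bio);	/* diverted to blkg->async_bios if flagged */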
/*
* Scale the accumulated delay based on how long it has been since we updated
* the delay. We only call this when we are adding delay, in case it's been a
@@ -1670,6 +1691,7 @@
*/
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
+ unsigned long pflags;
u64 now = ktime_to_ns(ktime_get());
u64 exp;
u64 delay_nsec = 0;
@@ -1696,11 +1718,8 @@
*/
delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
- /*
- * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
- * that hasn't landed upstream yet. Once that stuff is in place we need
- * to do a psi_memstall_enter/leave if memdelay is set.
- */
+ if (use_memdelay)
+ psi_memstall_enter(&pflags);
exp = ktime_add_ns(now, delay_nsec);
tok = io_schedule_prepare();
@@ -1710,6 +1729,9 @@
break;
} while (!fatal_signal_pending(current));
io_schedule_finish(tok);
+
+ if (use_memdelay)
+ psi_memstall_leave(&pflags);
}
/**
@@ -1748,8 +1770,7 @@
blkg = blkg_lookup(blkcg, q);
if (!blkg)
goto out;
- blkg = blkg_try_get(blkg);
- if (!blkg)
+ if (!blkg_tryget(blkg))
goto out;
rcu_read_unlock();
@@ -1761,12 +1782,11 @@
rcu_read_unlock();
blk_put_queue(q);
}
-EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);
/**
* blkcg_schedule_throttle - this task needs to check for throttling
- * @q - the request queue IO was submitted on
- * @use_memdelay - do we charge this to memory delay for PSI
+ * @q: the request queue IO was submitted on
+ * @use_memdelay: do we charge this to memory delay for PSI
*
* This is called by the IO controller when we know there's delay accumulated
* for the blkg for this task. We do not pass the blkg because there are places
@@ -1795,12 +1815,12 @@
current->use_memdelay = use_memdelay;
set_notify_resume(current);
}
-EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);
/**
* blkcg_add_delay - add delay to this blkg
- * @now - the current time in nanoseconds
- * @delta - how many nanoseconds of delay to add
+ * @blkg: blkg of interest
+ * @now: the current time in nanoseconds
+ * @delta: how many nanoseconds of delay to add
*
* Charge @delta to the blkg's current delay accumulation. This is used to
* throttle tasks if an IO controller thinks we need more throttling.
@@ -1810,7 +1830,17 @@
blkcg_scale_delay(blkg, now);
atomic64_add(delta, &blkg->delay_nsec);
}
-EXPORT_SYMBOL_GPL(blkcg_add_delay);
+
+static int __init blkcg_init(void)
+{
+ blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
+ WQ_MEM_RECLAIM | WQ_FREEZABLE |
+ WQ_UNBOUND | WQ_SYSFS, 0);
+ if (!blkcg_punt_bio_wq)
+ return -ENOMEM;
+ return 0;
+}
+subsys_initcall(blkcg_init);
module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
diff --git a/block/blk-core.c b/block/blk-core.c
index eb8b522..d5e668e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994, Karl Keyte: Added support for disk statistics
@@ -33,8 +34,10 @@
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
+#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
+#include <linux/psi.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@@ -42,6 +45,7 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
+#include "blk-pm.h"
#include "blk-rq-qos.h"
#ifdef CONFIG_DEBUG_FS
@@ -57,11 +61,6 @@
DEFINE_IDA(blk_queue_ida);
/*
- * For the allocated request tables
- */
-struct kmem_cache *request_cachep;
-
-/*
* For queue allocation
*/
struct kmem_cache *blk_requestq_cachep;
@@ -78,11 +77,7 @@
*/
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_set(flag, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);
@@ -93,11 +88,7 @@
*/
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_clear(flag, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);
@@ -111,85 +102,15 @@
*/
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
- unsigned long flags;
- bool res;
-
- spin_lock_irqsave(q->queue_lock, flags);
- res = queue_flag_test_and_set(flag, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return res;
+ return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
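With the legacy request path gone, queue flags are plain atomic bitops and callers no longer need the queue_lock. Typical call sites (a sketch; the chosen flags are only examples):

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_REGISTERED, q)) {
		/* already set: another path won the race */
	}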
-/**
- * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
- * @flag: flag to be cleared
- * @q: request queue
- *
- * Returns the previous value of @flag - 0 if the flag was not set and 1 if
- * the flag was set.
- */
-bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
-{
- unsigned long flags;
- bool res;
-
- spin_lock_irqsave(q->queue_lock, flags);
- res = queue_flag_test_and_clear(flag, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return res;
-}
-EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
-
-static void blk_clear_congested(struct request_list *rl, int sync)
-{
-#ifdef CONFIG_CGROUP_WRITEBACK
- clear_wb_congested(rl->blkg->wb_congested, sync);
-#else
- /*
- * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
- * flip its congestion state for events on other blkcgs.
- */
- if (rl == &rl->q->root_rl)
- clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
-#endif
-}
-
-static void blk_set_congested(struct request_list *rl, int sync)
-{
-#ifdef CONFIG_CGROUP_WRITEBACK
- set_wb_congested(rl->blkg->wb_congested, sync);
-#else
- /* see blk_clear_congested() */
- if (rl == &rl->q->root_rl)
- set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
-#endif
-}
-
-void blk_queue_congestion_threshold(struct request_queue *q)
-{
- int nr;
-
- nr = q->nr_requests - (q->nr_requests / 8) + 1;
- if (nr > q->nr_requests)
- nr = q->nr_requests;
- q->nr_congestion_on = nr;
-
- nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
- if (nr < 1)
- nr = 1;
- q->nr_congestion_off = nr;
-}
-
void blk_rq_init(struct request_queue *q, struct request *rq)
{
memset(rq, 0, sizeof(*rq));
INIT_LIST_HEAD(&rq->queuelist);
- INIT_LIST_HEAD(&rq->timeout_list);
- rq->cpu = -1;
rq->q = q;
rq->__sector = (sector_t) -1;
INIT_HLIST_NODE(&rq->hash);
@@ -198,9 +119,47 @@
rq->internal_tag = -1;
rq->start_time_ns = ktime_get_ns();
rq->part = NULL;
+ refcount_set(&rq->ref, 1);
}
EXPORT_SYMBOL(blk_rq_init);
+#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
+static const char *const blk_op_name[] = {
+ REQ_OP_NAME(READ),
+ REQ_OP_NAME(WRITE),
+ REQ_OP_NAME(FLUSH),
+ REQ_OP_NAME(DISCARD),
+ REQ_OP_NAME(SECURE_ERASE),
+ REQ_OP_NAME(ZONE_RESET),
+ REQ_OP_NAME(ZONE_RESET_ALL),
+ REQ_OP_NAME(WRITE_SAME),
+ REQ_OP_NAME(WRITE_ZEROES),
+ REQ_OP_NAME(SCSI_IN),
+ REQ_OP_NAME(SCSI_OUT),
+ REQ_OP_NAME(DRV_IN),
+ REQ_OP_NAME(DRV_OUT),
+};
+#undef REQ_OP_NAME
+
+/**
+ * blk_op_str - Return the string XXX for a given REQ_OP_XXX.
+ * @op: REQ_OP_XXX.
+ *
+ * Description: Centralized block layer helper to convert a REQ_OP_XXX value
+ * into string form. Useful when debugging or tracing a bio or request. For
+ * an invalid REQ_OP_XXX it returns the string "UNKNOWN".
+ */
+inline const char *blk_op_str(unsigned int op)
+{
+ const char *op_str = "UNKNOWN";
+
+ if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
+ op_str = blk_op_name[op];
+
+ return op_str;
+}
+EXPORT_SYMBOL_GPL(blk_op_str);
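As a usage sketch, a driver could fold blk_op_str() into its own diagnostics; here rq is assumed to be an in-flight struct request:

	pr_debug("%s: %s request at sector %llu\n", __func__,
		 blk_op_str(req_op(rq)),
		 (unsigned long long)blk_rq_pos(rq));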
+
static const struct {
int errno;
const char *name;
@@ -248,17 +207,23 @@
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
-static void print_req_error(struct request *req, blk_status_t status)
+static void print_req_error(struct request *req, blk_status_t status,
+ const char *caller)
{
int idx = (__force int)status;
if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
return;
- printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
- __func__, blk_errors[idx].name, req->rq_disk ?
- req->rq_disk->disk_name : "?",
- (unsigned long long)blk_rq_pos(req));
+ printk_ratelimited(KERN_ERR
+ "%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
+ "phys_seg %u prio class %u\n",
+ caller, blk_errors[idx].name,
+ req->rq_disk ? req->rq_disk->disk_name : "?",
+ blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
+ req->cmd_flags & ~REQ_OP_MASK,
+ req->nr_phys_segments,
+ IOPRIO_PRIO_CLASS(req->ioprio));
}
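With the extra fields, a failed request now produces a log line along these lines (values illustrative; blk_update_request() is one of the callers passing __func__):

	blk_update_request: I/O error, dev sda, sector 2048 op 0x1:(WRITE) flags 0x800 phys_seg 1 prio class 0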
static void req_bio_endio(struct request *rq, struct bio *bio,
@@ -291,99 +256,6 @@
}
EXPORT_SYMBOL(blk_dump_rq_flags);
-static void blk_delay_work(struct work_struct *work)
-{
- struct request_queue *q;
-
- q = container_of(work, struct request_queue, delay_work.work);
- spin_lock_irq(q->queue_lock);
- __blk_run_queue(q);
- spin_unlock_irq(q->queue_lock);
-}
-
-/**
- * blk_delay_queue - restart queueing after defined interval
- * @q: The &struct request_queue in question
- * @msecs: Delay in msecs
- *
- * Description:
- * Sometimes queueing needs to be postponed for a little while, to allow
- * resources to come back. This function will make sure that queueing is
- * restarted around the specified time.
- */
-void blk_delay_queue(struct request_queue *q, unsigned long msecs)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (likely(!blk_queue_dead(q)))
- queue_delayed_work(kblockd_workqueue, &q->delay_work,
- msecs_to_jiffies(msecs));
-}
-EXPORT_SYMBOL(blk_delay_queue);
-
-/**
- * blk_start_queue_async - asynchronously restart a previously stopped queue
- * @q: The &struct request_queue in question
- *
- * Description:
- * blk_start_queue_async() will clear the stop flag on the queue, and
- * ensure that the request_fn for the queue is run from an async
- * context.
- **/
-void blk_start_queue_async(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- blk_run_queue_async(q);
-}
-EXPORT_SYMBOL(blk_start_queue_async);
-
-/**
- * blk_start_queue - restart a previously stopped queue
- * @q: The &struct request_queue in question
- *
- * Description:
- * blk_start_queue() will clear the stop flag on the queue, and call
- * the request_fn for the queue if it was in a stopped state when
- * entered. Also see blk_stop_queue().
- **/
-void blk_start_queue(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- __blk_run_queue(q);
-}
-EXPORT_SYMBOL(blk_start_queue);
-
-/**
- * blk_stop_queue - stop a queue
- * @q: The &struct request_queue in question
- *
- * Description:
- * The Linux block layer assumes that a block driver will consume all
- * entries on the request queue when the request_fn strategy is called.
- * Often this will not happen, because of hardware limitations (queue
- * depth settings). If a device driver gets a 'queue full' response,
- * or if it simply chooses not to queue more I/O at one point, it can
- * call this function to prevent the request_fn from being called until
- * the driver has signalled it's ready to go again. This happens by calling
- * blk_start_queue() to restart queue operations.
- **/
-void blk_stop_queue(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- cancel_delayed_work(&q->delay_work);
- queue_flag_set(QUEUE_FLAG_STOPPED, q);
-}
-EXPORT_SYMBOL(blk_stop_queue);
-
/**
* blk_sync_queue - cancel any pending callbacks on a queue
* @q: the queue
@@ -406,133 +278,29 @@
{
del_timer_sync(&q->timeout);
cancel_work_sync(&q->timeout_work);
-
- if (q->mq_ops) {
- struct blk_mq_hw_ctx *hctx;
- int i;
-
- cancel_delayed_work_sync(&q->requeue_work);
- queue_for_each_hw_ctx(q, hctx, i)
- cancel_delayed_work_sync(&hctx->run_work);
- } else {
- cancel_delayed_work_sync(&q->delay_work);
- }
}
EXPORT_SYMBOL(blk_sync_queue);
/**
- * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
+ * blk_set_pm_only - increment pm_only counter
* @q: request queue pointer
- *
- * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
- * set and 1 if the flag was already set.
*/
-int blk_set_preempt_only(struct request_queue *q)
+void blk_set_pm_only(struct request_queue *q)
{
- return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
+ atomic_inc(&q->pm_only);
}
-EXPORT_SYMBOL_GPL(blk_set_preempt_only);
+EXPORT_SYMBOL_GPL(blk_set_pm_only);
-void blk_clear_preempt_only(struct request_queue *q)
+void blk_clear_pm_only(struct request_queue *q)
{
- blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
- wake_up_all(&q->mq_freeze_wq);
+ int pm_only;
+
+ pm_only = atomic_dec_return(&q->pm_only);
+ WARN_ON_ONCE(pm_only < 0);
+ if (pm_only == 0)
+ wake_up_all(&q->mq_freeze_wq);
}
-EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
-
-/**
- * __blk_run_queue_uncond - run a queue whether or not it has been stopped
- * @q: The queue to run
- *
- * Description:
- * Invoke request handling on a queue if there are any pending requests.
- * May be used to restart request handling after a request has completed.
- * This variant runs the queue whether or not the queue has been
- * stopped. Must be called with the queue lock held and interrupts
- * disabled. See also @blk_run_queue.
- */
-inline void __blk_run_queue_uncond(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (unlikely(blk_queue_dead(q)))
- return;
-
- /*
- * Some request_fn implementations, e.g. scsi_request_fn(), unlock
- * the queue lock internally. As a result multiple threads may be
- * running such a request function concurrently. Keep track of the
- * number of active request_fn invocations such that blk_drain_queue()
- * can wait until all these request_fn calls have finished.
- */
- q->request_fn_active++;
- q->request_fn(q);
- q->request_fn_active--;
-}
-EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
-
-/**
- * __blk_run_queue - run a single device queue
- * @q: The queue to run
- *
- * Description:
- * See @blk_run_queue.
- */
-void __blk_run_queue(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (unlikely(blk_queue_stopped(q)))
- return;
-
- __blk_run_queue_uncond(q);
-}
-EXPORT_SYMBOL(__blk_run_queue);
-
-/**
- * blk_run_queue_async - run a single device queue in workqueue context
- * @q: The queue to run
- *
- * Description:
- * Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- * of us.
- *
- * Note:
- * Since it is not allowed to run q->delay_work after blk_cleanup_queue()
- * has canceled q->delay_work, callers must hold the queue lock to avoid
- * race conditions between blk_cleanup_queue() and blk_run_queue_async().
- */
-void blk_run_queue_async(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
- mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-}
-EXPORT_SYMBOL(blk_run_queue_async);
-
-/**
- * blk_run_queue - run a single device queue
- * @q: The queue to run
- *
- * Description:
- * Invoke request handling on this queue, if it has pending work to do.
- * May be used to restart queueing when a request has completed.
- */
-void blk_run_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- WARN_ON_ONCE(q->mq_ops);
-
- spin_lock_irqsave(q->queue_lock, flags);
- __blk_run_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_run_queue);
+EXPORT_SYMBOL_GPL(blk_clear_pm_only);
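The counter is intended to be used in matched pairs around a power-management transition; a hedged sketch of the expected calling pattern (the quiesce and suspend steps themselves are driver-specific):

	blk_set_pm_only(q);
	/* ... wait out in-flight requests, then suspend the device ... */
	blk_clear_pm_only(q);	/* wakes tasks blocked in blk_queue_enter() */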
void blk_put_queue(struct request_queue *q)
{
@@ -540,150 +308,6 @@
}
EXPORT_SYMBOL(blk_put_queue);
-/**
- * __blk_drain_queue - drain requests from request_queue
- * @q: queue to drain
- * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
- *
- * Drain requests from @q. If @drain_all is set, all requests are drained.
- * If not, only ELVPRIV requests are drained. The caller is responsible
- * for ensuring that no new requests which need to be drained are queued.
- */
-static void __blk_drain_queue(struct request_queue *q, bool drain_all)
- __releases(q->queue_lock)
- __acquires(q->queue_lock)
-{
- int i;
-
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- while (true) {
- bool drain = false;
-
- /*
- * The caller might be trying to drain @q before its
- * elevator is initialized.
- */
- if (q->elevator)
- elv_drain_elevator(q);
-
- blkcg_drain_queue(q);
-
- /*
- * This function might be called on a queue which failed
- * driver init after queue creation or is not yet fully
- * active. Some drivers (e.g. fd and loop) get unhappy
- * in such cases. Kick queue iff dispatch queue has
- * something on it and @q has request_fn set.
- */
- if (!list_empty(&q->queue_head) && q->request_fn)
- __blk_run_queue(q);
-
- drain |= q->nr_rqs_elvpriv;
- drain |= q->request_fn_active;
-
- /*
- * Unfortunately, requests are queued at and tracked from
- * multiple places and there's no single counter which can
- * be drained. Check all the queues and counters.
- */
- if (drain_all) {
- struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
- drain |= !list_empty(&q->queue_head);
- for (i = 0; i < 2; i++) {
- drain |= q->nr_rqs[i];
- drain |= q->in_flight[i];
- if (fq)
- drain |= !list_empty(&fq->flush_queue[i]);
- }
- }
-
- if (!drain)
- break;
-
- spin_unlock_irq(q->queue_lock);
-
- msleep(10);
-
- spin_lock_irq(q->queue_lock);
- }
-
- /*
- * With queue marked dead, any woken up waiter will fail the
- * allocation path, so the wakeup chaining is lost and we're
- * left with hung waiters. We need to wake up those waiters.
- */
- if (q->request_fn) {
- struct request_list *rl;
-
- blk_queue_for_each_rl(rl, q)
- for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
- wake_up_all(&rl->wait[i]);
- }
-}
-
-void blk_drain_queue(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- __blk_drain_queue(q, true);
- spin_unlock_irq(q->queue_lock);
-}
-
-/**
- * blk_queue_bypass_start - enter queue bypass mode
- * @q: queue of interest
- *
- * In bypass mode, only the dispatch FIFO queue of @q is used. This
- * function makes @q enter bypass mode and drains all requests which were
- * throttled or issued before. On return, it's guaranteed that no request
- * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
- * inside queue or RCU read lock.
- */
-void blk_queue_bypass_start(struct request_queue *q)
-{
- WARN_ON_ONCE(q->mq_ops);
-
- spin_lock_irq(q->queue_lock);
- q->bypass_depth++;
- queue_flag_set(QUEUE_FLAG_BYPASS, q);
- spin_unlock_irq(q->queue_lock);
-
- /*
- * Queues start drained. Skip actual draining till init is
- * complete. This avoids lengthy delays during queue init which
- * can happen many times during boot.
- */
- if (blk_queue_init_done(q)) {
- spin_lock_irq(q->queue_lock);
- __blk_drain_queue(q, false);
- spin_unlock_irq(q->queue_lock);
-
- /* ensure blk_queue_bypass() is %true inside RCU read lock */
- synchronize_rcu();
- }
-}
-EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
-
-/**
- * blk_queue_bypass_end - leave queue bypass mode
- * @q: queue of interest
- *
- * Leave bypass mode and restore the normal queueing behavior.
- *
- * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
- * this function is called for both blk-sq and blk-mq queues.
- */
-void blk_queue_bypass_end(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- if (!--q->bypass_depth)
- queue_flag_clear(QUEUE_FLAG_BYPASS, q);
- WARN_ON_ONCE(q->bypass_depth < 0);
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
-
void blk_set_queue_dying(struct request_queue *q)
{
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
@@ -695,55 +319,14 @@
*/
blk_freeze_queue_start(q);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_wake_waiters(q);
- else {
- struct request_list *rl;
-
- spin_lock_irq(q->queue_lock);
- blk_queue_for_each_rl(rl, q) {
- if (rl->rq_pool) {
- wake_up_all(&rl->wait[BLK_RW_SYNC]);
- wake_up_all(&rl->wait[BLK_RW_ASYNC]);
- }
- }
- spin_unlock_irq(q->queue_lock);
- }
/* Make blk_queue_enter() reexamine the DYING flag. */
wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
-/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
-void blk_exit_queue(struct request_queue *q)
-{
- /*
- * Since the I/O scheduler exit code may access cgroup information,
- * perform I/O scheduler exit before disassociating from the block
- * cgroup controller.
- */
- if (q->elevator) {
- ioc_clear_queue(q);
- elevator_exit(q, q->elevator);
- q->elevator = NULL;
- }
-
- /*
- * Remove all references to @q from the block cgroup controller before
- * restoring @q->queue_lock to avoid that restoring this pointer causes
- * e.g. blkcg_print_blkgs() to crash.
- */
- blkcg_exit_queue(q);
-
- /*
- * Since the cgroup code may dereference the @q->backing_dev_info
- * pointer, only decrease its reference count after having removed the
- * association with the block cgroup controller.
- */
- bdi_put(q->backing_dev_info);
-}
-
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -753,51 +336,25 @@
*/
void blk_cleanup_queue(struct request_queue *q)
{
- spinlock_t *lock = q->queue_lock;
-
/* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
blk_set_queue_dying(q);
- spin_lock_irq(lock);
- /*
- * A dying queue is permanently in bypass mode till released. Note
- * that, unlike blk_queue_bypass_start(), we aren't performing
- * synchronize_rcu() after entering bypass mode to avoid the delay
- * as some drivers create and destroy a lot of queues while
- * probing. This is still safe because blk_release_queue() will be
- * called only after the queue refcnt drops to zero and nothing,
- * RCU or not, would be traversing the queue by then.
- */
- q->bypass_depth++;
- queue_flag_set(QUEUE_FLAG_BYPASS, q);
-
- queue_flag_set(QUEUE_FLAG_NOMERGES, q);
- queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- queue_flag_set(QUEUE_FLAG_DYING, q);
- spin_unlock_irq(lock);
+ blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
mutex_unlock(&q->sysfs_lock);
/*
* Drain all requests queued before DYING marking. Set DEAD flag to
- * prevent that q->request_fn() gets invoked after draining finished.
+ * prevent blk_mq_run_hw_queues() from accessing the hardware queues
+ * after draining has finished.
*/
blk_freeze_queue(q);
- spin_lock_irq(lock);
- queue_flag_set(QUEUE_FLAG_DEAD, q);
- spin_unlock_irq(lock);
- /*
- * make sure all in-progress dispatch are completed because
- * blk_freeze_queue() can only complete all requests, and
- * dispatch may still be in-progress since we dispatch requests
- * from more than one contexts.
- *
- * We rely on driver to deal with the race in case that queue
- * initialization isn't done.
- */
- if (q->mq_ops && blk_queue_init_done(q))
- blk_mq_quiesce_queue(q);
+ rq_qos_exit(q);
+
+ blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
/* for synchronous bio-based driver finish in-flight integrity i/o */
blk_flush_integrity();
@@ -806,106 +363,32 @@
del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
blk_sync_queue(q);
+ if (queue_is_mq(q))
+ blk_mq_exit_queue(q);
+
/*
- * I/O scheduler exit is only safe after the sysfs scheduler attribute
- * has been removed.
+ * In theory, the request pool of sched_tags belongs to the request queue.
+ * However, the current implementation requires the tag_set for freeing
+ * requests, so free the pool now.
+ *
+ * The queue has been frozen, so there can't be any in-queue requests and
+ * it is safe to free them now.
*/
- WARN_ON_ONCE(q->kobj.state_in_sysfs);
+ mutex_lock(&q->sysfs_lock);
+ if (q->elevator)
+ blk_mq_sched_free_requests(q);
+ mutex_unlock(&q->sysfs_lock);
- blk_exit_queue(q);
-
- if (q->mq_ops)
- blk_mq_free_queue(q);
percpu_ref_exit(&q->q_usage_counter);
- spin_lock_irq(lock);
- if (q->queue_lock != &q->__queue_lock)
- q->queue_lock = &q->__queue_lock;
- spin_unlock_irq(lock);
-
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
-/* Allocate memory local to the request queue */
-static void *alloc_request_simple(gfp_t gfp_mask, void *data)
-{
- struct request_queue *q = data;
-
- return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
-}
-
-static void free_request_simple(void *element, void *data)
-{
- kmem_cache_free(request_cachep, element);
-}
-
-static void *alloc_request_size(gfp_t gfp_mask, void *data)
-{
- struct request_queue *q = data;
- struct request *rq;
-
- rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
- q->node);
- if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
- kfree(rq);
- rq = NULL;
- }
- return rq;
-}
-
-static void free_request_size(void *element, void *data)
-{
- struct request_queue *q = data;
-
- if (q->exit_rq_fn)
- q->exit_rq_fn(q, element);
- kfree(element);
-}
-
-int blk_init_rl(struct request_list *rl, struct request_queue *q,
- gfp_t gfp_mask)
-{
- if (unlikely(rl->rq_pool) || q->mq_ops)
- return 0;
-
- rl->q = q;
- rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
- rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
- init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
- init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
-
- if (q->cmd_size) {
- rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
- alloc_request_size, free_request_size,
- q, gfp_mask, q->node);
- } else {
- rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
- alloc_request_simple, free_request_simple,
- q, gfp_mask, q->node);
- }
- if (!rl->rq_pool)
- return -ENOMEM;
-
- if (rl != &q->root_rl)
- WARN_ON_ONCE(!blk_get_queue(q));
-
- return 0;
-}
-
-void blk_exit_rl(struct request_queue *q, struct request_list *rl)
-{
- if (rl->rq_pool) {
- mempool_destroy(rl->rq_pool);
- if (rl != &q->root_rl)
- blk_put_queue(q);
- }
-}
-
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
- return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);
+ return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);
@@ -916,7 +399,7 @@
*/
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
- const bool preempt = flags & BLK_MQ_REQ_PREEMPT;
+ const bool pm = flags & BLK_MQ_REQ_PREEMPT;
while (true) {
bool success = false;
@@ -924,11 +407,11 @@
rcu_read_lock();
if (percpu_ref_tryget_live(&q->q_usage_counter)) {
/*
- * The code that sets the PREEMPT_ONLY flag is
- * responsible for ensuring that that flag is globally
- * visible before the queue is unfrozen.
+ * The code that increments the pm_only counter is
+ * responsible for ensuring that the counter is
+ * globally visible before the queue is unfrozen.
*/
- if (preempt || !blk_queue_preempt_only(q)) {
+ if (pm || !blk_queue_pm_only(q)) {
success = true;
} else {
percpu_ref_put(&q->q_usage_counter);
@@ -952,8 +435,9 @@
smp_rmb();
wait_event(q->mq_freeze_wq,
- (atomic_read(&q->mq_freeze_depth) == 0 &&
- (preempt || !blk_queue_preempt_only(q))) ||
+ (!q->mq_freeze_depth &&
+ (pm || (blk_pm_request_resume(q),
+ !blk_queue_pm_only(q)))) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
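The comma operator inside the wait_event() condition is easy to misread; for illustration only, an expanded equivalent of the pm branch of that condition:

	bool pm_ok = pm;

	if (!pm_ok) {
		/* nudge a runtime-suspended queue before re-checking */
		blk_pm_request_resume(q);
		pm_ok = !blk_queue_pm_only(q);
	}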
@@ -980,21 +464,16 @@
kblockd_schedule_work(&q->timeout_work);
}
+static void blk_timeout_work(struct work_struct *work)
+{
+}
+
/**
* blk_alloc_queue_node - allocate a request queue
* @gfp_mask: memory allocation flags
* @node_id: NUMA node to allocate memory from
- * @lock: For legacy queues, pointer to a spinlock that will be used to e.g.
- * serialize calls to the legacy .request_fn() callback. Ignored for
- * blk-mq request queues.
- *
- * Note: pass the queue lock as the third argument to this function instead of
- * setting the queue lock pointer explicitly to avoid triggering a sporadic
- * crash in the blkcg code. This function namely calls blkcg_init_queue() and
- * the queue lock pointer must be set before blkcg_init_queue() is called.
*/
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
- spinlock_t *lock)
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
int ret;
@@ -1004,10 +483,7 @@
if (!q)
return NULL;
- INIT_LIST_HEAD(&q->queue_head);
q->last_merge = NULL;
- q->end_sector = 0;
- q->boundary_rq = NULL;
q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
@@ -1025,8 +501,7 @@
if (!q->stats)
goto fail_stats;
- q->backing_dev_info->ra_pages =
- (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
+ q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info->name = "block";
q->node = node_id;
@@ -1034,13 +509,11 @@
timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
- INIT_WORK(&q->timeout_work, NULL);
- INIT_LIST_HEAD(&q->timeout_list);
+ INIT_WORK(&q->timeout_work, blk_timeout_work);
INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
INIT_LIST_HEAD(&q->blkg_list);
#endif
- INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
kobject_init(&q->kobj, &blk_queue_ktype);
@@ -1048,21 +521,11 @@
mutex_init(&q->blk_trace_mutex);
#endif
mutex_init(&q->sysfs_lock);
- spin_lock_init(&q->__queue_lock);
-
- if (!q->mq_ops)
- q->queue_lock = lock ? : &q->__queue_lock;
-
- /*
- * A queue starts its life with bypass turned on to avoid
- * unnecessary bypass on/off overhead and nasty surprises during
- * init. The initial bypass will be finished when the queue is
- * registered by blk_register_queue().
- */
- q->bypass_depth = 1;
- queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
+ mutex_init(&q->sysfs_dir_lock);
+ spin_lock_init(&q->queue_lock);
init_waitqueue_head(&q->mq_freeze_wq);
+ mutex_init(&q->mq_freeze_lock);
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
@@ -1094,105 +557,6 @@
}
EXPORT_SYMBOL(blk_alloc_queue_node);
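A bio-based driver now pairs this allocation with blk_queue_make_request() instead of a request_fn; a minimal sketch, where my_make_request is a hypothetical make_request_fn:

	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
		return -ENOMEM;
	blk_queue_make_request(q, my_make_request);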
-/**
- * blk_init_queue - prepare a request queue for use with a block device
- * @rfn: The function to be called to process requests that have been
- * placed on the queue.
- * @lock: Request queue spin lock
- *
- * Description:
- * If a block device wishes to use the standard request handling procedures,
- * which sorts requests and coalesces adjacent requests, then it must
- * call blk_init_queue(). The function @rfn will be called when there
- * are requests on the queue that need to be processed. If the device
- * supports plugging, then @rfn may not be called immediately when requests
- * are available on the queue, but may be called at some time later instead.
- * Plugged queues are generally unplugged when a buffer belonging to one
- * of the requests on the queue is needed, or due to memory pressure.
- *
- * @rfn is not required, or even expected, to remove all requests off the
- * queue, but only as many as it can handle at a time. If it does leave
- * requests on the queue, it is responsible for arranging that the requests
- * get dealt with eventually.
- *
- * The queue spin lock must be held while manipulating the requests on the
- * request queue; this lock will be taken also from interrupt context, so irq
- * disabling is needed for it.
- *
- * Function returns a pointer to the initialized request queue, or %NULL if
- * it didn't succeed.
- *
- * Note:
- * blk_init_queue() must be paired with a blk_cleanup_queue() call
- * when the block device is deactivated (such as at module unload).
- **/
-
-struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
-{
- return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
-}
-EXPORT_SYMBOL(blk_init_queue);
-
-struct request_queue *
-blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
-{
- struct request_queue *q;
-
- q = blk_alloc_queue_node(GFP_KERNEL, node_id, lock);
- if (!q)
- return NULL;
-
- q->request_fn = rfn;
- if (blk_init_allocated_queue(q) < 0) {
- blk_cleanup_queue(q);
- return NULL;
- }
-
- return q;
-}
-EXPORT_SYMBOL(blk_init_queue_node);
-
-static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
-
-
-int blk_init_allocated_queue(struct request_queue *q)
-{
- WARN_ON_ONCE(q->mq_ops);
-
- q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
- if (!q->fq)
- return -ENOMEM;
-
- if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
- goto out_free_flush_queue;
-
- if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
- goto out_exit_flush_rq;
-
- INIT_WORK(&q->timeout_work, blk_timeout_work);
- q->queue_flags |= QUEUE_FLAG_DEFAULT;
-
- /*
- * This also sets hw/phys segments, boundary and size
- */
- blk_queue_make_request(q, blk_queue_bio);
-
- q->sg_reserved_size = INT_MAX;
-
- if (elevator_init(q))
- goto out_exit_flush_rq;
- return 0;
-
-out_exit_flush_rq:
- if (q->exit_rq_fn)
- q->exit_rq_fn(q, q->fq->flush_rq);
-out_free_flush_queue:
- blk_free_flush_queue(q->fq);
- q->fq = NULL;
- return -ENOMEM;
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
bool blk_get_queue(struct request_queue *q)
{
if (likely(!blk_queue_dying(q))) {
@@ -1204,406 +568,6 @@
}
EXPORT_SYMBOL(blk_get_queue);
-static inline void blk_free_request(struct request_list *rl, struct request *rq)
-{
- if (rq->rq_flags & RQF_ELVPRIV) {
- elv_put_request(rl->q, rq);
- if (rq->elv.icq)
- put_io_context(rq->elv.icq->ioc);
- }
-
- mempool_free(rq, rl->rq_pool);
-}
-
-/*
- * ioc_batching returns true if the ioc is a valid batching request and
- * should be given priority access to a request.
- */
-static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
-{
- if (!ioc)
- return 0;
-
- /*
- * Make sure the process is able to allocate at least 1 request
- * even if the batch times out, otherwise we could theoretically
- * lose wakeups.
- */
- return ioc->nr_batch_requests == q->nr_batching ||
- (ioc->nr_batch_requests > 0
- && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
-}
-
-/*
- * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
- * will cause the process to be a "batcher" on all queues in the system. This
- * is the behaviour we want though - once it gets a wakeup it should be given
- * a nice run.
- */
-static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
-{
- if (!ioc || ioc_batching(q, ioc))
- return;
-
- ioc->nr_batch_requests = q->nr_batching;
- ioc->last_waited = jiffies;
-}
-
-static void __freed_request(struct request_list *rl, int sync)
-{
- struct request_queue *q = rl->q;
-
- if (rl->count[sync] < queue_congestion_off_threshold(q))
- blk_clear_congested(rl, sync);
-
- if (rl->count[sync] + 1 <= q->nr_requests) {
- if (waitqueue_active(&rl->wait[sync]))
- wake_up(&rl->wait[sync]);
-
- blk_clear_rl_full(rl, sync);
- }
-}
-
-/*
- * A request has just been released. Account for it, update the full and
- * congestion status, wake up any waiters. Called under q->queue_lock.
- */
-static void freed_request(struct request_list *rl, bool sync,
- req_flags_t rq_flags)
-{
- struct request_queue *q = rl->q;
-
- q->nr_rqs[sync]--;
- rl->count[sync]--;
- if (rq_flags & RQF_ELVPRIV)
- q->nr_rqs_elvpriv--;
-
- __freed_request(rl, sync);
-
- if (unlikely(rl->starved[sync ^ 1]))
- __freed_request(rl, sync ^ 1);
-}
-
-int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
-{
- struct request_list *rl;
- int on_thresh, off_thresh;
-
- WARN_ON_ONCE(q->mq_ops);
-
- spin_lock_irq(q->queue_lock);
- q->nr_requests = nr;
- blk_queue_congestion_threshold(q);
- on_thresh = queue_congestion_on_threshold(q);
- off_thresh = queue_congestion_off_threshold(q);
-
- blk_queue_for_each_rl(rl, q) {
- if (rl->count[BLK_RW_SYNC] >= on_thresh)
- blk_set_congested(rl, BLK_RW_SYNC);
- else if (rl->count[BLK_RW_SYNC] < off_thresh)
- blk_clear_congested(rl, BLK_RW_SYNC);
-
- if (rl->count[BLK_RW_ASYNC] >= on_thresh)
- blk_set_congested(rl, BLK_RW_ASYNC);
- else if (rl->count[BLK_RW_ASYNC] < off_thresh)
- blk_clear_congested(rl, BLK_RW_ASYNC);
-
- if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
- blk_set_rl_full(rl, BLK_RW_SYNC);
- } else {
- blk_clear_rl_full(rl, BLK_RW_SYNC);
- wake_up(&rl->wait[BLK_RW_SYNC]);
- }
-
- if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
- blk_set_rl_full(rl, BLK_RW_ASYNC);
- } else {
- blk_clear_rl_full(rl, BLK_RW_ASYNC);
- wake_up(&rl->wait[BLK_RW_ASYNC]);
- }
- }
-
- spin_unlock_irq(q->queue_lock);
- return 0;
-}
-
-/**
- * __get_request - get a free request
- * @rl: request list to allocate from
- * @op: operation and flags
- * @bio: bio to allocate request for (can be %NULL)
- * @flags: BLQ_MQ_REQ_* flags
- * @gfp_mask: allocator flags
- *
- * Get a free request from @q. This function may fail under memory
- * pressure or if @q is dead.
- *
- * Must be called with @q->queue_lock held and,
- * Returns ERR_PTR on failure, with @q->queue_lock held.
- * Returns request pointer on success, with @q->queue_lock *not held*.
- */
-static struct request *__get_request(struct request_list *rl, unsigned int op,
- struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp_mask)
-{
- struct request_queue *q = rl->q;
- struct request *rq;
- struct elevator_type *et = q->elevator->type;
- struct io_context *ioc = rq_ioc(bio);
- struct io_cq *icq = NULL;
- const bool is_sync = op_is_sync(op);
- int may_queue;
- req_flags_t rq_flags = RQF_ALLOCED;
-
- lockdep_assert_held(q->queue_lock);
-
- if (unlikely(blk_queue_dying(q)))
- return ERR_PTR(-ENODEV);
-
- may_queue = elv_may_queue(q, op);
- if (may_queue == ELV_MQUEUE_NO)
- goto rq_starved;
-
- if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
- if (rl->count[is_sync]+1 >= q->nr_requests) {
- /*
- * The queue will fill after this allocation, so set
- * it as full, and mark this process as "batching".
- * This process will be allowed to complete a batch of
- * requests, others will be blocked.
- */
- if (!blk_rl_full(rl, is_sync)) {
- ioc_set_batching(q, ioc);
- blk_set_rl_full(rl, is_sync);
- } else {
- if (may_queue != ELV_MQUEUE_MUST
- && !ioc_batching(q, ioc)) {
- /*
- * The queue is full and the allocating
- * process is not a "batcher", and not
- * exempted by the IO scheduler
- */
- return ERR_PTR(-ENOMEM);
- }
- }
- }
- blk_set_congested(rl, is_sync);
- }
-
- /*
- * Only allow batching queuers to allocate up to 50% over the defined
- * limit of requests, otherwise we could have thousands of requests
- * allocated with any setting of ->nr_requests
- */
- if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
- return ERR_PTR(-ENOMEM);
-
- q->nr_rqs[is_sync]++;
- rl->count[is_sync]++;
- rl->starved[is_sync] = 0;
-
- /*
- * Decide whether the new request will be managed by elevator. If
- * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will
- * prevent the current elevator from being destroyed until the new
- * request is freed. This guarantees icq's won't be destroyed and
- * makes creating new ones safe.
- *
- * Flush requests do not use the elevator so skip initialization.
- * This allows a request to share the flush and elevator data.
- *
- * Also, lookup icq while holding queue_lock. If it doesn't exist,
- * it will be created after releasing queue_lock.
- */
- if (!op_is_flush(op) && !blk_queue_bypass(q)) {
- rq_flags |= RQF_ELVPRIV;
- q->nr_rqs_elvpriv++;
- if (et->icq_cache && ioc)
- icq = ioc_lookup_icq(ioc, q);
- }
-
- if (blk_queue_io_stat(q))
- rq_flags |= RQF_IO_STAT;
- spin_unlock_irq(q->queue_lock);
-
- /* allocate and init request */
- rq = mempool_alloc(rl->rq_pool, gfp_mask);
- if (!rq)
- goto fail_alloc;
-
- blk_rq_init(q, rq);
- blk_rq_set_rl(rq, rl);
- rq->cmd_flags = op;
- rq->rq_flags = rq_flags;
- if (flags & BLK_MQ_REQ_PREEMPT)
- rq->rq_flags |= RQF_PREEMPT;
-
- /* init elvpriv */
- if (rq_flags & RQF_ELVPRIV) {
- if (unlikely(et->icq_cache && !icq)) {
- if (ioc)
- icq = ioc_create_icq(ioc, q, gfp_mask);
- if (!icq)
- goto fail_elvpriv;
- }
-
- rq->elv.icq = icq;
- if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
- goto fail_elvpriv;
-
- /* @rq->elv.icq holds io_context until @rq is freed */
- if (icq)
- get_io_context(icq->ioc);
- }
-out:
- /*
- * ioc may be NULL here, and ioc_batching will be false. That's
- * OK, if the queue is under the request limit then requests need
- * not count toward the nr_batch_requests limit. There will always
- * be some limit enforced by BLK_BATCH_TIME.
- */
- if (ioc_batching(q, ioc))
- ioc->nr_batch_requests--;
-
- trace_block_getrq(q, bio, op);
- return rq;
-
-fail_elvpriv:
- /*
- * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
- * and may fail indefinitely under memory pressure and thus
- * shouldn't stall IO. Treat this request as !elvpriv. This will
- * disturb iosched and blkcg but weird is better than dead.
- */
- printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
- __func__, dev_name(q->backing_dev_info->dev));
-
- rq->rq_flags &= ~RQF_ELVPRIV;
- rq->elv.icq = NULL;
-
- spin_lock_irq(q->queue_lock);
- q->nr_rqs_elvpriv--;
- spin_unlock_irq(q->queue_lock);
- goto out;
-
-fail_alloc:
- /*
- * Allocation failed presumably due to memory. Undo anything we
- * might have messed up.
- *
- * Allocating task should really be put onto the front of the wait
- * queue, but this is pretty rare.
- */
- spin_lock_irq(q->queue_lock);
- freed_request(rl, is_sync, rq_flags);
-
- /*
- * in the very unlikely event that allocation failed and no
- * requests for this direction was pending, mark us starved so that
- * freeing of a request in the other direction will notice
- * us. another possible fix would be to split the rq mempool into
- * READ and WRITE
- */
-rq_starved:
- if (unlikely(rl->count[is_sync] == 0))
- rl->starved[is_sync] = 1;
- return ERR_PTR(-ENOMEM);
-}
-
-/**
- * get_request - get a free request
- * @q: request_queue to allocate request from
- * @op: operation and flags
- * @bio: bio to allocate request for (can be %NULL)
- * @flags: BLK_MQ_REQ_* flags.
- * @gfp: allocator flags
- *
- * Get a free request from @q. If %BLK_MQ_REQ_NOWAIT is set in @flags,
- * this function keeps retrying under memory pressure and fails iff @q is dead.
- *
- * Must be called with @q->queue_lock held and,
- * Returns ERR_PTR on failure, with @q->queue_lock held.
- * Returns request pointer on success, with @q->queue_lock *not held*.
- */
-static struct request *get_request(struct request_queue *q, unsigned int op,
- struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp)
-{
- const bool is_sync = op_is_sync(op);
- DEFINE_WAIT(wait);
- struct request_list *rl;
- struct request *rq;
-
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- rl = blk_get_rl(q, bio); /* transferred to @rq on success */
-retry:
- rq = __get_request(rl, op, bio, flags, gfp);
- if (!IS_ERR(rq))
- return rq;
-
- if (op & REQ_NOWAIT) {
- blk_put_rl(rl);
- return ERR_PTR(-EAGAIN);
- }
-
- if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) {
- blk_put_rl(rl);
- return rq;
- }
-
- /* wait on @rl and retry */
- prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
- TASK_UNINTERRUPTIBLE);
-
- trace_block_sleeprq(q, bio, op);
-
- spin_unlock_irq(q->queue_lock);
- io_schedule();
-
- /*
- * After sleeping, we become a "batching" process and will be able
- * to allocate at least one request, and up to a big batch of them
- * for a small period time. See ioc_batching, ioc_set_batching
- */
- ioc_set_batching(q, current->io_context);
-
- spin_lock_irq(q->queue_lock);
- finish_wait(&rl->wait[is_sync], &wait);
-
- goto retry;
-}
-
-/* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
-static struct request *blk_old_get_request(struct request_queue *q,
- unsigned int op, blk_mq_req_flags_t flags)
-{
- struct request *rq;
- gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC : GFP_NOIO;
- int ret = 0;
-
- WARN_ON_ONCE(q->mq_ops);
-
- /* create ioc upfront */
- create_io_context(gfp_mask, q->node);
-
- ret = blk_queue_enter(q, flags);
- if (ret)
- return ERR_PTR(ret);
- spin_lock_irq(q->queue_lock);
- rq = get_request(q, op, NULL, flags, gfp_mask);
- if (IS_ERR(rq)) {
- spin_unlock_irq(q->queue_lock);
- blk_queue_exit(q);
- return rq;
- }
-
- /* q->queue_lock is unlocked at this point */
- rq->__data_len = 0;
- rq->__sector = (sector_t) -1;
- rq->bio = rq->biotail = NULL;
- return rq;
-}
-
/**
* blk_get_request - allocate a request
* @q: request queue to allocate a request for
@@ -1618,191 +582,30 @@
WARN_ON_ONCE(op & REQ_NOWAIT);
WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
- if (q->mq_ops) {
- req = blk_mq_alloc_request(q, op, flags);
- if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
- q->mq_ops->initialize_rq_fn(req);
- } else {
- req = blk_old_get_request(q, op, flags);
- if (!IS_ERR(req) && q->initialize_rq_fn)
- q->initialize_rq_fn(req);
- }
+ req = blk_mq_alloc_request(q, op, flags);
+ if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
+ q->mq_ops->initialize_rq_fn(req);
return req;
}
EXPORT_SYMBOL(blk_get_request);
-/**
- * blk_requeue_request - put a request back on queue
- * @q: request queue where request should be inserted
- * @rq: request to be inserted
- *
- * Description:
- * Drivers often keep queueing requests until the hardware cannot accept
- * more, when that condition happens we need to put the request back
- * on the queue. Must be called with queue lock held.
- */
-void blk_requeue_request(struct request_queue *q, struct request *rq)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- blk_delete_timer(rq);
- blk_clear_rq_complete(rq);
- trace_block_rq_requeue(q, rq);
- rq_qos_requeue(q, rq);
-
- if (rq->rq_flags & RQF_QUEUED)
- blk_queue_end_tag(q, rq);
-
- BUG_ON(blk_queued_rq(rq));
-
- elv_requeue_request(q, rq);
-}
-EXPORT_SYMBOL(blk_requeue_request);
-
-static void add_acct_request(struct request_queue *q, struct request *rq,
- int where)
-{
- blk_account_io_start(rq, true);
- __elv_add_request(q, rq, where);
-}
-
-static void part_round_stats_single(struct request_queue *q, int cpu,
- struct hd_struct *part, unsigned long now,
- unsigned int inflight)
-{
- if (inflight) {
- __part_stat_add(cpu, part, time_in_queue,
- inflight * (now - part->stamp));
- __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
- }
- part->stamp = now;
-}
-
-/**
- * part_round_stats() - Round off the performance stats on a struct disk_stats.
- * @q: target block queue
- * @cpu: cpu number for stats access
- * @part: target partition
- *
- * The average IO queue length and utilisation statistics are maintained
- * by observing the current state of the queue length and the amount of
- * time it has been in this state for.
- *
- * Normally, that accounting is done on IO completion, but that can result
- * in more than a second's worth of IO being accounted for within any one
- * second, leading to >100% utilisation. To deal with that, we call this
- * function to do a round-off before returning the results when reading
- * /proc/diskstats. This accounts immediately for all queue usage up to
- * the current jiffies and restarts the counters again.
- */
-void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
-{
- struct hd_struct *part2 = NULL;
- unsigned long now = jiffies;
- unsigned int inflight[2];
- int stats = 0;
-
- if (part->stamp != now)
- stats |= 1;
-
- if (part->partno) {
- part2 = &part_to_disk(part)->part0;
- if (part2->stamp != now)
- stats |= 2;
- }
-
- if (!stats)
- return;
-
- part_in_flight(q, part, inflight);
-
- if (stats & 2)
- part_round_stats_single(q, cpu, part2, now, inflight[1]);
- if (stats & 1)
- part_round_stats_single(q, cpu, part, now, inflight[0]);
-}
-EXPORT_SYMBOL_GPL(part_round_stats);
-
-#ifdef CONFIG_PM
-static void blk_pm_put_request(struct request *rq)
-{
- if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
- pm_runtime_mark_last_busy(rq->q->dev);
-}
-#else
-static inline void blk_pm_put_request(struct request *rq) {}
-#endif
-
-void __blk_put_request(struct request_queue *q, struct request *req)
-{
- req_flags_t rq_flags = req->rq_flags;
-
- if (unlikely(!q))
- return;
-
- if (q->mq_ops) {
- blk_mq_free_request(req);
- return;
- }
-
- lockdep_assert_held(q->queue_lock);
-
- blk_req_zone_write_unlock(req);
- blk_pm_put_request(req);
-
- elv_completed_request(q, req);
-
- /* this is a bio leak */
- WARN_ON(req->bio != NULL);
-
- rq_qos_done(q, req);
-
- /*
- * Request may not have originated from ll_rw_blk. if not,
- * it didn't come out of our reserved rq pools
- */
- if (rq_flags & RQF_ALLOCED) {
- struct request_list *rl = blk_rq_rl(req);
- bool sync = op_is_sync(req->cmd_flags);
-
- BUG_ON(!list_empty(&req->queuelist));
- BUG_ON(ELV_ON_HASH(req));
-
- blk_free_request(rl, req);
- freed_request(rl, sync, rq_flags);
- blk_put_rl(rl);
- blk_queue_exit(q);
- }
-}
-EXPORT_SYMBOL_GPL(__blk_put_request);
-
void blk_put_request(struct request *req)
{
- struct request_queue *q = req->q;
-
- if (q->mq_ops)
- blk_mq_free_request(req);
- else {
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- __blk_put_request(q, req);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
+ blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);
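Both the allocate and free paths now funnel straight into blk-mq. A hedged sketch of the usual allocate/issue/free pattern for a passthrough-style request (error handling trimmed):

	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	/* ... fill in the request and issue it, e.g. via blk_execute_rq() ... */
	blk_put_request(rq);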
-bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
- struct bio *bio)
+bool bio_attempt_back_merge(struct request *req, struct bio *bio,
+ unsigned int nr_segs)
{
const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
- if (!ll_back_merge_fn(q, req, bio))
+ if (!ll_back_merge_fn(req, bio, nr_segs))
return false;
- trace_block_bio_backmerge(q, req, bio);
+ trace_block_bio_backmerge(req->q, req, bio);
+ rq_qos_merge(req->q, req, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
blk_rq_set_mixed_merge(req);
@@ -1810,21 +613,21 @@
req->biotail->bi_next = bio;
req->biotail = bio;
req->__data_len += bio->bi_iter.bi_size;
- req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
blk_account_io_start(req, false);
return true;
}
-bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
- struct bio *bio)
+bool bio_attempt_front_merge(struct request *req, struct bio *bio,
+ unsigned int nr_segs)
{
const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
- if (!ll_front_merge_fn(q, req, bio))
+ if (!ll_front_merge_fn(req, bio, nr_segs))
return false;
- trace_block_bio_frontmerge(q, req, bio);
+ trace_block_bio_frontmerge(req->q, req, bio);
+ rq_qos_merge(req->q, req, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
blk_rq_set_mixed_merge(req);
@@ -1834,7 +637,6 @@
req->__sector = bio->bi_iter.bi_sector;
req->__data_len += bio->bi_iter.bi_size;
- req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
blk_account_io_start(req, false);
return true;
@@ -1851,10 +653,11 @@
blk_rq_get_max_sectors(req, blk_rq_pos(req)))
goto no_merge;
+ rq_qos_merge(q, req, bio);
+
req->biotail->bi_next = bio;
req->biotail = bio;
req->__data_len += bio->bi_iter.bi_size;
- req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
req->nr_phys_segments = segments + 1;
blk_account_io_start(req, false);
@@ -1868,7 +671,7 @@
* blk_attempt_plug_merge - try to merge with %current's plugged list
* @q: request_queue new bio is being queued at
* @bio: new bio being queued
- * @request_count: out parameter for number of traversed plugged requests
+ * @nr_segs: number of segments in @bio
* @same_queue_rq: pointer to &struct request that gets filled in when
* another request associated with @q is found on the plug list
* (optional, may be %NULL)
@@ -1887,35 +690,28 @@
* Caller must ensure !blk_queue_nomerges(q) beforehand.
*/
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
- unsigned int *request_count,
- struct request **same_queue_rq)
+ unsigned int nr_segs, struct request **same_queue_rq)
{
struct blk_plug *plug;
struct request *rq;
struct list_head *plug_list;
- plug = current->plug;
+ plug = blk_mq_plug(q, bio);
if (!plug)
return false;
- *request_count = 0;
- if (q->mq_ops)
- plug_list = &plug->mq_list;
- else
- plug_list = &plug->list;
+ plug_list = &plug->mq_list;
list_for_each_entry_reverse(rq, plug_list, queuelist) {
bool merged = false;
- if (rq->q == q) {
- (*request_count)++;
+ if (rq->q == q && same_queue_rq) {
/*
* Only the blk-mq multiple hardware queues case checks the
* rq in the same queue; there should be only one such
* rq in a queue.
*/
- if (same_queue_rq)
- *same_queue_rq = rq;
+ *same_queue_rq = rq;
}
if (rq->q != q || !blk_rq_merge_ok(rq, bio))
@@ -1923,10 +719,10 @@
switch (blk_try_merge(rq, bio)) {
case ELEVATOR_BACK_MERGE:
- merged = bio_attempt_back_merge(q, rq, bio);
+ merged = bio_attempt_back_merge(rq, bio, nr_segs);
break;
case ELEVATOR_FRONT_MERGE:
- merged = bio_attempt_front_merge(q, rq, bio);
+ merged = bio_attempt_front_merge(rq, bio, nr_segs);
break;
case ELEVATOR_DISCARD_MERGE:
merged = bio_attempt_discard_merge(q, rq, bio);
@@ -1942,176 +738,6 @@
return false;
}
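For context, the plug list scanned above is populated between blk_start_plug() and blk_finish_plug(); a minimal submitter-side sketch, assuming bio1 and bio2 are already prepared:

	struct blk_plug plug;

	blk_start_plug(&plug);
	/* bios submitted here may merge into requests on plug.mq_list */
	submit_bio(bio1);
	submit_bio(bio2);
	blk_finish_plug(&plug);	/* flushes the plugged requests */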
-unsigned int blk_plug_queued_count(struct request_queue *q)
-{
- struct blk_plug *plug;
- struct request *rq;
- struct list_head *plug_list;
- unsigned int ret = 0;
-
- plug = current->plug;
- if (!plug)
- goto out;
-
- if (q->mq_ops)
- plug_list = &plug->mq_list;
- else
- plug_list = &plug->list;
-
- list_for_each_entry(rq, plug_list, queuelist) {
- if (rq->q == q)
- ret++;
- }
-out:
- return ret;
-}
-
-void blk_init_request_from_bio(struct request *req, struct bio *bio)
-{
- struct io_context *ioc = rq_ioc(bio);
-
- if (bio->bi_opf & REQ_RAHEAD)
- req->cmd_flags |= REQ_FAILFAST_MASK;
-
- req->__sector = bio->bi_iter.bi_sector;
- if (ioprio_valid(bio_prio(bio)))
- req->ioprio = bio_prio(bio);
- else if (ioc)
- req->ioprio = ioc->ioprio;
- else
- req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
- req->write_hint = bio->bi_write_hint;
- blk_rq_bio_prep(req->q, req, bio);
-}
-EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
-
-static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
-{
- struct blk_plug *plug;
- int where = ELEVATOR_INSERT_SORT;
- struct request *req, *free;
- unsigned int request_count = 0;
-
- /*
- * low level driver can indicate that it wants pages above a
- * certain limit bounced to low memory (ie for highmem, or even
- * ISA dma in theory)
- */
- blk_queue_bounce(q, &bio);
-
- blk_queue_split(q, &bio);
-
- if (!bio_integrity_prep(bio))
- return BLK_QC_T_NONE;
-
- if (op_is_flush(bio->bi_opf)) {
- spin_lock_irq(q->queue_lock);
- where = ELEVATOR_INSERT_FLUSH;
- goto get_rq;
- }
-
- /*
- * Check if we can merge with the plugged list before grabbing
- * any locks.
- */
- if (!blk_queue_nomerges(q)) {
- if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
- return BLK_QC_T_NONE;
- } else
- request_count = blk_plug_queued_count(q);
-
- spin_lock_irq(q->queue_lock);
-
- switch (elv_merge(q, &req, bio)) {
- case ELEVATOR_BACK_MERGE:
- if (!bio_attempt_back_merge(q, req, bio))
- break;
- elv_bio_merged(q, req, bio);
- free = attempt_back_merge(q, req);
- if (free)
- __blk_put_request(q, free);
- else
- elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
- goto out_unlock;
- case ELEVATOR_FRONT_MERGE:
- if (!bio_attempt_front_merge(q, req, bio))
- break;
- elv_bio_merged(q, req, bio);
- free = attempt_front_merge(q, req);
- if (free)
- __blk_put_request(q, free);
- else
- elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
- goto out_unlock;
- default:
- break;
- }
-
-get_rq:
- rq_qos_throttle(q, bio, q->queue_lock);
-
- /*
- * Grab a free request. This might sleep but cannot fail.
- * Returns with the queue unlocked.
- */
- blk_queue_enter_live(q);
- req = get_request(q, bio->bi_opf, bio, 0, GFP_NOIO);
- if (IS_ERR(req)) {
- blk_queue_exit(q);
- rq_qos_cleanup(q, bio);
- if (PTR_ERR(req) == -ENOMEM)
- bio->bi_status = BLK_STS_RESOURCE;
- else
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
- goto out_unlock;
- }
-
- rq_qos_track(q, req, bio);
-
- /*
- * After dropping the lock and possibly sleeping here, our request
- * may now be mergeable after it had proven unmergeable (above).
- * We don't worry about that case for efficiency. It won't happen
- * often, and the elevators are able to handle it.
- */
- blk_init_request_from_bio(req, bio);
-
- if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
- req->cpu = raw_smp_processor_id();
-
- plug = current->plug;
- if (plug) {
- /*
- * If this is the first request added after a plug, fire
- * off a plug trace.
- *
- * @request_count may become stale because of schedule
- * out, so check plug list again.
- */
- if (!request_count || list_empty(&plug->list))
- trace_block_plug(q);
- else {
- struct request *last = list_entry_rq(plug->list.prev);
- if (request_count >= BLK_MAX_REQUEST_COUNT ||
- blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
- blk_flush_plug_list(plug, false);
- trace_block_plug(q);
- }
- }
- list_add_tail(&req->queuelist, &plug->list);
- blk_account_io_start(req, true);
- } else {
- spin_lock_irq(q->queue_lock);
- add_acct_request(q, req, where);
- __blk_run_queue(q);
-out_unlock:
- spin_unlock_irq(q->queue_lock);
- }
-
- return BLK_QC_T_NONE;
-}
-
static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
char b[BDEVNAME_SIZE];
@@ -2263,7 +889,7 @@
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
* if queue is not a request based queue.
*/
- if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+ if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
goto not_supported;
if (should_fail_bio(bio))
@@ -2293,6 +919,9 @@
}
}
+ if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ bio->bi_opf &= ~REQ_HIPRI;
+
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
if (!blk_queue_discard(q))
@@ -2306,11 +935,14 @@
if (!q->limits.max_write_same_sectors)
goto not_supported;
break;
- case REQ_OP_ZONE_REPORT:
case REQ_OP_ZONE_RESET:
if (!blk_queue_is_zoned(q))
goto not_supported;
break;
+ case REQ_OP_ZONE_RESET_ALL:
+ if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
+ goto not_supported;
+ break;
case REQ_OP_WRITE_ZEROES:
if (!q->limits.max_write_zeroes_sectors)
goto not_supported;
@@ -2381,22 +1013,8 @@
* yet.
*/
struct bio_list bio_list_on_stack[2];
- blk_mq_req_flags_t flags = 0;
- struct request_queue *q = bio->bi_disk->queue;
blk_qc_t ret = BLK_QC_T_NONE;
- if (bio->bi_opf & REQ_NOWAIT)
- flags = BLK_MQ_REQ_NOWAIT;
- if (bio_flagged(bio, BIO_QUEUE_ENTERED))
- blk_queue_enter_live(q);
- else if (blk_queue_enter(q, flags) < 0) {
- if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
- bio_wouldblock_error(bio);
- else
- bio_io_error(bio);
- return ret;
- }
-
if (!generic_make_request_checks(bio))
goto out;
@@ -2433,22 +1051,11 @@
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
do {
- bool enter_succeeded = true;
+ struct request_queue *q = bio->bi_disk->queue;
+ blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
+ BLK_MQ_REQ_NOWAIT : 0;
- if (unlikely(q != bio->bi_disk->queue)) {
- if (q)
- blk_queue_exit(q);
- q = bio->bi_disk->queue;
- flags = 0;
- if (bio->bi_opf & REQ_NOWAIT)
- flags = BLK_MQ_REQ_NOWAIT;
- if (blk_queue_enter(q, flags) < 0) {
- enter_succeeded = false;
- q = NULL;
- }
- }
-
- if (enter_succeeded) {
+ if (likely(blk_queue_enter(q, flags) == 0)) {
struct bio_list lower, same;
/* Create a fresh bio_list for all subordinate requests */
@@ -2456,6 +1063,8 @@
bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);
+ blk_queue_exit(q);
+
/* sort new bios into those for a lower level
* and those for the same level
*/
@@ -2482,8 +1091,6 @@
current->bio_list = NULL; /* deactivate */
out:
- if (q)
- blk_queue_exit(q);
return ret;
}
EXPORT_SYMBOL(generic_make_request);
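The bio_list indirection above exists for stacking drivers that resubmit remapped bios from their own ->make_request_fn; a hedged sketch of that pattern, where lower_bdev and lower_offset stand in for hypothetical driver state:

	static blk_qc_t stacked_make_request(struct request_queue *q,
					     struct bio *bio)
	{
		bio_set_dev(bio, lower_bdev);
		bio->bi_iter.bi_sector += lower_offset;
		/* queued on current->bio_list instead of recursing */
		return generic_make_request(bio);
	}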
@@ -2533,6 +1140,13 @@
*/
blk_qc_t submit_bio(struct bio *bio)
{
+ bool workingset_read = false;
+ unsigned long pflags;
+ blk_qc_t ret;
+
+ if (blkcg_punt_bio_submit(bio))
+ return BLK_QC_T_NONE;
+
/*
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
@@ -2548,6 +1162,8 @@
if (op_is_write(bio_op(bio))) {
count_vm_events(PGPGOUT, count);
} else {
+ if (bio_flagged(bio, BIO_WORKINGSET))
+ workingset_read = true;
task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, count);
}
@@ -2562,21 +1178,24 @@
}
}
- return generic_make_request(bio);
+ /*
+ * If we're reading data that is part of the userspace
+ * workingset, count submission time as memory stall. When the
+ * device is congested, or the submitting cgroup IO-throttled,
+ * submission can be a significant part of overall IO time.
+ */
+ if (workingset_read)
+ psi_memstall_enter(&pflags);
+
+ ret = generic_make_request(bio);
+
+ if (workingset_read)
+ psi_memstall_leave(&pflags);
+
+ return ret;
}
EXPORT_SYMBOL(submit_bio);
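A self-contained caller sketch, assuming bdev, page and sector are already set up; submit_bio_wait() keeps the example synchronous, while asynchronous callers would set bi_end_io and use submit_bio():

	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int err;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	err = submit_bio_wait(bio);
	bio_put(bio);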
-bool blk_poll(struct request_queue *q, blk_qc_t cookie)
-{
- if (!q->poll_fn || !blk_qc_t_valid(cookie))
- return false;
-
- if (current->plug)
- blk_flush_plug_list(current->plug, false);
- return q->poll_fn(q, cookie);
-}
-EXPORT_SYMBOL_GPL(blk_poll);
-
/**
* blk_cloned_rq_check_limits - Helper function to check a cloned request
* for the new queue limits
@@ -2598,7 +1217,9 @@
struct request *rq)
{
if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
- printk(KERN_ERR "%s: over max size limit.\n", __func__);
+ printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
+ __func__, blk_rq_sectors(rq),
+ blk_queue_get_max_sectors(q, req_op(rq)));
return -EIO;
}
@@ -2608,9 +1229,10 @@
* Recalculate it to check the request correctly on this queue's
* limitation.
*/
- blk_recalc_rq_segments(rq);
+ rq->nr_phys_segments = blk_recalc_rq_segments(rq);
if (rq->nr_phys_segments > queue_max_segments(q)) {
- printk(KERN_ERR "%s: over max segments limit.\n", __func__);
+ printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
+ __func__, rq->nr_phys_segments, queue_max_segments(q));
return -EIO;
}
@@ -2624,9 +1246,6 @@
*/
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
- unsigned long flags;
- int where = ELEVATOR_INSERT_BACK;
-
if (blk_cloned_rq_check_limits(q, rq))
return BLK_STS_IOERR;
@@ -2634,38 +1253,15 @@
should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
return BLK_STS_IOERR;
- if (q->mq_ops) {
- if (blk_queue_io_stat(q))
- blk_account_io_start(rq, true);
- /*
- * Since we have a scheduler attached on the top device,
- * bypass a potential scheduler on the bottom device for
- * insert.
- */
- return blk_mq_request_issue_directly(rq);
- }
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (unlikely(blk_queue_dying(q))) {
- spin_unlock_irqrestore(q->queue_lock, flags);
- return BLK_STS_IOERR;
- }
+ if (blk_queue_io_stat(q))
+ blk_account_io_start(rq, true);
/*
- * Submitting request must be dequeued before calling this function
- * because it will be linked to another request_queue
+ * Since we have a scheduler attached on the top device,
+ * bypass a potential scheduler on the bottom device for
+ * insert.
*/
- BUG_ON(blk_queued_rq(rq));
-
- if (op_is_flush(rq->cmd_flags))
- where = ELEVATOR_INSERT_FLUSH;
-
- add_acct_request(q, rq, where);
- if (where == ELEVATOR_INSERT_FLUSH)
- __blk_run_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return BLK_STS_OK;
+ return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
@@ -2715,11 +1311,10 @@
if (blk_do_io_stat(req)) {
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
- int cpu;
- cpu = part_stat_lock();
+ part_stat_lock();
part = req->part;
- part_stat_add(cpu, part, sectors[sgrp], bytes >> 9);
+ part_stat_add(part, sectors[sgrp], bytes >> 9);
part_stat_unlock();
}
}
@@ -2734,14 +1329,14 @@
if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
- int cpu;
- cpu = part_stat_lock();
+ part_stat_lock();
part = req->part;
- part_stat_inc(cpu, part, ios[sgrp]);
- part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns);
- part_round_stats(req->q, cpu, part);
+ update_io_ticks(part, jiffies);
+ part_stat_inc(part, ios[sgrp]);
+ part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
+ part_stat_add(part, time_in_queue, nsecs_to_jiffies64(now - req->start_time_ns));
part_dec_in_flight(req->q, part, rq_data_dir(req));
hd_struct_put(part);
@@ -2749,44 +1344,19 @@
}
}
-#ifdef CONFIG_PM
-/*
- * Don't process normal requests when queue is suspended
- * or in the process of suspending/resuming
- */
-static bool blk_pm_allow_request(struct request *rq)
-{
- switch (rq->q->rpm_status) {
- case RPM_RESUMING:
- case RPM_SUSPENDING:
- return rq->rq_flags & RQF_PM;
- case RPM_SUSPENDED:
- return false;
- default:
- return true;
- }
-}
-#else
-static bool blk_pm_allow_request(struct request *rq)
-{
- return true;
-}
-#endif
-
void blk_account_io_start(struct request *rq, bool new_io)
{
struct hd_struct *part;
int rw = rq_data_dir(rq);
- int cpu;
if (!blk_do_io_stat(rq))
return;
- cpu = part_stat_lock();
+ part_stat_lock();
if (!new_io) {
part = rq->part;
- part_stat_inc(cpu, part, merges[rw]);
+ part_stat_inc(part, merges[rw]);
} else {
part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
if (!hd_struct_try_get(part)) {
@@ -2801,230 +1371,15 @@
part = &rq->rq_disk->part0;
hd_struct_get(part);
}
- part_round_stats(rq->q, cpu, part);
part_inc_in_flight(rq->q, part, rw);
rq->part = part;
}
+ update_io_ticks(part, jiffies);
+
part_stat_unlock();
}
-static struct request *elv_next_request(struct request_queue *q)
-{
- struct request *rq;
- struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
-
- WARN_ON_ONCE(q->mq_ops);
-
- while (1) {
- list_for_each_entry(rq, &q->queue_head, queuelist) {
- if (blk_pm_allow_request(rq))
- return rq;
-
- if (rq->rq_flags & RQF_SOFTBARRIER)
- break;
- }
-
- /*
- * Flush request is running and flush request isn't queueable
- * in the drive, we can hold the queue till flush request is
- * finished. Even we don't do this, driver can't dispatch next
- * requests and will requeue them. And this can improve
- * throughput too. For example, we have request flush1, write1,
- * flush 2. flush1 is dispatched, then queue is hold, write1
- * isn't inserted to queue. After flush1 is finished, flush2
- * will be dispatched. Since disk cache is already clean,
- * flush2 will be finished very soon, so looks like flush2 is
- * folded to flush1.
- * Since the queue is hold, a flag is set to indicate the queue
- * should be restarted later. Please see flush_end_io() for
- * details.
- */
- if (fq->flush_pending_idx != fq->flush_running_idx &&
- !queue_flush_queueable(q)) {
- fq->flush_queue_delayed = 1;
- return NULL;
- }
- if (unlikely(blk_queue_bypass(q)) ||
- !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
- return NULL;
- }
-}
-
-/**
- * blk_peek_request - peek at the top of a request queue
- * @q: request queue to peek at
- *
- * Description:
- * Return the request at the top of @q. The returned request
- * should be started using blk_start_request() before LLD starts
- * processing it.
- *
- * Return:
- * Pointer to the request at the top of @q if available. Null
- * otherwise.
- */
-struct request *blk_peek_request(struct request_queue *q)
-{
- struct request *rq;
- int ret;
-
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- while ((rq = elv_next_request(q)) != NULL) {
- if (!(rq->rq_flags & RQF_STARTED)) {
- /*
- * This is the first time the device driver
- * sees this request (possibly after
- * requeueing). Notify IO scheduler.
- */
- if (rq->rq_flags & RQF_SORTED)
- elv_activate_rq(q, rq);
-
- /*
- * just mark as started even if we don't start
- * it, a request that has been delayed should
- * not be passed by new incoming requests
- */
- rq->rq_flags |= RQF_STARTED;
- trace_block_rq_issue(q, rq);
- }
-
- if (!q->boundary_rq || q->boundary_rq == rq) {
- q->end_sector = rq_end_sector(rq);
- q->boundary_rq = NULL;
- }
-
- if (rq->rq_flags & RQF_DONTPREP)
- break;
-
- if (q->dma_drain_size && blk_rq_bytes(rq)) {
- /*
- * make sure space for the drain appears we
- * know we can do this because max_hw_segments
- * has been adjusted to be one fewer than the
- * device can handle
- */
- rq->nr_phys_segments++;
- }
-
- if (!q->prep_rq_fn)
- break;
-
- ret = q->prep_rq_fn(q, rq);
- if (ret == BLKPREP_OK) {
- break;
- } else if (ret == BLKPREP_DEFER) {
- /*
- * the request may have been (partially) prepped.
- * we need to keep this request in the front to
- * avoid resource deadlock. RQF_STARTED will
- * prevent other fs requests from passing this one.
- */
- if (q->dma_drain_size && blk_rq_bytes(rq) &&
- !(rq->rq_flags & RQF_DONTPREP)) {
- /*
- * remove the space for the drain we added
- * so that we don't add it again
- */
- --rq->nr_phys_segments;
- }
-
- rq = NULL;
- break;
- } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
- rq->rq_flags |= RQF_QUIET;
- /*
- * Mark this request as started so we don't trigger
- * any debug logic in the end I/O path.
- */
- blk_start_request(rq);
- __blk_end_request_all(rq, ret == BLKPREP_INVALID ?
- BLK_STS_TARGET : BLK_STS_IOERR);
- } else {
- printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
- break;
- }
- }
-
- return rq;
-}
-EXPORT_SYMBOL(blk_peek_request);
-
-static void blk_dequeue_request(struct request *rq)
-{
- struct request_queue *q = rq->q;
-
- BUG_ON(list_empty(&rq->queuelist));
- BUG_ON(ELV_ON_HASH(rq));
-
- list_del_init(&rq->queuelist);
-
- /*
- * the time frame between a request being removed from the lists
- * and to it is freed is accounted as io that is in progress at
- * the driver side.
- */
- if (blk_account_rq(rq))
- q->in_flight[rq_is_sync(rq)]++;
-}
-
-/**
- * blk_start_request - start request processing on the driver
- * @req: request to dequeue
- *
- * Description:
- * Dequeue @req and start timeout timer on it. This hands off the
- * request to the driver.
- */
-void blk_start_request(struct request *req)
-{
- lockdep_assert_held(req->q->queue_lock);
- WARN_ON_ONCE(req->q->mq_ops);
-
- blk_dequeue_request(req);
-
- if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
- req->io_start_time_ns = ktime_get_ns();
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- req->throtl_size = blk_rq_sectors(req);
-#endif
- req->rq_flags |= RQF_STATS;
- rq_qos_issue(req->q, req);
- }
-
- BUG_ON(blk_rq_is_complete(req));
- blk_add_timer(req);
-}
-EXPORT_SYMBOL(blk_start_request);
-
-/**
- * blk_fetch_request - fetch a request from a request queue
- * @q: request queue to fetch a request from
- *
- * Description:
- * Return the request at the top of @q. The request is started on
- * return and LLD can start processing it immediately.
- *
- * Return:
- * Pointer to the request at the top of @q if available. Null
- * otherwise.
- */
-struct request *blk_fetch_request(struct request_queue *q)
-{
- struct request *rq;
-
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- rq = blk_peek_request(q);
- if (rq)
- blk_start_request(rq);
- return rq;
-}
-EXPORT_SYMBOL(blk_fetch_request);
-
/*
* Steal bios from a request and add them to a bio list.
* The request must not have been partially completed before.
@@ -3059,7 +1414,7 @@
*
* This special helper function is only for request stacking drivers
* (e.g. request-based dm) so that they can handle partial completion.
- * Actual device drivers should use blk_end_request instead.
+ * Actual device drivers should use blk_mq_end_request instead.
*
* Passing the result of blk_rq_bytes() as @nr_bytes guarantees
* %false return from this function.
@@ -3082,9 +1437,15 @@
if (!req->bio)
return false;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
+ error == BLK_STS_OK)
+ req->q->integrity.profile->complete_fn(req, nr_bytes);
+#endif
+
if (unlikely(error && !blk_rq_is_passthrough(req) &&
!(req->rq_flags & RQF_QUIET)))
- print_req_error(req, error);
+ print_req_error(req, error, __func__);
blk_account_io_completion(req, nr_bytes);
@@ -3143,277 +1504,13 @@
}
/* recalculate the number of segments */
- blk_recalc_rq_segments(req);
+ req->nr_phys_segments = blk_recalc_rq_segments(req);
}
return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
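+
+/*
+ * Editorial sketch (not part of this patch): how a request stacking
+ * driver might consume blk_update_request() for partial completion.
+ * Ordinary drivers should just call blk_mq_end_request() as noted above;
+ * complete_some() and the requeue policy here are hypothetical.
+ */
+#if 0
+static void complete_some(struct request *rq, unsigned int nr_bytes)
+{
+        if (blk_update_request(rq, BLK_STS_OK, nr_bytes)) {
+                /* bytes left over - hand the remainder back to the queue */
+                blk_mq_requeue_request(rq, true);
+        } else {
+                /* passing blk_rq_bytes(rq) as @nr_bytes always lands here */
+                __blk_mq_end_request(rq, BLK_STS_OK);
+        }
+}
+#endif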
-static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes,
- unsigned int bidi_bytes)
-{
- if (blk_update_request(rq, error, nr_bytes))
- return true;
-
- /* Bidi request must be completed as a whole */
- if (unlikely(blk_bidi_rq(rq)) &&
- blk_update_request(rq->next_rq, error, bidi_bytes))
- return true;
-
- if (blk_queue_add_random(rq->q))
- add_disk_randomness(rq->rq_disk);
-
- return false;
-}
-
-/**
- * blk_unprep_request - unprepare a request
- * @req: the request
- *
- * This function makes a request ready for complete resubmission (or
- * completion). It happens only after all error handling is complete,
- * so represents the appropriate moment to deallocate any resources
- * that were allocated to the request in the prep_rq_fn. The queue
- * lock is held when calling this.
- */
-void blk_unprep_request(struct request *req)
-{
- struct request_queue *q = req->q;
-
- req->rq_flags &= ~RQF_DONTPREP;
- if (q->unprep_rq_fn)
- q->unprep_rq_fn(q, req);
-}
-EXPORT_SYMBOL_GPL(blk_unprep_request);
-
-void blk_finish_request(struct request *req, blk_status_t error)
-{
- struct request_queue *q = req->q;
- u64 now = ktime_get_ns();
-
- lockdep_assert_held(req->q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (req->rq_flags & RQF_STATS)
- blk_stat_add(req, now);
-
- if (req->rq_flags & RQF_QUEUED)
- blk_queue_end_tag(q, req);
-
- BUG_ON(blk_queued_rq(req));
-
- if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
- laptop_io_completion(req->q->backing_dev_info);
-
- blk_delete_timer(req);
-
- if (req->rq_flags & RQF_DONTPREP)
- blk_unprep_request(req);
-
- blk_account_io_done(req, now);
-
- if (req->end_io) {
- rq_qos_done(q, req);
- req->end_io(req, error);
- } else {
- if (blk_bidi_rq(req))
- __blk_put_request(req->next_rq->q, req->next_rq);
-
- __blk_put_request(q, req);
- }
-}
-EXPORT_SYMBOL(blk_finish_request);
-
-/**
- * blk_end_bidi_request - Complete a bidi request
- * @rq: the request to complete
- * @error: block status code
- * @nr_bytes: number of bytes to complete @rq
- * @bidi_bytes: number of bytes to complete @rq->next_rq
- *
- * Description:
- * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
- * Drivers that supports bidi can safely call this member for any
- * type of request, bidi or uni. In the later case @bidi_bytes is
- * just ignored.
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- **/
-static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes, unsigned int bidi_bytes)
-{
- struct request_queue *q = rq->q;
- unsigned long flags;
-
- WARN_ON_ONCE(q->mq_ops);
-
- if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
- return true;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_finish_request(rq, error);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return false;
-}
-
-/**
- * __blk_end_bidi_request - Complete a bidi request with queue lock held
- * @rq: the request to complete
- * @error: block status code
- * @nr_bytes: number of bytes to complete @rq
- * @bidi_bytes: number of bytes to complete @rq->next_rq
- *
- * Description:
- * Identical to blk_end_bidi_request() except that queue lock is
- * assumed to be locked on entry and remains so on return.
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- **/
-static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes, unsigned int bidi_bytes)
-{
- lockdep_assert_held(rq->q->queue_lock);
- WARN_ON_ONCE(rq->q->mq_ops);
-
- if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
- return true;
-
- blk_finish_request(rq, error);
-
- return false;
-}
-
-/**
- * blk_end_request - Helper function for drivers to complete the request.
- * @rq: the request being processed
- * @error: block status code
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- * Ends I/O on a number of bytes attached to @rq.
- * If @rq has leftover, sets it up for the next range of segments.
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- **/
-bool blk_end_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes)
-{
- WARN_ON_ONCE(rq->q->mq_ops);
- return blk_end_bidi_request(rq, error, nr_bytes, 0);
-}
-EXPORT_SYMBOL(blk_end_request);
-
-/**
- * blk_end_request_all - Helper function for drives to finish the request.
- * @rq: the request to finish
- * @error: block status code
- *
- * Description:
- * Completely finish @rq.
- */
-void blk_end_request_all(struct request *rq, blk_status_t error)
-{
- bool pending;
- unsigned int bidi_bytes = 0;
-
- if (unlikely(blk_bidi_rq(rq)))
- bidi_bytes = blk_rq_bytes(rq->next_rq);
-
- pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
- BUG_ON(pending);
-}
-EXPORT_SYMBOL(blk_end_request_all);
-
-/**
- * __blk_end_request - Helper function for drivers to complete the request.
- * @rq: the request being processed
- * @error: block status code
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- * Must be called with queue lock held unlike blk_end_request().
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- **/
-bool __blk_end_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes)
-{
- lockdep_assert_held(rq->q->queue_lock);
- WARN_ON_ONCE(rq->q->mq_ops);
-
- return __blk_end_bidi_request(rq, error, nr_bytes, 0);
-}
-EXPORT_SYMBOL(__blk_end_request);
-
-/**
- * __blk_end_request_all - Helper function for drives to finish the request.
- * @rq: the request to finish
- * @error: block status code
- *
- * Description:
- * Completely finish @rq. Must be called with queue lock held.
- */
-void __blk_end_request_all(struct request *rq, blk_status_t error)
-{
- bool pending;
- unsigned int bidi_bytes = 0;
-
- lockdep_assert_held(rq->q->queue_lock);
- WARN_ON_ONCE(rq->q->mq_ops);
-
- if (unlikely(blk_bidi_rq(rq)))
- bidi_bytes = blk_rq_bytes(rq->next_rq);
-
- pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
- BUG_ON(pending);
-}
-EXPORT_SYMBOL(__blk_end_request_all);
-
-/**
- * __blk_end_request_cur - Helper function to finish the current request chunk.
- * @rq: the request to finish the current chunk for
- * @error: block status code
- *
- * Description:
- * Complete the current consecutively mapped chunk from @rq. Must
- * be called with queue lock held.
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- */
-bool __blk_end_request_cur(struct request *rq, blk_status_t error)
-{
- return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
-}
-EXPORT_SYMBOL(__blk_end_request_cur);
-
-void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
- struct bio *bio)
-{
- if (bio_has_data(bio))
- rq->nr_phys_segments = bio_phys_segments(q, bio);
- else if (bio_op(bio) == REQ_OP_DISCARD)
- rq->nr_phys_segments = 1;
-
- rq->__data_len = bio->bi_iter.bi_size;
- rq->bio = rq->biotail = bio;
-
- if (bio->bi_disk)
- rq->rq_disk = bio->bi_disk;
-}
-
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
* rq_flush_dcache_pages - Helper function to flush all pages in a request
@@ -3454,8 +1551,8 @@
*/
int blk_lld_busy(struct request_queue *q)
{
- if (q->lld_busy_fn)
- return q->lld_busy_fn(q);
+ if (queue_is_mq(q) && q->mq_ops->busy)
+ return q->mq_ops->busy(q);
return 0;
}
@@ -3486,7 +1583,6 @@
*/
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
- dst->cpu = src->cpu;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
@@ -3579,6 +1675,15 @@
* @plug: The &struct blk_plug that needs to be initialized
*
* Description:
+ * blk_start_plug() indicates to the block layer an intent by the caller
+ * to submit multiple I/O requests in a batch. The block layer may use
+ * this hint to defer submitting I/Os from the caller until blk_finish_plug()
+ * is called. However, the block layer may choose to submit requests
+ * before a call to blk_finish_plug() if the number of queued I/Os
+ * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
+ * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
+ * the task schedules (see below).
+ *
* Tracking blk_plug inside the task_struct will help with auto-flushing the
* pending I/O should the task end up blocking between blk_start_plug() and
* blk_finish_plug(). This is important from a performance perspective, but
@@ -3598,9 +1703,11 @@
if (tsk->plug)
return;
- INIT_LIST_HEAD(&plug->list);
INIT_LIST_HEAD(&plug->mq_list);
INIT_LIST_HEAD(&plug->cb_list);
+ plug->rq_count = 0;
+ plug->multiple_queues = false;
+
/*
* Store ordering should not be needed here, since a potential
* preempt will imply a full memory barrier
@@ -3609,36 +1716,6 @@
}
EXPORT_SYMBOL(blk_start_plug);
-static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
-{
- struct request *rqa = container_of(a, struct request, queuelist);
- struct request *rqb = container_of(b, struct request, queuelist);
-
- return !(rqa->q < rqb->q ||
- (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
-}
-
-/*
- * If 'from_schedule' is true, then postpone the dispatch of requests
- * until a safe kblockd context. We due this to avoid accidental big
- * additional stack usage in driver dispatch, in places where the originally
- * plugger did not intend it.
- */
-static void queue_unplugged(struct request_queue *q, unsigned int depth,
- bool from_schedule)
- __releases(q->queue_lock)
-{
- lockdep_assert_held(q->queue_lock);
-
- trace_block_unplug(q, depth, !from_schedule);
-
- if (from_schedule)
- blk_run_queue_async(q);
- else
- __blk_run_queue(q);
- spin_unlock_irq(q->queue_lock);
-}
-
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
LIST_HEAD(callbacks);
@@ -3683,67 +1760,22 @@
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
- struct request_queue *q;
- struct request *rq;
- LIST_HEAD(list);
- unsigned int depth;
-
flush_plug_callbacks(plug, from_schedule);
if (!list_empty(&plug->mq_list))
blk_mq_flush_plug_list(plug, from_schedule);
-
- if (list_empty(&plug->list))
- return;
-
- list_splice_init(&plug->list, &list);
-
- list_sort(NULL, &list, plug_rq_cmp);
-
- q = NULL;
- depth = 0;
-
- while (!list_empty(&list)) {
- rq = list_entry_rq(list.next);
- list_del_init(&rq->queuelist);
- BUG_ON(!rq->q);
- if (rq->q != q) {
- /*
- * This drops the queue lock
- */
- if (q)
- queue_unplugged(q, depth, from_schedule);
- q = rq->q;
- depth = 0;
- spin_lock_irq(q->queue_lock);
- }
-
- /*
- * Short-circuit if @q is dead
- */
- if (unlikely(blk_queue_dying(q))) {
- __blk_end_request_all(rq, BLK_STS_IOERR);
- continue;
- }
-
- /*
- * rq is already accounted, so use raw insert
- */
- if (op_is_flush(rq->cmd_flags))
- __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
- else
- __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
-
- depth++;
- }
-
- /*
- * This drops the queue lock
- */
- if (q)
- queue_unplugged(q, depth, from_schedule);
}
+/**
+ * blk_finish_plug - mark the end of a batch of submitted I/O
+ * @plug: The &struct blk_plug passed to blk_start_plug()
+ *
+ * Description:
+ * Indicate that a batch of I/O submissions is complete. This function
+ * must be paired with an initial call to blk_start_plug(). The intent
+ * is to allow the block layer to optimize I/O submission. See the
+ * documentation for blk_start_plug() for more information.
+ */
void blk_finish_plug(struct blk_plug *plug)
{
if (plug != current->plug)
@@ -3754,191 +1786,6 @@
}
EXPORT_SYMBOL(blk_finish_plug);
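+
+/*
+ * Editorial sketch (not part of this patch): the intended pairing of
+ * blk_start_plug()/blk_finish_plug() around a batch of submissions, as
+ * described in the kernel-doc above.  bios[] and nr_bios are hypothetical.
+ */
+#if 0
+static void submit_batch(struct bio **bios, int nr_bios)
+{
+        struct blk_plug plug;
+        int i;
+
+        blk_start_plug(&plug);          /* queue I/O in the task's plug list */
+        for (i = 0; i < nr_bios; i++)
+                submit_bio(bios[i]);
+        blk_finish_plug(&plug);         /* flush the whole batch to the driver */
+}
+#endif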
-#ifdef CONFIG_PM
-/**
- * blk_pm_runtime_init - Block layer runtime PM initialization routine
- * @q: the queue of the device
- * @dev: the device the queue belongs to
- *
- * Description:
- * Initialize runtime-PM-related fields for @q and start auto suspend for
- * @dev. Drivers that want to take advantage of request-based runtime PM
- * should call this function after @dev has been initialized, and its
- * request queue @q has been allocated, and runtime PM for it can not happen
- * yet(either due to disabled/forbidden or its usage_count > 0). In most
- * cases, driver should call this function before any I/O has taken place.
- *
- * This function takes care of setting up using auto suspend for the device,
- * the autosuspend delay is set to -1 to make runtime suspend impossible
- * until an updated value is either set by user or by driver. Drivers do
- * not need to touch other autosuspend settings.
- *
- * The block layer runtime PM is request based, so only works for drivers
- * that use request as their IO unit instead of those directly use bio's.
- */
-void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
-{
- /* Don't enable runtime PM for blk-mq until it is ready */
- if (q->mq_ops) {
- pm_runtime_disable(dev);
- return;
- }
-
- q->dev = dev;
- q->rpm_status = RPM_ACTIVE;
- pm_runtime_set_autosuspend_delay(q->dev, -1);
- pm_runtime_use_autosuspend(q->dev);
-}
-EXPORT_SYMBOL(blk_pm_runtime_init);
-
-/**
- * blk_pre_runtime_suspend - Pre runtime suspend check
- * @q: the queue of the device
- *
- * Description:
- * This function will check if runtime suspend is allowed for the device
- * by examining if there are any requests pending in the queue. If there
- * are requests pending, the device can not be runtime suspended; otherwise,
- * the queue's status will be updated to SUSPENDING and the driver can
- * proceed to suspend the device.
- *
- * For the not allowed case, we mark last busy for the device so that
- * runtime PM core will try to autosuspend it some time later.
- *
- * This function should be called near the start of the device's
- * runtime_suspend callback.
- *
- * Return:
- * 0 - OK to runtime suspend the device
- * -EBUSY - Device should not be runtime suspended
- */
-int blk_pre_runtime_suspend(struct request_queue *q)
-{
- int ret = 0;
-
- if (!q->dev)
- return ret;
-
- spin_lock_irq(q->queue_lock);
- if (q->nr_pending) {
- ret = -EBUSY;
- pm_runtime_mark_last_busy(q->dev);
- } else {
- q->rpm_status = RPM_SUSPENDING;
- }
- spin_unlock_irq(q->queue_lock);
- return ret;
-}
-EXPORT_SYMBOL(blk_pre_runtime_suspend);
-
-/**
- * blk_post_runtime_suspend - Post runtime suspend processing
- * @q: the queue of the device
- * @err: return value of the device's runtime_suspend function
- *
- * Description:
- * Update the queue's runtime status according to the return value of the
- * device's runtime suspend function and mark last busy for the device so
- * that PM core will try to auto suspend the device at a later time.
- *
- * This function should be called near the end of the device's
- * runtime_suspend callback.
- */
-void blk_post_runtime_suspend(struct request_queue *q, int err)
-{
- if (!q->dev)
- return;
-
- spin_lock_irq(q->queue_lock);
- if (!err) {
- q->rpm_status = RPM_SUSPENDED;
- } else {
- q->rpm_status = RPM_ACTIVE;
- pm_runtime_mark_last_busy(q->dev);
- }
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_post_runtime_suspend);
-
-/**
- * blk_pre_runtime_resume - Pre runtime resume processing
- * @q: the queue of the device
- *
- * Description:
- * Update the queue's runtime status to RESUMING in preparation for the
- * runtime resume of the device.
- *
- * This function should be called near the start of the device's
- * runtime_resume callback.
- */
-void blk_pre_runtime_resume(struct request_queue *q)
-{
- if (!q->dev)
- return;
-
- spin_lock_irq(q->queue_lock);
- q->rpm_status = RPM_RESUMING;
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_pre_runtime_resume);
-
-/**
- * blk_post_runtime_resume - Post runtime resume processing
- * @q: the queue of the device
- * @err: return value of the device's runtime_resume function
- *
- * Description:
- * Update the queue's runtime status according to the return value of the
- * device's runtime_resume function. If it is successfully resumed, process
- * the requests that are queued into the device's queue when it is resuming
- * and then mark last busy and initiate autosuspend for it.
- *
- * This function should be called near the end of the device's
- * runtime_resume callback.
- */
-void blk_post_runtime_resume(struct request_queue *q, int err)
-{
- if (!q->dev)
- return;
-
- spin_lock_irq(q->queue_lock);
- if (!err) {
- q->rpm_status = RPM_ACTIVE;
- __blk_run_queue(q);
- pm_runtime_mark_last_busy(q->dev);
- pm_request_autosuspend(q->dev);
- } else {
- q->rpm_status = RPM_SUSPENDED;
- }
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_post_runtime_resume);
-
-/**
- * blk_set_runtime_active - Force runtime status of the queue to be active
- * @q: the queue of the device
- *
- * If the device is left runtime suspended during system suspend the resume
- * hook typically resumes the device and corrects runtime status
- * accordingly. However, that does not affect the queue runtime PM status
- * which is still "suspended". This prevents processing requests from the
- * queue.
- *
- * This function can be used in driver's resume hook to correct queue
- * runtime PM status and re-enable peeking requests from the queue. It
- * should be called before first request is added to the queue.
- */
-void blk_set_runtime_active(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- q->rpm_status = RPM_ACTIVE;
- pm_runtime_mark_last_busy(q->dev);
- pm_request_autosuspend(q->dev);
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_set_runtime_active);
-#endif
-
int __init blk_dev_init(void)
{
BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
@@ -3953,9 +1800,6 @@
if (!kblockd_workqueue)
panic("Failed to create kblockd\n");
- request_cachep = kmem_cache_create("blkdev_requests",
- sizeof(struct request), 0, SLAB_PANIC, NULL);
-
blk_requestq_cachep = kmem_cache_create("request_queue",
sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index f7b292f..1db44ca 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions related to setting various queue properties from drivers
*/
@@ -48,8 +49,6 @@
struct request *rq, int at_head,
rq_end_io_fn *done)
{
- int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
-
WARN_ON(irqs_disabled());
WARN_ON(!blk_rq_is_passthrough(rq));
@@ -60,23 +59,7 @@
* don't check dying flag for MQ because the request won't
* be reused after dying flag is set
*/
- if (q->mq_ops) {
- blk_mq_sched_insert_request(rq, at_head, true, false);
- return;
- }
-
- spin_lock_irq(q->queue_lock);
-
- if (unlikely(blk_queue_dying(q))) {
- rq->rq_flags |= RQF_QUIET;
- __blk_end_request_all(rq, BLK_STS_IOERR);
- spin_unlock_irq(q->queue_lock);
- return;
- }
-
- __elv_add_request(q, rq, where);
- __blk_run_queue(q);
- spin_unlock_irq(q->queue_lock);
+ blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ce41f66..1eec9cb 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions to sequence PREFLUSH and FUA writes.
*
* Copyright (C) 2011 Max Planck Institute for Gravitational Physics
* Copyright (C) 2011 Tejun Heo <tj@kernel.org>
*
- * This file is released under the GPLv2.
- *
 * REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisting of three
* optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
* properties and hardware capability.
@@ -93,7 +92,7 @@
FLUSH_PENDING_TIMEOUT = 5 * HZ,
};
-static bool blk_kick_flush(struct request_queue *q,
+static void blk_kick_flush(struct request_queue *q,
struct blk_flush_queue *fq, unsigned int flags);
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
@@ -132,18 +131,9 @@
rq->end_io = rq->flush.saved_end_io;
}
-static bool blk_flush_queue_rq(struct request *rq, bool add_front)
+static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
- if (rq->q->mq_ops) {
- blk_mq_add_to_requeue_list(rq, add_front, true);
- return false;
- } else {
- if (add_front)
- list_add(&rq->queuelist, &rq->q->queue_head);
- else
- list_add_tail(&rq->queuelist, &rq->q->queue_head);
- return true;
- }
+ blk_mq_add_to_requeue_list(rq, add_front, true);
}
/**
@@ -157,18 +147,17 @@
* completion and trigger the next step.
*
* CONTEXT:
- * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
+ * spin_lock_irq(fq->mq_flush_lock)
*
- * RETURNS:
- * %true if requests were added to the dispatch queue, %false otherwise.
*/
-static bool blk_flush_complete_seq(struct request *rq,
+static void blk_flush_complete_seq(struct request *rq,
struct blk_flush_queue *fq,
unsigned int seq, blk_status_t error)
{
struct request_queue *q = rq->q;
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
- bool queued = false, kicked;
unsigned int cmd_flags;
BUG_ON(rq->flush.seq & seq);
@@ -191,7 +180,7 @@
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
- queued = blk_flush_queue_rq(rq, true);
+ blk_flush_queue_rq(rq, true);
break;
case REQ_FSEQ_DONE:
@@ -204,42 +193,44 @@
BUG_ON(!list_empty(&rq->queuelist));
list_del_init(&rq->flush.list);
blk_flush_restore_request(rq);
- if (q->mq_ops)
- blk_mq_end_request(rq, error);
- else
- __blk_end_request_all(rq, error);
+ blk_mq_end_request(rq, error);
break;
default:
BUG();
}
- kicked = blk_kick_flush(q, fq, cmd_flags);
- return kicked | queued;
+ blk_kick_flush(q, fq, cmd_flags);
}
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
struct request_queue *q = flush_rq->q;
struct list_head *running;
- bool queued = false;
struct request *rq, *n;
unsigned long flags = 0;
struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
+ struct blk_mq_hw_ctx *hctx;
- if (q->mq_ops) {
- struct blk_mq_hw_ctx *hctx;
+ /* release the tag's ownership to the req cloned from */
+ spin_lock_irqsave(&fq->mq_flush_lock, flags);
- /* release the tag's ownership to the req cloned from */
- spin_lock_irqsave(&fq->mq_flush_lock, flags);
- hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
- if (!q->elevator) {
- blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
- flush_rq->tag = -1;
- } else {
- blk_mq_put_driver_tag_hctx(hctx, flush_rq);
- flush_rq->internal_tag = -1;
- }
+ if (!refcount_dec_and_test(&flush_rq->ref)) {
+ fq->rq_status = error;
+ spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+ return;
+ }
+
+ if (fq->rq_status != BLK_STS_OK)
+ error = fq->rq_status;
+
+ hctx = flush_rq->mq_hctx;
+ if (!q->elevator) {
+ blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
+ flush_rq->tag = -1;
+ } else {
+ blk_mq_put_driver_tag(flush_rq);
+ flush_rq->internal_tag = -1;
}
running = &fq->flush_queue[fq->flush_running_idx];
@@ -248,35 +239,16 @@
/* account completion of the flush request */
fq->flush_running_idx ^= 1;
- if (!q->mq_ops)
- elv_completed_request(q, flush_rq);
-
/* and push the waiting requests to the next stage */
list_for_each_entry_safe(rq, n, running, flush.list) {
unsigned int seq = blk_flush_cur_seq(rq);
BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
- queued |= blk_flush_complete_seq(rq, fq, seq, error);
+ blk_flush_complete_seq(rq, fq, seq, error);
}
- /*
- * Kick the queue to avoid stall for two cases:
- * 1. Moving a request silently to empty queue_head may stall the
- * queue.
- * 2. When flush request is running in non-queueable queue, the
- * queue is hold. Restart the queue after flush request is finished
- * to avoid stall.
- * This function is called from request completion path and calling
- * directly into request_fn may confuse the driver. Always use
- * kblockd.
- */
- if (queued || fq->flush_queue_delayed) {
- WARN_ON(q->mq_ops);
- blk_run_queue_async(q);
- }
fq->flush_queue_delayed = 0;
- if (q->mq_ops)
- spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+ spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}
/**
@@ -289,12 +261,10 @@
* Please read the comment at the top of this file for more info.
*
* CONTEXT:
- * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
+ * spin_lock_irq(fq->mq_flush_lock)
*
- * RETURNS:
- * %true if flush was issued, %false otherwise.
*/
-static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
+static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
unsigned int flags)
{
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
@@ -304,7 +274,7 @@
/* C1 described at the top of this file */
if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
- return false;
+ return;
/* C2 and C3
*
@@ -312,11 +282,10 @@
* assigned to empty flushes, and we deadlock if we are expecting
* other requests to make progress. Don't defer for that case.
*/
- if (!list_empty(&fq->flush_data_in_flight) &&
- !(q->mq_ops && q->elevator) &&
+ if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
time_before(jiffies,
fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
- return false;
+ return;
/*
* Issue flush and toggle pending_idx. This makes pending_idx
@@ -334,19 +303,15 @@
 * In case of IO scheduler, flush rq needs to borrow scheduler tag
* just for cheating put/get driver tag.
*/
- if (q->mq_ops) {
- struct blk_mq_hw_ctx *hctx;
+ flush_rq->mq_ctx = first_rq->mq_ctx;
+ flush_rq->mq_hctx = first_rq->mq_hctx;
- flush_rq->mq_ctx = first_rq->mq_ctx;
-
- if (!q->elevator) {
- fq->orig_rq = first_rq;
- flush_rq->tag = first_rq->tag;
- hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
- blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
- } else {
- flush_rq->internal_tag = first_rq->internal_tag;
- }
+ if (!q->elevator) {
+ fq->orig_rq = first_rq;
+ flush_rq->tag = first_rq->tag;
+ blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
+ } else {
+ flush_rq->internal_tag = first_rq->internal_tag;
}
flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
@@ -355,65 +320,20 @@
flush_rq->rq_disk = first_rq->rq_disk;
flush_rq->end_io = flush_end_io;
- return blk_flush_queue_rq(flush_rq, false);
-}
-
-static void flush_data_end_io(struct request *rq, blk_status_t error)
-{
- struct request_queue *q = rq->q;
- struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
-
- lockdep_assert_held(q->queue_lock);
-
- /*
- * Updating q->in_flight[] here for making this tag usable
- * early. Because in blk_queue_start_tag(),
- * q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
- * reserve tags for sync I/O.
- *
- * More importantly this way can avoid the following I/O
- * deadlock:
- *
- * - suppose there are 40 fua requests comming to flush queue
- * and queue depth is 31
- * - 30 rqs are scheduled then blk_queue_start_tag() can't alloc
- * tag for async I/O any more
- * - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
- * and flush_data_end_io() is called
- * - the other rqs still can't go ahead if not updating
- * q->in_flight[BLK_RW_ASYNC] here, meantime these rqs
- * are held in flush data queue and make no progress of
- * handling post flush rq
- * - only after the post flush rq is handled, all these rqs
- * can be completed
- */
-
- elv_completed_request(q, rq);
-
- /* for avoiding double accounting */
- rq->rq_flags &= ~RQF_STARTED;
-
- /*
- * After populating an empty queue, kick it to avoid stall. Read
- * the comment in flush_end_io().
- */
- if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
- blk_run_queue_async(q);
+ blk_flush_queue_rq(flush_rq, false);
}
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct blk_mq_ctx *ctx = rq->mq_ctx;
unsigned long flags;
struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
- hctx = blk_mq_map_queue(q, ctx->cpu);
-
if (q->elevator) {
WARN_ON(rq->tag < 0);
- blk_mq_put_driver_tag_hctx(hctx, rq);
+ blk_mq_put_driver_tag(rq);
}
/*
@@ -424,7 +344,7 @@
blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
- blk_mq_run_hw_queue(hctx, true);
+ blk_mq_sched_restart(hctx);
}
/**
@@ -443,9 +363,6 @@
unsigned int policy = blk_flush_policy(fflags, rq);
struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
- if (!q->mq_ops)
- lockdep_assert_held(q->queue_lock);
-
/*
* @policy now records what operations need to be done. Adjust
* REQ_PREFLUSH and FUA for the driver.
@@ -468,10 +385,7 @@
* complete the request.
*/
if (!policy) {
- if (q->mq_ops)
- blk_mq_end_request(rq, 0);
- else
- __blk_end_request(rq, 0, 0);
+ blk_mq_end_request(rq, 0);
return;
}
@@ -484,10 +398,7 @@
*/
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
- if (q->mq_ops)
- blk_mq_request_bypass_insert(rq, false);
- else
- list_add_tail(&rq->queuelist, &q->queue_head);
+ blk_mq_request_bypass_insert(rq, false);
return;
}
@@ -499,17 +410,12 @@
INIT_LIST_HEAD(&rq->flush.list);
rq->rq_flags |= RQF_FLUSH_SEQ;
rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
- if (q->mq_ops) {
- rq->end_io = mq_flush_data_end_io;
- spin_lock_irq(&fq->mq_flush_lock);
- blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
- spin_unlock_irq(&fq->mq_flush_lock);
- return;
- }
- rq->end_io = flush_data_end_io;
+ rq->end_io = mq_flush_data_end_io;
+ spin_lock_irq(&fq->mq_flush_lock);
blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
+ spin_unlock_irq(&fq->mq_flush_lock);
}
/**
@@ -566,20 +472,19 @@
EXPORT_SYMBOL(blkdev_issue_flush);
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
- int node, int cmd_size)
+ int node, int cmd_size, gfp_t flags)
{
struct blk_flush_queue *fq;
int rq_sz = sizeof(struct request);
- fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
+ fq = kzalloc_node(sizeof(*fq), flags, node);
if (!fq)
goto fail;
- if (q->mq_ops)
- spin_lock_init(&fq->mq_flush_lock);
+ spin_lock_init(&fq->mq_flush_lock);
rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
- fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
+ fq->flush_rq = kzalloc_node(rq_sz, flags, node);
if (!fq->flush_rq)
goto fail_rq;
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 6121611..ff1070e 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -1,23 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* blk-integrity.c - Block layer data integrity extensions
*
* Copyright (C) 2007, 2008 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
*/
#include <linux/blkdev.h>
@@ -49,12 +35,8 @@
bio_for_each_integrity_vec(iv, bio, iter) {
if (prev) {
- if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
+ if (!biovec_phys_mergeable(q, &ivprv, &iv))
goto new_segment;
-
- if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
- goto new_segment;
-
if (seg_size + iv.bv_len > queue_max_segment_size(q))
goto new_segment;
@@ -95,12 +77,8 @@
bio_for_each_integrity_vec(iv, bio, iter) {
if (prev) {
- if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
+ if (!biovec_phys_mergeable(q, &ivprv, &iv))
goto new_segment;
-
- if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
- goto new_segment;
-
if (sg->length + iv.bv_len > queue_max_segment_size(q))
goto new_segment;
@@ -373,6 +351,7 @@
&integrity_device_entry.attr,
NULL,
};
+ATTRIBUTE_GROUPS(integrity);
static const struct sysfs_ops integrity_ops = {
.show = &integrity_attr_show,
@@ -380,7 +359,7 @@
};
static struct kobj_type integrity_ktype = {
- .default_attrs = integrity_attrs,
+ .default_groups = integrity_groups,
.sysfs_ops = &integrity_ops,
};
@@ -389,10 +368,21 @@
return BLK_STS_OK;
}
+static void blk_integrity_nop_prepare(struct request *rq)
+{
+}
+
+static void blk_integrity_nop_complete(struct request *rq,
+ unsigned int nr_bytes)
+{
+}
+
static const struct blk_integrity_profile nop_profile = {
.name = "nop",
.generate_fn = blk_integrity_nop_fn,
.verify_fn = blk_integrity_nop_fn,
+ .prepare_fn = blk_integrity_nop_prepare,
+ .complete_fn = blk_integrity_nop_complete,
};
/**
@@ -404,7 +394,7 @@
* send/receive integrity metadata it must use this function to register
* the capability with the block layer. The template is a blk_integrity
* struct with values appropriate for the underlying hardware. See
- * Documentation/block/data-integrity.txt.
+ * Documentation/block/data-integrity.rst.
*/
void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
{
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 01580f8..5ed59ac 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -28,7 +28,6 @@
BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
atomic_long_inc(&ioc->refcount);
}
-EXPORT_SYMBOL(get_io_context);
static void icq_free_icq_rcu(struct rcu_head *head)
{
@@ -48,10 +47,8 @@
if (icq->flags & ICQ_EXITED)
return;
- if (et->uses_mq && et->ops.mq.exit_icq)
- et->ops.mq.exit_icq(icq);
- else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
- et->ops.sq.elevator_exit_icq_fn(icq);
+ if (et->ops.exit_icq)
+ et->ops.exit_icq(icq);
icq->flags |= ICQ_EXITED;
}
@@ -113,9 +110,9 @@
struct io_cq, ioc_node);
struct request_queue *q = icq->q;
- if (spin_trylock(q->queue_lock)) {
+ if (spin_trylock(&q->queue_lock)) {
ioc_destroy_icq(icq);
- spin_unlock(q->queue_lock);
+ spin_unlock(&q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
cpu_relax();
@@ -162,7 +159,6 @@
if (free_ioc)
kmem_cache_free(iocontext_cachep, ioc);
}
-EXPORT_SYMBOL(put_io_context);
/**
* put_io_context_active - put active reference on ioc
@@ -173,7 +169,6 @@
*/
void put_io_context_active(struct io_context *ioc)
{
- struct elevator_type *et;
unsigned long flags;
struct io_cq *icq;
@@ -187,25 +182,12 @@
* reverse double locking. Read comment in ioc_release_fn() for
* explanation on the nested locking annotation.
*/
-retry:
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
if (icq->flags & ICQ_EXITED)
continue;
- et = icq->q->elevator->type;
- if (et->uses_mq) {
- ioc_exit_icq(icq);
- } else {
- if (spin_trylock(icq->q->queue_lock)) {
- ioc_exit_icq(icq);
- spin_unlock(icq->q->queue_lock);
- } else {
- spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
- goto retry;
- }
- }
+ ioc_exit_icq(icq);
}
spin_unlock_irqrestore(&ioc->lock, flags);
@@ -232,7 +214,7 @@
while (!list_empty(icq_list)) {
struct io_cq *icq = list_entry(icq_list->next,
- struct io_cq, q_node);
+ struct io_cq, q_node);
struct io_context *ioc = icq->ioc;
spin_lock_irqsave(&ioc->lock, flags);
@@ -251,16 +233,11 @@
{
LIST_HEAD(icq_list);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
list_splice_init(&q->icq_list, &icq_list);
+ spin_unlock_irq(&q->queue_lock);
- if (q->mq_ops) {
- spin_unlock_irq(q->queue_lock);
- __ioc_clear_queue(&icq_list);
- } else {
- __ioc_clear_queue(&icq_list);
- spin_unlock_irq(q->queue_lock);
- }
+ __ioc_clear_queue(&icq_list);
}
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
@@ -336,7 +313,6 @@
return NULL;
}
-EXPORT_SYMBOL(get_task_io_context);
/**
* ioc_lookup_icq - lookup io_cq from ioc
@@ -350,7 +326,7 @@
{
struct io_cq *icq;
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
/*
* icq's are indexed from @ioc using radix tree and hint pointer,
@@ -409,16 +385,14 @@
INIT_HLIST_NODE(&icq->ioc_node);
/* lock both q and ioc and try to link @icq */
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
spin_lock(&ioc->lock);
if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
hlist_add_head(&icq->ioc_node, &ioc->icq_list);
list_add(&icq->q_node, &q->icq_list);
- if (et->uses_mq && et->ops.mq.init_icq)
- et->ops.mq.init_icq(icq);
- else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
- et->ops.sq.elevator_init_icq_fn(icq);
+ if (et->ops.init_icq)
+ et->ops.init_icq(icq);
} else {
kmem_cache_free(et->icq_cache, icq);
icq = ioc_lookup_icq(ioc, q);
@@ -427,7 +401,7 @@
}
spin_unlock(&ioc->lock);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
radix_tree_preload_end();
return icq;
}
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
new file mode 100644
index 0000000..e01267f
--- /dev/null
+++ b/block/blk-iocost.c
@@ -0,0 +1,2469 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * IO cost model based controller.
+ *
+ * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
+ * Copyright (C) 2019 Andy Newell <newella@fb.com>
+ * Copyright (C) 2019 Facebook
+ *
+ * One challenge of controlling IO resources is the lack of trivially
+ * observable cost metric. This is distinguished from CPU and memory where
+ * wallclock time and the number of bytes can serve as accurate enough
+ * approximations.
+ *
+ * Bandwidth and iops are the most commonly used metrics for IO devices but
+ * depending on the type and specifics of the device, different IO patterns
+ * easily lead to multiple orders of magnitude variations rendering them
+ * useless for the purpose of IO capacity distribution. While on-device
+ * time, with a lot of clutches, could serve as a useful approximation for
+ * non-queued rotational devices, this is no longer viable with modern
+ * devices, even the rotational ones.
+ *
+ * While there is no cost metric we can trivially observe, it isn't a
+ * complete mystery. For example, on a rotational device, seek cost
+ * dominates while a contiguous transfer contributes a smaller amount
+ * proportional to the size. If we can characterize at least the relative
+ * costs of these different types of IOs, it should be possible to
+ * implement a reasonable work-conserving proportional IO resource
+ * distribution.
+ *
+ * 1. IO Cost Model
+ *
+ * IO cost model estimates the cost of an IO given its basic parameters and
+ * history (e.g. the end sector of the last IO). The cost is measured in
+ * device time. If a given IO is estimated to cost 10ms, the device should
+ * be able to process ~100 of those IOs in a second.
+ *
+ * Currently, there's only one builtin cost model - linear. Each IO is
+ * classified as sequential or random and given a base cost accordingly.
+ * On top of that, a size cost proportional to the length of the IO is
+ * added. While simple, this model captures the operational
+ * characteristics of a wide variety of devices well enough.  Default
+ * parameters for several different classes of devices are provided and the
+ * parameters can be configured from userspace via
+ * /sys/fs/cgroup/io.cost.model.
+ *
+ * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
+ * device-specific coefficients.
+ *
+ * 2. Control Strategy
+ *
+ * The device virtual time (vtime) is used as the primary control metric.
+ * The control strategy is composed of the following three parts.
+ *
+ * 2-1. Vtime Distribution
+ *
+ * When a cgroup becomes active in terms of IOs, its hierarchical share is
+ * calculated. Please consider the following hierarchy where the numbers
+ * inside parentheses denote the configured weights.
+ *
+ * root
+ * / \
+ * A (w:100) B (w:300)
+ * / \
+ * A0 (w:100) A1 (w:100)
+ *
+ * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
+ * of equal weight, each gets 50% share. If then B starts issuing IOs, B
+ * gets 300/(100+300) or 75% share, and A0 and A1 equally splits the rest,
+ * 12.5% each. The distribution mechanism only cares about these flattened
+ * shares. They're called hweights (hierarchical weights) and always add
+ * up to 1 (HWEIGHT_WHOLE); a worked sketch of this flattening follows
+ * this comment block.
+ *
+ * A given cgroup's vtime runs slower in inverse proportion to its hweight.
+ * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
+ * against the device vtime - an IO which takes 10ms on the underlying
+ * device is considered to take 80ms on A0.
+ *
+ * This constitutes the basis of IO capacity distribution. Each cgroup's
+ * vtime is running at a rate determined by its hweight. A cgroup tracks
+ * the vtime consumed by past IOs and can issue a new IO iff doing so
+ * wouldn't outrun the current device vtime. Otherwise, the IO is
+ * suspended until the vtime has progressed enough to cover it.
+ *
+ * 2-2. Vrate Adjustment
+ *
+ * It's unrealistic to expect the cost model to be perfect. There are too
+ * many devices and even on the same device the overall performance
+ * fluctuates depending on numerous factors such as IO mixture and device
+ * internal garbage collection. The controller needs to adapt dynamically.
+ *
+ * This is achieved by adjusting the overall IO rate according to how busy
+ * the device is. If the device becomes overloaded, we're sending down too
+ * many IOs and should generally slow down. If there are waiting issuers
+ * but the device isn't saturated, we're issuing too few and should
+ * generally speed up.
+ *
+ * To slow down, we lower the vrate - the rate at which the device vtime
+ * passes compared to the wall clock. For example, if the vtime is running
+ * at the vrate of 75%, all cgroups added up would only be able to issue
+ * 750ms worth of IOs per second, and vice-versa for speeding up.
+ *
+ * Device busyness is determined using two criteria - rq wait and
+ * completion latencies.
+ *
+ * When a device gets saturated, the on-device and then the request queues
+ * fill up and a bio which is ready to be issued has to wait for a request
+ * to become available. When this delay becomes noticeable, it's a clear
+ * indication that the device is saturated and we lower the vrate. This
+ * saturation signal is fairly conservative as it only triggers when both
+ * hardware and software queues are filled up, and is used as the default
+ * busy signal.
+ *
+ * As devices can have deep queues and be unfair in how the queued commands
+ * are executed, solely depending on rq wait may not result in satisfactory
+ * control quality. For a better control quality, completion latency QoS
+ * parameters can be configured so that the device is considered saturated
+ * if N'th percentile completion latency rises above the set point.
+ *
+ * The completion latency requirements are a function of both the
+ * underlying device characteristics and the desired IO latency quality of
+ * service. There is an inherent trade-off - the tighter the latency QoS,
+ * the higher the bandwidth loss.  Latency QoS is disabled by default
+ * and can be set through /sys/fs/cgroup/io.cost.qos.
+ *
+ * 2-3. Work Conservation
+ *
+ * Imagine two cgroups A and B with equal weights. A is issuing a small IO
+ * periodically while B is sending out enough parallel IOs to saturate the
+ * device on its own. Let's say A's usage amounts to 100ms worth of IO
+ * cost per second, i.e., 10% of the device capacity. The naive
+ * distribution of half and half would lead to 60% utilization of the
+ * device, a significant reduction in the total amount of work done
+ * compared to free-for-all competition. This is too high a cost to pay
+ * for IO control.
+ *
+ * To conserve the total amount of work done, we keep track of how much
+ * each active cgroup is actually using and yield part of its weight if
+ * there are other cgroups which can make use of it. In the above case,
+ * A's weight will be lowered so that it hovers above the actual usage and
+ * B would be able to use the rest.
+ *
+ * As we don't want to penalize a cgroup for donating its weight, the
+ * surplus weight adjustment factors in a margin and has an immediate
+ * snapback mechanism in case the cgroup needs more IO vtime for itself.
+ *
+ * Note that adjusting down surplus weights has the same effects as
+ * accelerating vtime for other cgroups and work conservation can also be
+ * implemented by adjusting vrate dynamically.  However, working out who
+ * can donate and who should take back how much requires hweight
+ * propagation anyway, so this is easier to implement and understand as
+ * a separate mechanism.
+ *
+ * 3. Monitoring
+ *
+ * Instead of debugfs or other clumsy monitoring mechanisms, this
+ * controller uses a drgn based monitoring script -
+ * tools/cgroup/iocost_monitor.py. For details on drgn, please see
+ * https://github.com/osandov/drgn.  The output looks like the following.
+ *
+ * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
+ * active weight hweight% inflt% dbt delay usages%
+ * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
+ * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
+ *
+ * - per : Timer period
+ * - cur_per : Internal wall and device vtime clock
+ * - vrate : Device virtual time rate against wall clock
+ * - weight : Surplus-adjusted and configured weights
+ * - hweight : Surplus-adjusted and configured hierarchical weights
+ * - inflt : The percentage of in-flight IO cost at the end of last period
+ * - delay   : Deferred issuer delay induction level and duration
+ * - usages : Usage history
+ */
+
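+/*
+ * Editorial sketch (not part of this patch): the flattened-share
+ * (hweight) computation described in section 2-1 above, worked for the
+ * example hierarchy.  The iocg_example layout and hweight_of() helper
+ * are hypothetical; HWEIGHT_WHOLE mirrors the constant defined below.
+ */
+#if 0
+struct iocg_example {
+        u32 weight;                     /* configured weight at this level */
+        u32 active_sum;                 /* active sibling weights, incl. self */
+        struct iocg_example *parent;    /* NULL for the root */
+};
+
+static u32 hweight_of(struct iocg_example *iocg)
+{
+        u64 hw = HWEIGHT_WHOLE;         /* the root owns the whole device */
+
+        /* scale by each level's share while climbing towards the root */
+        for (; iocg->parent; iocg = iocg->parent)
+                hw = div64_u64(hw * iocg->weight, iocg->active_sum);
+
+        /* A0 above: 100/200 * 100/400 = 1/8 of HWEIGHT_WHOLE, i.e. 12.5% */
+        return hw;
+}
+#endif
+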
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/time64.h>
+#include <linux/parser.h>
+#include <linux/sched/signal.h>
+#include <linux/blk-cgroup.h>
+#include "blk-rq-qos.h"
+#include "blk-stat.h"
+#include "blk-wbt.h"
+
+#ifdef CONFIG_TRACEPOINTS
+
+/* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
+#define TRACE_IOCG_PATH_LEN 1024
+static DEFINE_SPINLOCK(trace_iocg_path_lock);
+static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
+
+#define TRACE_IOCG_PATH(type, iocg, ...) \
+ do { \
+ unsigned long flags; \
+ if (trace_iocost_##type##_enabled()) { \
+ spin_lock_irqsave(&trace_iocg_path_lock, flags); \
+ cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
+ trace_iocg_path, TRACE_IOCG_PATH_LEN); \
+ trace_iocost_##type(iocg, trace_iocg_path, \
+ ##__VA_ARGS__); \
+ spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
+ } \
+ } while (0)
+
+#else /* CONFIG_TRACEPOINTS */
+#define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
+#endif /* CONFIG_TRACEPOINTS */
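+
+/*
+ * Editorial note (not part of this patch): TRACE_IOCG_PATH() wraps an
+ * iocost tracepoint with the issuing cgroup's path resolved, e.g.
+ * something like TRACE_IOCG_PATH(iocg_activate, iocg, ...) when a cgroup
+ * becomes active; the actual tracepoint definitions live in
+ * include/trace/events/iocost.h.
+ */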
+
+enum {
+ MILLION = 1000000,
+
+ /* timer period is calculated from latency requirements, bound it */
+ MIN_PERIOD = USEC_PER_MSEC,
+ MAX_PERIOD = USEC_PER_SEC,
+
+ /*
+ * A cgroup's vtime can run 50% behind the device vtime, which
+ * serves as its IO credit buffer. Surplus weight adjustment is
+ * immediately canceled if the vtime margin runs below 10%.
+ */
+ MARGIN_PCT = 50,
+ INUSE_MARGIN_PCT = 10,
+
+ /* Have some play in waitq timer operations */
+ WAITQ_TIMER_MARGIN_PCT = 5,
+
+ /*
+ * vtime can wrap well within a reasonable uptime when vrate is
+ * consistently raised. Don't trust recorded cgroup vtime if the
+ * period counter indicates that it's older than 5mins.
+ */
+ VTIME_VALID_DUR = 300 * USEC_PER_SEC,
+
+ /*
+ * Remember the past three non-zero usages and use the max for
+ * surplus calculation. Three slots guarantee that we remember one
+ * full period usage from the last active stretch even after
+ * partial deactivation and re-activation periods. Don't start
+ * giving away weight before collecting two data points to prevent
+ * hweight adjustments based on one partial activation period.
+ */
+ NR_USAGE_SLOTS = 3,
+ MIN_VALID_USAGES = 2,
+
+ /* 1/64k is granular enough and can easily be handled w/ u32 */
+ HWEIGHT_WHOLE = 1 << 16,
+
+ /*
+ * As vtime is used to calculate the cost of each IO, it needs to
+ * be fairly high precision. For example, it should be able to
+ * represent the cost of a single page worth of discard with
+ * sufficient accuracy. At the same time, it should be able to
+ * represent reasonably long enough durations to be useful and
+ * convenient during operation.
+ *
+ * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
+ * granularity and days of wrap-around time even at extreme vrates.
+ */
+ VTIME_PER_SEC_SHIFT = 37,
+ VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
+ VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
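+ /*
+ * Illustrative arithmetic: one vtime tick is 1s / 2^37, well under a
+ * nanosecond, and a u64 vtime wraps after 2^64 / 2^37 = 2^27 seconds,
+ * about 4.25 years at nominal vrate and still ~15 days at a 10000%
+ * vrate.
+ */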
+
+ /* bound vrate adjustments within two orders of magnitude */
+ VRATE_MIN_PPM = 10000, /* 1% */
+ VRATE_MAX_PPM = 100000000, /* 10000% */
+
+ VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
+ VRATE_CLAMP_ADJ_PCT = 4,
+
+ /* if IOs end up waiting for requests, issue less */
+ RQ_WAIT_BUSY_PCT = 5,
+
+ /* unbusy hysteresis */
+ UNBUSY_THR_PCT = 75,
+
+ /* don't let cmds which take a very long time pin lagging for too long */
+ MAX_LAGGING_PERIODS = 10,
+
+ /*
+ * If usage% * 1.25 + 2% is lower than hweight% by more than 3%,
+ * donate the surplus.
+ */
+ SURPLUS_SCALE_PCT = 125, /* * 125% */
+ SURPLUS_SCALE_ABS = HWEIGHT_WHOLE / 50, /* + 2% */
+ SURPLUS_MIN_ADJ_DELTA = HWEIGHT_WHOLE / 33, /* 3% */
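+ /*
+ * e.g. (illustrative): a cgroup using 20% of the device gets a target
+ * of 20% * 125% + 2% = 27%. With hweight_inuse at 50%, 27% + 3% is
+ * still below 50%, so everything above 27% is up for donation.
+ */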
+
+ /* switch iff the conditions are met for longer than this */
+ AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
+
+ /*
+ * Count IO size in 4k pages. The 12bit shift helps keep the
+ * size-proportional components of the cost calculation within a
+ * similar number of digits to the per-IO cost components.
+ */
+ IOC_PAGE_SHIFT = 12,
+ IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
+ IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
+
+ /* if further apart than 16M, consider randio for the linear model */
+ LCOEF_RANDIO_PAGES = 4096,
+};
+
+enum ioc_running {
+ IOC_IDLE,
+ IOC_RUNNING,
+ IOC_STOP,
+};
+
+/* io.cost.qos controls including per-dev enable of the whole controller */
+enum {
+ QOS_ENABLE,
+ QOS_CTRL,
+ NR_QOS_CTRL_PARAMS,
+};
+
+/* io.cost.qos params */
+enum {
+ QOS_RPPM,
+ QOS_RLAT,
+ QOS_WPPM,
+ QOS_WLAT,
+ QOS_MIN,
+ QOS_MAX,
+ NR_QOS_PARAMS,
+};
+
+/* io.cost.model controls */
+enum {
+ COST_CTRL,
+ COST_MODEL,
+ NR_COST_CTRL_PARAMS,
+};
+
+/* builtin linear cost model coefficients */
+enum {
+ I_LCOEF_RBPS,
+ I_LCOEF_RSEQIOPS,
+ I_LCOEF_RRANDIOPS,
+ I_LCOEF_WBPS,
+ I_LCOEF_WSEQIOPS,
+ I_LCOEF_WRANDIOPS,
+ NR_I_LCOEFS,
+};
+
+enum {
+ LCOEF_RPAGE,
+ LCOEF_RSEQIO,
+ LCOEF_RRANDIO,
+ LCOEF_WPAGE,
+ LCOEF_WSEQIO,
+ LCOEF_WRANDIO,
+ NR_LCOEFS,
+};
+
+enum {
+ AUTOP_INVALID,
+ AUTOP_HDD,
+ AUTOP_SSD_QD1,
+ AUTOP_SSD_DFL,
+ AUTOP_SSD_FAST,
+};
+
+struct ioc_gq;
+
+struct ioc_params {
+ u32 qos[NR_QOS_PARAMS];
+ u64 i_lcoefs[NR_I_LCOEFS];
+ u64 lcoefs[NR_LCOEFS];
+ u32 too_fast_vrate_pct;
+ u32 too_slow_vrate_pct;
+};
+
+struct ioc_missed {
+ u32 nr_met;
+ u32 nr_missed;
+ u32 last_met;
+ u32 last_missed;
+};
+
+struct ioc_pcpu_stat {
+ struct ioc_missed missed[2];
+
+ u64 rq_wait_ns;
+ u64 last_rq_wait_ns;
+};
+
+/* per device */
+struct ioc {
+ struct rq_qos rqos;
+
+ bool enabled;
+
+ struct ioc_params params;
+ u32 period_us;
+ u32 margin_us;
+ u64 vrate_min;
+ u64 vrate_max;
+
+ spinlock_t lock;
+ struct timer_list timer;
+ struct list_head active_iocgs; /* active cgroups */
+ struct ioc_pcpu_stat __percpu *pcpu_stat;
+
+ enum ioc_running running;
+ atomic64_t vtime_rate;
+
+ seqcount_t period_seqcount;
+ u32 period_at; /* wallclock starttime */
+ u64 period_at_vtime; /* vtime starttime */
+
+ atomic64_t cur_period; /* inc'd each period */
+ int busy_level; /* saturation history */
+
+ u64 inuse_margin_vtime;
+ bool weights_updated;
+ atomic_t hweight_gen; /* for lazy hweights */
+
+ u64 autop_too_fast_at;
+ u64 autop_too_slow_at;
+ int autop_idx;
+ bool user_qos_params:1;
+ bool user_cost_model:1;
+};
+
+/* per device-cgroup pair */
+struct ioc_gq {
+ struct blkg_policy_data pd;
+ struct ioc *ioc;
+
+ /*
+ * A iocg can get its weight from two sources - an explicit
+ * per-device-cgroup configuration or the default weight of the
+ * cgroup. `cfg_weight` is the explicit per-device-cgroup
+ * configuration. `weight` is the effective considering both
+ * sources.
+ *
+ * When an idle cgroup becomes active its `active` goes from 0 to
+ * `weight`. `inuse` is the surplus adjusted active weight.
+ * `active` and `inuse` are used to calculate `hweight_active` and
+ * `hweight_inuse`.
+ *
+ * `last_inuse` remembers `inuse` while an iocg is idle to persist
+ * surplus adjustments.
+ */
+ u32 cfg_weight;
+ u32 weight;
+ u32 active;
+ u32 inuse;
+ u32 last_inuse;
+
+ sector_t cursor; /* to detect randio */
+
+ /*
+ * `vtime` is this iocg's vtime cursor which progresses as IOs are
+ * issued. If lagging behind device vtime, the delta represents
+ * the currently available IO budget. If running ahead, the
+ * overage.
+ *
+ * `vtime_done` is the same but progressed on completion rather
+ * than issue. The delta behind `vtime` represents the cost of
+ * currently in-flight IOs.
+ *
+ * `last_vtime` is used to remember `vtime` at the end of the last
+ * period to calculate utilization.
+ */
+ atomic64_t vtime;
+ atomic64_t done_vtime;
+ atomic64_t abs_vdebt;
+ u64 last_vtime;
+
+ /*
+ * The period this iocg was last active in. Used for deactivation
+ * and invalidating `vtime`.
+ */
+ atomic64_t active_period;
+ struct list_head active_list;
+
+ /* see __propagate_active_weight() and current_hweight() for details */
+ u64 child_active_sum;
+ u64 child_inuse_sum;
+ int hweight_gen;
+ u32 hweight_active;
+ u32 hweight_inuse;
+ bool has_surplus;
+
+ struct wait_queue_head waitq;
+ struct hrtimer waitq_timer;
+ struct hrtimer delay_timer;
+
+ /* usage is recorded as fractions of HWEIGHT_WHOLE */
+ int usage_idx;
+ u32 usages[NR_USAGE_SLOTS];
+
+ /* this iocg's depth in the hierarchy and ancestors including self */
+ int level;
+ struct ioc_gq *ancestors[];
+};
+
+/* per cgroup */
+struct ioc_cgrp {
+ struct blkcg_policy_data cpd;
+ unsigned int dfl_weight;
+};
+
+struct ioc_now {
+ u64 now_ns;
+ u32 now;
+ u64 vnow;
+ u64 vrate;
+};
+
+struct iocg_wait {
+ struct wait_queue_entry wait;
+ struct bio *bio;
+ u64 abs_cost;
+ bool committed;
+};
+
+struct iocg_wake_ctx {
+ struct ioc_gq *iocg;
+ u32 hw_inuse;
+ s64 vbudget;
+};
+
+static const struct ioc_params autop[] = {
+ [AUTOP_HDD] = {
+ .qos = {
+ [QOS_RLAT] = 250000, /* 250ms */
+ [QOS_WLAT] = 250000,
+ [QOS_MIN] = VRATE_MIN_PPM,
+ [QOS_MAX] = VRATE_MAX_PPM,
+ },
+ .i_lcoefs = {
+ [I_LCOEF_RBPS] = 174019176,
+ [I_LCOEF_RSEQIOPS] = 41708,
+ [I_LCOEF_RRANDIOPS] = 370,
+ [I_LCOEF_WBPS] = 178075866,
+ [I_LCOEF_WSEQIOPS] = 42705,
+ [I_LCOEF_WRANDIOPS] = 378,
+ },
+ },
+ [AUTOP_SSD_QD1] = {
+ .qos = {
+ [QOS_RLAT] = 25000, /* 25ms */
+ [QOS_WLAT] = 25000,
+ [QOS_MIN] = VRATE_MIN_PPM,
+ [QOS_MAX] = VRATE_MAX_PPM,
+ },
+ .i_lcoefs = {
+ [I_LCOEF_RBPS] = 245855193,
+ [I_LCOEF_RSEQIOPS] = 61575,
+ [I_LCOEF_RRANDIOPS] = 6946,
+ [I_LCOEF_WBPS] = 141365009,
+ [I_LCOEF_WSEQIOPS] = 33716,
+ [I_LCOEF_WRANDIOPS] = 26796,
+ },
+ },
+ [AUTOP_SSD_DFL] = {
+ .qos = {
+ [QOS_RLAT] = 25000, /* 25ms */
+ [QOS_WLAT] = 25000,
+ [QOS_MIN] = VRATE_MIN_PPM,
+ [QOS_MAX] = VRATE_MAX_PPM,
+ },
+ .i_lcoefs = {
+ [I_LCOEF_RBPS] = 488636629,
+ [I_LCOEF_RSEQIOPS] = 8932,
+ [I_LCOEF_RRANDIOPS] = 8518,
+ [I_LCOEF_WBPS] = 427891549,
+ [I_LCOEF_WSEQIOPS] = 28755,
+ [I_LCOEF_WRANDIOPS] = 21940,
+ },
+ .too_fast_vrate_pct = 500,
+ },
+ [AUTOP_SSD_FAST] = {
+ .qos = {
+ [QOS_RLAT] = 5000, /* 5ms */
+ [QOS_WLAT] = 5000,
+ [QOS_MIN] = VRATE_MIN_PPM,
+ [QOS_MAX] = VRATE_MAX_PPM,
+ },
+ .i_lcoefs = {
+ [I_LCOEF_RBPS] = 3102524156LLU,
+ [I_LCOEF_RSEQIOPS] = 724816,
+ [I_LCOEF_RRANDIOPS] = 778122,
+ [I_LCOEF_WBPS] = 1742780862LLU,
+ [I_LCOEF_WSEQIOPS] = 425702,
+ [I_LCOEF_WRANDIOPS] = 443193,
+ },
+ .too_slow_vrate_pct = 10,
+ },
+};
+
+/*
+ * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
+ * vtime credit shortage and down on device saturation.
+ */
+static u32 vrate_adj_pct[] =
+ { 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
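+/*
+ * e.g. (illustrative): |busy_level| == 20 indexes the 2% band, so vrate
+ * steps to 98% of itself on saturation and 102% on credit shortage.
+ */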
+
+static struct blkcg_policy blkcg_policy_iocost;
+
+/* accessors and helpers */
+static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
+{
+ return container_of(rqos, struct ioc, rqos);
+}
+
+static struct ioc *q_to_ioc(struct request_queue *q)
+{
+ return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
+}
+
+static const char *q_name(struct request_queue *q)
+{
+ if (test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
+ return kobject_name(q->kobj.parent);
+ else
+ return "<unknown>";
+}
+
+static const char __maybe_unused *ioc_name(struct ioc *ioc)
+{
+ return q_name(ioc->rqos.q);
+}
+
+static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
+}
+
+static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
+{
+ return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
+}
+
+static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
+{
+ return pd_to_blkg(&iocg->pd);
+}
+
+static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
+{
+ return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
+ struct ioc_cgrp, cpd);
+}
+
+/*
+ * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
+ * weight, the more expensive each IO. Must round up.
+ */
+static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
+{
+ return DIV64_U64_ROUND_UP(abs_cost * HWEIGHT_WHOLE, hw_inuse);
+}
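+/*
+ * e.g. (illustrative): with hw_inuse at HWEIGHT_WHOLE / 4, a 25%
+ * hierarchical share, the scaled cost is 4x abs_cost and the cgroup's
+ * vtime advances four times faster per unit of device work.
+ */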
+
+/*
+ * The inverse of abs_cost_to_cost(). Must round up.
+ */
+static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
+{
+ return DIV64_U64_ROUND_UP(cost * hw_inuse, HWEIGHT_WHOLE);
+}
+
+static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, u64 cost)
+{
+ bio->bi_iocost_cost = cost;
+ atomic64_add(cost, &iocg->vtime);
+}
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/iocost.h>
+
+/* latency QoS params changed, update period_us and all the dependent params */
+static void ioc_refresh_period_us(struct ioc *ioc)
+{
+ u32 ppm, lat, multi, period_us;
+
+ lockdep_assert_held(&ioc->lock);
+
+ /* pick the higher latency target */
+ if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
+ ppm = ioc->params.qos[QOS_RPPM];
+ lat = ioc->params.qos[QOS_RLAT];
+ } else {
+ ppm = ioc->params.qos[QOS_WPPM];
+ lat = ioc->params.qos[QOS_WLAT];
+ }
+
+ /*
+ * We want the period to be long enough to contain a healthy number
+ * of IOs while short enough for granular control. Define it as a
+ * multiple of the latency target. Ideally, the multiplier should
+ * be scaled according to the percentile so that it would nominally
+ * contain a certain number of requests. Let's be simpler and
+ * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
+ */
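+ /*
+ * e.g. (illustrative): pct(90) gives ppm = 900000 and
+ * multi = max((1000000 - 900000) / 50000, 2) = 2 while pct(50) gives
+ * multi = 10.
+ */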
+ if (ppm)
+ multi = max_t(u32, (MILLION - ppm) / 50000, 2);
+ else
+ multi = 2;
+ period_us = multi * lat;
+ period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
+
+ /* calculate dependent params */
+ ioc->period_us = period_us;
+ ioc->margin_us = period_us * MARGIN_PCT / 100;
+ ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
+ period_us * VTIME_PER_USEC * INUSE_MARGIN_PCT, 100);
+}
+
+static int ioc_autop_idx(struct ioc *ioc)
+{
+ int idx = ioc->autop_idx;
+ const struct ioc_params *p = &autop[idx];
+ u32 vrate_pct;
+ u64 now_ns;
+
+ /* rotational? */
+ if (!blk_queue_nonrot(ioc->rqos.q))
+ return AUTOP_HDD;
+
+ /* handle SATA SSDs w/ broken NCQ */
+ if (blk_queue_depth(ioc->rqos.q) == 1)
+ return AUTOP_SSD_QD1;
+
+ /* use one of the normal ssd sets */
+ if (idx < AUTOP_SSD_DFL)
+ return AUTOP_SSD_DFL;
+
+ /* if user is overriding anything, maintain what was there */
+ if (ioc->user_qos_params || ioc->user_cost_model)
+ return idx;
+
+ /* step up/down based on the vrate */
+ vrate_pct = div64_u64(atomic64_read(&ioc->vtime_rate) * 100,
+ VTIME_PER_USEC);
+ now_ns = ktime_get_ns();
+
+ if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
+ if (!ioc->autop_too_fast_at)
+ ioc->autop_too_fast_at = now_ns;
+ if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
+ return idx + 1;
+ } else {
+ ioc->autop_too_fast_at = 0;
+ }
+
+ if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
+ if (!ioc->autop_too_slow_at)
+ ioc->autop_too_slow_at = now_ns;
+ if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
+ return idx - 1;
+ } else {
+ ioc->autop_too_slow_at = 0;
+ }
+
+ return idx;
+}
+
+/*
+ * Takes the following as input
+ *
+ * @bps maximum sequential throughput
+ * @seqiops maximum sequential 4k iops
+ * @randiops maximum random 4k iops
+ *
+ * and calculate the linear model cost coefficients.
+ *
+ * *@page per-page cost 1s / (@bps / 4096)
+ * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
+ * *@randio base cost of a rand IO max((1s / @randiops) - *@page, 0)
+ */
+static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
+ u64 *page, u64 *seqio, u64 *randio)
+{
+ u64 v;
+
+ *page = *seqio = *randio = 0;
+
+ if (bps)
+ *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
+ DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
+
+ if (seqiops) {
+ v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
+ if (v > *page)
+ *seqio = v - *page;
+ }
+
+ if (randiops) {
+ v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
+ if (v > *page)
+ *randio = v - *page;
+ }
+}
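+/*
+ * e.g. (illustrative): 400MB/s of sequential throughput moves ~100k 4k
+ * pages per second, making *page roughly VTIME_PER_SEC / 100000. At 50k
+ * sequential iops, the raw per-IO cost is VTIME_PER_SEC / 50000 and
+ * *seqio is the difference between the two.
+ */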
+
+static void ioc_refresh_lcoefs(struct ioc *ioc)
+{
+ u64 *u = ioc->params.i_lcoefs;
+ u64 *c = ioc->params.lcoefs;
+
+ calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
+ &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
+ calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
+ &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
+}
+
+static bool ioc_refresh_params(struct ioc *ioc, bool force)
+{
+ const struct ioc_params *p;
+ int idx;
+
+ lockdep_assert_held(&ioc->lock);
+
+ idx = ioc_autop_idx(ioc);
+ p = &autop[idx];
+
+ if (idx == ioc->autop_idx && !force)
+ return false;
+
+ if (idx != ioc->autop_idx)
+ atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
+
+ ioc->autop_idx = idx;
+ ioc->autop_too_fast_at = 0;
+ ioc->autop_too_slow_at = 0;
+
+ if (!ioc->user_qos_params)
+ memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
+ if (!ioc->user_cost_model)
+ memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
+
+ ioc_refresh_period_us(ioc);
+ ioc_refresh_lcoefs(ioc);
+
+ ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
+ VTIME_PER_USEC, MILLION);
+ ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
+ VTIME_PER_USEC, MILLION);
+
+ return true;
+}
+
+/* take a snapshot of the current [v]time and vrate */
+static void ioc_now(struct ioc *ioc, struct ioc_now *now)
+{
+ unsigned seq;
+
+ now->now_ns = ktime_to_ns(ktime_get());
+ now->now = ktime_to_us(now->now_ns);
+ now->vrate = atomic64_read(&ioc->vtime_rate);
+
+ /*
+ * The current vtime is
+ *
+ * vtime at period start + (wallclock time since the start) * vrate
+ *
+ * As a consistent snapshot of `period_at_vtime` and `period_at` is
+ * needed, they're seqcount protected.
+ */
+ do {
+ seq = read_seqcount_begin(&ioc->period_seqcount);
+ now->vnow = ioc->period_at_vtime +
+ (now->now - ioc->period_at) * now->vrate;
+ } while (read_seqcount_retry(&ioc->period_seqcount, seq));
+}
+
+static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
+{
+ lockdep_assert_held(&ioc->lock);
+ WARN_ON_ONCE(ioc->running != IOC_RUNNING);
+
+ write_seqcount_begin(&ioc->period_seqcount);
+ ioc->period_at = now->now;
+ ioc->period_at_vtime = now->vnow;
+ write_seqcount_end(&ioc->period_seqcount);
+
+ ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
+ add_timer(&ioc->timer);
+}
+
+/*
+ * Update @iocg's `active` and `inuse` to @active and @inuse, update level
+ * weight sums and propagate upwards accordingly.
+ */
+static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse)
+{
+ struct ioc *ioc = iocg->ioc;
+ int lvl;
+
+ lockdep_assert_held(&ioc->lock);
+
+ inuse = min(active, inuse);
+
+ for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
+ struct ioc_gq *parent = iocg->ancestors[lvl];
+ struct ioc_gq *child = iocg->ancestors[lvl + 1];
+ u32 parent_active = 0, parent_inuse = 0;
+
+ /* update the level sums */
+ parent->child_active_sum += (s32)(active - child->active);
+ parent->child_inuse_sum += (s32)(inuse - child->inuse);
+ /* apply the updates */
+ child->active = active;
+ child->inuse = inuse;
+
+ /*
+ * The delta between the inuse and active sums indicates how much
+ * weight is being given away. The parent's inuse and active should
+ * reflect the ratio.
+ */
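+ /*
+ * e.g. (illustrative): child_active_sum = 200,
+ * child_inuse_sum = 150 and parent->weight = 100 yield
+ * parent_inuse = DIV64_U64_ROUND_UP(100 * 150, 200) = 75.
+ */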
+ if (parent->child_active_sum) {
+ parent_active = parent->weight;
+ parent_inuse = DIV64_U64_ROUND_UP(
+ parent_active * parent->child_inuse_sum,
+ parent->child_active_sum);
+ }
+
+ /* do we need to keep walking up? */
+ if (parent_active == parent->active &&
+ parent_inuse == parent->inuse)
+ break;
+
+ active = parent_active;
+ inuse = parent_inuse;
+ }
+
+ ioc->weights_updated = true;
+}
+
+static void commit_active_weights(struct ioc *ioc)
+{
+ lockdep_assert_held(&ioc->lock);
+
+ if (ioc->weights_updated) {
+ /* paired with rmb in current_hweight(), see there */
+ smp_wmb();
+ atomic_inc(&ioc->hweight_gen);
+ ioc->weights_updated = false;
+ }
+}
+
+static void propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse)
+{
+ __propagate_active_weight(iocg, active, inuse);
+ commit_active_weights(iocg->ioc);
+}
+
+static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
+{
+ struct ioc *ioc = iocg->ioc;
+ int lvl;
+ u32 hwa, hwi;
+ int ioc_gen;
+
+ /* hot path - if uptodate, use cached */
+ ioc_gen = atomic_read(&ioc->hweight_gen);
+ if (ioc_gen == iocg->hweight_gen)
+ goto out;
+
+ /*
+ * Paired with wmb in commit_active_weights(). If we saw the
+ * updated hweight_gen, all the weight updates from
+ * __propagate_active_weight() are visible too.
+ *
+ * We can race with weight updates during calculation and get it
+ * wrong. However, hweight_gen would have changed and a future
+ * reader will recalculate and we're guaranteed to discard the
+ * wrong result soon.
+ */
+ smp_rmb();
+
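+ /*
+ * e.g. (illustrative): a leaf holding 100 of a 400 active sum at
+ * each of two levels ends up with hweight_active of
+ * HWEIGHT_WHOLE * 1/4 * 1/4, a 6.25% hierarchical share.
+ */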
+ hwa = hwi = HWEIGHT_WHOLE;
+ for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
+ struct ioc_gq *parent = iocg->ancestors[lvl];
+ struct ioc_gq *child = iocg->ancestors[lvl + 1];
+ u32 active_sum = READ_ONCE(parent->child_active_sum);
+ u32 inuse_sum = READ_ONCE(parent->child_inuse_sum);
+ u32 active = READ_ONCE(child->active);
+ u32 inuse = READ_ONCE(child->inuse);
+
+ /* we can race with deactivations and either may read as zero */
+ if (!active_sum || !inuse_sum)
+ continue;
+
+ active_sum = max(active, active_sum);
+ hwa = hwa * active / active_sum; /* max 16bits * 10000 */
+
+ inuse_sum = max(inuse, inuse_sum);
+ hwi = hwi * inuse / inuse_sum; /* max 16bits * 10000 */
+ }
+
+ iocg->hweight_active = max_t(u32, hwa, 1);
+ iocg->hweight_inuse = max_t(u32, hwi, 1);
+ iocg->hweight_gen = ioc_gen;
+out:
+ if (hw_activep)
+ *hw_activep = iocg->hweight_active;
+ if (hw_inusep)
+ *hw_inusep = iocg->hweight_inuse;
+}
+
+static void weight_updated(struct ioc_gq *iocg)
+{
+ struct ioc *ioc = iocg->ioc;
+ struct blkcg_gq *blkg = iocg_to_blkg(iocg);
+ struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
+ u32 weight;
+
+ lockdep_assert_held(&ioc->lock);
+
+ weight = iocg->cfg_weight ?: iocc->dfl_weight;
+ if (weight != iocg->weight && iocg->active)
+ propagate_active_weight(iocg, weight,
+ DIV64_U64_ROUND_UP(iocg->inuse * weight, iocg->weight));
+ iocg->weight = weight;
+}
+
+static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
+{
+ struct ioc *ioc = iocg->ioc;
+ u64 last_period, cur_period, max_period_delta;
+ u64 vtime, vmargin, vmin;
+ int i;
+
+ /*
+ * If we seem to be already active, just update the stamp to tell the
+ * timer that we're still active. We don't mind occasional races.
+ */
+ if (!list_empty(&iocg->active_list)) {
+ ioc_now(ioc, now);
+ cur_period = atomic64_read(&ioc->cur_period);
+ if (atomic64_read(&iocg->active_period) != cur_period)
+ atomic64_set(&iocg->active_period, cur_period);
+ return true;
+ }
+
+ /* racy check on internal node IOs, treat as root level IOs */
+ if (iocg->child_active_sum)
+ return false;
+
+ spin_lock_irq(&ioc->lock);
+
+ ioc_now(ioc, now);
+
+ /* update period */
+ cur_period = atomic64_read(&ioc->cur_period);
+ last_period = atomic64_read(&iocg->active_period);
+ atomic64_set(&iocg->active_period, cur_period);
+
+ /* already activated or breaking leaf-only constraint? */
+ if (!list_empty(&iocg->active_list))
+ goto succeed_unlock;
+ for (i = iocg->level - 1; i > 0; i--)
+ if (!list_empty(&iocg->ancestors[i]->active_list))
+ goto fail_unlock;
+
+ if (iocg->child_active_sum)
+ goto fail_unlock;
+
+ /*
+ * vtime may wrap when vrate is raised substantially due to
+ * underestimated IO costs. Look at the period and ignore its
+ * vtime if the iocg has been idle for too long. Also, cap the
+ * budget it can start with to the margin.
+ */
+ max_period_delta = DIV64_U64_ROUND_UP(VTIME_VALID_DUR, ioc->period_us);
+ vtime = atomic64_read(&iocg->vtime);
+ vmargin = ioc->margin_us * now->vrate;
+ vmin = now->vnow - vmargin;
+
+ if (last_period + max_period_delta < cur_period ||
+ time_before64(vtime, vmin)) {
+ atomic64_add(vmin - vtime, &iocg->vtime);
+ atomic64_add(vmin - vtime, &iocg->done_vtime);
+ vtime = vmin;
+ }
+
+ /*
+ * Activate, propagate weight and start period timer if not
+ * running. Reset hweight_gen to avoid accidental match from
+ * wrapping.
+ */
+ iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
+ list_add(&iocg->active_list, &ioc->active_iocgs);
+ propagate_active_weight(iocg, iocg->weight,
+ iocg->last_inuse ?: iocg->weight);
+
+ TRACE_IOCG_PATH(iocg_activate, iocg, now,
+ last_period, cur_period, vtime);
+
+ iocg->last_vtime = vtime;
+
+ if (ioc->running == IOC_IDLE) {
+ ioc->running = IOC_RUNNING;
+ ioc_start_period(ioc, now);
+ }
+
+succeed_unlock:
+ spin_unlock_irq(&ioc->lock);
+ return true;
+
+fail_unlock:
+ spin_unlock_irq(&ioc->lock);
+ return false;
+}
+
+static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
+ int flags, void *key)
+{
+ struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
+ struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
+ u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
+
+ ctx->vbudget -= cost;
+
+ if (ctx->vbudget < 0)
+ return -1;
+
+ iocg_commit_bio(ctx->iocg, wait->bio, cost);
+
+ /*
+ * autoremove_wake_function() removes the wait entry only when it
+ * actually changed the task state. We want the wait always
+ * removed. Remove explicitly and use default_wake_function().
+ */
+ list_del_init(&wq_entry->entry);
+ wait->committed = true;
+
+ default_wake_function(wq_entry, mode, flags, key);
+ return 0;
+}
+
+static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
+{
+ struct ioc *ioc = iocg->ioc;
+ struct iocg_wake_ctx ctx = { .iocg = iocg };
+ u64 margin_ns = (u64)(ioc->period_us *
+ WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC;
+ u64 abs_vdebt, vdebt, vshortage, expires, oexpires;
+ s64 vbudget;
+ u32 hw_inuse;
+
+ lockdep_assert_held(&iocg->waitq.lock);
+
+ current_hweight(iocg, NULL, &hw_inuse);
+ vbudget = now->vnow - atomic64_read(&iocg->vtime);
+
+ /* pay off debt */
+ abs_vdebt = atomic64_read(&iocg->abs_vdebt);
+ vdebt = abs_cost_to_cost(abs_vdebt, hw_inuse);
+ if (vdebt && vbudget > 0) {
+ u64 delta = min_t(u64, vbudget, vdebt);
+ u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse),
+ abs_vdebt);
+
+ atomic64_add(delta, &iocg->vtime);
+ atomic64_add(delta, &iocg->done_vtime);
+ atomic64_sub(abs_delta, &iocg->abs_vdebt);
+ if (WARN_ON_ONCE(atomic64_read(&iocg->abs_vdebt) < 0))
+ atomic64_set(&iocg->abs_vdebt, 0);
+ }
+
+ /*
+ * Wake up the ones which are due and see how much vtime we'll need
+ * for the next one.
+ */
+ ctx.hw_inuse = hw_inuse;
+ ctx.vbudget = vbudget - vdebt;
+ __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
+ if (!waitqueue_active(&iocg->waitq))
+ return;
+ if (WARN_ON_ONCE(ctx.vbudget >= 0))
+ return;
+
+ /* determine next wakeup, add a quarter margin to guarantee chunking */
+ vshortage = -ctx.vbudget;
+ expires = now->now_ns +
+ DIV64_U64_ROUND_UP(vshortage, now->vrate) * NSEC_PER_USEC;
+ expires += margin_ns / 4;
+
+ /* if already active and close enough, don't bother */
+ oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
+ if (hrtimer_is_queued(&iocg->waitq_timer) &&
+ abs(oexpires - expires) <= margin_ns / 4)
+ return;
+
+ hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
+ margin_ns / 4, HRTIMER_MODE_ABS);
+}
+
+static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
+{
+ struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
+ struct ioc_now now;
+ unsigned long flags;
+
+ ioc_now(iocg->ioc, &now);
+
+ spin_lock_irqsave(&iocg->waitq.lock, flags);
+ iocg_kick_waitq(iocg, &now);
+ spin_unlock_irqrestore(&iocg->waitq.lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+{
+ struct ioc *ioc = iocg->ioc;
+ struct blkcg_gq *blkg = iocg_to_blkg(iocg);
+ u64 vtime = atomic64_read(&iocg->vtime);
+ u64 vmargin = ioc->margin_us * now->vrate;
+ u64 margin_ns = ioc->margin_us * NSEC_PER_USEC;
+ u64 expires, oexpires;
+ u32 hw_inuse;
+
+ /* debt-adjust vtime */
+ current_hweight(iocg, NULL, &hw_inuse);
+ vtime += abs_cost_to_cost(atomic64_read(&iocg->abs_vdebt), hw_inuse);
+
+ /* clear or maintain depending on the overage */
+ if (time_before_eq64(vtime, now->vnow)) {
+ blkcg_clear_delay(blkg);
+ return;
+ }
+ if (!atomic_read(&blkg->use_delay) &&
+ time_before_eq64(vtime, now->vnow + vmargin))
+ return;
+
+ /* use delay */
+ if (cost) {
+ u64 cost_ns = DIV64_U64_ROUND_UP(cost * NSEC_PER_USEC,
+ now->vrate);
+ blkcg_add_delay(blkg, now->now_ns, cost_ns);
+ }
+ blkcg_use_delay(blkg);
+
+ expires = now->now_ns + DIV64_U64_ROUND_UP(vtime - now->vnow,
+ now->vrate) * NSEC_PER_USEC;
+
+ /* if already active and close enough, don't bother */
+ oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
+ if (hrtimer_is_queued(&iocg->delay_timer) &&
+ abs(oexpires - expires) <= margin_ns / 4)
+ return;
+
+ hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
+ margin_ns / 4, HRTIMER_MODE_ABS);
+}
+
+static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
+{
+ struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer);
+ struct ioc_now now;
+
+ ioc_now(iocg->ioc, &now);
+ iocg_kick_delay(iocg, &now, 0);
+
+ return HRTIMER_NORESTART;
+}
+
+static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
+{
+ u32 nr_met[2] = { };
+ u32 nr_missed[2] = { };
+ u64 rq_wait_ns = 0;
+ int cpu, rw;
+
+ for_each_online_cpu(cpu) {
+ struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
+ u64 this_rq_wait_ns;
+
+ for (rw = READ; rw <= WRITE; rw++) {
+ u32 this_met = READ_ONCE(stat->missed[rw].nr_met);
+ u32 this_missed = READ_ONCE(stat->missed[rw].nr_missed);
+
+ nr_met[rw] += this_met - stat->missed[rw].last_met;
+ nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
+ stat->missed[rw].last_met = this_met;
+ stat->missed[rw].last_missed = this_missed;
+ }
+
+ this_rq_wait_ns = READ_ONCE(stat->rq_wait_ns);
+ rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
+ stat->last_rq_wait_ns = this_rq_wait_ns;
+ }
+
+ for (rw = READ; rw <= WRITE; rw++) {
+ if (nr_met[rw] + nr_missed[rw])
+ missed_ppm_ar[rw] =
+ DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
+ nr_met[rw] + nr_missed[rw]);
+ else
+ missed_ppm_ar[rw] = 0;
+ }
+
+ *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
+ ioc->period_us * NSEC_PER_USEC);
+}
+
+/* was iocg idle this period? */
+static bool iocg_is_idle(struct ioc_gq *iocg)
+{
+ struct ioc *ioc = iocg->ioc;
+
+ /* did something get issued this period? */
+ if (atomic64_read(&iocg->active_period) ==
+ atomic64_read(&ioc->cur_period))
+ return false;
+
+ /* is something in flight? */
+ if (atomic64_read(&iocg->done_vtime) < atomic64_read(&iocg->vtime))
+ return false;
+
+ return true;
+}
+
+/* returns usage with margin added if surplus is large enough */
+static u32 surplus_adjusted_hweight_inuse(u32 usage, u32 hw_inuse)
+{
+ /* add margin */
+ usage = DIV_ROUND_UP(usage * SURPLUS_SCALE_PCT, 100);
+ usage += SURPLUS_SCALE_ABS;
+
+ /* don't bother if the surplus is too small */
+ if (usage + SURPLUS_MIN_ADJ_DELTA > hw_inuse)
+ return 0;
+
+ return usage;
+}
+
+static void ioc_timer_fn(struct timer_list *timer)
+{
+ struct ioc *ioc = container_of(timer, struct ioc, timer);
+ struct ioc_gq *iocg, *tiocg;
+ struct ioc_now now;
+ int nr_surpluses = 0, nr_shortages = 0, nr_lagging = 0;
+ u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
+ u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
+ u32 missed_ppm[2], rq_wait_pct;
+ u64 period_vtime;
+ int prev_busy_level, i;
+
+ /* how were the latencies during the period? */
+ ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
+
+ /* take care of active iocgs */
+ spin_lock_irq(&ioc->lock);
+
+ ioc_now(ioc, &now);
+
+ period_vtime = now.vnow - ioc->period_at_vtime;
+ if (WARN_ON_ONCE(!period_vtime)) {
+ spin_unlock_irq(&ioc->lock);
+ return;
+ }
+
+ /*
+ * Waiters determine the sleep durations based on the vrate they
+ * saw at the time of sleep. If vrate has increased, some waiters
+ * could be sleeping for too long. Wake up tardy waiters which
+ * should have woken up in the last period and expire idle iocgs.
+ */
+ list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
+ if (!waitqueue_active(&iocg->waitq) &&
+ !atomic64_read(&iocg->abs_vdebt) && !iocg_is_idle(iocg))
+ continue;
+
+ spin_lock(&iocg->waitq.lock);
+
+ if (waitqueue_active(&iocg->waitq) ||
+ atomic64_read(&iocg->abs_vdebt)) {
+ /* might be oversleeping vtime / hweight changes, kick */
+ iocg_kick_waitq(iocg, &now);
+ iocg_kick_delay(iocg, &now, 0);
+ } else if (iocg_is_idle(iocg)) {
+ /* no waiter and idle, deactivate */
+ iocg->last_inuse = iocg->inuse;
+ __propagate_active_weight(iocg, 0, 0);
+ list_del_init(&iocg->active_list);
+ }
+
+ spin_unlock(&iocg->waitq.lock);
+ }
+ commit_active_weights(ioc);
+
+ /* calc usages and see whether some weights need to be moved around */
+ list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
+ u64 vdone, vtime, vusage, vmargin, vmin;
+ u32 hw_active, hw_inuse, usage;
+
+ /*
+ * Collect unused and wind vtime closer to vnow to prevent
+ * iocgs from accumulating a large amount of budget.
+ */
+ vdone = atomic64_read(&iocg->done_vtime);
+ vtime = atomic64_read(&iocg->vtime);
+ current_hweight(iocg, &hw_active, &hw_inuse);
+
+ /*
+ * Latency QoS detection doesn't account for IOs which are
+ * in-flight for longer than a period. Detect them by
+ * comparing vdone against period start. If lagging behind
+ * IOs from past periods, don't increase vrate.
+ */
+ if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
+ !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
+ time_after64(vtime, vdone) &&
+ time_after64(vtime, now.vnow -
+ MAX_LAGGING_PERIODS * period_vtime) &&
+ time_before64(vdone, now.vnow - period_vtime))
+ nr_lagging++;
+
+ if (waitqueue_active(&iocg->waitq))
+ vusage = now.vnow - iocg->last_vtime;
+ else if (time_before64(iocg->last_vtime, vtime))
+ vusage = vtime - iocg->last_vtime;
+ else
+ vusage = 0;
+
+ iocg->last_vtime += vusage;
+ /*
+ * Factor in-flight vtime into vusage to avoid high-latency
+ * completions appearing as idle. This should be done after the
+ * above ->last_vtime adjustment.
+ */
+ vusage = max(vusage, vtime - vdone);
+
+ /* calculate hweight based usage ratio and record */
+ if (vusage) {
+ usage = DIV64_U64_ROUND_UP(vusage * hw_inuse,
+ period_vtime);
+ iocg->usage_idx = (iocg->usage_idx + 1) % NR_USAGE_SLOTS;
+ iocg->usages[iocg->usage_idx] = usage;
+ } else {
+ usage = 0;
+ }
+
+ /* see whether there's surplus vtime */
+ vmargin = ioc->margin_us * now.vrate;
+ vmin = now.vnow - vmargin;
+
+ iocg->has_surplus = false;
+
+ if (!waitqueue_active(&iocg->waitq) &&
+ time_before64(vtime, vmin)) {
+ u64 delta = vmin - vtime;
+
+ /* throw away surplus vtime */
+ atomic64_add(delta, &iocg->vtime);
+ atomic64_add(delta, &iocg->done_vtime);
+ iocg->last_vtime += delta;
+ /* if usage is sufficiently low, maybe it can donate */
+ if (surplus_adjusted_hweight_inuse(usage, hw_inuse)) {
+ iocg->has_surplus = true;
+ nr_surpluses++;
+ }
+ } else if (hw_inuse < hw_active) {
+ u32 new_hwi, new_inuse;
+
+ /* was donating but might need to take back some */
+ if (waitqueue_active(&iocg->waitq)) {
+ new_hwi = hw_active;
+ } else {
+ new_hwi = max(hw_inuse,
+ usage * SURPLUS_SCALE_PCT / 100 +
+ SURPLUS_SCALE_ABS);
+ }
+
+ new_inuse = div64_u64((u64)iocg->inuse * new_hwi,
+ hw_inuse);
+ new_inuse = clamp_t(u32, new_inuse, 1, iocg->active);
+
+ if (new_inuse > iocg->inuse) {
+ TRACE_IOCG_PATH(inuse_takeback, iocg, &now,
+ iocg->inuse, new_inuse,
+ hw_inuse, new_hwi);
+ __propagate_active_weight(iocg, iocg->weight,
+ new_inuse);
+ }
+ } else {
+ /* genuinely out of vtime */
+ nr_shortages++;
+ }
+ }
+
+ if (!nr_shortages || !nr_surpluses)
+ goto skip_surplus_transfers;
+
+ /* there are both shortages and surpluses, transfer surpluses */
+ list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
+ u32 usage, hw_active, hw_inuse, new_hwi, new_inuse;
+ int nr_valid = 0;
+
+ if (!iocg->has_surplus)
+ continue;
+
+ /* base the decision on max historical usage */
+ for (i = 0, usage = 0; i < NR_USAGE_SLOTS; i++) {
+ if (iocg->usages[i]) {
+ usage = max(usage, iocg->usages[i]);
+ nr_valid++;
+ }
+ }
+ if (nr_valid < MIN_VALID_USAGES)
+ continue;
+
+ current_hweight(iocg, &hw_active, &hw_inuse);
+ new_hwi = surplus_adjusted_hweight_inuse(usage, hw_inuse);
+ if (!new_hwi)
+ continue;
+
+ new_inuse = DIV64_U64_ROUND_UP((u64)iocg->inuse * new_hwi,
+ hw_inuse);
+ if (new_inuse < iocg->inuse) {
+ TRACE_IOCG_PATH(inuse_giveaway, iocg, &now,
+ iocg->inuse, new_inuse,
+ hw_inuse, new_hwi);
+ __propagate_active_weight(iocg, iocg->weight, new_inuse);
+ }
+ }
+skip_surplus_transfers:
+ commit_active_weights(ioc);
+
+ /*
+ * If q is getting clogged or we're missing too much, we're issuing
+ * too much IO and should lower vtime rate. If we're not missing
+ * and experiencing shortages but not surpluses, we're too stingy
+ * and should increase vtime rate.
+ */
+ prev_busy_level = ioc->busy_level;
+ if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
+ missed_ppm[READ] > ppm_rthr ||
+ missed_ppm[WRITE] > ppm_wthr) {
+ ioc->busy_level = max(ioc->busy_level, 0);
+ ioc->busy_level++;
+ } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
+ missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
+ missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
+ /* take action iff there is contention */
+ if (nr_shortages && !nr_lagging) {
+ ioc->busy_level = min(ioc->busy_level, 0);
+ /* redistribute surpluses first */
+ if (!nr_surpluses)
+ ioc->busy_level--;
+ }
+ } else {
+ ioc->busy_level = 0;
+ }
+
+ ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
+
+ if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
+ u64 vrate = atomic64_read(&ioc->vtime_rate);
+ u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
+
+ /* rq_wait signal is always reliable, ignore user vrate_min */
+ if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
+ vrate_min = VRATE_MIN;
+
+ /*
+ * If vrate is out of bounds, apply clamp gradually as the
+ * bounds can change abruptly. Otherwise, apply busy_level
+ * based adjustment.
+ */
+ if (vrate < vrate_min) {
+ vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
+ 100);
+ vrate = min(vrate, vrate_min);
+ } else if (vrate > vrate_max) {
+ vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
+ 100);
+ vrate = max(vrate, vrate_max);
+ } else {
+ int idx = min_t(int, abs(ioc->busy_level),
+ ARRAY_SIZE(vrate_adj_pct) - 1);
+ u32 adj_pct = vrate_adj_pct[idx];
+
+ if (ioc->busy_level > 0)
+ adj_pct = 100 - adj_pct;
+ else
+ adj_pct = 100 + adj_pct;
+
+ vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
+ vrate_min, vrate_max);
+ }
+
+ trace_iocost_ioc_vrate_adj(ioc, vrate, &missed_ppm, rq_wait_pct,
+ nr_lagging, nr_shortages,
+ nr_surpluses);
+
+ atomic64_set(&ioc->vtime_rate, vrate);
+ ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
+ ioc->period_us * vrate * INUSE_MARGIN_PCT, 100);
+ } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
+ trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
+ &missed_ppm, rq_wait_pct, nr_lagging,
+ nr_shortages, nr_surpluses);
+ }
+
+ ioc_refresh_params(ioc, false);
+
+ /*
+ * This period is done. Move onto the next one. If nothing's
+ * going on with the device, stop the timer.
+ */
+ atomic64_inc(&ioc->cur_period);
+
+ if (ioc->running != IOC_STOP) {
+ if (!list_empty(&ioc->active_iocgs)) {
+ ioc_start_period(ioc, &now);
+ } else {
+ ioc->busy_level = 0;
+ ioc->running = IOC_IDLE;
+ }
+ }
+
+ spin_unlock_irq(&ioc->lock);
+}
+
+static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
+ bool is_merge, u64 *costp)
+{
+ struct ioc *ioc = iocg->ioc;
+ u64 coef_seqio, coef_randio, coef_page;
+ u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
+ u64 seek_pages = 0;
+ u64 cost = 0;
+
+ switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
+ coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
+ coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
+ break;
+ case REQ_OP_WRITE:
+ coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
+ coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
+ coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
+ break;
+ default:
+ goto out;
+ }
+
+ if (iocg->cursor) {
+ seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
+ seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
+ }
+
+ if (!is_merge) {
+ if (seek_pages > LCOEF_RANDIO_PAGES) {
+ cost += coef_randio;
+ } else {
+ cost += coef_seqio;
+ }
+ }
+ cost += pages * coef_page;
+out:
+ *costp = cost;
+}
+
+static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
+{
+ u64 cost;
+
+ calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
+ return cost;
+}
+
+static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
+{
+ struct blkcg_gq *blkg = bio->bi_blkg;
+ struct ioc *ioc = rqos_to_ioc(rqos);
+ struct ioc_gq *iocg = blkg_to_iocg(blkg);
+ struct ioc_now now;
+ struct iocg_wait wait;
+ u32 hw_active, hw_inuse;
+ u64 abs_cost, cost, vtime;
+
+ /* bypass IOs if disabled or for root cgroup */
+ if (!ioc->enabled || !iocg->level)
+ return;
+
+ /* always activate so that even 0 cost IOs get protected to some level */
+ if (!iocg_activate(iocg, &now))
+ return;
+
+ /* calculate the absolute vtime cost */
+ abs_cost = calc_vtime_cost(bio, iocg, false);
+ if (!abs_cost)
+ return;
+
+ iocg->cursor = bio_end_sector(bio);
+
+ vtime = atomic64_read(&iocg->vtime);
+ current_hweight(iocg, &hw_active, &hw_inuse);
+
+ if (hw_inuse < hw_active &&
+ time_after_eq64(vtime + ioc->inuse_margin_vtime, now.vnow)) {
+ TRACE_IOCG_PATH(inuse_reset, iocg, &now,
+ iocg->inuse, iocg->weight, hw_inuse, hw_active);
+ spin_lock_irq(&ioc->lock);
+ propagate_active_weight(iocg, iocg->weight, iocg->weight);
+ spin_unlock_irq(&ioc->lock);
+ current_hweight(iocg, &hw_active, &hw_inuse);
+ }
+
+ cost = abs_cost_to_cost(abs_cost, hw_inuse);
+
+ /*
+ * If no one's waiting and within budget, issue right away. The
+ * tests are racy but the races aren't systemic - we only miss once
+ * in a while which is fine.
+ */
+ if (!waitqueue_active(&iocg->waitq) &&
+ !atomic64_read(&iocg->abs_vdebt) &&
+ time_before_eq64(vtime + cost, now.vnow)) {
+ iocg_commit_bio(iocg, bio, cost);
+ return;
+ }
+
+ /*
+ * We're over budget. If @bio has to be issued regardless,
+ * remember the abs_cost instead of advancing vtime.
+ * iocg_kick_waitq() will pay off the debt before waking more IOs.
+ * This way, the debt is continuously paid off each period with the
+ * actual budget available to the cgroup. If we just wound vtime,
+ * we would incorrectly use the current hw_inuse for the entire
+ * amount which, for example, can lead to the cgroup staying
+ * blocked for a long time even with substantially raised hw_inuse.
+ */
+ if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
+ atomic64_add(abs_cost, &iocg->abs_vdebt);
+ iocg_kick_delay(iocg, &now, cost);
+ return;
+ }
+
+ /*
+ * Append self to the waitq and schedule the wakeup timer if we're
+ * the first waiter. The timer duration is calculated based on the
+ * current vrate. vtime and hweight changes can make it too short
+ * or too long. Each wait entry records the absolute cost it's
+ * waiting for to allow re-evaluation using a custom wait entry.
+ *
+ * If too short, the timer simply reschedules itself. If too long,
+ * the period timer will notice and trigger wakeups.
+ *
+ * All waiters are on iocg->waitq and the wait states are
+ * synchronized using waitq.lock.
+ */
+ spin_lock_irq(&iocg->waitq.lock);
+
+ /*
+ * We activated above but w/o any synchronization. Deactivation is
+ * synchronized with waitq.lock and we won't get deactivated as
+ * long as we're waiting, so we're good if we're activated here.
+ * In the unlikely case that we are deactivated, just issue the IO.
+ */
+ if (unlikely(list_empty(&iocg->active_list))) {
+ spin_unlock_irq(&iocg->waitq.lock);
+ iocg_commit_bio(iocg, bio, cost);
+ return;
+ }
+
+ init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
+ wait.wait.private = current;
+ wait.bio = bio;
+ wait.abs_cost = abs_cost;
+ wait.committed = false; /* will be set true by waker */
+
+ __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
+ iocg_kick_waitq(iocg, &now);
+
+ spin_unlock_irq(&iocg->waitq.lock);
+
+ while (true) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (wait.committed)
+ break;
+ io_schedule();
+ }
+
+ /* waker already committed us, proceed */
+ finish_wait(&iocg->waitq, &wait.wait);
+}
+
+static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
+ struct bio *bio)
+{
+ struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
+ struct ioc *ioc = iocg->ioc;
+ sector_t bio_end = bio_end_sector(bio);
+ struct ioc_now now;
+ u32 hw_inuse;
+ u64 abs_cost, cost;
+
+ /* bypass if disabled or for root cgroup */
+ if (!ioc->enabled || !iocg->level)
+ return;
+
+ abs_cost = calc_vtime_cost(bio, iocg, true);
+ if (!abs_cost)
+ return;
+
+ ioc_now(ioc, &now);
+ current_hweight(iocg, NULL, &hw_inuse);
+ cost = abs_cost_to_cost(abs_cost, hw_inuse);
+
+ /* update cursor if backmerging into the request at the cursor */
+ if (blk_rq_pos(rq) < bio_end &&
+ blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
+ iocg->cursor = bio_end;
+
+ /*
+ * Charge if there's enough vtime budget and the existing request
+ * has cost assigned. Otherwise, account it as debt. See debt
+ * handling in ioc_rqos_throttle() for details.
+ */
+ if (rq->bio && rq->bio->bi_iocost_cost &&
+ time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow))
+ iocg_commit_bio(iocg, bio, cost);
+ else
+ atomic64_add(abs_cost, &iocg->abs_vdebt);
+}
+
+static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
+{
+ struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
+
+ if (iocg && bio->bi_iocost_cost)
+ atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
+}
+
+static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
+{
+ struct ioc *ioc = rqos_to_ioc(rqos);
+ u64 on_q_ns, rq_wait_ns;
+ int pidx, rw;
+
+ if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
+ return;
+
+ switch (req_op(rq) & REQ_OP_MASK) {
+ case REQ_OP_READ:
+ pidx = QOS_RLAT;
+ rw = READ;
+ break;
+ case REQ_OP_WRITE:
+ pidx = QOS_WLAT;
+ rw = WRITE;
+ break;
+ default:
+ return;
+ }
+
+ on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
+ rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
+
+ if (on_q_ns <= ioc->params.qos[pidx] * NSEC_PER_USEC)
+ this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_met);
+ else
+ this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_missed);
+
+ this_cpu_add(ioc->pcpu_stat->rq_wait_ns, rq_wait_ns);
+}
+
+static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
+{
+ struct ioc *ioc = rqos_to_ioc(rqos);
+
+ spin_lock_irq(&ioc->lock);
+ ioc_refresh_params(ioc, false);
+ spin_unlock_irq(&ioc->lock);
+}
+
+static void ioc_rqos_exit(struct rq_qos *rqos)
+{
+ struct ioc *ioc = rqos_to_ioc(rqos);
+
+ blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
+
+ spin_lock_irq(&ioc->lock);
+ ioc->running = IOC_STOP;
+ spin_unlock_irq(&ioc->lock);
+
+ del_timer_sync(&ioc->timer);
+ free_percpu(ioc->pcpu_stat);
+ kfree(ioc);
+}
+
+static struct rq_qos_ops ioc_rqos_ops = {
+ .throttle = ioc_rqos_throttle,
+ .merge = ioc_rqos_merge,
+ .done_bio = ioc_rqos_done_bio,
+ .done = ioc_rqos_done,
+ .queue_depth_changed = ioc_rqos_queue_depth_changed,
+ .exit = ioc_rqos_exit,
+};
+
+static int blk_iocost_init(struct request_queue *q)
+{
+ struct ioc *ioc;
+ struct rq_qos *rqos;
+ int ret;
+
+ ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
+ if (!ioc)
+ return -ENOMEM;
+
+ ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
+ if (!ioc->pcpu_stat) {
+ kfree(ioc);
+ return -ENOMEM;
+ }
+
+ rqos = &ioc->rqos;
+ rqos->id = RQ_QOS_COST;
+ rqos->ops = &ioc_rqos_ops;
+ rqos->q = q;
+
+ spin_lock_init(&ioc->lock);
+ timer_setup(&ioc->timer, ioc_timer_fn, 0);
+ INIT_LIST_HEAD(&ioc->active_iocgs);
+
+ ioc->running = IOC_IDLE;
+ atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
+ seqcount_init(&ioc->period_seqcount);
+ ioc->period_at = ktime_to_us(ktime_get());
+ atomic64_set(&ioc->cur_period, 0);
+ atomic_set(&ioc->hweight_gen, 0);
+
+ spin_lock_irq(&ioc->lock);
+ ioc->autop_idx = AUTOP_INVALID;
+ ioc_refresh_params(ioc, true);
+ spin_unlock_irq(&ioc->lock);
+
+ rq_qos_add(q, rqos);
+ ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
+ if (ret) {
+ rq_qos_del(q, rqos);
+ free_percpu(ioc->pcpu_stat);
+ kfree(ioc);
+ return ret;
+ }
+ return 0;
+}
+
+static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
+{
+ struct ioc_cgrp *iocc;
+
+ iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
+ if (!iocc)
+ return NULL;
+
+ iocc->dfl_weight = CGROUP_WEIGHT_DFL;
+ return &iocc->cpd;
+}
+
+static void ioc_cpd_free(struct blkcg_policy_data *cpd)
+{
+ kfree(container_of(cpd, struct ioc_cgrp, cpd));
+}
+
+static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
+ struct blkcg *blkcg)
+{
+ int levels = blkcg->css.cgroup->level + 1;
+ struct ioc_gq *iocg;
+
+ iocg = kzalloc_node(sizeof(*iocg) + levels * sizeof(iocg->ancestors[0]),
+ gfp, q->node);
+ if (!iocg)
+ return NULL;
+
+ return &iocg->pd;
+}
+
+static void ioc_pd_init(struct blkg_policy_data *pd)
+{
+ struct ioc_gq *iocg = pd_to_iocg(pd);
+ struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
+ struct ioc *ioc = q_to_ioc(blkg->q);
+ struct ioc_now now;
+ struct blkcg_gq *tblkg;
+ unsigned long flags;
+
+ ioc_now(ioc, &now);
+
+ iocg->ioc = ioc;
+ atomic64_set(&iocg->vtime, now.vnow);
+ atomic64_set(&iocg->done_vtime, now.vnow);
+ atomic64_set(&iocg->abs_vdebt, 0);
+ atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
+ INIT_LIST_HEAD(&iocg->active_list);
+ iocg->hweight_active = HWEIGHT_WHOLE;
+ iocg->hweight_inuse = HWEIGHT_WHOLE;
+
+ init_waitqueue_head(&iocg->waitq);
+ hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ iocg->waitq_timer.function = iocg_waitq_timer_fn;
+ hrtimer_init(&iocg->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ iocg->delay_timer.function = iocg_delay_timer_fn;
+
+ iocg->level = blkg->blkcg->css.cgroup->level;
+
+ for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
+ struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
+ iocg->ancestors[tiocg->level] = tiocg;
+ }
+
+ spin_lock_irqsave(&ioc->lock, flags);
+ weight_updated(iocg);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+}
+
+static void ioc_pd_free(struct blkg_policy_data *pd)
+{
+ struct ioc_gq *iocg = pd_to_iocg(pd);
+ struct ioc *ioc = iocg->ioc;
+
+ if (ioc) {
+ spin_lock(&ioc->lock);
+ if (!list_empty(&iocg->active_list)) {
+ propagate_active_weight(iocg, 0, 0);
+ list_del_init(&iocg->active_list);
+ }
+ spin_unlock(&ioc->lock);
+
+ hrtimer_cancel(&iocg->waitq_timer);
+ hrtimer_cancel(&iocg->delay_timer);
+ }
+ kfree(iocg);
+}
+
+static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
+{
+ const char *dname = blkg_dev_name(pd->blkg);
+ struct ioc_gq *iocg = pd_to_iocg(pd);
+
+ if (dname && iocg->cfg_weight)
+ seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight);
+ return 0;
+}
+
+static int ioc_weight_show(struct seq_file *sf, void *v)
+{
+ struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+ struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
+
+ seq_printf(sf, "default %u\n", iocc->dfl_weight);
+ blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
+ &blkcg_policy_iocost, seq_cft(sf)->private, false);
+ return 0;
+}
+
+static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct blkcg *blkcg = css_to_blkcg(of_css(of));
+ struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
+ struct blkg_conf_ctx ctx;
+ struct ioc_gq *iocg;
+ u32 v;
+ int ret;
+
+ if (!strchr(buf, ':')) {
+ struct blkcg_gq *blkg;
+
+ if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
+ return -EINVAL;
+
+ if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
+ return -EINVAL;
+
+ spin_lock(&blkcg->lock);
+ iocc->dfl_weight = v;
+ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+ struct ioc_gq *iocg = blkg_to_iocg(blkg);
+
+ if (iocg) {
+ spin_lock_irq(&iocg->ioc->lock);
+ weight_updated(iocg);
+ spin_unlock_irq(&iocg->ioc->lock);
+ }
+ }
+ spin_unlock(&blkcg->lock);
+
+ return nbytes;
+ }
+
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
+ if (ret)
+ return ret;
+
+ iocg = blkg_to_iocg(ctx.blkg);
+
+ if (!strncmp(ctx.body, "default", 7)) {
+ v = 0;
+ } else {
+ if (!sscanf(ctx.body, "%u", &v))
+ goto einval;
+ if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
+ goto einval;
+ }
+
+ spin_lock(&iocg->ioc->lock);
+ iocg->cfg_weight = v;
+ weight_updated(iocg);
+ spin_unlock(&iocg->ioc->lock);
+
+ blkg_conf_finish(&ctx);
+ return nbytes;
+
+einval:
+ blkg_conf_finish(&ctx);
+ return -EINVAL;
+}
+
+static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
+{
+ const char *dname = blkg_dev_name(pd->blkg);
+ struct ioc *ioc = pd_to_iocg(pd)->ioc;
+
+ if (!dname)
+ return 0;
+
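+ /* ppm to "percent.hh", e.g. 255000 ppm prints as 25.50 (illustrative) */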
+ seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
+ dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
+ ioc->params.qos[QOS_RPPM] / 10000,
+ ioc->params.qos[QOS_RPPM] % 10000 / 100,
+ ioc->params.qos[QOS_RLAT],
+ ioc->params.qos[QOS_WPPM] / 10000,
+ ioc->params.qos[QOS_WPPM] % 10000 / 100,
+ ioc->params.qos[QOS_WLAT],
+ ioc->params.qos[QOS_MIN] / 10000,
+ ioc->params.qos[QOS_MIN] % 10000 / 100,
+ ioc->params.qos[QOS_MAX] / 10000,
+ ioc->params.qos[QOS_MAX] % 10000 / 100);
+ return 0;
+}
+
+static int ioc_qos_show(struct seq_file *sf, void *v)
+{
+ struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+
+ blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
+ &blkcg_policy_iocost, seq_cft(sf)->private, false);
+ return 0;
+}
+
+static const match_table_t qos_ctrl_tokens = {
+ { QOS_ENABLE, "enable=%u" },
+ { QOS_CTRL, "ctrl=%s" },
+ { NR_QOS_CTRL_PARAMS, NULL },
+};
+
+static const match_table_t qos_tokens = {
+ { QOS_RPPM, "rpct=%s" },
+ { QOS_RLAT, "rlat=%u" },
+ { QOS_WPPM, "wpct=%s" },
+ { QOS_WLAT, "wlat=%u" },
+ { QOS_MIN, "min=%s" },
+ { QOS_MAX, "max=%s" },
+ { NR_QOS_PARAMS, NULL },
+};
+
+static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
+ size_t nbytes, loff_t off)
+{
+ struct gendisk *disk;
+ struct ioc *ioc;
+ u32 qos[NR_QOS_PARAMS];
+ bool enable, user;
+ char *p;
+ int ret;
+
+ disk = blkcg_conf_get_disk(&input);
+ if (IS_ERR(disk))
+ return PTR_ERR(disk);
+
+ ioc = q_to_ioc(disk->queue);
+ if (!ioc) {
+ ret = blk_iocost_init(disk->queue);
+ if (ret)
+ goto err;
+ ioc = q_to_ioc(disk->queue);
+ }
+
+ spin_lock_irq(&ioc->lock);
+ memcpy(qos, ioc->params.qos, sizeof(qos));
+ enable = ioc->enabled;
+ user = ioc->user_qos_params;
+ spin_unlock_irq(&ioc->lock);
+
+ while ((p = strsep(&input, " \t\n"))) {
+ substring_t args[MAX_OPT_ARGS];
+ char buf[32];
+ int tok;
+ s64 v;
+
+ if (!*p)
+ continue;
+
+ switch (match_token(p, qos_ctrl_tokens, args)) {
+ case QOS_ENABLE:
+ match_u64(&args[0], &v);
+ enable = v;
+ continue;
+ case QOS_CTRL:
+ match_strlcpy(buf, &args[0], sizeof(buf));
+ if (!strcmp(buf, "auto"))
+ user = false;
+ else if (!strcmp(buf, "user"))
+ user = true;
+ else
+ goto einval;
+ continue;
+ }
+
+ tok = match_token(p, qos_tokens, args);
+ switch (tok) {
+ case QOS_RPPM:
+ case QOS_WPPM:
+ if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
+ sizeof(buf))
+ goto einval;
+ if (cgroup_parse_float(buf, 2, &v))
+ goto einval;
+ if (v < 0 || v > 10000)
+ goto einval;
+ qos[tok] = v * 100;
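+ /* e.g. "rpct=95.00" parses to 9500 and is stored as 950000 ppm */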
+ break;
+ case QOS_RLAT:
+ case QOS_WLAT:
+ if (match_u64(&args[0], &v))
+ goto einval;
+ qos[tok] = v;
+ break;
+ case QOS_MIN:
+ case QOS_MAX:
+ if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
+ sizeof(buf))
+ goto einval;
+ if (cgroup_parse_float(buf, 2, &v))
+ goto einval;
+ if (v < 0)
+ goto einval;
+ qos[tok] = clamp_t(s64, v * 100,
+ VRATE_MIN_PPM, VRATE_MAX_PPM);
+ break;
+ default:
+ goto einval;
+ }
+ user = true;
+ }
+
+ if (qos[QOS_MIN] > qos[QOS_MAX])
+ goto einval;
+
+ spin_lock_irq(&ioc->lock);
+
+ if (enable) {
+ blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
+ ioc->enabled = true;
+ } else {
+ blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
+ ioc->enabled = false;
+ }
+
+ if (user) {
+ memcpy(ioc->params.qos, qos, sizeof(qos));
+ ioc->user_qos_params = true;
+ } else {
+ ioc->user_qos_params = false;
+ }
+
+ ioc_refresh_params(ioc, true);
+ spin_unlock_irq(&ioc->lock);
+
+ put_disk_and_module(disk);
+ return nbytes;
+einval:
+ ret = -EINVAL;
+err:
+ put_disk_and_module(disk);
+ return ret;
+}
+
+static u64 ioc_cost_model_prfill(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
+{
+ const char *dname = blkg_dev_name(pd->blkg);
+ struct ioc *ioc = pd_to_iocg(pd)->ioc;
+ u64 *u = ioc->params.i_lcoefs;
+
+ if (!dname)
+ return 0;
+
+ seq_printf(sf, "%s ctrl=%s model=linear "
+ "rbps=%llu rseqiops=%llu rrandiops=%llu "
+ "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
+ dname, ioc->user_cost_model ? "user" : "auto",
+ u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
+ u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
+ return 0;
+}
+
+static int ioc_cost_model_show(struct seq_file *sf, void *v)
+{
+ struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+
+ blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
+ &blkcg_policy_iocost, seq_cft(sf)->private, false);
+ return 0;
+}
+
+static const match_table_t cost_ctrl_tokens = {
+ { COST_CTRL, "ctrl=%s" },
+ { COST_MODEL, "model=%s" },
+ { NR_COST_CTRL_PARAMS, NULL },
+};
+
+static const match_table_t i_lcoef_tokens = {
+ { I_LCOEF_RBPS, "rbps=%u" },
+ { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
+ { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
+ { I_LCOEF_WBPS, "wbps=%u" },
+ { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
+ { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
+ { NR_I_LCOEFS, NULL },
+};
+
+static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
+ size_t nbytes, loff_t off)
+{
+ struct gendisk *disk;
+ struct ioc *ioc;
+ u64 u[NR_I_LCOEFS];
+ bool user;
+ char *p;
+ int ret;
+
+ disk = blkcg_conf_get_disk(&input);
+ if (IS_ERR(disk))
+ return PTR_ERR(disk);
+
+ ioc = q_to_ioc(disk->queue);
+ if (!ioc) {
+ ret = blk_iocost_init(disk->queue);
+ if (ret)
+ goto err;
+ ioc = q_to_ioc(disk->queue);
+ }
+
+ spin_lock_irq(&ioc->lock);
+ memcpy(u, ioc->params.i_lcoefs, sizeof(u));
+ user = ioc->user_cost_model;
+ spin_unlock_irq(&ioc->lock);
+
+ while ((p = strsep(&input, " \t\n"))) {
+ substring_t args[MAX_OPT_ARGS];
+ char buf[32];
+ int tok;
+ u64 v;
+
+ if (!*p)
+ continue;
+
+ switch (match_token(p, cost_ctrl_tokens, args)) {
+ case COST_CTRL:
+ match_strlcpy(buf, &args[0], sizeof(buf));
+ if (!strcmp(buf, "auto"))
+ user = false;
+ else if (!strcmp(buf, "user"))
+ user = true;
+ else
+ goto einval;
+ continue;
+ case COST_MODEL:
+ match_strlcpy(buf, &args[0], sizeof(buf));
+ if (strcmp(buf, "linear"))
+ goto einval;
+ continue;
+ }
+
+ tok = match_token(p, i_lcoef_tokens, args);
+ if (tok == NR_I_LCOEFS)
+ goto einval;
+ if (match_u64(&args[0], &v))
+ goto einval;
+ u[tok] = v;
+ user = true;
+ }
+
+ spin_lock_irq(&ioc->lock);
+ if (user) {
+ memcpy(ioc->params.i_lcoefs, u, sizeof(u));
+ ioc->user_cost_model = true;
+ } else {
+ ioc->user_cost_model = false;
+ }
+ ioc_refresh_params(ioc, true);
+ spin_unlock_irq(&ioc->lock);
+
+ put_disk_and_module(disk);
+ return nbytes;
+
+einval:
+ ret = -EINVAL;
+err:
+ put_disk_and_module(disk);
+ return ret;
+}
+
+static struct cftype ioc_files[] = {
+ {
+ .name = "weight",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = ioc_weight_show,
+ .write = ioc_weight_write,
+ },
+ {
+ .name = "cost.qos",
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ .seq_show = ioc_qos_show,
+ .write = ioc_qos_write,
+ },
+ {
+ .name = "cost.model",
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ .seq_show = ioc_cost_model_show,
+ .write = ioc_cost_model_write,
+ },
+ {}
+};
+
+static struct blkcg_policy blkcg_policy_iocost = {
+ .dfl_cftypes = ioc_files,
+ .cpd_alloc_fn = ioc_cpd_alloc,
+ .cpd_free_fn = ioc_cpd_free,
+ .pd_alloc_fn = ioc_pd_alloc,
+ .pd_init_fn = ioc_pd_init,
+ .pd_free_fn = ioc_pd_free,
+};
+
+static int __init ioc_init(void)
+{
+ return blkcg_policy_register(&blkcg_policy_iocost);
+}
+
+static void __exit ioc_exit(void)
+{
+ return blkcg_policy_unregister(&blkcg_policy_iocost);
+}
+
+module_init(ioc_init);
+module_exit(ioc_exit);
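
The two root-only files registered above (the "cost.qos"/"cost.model" entries in ioc_files, exposed with the io. controller prefix) take flat, space-separated key=value strings. A minimal userspace sketch of driving them, assuming cgroup2 is mounted at /sys/fs/cgroup and the target device is 8:0; the io.cost.model keys mirror i_lcoef_tokens above, enable=/ctrl= mirror the qos_ctrl_tokens parsed by ioc_qos_write(), and the coefficient values are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	/* the kernel side splits this on " \t\n" via strsep() */
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* enable the controller and take manual control of the QoS knobs */
	if (write_str("/sys/fs/cgroup/io.cost.qos", "8:0 enable=1 ctrl=user"))
		perror("io.cost.qos");
	/* pin a linear cost model; coefficients are made-up example values */
	if (write_str("/sys/fs/cgroup/io.cost.model",
		      "8:0 ctrl=user model=linear rbps=500000000 rseqiops=50000 "
		      "rrandiops=8000 wbps=400000000 wseqiops=40000 wrandiops=5000"))
		perror("io.cost.model");
	return 0;
}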
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 19923f8..c128d50 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block rq-qos base io controller
*
@@ -72,8 +73,10 @@
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
+#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
+#include "blk.h"
#define DEFAULT_SCALE_COOKIE 1000000U
@@ -115,9 +118,22 @@
atomic_t scale_cookie;
};
+struct percentile_stats {
+ u64 total;
+ u64 missed;
+};
+
+struct latency_stat {
+ union {
+ struct percentile_stats ps;
+ struct blk_rq_stat rqs;
+ };
+};
+
struct iolatency_grp {
struct blkg_policy_data pd;
- struct blk_rq_stat __percpu *stats;
+ struct latency_stat __percpu *stats;
+ struct latency_stat cur_stat;
struct blk_iolatency *blkiolat;
struct rq_depth rq_depth;
struct rq_wait rq_wait;
@@ -132,6 +148,7 @@
/* Our current number of IO's for the last summation. */
u64 nr_samples;
+ bool ssd;
struct child_latency_info child_lat;
};
@@ -139,7 +156,7 @@
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
* These are the constants used to fake the fixed-point moving average
- * calculation just like load average. The call to CALC_LOAD folds
+ * calculation just like load average. The call to calc_load() folds
* (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
* window size is bucketed to try to approximately calculate average
* latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
@@ -172,29 +189,101 @@
return pd_to_blkg(&iolat->pd);
}
-static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
- wait_queue_entry_t *wait,
- bool first_block)
+static inline void latency_stat_init(struct iolatency_grp *iolat,
+ struct latency_stat *stat)
{
- struct rq_wait *rqw = &iolat->rq_wait;
+ if (iolat->ssd) {
+ stat->ps.total = 0;
+ stat->ps.missed = 0;
+ } else
+ blk_rq_stat_init(&stat->rqs);
+}
- if (first_block && waitqueue_active(&rqw->wait) &&
- rqw->wait.head.next != &wait->entry)
- return false;
+static inline void latency_stat_sum(struct iolatency_grp *iolat,
+ struct latency_stat *sum,
+ struct latency_stat *stat)
+{
+ if (iolat->ssd) {
+ sum->ps.total += stat->ps.total;
+ sum->ps.missed += stat->ps.missed;
+ } else
+ blk_rq_stat_sum(&sum->rqs, &stat->rqs);
+}
+
+static inline void latency_stat_record_time(struct iolatency_grp *iolat,
+ u64 req_time)
+{
+ struct latency_stat *stat = get_cpu_ptr(iolat->stats);
+ if (iolat->ssd) {
+ if (req_time >= iolat->min_lat_nsec)
+ stat->ps.missed++;
+ stat->ps.total++;
+ } else
+ blk_rq_stat_add(&stat->rqs, req_time);
+ put_cpu_ptr(stat);
+}
+
+static inline bool latency_sum_ok(struct iolatency_grp *iolat,
+ struct latency_stat *stat)
+{
+ if (iolat->ssd) {
+ u64 thresh = div64_u64(stat->ps.total, 10);
+ thresh = max(thresh, 1ULL);
+ return stat->ps.missed < thresh;
+ }
+ return stat->rqs.mean <= iolat->min_lat_nsec;
+}
+
+static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
+ struct latency_stat *stat)
+{
+ if (iolat->ssd)
+ return stat->ps.total;
+ return stat->rqs.nr_samples;
+}
+
+static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
+ struct latency_stat *stat)
+{
+ int exp_idx;
+
+ if (iolat->ssd)
+ return;
+
+ /*
+ * calc_load() takes in a number stored in fixed point representation.
+ * Because we are using this for IO time in ns, the values stored
+ * are significantly larger than the FIXED_1 denominator (2048).
+ * Therefore, rounding errors in the calculation are negligible and
+ * can be ignored.
+ */
+ exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
+ div64_u64(iolat->cur_win_nsec,
+ BLKIOLATENCY_EXP_BUCKET_SIZE));
+ iolat->lat_avg = calc_load(iolat->lat_avg,
+ iolatency_exp_factors[exp_idx],
+ stat->rqs.mean);
+}
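+
+/*
+ * Illustrative numbers for iolat_update_total_lat_avg() above: with
+ * FIXED_1 = 2048 and an exponential factor of, say, 2034, calc_load()
+ * computes roughly avg = (avg * 2034 + mean * (2048 - 2034)) / 2048,
+ * so each completed window contributes about 0.7% of its mean latency
+ * to the moving average.
+ */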
+
+static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
+{
+ atomic_dec(&rqw->inflight);
+ wake_up(&rqw->wait);
+}
+
+static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
+{
+ struct iolatency_grp *iolat = private_data;
return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}
static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
struct iolatency_grp *iolat,
- spinlock_t *lock, bool issue_as_root,
+ bool issue_as_root,
bool use_memdelay)
- __releases(lock)
- __acquires(lock)
{
struct rq_wait *rqw = &iolat->rq_wait;
unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
- DEFINE_WAIT(wait);
- bool first_block = true;
if (use_delay)
blkcg_schedule_throttle(rqos->q, use_memdelay);
@@ -211,27 +300,7 @@
return;
}
- if (iolatency_may_queue(iolat, &wait, first_block))
- return;
-
- do {
- prepare_to_wait_exclusive(&rqw->wait, &wait,
- TASK_UNINTERRUPTIBLE);
-
- if (iolatency_may_queue(iolat, &wait, first_block))
- break;
- first_block = false;
-
- if (lock) {
- spin_unlock_irq(lock);
- io_schedule();
- spin_lock_irq(lock);
- } else {
- io_schedule();
- }
- } while (1);
-
- finish_wait(&rqw->wait, &wait);
+ rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}
#define SCALE_DOWN_FACTOR 2
@@ -255,7 +324,7 @@
struct child_latency_info *lat_info,
bool up)
{
- unsigned long qd = blk_queue_depth(blkiolat->rqos.q);
+ unsigned long qd = blkiolat->rqos.q->nr_requests;
unsigned long scale = scale_amount(qd, up);
unsigned long old = atomic_read(&lat_info->scale_cookie);
unsigned long max_scale = qd << 1;
@@ -295,10 +364,9 @@
*/
static void scale_change(struct iolatency_grp *iolat, bool up)
{
- unsigned long qd = blk_queue_depth(iolat->blkiolat->rqos.q);
+ unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
unsigned long scale = scale_amount(qd, up);
unsigned long old = iolat->rq_depth.max_depth;
- bool changed = false;
if (old > qd)
old = qd;
@@ -308,15 +376,13 @@
return;
if (old < qd) {
- changed = true;
old += scale;
old = min(old, qd);
iolat->rq_depth.max_depth = old;
wake_up_all(&iolat->rq_wait.wait);
}
- } else if (old > 1) {
+ } else {
old >>= 1;
- changed = true;
iolat->rq_depth.max_depth = max(old, 1UL);
}
}
@@ -369,7 +435,7 @@
* scale down event.
*/
samples_thresh = lat_info->nr_samples * 5;
- samples_thresh = div64_u64(samples_thresh, 100);
+ samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
if (iolat->nr_samples <= samples_thresh)
return;
}
@@ -391,38 +457,15 @@
scale_change(iolat, direction > 0);
}
-static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
- spinlock_t *lock)
+static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
- struct blkcg *blkcg;
- struct blkcg_gq *blkg;
- struct request_queue *q = rqos->q;
+ struct blkcg_gq *blkg = bio->bi_blkg;
bool issue_as_root = bio_issue_as_root_blkg(bio);
if (!blk_iolatency_enabled(blkiolat))
return;
- rcu_read_lock();
- blkcg = bio_blkcg(bio);
- bio_associate_blkcg(bio, &blkcg->css);
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg)) {
- if (!lock)
- spin_lock_irq(q->queue_lock);
- blkg = blkg_lookup_create(blkcg, q);
- if (IS_ERR(blkg))
- blkg = NULL;
- if (!lock)
- spin_unlock_irq(q->queue_lock);
- }
- if (!blkg)
- goto out;
-
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
- bio_associate_blkg(bio, blkg);
-out:
- rcu_read_unlock();
while (blkg && blkg->parent) {
struct iolatency_grp *iolat = blkg_to_lat(blkg);
if (!iolat) {
@@ -431,7 +474,7 @@
}
check_scale_change(iolat);
- __blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
+ __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
blkg = blkg->parent;
}
@@ -443,7 +486,6 @@
struct bio_issue *issue, u64 now,
bool issue_as_root)
{
- struct blk_rq_stat *rq_stat;
u64 start = bio_issue_time(issue);
u64 req_time;
@@ -469,9 +511,7 @@
return;
}
- rq_stat = get_cpu_ptr(iolat->stats);
- blk_rq_stat_add(rq_stat, req_time);
- put_cpu_ptr(rq_stat);
+ latency_stat_record_time(iolat, req_time);
}
#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
@@ -482,17 +522,17 @@
struct blkcg_gq *blkg = lat_to_blkg(iolat);
struct iolatency_grp *parent;
struct child_latency_info *lat_info;
- struct blk_rq_stat stat;
+ struct latency_stat stat;
unsigned long flags;
- int cpu, exp_idx;
+ int cpu;
- blk_rq_stat_init(&stat);
+ latency_stat_init(iolat, &stat);
preempt_disable();
for_each_online_cpu(cpu) {
- struct blk_rq_stat *s;
+ struct latency_stat *s;
s = per_cpu_ptr(iolat->stats, cpu);
- blk_rq_stat_sum(&stat, s);
- blk_rq_stat_init(s);
+ latency_stat_sum(iolat, &stat, s);
+ latency_stat_init(iolat, s);
}
preempt_enable();
@@ -502,41 +542,36 @@
lat_info = &parent->child_lat;
- /*
- * CALC_LOAD takes in a number stored in fixed point representation.
- * Because we are using this for IO time in ns, the values stored
- * are significantly larger than the FIXED_1 denominator (2048).
- * Therefore, rounding errors in the calculation are negligible and
- * can be ignored.
- */
- exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
- div64_u64(iolat->cur_win_nsec,
- BLKIOLATENCY_EXP_BUCKET_SIZE));
- CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat.mean);
+ iolat_update_total_lat_avg(iolat, &stat);
/* Everything is ok and we don't need to adjust the scale. */
- if (stat.mean <= iolat->min_lat_nsec &&
+ if (latency_sum_ok(iolat, &stat) &&
atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
return;
/* Somebody beat us to the punch, just bail. */
spin_lock_irqsave(&lat_info->lock, flags);
+
+ latency_stat_sum(iolat, &iolat->cur_stat, &stat);
lat_info->nr_samples -= iolat->nr_samples;
- lat_info->nr_samples += stat.nr_samples;
- iolat->nr_samples = stat.nr_samples;
+ lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
+ iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);
if ((lat_info->last_scale_event >= now ||
- now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME) &&
- lat_info->scale_lat <= iolat->min_lat_nsec)
+ now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
goto out;
- if (stat.mean <= iolat->min_lat_nsec &&
- stat.nr_samples >= BLKIOLATENCY_MIN_GOOD_SAMPLES) {
+ if (latency_sum_ok(iolat, &iolat->cur_stat) &&
+ latency_sum_ok(iolat, &stat)) {
+ if (latency_stat_samples(iolat, &iolat->cur_stat) <
+ BLKIOLATENCY_MIN_GOOD_SAMPLES)
+ goto out;
if (lat_info->scale_grp == iolat) {
lat_info->last_scale_event = now;
scale_cookie_change(iolat->blkiolat, lat_info, true);
}
- } else if (stat.mean > iolat->min_lat_nsec) {
+ } else if (lat_info->scale_lat == 0 ||
+ lat_info->scale_lat >= iolat->min_lat_nsec) {
lat_info->last_scale_event = now;
if (!lat_info->scale_grp ||
lat_info->scale_lat > iolat->min_lat_nsec) {
@@ -545,6 +580,7 @@
}
scale_cookie_change(iolat->blkiolat, lat_info, false);
}
+ latency_stat_init(iolat, &iolat->cur_stat);
out:
spin_unlock_irqrestore(&lat_info->lock, flags);
}
@@ -558,9 +594,10 @@
u64 now = ktime_to_ns(ktime_get());
bool issue_as_root = bio_issue_as_root_blkg(bio);
bool enabled = false;
+ int inflight = 0;
blkg = bio->bi_blkg;
- if (!blkg)
+ if (!blkg || !bio_flagged(bio, BIO_TRACKED))
return;
iolat = blkg_to_lat(bio->bi_blkg);
@@ -568,6 +605,9 @@
return;
enabled = blk_iolatency_enabled(iolat->blkiolat);
+ if (!enabled)
+ return;
+
while (blkg && blkg->parent) {
iolat = blkg_to_lat(blkg);
if (!iolat) {
@@ -576,45 +616,28 @@
}
rqw = &iolat->rq_wait;
- atomic_dec(&rqw->inflight);
- if (!enabled || iolat->min_lat_nsec == 0)
- goto next;
- iolatency_record_time(iolat, &bio->bi_issue, now,
- issue_as_root);
- window_start = atomic64_read(&iolat->window_start);
- if (now > window_start &&
- (now - window_start) >= iolat->cur_win_nsec) {
- if (atomic64_cmpxchg(&iolat->window_start,
- window_start, now) == window_start)
- iolatency_check_latencies(iolat, now);
+ inflight = atomic_dec_return(&rqw->inflight);
+ WARN_ON_ONCE(inflight < 0);
+ /*
+ * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
+ * submitted, so do not account for it.
+ */
+ if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
+ iolatency_record_time(iolat, &bio->bi_issue, now,
+ issue_as_root);
+ window_start = atomic64_read(&iolat->window_start);
+ if (now > window_start &&
+ (now - window_start) >= iolat->cur_win_nsec) {
+ if (atomic64_cmpxchg(&iolat->window_start,
+ window_start, now) == window_start)
+ iolatency_check_latencies(iolat, now);
+ }
}
-next:
wake_up(&rqw->wait);
blkg = blkg->parent;
}
}
-static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
-{
- struct blkcg_gq *blkg;
-
- blkg = bio->bi_blkg;
- while (blkg && blkg->parent) {
- struct rq_wait *rqw;
- struct iolatency_grp *iolat;
-
- iolat = blkg_to_lat(blkg);
- if (!iolat)
- goto next;
-
- rqw = &iolat->rq_wait;
- atomic_dec(&rqw->inflight);
- wake_up(&rqw->wait);
-next:
- blkg = blkg->parent;
- }
-}
-
static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
@@ -626,7 +649,6 @@
static struct rq_qos_ops blkcg_iolatency_ops = {
.throttle = blkcg_iolatency_throttle,
- .cleanup = blkcg_iolatency_cleanup,
.done_bio = blkcg_iolatency_done_bio,
.exit = blkcg_iolatency_exit,
};
@@ -650,7 +672,7 @@
* We could be exiting, don't access the pd unless we have a
* ref on the blkg.
*/
- if (!blkg_try_get(blkg))
+ if (!blkg_tryget(blkg))
continue;
iolat = blkg_to_lat(blkg);
@@ -703,7 +725,7 @@
return -ENOMEM;
rqos = &blkiolat->rqos;
- rqos->id = RQ_QOS_CGROUP;
+ rqos->id = RQ_QOS_LATENCY;
rqos->ops = &blkcg_iolatency_ops;
rqos->q = q;
@@ -721,10 +743,13 @@
return 0;
}
-static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+/*
+ * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
+ * return 0.
+ */
+static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
struct iolatency_grp *iolat = blkg_to_lat(blkg);
- struct blk_iolatency *blkiolat = iolat->blkiolat;
u64 oldval = iolat->min_lat_nsec;
iolat->min_lat_nsec = val;
@@ -733,9 +758,12 @@
BLKIOLATENCY_MAX_WIN_SIZE);
if (!oldval && val)
- atomic_inc(&blkiolat->enabled);
- if (oldval && !val)
- atomic_dec(&blkiolat->enabled);
+ return 1;
+ if (oldval && !val) {
+ blkcg_clear_delay(blkg);
+ return -1;
+ }
+ return 0;
}
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
@@ -761,20 +789,19 @@
{
struct blkcg *blkcg = css_to_blkcg(of_css(of));
struct blkcg_gq *blkg;
- struct blk_iolatency *blkiolat;
struct blkg_conf_ctx ctx;
struct iolatency_grp *iolat;
char *p, *tok;
u64 lat_val = 0;
u64 oldval;
int ret;
+ int enable = 0;
ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
if (ret)
return ret;
iolat = blkg_to_lat(ctx.blkg);
- blkiolat = iolat->blkiolat;
p = ctx.body;
ret = -EINVAL;
@@ -803,7 +830,12 @@
blkg = ctx.blkg;
oldval = iolat->min_lat_nsec;
- iolatency_set_min_lat_nsec(blkg, lat_val);
+ enable = iolatency_set_min_lat_nsec(blkg, lat_val);
+ if (enable) {
+ WARN_ON_ONCE(!blk_get_queue(blkg->q));
+ blkg_get(blkg);
+ }
+
if (oldval != iolat->min_lat_nsec) {
iolatency_clear_scaling(blkg);
}
@@ -811,6 +843,24 @@
ret = 0;
out:
blkg_conf_finish(&ctx);
+ if (ret == 0 && enable) {
+ struct iolatency_grp *tmp = blkg_to_lat(blkg);
+ struct blk_iolatency *blkiolat = tmp->blkiolat;
+
+ blk_mq_freeze_queue(blkg->q);
+
+ if (enable == 1)
+ atomic_inc(&blkiolat->enabled);
+ else if (enable == -1)
+ atomic_dec(&blkiolat->enabled);
+ else
+ WARN_ON_ONCE(1);
+
+ blk_mq_unfreeze_queue(blkg->q);
+
+ blkg_put(blkg);
+ blk_put_queue(blkg->q);
+ }
return ret ?: nbytes;
}
@@ -835,13 +885,46 @@
return 0;
}
+static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
+ size_t size)
+{
+ struct latency_stat stat;
+ int cpu;
+
+ latency_stat_init(iolat, &stat);
+ preempt_disable();
+ for_each_online_cpu(cpu) {
+ struct latency_stat *s;
+ s = per_cpu_ptr(iolat->stats, cpu);
+ latency_stat_sum(iolat, &stat, s);
+ }
+ preempt_enable();
+
+ if (iolat->rq_depth.max_depth == UINT_MAX)
+ return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
+ (unsigned long long)stat.ps.missed,
+ (unsigned long long)stat.ps.total);
+ return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
+ (unsigned long long)stat.ps.missed,
+ (unsigned long long)stat.ps.total,
+ iolat->rq_depth.max_depth);
+}
+
static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
size_t size)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
- unsigned long long avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
- unsigned long long cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
+ unsigned long long avg_lat;
+ unsigned long long cur_win;
+ if (!blkcg_debug_stats)
+ return 0;
+
+ if (iolat->ssd)
+ return iolatency_ssd_stat(iolat, buf, size);
+
+ avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
+ cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
if (iolat->rq_depth.max_depth == UINT_MAX)
return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
avg_lat, cur_win);
@@ -851,15 +934,17 @@
}
-static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
+static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
+ struct request_queue *q,
+ struct blkcg *blkcg)
{
struct iolatency_grp *iolat;
- iolat = kzalloc_node(sizeof(*iolat), gfp, node);
+ iolat = kzalloc_node(sizeof(*iolat), gfp, q->node);
if (!iolat)
return NULL;
- iolat->stats = __alloc_percpu_gfp(sizeof(struct blk_rq_stat),
- __alignof__(struct blk_rq_stat), gfp);
+ iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
+ __alignof__(struct latency_stat), gfp);
if (!iolat->stats) {
kfree(iolat);
return NULL;
@@ -876,15 +961,21 @@
u64 now = ktime_to_ns(ktime_get());
int cpu;
+ if (blk_queue_nonrot(blkg->q))
+ iolat->ssd = true;
+ else
+ iolat->ssd = false;
+
for_each_possible_cpu(cpu) {
- struct blk_rq_stat *stat;
+ struct latency_stat *stat;
stat = per_cpu_ptr(iolat->stats, cpu);
- blk_rq_stat_init(stat);
+ latency_stat_init(iolat, stat);
}
+ latency_stat_init(iolat, &iolat->cur_stat);
rq_wait_init(&iolat->rq_wait);
spin_lock_init(&iolat->child_lat.lock);
- iolat->rq_depth.queue_depth = blk_queue_depth(blkg->q);
+ iolat->rq_depth.queue_depth = blkg->q->nr_requests;
iolat->rq_depth.max_depth = UINT_MAX;
iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
iolat->blkiolat = blkiolat;
@@ -910,8 +1001,14 @@
{
struct iolatency_grp *iolat = pd_to_lat(pd);
struct blkcg_gq *blkg = lat_to_blkg(iolat);
+ struct blk_iolatency *blkiolat = iolat->blkiolat;
+ int ret;
- iolatency_set_min_lat_nsec(blkg, 0);
+ ret = iolatency_set_min_lat_nsec(blkg, 0);
+ if (ret == 1)
+ atomic_inc(&blkiolat->enabled);
+ if (ret == -1)
+ atomic_dec(&blkiolat->enabled);
iolatency_clear_scaling(blkg);
}
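
On non-rotational devices the iolatency grp above stops tracking mean latency and instead checks what fraction of requests in a window missed min_lat_nsec. A standalone sketch of that decision, mirroring latency_sum_ok(); the 10% ratio and the floor of one are the values hardcoded there:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct percentile_stats {
	uint64_t total;
	uint64_t missed;
};

/* the window is "ok" while fewer than max(total / 10, 1) requests missed */
static bool ssd_sum_ok(const struct percentile_stats *ps)
{
	uint64_t thresh = ps->total / 10;

	if (thresh < 1)
		thresh = 1;
	return ps->missed < thresh;
}

int main(void)
{
	struct percentile_stats ok  = { .total = 95, .missed = 8 };
	struct percentile_stats bad = { .total = 95, .missed = 9 };

	/* thresh = max(95 / 10, 1) = 9: 8 misses pass, 9 trigger scaling */
	printf("%d %d\n", ssd_sum_ok(&ok), ssd_sum_ok(&bad));
	return 0;
}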
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 1f196cf..5f2c429 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -10,8 +10,7 @@
#include "blk.h"
-static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
- gfp_t gfp)
+struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
struct bio *new = bio_alloc(gfp, nr_pages);
@@ -52,24 +51,23 @@
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
+ if (!nr_sects)
+ return -EINVAL;
+
while (nr_sects) {
- unsigned int req_sects = nr_sects;
- sector_t end_sect;
+ sector_t req_sects = min_t(sector_t, nr_sects,
+ bio_allowed_max_sectors(q));
- if (!req_sects)
- goto fail;
- req_sects = min(req_sects, bio_allowed_max_sectors(q));
+ WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
- end_sect = sector + req_sects;
-
- bio = next_bio(bio, 0, gfp_mask);
+ bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, op, 0);
bio->bi_iter.bi_size = req_sects << 9;
+ sector += req_sects;
nr_sects -= req_sects;
- sector = end_sect;
/*
* We can loop for a long time in here, if someone does
@@ -82,14 +80,6 @@
*biop = bio;
return 0;
-
-fail:
- if (bio) {
- submit_bio_wait(bio);
- bio_put(bio);
- }
- *biop = NULL;
- return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
@@ -164,7 +154,7 @@
max_write_same_sectors = bio_allowed_max_sectors(q);
while (nr_sects) {
- bio = next_bio(bio, 1, gfp_mask);
+ bio = blk_next_bio(bio, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio->bi_vcnt = 1;
@@ -240,7 +230,7 @@
return -EOPNOTSUPP;
while (nr_sects) {
- bio = next_bio(bio, 0, gfp_mask);
+ bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE_ZEROES;
@@ -291,8 +281,8 @@
return -EPERM;
while (nr_sects != 0) {
- bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
- gfp_mask);
+ bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
+ gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
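
The discard rework above reduces __blkdev_issue_discard() to a plain chunking loop: clamp each bio to the queue limit, advance the cursor, repeat until the range is exhausted. The same arithmetic as a standalone sketch, with a fixed max_chunk standing in for bio_allowed_max_sectors():

#include <stdint.h>
#include <stdio.h>

/* split [sector, sector + nr_sects) into chunks of at most max_chunk */
static void issue_chunks(uint64_t sector, uint64_t nr_sects,
			 uint64_t max_chunk)
{
	while (nr_sects) {
		uint64_t req_sects =
			nr_sects < max_chunk ? nr_sects : max_chunk;

		printf("discard [%llu, %llu)\n",
		       (unsigned long long)sector,
		       (unsigned long long)(sector + req_sects));
		sector += req_sects;
		nr_sects -= req_sects;
	}
}

int main(void)
{
	issue_chunks(0, 10000, 4096);	/* emits 4096 + 4096 + 1808 */
	return 0;
}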
diff --git a/block/blk-map.c b/block/blk-map.c
index db9373b..3a62e47 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -18,13 +18,19 @@
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
struct bio *orig_bio = *bio;
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ unsigned int nr_segs = 0;
blk_queue_bounce(rq->q, bio);
+ bio_for_each_bvec(bv, *bio, iter)
+ nr_segs++;
+
if (!rq->bio) {
- blk_rq_bio_prep(rq->q, rq, *bio);
+ blk_rq_bio_prep(rq, *bio, nr_segs);
} else {
- if (!ll_back_merge_fn(rq->q, rq, *bio)) {
+ if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
if (orig_bio != *bio) {
bio_put(*bio);
*bio = orig_bio;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2e04219..48e6725 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,6 +12,52 @@
#include "blk.h"
+static inline bool bio_will_gap(struct request_queue *q,
+ struct request *prev_rq, struct bio *prev, struct bio *next)
+{
+ struct bio_vec pb, nb;
+
+ if (!bio_has_data(prev) || !queue_virt_boundary(q))
+ return false;
+
+ /*
+ * Don't merge if the 1st bio starts with non-zero offset, otherwise it
+ * is quite difficult to respect the sg gap limit. We work hard to
+ * merge a huge number of small single bios in case of mkfs.
+ */
+ if (prev_rq)
+ bio_get_first_bvec(prev_rq->bio, &pb);
+ else
+ bio_get_first_bvec(prev, &pb);
+ if (pb.bv_offset & queue_virt_boundary(q))
+ return true;
+
+ /*
+ * We don't need to worry about the situation that the merged segment
+ * ends in unaligned virt boundary:
+ *
+ * - if 'pb' ends aligned, the merged segment ends aligned
+ * - if 'pb' ends unaligned, the next bio must include
+ * one single bvec of 'nb', otherwise the 'nb' can't
+ * merge with 'pb'
+ */
+ bio_get_last_bvec(prev, &pb);
+ bio_get_first_bvec(next, &nb);
+ if (biovec_phys_mergeable(q, &pb, &nb))
+ return false;
+ return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+}
+
+static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+{
+ return bio_will_gap(req->q, req, req->biotail, bio);
+}
+
+static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
+{
+ return bio_will_gap(req->q, NULL, bio, req->bio);
+}
+
static struct bio *blk_bio_discard_split(struct request_queue *q,
struct bio *bio,
struct bio_set *bs,
@@ -59,7 +105,7 @@
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
- *nsegs = 1;
+ *nsegs = 0;
if (!q->limits.max_write_zeroes_sectors)
return NULL;
@@ -86,18 +132,111 @@
return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}
+/*
+ * Return the maximum number of sectors from the start of a bio that may be
+ * submitted as a single request to a block device. If enough sectors remain,
+ * align the end to the physical block size. Otherwise align the end to the
+ * logical block size. This approach minimizes the number of non-aligned
+ * requests that are submitted to a block device if the start of a bio is not
+ * aligned to a physical block boundary.
+ */
static inline unsigned get_max_io_size(struct request_queue *q,
struct bio *bio)
{
unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
- unsigned mask = queue_logical_block_size(q) - 1;
+ unsigned max_sectors = sectors;
+ unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
+ unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
+ unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);
- /* aligned to logical block size */
- sectors &= ~(mask >> 9);
+ max_sectors += start_offset;
+ max_sectors &= ~(pbs - 1);
+ if (max_sectors > start_offset)
+ return max_sectors - start_offset;
- return sectors;
+ return sectors & ~(lbs - 1);
}
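+
+/*
+ * Worked example for get_max_io_size() (illustrative numbers): with
+ * 4 KiB physical blocks (pbs = 8 sectors), a bio starting at sector 3
+ * and a queue limit of 256 sectors, start_offset = 3 and
+ * max_sectors = (256 + 3) & ~7 = 256, so the function returns
+ * 256 - 3 = 253 sectors and the split ends exactly on a physical
+ * block boundary.
+ */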
+static unsigned get_max_segment_size(const struct request_queue *q,
+ unsigned offset)
+{
+ unsigned long mask = queue_segment_boundary(q);
+
+ /* default segment boundary mask means no boundary limit */
+ if (mask == BLK_SEG_BOUNDARY_MASK)
+ return queue_max_segment_size(q);
+
+ return min_t(unsigned long, mask - (mask & offset) + 1,
+ queue_max_segment_size(q));
+}
+
+/**
+ * bvec_split_segs - verify whether or not a bvec should be split in the middle
+ * @q: [in] request queue associated with the bio associated with @bv
+ * @bv: [in] bvec to examine
+ * @nsegs: [in,out] Number of segments in the bio being built. Incremented
+ * by the number of segments from @bv that may be appended to that
+ * bio without exceeding @max_segs
+ * @sectors: [in,out] Number of sectors in the bio being built. Incremented
+ * by the number of sectors from @bv that may be appended to that
+ * bio without exceeding @max_sectors
+ * @max_segs: [in] upper bound for *@nsegs
+ * @max_sectors: [in] upper bound for *@sectors
+ *
+ * When splitting a bio, it can happen that a bvec is encountered that is too
+ * big to fit in a single segment and hence that it has to be split in the
+ * middle. This function verifies whether or not that should happen. The value
+ * %true is returned if and only if appending the entire @bv to a bio with
+ * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
+ * the block driver.
+ */
+static bool bvec_split_segs(const struct request_queue *q,
+ const struct bio_vec *bv, unsigned *nsegs,
+ unsigned *sectors, unsigned max_segs,
+ unsigned max_sectors)
+{
+ unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
+ unsigned len = min(bv->bv_len, max_len);
+ unsigned total_len = 0;
+ unsigned seg_size = 0;
+
+ while (len && *nsegs < max_segs) {
+ seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
+ seg_size = min(seg_size, len);
+
+ (*nsegs)++;
+ total_len += seg_size;
+ len -= seg_size;
+
+ if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
+ break;
+ }
+
+ *sectors += total_len >> 9;
+
+ /* tell the caller to split the bvec if it is too big to fit */
+ return len > 0 || bv->bv_len > max_len;
+}
+
+/**
+ * blk_bio_segment_split - split a bio in two bios
+ * @q: [in] request queue pointer
+ * @bio: [in] bio to be split
+ * @bs: [in] bio set to allocate the clone from
+ * @segs: [out] number of segments in the bio with the first half of the sectors
+ *
+ * Clone @bio, update the bi_iter of the clone to represent the first sectors
+ * of @bio and update @bio->bi_iter to represent the remaining sectors. The
+ * following is guaranteed for the cloned bio:
+ * - That it has at most get_max_io_size(@q, @bio) sectors.
+ * - That it has at most queue_max_segments(@q) segments.
+ *
+ * Except for discard requests the cloned bio will point at the bi_io_vec of
+ * the original bio. It is the responsibility of the caller to ensure that the
+ * original bio is not freed before the cloned bio. The caller is also
+ * responsible for ensuring that @bs is only destroyed after processing of the
+ * split bio has finished.
+ */
static struct bio *blk_bio_segment_split(struct request_queue *q,
struct bio *bio,
struct bio_set *bs,
@@ -105,13 +244,11 @@
{
struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
- unsigned seg_size = 0, nsegs = 0, sectors = 0;
- unsigned front_seg_size = bio->bi_seg_front_size;
- bool do_split = true;
- struct bio *new = NULL;
+ unsigned nsegs = 0, sectors = 0;
const unsigned max_sectors = get_max_io_size(q, bio);
+ const unsigned max_segs = queue_max_segments(q);
- bio_for_each_segment(bv, bio, iter) {
+ bio_for_each_bvec(bv, bio, iter) {
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
@@ -119,94 +256,63 @@
if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
goto split;
- if (sectors + (bv.bv_len >> 9) > max_sectors) {
- /*
- * Consider this a new segment if we're splitting in
- * the middle of this vector.
- */
- if (nsegs < queue_max_segments(q) &&
- sectors < max_sectors) {
- nsegs++;
- sectors = max_sectors;
- }
- goto split;
- }
-
- if (bvprvp && blk_queue_cluster(q)) {
- if (seg_size + bv.bv_len > queue_max_segment_size(q))
- goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
- goto new_segment;
-
- seg_size += bv.bv_len;
- bvprv = bv;
- bvprvp = &bvprv;
+ if (nsegs < max_segs &&
+ sectors + (bv.bv_len >> 9) <= max_sectors &&
+ bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
+ nsegs++;
sectors += bv.bv_len >> 9;
-
- continue;
- }
-new_segment:
- if (nsegs == queue_max_segments(q))
+ } else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
+ max_sectors)) {
goto split;
+ }
- if (nsegs == 1 && seg_size > front_seg_size)
- front_seg_size = seg_size;
-
- nsegs++;
bvprv = bv;
bvprvp = &bvprv;
- seg_size = bv.bv_len;
- sectors += bv.bv_len >> 9;
-
}
- do_split = false;
+ *segs = nsegs;
+ return NULL;
split:
*segs = nsegs;
-
- if (do_split) {
- new = bio_split(bio, sectors, GFP_NOIO, bs);
- if (new)
- bio = new;
- }
-
- if (nsegs == 1 && seg_size > front_seg_size)
- front_seg_size = seg_size;
- bio->bi_seg_front_size = front_seg_size;
- if (seg_size > bio->bi_seg_back_size)
- bio->bi_seg_back_size = seg_size;
-
- return do_split ? new : NULL;
+ return bio_split(bio, sectors, GFP_NOIO, bs);
}
-void blk_queue_split(struct request_queue *q, struct bio **bio)
+/**
+ * __blk_queue_split - split a bio and submit the second half
+ * @q: [in] request queue pointer
+ * @bio: [in, out] bio to be split
+ * @nr_segs: [out] number of segments in the first bio
+ *
+ * Split a bio into two bios, chain the two bios, submit the second half and
+ * store a pointer to the first half in *@bio. If the second bio is still too
+ * big it will be split by a recursive call to this function. Since this
+ * function may allocate a new bio from @q->bio_split, it is the responsibility
+ * of the caller to ensure that @q is only released after processing of the
+ * split bio has finished.
+ */
+void __blk_queue_split(struct request_queue *q, struct bio **bio,
+ unsigned int *nr_segs)
{
- struct bio *split, *res;
- unsigned nsegs;
+ struct bio *split;
switch (bio_op(*bio)) {
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
- split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
+ split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
break;
case REQ_OP_WRITE_ZEROES:
- split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
+ split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
+ nr_segs);
break;
case REQ_OP_WRITE_SAME:
- split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
+ split = blk_bio_write_same_split(q, *bio, &q->bio_split,
+ nr_segs);
break;
default:
- split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
+ split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
break;
}
- /* physical segments can be figured out during splitting */
- res = split ? split : *bio;
- res->bi_phys_segments = nsegs;
- bio_set_flag(res, BIO_SEG_VALID);
-
if (split) {
/* there is no chance to merge the split bio */
split->bi_opf |= REQ_NOMERGE;
@@ -227,22 +333,37 @@
*bio = split;
}
}
+
+/**
+ * blk_queue_split - split a bio and submit the second half
+ * @q: [in] request queue pointer
+ * @bio: [in, out] bio to be split
+ *
+ * Split a bio into two bios, chain the two bios, submit the second half, and
+ * store a pointer to the first half in *@bio. Since this function may allocate
+ * a new bio from @q->bio_split, it is the responsibility of the caller to
+ * ensure that @q is only released after processing of the split bio has
+ * finished.
+ */
+void blk_queue_split(struct request_queue *q, struct bio **bio)
+{
+ unsigned int nr_segs;
+
+ __blk_queue_split(q, bio, &nr_segs);
+}
EXPORT_SYMBOL(blk_queue_split);
-static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
- struct bio *bio,
- bool no_sg_merge)
+unsigned int blk_recalc_rq_segments(struct request *rq)
{
- struct bio_vec bv, bvprv = { NULL };
- int cluster, prev = 0;
- unsigned int seg_size, nr_phys_segs;
- struct bio *fbio, *bbio;
- struct bvec_iter iter;
+ unsigned int nr_phys_segs = 0;
+ unsigned int nr_sectors = 0;
+ struct req_iterator iter;
+ struct bio_vec bv;
- if (!bio)
+ if (!rq->bio)
return 0;
- switch (bio_op(bio)) {
+ switch (bio_op(rq->bio)) {
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
@@ -251,181 +372,125 @@
return 1;
}
- fbio = bio;
- cluster = blk_queue_cluster(q);
- seg_size = 0;
- nr_phys_segs = 0;
- for_each_bio(bio) {
- bio_for_each_segment(bv, bio, iter) {
- /*
- * If SG merging is disabled, each bio vector is
- * a segment
- */
- if (no_sg_merge)
- goto new_segment;
-
- if (prev && cluster) {
- if (seg_size + bv.bv_len
- > queue_max_segment_size(q))
- goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
- goto new_segment;
-
- seg_size += bv.bv_len;
- bvprv = bv;
- continue;
- }
-new_segment:
- if (nr_phys_segs == 1 && seg_size >
- fbio->bi_seg_front_size)
- fbio->bi_seg_front_size = seg_size;
-
- nr_phys_segs++;
- bvprv = bv;
- prev = 1;
- seg_size = bv.bv_len;
- }
- bbio = bio;
- }
-
- if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
- fbio->bi_seg_front_size = seg_size;
- if (seg_size > bbio->bi_seg_back_size)
- bbio->bi_seg_back_size = seg_size;
-
+ rq_for_each_bvec(bv, rq, iter)
+ bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
+ UINT_MAX, UINT_MAX);
return nr_phys_segs;
}
-void blk_recalc_rq_segments(struct request *rq)
+static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
+ struct scatterlist *sglist)
{
- bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
- &rq->q->queue_flags);
-
- rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
- no_sg_merge);
-}
-
-void blk_recount_segments(struct request_queue *q, struct bio *bio)
-{
- unsigned short seg_cnt;
-
- /* estimate segment number by bi_vcnt for non-cloned bio */
- if (bio_flagged(bio, BIO_CLONED))
- seg_cnt = bio_segments(bio);
- else
- seg_cnt = bio->bi_vcnt;
-
- if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
- (seg_cnt < queue_max_segments(q)))
- bio->bi_phys_segments = seg_cnt;
- else {
- struct bio *nxt = bio->bi_next;
-
- bio->bi_next = NULL;
- bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
- bio->bi_next = nxt;
- }
-
- bio_set_flag(bio, BIO_SEG_VALID);
-}
-EXPORT_SYMBOL(blk_recount_segments);
-
-static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
- struct bio *nxt)
-{
- struct bio_vec end_bv = { NULL }, nxt_bv;
-
- if (!blk_queue_cluster(q))
- return 0;
-
- if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
- queue_max_segment_size(q))
- return 0;
-
- if (!bio_has_data(bio))
- return 1;
-
- bio_get_last_bvec(bio, &end_bv);
- bio_get_first_bvec(nxt, &nxt_bv);
-
- if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
- return 0;
+ if (!*sg)
+ return sglist;
/*
- * bio and nxt are contiguous in memory; check if the queue allows
- * these two to be merged into one
+ * If the driver previously mapped a shorter list, we could see a
+ * termination bit prematurely unless it fully inits the sg table
+ * on each mapping. We KNOW that there must be more entries here
+ * or the driver would be buggy, so force clear the termination bit
+ * to avoid doing a full sg_init_table() in drivers for each command.
*/
- if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
- return 1;
-
- return 0;
+ sg_unmark_end(*sg);
+ return sg_next(*sg);
}
-static inline void
-__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
- struct scatterlist *sglist, struct bio_vec *bvprv,
- struct scatterlist **sg, int *nsegs, int *cluster)
+static unsigned blk_bvec_map_sg(struct request_queue *q,
+ struct bio_vec *bvec, struct scatterlist *sglist,
+ struct scatterlist **sg)
+{
+ unsigned nbytes = bvec->bv_len;
+ unsigned nsegs = 0, total = 0;
+
+ while (nbytes > 0) {
+ unsigned offset = bvec->bv_offset + total;
+ unsigned len = min(get_max_segment_size(q, offset), nbytes);
+ struct page *page = bvec->bv_page;
+
+ /*
+ * Unfortunately a fair number of drivers barf on scatterlists
+ * that have an offset larger than PAGE_SIZE, despite other
+ * subsystems dealing with that invariant just fine. For now
+ * stick to the legacy format where we never present those from
+ * the block layer, but the code below should be removed once
+ * these offenders (mostly MMC/SD drivers) are fixed.
+ */
+ page += (offset >> PAGE_SHIFT);
+ offset &= ~PAGE_MASK;
+
+ *sg = blk_next_sg(sg, sglist);
+ sg_set_page(*sg, page, len, offset);
+
+ total += len;
+ nbytes -= len;
+ nsegs++;
+ }
+
+ return nsegs;
+}
+
+static inline int __blk_bvec_map_sg(struct bio_vec bv,
+ struct scatterlist *sglist, struct scatterlist **sg)
+{
+ *sg = blk_next_sg(sg, sglist);
+ sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
+ return 1;
+}
+
+/* only try to merge bvecs into one sg if they are from two bios */
+static inline bool
+__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
+ struct bio_vec *bvprv, struct scatterlist **sg)
{
int nbytes = bvec->bv_len;
- if (*sg && *cluster) {
- if ((*sg)->length + nbytes > queue_max_segment_size(q))
- goto new_segment;
+ if (!*sg)
+ return false;
- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
- goto new_segment;
+ if ((*sg)->length + nbytes > queue_max_segment_size(q))
+ return false;
- (*sg)->length += nbytes;
- } else {
-new_segment:
- if (!*sg)
- *sg = sglist;
- else {
- /*
- * If the driver previously mapped a shorter
- * list, we could see a termination bit
- * prematurely unless it fully inits the sg
- * table on each mapping. We KNOW that there
- * must be more entries here or the driver
- * would be buggy, so force clear the
- * termination bit to avoid doing a full
- * sg_init_table() in drivers for each command.
- */
- sg_unmark_end(*sg);
- *sg = sg_next(*sg);
- }
+ if (!biovec_phys_mergeable(q, bvprv, bvec))
+ return false;
- sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
- (*nsegs)++;
- }
- *bvprv = *bvec;
-}
+ (*sg)->length += nbytes;
-static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
- struct scatterlist *sglist, struct scatterlist **sg)
-{
- *sg = sglist;
- sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
- return 1;
+ return true;
}
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist,
struct scatterlist **sg)
{
- struct bio_vec bvec, bvprv = { NULL };
+ struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
struct bvec_iter iter;
- int cluster = blk_queue_cluster(q), nsegs = 0;
+ int nsegs = 0;
+ bool new_bio = false;
- for_each_bio(bio)
- bio_for_each_segment(bvec, bio, iter)
- __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
- &nsegs, &cluster);
+ for_each_bio(bio) {
+ bio_for_each_bvec(bvec, bio, iter) {
+ /*
+ * Only try to merge bvecs from two different bios, since
+ * bio-internal merging was already done when pages were
+ * added to the bio
+ */
+ if (new_bio &&
+ __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
+ goto next_bvec;
+
+ if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
+ nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
+ else
+ nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
+ next_bvec:
+ new_bio = false;
+ }
+ if (likely(bio->bi_iter.bi_size)) {
+ bvprv = bvec;
+ new_bio = true;
+ }
+ }
return nsegs;
}
@@ -441,9 +506,9 @@
int nsegs = 0;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
+ nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
- nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
+ nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
else if (rq->bio)
nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
@@ -483,16 +548,13 @@
}
EXPORT_SYMBOL(blk_rq_map_sg);
-static inline int ll_new_hw_segment(struct request_queue *q,
- struct request *req,
- struct bio *bio)
+static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
+ unsigned int nr_phys_segs)
{
- int nr_phys_segs = bio_phys_segments(q, bio);
-
- if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
+ if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
goto no_merge;
- if (blk_integrity_merge_bio(q, req, bio) == false)
+ if (blk_integrity_merge_bio(req->q, req, bio) == false)
goto no_merge;
/*
@@ -503,12 +565,11 @@
return 1;
no_merge:
- req_set_nomerge(q, req);
+ req_set_nomerge(req->q, req);
return 0;
}
-int ll_back_merge_fn(struct request_queue *q, struct request *req,
- struct bio *bio)
+int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
if (req_gap_back_merge(req, bio))
return 0;
@@ -517,21 +578,15 @@
return 0;
if (blk_rq_sectors(req) + bio_sectors(bio) >
blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
- req_set_nomerge(q, req);
+ req_set_nomerge(req->q, req);
return 0;
}
- if (!bio_flagged(req->biotail, BIO_SEG_VALID))
- blk_recount_segments(q, req->biotail);
- if (!bio_flagged(bio, BIO_SEG_VALID))
- blk_recount_segments(q, bio);
- return ll_new_hw_segment(q, req, bio);
+ return ll_new_hw_segment(req, bio, nr_segs);
}
-int ll_front_merge_fn(struct request_queue *q, struct request *req,
- struct bio *bio)
+int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
-
if (req_gap_front_merge(req, bio))
return 0;
if (blk_integrity_rq(req) &&
@@ -539,26 +594,11 @@
return 0;
if (blk_rq_sectors(req) + bio_sectors(bio) >
blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
- req_set_nomerge(q, req);
+ req_set_nomerge(req->q, req);
return 0;
}
- if (!bio_flagged(bio, BIO_SEG_VALID))
- blk_recount_segments(q, bio);
- if (!bio_flagged(req->bio, BIO_SEG_VALID))
- blk_recount_segments(q, req->bio);
- return ll_new_hw_segment(q, req, bio);
-}
-
-/*
- * blk-mq uses req->special to carry normal driver per-request payload, it
- * does not indicate a prepared command that we cannot merge with.
- */
-static bool req_no_special_merge(struct request *req)
-{
- struct request_queue *q = req->q;
-
- return !q->mq_ops && req->special;
+ return ll_new_hw_segment(req, bio, nr_segs);
}
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
@@ -583,15 +623,6 @@
struct request *next)
{
int total_phys_segments;
- unsigned int seg_size =
- req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
-
- /*
- * First check if the either of the requests are re-queued
- * requests. Can't merge them if they are.
- */
- if (req_no_special_merge(req) || req_no_special_merge(next))
- return 0;
if (req_gap_back_merge(req, next->bio))
return 0;
@@ -604,14 +635,6 @@
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
- if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
- if (req->nr_phys_segments == 1)
- req->bio->bi_seg_front_size = seg_size;
- if (next->nr_phys_segments == 1)
- next->biotail->bi_seg_back_size = seg_size;
- total_phys_segments--;
- }
-
if (total_phys_segments > queue_max_segments(q))
return 0;
@@ -657,18 +680,42 @@
{
if (blk_do_io_stat(req)) {
struct hd_struct *part;
- int cpu;
- cpu = part_stat_lock();
+ part_stat_lock();
part = req->part;
- part_round_stats(req->q, cpu, part);
part_dec_in_flight(req->q, part, rq_data_dir(req));
hd_struct_put(part);
part_stat_unlock();
}
}
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver takes every bio
+ * as a range and sends them to the controller together. The
+ * ranges need not be contiguous.
+ * Otherwise, the bios/requests will be handled the same as the
+ * others, which should be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+ if (req_op(req) == REQ_OP_DISCARD &&
+ queue_max_discard_segments(req->q) > 1)
+ return true;
+ return false;
+}
+
+static enum elv_merge blk_try_req_merge(struct request *req,
+ struct request *next)
+{
+ if (blk_discard_mergable(req))
+ return ELEVATOR_DISCARD_MERGE;
+ else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
+ return ELEVATOR_BACK_MERGE;
+
+ return ELEVATOR_NO_MERGE;
+}
/*
* For non-mq, this has to be called with the request spinlock acquired.
@@ -677,24 +724,14 @@
static struct request *attempt_merge(struct request_queue *q,
struct request *req, struct request *next)
{
- if (!q->mq_ops)
- lockdep_assert_held(q->queue_lock);
-
if (!rq_mergeable(req) || !rq_mergeable(next))
return NULL;
if (req_op(req) != req_op(next))
return NULL;
- /*
- * not contiguous
- */
- if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
- return NULL;
-
if (rq_data_dir(req) != rq_data_dir(next)
- || req->rq_disk != next->rq_disk
- || req_no_special_merge(next))
+ || req->rq_disk != next->rq_disk)
return NULL;
if (req_op(req) == REQ_OP_WRITE_SAME &&
@@ -708,6 +745,9 @@
if (req->write_hint != next->write_hint)
return NULL;
+ if (req->ioprio != next->ioprio)
+ return NULL;
+
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
@@ -715,11 +755,19 @@
* counts here. Handle DISCARDs separately, as they
* have separate settings.
*/
- if (req_op(req) == REQ_OP_DISCARD) {
+
+ switch (blk_try_req_merge(req, next)) {
+ case ELEVATOR_DISCARD_MERGE:
if (!req_attempt_discard_merge(q, req, next))
return NULL;
- } else if (!ll_merge_requests_fn(q, req, next))
+ break;
+ case ELEVATOR_BACK_MERGE:
+ if (!ll_merge_requests_fn(q, req, next))
+ return NULL;
+ break;
+ default:
return NULL;
+ }
/*
* If failfast settings disagree or any of the two is already
@@ -747,7 +795,7 @@
req->__data_len += blk_rq_bytes(next);
- if (req_op(req) != REQ_OP_DISCARD)
+ if (!blk_discard_mergable(req))
elv_merge_requests(q, req, next);
/*
@@ -755,10 +803,6 @@
*/
blk_account_io_merge(next);
- req->ioprio = ioprio_best(req->ioprio, next->ioprio);
- if (blk_rq_cpu_valid(next))
- req->cpu = next->cpu;
-
/*
* ownership of bio passed from next to req, return 'next' for
* the caller to free
@@ -790,16 +834,11 @@
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next)
{
- struct elevator_queue *e = q->elevator;
struct request *free;
- if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
- if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
- return 0;
-
free = attempt_merge(q, rq, next);
if (free) {
- __blk_put_request(q, free);
+ blk_put_request(free);
return 1;
}
@@ -818,8 +857,8 @@
if (bio_data_dir(bio) != rq_data_dir(rq))
return false;
- /* must be same device and not a special request */
- if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
+ /* must be same device */
+ if (rq->rq_disk != bio->bi_disk)
return false;
/* only merge integrity protected bio into ditto rq */
@@ -838,13 +877,15 @@
if (rq->write_hint != bio->bi_write_hint)
return false;
+ if (rq->ioprio != bio_prio(bio))
+ return false;
+
return true;
}
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
- if (req_op(rq) == REQ_OP_DISCARD &&
- queue_max_discard_segments(rq->q) > 1)
+ if (blk_discard_mergable(rq))
return ELEVATOR_DISCARD_MERGE;
else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
return ELEVATOR_BACK_MERGE;
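
get_max_segment_size() above bounds every segment by both the queue's maximum segment size and its segment boundary mask: the usable length at a given offset is mask - (mask & offset) + 1. A standalone sketch of that math and of how bvec_split_segs() applies it to count the hardware segments of one large bvec; the boundary and size limits are illustrative:

#include <stdio.h>

#define SEG_BOUNDARY_MASK	0xffffU		/* 64 KiB boundary */
#define MAX_SEGMENT_SIZE	0x10000U	/* 64 KiB */

/* usable segment length at offset, as in get_max_segment_size() */
static unsigned max_seg_len(unsigned offset)
{
	unsigned to_boundary =
		SEG_BOUNDARY_MASK - (SEG_BOUNDARY_MASK & offset) + 1;

	return to_boundary < MAX_SEGMENT_SIZE ?
		to_boundary : MAX_SEGMENT_SIZE;
}

/* count segments for one buffer, the way bvec_split_segs() walks a bvec */
static unsigned count_segs(unsigned offset, unsigned len)
{
	unsigned nsegs = 0;

	while (len) {
		unsigned seg = max_seg_len(offset);

		if (seg > len)
			seg = len;
		offset += seg;
		len -= seg;
		nsegs++;
	}
	return nsegs;
}

int main(void)
{
	/* 128 KiB starting 4 KiB past a boundary: 60K + 64K + 4K = 3 segs */
	printf("%u\n", count_segs(4096, 128 * 1024));
	return 0;
}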
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 3eb169f..0157f2b 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CPU <-> hardware queue mapping helpers
*
@@ -14,9 +15,10 @@
#include "blk.h"
#include "blk-mq.h"
-static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
+static int queue_index(struct blk_mq_queue_map *qmap,
+ unsigned int nr_queues, const int q)
{
- return cpu % nr_queues;
+ return qmap->queue_offset + (q % nr_queues);
}
static int get_first_sibling(unsigned int cpu)
@@ -30,25 +32,40 @@
return cpu;
}
-int blk_mq_map_queues(struct blk_mq_tag_set *set)
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
- unsigned int *map = set->mq_map;
- unsigned int nr_queues = set->nr_hw_queues;
- unsigned int cpu, first_sibling;
+ unsigned int *map = qmap->mq_map;
+ unsigned int nr_queues = qmap->nr_queues;
+ unsigned int cpu, first_sibling, q = 0;
+
+ for_each_possible_cpu(cpu)
+ map[cpu] = -1;
+
+ /*
+ * Spread queues among present CPUs first, to minimize the number
+ * of dead queues, i.e. queues that end up mapped only to non-present CPUs.
+ */
+ for_each_present_cpu(cpu) {
+ if (q >= nr_queues)
+ break;
+ map[cpu] = queue_index(qmap, nr_queues, q++);
+ }
for_each_possible_cpu(cpu) {
+ if (map[cpu] != -1)
+ continue;
/*
* First do sequential mapping between CPUs and queues.
* In case we still have CPUs to map, and we have some number of
- * threads per cores then map sibling threads to the same queue for
- * performace optimizations.
+ * threads per core, then map sibling threads to the same queue
+ * for performance optimizations.
*/
- if (cpu < nr_queues) {
- map[cpu] = cpu_to_queue_index(nr_queues, cpu);
+ if (q < nr_queues) {
+ map[cpu] = queue_index(qmap, nr_queues, q++);
} else {
first_sibling = get_first_sibling(cpu);
if (first_sibling == cpu)
- map[cpu] = cpu_to_queue_index(nr_queues, cpu);
+ map[cpu] = queue_index(qmap, nr_queues, q++);
else
map[cpu] = map[first_sibling];
}
@@ -58,16 +75,20 @@
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
-/*
+/**
+ * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
+ * @qmap: CPU to hardware queue map.
+ * @index: hardware queue index.
+ *
* We have no quick way of doing reverse lookups. This is only used at
* queue init time, so runtime isn't important.
*/
-int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
+int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
int i;
for_each_possible_cpu(i) {
- if (index == mq_map[i])
+ if (index == qmap->mq_map[i])
return local_memory_node(cpu_to_node(i));
}
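
blk_mq_map_queues() above now runs two passes: seed the queues over present CPUs first, then fill in the remaining possible CPUs. A reduced model of those passes; the CPU counts are illustrative and the sibling-sharing branch is omitted:

#include <stdio.h>

#define NR_CPUS		8
#define NR_QUEUES	3

int main(void)
{
	int map[NR_CPUS], cpu, q = 0;
	int present = 4;	/* CPUs 0-3 present, 4-7 only possible */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		map[cpu] = -1;

	/* pass 1: spread the queues over present CPUs first */
	for (cpu = 0; cpu < present && q < NR_QUEUES; cpu++)
		map[cpu] = q++ % NR_QUEUES;

	/* pass 2: map whatever is still unmapped, continuing round-robin */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (map[cpu] == -1)
			map[cpu] = q++ % NR_QUEUES;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> queue %d\n", cpu, map[cpu]);
	return 0;
}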
diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c
index fb2c82c..038cb62 100644
--- a/block/blk-mq-debugfs-zoned.c
+++ b/block/blk-mq-debugfs-zoned.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
- *
- * This file is released under the GPL.
*/
#include <linux/blkdev.h>
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index cb1e6cf..b3f2ba4 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
@@ -23,11 +12,12 @@
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
+#include "blk-rq-qos.h"
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
if (stat->nr_samples) {
- seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
+ seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
stat->nr_samples, stat->mean, stat->min, stat->max);
} else {
seq_puts(m, "samples=0");
@@ -39,13 +29,13 @@
struct request_queue *q = data;
int bucket;
- for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
- seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
- print_stat(m, &q->poll_stat[2*bucket]);
+ for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
+ seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
+ print_stat(m, &q->poll_stat[2 * bucket]);
seq_puts(m, "\n");
- seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
- print_stat(m, &q->poll_stat[2*bucket+1]);
+ seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
+ print_stat(m, &q->poll_stat[2 * bucket + 1]);
seq_puts(m, "\n");
}
return 0;
@@ -102,13 +92,18 @@
return 0;
}
+static int queue_pm_only_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+
+ seq_printf(m, "%d\n", atomic_read(&q->pm_only));
+ return 0;
+}
+
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
- QUEUE_FLAG_NAME(QUEUED),
QUEUE_FLAG_NAME(STOPPED),
QUEUE_FLAG_NAME(DYING),
- QUEUE_FLAG_NAME(BYPASS),
- QUEUE_FLAG_NAME(BIDI),
QUEUE_FLAG_NAME(NOMERGES),
QUEUE_FLAG_NAME(SAME_COMP),
QUEUE_FLAG_NAME(FAIL_IO),
@@ -121,18 +116,15 @@
QUEUE_FLAG_NAME(SAME_FORCE),
QUEUE_FLAG_NAME(DEAD),
QUEUE_FLAG_NAME(INIT_DONE),
- QUEUE_FLAG_NAME(NO_SG_MERGE),
QUEUE_FLAG_NAME(POLL),
QUEUE_FLAG_NAME(WC),
QUEUE_FLAG_NAME(FUA),
- QUEUE_FLAG_NAME(FLUSH_NQ),
QUEUE_FLAG_NAME(DAX),
QUEUE_FLAG_NAME(STATS),
QUEUE_FLAG_NAME(POLL_STATS),
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
QUEUE_FLAG_NAME(QUIESCED),
- QUEUE_FLAG_NAME(PREEMPT_ONLY),
};
#undef QUEUE_FLAG_NAME
@@ -209,6 +201,7 @@
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
{ "poll_stat", 0400, queue_poll_stat_show },
{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
+ { "pm_only", 0600, queue_pm_only_show, NULL },
{ "state", 0600, queue_state_show, queue_state_write },
{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
@@ -244,7 +237,6 @@
static const char *const hctx_flag_name[] = {
HCTX_FLAG_NAME(SHOULD_MERGE),
HCTX_FLAG_NAME(TAG_SHARED),
- HCTX_FLAG_NAME(SG_MERGE),
HCTX_FLAG_NAME(BLOCKING),
HCTX_FLAG_NAME(NO_SCHED),
};
@@ -269,24 +261,6 @@
return 0;
}
-#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
-static const char *const op_name[] = {
- REQ_OP_NAME(READ),
- REQ_OP_NAME(WRITE),
- REQ_OP_NAME(FLUSH),
- REQ_OP_NAME(DISCARD),
- REQ_OP_NAME(ZONE_REPORT),
- REQ_OP_NAME(SECURE_ERASE),
- REQ_OP_NAME(ZONE_RESET),
- REQ_OP_NAME(WRITE_SAME),
- REQ_OP_NAME(WRITE_ZEROES),
- REQ_OP_NAME(SCSI_IN),
- REQ_OP_NAME(SCSI_OUT),
- REQ_OP_NAME(DRV_IN),
- REQ_OP_NAME(DRV_OUT),
-};
-#undef REQ_OP_NAME
-
#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
CMD_FLAG_NAME(FAILFAST_DEV),
@@ -302,8 +276,9 @@
CMD_FLAG_NAME(PREFLUSH),
CMD_FLAG_NAME(RAHEAD),
CMD_FLAG_NAME(BACKGROUND),
- CMD_FLAG_NAME(NOUNMAP),
CMD_FLAG_NAME(NOWAIT),
+ CMD_FLAG_NAME(NOUNMAP),
+ CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME
@@ -311,7 +286,6 @@
static const char *const rqf_name[] = {
RQF_NAME(SORTED),
RQF_NAME(STARTED),
- RQF_NAME(QUEUED),
RQF_NAME(SOFTBARRIER),
RQF_NAME(FLUSH_SEQ),
RQF_NAME(MIXED_MERGE),
@@ -350,13 +324,14 @@
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
- const unsigned int op = rq->cmd_flags & REQ_OP_MASK;
+ const unsigned int op = req_op(rq);
+ const char *op_str = blk_op_str(op);
seq_printf(m, "%p {.op=", rq);
- if (op < ARRAY_SIZE(op_name) && op_name[op])
- seq_printf(m, "%s", op_name[op]);
+ if (strcmp(op_str, "UNKNOWN") == 0)
+ seq_printf(m, "%u", op);
else
- seq_printf(m, "%d", op);
+ seq_printf(m, "%s", op_str);
seq_puts(m, ", .cmd_flags=");
blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
ARRAY_SIZE(cmd_flag_name));
@@ -417,16 +392,18 @@
/*
* Note: the state of a request may change while this function is in progress,
- * e.g. due to a concurrent blk_mq_finish_request() call.
+ * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
+ * keep iterating requests.
*/
-static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
+static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
const struct show_busy_params *params = data;
- if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
- blk_mq_rq_state(rq) != MQ_RQ_IDLE)
+ if (rq->mq_hctx == params->hctx)
__blk_mq_debugfs_rq_show(params->m,
list_entry_rq(&rq->queuelist));
+
+ return true;
}
static int hctx_busy_show(void *data, struct seq_file *m)
@@ -440,6 +417,21 @@
return 0;
}
+static const char *const hctx_types[] = {
+ [HCTX_TYPE_DEFAULT] = "default",
+ [HCTX_TYPE_READ] = "read",
+ [HCTX_TYPE_POLL] = "poll",
+};
+
+static int hctx_type_show(void *data, struct seq_file *m)
+{
+ struct blk_mq_hw_ctx *hctx = data;
+
+ BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
+ seq_printf(m, "%s\n", hctx_types[hctx->type]);
+ return 0;
+}
+
static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
struct blk_mq_hw_ctx *hctx = data;
@@ -630,36 +622,43 @@
return 0;
}
-static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
- __acquires(&ctx->lock)
-{
- struct blk_mq_ctx *ctx = m->private;
-
- spin_lock(&ctx->lock);
- return seq_list_start(&ctx->rq_list, *pos);
+#define CTX_RQ_SEQ_OPS(name, type) \
+static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
+ __acquires(&ctx->lock) \
+{ \
+ struct blk_mq_ctx *ctx = m->private; \
+ \
+ spin_lock(&ctx->lock); \
+ return seq_list_start(&ctx->rq_lists[type], *pos); \
+} \
+ \
+static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v, \
+ loff_t *pos) \
+{ \
+ struct blk_mq_ctx *ctx = m->private; \
+ \
+ return seq_list_next(v, &ctx->rq_lists[type], pos); \
+} \
+ \
+static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v) \
+ __releases(&ctx->lock) \
+{ \
+ struct blk_mq_ctx *ctx = m->private; \
+ \
+ spin_unlock(&ctx->lock); \
+} \
+ \
+static const struct seq_operations ctx_##name##_rq_list_seq_ops = { \
+ .start = ctx_##name##_rq_list_start, \
+ .next = ctx_##name##_rq_list_next, \
+ .stop = ctx_##name##_rq_list_stop, \
+ .show = blk_mq_debugfs_rq_show, \
}
-static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
-{
- struct blk_mq_ctx *ctx = m->private;
+CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
+CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
+CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
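To make the generated names concrete: CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ), for instance, expands to ctx_read_rq_list_start()/ctx_read_rq_list_next()/ctx_read_rq_list_stop() plus the ctx_read_rq_list_seq_ops table, all walking ctx->rq_lists[HCTX_TYPE_READ] under ctx->lock.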
- return seq_list_next(v, &ctx->rq_list, pos);
-}
-
-static void ctx_rq_list_stop(struct seq_file *m, void *v)
- __releases(&ctx->lock)
-{
- struct blk_mq_ctx *ctx = m->private;
-
- spin_unlock(&ctx->lock);
-}
-
-static const struct seq_operations ctx_rq_list_seq_ops = {
- .start = ctx_rq_list_start,
- .next = ctx_rq_list_next,
- .stop = ctx_rq_list_stop,
- .show = blk_mq_debugfs_rq_show,
-};
static int ctx_dispatched_show(void *data, struct seq_file *m)
{
struct blk_mq_ctx *ctx = data;
@@ -764,8 +763,8 @@
if (attr->show)
return single_release(inode, file);
- else
- return seq_release(inode, file);
+
+ return seq_release(inode, file);
}
static const struct file_operations blk_mq_debugfs_fops = {
@@ -792,46 +791,42 @@
{"run", 0600, hctx_run_show, hctx_run_write},
{"active", 0400, hctx_active_show},
{"dispatch_busy", 0400, hctx_dispatch_busy_show},
+ {"type", 0400, hctx_type_show},
{},
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
- {"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
+ {"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
+ {"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
+ {"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
{"merged", 0600, ctx_merged_show, ctx_merged_write},
{"completed", 0600, ctx_completed_show, ctx_completed_write},
{},
};
-static bool debugfs_create_files(struct dentry *parent, void *data,
+static void debugfs_create_files(struct dentry *parent, void *data,
const struct blk_mq_debugfs_attr *attr)
{
+ if (IS_ERR_OR_NULL(parent))
+ return;
+
d_inode(parent)->i_private = data;
- for (; attr->name; attr++) {
- if (!debugfs_create_file(attr->name, attr->mode, parent,
- (void *)attr, &blk_mq_debugfs_fops))
- return false;
- }
- return true;
+ for (; attr->name; attr++)
+ debugfs_create_file(attr->name, attr->mode, parent,
+ (void *)attr, &blk_mq_debugfs_fops);
}
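Dropping the error returns follows the debugfs convention of this era: debugfs_create_dir() hands back an ERR_PTR on failure, and passing that (or NULL) as a parent to later debugfs calls is harmless, which is what the IS_ERR_OR_NULL() guard above relies on. A minimal sketch of the resulting call style (names hypothetical):

	struct dentry *dir = debugfs_create_dir("foo", NULL);	/* may be an ERR_PTR */

	debugfs_create_file("bar", 0400, dir, data, &fops);	/* still safe to call */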
-int blk_mq_debugfs_register(struct request_queue *q)
+void blk_mq_debugfs_register(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
- if (!blk_debugfs_root)
- return -ENOENT;
-
q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
blk_debugfs_root);
- if (!q->debugfs_dir)
- return -ENOMEM;
- if (!debugfs_create_files(q->debugfs_dir, q,
- blk_mq_debugfs_queue_attrs))
- goto err;
+ debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
/*
* blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
@@ -843,18 +838,20 @@
/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
queue_for_each_hw_ctx(q, hctx, i) {
- if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
- goto err;
- if (q->elevator && !hctx->sched_debugfs_dir &&
- blk_mq_debugfs_register_sched_hctx(q, hctx))
- goto err;
+ if (!hctx->debugfs_dir)
+ blk_mq_debugfs_register_hctx(q, hctx);
+ if (q->elevator && !hctx->sched_debugfs_dir)
+ blk_mq_debugfs_register_sched_hctx(q, hctx);
}
- return 0;
+ if (q->rq_qos) {
+ struct rq_qos *rqos = q->rq_qos;
-err:
- blk_mq_debugfs_unregister(q);
- return -ENOMEM;
+ while (rqos) {
+ blk_mq_debugfs_register_rqos(rqos);
+ rqos = rqos->next;
+ }
+ }
}
void blk_mq_debugfs_unregister(struct request_queue *q)
@@ -864,52 +861,32 @@
q->debugfs_dir = NULL;
}
-static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_ctx *ctx)
+static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx)
{
struct dentry *ctx_dir;
char name[20];
snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
- if (!ctx_dir)
- return -ENOMEM;
- if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
- return -ENOMEM;
-
- return 0;
+ debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}
-int blk_mq_debugfs_register_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+void blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_ctx *ctx;
char name[20];
int i;
- if (!q->debugfs_dir)
- return -ENOENT;
-
snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
- if (!hctx->debugfs_dir)
- return -ENOMEM;
- if (!debugfs_create_files(hctx->debugfs_dir, hctx,
- blk_mq_debugfs_hctx_attrs))
- goto err;
+ debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);
- hctx_for_each_ctx(hctx, ctx, i) {
- if (blk_mq_debugfs_register_ctx(hctx, ctx))
- goto err;
- }
-
- return 0;
-
-err:
- blk_mq_debugfs_unregister_hctx(hctx);
- return -ENOMEM;
+ hctx_for_each_ctx(hctx, ctx, i)
+ blk_mq_debugfs_register_ctx(hctx, ctx);
}
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -919,17 +896,13 @@
hctx->debugfs_dir = NULL;
}
-int blk_mq_debugfs_register_hctxs(struct request_queue *q)
+void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
- queue_for_each_hw_ctx(q, hctx, i) {
- if (blk_mq_debugfs_register_hctx(q, hctx))
- return -ENOMEM;
- }
-
- return 0;
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_debugfs_register_hctx(q, hctx);
}
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
@@ -941,29 +914,23 @@
blk_mq_debugfs_unregister_hctx(hctx);
}
-int blk_mq_debugfs_register_sched(struct request_queue *q)
+void blk_mq_debugfs_register_sched(struct request_queue *q)
{
struct elevator_type *e = q->elevator->type;
+ /*
+	 * If the parent directory has not been created yet, return; we will be
+	 * called again later on, and the directory/files will be created then.
+ */
if (!q->debugfs_dir)
- return -ENOENT;
+ return;
if (!e->queue_debugfs_attrs)
- return 0;
+ return;
q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
- if (!q->sched_debugfs_dir)
- return -ENOMEM;
- if (!debugfs_create_files(q->sched_debugfs_dir, q,
- e->queue_debugfs_attrs))
- goto err;
-
- return 0;
-
-err:
- blk_mq_debugfs_unregister_sched(q);
- return -ENOMEM;
+ debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}
void blk_mq_debugfs_unregister_sched(struct request_queue *q)
@@ -972,27 +939,48 @@
q->sched_debugfs_dir = NULL;
}
-int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
+{
+ debugfs_remove_recursive(rqos->debugfs_dir);
+ rqos->debugfs_dir = NULL;
+}
+
+void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
+{
+ struct request_queue *q = rqos->q;
+ const char *dir_name = rq_qos_id_to_name(rqos->id);
+
+ if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
+ return;
+
+ if (!q->rqos_debugfs_dir)
+ q->rqos_debugfs_dir = debugfs_create_dir("rqos",
+ q->debugfs_dir);
+
+ rqos->debugfs_dir = debugfs_create_dir(dir_name,
+ rqos->q->rqos_debugfs_dir);
+
+ debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
+}
+
+void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
+{
+ debugfs_remove_recursive(q->rqos_debugfs_dir);
+ q->rqos_debugfs_dir = NULL;
+}
+
+void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
struct elevator_type *e = q->elevator->type;
- if (!hctx->debugfs_dir)
- return -ENOENT;
-
if (!e->hctx_debugfs_attrs)
- return 0;
+ return;
hctx->sched_debugfs_dir = debugfs_create_dir("sched",
hctx->debugfs_dir);
- if (!hctx->sched_debugfs_dir)
- return -ENOMEM;
-
- if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
- e->hctx_debugfs_attrs))
- return -ENOMEM;
-
- return 0;
+ debugfs_create_files(hctx->sched_debugfs_dir, hctx,
+ e->hctx_debugfs_attrs);
}
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
diff --git a/block/blk-mq-debugfs.h b/block/blk-mq-debugfs.h
index a9160be..a68aa60 100644
--- a/block/blk-mq-debugfs.h
+++ b/block/blk-mq-debugfs.h
@@ -18,66 +18,77 @@
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
-int blk_mq_debugfs_register(struct request_queue *q);
+void blk_mq_debugfs_register(struct request_queue *q);
void blk_mq_debugfs_unregister(struct request_queue *q);
-int blk_mq_debugfs_register_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx);
+void blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
-int blk_mq_debugfs_register_hctxs(struct request_queue *q);
+void blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
-int blk_mq_debugfs_register_sched(struct request_queue *q);
+void blk_mq_debugfs_register_sched(struct request_queue *q);
void blk_mq_debugfs_unregister_sched(struct request_queue *q);
-int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
+
+void blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
+void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos);
+void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
#else
-static inline int blk_mq_debugfs_register(struct request_queue *q)
+static inline void blk_mq_debugfs_register(struct request_queue *q)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}
-static inline int blk_mq_debugfs_register_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
}
-static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
+static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}
-static inline int blk_mq_debugfs_register_sched(struct request_queue *q)
+static inline void blk_mq_debugfs_register_sched(struct request_queue *q)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
}
-static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+static inline void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
}
+
+static inline void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
+{
+}
+
+static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
+{
+}
+
+static inline void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
+{
+}
#endif
#ifdef CONFIG_BLK_DEBUG_FS_ZONED
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index db644ec..b595a94 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Christoph Hellwig.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/kobject.h>
#include <linux/blkdev.h>
@@ -21,7 +13,7 @@
/**
* blk_mq_pci_map_queues - provide a default queue mapping for PCI device
- * @set: tagset to provide the mapping for
+ * @qmap: CPU to hardware queue map.
* @pdev: PCI device associated with @set.
* @offset: Offset to use for the pci irq vector
*
@@ -31,26 +23,26 @@
* that maps a queue to the CPUs that have irq affinity for the corresponding
* vector.
*/
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
int offset)
{
const struct cpumask *mask;
unsigned int queue, cpu;
- for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
mask = pci_irq_get_affinity(pdev, queue + offset);
if (!mask)
goto fallback;
for_each_cpu(cpu, mask)
- set->mq_map[cpu] = queue;
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
}
return 0;
fallback:
- WARN_ON_ONCE(set->nr_hw_queues > 1);
- blk_mq_clear_mq_map(set);
+ WARN_ON_ONCE(qmap->nr_queues > 1);
+ blk_mq_clear_mq_map(qmap);
return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
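With the qmap-based signature a PCI driver maps one queue type at a time. A minimal sketch of a ->map_queues() callback under these assumptions (foo_dev and its pdev member are hypothetical driver state):

	static int foo_map_queues(struct blk_mq_tag_set *set)
	{
		struct foo_dev *foo = set->driver_data;	/* hypothetical */

		return blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					     foo->pdev, 0);
	}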
diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
index 996167f..14f968e 100644
--- a/block/blk-mq-rdma.c
+++ b/block/blk-mq-rdma.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017 Sagi Grimberg.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
@@ -16,8 +8,8 @@
/**
* blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
- * @set: tagset to provide the mapping for
- * @dev: rdma device associated with @set.
+ * @map: CPU to hardware queue map.
+ * @dev: rdma device to provide a mapping for.
* @first_vec: first interrupt vectors to use for queues (usually 0)
*
* This function assumes the rdma device @dev has at least as many available
@@ -29,24 +21,24 @@
* @set->nr_hw_queues, or @dev does not provide an affinity mask for a
* vector, we fallback to the naive mapping.
*/
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
struct ib_device *dev, int first_vec)
{
const struct cpumask *mask;
unsigned int queue, cpu;
- for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ for (queue = 0; queue < map->nr_queues; queue++) {
mask = ib_get_vector_affinity(dev, first_vec + queue);
if (!mask)
goto fallback;
for_each_cpu(cpu, mask)
- set->mq_map[cpu] = queue;
+ map->mq_map[cpu] = map->queue_offset + queue;
}
return 0;
fallback:
- return blk_mq_map_queues(set);
+ return blk_mq_map_queues(map);
}
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
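Usage mirrors the PCI helper above: an rdma block driver would call blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT], ibdev, 0) from its ->map_queues() callback; as the hunk shows, the helper falls back to the naive blk_mq_map_queues() spread whenever ib_get_vector_affinity() reports no mask.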
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 29bfe80..ca22afd 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* blk-mq scheduling framework
*
@@ -31,15 +32,22 @@
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
-void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
+void blk_mq_sched_assign_ioc(struct request *rq)
{
struct request_queue *q = rq->q;
- struct io_context *ioc = rq_ioc(bio);
+ struct io_context *ioc;
struct io_cq *icq;
- spin_lock_irq(q->queue_lock);
+ /*
+ * May not have an IO context if it's a passthrough request
+ */
+ ioc = current->io_context;
+ if (!ioc)
+ return;
+
+ spin_lock_irq(&q->queue_lock);
icq = ioc_lookup_icq(ioc, q);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (!icq) {
icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
@@ -54,13 +62,14 @@
* Mark a hardware queue as needing a restart. For shared queues, maintain
* a count of how many hardware queues are marked for restart.
*/
-static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
return;
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
+EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
@@ -85,14 +94,13 @@
do {
struct request *rq;
- if (e->type->ops.mq.has_work &&
- !e->type->ops.mq.has_work(hctx))
+ if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
break;
if (!blk_mq_get_dispatch_budget(hctx))
break;
- rq = e->type->ops.mq.dispatch_request(hctx);
+ rq = e->type->ops.dispatch_request(hctx);
if (!rq) {
blk_mq_put_dispatch_budget(hctx);
break;
@@ -110,7 +118,7 @@
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{
- unsigned idx = ctx->index_hw;
+ unsigned short idx = ctx->index_hw[hctx->type];
if (++idx == hctx->nr_ctx)
idx = 0;
@@ -163,7 +171,7 @@
{
struct request_queue *q = hctx->queue;
struct elevator_queue *e = q->elevator;
- const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
+ const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
LIST_HEAD(rq_list);
/* RCU or SRCU read lock is needed before checking quiesced flag */
@@ -216,7 +224,7 @@
}
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
- struct request **merged_request)
+ unsigned int nr_segs, struct request **merged_request)
{
struct request *rq;
@@ -224,7 +232,7 @@
case ELEVATOR_BACK_MERGE:
if (!blk_mq_sched_allow_merge(q, rq, bio))
return false;
- if (!bio_attempt_back_merge(q, rq, bio))
+ if (!bio_attempt_back_merge(rq, bio, nr_segs))
return false;
*merged_request = attempt_back_merge(q, rq);
if (!*merged_request)
@@ -233,7 +241,7 @@
case ELEVATOR_FRONT_MERGE:
if (!blk_mq_sched_allow_merge(q, rq, bio))
return false;
- if (!bio_attempt_front_merge(q, rq, bio))
+ if (!bio_attempt_front_merge(rq, bio, nr_segs))
return false;
*merged_request = attempt_front_merge(q, rq);
if (!*merged_request)
@@ -252,7 +260,7 @@
* of them.
*/
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
- struct bio *bio)
+ struct bio *bio, unsigned int nr_segs)
{
struct request *rq;
int checked = 8;
@@ -269,11 +277,13 @@
switch (blk_try_merge(rq, bio)) {
case ELEVATOR_BACK_MERGE:
if (blk_mq_sched_allow_merge(q, rq, bio))
- merged = bio_attempt_back_merge(q, rq, bio);
+ merged = bio_attempt_back_merge(rq, bio,
+ nr_segs);
break;
case ELEVATOR_FRONT_MERGE:
if (blk_mq_sched_allow_merge(q, rq, bio))
- merged = bio_attempt_front_merge(q, rq, bio);
+ merged = bio_attempt_front_merge(rq, bio,
+ nr_segs);
break;
case ELEVATOR_DISCARD_MERGE:
merged = bio_attempt_discard_merge(q, rq, bio);
@@ -295,11 +305,15 @@
* too much time checking for merges.
*/
static bool blk_mq_attempt_merge(struct request_queue *q,
- struct blk_mq_ctx *ctx, struct bio *bio)
+ struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx, struct bio *bio,
+ unsigned int nr_segs)
{
+ enum hctx_type type = hctx->type;
+
lockdep_assert_held(&ctx->lock);
- if (blk_mq_bio_list_merge(q, &ctx->rq_list, bio)) {
+ if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
ctx->rq_merged++;
return true;
}
@@ -307,27 +321,27 @@
return false;
}
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
+bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+ unsigned int nr_segs)
{
struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
bool ret = false;
+ enum hctx_type type;
- if (e && e->type->ops.mq.bio_merge) {
- blk_mq_put_ctx(ctx);
- return e->type->ops.mq.bio_merge(hctx, bio);
- }
+ if (e && e->type->ops.bio_merge)
+ return e->type->ops.bio_merge(hctx, bio, nr_segs);
+ type = hctx->type;
if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
- !list_empty_careful(&ctx->rq_list)) {
+ !list_empty_careful(&ctx->rq_lists[type])) {
/* default per sw-queue merge */
spin_lock(&ctx->lock);
- ret = blk_mq_attempt_merge(q, ctx, bio);
+ ret = blk_mq_attempt_merge(q, hctx, ctx, bio, nr_segs);
spin_unlock(&ctx->lock);
}
- blk_mq_put_ctx(ctx);
return ret;
}
@@ -367,7 +381,7 @@
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
/* flush rq in flush machinery need to be dispatched directly */
if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
@@ -380,11 +394,11 @@
if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
goto run;
- if (e && e->type->ops.mq.insert_requests) {
+ if (e && e->type->ops.insert_requests) {
LIST_HEAD(list);
list_add(&rq->queuelist, &list);
- e->type->ops.mq.insert_requests(hctx, &list, at_head);
+ e->type->ops.insert_requests(hctx, &list, at_head);
} else {
spin_lock(&ctx->lock);
__blk_mq_insert_request(hctx, rq, at_head);
@@ -396,15 +410,23 @@
blk_mq_run_hw_queue(hctx, async);
}
-void blk_mq_sched_insert_requests(struct request_queue *q,
+void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx,
struct list_head *list, bool run_queue_async)
{
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
- struct elevator_queue *e = hctx->queue->elevator;
+ struct elevator_queue *e;
+ struct request_queue *q = hctx->queue;
- if (e && e->type->ops.mq.insert_requests)
- e->type->ops.mq.insert_requests(hctx, list, false);
+ /*
+	 * blk_mq_sched_insert_requests() is called from flush plug
+	 * context only, and holds one usage counter to prevent the
+	 * queue from being released.
+ */
+ percpu_ref_get(&q->q_usage_counter);
+
+ e = hctx->queue->elevator;
+ if (e && e->type->ops.insert_requests)
+ e->type->ops.insert_requests(hctx, list, false);
else {
/*
* try to issue requests directly if the hw queue isn't
@@ -414,12 +436,14 @@
if (!hctx->dispatch_busy && !e && !run_queue_async) {
blk_mq_try_issue_list_directly(hctx, list);
if (list_empty(list))
- return;
+ goto out;
}
blk_mq_insert_requests(hctx, ctx, list);
}
blk_mq_run_hw_queue(hctx, run_queue_async);
+ out:
+ percpu_ref_put(&q->q_usage_counter);
}
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
@@ -452,14 +476,18 @@
return ret;
}
+/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
- struct blk_mq_tag_set *set = q->tag_set;
struct blk_mq_hw_ctx *hctx;
int i;
- queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_sched_free_tags(set, hctx, i);
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (hctx->sched_tags) {
+ blk_mq_free_rq_map(hctx->sched_tags);
+ hctx->sched_tags = NULL;
+ }
+ }
}
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
@@ -489,17 +517,18 @@
goto err;
}
- ret = e->ops.mq.init_sched(q, e);
+ ret = e->ops.init_sched(q, e);
if (ret)
goto err;
blk_mq_debugfs_register_sched(q);
queue_for_each_hw_ctx(q, hctx, i) {
- if (e->ops.mq.init_hctx) {
- ret = e->ops.mq.init_hctx(hctx, i);
+ if (e->ops.init_hctx) {
+ ret = e->ops.init_hctx(hctx, i);
if (ret) {
eq = q->elevator;
+ blk_mq_sched_free_requests(q);
blk_mq_exit_sched(q, eq);
kobject_put(&eq->kobj);
return ret;
@@ -511,11 +540,27 @@
return 0;
err:
+ blk_mq_sched_free_requests(q);
blk_mq_sched_tags_teardown(q);
q->elevator = NULL;
return ret;
}
+/*
+ * called from either blk_cleanup_queue() or elevator_switch(); the tagset
+ * is required for freeing requests
+ */
+void blk_mq_sched_free_requests(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (hctx->sched_tags)
+ blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
+ }
+}
+
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
struct blk_mq_hw_ctx *hctx;
@@ -523,14 +568,14 @@
queue_for_each_hw_ctx(q, hctx, i) {
blk_mq_debugfs_unregister_sched_hctx(hctx);
- if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
- e->type->ops.mq.exit_hctx(hctx, i);
+ if (e->type->ops.exit_hctx && hctx->sched_data) {
+ e->type->ops.exit_hctx(hctx, i);
hctx->sched_data = NULL;
}
}
blk_mq_debugfs_unregister_sched(q);
- if (e->type->ops.mq.exit_sched)
- e->type->ops.mq.exit_sched(e);
+ if (e->type->ops.exit_sched)
+ e->type->ops.exit_sched(e);
blk_mq_sched_tags_teardown(q);
q->elevator = NULL;
}
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 4e028ee..126021f 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -8,18 +8,20 @@
void blk_mq_sched_free_hctx_data(struct request_queue *q,
void (*exit)(struct blk_mq_hw_ctx *));
-void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio);
+void blk_mq_sched_assign_ioc(struct request *rq);
void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
- struct request **merged_request);
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
+ unsigned int nr_segs, struct request **merged_request);
+bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+ unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
+void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async);
-void blk_mq_sched_insert_requests(struct request_queue *q,
+void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx,
struct list_head *list, bool run_queue_async);
@@ -27,14 +29,16 @@
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+void blk_mq_sched_free_requests(struct request_queue *q);
static inline bool
-blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
+blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+ unsigned int nr_segs)
{
if (blk_queue_nomerges(q) || !bio_mergeable(bio))
return false;
- return __blk_mq_sched_bio_merge(q, bio);
+ return __blk_mq_sched_bio_merge(q, bio, nr_segs);
}
static inline bool
@@ -43,27 +47,18 @@
{
struct elevator_queue *e = q->elevator;
- if (e && e->type->ops.mq.allow_merge)
- return e->type->ops.mq.allow_merge(q, rq, bio);
+ if (e && e->type->ops.allow_merge)
+ return e->type->ops.allow_merge(q, rq, bio);
return true;
}
-static inline void blk_mq_sched_completed_request(struct request *rq)
+static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
struct elevator_queue *e = rq->q->elevator;
- if (e && e->type->ops.mq.completed_request)
- e->type->ops.mq.completed_request(rq);
-}
-
-static inline void blk_mq_sched_started_request(struct request *rq)
-{
- struct request_queue *q = rq->q;
- struct elevator_queue *e = q->elevator;
-
- if (e && e->type->ops.mq.started_request)
- e->type->ops.mq.started_request(rq);
+ if (e && e->type->ops.completed_request)
+ e->type->ops.completed_request(rq, now);
}
static inline void blk_mq_sched_requeue_request(struct request *rq)
@@ -71,16 +66,16 @@
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
- if (e && e->type->ops.mq.requeue_request)
- e->type->ops.mq.requeue_request(rq);
+ if (e && e->type->ops.requeue_request)
+ e->type->ops.requeue_request(rq);
}
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
struct elevator_queue *e = hctx->queue->elevator;
- if (e && e->type->ops.mq.has_work)
- return e->type->ops.mq.has_work(hctx);
+ if (e && e->type->ops.has_work)
+ return e->type->ops.has_work(hctx);
return false;
}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index aafb442..a0d3ce3 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
@@ -10,17 +11,37 @@
#include <linux/smp.h>
#include <linux/blk-mq.h>
+#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
static void blk_mq_sysfs_release(struct kobject *kobj)
{
+ struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);
+
+ free_percpu(ctxs->queue_ctx);
+ kfree(ctxs);
+}
+
+static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
+{
+ struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+
+ /* ctx->ctxs won't be released until all ctx are freed */
+ kobject_put(&ctx->ctxs->kobj);
}
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
kobj);
+
+ cancel_delayed_work_sync(&hctx->run_work);
+
+ if (hctx->flags & BLK_MQ_F_BLOCKING)
+ cleanup_srcu_struct(hctx->srcu);
+ blk_free_flush_queue(hctx->fq);
+ sbitmap_free(&hctx->ctx_map);
free_cpumask_var(hctx->cpumask);
kfree(hctx->ctxs);
kfree(hctx);
@@ -161,10 +182,6 @@
return ret;
}
-static struct attribute *default_ctx_attrs[] = {
- NULL,
-};
-
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
.attr = {.name = "nr_tags", .mode = 0444 },
.show = blk_mq_hw_sysfs_nr_tags_show,
@@ -184,6 +201,7 @@
&blk_mq_hw_sysfs_cpus.attr,
NULL,
};
+ATTRIBUTE_GROUPS(default_hw_ctx);
static const struct sysfs_ops blk_mq_sysfs_ops = {
.show = blk_mq_sysfs_show,
@@ -202,13 +220,12 @@
static struct kobj_type blk_mq_ctx_ktype = {
.sysfs_ops = &blk_mq_sysfs_ops,
- .default_attrs = default_ctx_attrs,
- .release = blk_mq_sysfs_release,
+ .release = blk_mq_ctx_sysfs_release,
};
static struct kobj_type blk_mq_hw_ktype = {
.sysfs_ops = &blk_mq_hw_sysfs_ops,
- .default_attrs = default_hw_ctx_attrs,
+ .default_groups = default_hw_ctx_groups,
.release = blk_mq_hw_sysfs_release,
};
@@ -235,7 +252,7 @@
if (!hctx->nr_ctx)
return 0;
- ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
+ ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
if (ret)
return ret;
@@ -253,13 +270,13 @@
struct blk_mq_hw_ctx *hctx;
int i;
- lockdep_assert_held(&q->sysfs_lock);
+ lockdep_assert_held(&q->sysfs_dir_lock);
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
- kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
- kobject_del(&q->mq_kobj);
+ kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
+ kobject_del(q->mq_kobj);
kobject_put(&dev->kobj);
q->mq_sysfs_init_done = false;
@@ -279,7 +296,7 @@
ctx = per_cpu_ptr(q->queue_ctx, cpu);
kobject_put(&ctx->kobj);
}
- kobject_put(&q->mq_kobj);
+ kobject_put(q->mq_kobj);
}
void blk_mq_sysfs_init(struct request_queue *q)
@@ -287,10 +304,12 @@
struct blk_mq_ctx *ctx;
int cpu;
- kobject_init(&q->mq_kobj, &blk_mq_ktype);
+ kobject_init(q->mq_kobj, &blk_mq_ktype);
for_each_possible_cpu(cpu) {
ctx = per_cpu_ptr(q->queue_ctx, cpu);
+
+ kobject_get(q->mq_kobj);
kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}
}
@@ -301,13 +320,13 @@
int ret, i;
WARN_ON_ONCE(!q->kobj.parent);
- lockdep_assert_held(&q->sysfs_lock);
+ lockdep_assert_held(&q->sysfs_dir_lock);
- ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
+ ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
if (ret < 0)
goto out;
- kobject_uevent(&q->mq_kobj, KOBJ_ADD);
+ kobject_uevent(q->mq_kobj, KOBJ_ADD);
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_register_hctx(hctx);
@@ -324,30 +343,18 @@
while (--i >= 0)
blk_mq_unregister_hctx(q->queue_hw_ctx[i]);
- kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
- kobject_del(&q->mq_kobj);
+ kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
+ kobject_del(q->mq_kobj);
kobject_put(&dev->kobj);
return ret;
}
-int blk_mq_register_dev(struct device *dev, struct request_queue *q)
-{
- int ret;
-
- mutex_lock(&q->sysfs_lock);
- ret = __blk_mq_register_dev(dev, q);
- mutex_unlock(&q->sysfs_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(blk_mq_register_dev);
-
void blk_mq_sysfs_unregister(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
- mutex_lock(&q->sysfs_lock);
+ mutex_lock(&q->sysfs_dir_lock);
if (!q->mq_sysfs_init_done)
goto unlock;
@@ -355,7 +362,7 @@
blk_mq_unregister_hctx(hctx);
unlock:
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->sysfs_dir_lock);
}
int blk_mq_sysfs_register(struct request_queue *q)
@@ -363,7 +370,7 @@
struct blk_mq_hw_ctx *hctx;
int i, ret = 0;
- mutex_lock(&q->sysfs_lock);
+ mutex_lock(&q->sysfs_dir_lock);
if (!q->mq_sysfs_init_done)
goto unlock;
@@ -374,7 +381,7 @@
}
unlock:
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->sysfs_dir_lock);
return ret;
}
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 41317c5..008388e 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Tag allocation using scalable bitmaps. Uses active queue tracking to support
* fairer distribution of tags between multiple submitters when a shared tag map
@@ -9,6 +10,7 @@
#include <linux/module.h>
#include <linux/blk-mq.h>
+#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
@@ -110,9 +112,8 @@
struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
struct sbitmap_queue *bt;
struct sbq_wait_state *ws;
- DEFINE_WAIT(wait);
+ DEFINE_SBQ_WAIT(wait);
unsigned int tag_offset;
- bool drop_ctx;
int tag;
if (data->flags & BLK_MQ_REQ_RESERVED) {
@@ -135,7 +136,6 @@
return BLK_MQ_TAG_FAIL;
ws = bt_wait_ptr(bt, data->hctx);
- drop_ctx = data->ctx == NULL;
do {
struct sbitmap_queue *bt_prev;
@@ -154,29 +154,26 @@
if (tag != -1)
break;
- prepare_to_wait_exclusive(&ws->wait, &wait,
- TASK_UNINTERRUPTIBLE);
+ sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
tag = __blk_mq_get_tag(data, bt);
if (tag != -1)
break;
- if (data->ctx)
- blk_mq_put_ctx(data->ctx);
-
bt_prev = bt;
io_schedule();
+ sbitmap_finish_wait(bt, ws, &wait);
+
data->ctx = blk_mq_get_ctx(data->q);
- data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
+ data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
+ data->ctx);
tags = blk_mq_tags_from_data(data);
if (data->flags & BLK_MQ_REQ_RESERVED)
bt = &tags->breserved_tags;
else
bt = &tags->bitmap_tags;
- finish_wait(&ws->wait, &wait);
-
/*
* If destination hw queue is changed, fake wake up on
* previous queue for compensating the wake up miss, so
@@ -188,10 +185,7 @@
ws = bt_wait_ptr(bt, data->hctx);
} while (1);
- if (drop_ctx && data->ctx)
- blk_mq_put_ctx(data->ctx);
-
- finish_wait(&ws->wait, &wait);
+ sbitmap_finish_wait(bt, ws, &wait);
found_tag:
return tag + tag_offset;
@@ -232,13 +226,27 @@
/*
* We can hit rq == NULL here, because the tagging functions
- * test and set the bit before assining ->rqs[].
+ * test and set the bit before assigning ->rqs[].
*/
if (rq && rq->q == hctx->queue)
- iter_data->fn(hctx, rq, iter_data->data, reserved);
+ return iter_data->fn(hctx, rq, iter_data->data, reserved);
return true;
}
+/**
+ * bt_for_each - iterate over the requests associated with a hardware queue
+ * @hctx: Hardware queue to examine.
+ * @bt: sbitmap to examine. This is either the breserved_tags member
+ * or the bitmap_tags member of struct blk_mq_tags.
+ * @fn: Pointer to the function that will be called for each request
+ * associated with @hctx that has been assigned a driver tag.
+ * @fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
+ * where rq is a pointer to a request. Return true to continue
+ * iterating tags, false to stop.
+ * @data: Will be passed as third argument to @fn.
+ * @reserved: Indicates whether @bt is the breserved_tags member or the
+ * bitmap_tags member of struct blk_mq_tags.
+ */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
busy_iter_fn *fn, void *data, bool reserved)
{
@@ -275,11 +283,24 @@
*/
rq = tags->rqs[bitnr];
if (rq && blk_mq_request_started(rq))
- iter_data->fn(rq, iter_data->data, reserved);
+ return iter_data->fn(rq, iter_data->data, reserved);
return true;
}
+/**
+ * bt_tags_for_each - iterate over the requests in a tag map
+ * @tags: Tag map to iterate over.
+ * @bt: sbitmap to examine. This is either the breserved_tags member
+ * or the bitmap_tags member of struct blk_mq_tags.
+ * @fn: Pointer to the function that will be called for each started
+ * request. @fn will be called as follows: @fn(rq, @data,
+ * @reserved) where rq is a pointer to a request. Return true
+ * to continue iterating tags, false to stop.
+ * @data: Will be passed as second argument to @fn.
+ * @reserved: Indicates whether @bt is the breserved_tags member or the
+ * bitmap_tags member of struct blk_mq_tags.
+ */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
busy_tag_iter_fn *fn, void *data, bool reserved)
{
@@ -294,6 +315,16 @@
sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}
+/**
+ * blk_mq_all_tag_busy_iter - iterate over all started requests in a tag map
+ * @tags: Tag map to iterate over.
+ * @fn: Pointer to the function that will be called for each started
+ * request. @fn will be called as follows: @fn(rq, @priv,
+ * reserved) where rq is a pointer to a request. 'reserved'
+ * indicates whether or not @rq is a reserved request. Return
+ * true to continue iterating tags, false to stop.
+ * @priv: Will be passed as second argument to @fn.
+ */
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
busy_tag_iter_fn *fn, void *priv)
{
@@ -302,6 +333,16 @@
bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}
+/**
+ * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
+ * @tagset: Tag set to iterate over.
+ * @fn: Pointer to the function that will be called for each started
+ * request. @fn will be called as follows: @fn(rq, @priv,
+ * reserved) where rq is a pointer to a request. 'reserved'
+ * indicates whether or not @rq is a reserved request. Return
+ * true to continue iterating tags, false to stop.
+ * @priv: Will be passed as second argument to @fn.
+ */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv)
{
@@ -314,6 +355,51 @@
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
+static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
+ void *data, bool reserved)
+{
+ unsigned *count = data;
+
+ if (blk_mq_request_completed(rq))
+ (*count)++;
+ return true;
+}
+
+/**
+ * blk_mq_tagset_wait_completed_request - wait until all completed requests'
+ * completion functions have run
+ * @tagset: Tag set to drain completed requests
+ *
+ * Note: This function has to be run after all IO queues are shut down
+ */
+void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
+{
+ while (true) {
+ unsigned count = 0;
+
+ blk_mq_tagset_busy_iter(tagset,
+ blk_mq_tagset_count_completed_rqs, &count);
+ if (!count)
+ break;
+ msleep(5);
+ }
+}
+EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
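A sketch of the intended call site, a driver tearing down its queues; foo_cancel_request() is a hypothetical busy_tag_iter_fn that calls blk_mq_complete_request() on each started request:

	blk_mq_quiesce_queue(q);
	blk_mq_tagset_busy_iter(&foo->tag_set, foo_cancel_request, NULL);
	/* wait until every cancelled request's completion has run */
	blk_mq_tagset_wait_completed_request(&foo->tag_set);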
+
+/**
+ * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
+ * @q: Request queue to examine.
+ * @fn: Pointer to the function that will be called for each request
+ * on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
+ * reserved) where rq is a pointer to a request and hctx points
+ * to the hardware queue associated with the request. 'reserved'
+ * indicates whether or not @rq is a reserved request.
+ * @priv: Will be passed as third argument to @fn.
+ *
+ * Note: if @q->tag_set is shared with other request queues then @fn will be
+ * called for all requests on all queues that share that tag set and not only
+ * for requests associated with @q.
+ */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv)
{
@@ -321,9 +407,11 @@
int i;
/*
- * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
- * queue_hw_ctx after freeze the queue, so we use q_usage_counter
- * to avoid race with it.
+ * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
+ * while the queue is frozen. So we can use q_usage_counter to avoid
+ * racing with it. __blk_mq_update_nr_hw_queues() uses
+ * synchronize_rcu() to ensure this function left the critical section
+ * below.
*/
if (!percpu_ref_tryget(&q->q_usage_counter))
return;
@@ -332,7 +420,7 @@
struct blk_mq_tags *tags = hctx->tags;
/*
- * If not software queues are currently mapped to this
+ * If no software queues are currently mapped to this
* hardware queue, there's nothing to check
*/
if (!blk_mq_hw_queue_mapped(hctx))
@@ -467,16 +555,7 @@
*/
u32 blk_mq_unique_tag(struct request *rq)
{
- struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx;
- int hwq = 0;
-
- if (q->mq_ops) {
- hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
- hwq = hctx->queue_num;
- }
-
- return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
+ return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index c3afbca..4883416 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Christoph Hellwig.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/device.h>
#include <linux/blk-mq.h>
@@ -19,8 +11,8 @@
/**
* blk_mq_virtio_map_queues - provide a default queue mapping for virtio device
- * @set: tagset to provide the mapping for
- * @vdev: virtio device associated with @set.
+ * @qmap: CPU to hardware queue map.
+ * @vdev: virtio device to provide a mapping for.
* @first_vec: first interrupt vectors to use for queues (usually 0)
*
* This function assumes the virtio device @vdev has at least as many available
@@ -29,7 +21,7 @@
* that maps a queue to the CPUs that have irq affinity for the corresponding
* vector.
*/
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec)
{
const struct cpumask *mask;
@@ -38,17 +30,17 @@
if (!vdev->config->get_vq_affinity)
goto fallback;
- for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);
if (!mask)
goto fallback;
for_each_cpu(cpu, mask)
- set->mq_map[cpu] = queue;
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
}
return 0;
fallback:
- return blk_mq_map_queues(set);
+ return blk_mq_map_queues(qmap);
}
EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
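The virtio variant is used the same way, e.g. blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT], vdev, 0), and likewise degrades to blk_mq_map_queues() when the transport implements no ->get_vq_affinity.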
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 23a53b6..ec79115 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block multiqueue core code
*
@@ -29,26 +30,27 @@
#include <trace/events/block.h>
#include <linux/blk-mq.h>
+#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
+#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
-static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
- int ddir, bytes, bucket;
+ int ddir, sectors, bucket;
ddir = rq_data_dir(rq);
- bytes = blk_rq_bytes(rq);
+ sectors = blk_rq_stats_sectors(rq);
- bucket = ddir + 2*(ilog2(bytes) - 9);
+ bucket = ddir + 2 * ilog2(sectors);
if (bucket < 0)
return -1;
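A worked example of the new sectors-based bucketing: a 4 KiB read has blk_rq_stats_sectors() == 8, so bucket = 0 + 2 * ilog2(8) = 6, and the same-sized write lands in bucket 7; that pair is exactly what queue_poll_stat_show() earlier prints as the 4096-byte read/write lines.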
@@ -59,7 +61,8 @@
}
/*
- * Check if any of the ctx's have pending work in this hardware queue
+ * Check if any of the ctx, dispatch list or elevator
+ * have pending work in this hardware queue.
*/
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
@@ -74,14 +77,18 @@
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{
- if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
- sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
+ const int bit = ctx->index_hw[hctx->type];
+
+ if (!sbitmap_test_bit(&hctx->ctx_map, bit))
+ sbitmap_set_bit(&hctx->ctx_map, bit);
}
static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{
- sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
+ const int bit = ctx->index_hw[hctx->type];
+
+ sbitmap_clear_bit(&hctx->ctx_map, bit);
}
struct mq_inflight {
@@ -89,33 +96,33 @@
unsigned int *inflight;
};
-static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
+static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv,
bool reserved)
{
struct mq_inflight *mi = priv;
/*
- * index[0] counts the specific partition that was asked for. index[1]
- * counts the ones that are active on the whole device, so increment
- * that if mi->part is indeed a partition, and not a whole device.
+ * index[0] counts the specific partition that was asked for.
*/
if (rq->part == mi->part)
mi->inflight[0]++;
- if (mi->part->partno)
- mi->inflight[1]++;
+
+ return true;
}
-void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2])
+unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
{
+ unsigned inflight[2];
struct mq_inflight mi = { .part = part, .inflight = inflight, };
inflight[0] = inflight[1] = 0;
blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
+
+ return inflight[0];
}
-static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
+static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv,
bool reserved)
{
@@ -123,6 +130,8 @@
if (rq->part == mi->part)
mi->inflight[rq_data_dir(rq)]++;
+
+ return true;
}
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
@@ -136,13 +145,14 @@
void blk_freeze_queue_start(struct request_queue *q)
{
- int freeze_depth;
-
- freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
- if (freeze_depth == 1) {
+ mutex_lock(&q->mq_freeze_lock);
+ if (++q->mq_freeze_depth == 1) {
percpu_ref_kill(&q->q_usage_counter);
- if (q->mq_ops)
+ mutex_unlock(&q->mq_freeze_lock);
+ if (queue_is_mq(q))
blk_mq_run_hw_queues(q, false);
+ } else {
+ mutex_unlock(&q->mq_freeze_lock);
}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -176,8 +186,6 @@
* exported to drivers as the only user for unfreeze is blk_mq.
*/
blk_freeze_queue_start(q);
- if (!q->mq_ops)
- blk_drain_queue(q);
blk_mq_freeze_queue_wait(q);
}
@@ -193,14 +201,14 @@
void blk_mq_unfreeze_queue(struct request_queue *q)
{
- int freeze_depth;
-
- freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
- WARN_ON_ONCE(freeze_depth < 0);
- if (!freeze_depth) {
- percpu_ref_reinit(&q->q_usage_counter);
+ mutex_lock(&q->mq_freeze_lock);
+ q->mq_freeze_depth--;
+ WARN_ON_ONCE(q->mq_freeze_depth < 0);
+ if (!q->mq_freeze_depth) {
+ percpu_ref_resurrect(&q->q_usage_counter);
wake_up_all(&q->mq_freeze_wq);
}
+ mutex_unlock(&q->mq_freeze_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
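The mutex serializes the depth accounting, but the pairing in callers is unchanged; a sketch:

	blk_mq_freeze_queue(q);		/* kill q_usage_counter and wait for it to drain */
	/* ... safely update queue state, e.g. nr_requests or the elevator ... */
	blk_mq_unfreeze_queue(q);	/* resurrect the counter and wake waiters */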
@@ -274,8 +282,17 @@
}
EXPORT_SYMBOL(blk_mq_can_queue);
+/*
+ * Only need start/end time stamping if we have iostat or
+ * blk stats enabled, or are using an IO scheduler.
+ */
+static inline bool blk_mq_need_time_stamp(struct request *rq)
+{
+ return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator;
+}
+
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
- unsigned int tag, unsigned int op)
+ unsigned int tag, unsigned int op, u64 alloc_time_ns)
{
struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
struct request *rq = tags->static_rqs[tag];
@@ -297,8 +314,8 @@
/* csd/requeue_work/fifo_time is initialized before use */
rq->q = data->q;
rq->mq_ctx = data->ctx;
+ rq->mq_hctx = data->hctx;
rq->rq_flags = rq_flags;
- rq->cpu = -1;
rq->cmd_flags = op;
if (data->flags & BLK_MQ_REQ_PREEMPT)
rq->rq_flags |= RQF_PREEMPT;
@@ -309,27 +326,27 @@
RB_CLEAR_NODE(&rq->rb_node);
rq->rq_disk = NULL;
rq->part = NULL;
- rq->start_time_ns = ktime_get_ns();
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+ rq->alloc_time_ns = alloc_time_ns;
+#endif
+ if (blk_mq_need_time_stamp(rq))
+ rq->start_time_ns = ktime_get_ns();
+ else
+ rq->start_time_ns = 0;
rq->io_start_time_ns = 0;
+ rq->stats_sectors = 0;
rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
rq->nr_integrity_segments = 0;
#endif
- rq->special = NULL;
/* tag was already set */
rq->extra_len = 0;
- rq->__deadline = 0;
+ WRITE_ONCE(rq->deadline, 0);
- INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
rq->end_io = NULL;
rq->end_io_data = NULL;
- rq->next_rq = NULL;
-
-#ifdef CONFIG_BLK_CGROUP
- rq->rl = NULL;
-#endif
data->ctx->rq_dispatched[op_is_sync(op)]++;
refcount_set(&rq->ref, 1);
@@ -337,23 +354,30 @@
}
static struct request *blk_mq_get_request(struct request_queue *q,
- struct bio *bio, unsigned int op,
- struct blk_mq_alloc_data *data)
+ struct bio *bio,
+ struct blk_mq_alloc_data *data)
{
struct elevator_queue *e = q->elevator;
struct request *rq;
unsigned int tag;
- bool put_ctx_on_error = false;
+ bool clear_ctx_on_error = false;
+ u64 alloc_time_ns = 0;
blk_queue_enter_live(q);
+
+ /* alloc_time includes depth and tag waits */
+ if (blk_queue_rq_alloc_time(q))
+ alloc_time_ns = ktime_get_ns();
+
data->q = q;
if (likely(!data->ctx)) {
data->ctx = blk_mq_get_ctx(q);
- put_ctx_on_error = true;
+ clear_ctx_on_error = true;
}
if (likely(!data->hctx))
- data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
- if (op & REQ_NOWAIT)
+ data->hctx = blk_mq_map_queue(q, data->cmd_flags,
+ data->ctx);
+ if (data->cmd_flags & REQ_NOWAIT)
data->flags |= BLK_MQ_REQ_NOWAIT;
if (e) {
@@ -364,31 +388,30 @@
* dispatch list. Don't include reserved tags in the
* limiting, as it isn't useful.
*/
- if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
+ if (!op_is_flush(data->cmd_flags) &&
+ e->type->ops.limit_depth &&
!(data->flags & BLK_MQ_REQ_RESERVED))
- e->type->ops.mq.limit_depth(op, data);
+ e->type->ops.limit_depth(data->cmd_flags, data);
} else {
blk_mq_tag_busy(data->hctx);
}
tag = blk_mq_get_tag(data);
if (tag == BLK_MQ_TAG_FAIL) {
- if (put_ctx_on_error) {
- blk_mq_put_ctx(data->ctx);
+ if (clear_ctx_on_error)
data->ctx = NULL;
- }
blk_queue_exit(q);
return NULL;
}
- rq = blk_mq_rq_ctx_init(data, tag, op);
- if (!op_is_flush(op)) {
+ rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags, alloc_time_ns);
+ if (!op_is_flush(data->cmd_flags)) {
rq->elv.icq = NULL;
- if (e && e->type->ops.mq.prepare_request) {
- if (e->type->icq_cache && rq_ioc(bio))
- blk_mq_sched_assign_ioc(rq, bio);
+ if (e && e->type->ops.prepare_request) {
+ if (e->type->icq_cache)
+ blk_mq_sched_assign_ioc(rq);
- e->type->ops.mq.prepare_request(rq, bio);
+ e->type->ops.prepare_request(rq, bio);
rq->rq_flags |= RQF_ELVPRIV;
}
}
@@ -399,7 +422,7 @@
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
blk_mq_req_flags_t flags)
{
- struct blk_mq_alloc_data alloc_data = { .flags = flags };
+ struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
struct request *rq;
int ret;
@@ -407,14 +430,12 @@
if (ret)
return ERR_PTR(ret);
- rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+ rq = blk_mq_get_request(q, NULL, &alloc_data);
blk_queue_exit(q);
if (!rq)
return ERR_PTR(-EWOULDBLOCK);
- blk_mq_put_ctx(alloc_data.ctx);
-
rq->__data_len = 0;
rq->__sector = (sector_t) -1;
rq->bio = rq->biotail = NULL;
@@ -425,7 +446,7 @@
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
- struct blk_mq_alloc_data alloc_data = { .flags = flags };
+ struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
struct request *rq;
unsigned int cpu;
int ret;
@@ -458,7 +479,7 @@
cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
- rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+ rq = blk_mq_get_request(q, NULL, &alloc_data);
blk_queue_exit(q);
if (!rq)
@@ -472,9 +493,11 @@
{
struct request_queue *q = rq->q;
struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
const int sched_tag = rq->internal_tag;
+ blk_pm_mark_last_busy(rq);
+ rq->mq_hctx = NULL;
if (rq->tag != -1)
blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
if (sched_tag != -1)
@@ -488,11 +511,11 @@
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
if (rq->rq_flags & RQF_ELVPRIV) {
- if (e && e->type->ops.mq.finish_request)
- e->type->ops.mq.finish_request(rq);
+ if (e && e->type->ops.finish_request)
+ e->type->ops.finish_request(rq);
if (rq->elv.icq) {
put_io_context(rq->elv.icq->ioc);
rq->elv.icq = NULL;
@@ -508,9 +531,6 @@
rq_qos_done(q, rq);
- if (blk_rq_rl(rq))
- blk_put_rl(blk_rq_rl(rq));
-
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
if (refcount_dec_and_test(&rq->ref))
__blk_mq_free_request(rq);
@@ -519,21 +539,25 @@
inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
- u64 now = ktime_get_ns();
+ u64 now = 0;
+
+ if (blk_mq_need_time_stamp(rq))
+ now = ktime_get_ns();
if (rq->rq_flags & RQF_STATS) {
blk_mq_poll_stats_start(rq->q);
blk_stat_add(rq, now);
}
+ if (rq->internal_tag != -1)
+ blk_mq_sched_completed_request(rq, now);
+
blk_account_io_done(rq, now);
if (rq->end_io) {
rq_qos_done(rq->q, rq);
rq->end_io(rq, error);
} else {
- if (unlikely(blk_bidi_rq(rq)))
- blk_mq_free_request(rq->next_rq);
blk_mq_free_request(rq);
}
}
@@ -550,28 +574,45 @@
static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
+ struct request_queue *q = rq->q;
- rq->q->softirq_done_fn(rq);
+ q->mq_ops->complete(rq);
}
static void __blk_mq_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
+ struct request_queue *q = rq->q;
bool shared = false;
int cpu;
- if (!blk_mq_mark_complete(rq))
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+ /*
+ * For most single queue controllers, there is only one irq vector
+ * for handling IO completion, and its affinity is set to all
+ * possible CPUs. On most archs, this affinity means the irq is
+ * handled on one specific CPU.
+ *
+ * So complete the IO request in softirq context in the single queue
+ * case, to avoid degrading IO performance with irqsoff latency.
+ */
+ if (q->nr_hw_queues == 1) {
+ __blk_complete_request(rq);
return;
- if (rq->internal_tag != -1)
- blk_mq_sched_completed_request(rq);
+ }
- if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
- rq->q->softirq_done_fn(rq);
+ /*
+ * For a polled request, always complete locally; it's pointless
+ * to redirect the completion.
+ */
+ if ((rq->cmd_flags & REQ_HIPRI) ||
+ !test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
+ q->mq_ops->complete(rq);
return;
}
cpu = get_cpu();
- if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
+ if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);
if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
@@ -580,7 +621,7 @@
rq->csd.flags = 0;
smp_call_function_single_async(ctx->cpu, &rq->csd);
} else {
- rq->q->softirq_done_fn(rq);
+ q->mq_ops->complete(rq);
}
put_cpu();
}
@@ -613,11 +654,12 @@
* Ends all I/O on a request. It does not handle partial completions.
* The actual completion happens out-of-order, through a IPI handler.
**/
-void blk_mq_complete_request(struct request *rq)
+bool blk_mq_complete_request(struct request *rq)
{
if (unlikely(blk_should_fake_timeout(rq->q)))
- return;
+ return false;
__blk_mq_complete_request(rq);
+ return true;
}
EXPORT_SYMBOL(blk_mq_complete_request);
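blk_mq_complete_request() now reports whether the completion actually ran, so callers can tell when it was suppressed by fake-timeout fault injection. A minimal sketch of a driver-side completion path using the new return value; the mydrv_ naming is hypothetical:

    #include <linux/blk-mq.h>

    static void mydrv_handle_irq_completion(struct request *rq)
    {
            /*
             * blk_mq_complete_request() returns false only when
             * blk_should_fake_timeout() swallowed the completion; the
             * timeout handler will finish the request later.
             */
            if (!blk_mq_complete_request(rq))
                    pr_debug("mydrv: completion deferred to timeout path\n");
    }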
@@ -627,19 +669,21 @@
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);
+int blk_mq_request_completed(struct request *rq)
+{
+ return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_completed);
+
void blk_mq_start_request(struct request *rq)
{
struct request_queue *q = rq->q;
- blk_mq_sched_started_request(rq);
-
trace_block_rq_issue(q, rq);
if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
rq->io_start_time_ns = ktime_get_ns();
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- rq->throtl_size = blk_rq_sectors(rq);
-#endif
+ rq->stats_sectors = blk_rq_sectors(rq);
rq->rq_flags |= RQF_STATS;
rq_qos_issue(q, rq);
}
@@ -657,6 +701,11 @@
*/
rq->nr_phys_segments++;
}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
+ q->integrity.profile->prepare_fn(rq);
+#endif
}
EXPORT_SYMBOL(blk_mq_start_request);
@@ -684,7 +733,7 @@
/* this request will be re-inserted to io scheduler queue */
blk_mq_sched_requeue_request(rq);
- BUG_ON(blk_queued_rq(rq));
+ BUG_ON(!list_empty(&rq->queuelist));
blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);
@@ -701,12 +750,20 @@
spin_unlock_irq(&q->requeue_lock);
list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
- if (!(rq->rq_flags & RQF_SOFTBARRIER))
+ if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
continue;
rq->rq_flags &= ~RQF_SOFTBARRIER;
list_del_init(&rq->queuelist);
- blk_mq_sched_insert_request(rq, true, false, false);
+ /*
+ * If RQF_DONTPREP is set, the rq already contains driver-specific
+ * data, so insert it into the hctx dispatch list to avoid any
+ * merge.
+ */
+ if (rq->rq_flags & RQF_DONTPREP)
+ blk_mq_request_bypass_insert(rq, false);
+ else
+ blk_mq_sched_insert_request(rq, true, false, false);
}
while (!list_empty(&rq_list)) {
@@ -742,7 +799,6 @@
if (kick_requeue_list)
blk_mq_kick_requeue_list(q);
}
-EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q)
{
@@ -769,6 +825,32 @@
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);
+static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ void *priv, bool reserved)
+{
+ /*
+ * If we find a request that is inflight and the queue matches,
+ * we know the queue is busy. Return false to stop the iteration.
+ */
+ if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
+ bool *busy = priv;
+
+ *busy = true;
+ return false;
+ }
+
+ return true;
+}
+
+bool blk_mq_queue_inflight(struct request_queue *q)
+{
+ bool busy = false;
+
+ blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
+ return busy;
+}
+EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
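blk_mq_queue_inflight() turns the tag iterator above into a cheap busy check; the runtime PM code uses it to decide whether a device may idle. A hedged sketch of such a caller, with illustrative mydev naming:

    #include <linux/blk-mq.h>

    /*
     * Allow a device-initiated idle transition only while no request is
     * between allocation and completion on this queue.
     */
    static bool mydev_may_enter_idle(struct request_queue *q)
    {
            return !blk_mq_queue_inflight(q);
    }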
+
static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
req->rq_flags |= RQF_TIMED_OUT;
@@ -793,7 +875,7 @@
if (rq->rq_flags & RQF_TIMED_OUT)
return false;
- deadline = blk_rq_deadline(rq);
+ deadline = READ_ONCE(rq->deadline);
if (time_after_eq(jiffies, deadline))
return true;
@@ -804,7 +886,7 @@
return false;
}
-static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
+static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv, bool reserved)
{
unsigned long *next = priv;
@@ -814,7 +896,7 @@
* so we're not unnecessarily synchronizing across CPUs.
*/
if (!blk_mq_req_expired(rq, next))
- return;
+ return true;
/*
* We have reason to believe the request may be expired. Take a
@@ -826,7 +908,7 @@
* timeout handler to posting a natural completion.
*/
if (!refcount_inc_not_zero(&rq->ref))
- return;
+ return true;
/*
* The request is now locked and cannot be reallocated underneath the
@@ -836,8 +918,13 @@
*/
if (blk_mq_req_expired(rq, next))
blk_mq_rq_timed_out(rq, reserved);
- if (refcount_dec_and_test(&rq->ref))
+
+ if (is_flush_rq(rq, hctx))
+ rq->end_io(rq, 0);
+ else if (refcount_dec_and_test(&rq->ref))
__blk_mq_free_request(rq);
+
+ return true;
}
static void blk_mq_timeout_work(struct work_struct *work)
@@ -894,9 +981,10 @@
struct flush_busy_ctx_data *flush_data = data;
struct blk_mq_hw_ctx *hctx = flush_data->hctx;
struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
+ enum hctx_type type = hctx->type;
spin_lock(&ctx->lock);
- list_splice_tail_init(&ctx->rq_list, flush_data->list);
+ list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
sbitmap_clear_bit(sb, bitnr);
spin_unlock(&ctx->lock);
return true;
@@ -928,12 +1016,13 @@
struct dispatch_rq_data *dispatch_data = data;
struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
+ enum hctx_type type = hctx->type;
spin_lock(&ctx->lock);
- if (!list_empty(&ctx->rq_list)) {
- dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
+ if (!list_empty(&ctx->rq_lists[type])) {
+ dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
list_del_init(&dispatch_data->rq->queuelist);
- if (list_empty(&ctx->rq_list))
+ if (list_empty(&ctx->rq_lists[type]))
sbitmap_clear_bit(sb, bitnr);
}
spin_unlock(&ctx->lock);
@@ -944,7 +1033,7 @@
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *start)
{
- unsigned off = start ? start->index_hw : 0;
+ unsigned off = start ? start->index_hw[hctx->type] : 0;
struct dispatch_rq_data data = {
.hctx = hctx,
.rq = NULL,
@@ -968,8 +1057,9 @@
{
struct blk_mq_alloc_data data = {
.q = rq->q,
- .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
+ .hctx = rq->mq_hctx,
.flags = BLK_MQ_REQ_NOWAIT,
+ .cmd_flags = rq->cmd_flags,
};
bool shared;
@@ -1001,7 +1091,13 @@
hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
spin_lock(&hctx->dispatch_wait_lock);
- list_del_init(&wait->entry);
+ if (!list_empty(&wait->entry)) {
+ struct sbitmap_queue *sbq;
+
+ list_del_init(&wait->entry);
+ sbq = &hctx->tags->bitmap_tags;
+ atomic_dec(&sbq->ws_active);
+ }
spin_unlock(&hctx->dispatch_wait_lock);
blk_mq_run_hw_queue(hctx, true);
@@ -1017,13 +1113,13 @@
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
+ struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
struct wait_queue_head *wq;
wait_queue_entry_t *wait;
bool ret;
if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
- if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+ blk_mq_sched_mark_restart_hctx(hctx);
/*
* It's possible that a tag was freed in the window between the
@@ -1040,7 +1136,7 @@
if (!list_empty_careful(&wait->entry))
return false;
- wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+ wq = &bt_wait_ptr(sbq, hctx)->wait;
spin_lock_irq(&wq->lock);
spin_lock(&hctx->dispatch_wait_lock);
@@ -1050,6 +1146,7 @@
return false;
}
+ atomic_inc(&sbq->ws_active);
wait->flags &= ~WQ_FLAG_EXCLUSIVE;
__add_wait_queue(wq, wait);
@@ -1070,6 +1167,7 @@
* someone else gets the wakeup.
*/
list_del_init(&wait->entry);
+ atomic_dec(&sbq->ws_active);
spin_unlock(&hctx->dispatch_wait_lock);
spin_unlock_irq(&wq->lock);
@@ -1133,7 +1231,7 @@
rq = list_first_entry(list, struct request, queuelist);
- hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+ hctx = rq->mq_hctx;
if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
break;
@@ -1206,6 +1304,14 @@
if (!list_empty(list)) {
bool needs_restart;
+ /*
+ * If we didn't flush the entire list, we could have told
+ * the driver there was more coming, but that turned out to
+ * be a lie.
+ */
+ if (q->mq_ops->commit_rqs)
+ q->mq_ops->commit_rqs(hctx);
+
spin_lock(&hctx->lock);
list_splice_init(list, &hctx->dispatch);
spin_unlock(&hctx->lock);
@@ -1535,15 +1641,16 @@
bool at_head)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
+ enum hctx_type type = hctx->type;
lockdep_assert_held(&ctx->lock);
trace_block_rq_insert(hctx->queue, rq);
if (at_head)
- list_add(&rq->queuelist, &ctx->rq_list);
+ list_add(&rq->queuelist, &ctx->rq_lists[type]);
else
- list_add_tail(&rq->queuelist, &ctx->rq_list);
+ list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
}
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
@@ -1563,8 +1670,7 @@
*/
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
{
- struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
spin_lock(&hctx->lock);
list_add_tail(&rq->queuelist, &hctx->dispatch);
@@ -1579,6 +1685,7 @@
{
struct request *rq;
+ enum hctx_type type = hctx->type;
/*
* preemption doesn't flush plug list, so it's possible ctx->cpu is
@@ -1590,35 +1697,47 @@
}
spin_lock(&ctx->lock);
- list_splice_tail_init(list, &ctx->rq_list);
+ list_splice_tail_init(list, &ctx->rq_lists[type]);
blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
}
-static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
+static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct request *rqa = container_of(a, struct request, queuelist);
struct request *rqb = container_of(b, struct request, queuelist);
- return !(rqa->mq_ctx < rqb->mq_ctx ||
- (rqa->mq_ctx == rqb->mq_ctx &&
- blk_rq_pos(rqa) < blk_rq_pos(rqb)));
+ if (rqa->mq_ctx < rqb->mq_ctx)
+ return -1;
+ else if (rqa->mq_ctx > rqb->mq_ctx)
+ return 1;
+ else if (rqa->mq_hctx < rqb->mq_hctx)
+ return -1;
+ else if (rqa->mq_hctx > rqb->mq_hctx)
+ return 1;
+
+ return blk_rq_pos(rqa) > blk_rq_pos(rqb);
}
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
+ struct blk_mq_hw_ctx *this_hctx;
struct blk_mq_ctx *this_ctx;
struct request_queue *this_q;
struct request *rq;
LIST_HEAD(list);
- LIST_HEAD(ctx_list);
+ LIST_HEAD(rq_list);
unsigned int depth;
list_splice_init(&plug->mq_list, &list);
- list_sort(NULL, &list, plug_ctx_cmp);
+ if (plug->rq_count > 2 && plug->multiple_queues)
+ list_sort(NULL, &list, plug_rq_cmp);
+
+ plug->rq_count = 0;
this_q = NULL;
+ this_hctx = NULL;
this_ctx = NULL;
depth = 0;
@@ -1626,59 +1745,56 @@
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
BUG_ON(!rq->q);
- if (rq->mq_ctx != this_ctx) {
- if (this_ctx) {
+ if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
+ if (this_hctx) {
trace_block_unplug(this_q, depth, !from_schedule);
- blk_mq_sched_insert_requests(this_q, this_ctx,
- &ctx_list,
+ blk_mq_sched_insert_requests(this_hctx, this_ctx,
+ &rq_list,
from_schedule);
}
- this_ctx = rq->mq_ctx;
this_q = rq->q;
+ this_ctx = rq->mq_ctx;
+ this_hctx = rq->mq_hctx;
depth = 0;
}
depth++;
- list_add_tail(&rq->queuelist, &ctx_list);
+ list_add_tail(&rq->queuelist, &rq_list);
}
/*
- * If 'this_ctx' is set, we know we have entries to complete
- * on 'ctx_list'. Do those.
+ * If 'this_hctx' is set, we know we have entries to complete
+ * on 'rq_list'. Do those.
*/
- if (this_ctx) {
+ if (this_hctx) {
trace_block_unplug(this_q, depth, !from_schedule);
- blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
+ blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
from_schedule);
}
}
-static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
+static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
+ unsigned int nr_segs)
{
- blk_init_request_from_bio(rq, bio);
+ if (bio->bi_opf & REQ_RAHEAD)
+ rq->cmd_flags |= REQ_FAILFAST_MASK;
- blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));
+ rq->__sector = bio->bi_iter.bi_sector;
+ rq->write_hint = bio->bi_write_hint;
+ blk_rq_bio_prep(rq, bio, nr_segs);
blk_account_io_start(rq, true);
}
-static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
-{
- if (rq->tag != -1)
- return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
-
- return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
-}
-
static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
- blk_qc_t *cookie)
+ blk_qc_t *cookie, bool last)
{
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
.rq = rq,
- .last = true,
+ .last = last,
};
blk_qc_t new_cookie;
blk_status_t ret;
@@ -1713,7 +1829,7 @@
static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
- bool bypass_insert)
+ bool bypass_insert, bool last)
{
struct request_queue *q = rq->q;
bool run_queue = true;
@@ -1742,7 +1858,7 @@
goto insert;
}
- return __blk_mq_issue_directly(hctx, rq, cookie);
+ return __blk_mq_issue_directly(hctx, rq, cookie, last);
insert:
if (bypass_insert)
return BLK_STS_RESOURCE;
@@ -1761,7 +1877,7 @@
hctx_lock(hctx, &srcu_idx);
- ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
+ ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
blk_mq_request_bypass_insert(rq, true);
else if (ret != BLK_STS_OK)
@@ -1770,16 +1886,15 @@
hctx_unlock(hctx, srcu_idx);
}
-blk_status_t blk_mq_request_issue_directly(struct request *rq)
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
{
blk_status_t ret;
int srcu_idx;
blk_qc_t unused_cookie;
- struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
hctx_lock(hctx, &srcu_idx);
- ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
+ ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
hctx_unlock(hctx, srcu_idx);
return ret;
@@ -1794,7 +1909,7 @@
queuelist);
list_del_init(&rq->queuelist);
- ret = blk_mq_request_issue_directly(rq);
+ ret = blk_mq_request_issue_directly(rq, list_empty(list));
if (ret != BLK_STS_OK) {
if (ret == BLK_STS_RESOURCE ||
ret == BLK_STS_DEV_RESOURCE) {
@@ -1805,38 +1920,58 @@
blk_mq_end_request(rq, ret);
}
}
+
+ /*
+ * If we didn't flush the entire list, we could have told
+ * the driver there was more coming, but that turned out to
+ * be a lie.
+ */
+ if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
+ hctx->queue->mq_ops->commit_rqs(hctx);
+}
+
+static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
+{
+ list_add_tail(&rq->queuelist, &plug->mq_list);
+ plug->rq_count++;
+ if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
+ struct request *tmp;
+
+ tmp = list_first_entry(&plug->mq_list, struct request,
+ queuelist);
+ if (tmp->q != rq->q)
+ plug->multiple_queues = true;
+ }
}
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = op_is_flush(bio->bi_opf);
- struct blk_mq_alloc_data data = { .flags = 0 };
+ struct blk_mq_alloc_data data = { .flags = 0};
struct request *rq;
- unsigned int request_count = 0;
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
+ unsigned int nr_segs;
blk_qc_t cookie;
blk_queue_bounce(q, &bio);
-
- blk_queue_split(q, &bio);
+ __blk_queue_split(q, &bio, &nr_segs);
if (!bio_integrity_prep(bio))
return BLK_QC_T_NONE;
if (!is_flush_fua && !blk_queue_nomerges(q) &&
- blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+ blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
return BLK_QC_T_NONE;
- if (blk_mq_sched_bio_merge(q, bio))
+ if (blk_mq_sched_bio_merge(q, bio, nr_segs))
return BLK_QC_T_NONE;
- rq_qos_throttle(q, bio, NULL);
+ rq_qos_throttle(q, bio);
- trace_block_getrq(q, bio, bio->bi_opf);
-
- rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
+ data.cmd_flags = bio->bi_opf;
+ rq = blk_mq_get_request(q, bio, &data);
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
@@ -1844,32 +1979,30 @@
return BLK_QC_T_NONE;
}
+ trace_block_getrq(q, bio, bio->bi_opf);
+
rq_qos_track(q, rq, bio);
cookie = request_to_qc_t(data.hctx, rq);
- plug = current->plug;
- if (unlikely(is_flush_fua)) {
- blk_mq_put_ctx(data.ctx);
- blk_mq_bio_to_request(rq, bio);
+ blk_mq_bio_to_request(rq, bio, nr_segs);
+ plug = blk_mq_plug(q, bio);
+ if (unlikely(is_flush_fua)) {
/* bypass scheduler for flush rq */
blk_insert_flush(rq);
blk_mq_run_hw_queue(data.hctx, true);
- } else if (plug && q->nr_hw_queues == 1) {
- struct request *last = NULL;
-
- blk_mq_put_ctx(data.ctx);
- blk_mq_bio_to_request(rq, bio);
-
+ } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
+ !blk_queue_nonrot(q))) {
/*
- * @request_count may become stale because of schedule
- * out, so check the list again.
+ * Use plugging if we have a ->commit_rqs() hook as well, as
+ * we know the driver uses bd->last in a smart fashion.
+ *
+ * Use normal plugging if this disk is a slow HDD, as sequential
+ * IO may benefit a lot from plug merging.
*/
- if (list_empty(&plug->mq_list))
- request_count = 0;
- else if (blk_queue_nomerges(q))
- request_count = blk_plug_queued_count(q);
+ unsigned int request_count = plug->rq_count;
+ struct request *last = NULL;
if (!request_count)
trace_block_plug(q);
@@ -1882,10 +2015,10 @@
trace_block_plug(q);
}
- list_add_tail(&rq->queuelist, &plug->mq_list);
+ blk_add_rq_to_plug(plug, rq);
+ } else if (q->elevator) {
+ blk_mq_sched_insert_request(rq, false, true, true);
} else if (plug && !blk_queue_nomerges(q)) {
- blk_mq_bio_to_request(rq, bio);
-
/*
* We do limited plugging. If the bio can be merged, do that.
* Otherwise the existing request in the plug list will be
@@ -1895,26 +2028,23 @@
*/
if (list_empty(&plug->mq_list))
same_queue_rq = NULL;
- if (same_queue_rq)
+ if (same_queue_rq) {
list_del_init(&same_queue_rq->queuelist);
- list_add_tail(&rq->queuelist, &plug->mq_list);
-
- blk_mq_put_ctx(data.ctx);
+ plug->rq_count--;
+ }
+ blk_add_rq_to_plug(plug, rq);
+ trace_block_plug(q);
if (same_queue_rq) {
- data.hctx = blk_mq_map_queue(q,
- same_queue_rq->mq_ctx->cpu);
+ data.hctx = same_queue_rq->mq_hctx;
+ trace_block_unplug(q, 1, true);
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
&cookie);
}
- } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
- !data.hctx->dispatch_busy)) {
- blk_mq_put_ctx(data.ctx);
- blk_mq_bio_to_request(rq, bio);
+ } else if ((q->nr_hw_queues > 1 && is_sync) ||
+ !data.hctx->dispatch_busy) {
blk_mq_try_issue_directly(data.hctx, rq, &cookie);
} else {
- blk_mq_put_ctx(data.ctx);
- blk_mq_bio_to_request(rq, bio);
blk_mq_sched_insert_request(rq, false, true, true);
}
@@ -1944,7 +2074,7 @@
list_del_init(&page->lru);
/*
* Remove kmemleak object previously allocated in
- * blk_mq_init_rq_map().
+ * blk_mq_alloc_rqs().
*/
kmemleak_free(page_address(page));
__free_pages(page, page->private);
@@ -1969,7 +2099,7 @@
struct blk_mq_tags *tags;
int node;
- node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+ node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
if (node == NUMA_NO_NODE)
node = set->numa_node;
@@ -2025,7 +2155,7 @@
size_t rq_size, left;
int node;
- node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+ node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
if (node == NUMA_NO_NODE)
node = set->numa_node;
@@ -2105,13 +2235,15 @@
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
LIST_HEAD(tmp);
+ enum hctx_type type;
hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+ type = hctx->type;
spin_lock(&ctx->lock);
- if (!list_empty(&ctx->rq_list)) {
- list_splice_init(&ctx->rq_list, &tmp);
+ if (!list_empty(&ctx->rq_lists[type])) {
+ list_splice_init(&ctx->rq_lists[type], &tmp);
blk_mq_hctx_clear_pending(hctx, ctx);
}
spin_unlock(&ctx->lock);
@@ -2138,8 +2270,6 @@
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
- blk_mq_debugfs_unregister_hctx(hctx);
-
if (blk_mq_hw_queue_mapped(hctx))
blk_mq_tag_idle(hctx);
@@ -2149,12 +2279,11 @@
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
- if (hctx->flags & BLK_MQ_F_BLOCKING)
- cleanup_srcu_struct(hctx->srcu);
-
blk_mq_remove_cpuhp(hctx);
- blk_free_flush_queue(hctx->fq);
- sbitmap_free(&hctx->ctx_map);
+
+ spin_lock(&q->unused_hctx_lock);
+ list_add(&hctx->hctx_list, &q->unused_hctx_list);
+ spin_unlock(&q->unused_hctx_lock);
}
static void blk_mq_exit_hw_queues(struct request_queue *q,
@@ -2166,19 +2295,70 @@
queue_for_each_hw_ctx(q, hctx, i) {
if (i == nr_queue)
break;
+ blk_mq_debugfs_unregister_hctx(hctx);
blk_mq_exit_hctx(q, set, hctx, i);
}
}
+static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
+{
+ int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
+
+ BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
+ __alignof__(struct blk_mq_hw_ctx)) !=
+ sizeof(struct blk_mq_hw_ctx));
+
+ if (tag_set->flags & BLK_MQ_F_BLOCKING)
+ hw_ctx_size += sizeof(struct srcu_struct);
+
+ return hw_ctx_size;
+}
+
static int blk_mq_init_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
- int node;
+ hctx->queue_num = hctx_idx;
- node = hctx->numa_node;
+ cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
+
+ hctx->tags = set->tags[hctx_idx];
+
+ if (set->ops->init_hctx &&
+ set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
+ goto unregister_cpu_notifier;
+
+ if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
+ hctx->numa_node))
+ goto exit_hctx;
+ return 0;
+
+ exit_hctx:
+ if (set->ops->exit_hctx)
+ set->ops->exit_hctx(hctx, hctx_idx);
+ unregister_cpu_notifier:
+ blk_mq_remove_cpuhp(hctx);
+ return -1;
+}
+
+static struct blk_mq_hw_ctx *
+blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
+ int node)
+{
+ struct blk_mq_hw_ctx *hctx;
+ gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
+
+ hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
+ if (!hctx)
+ goto fail_alloc_hctx;
+
+ if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
+ goto free_hctx;
+
+ atomic_set(&hctx->nr_active, 0);
if (node == NUMA_NO_NODE)
- node = hctx->numa_node = set->numa_node;
+ node = set->numa_node;
+ hctx->numa_node = node;
INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
spin_lock_init(&hctx->lock);
@@ -2186,82 +2366,76 @@
hctx->queue = q;
hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
- cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
-
- hctx->tags = set->tags[hctx_idx];
+ INIT_LIST_HEAD(&hctx->hctx_list);
/*
* Allocate space for all possible cpus to avoid allocation at
* runtime
*/
hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
- GFP_KERNEL, node);
+ gfp, node);
if (!hctx->ctxs)
- goto unregister_cpu_notifier;
+ goto free_cpumask;
- if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
- node))
+ if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
+ gfp, node))
goto free_ctxs;
-
hctx->nr_ctx = 0;
spin_lock_init(&hctx->dispatch_wait_lock);
init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
- if (set->ops->init_hctx &&
- set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
- goto free_bitmap;
-
- hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
+ hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
+ gfp);
if (!hctx->fq)
- goto exit_hctx;
-
- if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
- goto free_fq;
+ goto free_bitmap;
if (hctx->flags & BLK_MQ_F_BLOCKING)
init_srcu_struct(hctx->srcu);
+ blk_mq_hctx_kobj_init(hctx);
- blk_mq_debugfs_register_hctx(q, hctx);
+ return hctx;
- return 0;
-
- free_fq:
- kfree(hctx->fq);
- exit_hctx:
- if (set->ops->exit_hctx)
- set->ops->exit_hctx(hctx, hctx_idx);
free_bitmap:
sbitmap_free(&hctx->ctx_map);
free_ctxs:
kfree(hctx->ctxs);
- unregister_cpu_notifier:
- blk_mq_remove_cpuhp(hctx);
- return -1;
+ free_cpumask:
+ free_cpumask_var(hctx->cpumask);
+ free_hctx:
+ kfree(hctx);
+ fail_alloc_hctx:
+ return NULL;
}
static void blk_mq_init_cpu_queues(struct request_queue *q,
unsigned int nr_hw_queues)
{
- unsigned int i;
+ struct blk_mq_tag_set *set = q->tag_set;
+ unsigned int i, j;
for_each_possible_cpu(i) {
struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
struct blk_mq_hw_ctx *hctx;
+ int k;
__ctx->cpu = i;
spin_lock_init(&__ctx->lock);
- INIT_LIST_HEAD(&__ctx->rq_list);
+ for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
+ INIT_LIST_HEAD(&__ctx->rq_lists[k]);
+
__ctx->queue = q;
/*
* Set local node, IFF we have more than one hw queue. If
* not, we remain on the home node of the device
*/
- hctx = blk_mq_map_queue(q, i);
- if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
- hctx->numa_node = local_memory_node(cpu_to_node(i));
+ for (j = 0; j < set->nr_maps; j++) {
+ hctx = blk_mq_map_queue_type(q, j, i);
+ if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
+ hctx->numa_node = local_memory_node(cpu_to_node(i));
+ }
}
}
@@ -2287,7 +2461,7 @@
static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
unsigned int hctx_idx)
{
- if (set->tags[hctx_idx]) {
+ if (set->tags && set->tags[hctx_idx]) {
blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
blk_mq_free_rq_map(set->tags[hctx_idx]);
set->tags[hctx_idx] = NULL;
@@ -2296,16 +2470,11 @@
static void blk_mq_map_swqueue(struct request_queue *q)
{
- unsigned int i, hctx_idx;
+ unsigned int i, j, hctx_idx;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
- /*
- * Avoid others reading imcomplete hctx->cpumask through sysfs
- */
- mutex_lock(&q->sysfs_lock);
-
queue_for_each_hw_ctx(q, hctx, i) {
cpumask_clear(hctx->cpumask);
hctx->nr_ctx = 0;
@@ -2318,7 +2487,7 @@
* If the cpu isn't present, the cpu is mapped to first hctx.
*/
for_each_possible_cpu(i) {
- hctx_idx = q->mq_map[i];
+ hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i];
/* unmapped hw queue can be remapped after CPU topo changed */
if (!set->tags[hctx_idx] &&
!__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2328,19 +2497,44 @@
* case, remap the current ctx to hctx[0] which
* is guaranteed to always have tags allocated
*/
- q->mq_map[i] = 0;
+ set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0;
}
ctx = per_cpu_ptr(q->queue_ctx, i);
- hctx = blk_mq_map_queue(q, i);
+ for (j = 0; j < set->nr_maps; j++) {
+ if (!set->map[j].nr_queues) {
+ ctx->hctxs[j] = blk_mq_map_queue_type(q,
+ HCTX_TYPE_DEFAULT, i);
+ continue;
+ }
- cpumask_set_cpu(i, hctx->cpumask);
- ctx->index_hw = hctx->nr_ctx;
- hctx->ctxs[hctx->nr_ctx++] = ctx;
+ hctx = blk_mq_map_queue_type(q, j, i);
+ ctx->hctxs[j] = hctx;
+ /*
+ * If the CPU is already set in the mask, then we've
+ * mapped this one already. This can happen if
+ * devices share queues across queue maps.
+ */
+ if (cpumask_test_cpu(i, hctx->cpumask))
+ continue;
+
+ cpumask_set_cpu(i, hctx->cpumask);
+ hctx->type = j;
+ ctx->index_hw[hctx->type] = hctx->nr_ctx;
+ hctx->ctxs[hctx->nr_ctx++] = ctx;
+
+ /*
+ * If the nr_ctx type overflows, we have exceeded the
+ * amount of sw queues we can support.
+ */
+ BUG_ON(!hctx->nr_ctx);
+ }
+
+ for (; j < HCTX_MAX_TYPES; j++)
+ ctx->hctxs[j] = blk_mq_map_queue_type(q,
+ HCTX_TYPE_DEFAULT, i);
}
- mutex_unlock(&q->sysfs_lock);
-
queue_for_each_hw_ctx(q, hctx, i) {
/*
* If no software queues are mapped to this hardware queue,
@@ -2426,8 +2620,6 @@
static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
struct request_queue *q)
{
- q->tag_set = set;
-
mutex_lock(&set->tag_list_lock);
/*
@@ -2446,6 +2638,34 @@
mutex_unlock(&set->tag_list_lock);
}
+/* All allocations will be freed in release handler of q->mq_kobj */
+static int blk_mq_alloc_ctxs(struct request_queue *q)
+{
+ struct blk_mq_ctxs *ctxs;
+ int cpu;
+
+ ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
+ if (!ctxs)
+ return -ENOMEM;
+
+ ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
+ if (!ctxs->queue_ctx)
+ goto fail;
+
+ for_each_possible_cpu(cpu) {
+ struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
+ ctx->ctxs = ctxs;
+ }
+
+ q->mq_kobj = &ctxs->kobj;
+ q->queue_ctx = ctxs->queue_ctx;
+
+ return 0;
+ fail:
+ kfree(ctxs);
+ return -ENOMEM;
+}
+
/*
* It is the actual release handler for mq, but we do it from
* request queue's release handler for avoiding use-after-free
@@ -2454,18 +2674,18 @@
*/
void blk_mq_release(struct request_queue *q)
{
- struct blk_mq_hw_ctx *hctx;
- unsigned int i;
+ struct blk_mq_hw_ctx *hctx, *next;
+ int i;
- /* hctx kobj stays in hctx */
- queue_for_each_hw_ctx(q, hctx, i) {
- if (!hctx)
- continue;
+ queue_for_each_hw_ctx(q, hctx, i)
+ WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
+
+ /* all hctx are in .unused_hctx_list now */
+ list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
+ list_del_init(&hctx->hctx_list);
kobject_put(&hctx->kobj);
}
- q->mq_map = NULL;
-
kfree(q->queue_hw_ctx);
/*
@@ -2473,19 +2693,21 @@
* both share lifetime with request queue.
*/
blk_mq_sysfs_deinit(q);
-
- free_percpu(q->queue_ctx);
}
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
struct request_queue *uninit_q, *q;
- uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
+ uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
if (!uninit_q)
return ERR_PTR(-ENOMEM);
- q = blk_mq_init_allocated_queue(set, uninit_q);
+ /*
+ * Initialize the queue without an elevator. device_add_disk() will do
+ * the elevator initialization.
+ */
+ q = blk_mq_init_allocated_queue(set, uninit_q, false);
if (IS_ERR(q))
blk_cleanup_queue(uninit_q);
@@ -2493,80 +2715,151 @@
}
EXPORT_SYMBOL(blk_mq_init_queue);
-static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
+/*
+ * Helper for setting up a queue with mq ops, given queue depth, and
+ * the passed in mq ops flags.
+ */
+struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops,
+ unsigned int queue_depth,
+ unsigned int set_flags)
{
- int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
+ struct request_queue *q;
+ int ret;
- BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
- __alignof__(struct blk_mq_hw_ctx)) !=
- sizeof(struct blk_mq_hw_ctx));
+ memset(set, 0, sizeof(*set));
+ set->ops = ops;
+ set->nr_hw_queues = 1;
+ set->nr_maps = 1;
+ set->queue_depth = queue_depth;
+ set->numa_node = NUMA_NO_NODE;
+ set->flags = set_flags;
- if (tag_set->flags & BLK_MQ_F_BLOCKING)
- hw_ctx_size += sizeof(struct srcu_struct);
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ERR_PTR(ret);
- return hw_ctx_size;
+ q = blk_mq_init_queue(set);
+ if (IS_ERR(q)) {
+ blk_mq_free_tag_set(set);
+ return q;
+ }
+
+ return q;
+}
+EXPORT_SYMBOL(blk_mq_init_sq_queue);
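For simple devices the helper collapses tag set setup and queue allocation into one call, and on failure it frees the tag set itself, so the caller only needs an IS_ERR() check. A minimal single-queue driver sketch; the mydev_ names, depth, and flags are illustrative:

    #include <linux/blk-mq.h>

    static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
                                       const struct blk_mq_queue_data *bd)
    {
            struct request *rq = bd->rq;

            blk_mq_start_request(rq);
            /* ... hand the request to the hardware here ... */
            blk_mq_end_request(rq, BLK_STS_OK);
            return BLK_STS_OK;
    }

    static const struct blk_mq_ops mydev_mq_ops = {
            .queue_rq = mydev_queue_rq,
    };

    static struct blk_mq_tag_set mydev_tag_set;

    static struct request_queue *mydev_create_queue(void)
    {
            /* one hw queue, depth 128, merging allowed */
            return blk_mq_init_sq_queue(&mydev_tag_set, &mydev_mq_ops,
                                        128, BLK_MQ_F_SHOULD_MERGE);
    }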
+
+static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
+ struct blk_mq_tag_set *set, struct request_queue *q,
+ int hctx_idx, int node)
+{
+ struct blk_mq_hw_ctx *hctx = NULL, *tmp;
+
+ /* reuse dead hctx first */
+ spin_lock(&q->unused_hctx_lock);
+ list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
+ if (tmp->numa_node == node) {
+ hctx = tmp;
+ break;
+ }
+ }
+ if (hctx)
+ list_del_init(&hctx->hctx_list);
+ spin_unlock(&q->unused_hctx_lock);
+
+ if (!hctx)
+ hctx = blk_mq_alloc_hctx(q, set, node);
+ if (!hctx)
+ goto fail;
+
+ if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
+ goto free_hctx;
+
+ return hctx;
+
+ free_hctx:
+ kobject_put(&hctx->kobj);
+ fail:
+ return NULL;
}
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
struct request_queue *q)
{
- int i, j;
+ int i, j, end;
struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
- blk_mq_sysfs_unregister(q);
-
/* protect against switching io scheduler */
mutex_lock(&q->sysfs_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
int node;
+ struct blk_mq_hw_ctx *hctx;
- if (hctxs[i])
+ node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
+ /*
+ * If the hw queue has been mapped to another numa node,
+ * we need to realloc the hctx. If allocation fails, fall back
+ * to the previous one.
+ */
+ if (hctxs[i] && (hctxs[i]->numa_node == node))
continue;
- node = blk_mq_hw_queue_to_node(q->mq_map, i);
- hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
- GFP_KERNEL, node);
- if (!hctxs[i])
- break;
-
- if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
- node)) {
- kfree(hctxs[i]);
- hctxs[i] = NULL;
- break;
+ hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
+ if (hctx) {
+ if (hctxs[i])
+ blk_mq_exit_hctx(q, set, hctxs[i], i);
+ hctxs[i] = hctx;
+ } else {
+ if (hctxs[i])
+ pr_warn("Allocate new hctx on node %d fails,\
+ fallback to previous one on node %d\n",
+ node, hctxs[i]->numa_node);
+ else
+ break;
}
-
- atomic_set(&hctxs[i]->nr_active, 0);
- hctxs[i]->numa_node = node;
- hctxs[i]->queue_num = i;
-
- if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
- free_cpumask_var(hctxs[i]->cpumask);
- kfree(hctxs[i]);
- hctxs[i] = NULL;
- break;
- }
- blk_mq_hctx_kobj_init(hctxs[i]);
}
- for (j = i; j < q->nr_hw_queues; j++) {
+ /*
+ * Increasing nr_hw_queues failed. Free the newly allocated
+ * hctxs and keep the previous q->nr_hw_queues.
+ */
+ if (i != set->nr_hw_queues) {
+ j = q->nr_hw_queues;
+ end = i;
+ } else {
+ j = i;
+ end = q->nr_hw_queues;
+ q->nr_hw_queues = set->nr_hw_queues;
+ }
+
+ for (; j < end; j++) {
struct blk_mq_hw_ctx *hctx = hctxs[j];
if (hctx) {
if (hctx->tags)
blk_mq_free_map_and_requests(set, j);
blk_mq_exit_hctx(q, set, hctx, j);
- kobject_put(&hctx->kobj);
hctxs[j] = NULL;
-
}
}
- q->nr_hw_queues = i;
mutex_unlock(&q->sysfs_lock);
- blk_mq_sysfs_register(q);
+}
+
+/*
+ * Maximum number of hardware queues we support. For single sets, we'll never
+ * have more than the CPUs (software queues). For multiple sets, the tag_set
+ * user may have set ->nr_hw_queues larger.
+ */
+static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
+{
+ if (set->nr_maps == 1)
+ return nr_cpu_ids;
+
+ return max(set->nr_hw_queues, nr_cpu_ids);
}
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
- struct request_queue *q)
+ struct request_queue *q,
+ bool elevator_init)
{
/* mark the queue as mq asap */
q->mq_ops = set->ops;
@@ -2577,19 +2870,20 @@
if (!q->poll_cb)
goto err_exit;
- q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
- if (!q->queue_ctx)
- goto err_exit;
+ if (blk_mq_alloc_ctxs(q))
+ goto err_poll;
/* init q->mq_kobj and sw queues' kobjects */
blk_mq_sysfs_init(q);
- q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)),
+ q->nr_queues = nr_hw_queues(set);
+ q->queue_hw_ctx = kcalloc_node(q->nr_queues, sizeof(*(q->queue_hw_ctx)),
GFP_KERNEL, set->numa_node);
if (!q->queue_hw_ctx)
- goto err_percpu;
+ goto err_sys_init;
- q->mq_map = set->mq_map;
+ INIT_LIST_HEAD(&q->unused_hctx_list);
+ spin_lock_init(&q->unused_hctx_lock);
blk_mq_realloc_hw_ctxs(set, q);
if (!q->nr_hw_queues)
@@ -2598,12 +2892,12 @@
INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
- q->nr_queues = nr_cpu_ids;
+ q->tag_set = set;
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
-
- if (!(set->flags & BLK_MQ_F_SG_MERGE))
- queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+ if (set->nr_maps > HCTX_TYPE_POLL &&
+ set->map[HCTX_TYPE_POLL].nr_queues)
+ blk_queue_flag_set(QUEUE_FLAG_POLL, q);
q->sg_reserved_size = INT_MAX;
@@ -2612,8 +2906,6 @@
spin_lock_init(&q->requeue_lock);
blk_queue_make_request(q, blk_mq_make_request);
- if (q->mq_ops->poll)
- q->poll_fn = blk_mq_poll;
/*
* Do this after blk_queue_make_request() overrides it...
@@ -2623,36 +2915,33 @@
/*
* Default to classic polling
*/
- q->poll_nsec = -1;
-
- if (set->ops->complete)
- blk_queue_softirq_done(q, set->ops->complete);
+ q->poll_nsec = BLK_MQ_POLL_CLASSIC;
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
blk_mq_add_queue_tag_set(set, q);
blk_mq_map_swqueue(q);
- if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
- int ret;
-
- ret = elevator_init_mq(q);
- if (ret)
- return ERR_PTR(ret);
- }
+ if (elevator_init)
+ elevator_init_mq(q);
return q;
err_hctxs:
kfree(q->queue_hw_ctx);
-err_percpu:
- free_percpu(q->queue_ctx);
+ q->nr_hw_queues = 0;
+err_sys_init:
+ blk_mq_sysfs_deinit(q);
+err_poll:
+ blk_stat_free_callback(q->poll_cb);
+ q->poll_cb = NULL;
err_exit:
q->mq_ops = NULL;
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);
-void blk_mq_free_queue(struct request_queue *q)
+/* tags can _not_ be used after returning from blk_mq_exit_queue */
+void blk_mq_exit_queue(struct request_queue *q)
{
struct blk_mq_tag_set *set = q->tag_set;
@@ -2660,25 +2949,6 @@
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
}
-/* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
-{
- WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
-
- blk_mq_debugfs_unregister_hctxs(q);
- blk_mq_sysfs_unregister(q);
-
- /*
- * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
- * we should change hctx numa_node according to the new topology (this
- * involves freeing and re-allocating memory, worth doing?)
- */
- blk_mq_map_swqueue(q);
-
- blk_mq_sysfs_register(q);
- blk_mq_debugfs_register_hctxs(q);
-}
-
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
int i;
@@ -2733,7 +3003,9 @@
static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
- if (set->ops->map_queues) {
+ if (set->ops->map_queues && !is_kdump_kernel()) {
+ int i;
+
/*
* transport .map_queues is usually done in the following
* way:
@@ -2741,18 +3013,21 @@
* for (queue = 0; queue < set->nr_hw_queues; queue++) {
* mask = get_cpu_mask(queue)
* for_each_cpu(cpu, mask)
- * set->mq_map[cpu] = queue;
+ * set->map[x].mq_map[cpu] = queue;
* }
*
* When we need to remap, the table has to be cleared for
* killing stale mapping since one CPU may not be mapped
* to any hw queue.
*/
- blk_mq_clear_mq_map(set);
+ for (i = 0; i < set->nr_maps; i++)
+ blk_mq_clear_mq_map(&set->map[i]);
return set->ops->map_queues(set);
- } else
- return blk_mq_map_queues(set);
+ } else {
+ BUG_ON(set->nr_maps > 1);
+ return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+ }
}
/*
@@ -2763,7 +3038,7 @@
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
- int ret;
+ int i, ret;
BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
@@ -2786,6 +3061,11 @@
set->queue_depth = BLK_MQ_MAX_DEPTH;
}
+ if (!set->nr_maps)
+ set->nr_maps = 1;
+ else if (set->nr_maps > HCTX_MAX_TYPES)
+ return -EINVAL;
+
/*
* If a crashdump is active, then we are potentially in a very
* memory constrained environment. Limit us to 1 queue and
@@ -2793,24 +3073,30 @@
*/
if (is_kdump_kernel()) {
set->nr_hw_queues = 1;
+ set->nr_maps = 1;
set->queue_depth = min(64U, set->queue_depth);
}
/*
- * There is no use for more h/w queues than cpus.
+ * There is no use for more h/w queues than cpus if we just have
+ * a single map
*/
- if (set->nr_hw_queues > nr_cpu_ids)
+ if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
set->nr_hw_queues = nr_cpu_ids;
- set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *),
+ set->tags = kcalloc_node(nr_hw_queues(set), sizeof(struct blk_mq_tags *),
GFP_KERNEL, set->numa_node);
if (!set->tags)
return -ENOMEM;
ret = -ENOMEM;
- set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
- GFP_KERNEL, set->numa_node);
- if (!set->mq_map)
- goto out_free_tags;
+ for (i = 0; i < set->nr_maps; i++) {
+ set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
+ sizeof(set->map[i].mq_map[0]),
+ GFP_KERNEL, set->numa_node);
+ if (!set->map[i].mq_map)
+ goto out_free_mq_map;
+ set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
+ }
ret = blk_mq_update_queue_map(set);
if (ret)
@@ -2826,9 +3112,10 @@
return 0;
out_free_mq_map:
- kfree(set->mq_map);
- set->mq_map = NULL;
-out_free_tags:
+ for (i = 0; i < set->nr_maps; i++) {
+ kfree(set->map[i].mq_map);
+ set->map[i].mq_map = NULL;
+ }
kfree(set->tags);
set->tags = NULL;
return ret;
@@ -2837,13 +3124,15 @@
void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
- int i;
+ int i, j;
- for (i = 0; i < nr_cpu_ids; i++)
+ for (i = 0; i < nr_hw_queues(set); i++)
blk_mq_free_map_and_requests(set, i);
- kfree(set->mq_map);
- set->mq_map = NULL;
+ for (j = 0; j < set->nr_maps; j++) {
+ kfree(set->map[j].mq_map);
+ set->map[j].mq_map = NULL;
+ }
kfree(set->tags);
set->tags = NULL;
@@ -2859,6 +3148,9 @@
if (!set)
return -EINVAL;
+ if (q->nr_requests == nr)
+ return 0;
+
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
@@ -2879,6 +3171,8 @@
}
if (ret)
break;
+ if (q->elevator && q->elevator->type->ops.depth_updated)
+ q->elevator->type->ops.depth_updated(hctx);
}
if (!ret)
@@ -2965,10 +3259,11 @@
{
struct request_queue *q;
LIST_HEAD(head);
+ int prev_nr_hw_queues;
lockdep_assert_held(&set->tag_list_lock);
- if (nr_hw_queues > nr_cpu_ids)
+ if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
nr_hw_queues = nr_cpu_ids;
if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
return;
@@ -2988,11 +3283,30 @@
if (!blk_mq_elv_switch_none(&head, q))
goto switch_back;
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ blk_mq_debugfs_unregister_hctxs(q);
+ blk_mq_sysfs_unregister(q);
+ }
+
+ prev_nr_hw_queues = set->nr_hw_queues;
set->nr_hw_queues = nr_hw_queues;
blk_mq_update_queue_map(set);
+fallback:
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_realloc_hw_ctxs(set, q);
- blk_mq_queue_reinit(q);
+ if (q->nr_hw_queues != set->nr_hw_queues) {
+ pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
+ nr_hw_queues, prev_nr_hw_queues);
+ set->nr_hw_queues = prev_nr_hw_queues;
+ blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+ goto fallback;
+ }
+ blk_mq_map_swqueue(q);
+ }
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ blk_mq_sysfs_register(q);
+ blk_mq_debugfs_register_hctxs(q);
}
switch_back:
@@ -3091,15 +3405,12 @@
return false;
/*
- * poll_nsec can be:
+ * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
*
- * -1: don't ever hybrid sleep
* 0: use half of prev avg
* >0: use this specific value
*/
- if (q->poll_nsec == -1)
- return false;
- else if (q->poll_nsec > 0)
+ if (q->poll_nsec > 0)
nsecs = q->poll_nsec;
else
nsecs = blk_mq_poll_nsecs(q, hctx, rq);
@@ -3116,15 +3427,14 @@
kt = nsecs;
mode = HRTIMER_MODE_REL;
- hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
+ hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
hrtimer_set_expires(&hs.timer, kt);
- hrtimer_init_sleeper(&hs, current);
do {
if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
- hrtimer_start_expires(&hs.timer, mode);
+ hrtimer_sleeper_start_expires(&hs, mode);
if (hs.task)
io_schedule();
hrtimer_cancel(&hs.timer);
@@ -3136,59 +3446,14 @@
return true;
}
-static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static bool blk_mq_poll_hybrid(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
{
- struct request_queue *q = hctx->queue;
- long state;
-
- /*
- * If we sleep, have the caller restart the poll loop to reset
- * the state. Like for the other success return cases, the
- * caller is responsible for checking if the IO completed. If
- * the IO isn't complete, we'll get called again and will go
- * straight to the busy poll loop.
- */
- if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
- return true;
-
- hctx->poll_considered++;
-
- state = current->state;
- while (!need_resched()) {
- int ret;
-
- hctx->poll_invoked++;
-
- ret = q->mq_ops->poll(hctx, rq->tag);
- if (ret > 0) {
- hctx->poll_success++;
- set_current_state(TASK_RUNNING);
- return true;
- }
-
- if (signal_pending_state(state, current))
- set_current_state(TASK_RUNNING);
-
- if (current->state == TASK_RUNNING)
- return true;
- if (ret < 0)
- break;
- cpu_relax();
- }
-
- __set_current_state(TASK_RUNNING);
- return false;
-}
-
-static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
-{
- struct blk_mq_hw_ctx *hctx;
struct request *rq;
- if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
return false;
- hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
if (!blk_qc_t_is_internal(cookie))
rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
else {
@@ -3203,9 +3468,81 @@
return false;
}
- return __blk_mq_poll(hctx, rq);
+ return blk_mq_poll_hybrid_sleep(q, hctx, rq);
}
+/**
+ * blk_poll - poll for IO completions
+ * @q: the queue
+ * @cookie: cookie passed back at IO submission time
+ * @spin: whether to spin for completions
+ *
+ * Description:
+ * Poll for completions on the passed in queue. Returns number of
+ * completed entries found. If @spin is true, then blk_poll will continue
+ * looping until at least one completion is found, unless the task is
+ * otherwise marked running (or we need to reschedule).
+ */
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+{
+ struct blk_mq_hw_ctx *hctx;
+ long state;
+
+ if (!blk_qc_t_valid(cookie) ||
+ !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ return 0;
+
+ if (current->plug)
+ blk_flush_plug_list(current->plug, false);
+
+ hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+
+ /*
+ * If we sleep, have the caller restart the poll loop to reset
+ * the state. Like for the other success return cases, the
+ * caller is responsible for checking if the IO completed. If
+ * the IO isn't complete, we'll get called again and will go
+ * straight to the busy poll loop.
+ */
+ if (blk_mq_poll_hybrid(q, hctx, cookie))
+ return 1;
+
+ hctx->poll_considered++;
+
+ state = current->state;
+ do {
+ int ret;
+
+ hctx->poll_invoked++;
+
+ ret = q->mq_ops->poll(hctx);
+ if (ret > 0) {
+ hctx->poll_success++;
+ __set_current_state(TASK_RUNNING);
+ return ret;
+ }
+
+ if (signal_pending_state(state, current))
+ __set_current_state(TASK_RUNNING);
+
+ if (current->state == TASK_RUNNING)
+ return 1;
+ if (ret < 0 || !spin)
+ break;
+ cpu_relax();
+ } while (!need_resched());
+
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_poll);
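A successful blk_poll() only says that some request on the hardware queue finished, so the submitter must still re-check its own completion state. A hedged sketch of a polling wait loop modeled on the direct-IO callers; *done is assumed to be set by the polled bio's ->bi_end_io:

    #include <linux/blkdev.h>
    #include <linux/sched.h>

    static void mydrv_poll_wait(struct request_queue *q, blk_qc_t cookie,
                                bool *done)
    {
            for (;;) {
                    set_current_state(TASK_UNINTERRUPTIBLE);
                    if (READ_ONCE(*done))
                            break;
                    /* spin == true: poll until at least one completion */
                    if (!blk_poll(q, cookie, true))
                            io_schedule();
            }
            __set_current_state(TASK_RUNNING);
    }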
+
+unsigned int blk_mq_rq_cpu(struct request *rq)
+{
+ return rq->mq_ctx->cpu;
+}
+EXPORT_SYMBOL(blk_mq_rq_cpu);
+
static int __init blk_mq_init(void)
{
cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9497b47..32c62c6 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -7,17 +7,23 @@
struct blk_mq_tag_set;
+struct blk_mq_ctxs {
+ struct kobject kobj;
+ struct blk_mq_ctx __percpu *queue_ctx;
+};
+
/**
* struct blk_mq_ctx - State for a software queue facing the submitting CPUs
*/
struct blk_mq_ctx {
struct {
spinlock_t lock;
- struct list_head rq_list;
- } ____cacheline_aligned_in_smp;
+ struct list_head rq_lists[HCTX_MAX_TYPES];
+ } ____cacheline_aligned_in_smp;
unsigned int cpu;
- unsigned int index_hw;
+ unsigned short index_hw[HCTX_MAX_TYPES];
+ struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES];
/* incremented at dispatch time */
unsigned long rq_dispatched[2];
@@ -27,14 +33,16 @@
unsigned long ____cacheline_aligned_in_smp rq_completed[2];
struct request_queue *queue;
+ struct blk_mq_ctxs *ctxs;
struct kobject kobj;
} ____cacheline_aligned_in_smp;
-void blk_mq_freeze_queue(struct request_queue *q);
-void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+ bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
@@ -63,19 +71,49 @@
struct list_head *list);
/* Used by blk_insert_cloned_request() to issue request directly */
-blk_status_t blk_mq_request_issue_directly(struct request *rq);
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list);
/*
* CPU -> queue mappings
*/
-extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
+extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
-static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
- int cpu)
+/*
+ * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
+ * @q: request queue
+ * @type: the hctx type index
+ * @cpu: CPU
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
+ enum hctx_type type,
+ unsigned int cpu)
{
- return q->queue_hw_ctx[q->mq_map[cpu]];
+ return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
+}
+
+/*
+ * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
+ * @q: request queue
+ * @flags: request command flags
+ * @ctx: software queue ctx
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+ unsigned int flags,
+ struct blk_mq_ctx *ctx)
+{
+ enum hctx_type type = HCTX_TYPE_DEFAULT;
+
+ /*
+ * The caller ensures that if REQ_HIPRI is set, polling is enabled.
+ */
+ if (flags & REQ_HIPRI)
+ type = HCTX_TYPE_POLL;
+ else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
+ type = HCTX_TYPE_READ;
+
+ return ctx->hctxs[type];
}
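This per-type lookup is what lets a driver steer reads and polled IO to dedicated hardware queues. A hedged sketch of a ->map_queues() implementation for a tag set with several maps; the mydev name and the linear queue_offset layout are assumptions, not a fixed convention:

    #include <linux/blk-mq.h>

    static int mydev_map_queues(struct blk_mq_tag_set *set)
    {
            unsigned int offset = 0;
            int i;

            for (i = 0; i < set->nr_maps; i++) {
                    struct blk_mq_queue_map *map = &set->map[i];

                    /* map->nr_queues per type was chosen at tag set setup */
                    map->queue_offset = offset;
                    blk_mq_map_queues(map);
                    offset += map->nr_queues;
            }
            return 0;
    }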
/*
@@ -113,12 +151,7 @@
*/
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
- return __blk_mq_get_ctx(q, get_cpu());
-}
-
-static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-{
- put_cpu();
+ return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
struct blk_mq_alloc_data {
@@ -126,6 +159,7 @@
struct request_queue *q;
blk_mq_req_flags_t flags;
unsigned int shallow_depth;
+ unsigned int cmd_flags;
/* input & output parameter */
struct blk_mq_ctx *ctx;
@@ -150,8 +184,7 @@
return hctx->nr_ctx && hctx->tags;
}
-void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2]);
+unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2]);
@@ -184,32 +217,52 @@
}
}
-static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
-{
- if (rq->tag == -1 || rq->internal_tag == -1)
- return;
-
- __blk_mq_put_driver_tag(hctx, rq);
-}
-
static inline void blk_mq_put_driver_tag(struct request *rq)
{
- struct blk_mq_hw_ctx *hctx;
-
if (rq->tag == -1 || rq->internal_tag == -1)
return;
- hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
- __blk_mq_put_driver_tag(hctx, rq);
+ __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}
-static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
+static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
int cpu;
for_each_possible_cpu(cpu)
- set->mq_map[cpu] = 0;
+ qmap->mq_map[cpu] = 0;
+}
+
+/*
+ * blk_mq_plug() - Get caller context plug
+ * @q: request queue
+ * @bio : the bio being submitted by the caller context
+ *
+ * Plugging, by design, may delay the insertion of BIOs into the elevator in
+ * order to increase BIO merging opportunities. This, however, can cause BIO
+ * insertion order to change from the order in which submit_bio() is being
+ * executed in the case of multiple contexts concurrently issuing BIOs to a
+ * device, even if these contexts are synchronized to tightly control BIO issuing
+ * order. While this is not a problem with regular block devices, this ordering
+ * change can cause write BIO failures with zoned block devices as these
+ * require sequential write patterns to zones. Prevent this from happening by
+ * ignoring the plug state of a BIO issuing context if the target request queue
+ * is for a zoned block device and the BIO to plug is a write operation.
+ *
+ * Return current->plug if the bio can be plugged and NULL otherwise
+ */
+static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
+ struct bio *bio)
+{
+ /*
+ * For regular block devices or read operations, use the context plug,
+ * which may be NULL if blk_start_plug() was not executed.
+ */
+ if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
+ return current->plug;
+
+ /* Zoned block device write operation case: do not plug the BIO */
+ return NULL;
}
#endif
diff --git a/block/blk-pm.c b/block/blk-pm.c
new file mode 100644
index 0000000..1adc1cd
--- /dev/null
+++ b/block/blk-pm.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/blk-mq.h>
+#include <linux/blk-pm.h>
+#include <linux/blkdev.h>
+#include <linux/pm_runtime.h>
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+/**
+ * blk_pm_runtime_init - Block layer runtime PM initialization routine
+ * @q: the queue of the device
+ * @dev: the device the queue belongs to
+ *
+ * Description:
+ * Initialize runtime-PM-related fields for @q and start auto suspend for
+ * @dev. Drivers that want to take advantage of request-based runtime PM
+ * should call this function after @dev has been initialized and its
+ * request queue @q has been allocated, but while runtime PM for it cannot
+ * yet happen (either because it is disabled/forbidden or because its
+ * usage_count is > 0). In most cases, the driver should call this
+ * function before any I/O has taken place.
+ *
+ * This function takes care of setting up autosuspend for the device; the
+ * autosuspend delay is set to -1 to make runtime suspend impossible
+ * until an updated value is set by either the user or the driver. Drivers do
+ * not need to touch other autosuspend settings.
+ *
+ * Block layer runtime PM is request based, so it only works for drivers
+ * that use requests as their IO unit instead of those that use bios
+ * directly.
+ */
+void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+{
+ q->dev = dev;
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_set_autosuspend_delay(q->dev, -1);
+ pm_runtime_use_autosuspend(q->dev);
+}
+EXPORT_SYMBOL(blk_pm_runtime_init);
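A hedged sketch of the probe-time sequence the Description above implies; the 3000 ms delay and mydev naming are illustrative, and only the delay must be overridden since blk_pm_runtime_init() leaves it at -1:

    #include <linux/blk-pm.h>
    #include <linux/pm_runtime.h>

    static void mydev_setup_runtime_pm(struct request_queue *q,
                                       struct device *dev)
    {
            blk_pm_runtime_init(q, dev);
            pm_runtime_set_autosuspend_delay(dev, 3000);
            pm_runtime_mark_last_busy(dev);
            pm_runtime_allow(dev);
    }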
+
+/**
+ * blk_pre_runtime_suspend - Pre runtime suspend check
+ * @q: the queue of the device
+ *
+ * Description:
+ * This function will check if runtime suspend is allowed for the device
+ * by examining if there are any requests pending in the queue. If there
+ * are requests pending, the device cannot be runtime suspended; otherwise,
+ * the queue's status will be updated to SUSPENDING and the driver can
+ * proceed to suspend the device.
+ *
+ * In the not-allowed case, we mark the device last busy so that the
+ * runtime PM core will try to autosuspend it again later.
+ *
+ * This function should be called near the start of the device's
+ * runtime_suspend callback.
+ *
+ * Return:
+ * 0 - OK to runtime suspend the device
+ * -EBUSY - Device should not be runtime suspended
+ */
+int blk_pre_runtime_suspend(struct request_queue *q)
+{
+ int ret = 0;
+
+ if (!q->dev)
+ return ret;
+
+ WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
+
+ /*
+ * Increase the pm_only counter before checking whether any
+ * non-PM blk_queue_enter() calls are in progress to avoid that any
+ * new non-PM blk_queue_enter() calls succeed before the pm_only
+ * counter is decreased again.
+ */
+ blk_set_pm_only(q);
+ ret = -EBUSY;
+ /* Switch q_usage_counter from per-cpu to atomic mode. */
+ blk_freeze_queue_start(q);
+ /*
+ * Wait until atomic mode has been reached. Since that
+ * involves calling call_rcu(), it is guaranteed that later
+ * blk_queue_enter() calls see the pm-only state. See also
+ * http://lwn.net/Articles/573497/.
+ */
+ percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
+ if (percpu_ref_is_zero(&q->q_usage_counter))
+ ret = 0;
+ /* Switch q_usage_counter back to per-cpu mode. */
+ blk_mq_unfreeze_queue(q);
+
+ spin_lock_irq(&q->queue_lock);
+ if (ret < 0)
+ pm_runtime_mark_last_busy(q->dev);
+ else
+ q->rpm_status = RPM_SUSPENDING;
+ spin_unlock_irq(&q->queue_lock);
+
+ if (ret)
+ blk_clear_pm_only(q);
+
+ return ret;
+}
+EXPORT_SYMBOL(blk_pre_runtime_suspend);
+
+/**
+ * blk_post_runtime_suspend - Post runtime suspend processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_suspend function
+ *
+ * Description:
+ * Update the queue's runtime status according to the return value of the
+ * device's runtime suspend function and mark last busy for the device so
+ * that PM core will try to auto suspend the device at a later time.
+ *
+ * This function should be called near the end of the device's
+ * runtime_suspend callback.
+ */
+void blk_post_runtime_suspend(struct request_queue *q, int err)
+{
+ if (!q->dev)
+ return;
+
+ spin_lock_irq(&q->queue_lock);
+ if (!err) {
+ q->rpm_status = RPM_SUSPENDED;
+ } else {
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ }
+ spin_unlock_irq(&q->queue_lock);
+
+ if (err)
+ blk_clear_pm_only(q);
+}
+EXPORT_SYMBOL(blk_post_runtime_suspend);
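
A minimal sketch of the runtime_suspend callback shape these two helpers bracket; example_dev_to_queue() and example_hw_suspend() are hypothetical stand-ins for driver-specific code:

static int example_runtime_suspend(struct device *dev)
{
	struct request_queue *q = example_dev_to_queue(dev);	/* assumed */
	int err;

	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;	/* -EBUSY: requests are still in flight */

	err = example_hw_suspend(dev);	/* device-specific, assumed */
	blk_post_runtime_suspend(q, err);
	return err;
}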
+
+/**
+ * blk_pre_runtime_resume - Pre runtime resume processing
+ * @q: the queue of the device
+ *
+ * Description:
+ * Update the queue's runtime status to RESUMING in preparation for the
+ * runtime resume of the device.
+ *
+ * This function should be called near the start of the device's
+ * runtime_resume callback.
+ */
+void blk_pre_runtime_resume(struct request_queue *q)
+{
+ if (!q->dev)
+ return;
+
+ spin_lock_irq(&q->queue_lock);
+ q->rpm_status = RPM_RESUMING;
+ spin_unlock_irq(&q->queue_lock);
+}
+EXPORT_SYMBOL(blk_pre_runtime_resume);
+
+/**
+ * blk_post_runtime_resume - Post runtime resume processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_resume function
+ *
+ * Description:
+ * Update the queue's runtime status according to the return value of the
+ * device's runtime_resume function. If it is successfully resumed, process
+ * the requests that are queued into the device's queue when it is resuming
+ * and then mark last busy and initiate autosuspend for it.
+ *
+ * This function should be called near the end of the device's
+ * runtime_resume callback.
+ */
+void blk_post_runtime_resume(struct request_queue *q, int err)
+{
+ if (!q->dev)
+ return;
+
+ spin_lock_irq(&q->queue_lock);
+ if (!err) {
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ pm_request_autosuspend(q->dev);
+ } else {
+ q->rpm_status = RPM_SUSPENDED;
+ }
+ spin_unlock_irq(&q->queue_lock);
+
+ if (!err)
+ blk_clear_pm_only(q);
+}
+EXPORT_SYMBOL(blk_post_runtime_resume);
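
And the matching runtime_resume side, again with hypothetical driver helpers:

static int example_runtime_resume(struct device *dev)
{
	struct request_queue *q = example_dev_to_queue(dev);	/* assumed */
	int err;

	blk_pre_runtime_resume(q);
	err = example_hw_resume(dev);	/* device-specific, assumed */
	blk_post_runtime_resume(q, err);
	return err;
}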
+
+/**
+ * blk_set_runtime_active - Force runtime status of the queue to be active
+ * @q: the queue of the device
+ *
+ * If the device is left runtime suspended during system suspend, the
+ * resume hook typically resumes the device and corrects its runtime PM
+ * status accordingly. However, that does not affect the queue runtime PM
+ * status, which is still "suspended" and therefore prevents requests from
+ * being processed from the queue.
+ *
+ * This function can be used in a driver's resume hook to correct the queue
+ * runtime PM status and re-enable peeking at requests from the queue. It
+ * should be called before the first request is added to the queue.
+ */
+void blk_set_runtime_active(struct request_queue *q)
+{
+ if (q->dev) {
+ spin_lock_irq(&q->queue_lock);
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ pm_request_autosuspend(q->dev);
+ spin_unlock_irq(&q->queue_lock);
+ }
+}
+EXPORT_SYMBOL(blk_set_runtime_active);
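
A sketch of the system-resume case the comment describes, assuming the PM core has already woken the device before the driver's ->resume() runs:

static int example_system_resume(struct device *dev)
{
	struct request_queue *q = example_dev_to_queue(dev);	/* assumed */

	/* The hardware is awake, but the queue may still say RPM_SUSPENDED;
	 * realign the two before any request reaches the queue. */
	blk_set_runtime_active(q);
	return 0;
}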
diff --git a/block/blk-pm.h b/block/blk-pm.h
new file mode 100644
index 0000000..ea5507d
--- /dev/null
+++ b/block/blk-pm.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BLOCK_BLK_PM_H_
+#define _BLOCK_BLK_PM_H_
+
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_PM
+static inline void blk_pm_request_resume(struct request_queue *q)
+{
+ if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+ q->rpm_status == RPM_SUSPENDING))
+ pm_request_resume(q->dev);
+}
+
+static inline void blk_pm_mark_last_busy(struct request *rq)
+{
+ if (rq->q->dev && !(rq->rq_flags & RQF_PM))
+ pm_runtime_mark_last_busy(rq->q->dev);
+}
+
+static inline void blk_pm_requeue_request(struct request *rq)
+{
+ lockdep_assert_held(&rq->q->queue_lock);
+
+ if (rq->q->dev && !(rq->rq_flags & RQF_PM))
+ rq->q->nr_pending--;
+}
+
+static inline void blk_pm_add_request(struct request_queue *q,
+ struct request *rq)
+{
+ lockdep_assert_held(&q->queue_lock);
+
+ if (q->dev && !(rq->rq_flags & RQF_PM))
+ q->nr_pending++;
+}
+
+static inline void blk_pm_put_request(struct request *rq)
+{
+ lockdep_assert_held(&rq->q->queue_lock);
+
+ if (rq->q->dev && !(rq->rq_flags & RQF_PM))
+ --rq->q->nr_pending;
+}
+#else
+static inline void blk_pm_request_resume(struct request_queue *q)
+{
+}
+
+static inline void blk_pm_mark_last_busy(struct request *rq)
+{
+}
+
+static inline void blk_pm_requeue_request(struct request *rq)
+{
+}
+
+static inline void blk_pm_add_request(struct request_queue *q,
+ struct request *rq)
+{
+}
+
+static inline void blk_pm_put_request(struct request *rq)
+{
+}
+#endif
+
+#endif /* _BLOCK_BLK_PM_H_ */
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 0005dfd..6564606 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
#include "blk-rq-qos.h"
/*
@@ -27,75 +29,85 @@
return atomic_inc_below(&rq_wait->inflight, limit);
}
-void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
+void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
{
- struct rq_qos *rqos;
-
- for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->cleanup)
rqos->ops->cleanup(rqos, bio);
- }
+ rqos = rqos->next;
+ } while (rqos);
}
-void rq_qos_done(struct request_queue *q, struct request *rq)
+void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
- struct rq_qos *rqos;
-
- for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->done)
rqos->ops->done(rqos, rq);
- }
+ rqos = rqos->next;
+ } while (rqos);
}
-void rq_qos_issue(struct request_queue *q, struct request *rq)
+void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
{
- struct rq_qos *rqos;
-
- for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->issue)
rqos->ops->issue(rqos, rq);
- }
+ rqos = rqos->next;
+ } while (rqos);
}
-void rq_qos_requeue(struct request_queue *q, struct request *rq)
+void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
{
- struct rq_qos *rqos;
-
- for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->requeue)
rqos->ops->requeue(rqos, rq);
- }
+ rqos = rqos->next;
+ } while (rqos);
}
-void rq_qos_throttle(struct request_queue *q, struct bio *bio,
- spinlock_t *lock)
+void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
- struct rq_qos *rqos;
-
- for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->throttle)
- rqos->ops->throttle(rqos, bio, lock);
- }
+ rqos->ops->throttle(rqos, bio);
+ rqos = rqos->next;
+ } while (rqos);
}
-void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio)
+void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
- struct rq_qos *rqos;
-
- for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->track)
rqos->ops->track(rqos, rq, bio);
- }
+ rqos = rqos->next;
+ } while (rqos);
}
-void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
+void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
- struct rq_qos *rqos;
+ do {
+ if (rqos->ops->merge)
+ rqos->ops->merge(rqos, rq, bio);
+ rqos = rqos->next;
+ } while (rqos);
+}
- for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
+{
+ do {
if (rqos->ops->done_bio)
rqos->ops->done_bio(rqos, bio);
- }
+ rqos = rqos->next;
+ } while (rqos);
+}
+
+void __rq_qos_queue_depth_changed(struct rq_qos *rqos)
+{
+ do {
+ if (rqos->ops->queue_depth_changed)
+ rqos->ops->queue_depth_changed(rqos);
+ rqos = rqos->next;
+ } while (rqos);
}
/*
@@ -148,24 +160,27 @@
return ret;
}
-void rq_depth_scale_up(struct rq_depth *rqd)
+/* Returns true on success and false if scaling up wasn't possible */
+bool rq_depth_scale_up(struct rq_depth *rqd)
{
/*
* Hit max in previous round, stop here
*/
if (rqd->scaled_max)
- return;
+ return false;
rqd->scale_step--;
rqd->scaled_max = rq_depth_calc_max_depth(rqd);
+ return true;
}
/*
* Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
+ * had a latency violation. Returns true on success and returns false if
+ * scaling down wasn't possible.
*/
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
/*
* Stop scaling down when we've hit the limit. This also prevents
@@ -173,7 +188,7 @@
* keep up.
*/
if (rqd->max_depth == 1)
- return;
+ return false;
if (rqd->scale_step < 0 && hard_throttle)
rqd->scale_step = 0;
@@ -182,10 +197,105 @@
rqd->scaled_max = false;
rq_depth_calc_max_depth(rqd);
+ return true;
+}
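
The new boolean returns let callers avoid useless work when the depth did not move; a hedged sketch of the wbt-style pattern, with example_rearm_window() as an assumed bookkeeping helper:

static void example_adjust_depth(struct rq_depth *rqd, bool latency_violation)
{
	bool changed;

	if (latency_violation)
		changed = rq_depth_scale_down(rqd, true);
	else
		changed = rq_depth_scale_up(rqd);

	/* Only restart the monitoring window if scaling actually happened. */
	if (changed)
		example_rearm_window();	/* assumed helper */
}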
+
+struct rq_qos_wait_data {
+ struct wait_queue_entry wq;
+ struct task_struct *task;
+ struct rq_wait *rqw;
+ acquire_inflight_cb_t *cb;
+ void *private_data;
+ bool got_token;
+};
+
+static int rq_qos_wake_function(struct wait_queue_entry *curr,
+ unsigned int mode, int wake_flags, void *key)
+{
+ struct rq_qos_wait_data *data = container_of(curr,
+ struct rq_qos_wait_data,
+ wq);
+
+ /*
+ * If we fail to get a budget, return -1 to interrupt the wake up loop
+ * in __wake_up_common.
+ */
+ if (!data->cb(data->rqw, data->private_data))
+ return -1;
+
+ data->got_token = true;
+ smp_wmb();
+ list_del_init(&curr->entry);
+ wake_up_process(data->task);
+ return 1;
+}
+
+/**
+ * rq_qos_wait - throttle on a rqw if we need to
+ * @rqw: rqw to throttle on
+ * @private_data: caller provided specific data
+ * @acquire_inflight_cb: inc the rqw->inflight counter if we can
+ * @cleanup_cb: the callback to cleanup in case we race with a waker
+ *
+ * This provides a uniform place for the rq_qos users to do their throttling.
+ * Since you can end up with a lot of things sleeping at once, this manages
+ * the waking up based on the resources available. The acquire_inflight_cb
+ * should increment rqw->inflight if it is able to do so, or return false if
+ * not, in which case we will sleep until room becomes available.
+ *
+ * cleanup_cb is called in case we race with a waker and need to clean up
+ * the inflight count accordingly.
+ */
+void rq_qos_wait(struct rq_wait *rqw, void *private_data,
+ acquire_inflight_cb_t *acquire_inflight_cb,
+ cleanup_cb_t *cleanup_cb)
+{
+ struct rq_qos_wait_data data = {
+ .wq = {
+ .func = rq_qos_wake_function,
+ .entry = LIST_HEAD_INIT(data.wq.entry),
+ },
+ .task = current,
+ .rqw = rqw,
+ .cb = acquire_inflight_cb,
+ .private_data = private_data,
+ };
+ bool has_sleeper;
+
+ has_sleeper = wq_has_sleeper(&rqw->wait);
+ if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
+ return;
+
+ prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
+ has_sleeper = !wq_has_single_sleeper(&rqw->wait);
+ do {
+ /* The memory barrier in set_current_state() saves us here. */
+ if (data.got_token)
+ break;
+ if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
+ finish_wait(&rqw->wait, &data.wq);
+
+ /*
+ * We raced with rq_qos_wake_function() getting a token,
+ * which means we now have two. Put our local token
+ * and wake anyone else potentially waiting for one.
+ */
+ smp_rmb();
+ if (data.got_token)
+ cleanup_cb(rqw, private_data);
+ break;
+ }
+ io_schedule();
+ has_sleeper = true;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ } while (1);
+ finish_wait(&rqw->wait, &data.wq);
}
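
A sketch of the callback pair a policy might hand to rq_qos_wait(); the fixed limit of 32 and the names are illustrative, loosely following what a wbt-like user does:

/* Must not sleep: called both inline and from the wake-up path. */
static bool example_inflight_cb(struct rq_wait *rqw, void *private_data)
{
	return rq_wait_inc_below(rqw, 32);	/* assumed per-policy limit */
}

/* Undo a token we grabbed after losing the race with a waker. */
static void example_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	atomic_dec(&rqw->inflight);
	wake_up_all(&rqw->wait);
}

/* A throttle hook would then simply do:
 *	rq_qos_wait(rqw, policy_data, example_inflight_cb, example_cleanup_cb);
 */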
void rq_qos_exit(struct request_queue *q)
{
+ blk_mq_debugfs_unregister_queue_rqos(q);
+
while (q->rq_qos) {
struct rq_qos *rqos = q->rq_qos;
q->rq_qos = rqos->next;
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 32b02ef..2bc43e9 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H
@@ -7,9 +8,14 @@
#include <linux/atomic.h>
#include <linux/wait.h>
+#include "blk-mq-debugfs.h"
+
+struct blk_mq_debugfs_attr;
+
enum rq_qos_id {
RQ_QOS_WBT,
- RQ_QOS_CGROUP,
+ RQ_QOS_LATENCY,
+ RQ_QOS_COST,
};
struct rq_wait {
@@ -22,17 +28,23 @@
struct request_queue *q;
enum rq_qos_id id;
struct rq_qos *next;
+#ifdef CONFIG_BLK_DEBUG_FS
+ struct dentry *debugfs_dir;
+#endif
};
struct rq_qos_ops {
- void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
+ void (*throttle)(struct rq_qos *, struct bio *);
void (*track)(struct rq_qos *, struct request *, struct bio *);
+ void (*merge)(struct rq_qos *, struct request *, struct bio *);
void (*issue)(struct rq_qos *, struct request *);
void (*requeue)(struct rq_qos *, struct request *);
void (*done)(struct rq_qos *, struct request *);
void (*done_bio)(struct rq_qos *, struct bio *);
void (*cleanup)(struct rq_qos *, struct bio *);
+ void (*queue_depth_changed)(struct rq_qos *);
void (*exit)(struct rq_qos *);
+ const struct blk_mq_debugfs_attr *debugfs_attrs;
};
struct rq_depth {
@@ -63,7 +75,20 @@
static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
- return rq_qos_id(q, RQ_QOS_CGROUP);
+ return rq_qos_id(q, RQ_QOS_LATENCY);
+}
+
+static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
+{
+ switch (id) {
+ case RQ_QOS_WBT:
+ return "wbt";
+ case RQ_QOS_LATENCY:
+ return "latency";
+ case RQ_QOS_COST:
+ return "cost";
+ }
+ return "unknown";
}
static inline void rq_wait_init(struct rq_wait *rq_wait)
@@ -76,34 +101,107 @@
{
rqos->next = q->rq_qos;
q->rq_qos = rqos;
+
+ if (rqos->ops->debugfs_attrs)
+ blk_mq_debugfs_register_rqos(rqos);
}
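
A minimal sketch of how a policy hooks into this chain; the ops table and attach helper are hypothetical, and a real policy would pick its own id:

static void example_qos_done(struct rq_qos *rqos, struct request *rq)
{
	/* per-request completion accounting for this policy */
}

static const struct rq_qos_ops example_qos_ops = {
	.done = example_qos_done,
	/* hooks left NULL are skipped by the __rq_qos_*() loops */
};

static void example_qos_attach(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->q = q;
	rqos->id = RQ_QOS_WBT;	/* illustrative id */
	rqos->ops = &example_qos_ops;
	rq_qos_add(q, rqos);	/* links at the head of q->rq_qos */
}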
static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
- struct rq_qos *cur, *prev = NULL;
- for (cur = q->rq_qos; cur; cur = cur->next) {
- if (cur == rqos) {
- if (prev)
- prev->next = rqos->next;
- else
- q->rq_qos = cur;
+ struct rq_qos **cur;
+
+ for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
+ if (*cur == rqos) {
+ *cur = rqos->next;
break;
}
- prev = cur;
}
+
+ blk_mq_debugfs_unregister_rqos(rqos);
}
+typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
+typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);
+
+void rq_qos_wait(struct rq_wait *rqw, void *private_data,
+ acquire_inflight_cb_t *acquire_inflight_cb,
+ cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
-void rq_depth_scale_up(struct rq_depth *rqd);
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
+bool rq_depth_scale_up(struct rq_depth *rqd);
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);
-void rq_qos_cleanup(struct request_queue *, struct bio *);
-void rq_qos_done(struct request_queue *, struct request *);
-void rq_qos_issue(struct request_queue *, struct request *);
-void rq_qos_requeue(struct request_queue *, struct request *);
-void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
-void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
-void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
+void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
+void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
+void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
+void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
+void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
+void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
+void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
+void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
+void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
+
+static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
+{
+ if (q->rq_qos)
+ __rq_qos_cleanup(q->rq_qos, bio);
+}
+
+static inline void rq_qos_done(struct request_queue *q, struct request *rq)
+{
+ if (q->rq_qos)
+ __rq_qos_done(q->rq_qos, rq);
+}
+
+static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
+{
+ if (q->rq_qos)
+ __rq_qos_issue(q->rq_qos, rq);
+}
+
+static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
+{
+ if (q->rq_qos)
+ __rq_qos_requeue(q->rq_qos, rq);
+}
+
+static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
+{
+ if (q->rq_qos)
+ __rq_qos_done_bio(q->rq_qos, bio);
+}
+
+static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
+{
+ /*
+ * BIO_TRACKED lets controllers know that a bio went through the
+ * normal rq_qos path.
+ */
+ bio_set_flag(bio, BIO_TRACKED);
+ if (q->rq_qos)
+ __rq_qos_throttle(q->rq_qos, bio);
+}
+
+static inline void rq_qos_track(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+{
+ if (q->rq_qos)
+ __rq_qos_track(q->rq_qos, rq, bio);
+}
+
+static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+{
+ if (q->rq_qos)
+ __rq_qos_merge(q->rq_qos, rq, bio);
+}
+
+static inline void rq_qos_queue_depth_changed(struct request_queue *q)
+{
+ if (q->rq_qos)
+ __rq_qos_queue_depth_changed(q->rq_qos);
+}
+
void rq_qos_exit(struct request_queue *);
+
#endif
diff --git a/block/blk-settings.c b/block/blk-settings.c
index ffd4599..5f6dcc7 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions related to setting various queue properties from drivers
*/
@@ -6,11 +7,12 @@
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
-#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
+#include <linux/memblock.h> /* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
#include "blk.h"
#include "blk-wbt.h"
@@ -20,65 +22,12 @@
unsigned long blk_max_pfn;
-/**
- * blk_queue_prep_rq - set a prepare_request function for queue
- * @q: queue
- * @pfn: prepare_request function
- *
- * It's possible for a queue to register a prepare_request callback which
- * is invoked before the request is handed to the request_fn. The goal of
- * the function is to prepare a request for I/O, it can be used to build a
- * cdb from the request data for instance.
- *
- */
-void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
-{
- q->prep_rq_fn = pfn;
-}
-EXPORT_SYMBOL(blk_queue_prep_rq);
-
-/**
- * blk_queue_unprep_rq - set an unprepare_request function for queue
- * @q: queue
- * @ufn: unprepare_request function
- *
- * It's possible for a queue to register an unprepare_request callback
- * which is invoked before the request is finally completed. The goal
- * of the function is to deallocate any data that was allocated in the
- * prepare_request callback.
- *
- */
-void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
-{
- q->unprep_rq_fn = ufn;
-}
-EXPORT_SYMBOL(blk_queue_unprep_rq);
-
-void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
-{
- q->softirq_done_fn = fn;
-}
-EXPORT_SYMBOL(blk_queue_softirq_done);
-
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
-void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
-{
- WARN_ON_ONCE(q->mq_ops);
- q->rq_timed_out_fn = fn;
-}
-EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
-
-void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
-{
- q->lld_busy_fn = fn;
-}
-EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
-
/**
* blk_set_default_limits - reset limits to default values
* @lim: the queue_limits structure to reset
@@ -109,7 +58,6 @@
lim->alignment_offset = 0;
lim->io_opt = 0;
lim->misaligned = 0;
- lim->cluster = 1;
lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);
@@ -169,8 +117,6 @@
q->make_request_fn = mfn;
blk_queue_dma_alignment(q, 511);
- blk_queue_congestion_threshold(q);
- q->nr_batching = BLK_BATCH_REQ;
blk_set_default_limits(&q->limits);
}
@@ -365,6 +311,9 @@
__func__, max_size);
}
+ /* see blk_queue_virt_boundary() for the explanation */
+ WARN_ON_ONCE(q->limits.virt_boundary_mask);
+
q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
@@ -602,8 +551,6 @@
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
- t->cluster &= b->cluster;
-
/* Physical block size a multiple of the logical block size? */
if (t->physical_block_size & (t->logical_block_size - 1)) {
t->physical_block_size = t->logical_block_size;
@@ -721,22 +668,6 @@
EXPORT_SYMBOL(disk_stack_limits);
/**
- * blk_queue_dma_pad - set pad mask
- * @q: the request queue for the device
- * @mask: pad mask
- *
- * Set dma pad mask.
- *
- * Appending pad buffer to a request modifies the last entry of a
- * scatter list such that it includes the pad buffer.
- **/
-void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
-{
- q->dma_pad_mask = mask;
-}
-EXPORT_SYMBOL(blk_queue_dma_pad);
-
-/**
* blk_queue_update_dma_pad - update pad mask
* @q: the request queue for the device
* @mask: pad mask
@@ -815,6 +746,15 @@
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
q->limits.virt_boundary_mask = mask;
+
+ /*
+ * Devices that require a virtual boundary do not support scatter/gather
+ * I/O natively, but instead require a descriptor list entry for each
+ * page (which might not be identical to the Linux PAGE_SIZE). Because
+ * of that they are not limited by our notion of "segment size".
+ */
+ if (mask)
+ q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
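
A hedged example of the typical caller: a controller whose DMA descriptors cannot cross a page-sized boundary; the 4 KiB value is illustrative:

static void example_set_virt_boundary(struct request_queue *q)
{
	/* Each data element must stay within a 4 KiB window; as a side
	 * effect max_segment_size is lifted to UINT_MAX by the helper. */
	blk_queue_virt_boundary(q, SZ_4K - 1);
}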
@@ -857,15 +797,6 @@
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
-{
- if (queueable)
- blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
- else
- blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
-
/**
* blk_set_queue_depth - tell the block layer about the device queue depth
* @q: the request queue for the device
@@ -875,7 +806,7 @@
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
q->queue_depth = depth;
- wbt_set_queue_depth(q, depth);
+ rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);
@@ -889,21 +820,57 @@
*/
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
- spin_lock_irq(q->queue_lock);
if (wc)
- queue_flag_set(QUEUE_FLAG_WC, q);
+ blk_queue_flag_set(QUEUE_FLAG_WC, q);
else
- queue_flag_clear(QUEUE_FLAG_WC, q);
+ blk_queue_flag_clear(QUEUE_FLAG_WC, q);
if (fua)
- queue_flag_set(QUEUE_FLAG_FUA, q);
+ blk_queue_flag_set(QUEUE_FLAG_FUA, q);
else
- queue_flag_clear(QUEUE_FLAG_FUA, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
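
Now that no queue lock is taken, a driver may call this from any sleepable init context; a sketch with assumed capability flags:

static void example_set_write_cache(struct request_queue *q,
				    bool cache, bool fua)
{
	/* Advertise a volatile write cache, and FUA only when the
	 * hardware honours it (both capabilities assumed here). */
	blk_queue_write_cache(q, cache, cache && fua);
}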
+/**
+ * blk_queue_required_elevator_features - Set the required elevator features of a queue
+ * @q: the request queue for the target device
+ * @features: Required elevator features OR'ed together
+ *
+ * Tell the block layer that for the device controlled through @q, the only
+ * elevators that can be used are those that implement at least the set of
+ * features specified by @features.
+ */
+void blk_queue_required_elevator_features(struct request_queue *q,
+ unsigned int features)
+{
+ q->required_elevator_features = features;
+}
+EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
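
For instance, a zoned-device driver can restrict itself to elevators that preserve sequential write ordering; ELEVATOR_F_ZBD_SEQ_WRITE is the feature flag introduced alongside this helper, and mq-deadline advertises it:

static void example_restrict_elevators(struct request_queue *q)
{
	/* Only schedulers guaranteeing in-order zone writes may be picked. */
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
}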
+
+/**
+ * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
+ * @q: the request queue for the device
+ * @dev: the device pointer for dma
+ *
+ * Tell the block layer that the segments of @q may be merged according to
+ * the DMA map (IOMMU merge boundary) of @dev.
+ */
+bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
+ struct device *dev)
+{
+ unsigned long boundary = dma_get_merge_boundary(dev);
+
+ if (!boundary)
+ return false;
+
+ /* No need to update max_segment_size; see blk_queue_virt_boundary(). */
+ blk_queue_virt_boundary(q, boundary);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
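
A sketch of the intended caller, e.g. an MMC-style host behind an IOMMU; the 64 KiB fallback is an assumption, not part of this API:

static void example_enable_iommu_merging(struct request_queue *q,
					 struct device *dma_dev)
{
	/* dma_get_merge_boundary() returns 0 without an IOMMU that can
	 * merge, in which case the queue limits are left untouched. */
	if (!blk_queue_can_use_dma_map_merging(q, dma_dev))
		blk_queue_max_segment_size(q, SZ_64K);	/* assumed fallback */
}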
+
static int __init blk_settings_init(void)
{
blk_max_low_pfn = max_low_pfn - 1;
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 15c1f5e..457d9ba 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -34,7 +34,7 @@
rq = list_entry(local_list.next, struct request, ipi_list);
list_del_init(&rq->ipi_list);
- rq->q->softirq_done_fn(rq);
+ rq->q->mq_ops->complete(rq);
}
}
@@ -97,12 +97,12 @@
void __blk_complete_request(struct request *req)
{
- int ccpu, cpu;
struct request_queue *q = req->q;
+ int cpu, ccpu = req->mq_ctx->cpu;
unsigned long flags;
bool shared = false;
- BUG_ON(!q->softirq_done_fn);
+ BUG_ON(!q->mq_ops->complete);
local_irq_save(flags);
cpu = smp_processor_id();
@@ -110,8 +110,7 @@
/*
* Select completion CPU
*/
- if (req->cpu != -1) {
- ccpu = req->cpu;
+ if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) {
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
shared = cpus_share_cache(cpu, ccpu);
} else
@@ -144,27 +143,6 @@
local_irq_restore(flags);
}
-EXPORT_SYMBOL(__blk_complete_request);
-
-/**
- * blk_complete_request - end I/O on a request
- * @req: the request being processed
- *
- * Description:
- * Ends all I/O on a request. It does not handle partial completions,
- * unless the driver actually implements this in its completion callback
- * through requeueing. The actual completion happens out-of-order,
- * through a softirq handler. The user must have registered a completion
- * callback through blk_queue_softirq_done().
- **/
-void blk_complete_request(struct request *req)
-{
- if (unlikely(blk_should_fake_timeout(req->q)))
- return;
- if (!blk_mark_rq_complete(req))
- __blk_complete_request(req);
-}
-EXPORT_SYMBOL(blk_complete_request);
static __init int blk_softirq_init(void)
{
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 7587b1c..940f15d 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block stat tracking code
*
@@ -130,7 +131,6 @@
return cb;
}
-EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);
void blk_stat_add_callback(struct request_queue *q,
struct blk_stat_callback *cb)
@@ -151,7 +151,6 @@
blk_queue_flag_set(QUEUE_FLAG_STATS, q);
spin_unlock(&q->stats->lock);
}
-EXPORT_SYMBOL_GPL(blk_stat_add_callback);
void blk_stat_remove_callback(struct request_queue *q,
struct blk_stat_callback *cb)
@@ -164,7 +163,6 @@
del_timer_sync(&cb->timer);
}
-EXPORT_SYMBOL_GPL(blk_stat_remove_callback);
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
@@ -181,7 +179,6 @@
if (cb)
call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
-EXPORT_SYMBOL_GPL(blk_stat_free_callback);
void blk_stat_enable_accounting(struct request_queue *q)
{
@@ -190,6 +187,7 @@
blk_queue_flag_set(QUEUE_FLAG_STATS, q);
spin_unlock(&q->stats->lock);
}
+EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
diff --git a/block/blk-stat.h b/block/blk-stat.h
index f4a1568..17b47a8 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -145,6 +145,11 @@
mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs));
}
+static inline void blk_stat_deactivate(struct blk_stat_callback *cb)
+{
+ del_timer_sync(&cb->timer);
+}
+
/**
* blk_stat_activate_msecs() - Gather block statistics during a time window in
* milliseconds.
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3772671..46f5198 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -68,7 +68,7 @@
unsigned long nr;
int ret, err;
- if (!q->request_fn && !q->mq_ops)
+ if (!queue_is_mq(q))
return -EINVAL;
ret = queue_var_store(&nr, page, count);
@@ -78,11 +78,7 @@
if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ;
- if (q->request_fn)
- err = blk_update_nr_requests(q, nr);
- else
- err = blk_mq_update_nr_requests(q, nr);
-
+ err = blk_mq_update_nr_requests(q, nr);
if (err)
return err;
@@ -136,10 +132,7 @@
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
- if (blk_queue_cluster(q))
- return queue_var_show(queue_max_segment_size(q), (page));
-
- return queue_var_show(PAGE_SIZE, (page));
+ return queue_var_show(queue_max_segment_size(q), (page));
}
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -242,10 +235,10 @@
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1;
q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
return ret;
}
@@ -300,6 +293,11 @@
}
}
+static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(blk_queue_nr_zones(q), page);
+}
+
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
return queue_var_show((blk_queue_nomerges(q) << 1) |
@@ -315,14 +313,12 @@
if (ret < 0)
return ret;
- spin_lock_irq(q->queue_lock);
- queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
- queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
+ blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
if (nm == 2)
- queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
else if (nm)
- queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
return ret;
}
@@ -346,18 +342,16 @@
if (ret < 0)
return ret;
- spin_lock_irq(q->queue_lock);
if (val == 2) {
- queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
- queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
} else if (val == 1) {
- queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
- queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
} else if (val == 0) {
- queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
- queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+ blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+ blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
}
- spin_unlock_irq(q->queue_lock);
#endif
return ret;
}
@@ -366,8 +360,8 @@
{
int val;
- if (q->poll_nsec == -1)
- val = -1;
+ if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
+ val = BLK_MQ_POLL_CLASSIC;
else
val = q->poll_nsec / 1000;
@@ -386,10 +380,12 @@
if (err < 0)
return err;
- if (val == -1)
- q->poll_nsec = -1;
- else
+ if (val == BLK_MQ_POLL_CLASSIC)
+ q->poll_nsec = BLK_MQ_POLL_CLASSIC;
+ else if (val >= 0)
q->poll_nsec = val * 1000;
+ else
+ return -EINVAL;
return count;
}
@@ -405,7 +401,8 @@
unsigned long poll_on;
ssize_t ret;
- if (!q->mq_ops || !q->mq_ops->poll)
+ if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
+ !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
return -EINVAL;
ret = queue_var_store(&poll_on, page, count);
@@ -420,6 +417,26 @@
return ret;
}
+static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
+{
+ return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
+}
+
+static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned int val;
+ int err;
+
+ err = kstrtou32(page, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+
+ blk_queue_rq_timeout(q, msecs_to_jiffies(val));
+
+ return count;
+}
+
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
if (!wbt_rq_qos(q))
@@ -453,25 +470,21 @@
else if (val >= 0)
val *= 1000ULL;
+ if (wbt_get_min_lat(q) == val)
+ return count;
+
/*
* Ensure that the queue is idled, in case the latency update
* ends up either enabling or disabling wbt completely. We can't
* have IO inflight if that happens.
*/
- if (q->mq_ops) {
- blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
- } else
- blk_queue_bypass_start(q);
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
wbt_set_min_lat(q, val);
- wbt_update_limits(q);
- if (q->mq_ops) {
- blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
- } else
- blk_queue_bypass_end(q);
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
return count;
}
@@ -637,6 +650,11 @@
.show = queue_zoned_show,
};
+static struct queue_sysfs_entry queue_nr_zones_entry = {
+ .attr = {.name = "nr_zones", .mode = 0444 },
+ .show = queue_nr_zones_show,
+};
+
static struct queue_sysfs_entry queue_nomerges_entry = {
.attr = {.name = "nomerges", .mode = 0644 },
.show = queue_nomerges_show,
@@ -689,6 +707,12 @@
.show = queue_dax_show,
};
+static struct queue_sysfs_entry queue_io_timeout_entry = {
+ .attr = {.name = "io_timeout", .mode = 0644 },
+ .show = queue_io_timeout_show,
+ .store = queue_io_timeout_store,
+};
+
static struct queue_sysfs_entry queue_wb_lat_entry = {
.attr = {.name = "wbt_lat_usec", .mode = 0644 },
.show = queue_wb_lat_show,
@@ -703,7 +727,7 @@
};
#endif
-static struct attribute *default_attrs[] = {
+static struct attribute *queue_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
@@ -727,6 +751,7 @@
&queue_write_zeroes_max_entry.attr,
&queue_nonrot_entry.attr,
&queue_zoned_entry.attr,
+ &queue_nr_zones_entry.attr,
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
@@ -737,12 +762,32 @@
&queue_dax_entry.attr,
&queue_wb_lat_entry.attr,
&queue_poll_delay_entry.attr,
+ &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
&throtl_sample_time_entry.attr,
#endif
NULL,
};
+static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
+
+ if (attr == &queue_io_timeout_entry.attr &&
+ (!q->mq_ops || !q->mq_ops->timeout))
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute_group queue_attr_group = {
+ .attrs = queue_attrs,
+ .is_visible = queue_attr_visible,
+};
+
+
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
static ssize_t
@@ -794,22 +839,47 @@
kmem_cache_free(blk_requestq_cachep, q);
}
+/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
+static void blk_exit_queue(struct request_queue *q)
+{
+ /*
+ * Since the I/O scheduler exit code may access cgroup information,
+ * perform I/O scheduler exit before disassociating from the block
+ * cgroup controller.
+ */
+ if (q->elevator) {
+ ioc_clear_queue(q);
+ __elevator_exit(q, q->elevator);
+ q->elevator = NULL;
+ }
+
+ /*
+ * Remove all references to @q from the block cgroup controller before
+ * restoring @q->queue_lock to avoid that restoring this pointer causes
+ * e.g. blkcg_print_blkgs() to crash.
+ */
+ blkcg_exit_queue(q);
+
+ /*
+ * Since the cgroup code may dereference the @q->backing_dev_info
+ * pointer, only decrease its reference count after having removed the
+ * association with the block cgroup controller.
+ */
+ bdi_put(q->backing_dev_info);
+}
+
+
/**
- * __blk_release_queue - release a request queue when it is no longer needed
+ * __blk_release_queue - release a request queue
* @work: pointer to the release_work member of the request queue to be released
*
* Description:
- * blk_release_queue is the counterpart of blk_init_queue(). It should be
- * called when a request queue is being released; typically when a block
- * device is being de-registered. Its primary task it to free the queue
- * itself.
- *
- * Notes:
- * The low level driver must have finished any outstanding requests first
- * via blk_cleanup_queue().
- *
- * Although blk_release_queue() may be called with preemption disabled,
- * __blk_release_queue() may sleep.
+ * This function is called when a block device is being unregistered. The
+ * process of releasing a request queue starts with blk_cleanup_queue, which
+ * sets the appropriate flags and then calls blk_put_queue, which decrements
+ * the reference counter of the request queue. Once the reference counter
+ * of the request queue reaches zero, blk_release_queue is called to release
+ * all allocated resources of the request queue.
*/
static void __blk_release_queue(struct work_struct *work)
{
@@ -819,39 +889,21 @@
blk_stat_remove_callback(q, q->poll_cb);
blk_stat_free_callback(q->poll_cb);
- if (!blk_queue_dead(q)) {
- /*
- * Last reference was dropped without having called
- * blk_cleanup_queue().
- */
- WARN_ONCE(blk_queue_init_done(q),
- "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
- q);
- blk_exit_queue(q);
- }
-
- WARN(blk_queue_root_blkg(q),
- "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
- q);
-
blk_free_queue_stats(q->stats);
- blk_exit_rl(q, &q->root_rl);
+ if (queue_is_mq(q))
+ cancel_delayed_work_sync(&q->requeue_work);
- if (q->queue_tags)
- __blk_queue_free_tags(q);
+ blk_exit_queue(q);
- if (!q->mq_ops) {
- if (q->exit_rq_fn)
- q->exit_rq_fn(q, q->fq->flush_rq);
- blk_free_flush_queue(q->fq);
- } else {
+ blk_queue_free_zone_bitmaps(q);
+
+ if (queue_is_mq(q))
blk_mq_release(q);
- }
blk_trace_shutdown(q);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_debugfs_unregister(q);
bioset_exit(&q->bio_split);
@@ -876,7 +928,6 @@
struct kobj_type blk_queue_ktype = {
.sysfs_ops = &queue_sysfs_ops,
- .default_attrs = default_attrs,
.release = blk_release_queue,
};
@@ -889,14 +940,14 @@
int ret;
struct device *dev = disk_to_dev(disk);
struct request_queue *q = disk->queue;
+ bool has_elevator = false;
if (WARN_ON(!q))
return -ENXIO;
- WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
+ WARN_ONCE(blk_queue_registered(q),
"%s is registering an already registered queue\n",
kobject_name(&dev->kobj));
- queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);
/*
* SCSI probing may synchronously create and destroy a lot of
@@ -908,17 +959,15 @@
* request_queues for non-existent devices never get registered.
*/
if (!blk_queue_init_done(q)) {
- queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+ blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
percpu_ref_switch_to_percpu(&q->q_usage_counter);
- blk_queue_bypass_end(q);
}
ret = blk_trace_init_sysfs(dev);
if (ret)
return ret;
- /* Prevent changes through sysfs until registration is completed. */
- mutex_lock(&q->sysfs_lock);
+ mutex_lock(&q->sysfs_dir_lock);
ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
if (ret < 0) {
@@ -926,31 +975,46 @@
goto unlock;
}
- if (q->mq_ops) {
+ ret = sysfs_create_group(&q->kobj, &queue_attr_group);
+ if (ret) {
+ blk_trace_remove_sysfs(dev);
+ kobject_del(&q->kobj);
+ kobject_put(&dev->kobj);
+ goto unlock;
+ }
+
+ if (queue_is_mq(q)) {
__blk_mq_register_dev(dev, q);
blk_mq_debugfs_register(q);
}
- kobject_uevent(&q->kobj, KOBJ_ADD);
-
- wbt_enable_default(q);
-
- blk_throtl_register_queue(q);
-
- if (q->request_fn || (q->mq_ops && q->elevator)) {
- ret = elv_register_queue(q);
+ mutex_lock(&q->sysfs_lock);
+ if (q->elevator) {
+ ret = elv_register_queue(q, false);
if (ret) {
mutex_unlock(&q->sysfs_lock);
- kobject_uevent(&q->kobj, KOBJ_REMOVE);
+ mutex_unlock(&q->sysfs_dir_lock);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(dev);
kobject_put(&dev->kobj);
return ret;
}
+ has_elevator = true;
}
+
+ blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
+ wbt_enable_default(q);
+ blk_throtl_register_queue(q);
+
+ /* Everything is ready; send out the KOBJ_ADD uevent. */
+ kobject_uevent(&q->kobj, KOBJ_ADD);
+ if (has_elevator)
+ kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
+ mutex_unlock(&q->sysfs_lock);
+
ret = 0;
unlock:
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->sysfs_dir_lock);
return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
@@ -970,7 +1034,7 @@
return;
/* Return early if disk->queue was never registered. */
- if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
+ if (!blk_queue_registered(q))
return;
/*
@@ -979,27 +1043,26 @@
* concurrent elv_iosched_store() calls.
*/
mutex_lock(&q->sysfs_lock);
-
blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
+ mutex_unlock(&q->sysfs_lock);
+ mutex_lock(&q->sysfs_dir_lock);
/*
* Remove the sysfs attributes before unregistering the queue data
* structures that can be modified through sysfs.
*/
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_unregister_dev(disk_to_dev(disk), q);
- mutex_unlock(&q->sysfs_lock);
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk));
- rq_qos_exit(q);
-
mutex_lock(&q->sysfs_lock);
- if (q->request_fn || (q->mq_ops && q->elevator))
+ if (q->elevator)
elv_unregister_queue(q);
mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->sysfs_dir_lock);
kobject_put(&disk_to_dev(disk)->kobj);
}
diff --git a/block/blk-tag.c b/block/blk-tag.c
deleted file mode 100644
index fbc153a..0000000
--- a/block/blk-tag.c
+++ /dev/null
@@ -1,378 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Functions related to tagged command queuing
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <linux/slab.h>
-
-#include "blk.h"
-
-/**
- * blk_queue_find_tag - find a request by its tag and queue
- * @q: The request queue for the device
- * @tag: The tag of the request
- *
- * Notes:
- * Should be used when a device returns a tag and you want to match
- * it with a request.
- *
- * no locks need be held.
- **/
-struct request *blk_queue_find_tag(struct request_queue *q, int tag)
-{
- return blk_map_queue_find_tag(q->queue_tags, tag);
-}
-EXPORT_SYMBOL(blk_queue_find_tag);
-
-/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt: the tag map to free
- *
- * Drop the reference count on @bqt and frees it when the last reference
- * is dropped.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
- if (atomic_dec_and_test(&bqt->refcnt)) {
- BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
- bqt->max_depth);
-
- kfree(bqt->tag_index);
- bqt->tag_index = NULL;
-
- kfree(bqt->tag_map);
- bqt->tag_map = NULL;
-
- kfree(bqt);
- }
-}
-EXPORT_SYMBOL(blk_free_tags);
-
-/**
- * __blk_queue_free_tags - release tag maintenance info
- * @q: the request queue for the device
- *
- * Notes:
- * blk_cleanup_queue() will take care of calling this function, if tagging
- * has been used. So there's no need to call this directly.
- **/
-void __blk_queue_free_tags(struct request_queue *q)
-{
- struct blk_queue_tag *bqt = q->queue_tags;
-
- if (!bqt)
- return;
-
- blk_free_tags(bqt);
-
- q->queue_tags = NULL;
- queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
-}
-
-/**
- * blk_queue_free_tags - release tag maintenance info
- * @q: the request queue for the device
- *
- * Notes:
- * This is used to disable tagged queuing to a device, yet leave
- * queue in function.
- **/
-void blk_queue_free_tags(struct request_queue *q)
-{
- queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
-}
-EXPORT_SYMBOL(blk_queue_free_tags);
-
-static int
-init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
-{
- struct request **tag_index;
- unsigned long *tag_map;
- int nr_ulongs;
-
- if (q && depth > q->nr_requests * 2) {
- depth = q->nr_requests * 2;
- printk(KERN_ERR "%s: adjusted depth to %d\n",
- __func__, depth);
- }
-
- tag_index = kcalloc(depth, sizeof(struct request *), GFP_ATOMIC);
- if (!tag_index)
- goto fail;
-
- nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
- tag_map = kcalloc(nr_ulongs, sizeof(unsigned long), GFP_ATOMIC);
- if (!tag_map)
- goto fail;
-
- tags->real_max_depth = depth;
- tags->max_depth = depth;
- tags->tag_index = tag_index;
- tags->tag_map = tag_map;
-
- return 0;
-fail:
- kfree(tag_index);
- return -ENOMEM;
-}
-
-static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
- int depth, int alloc_policy)
-{
- struct blk_queue_tag *tags;
-
- tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
- if (!tags)
- goto fail;
-
- if (init_tag_map(q, tags, depth))
- goto fail;
-
- atomic_set(&tags->refcnt, 1);
- tags->alloc_policy = alloc_policy;
- tags->next_tag = 0;
- return tags;
-fail:
- kfree(tags);
- return NULL;
-}
-
-/**
- * blk_init_tags - initialize the tag info for an external tag map
- * @depth: the maximum queue depth supported
- * @alloc_policy: tag allocation policy
- **/
-struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
-{
- return __blk_queue_init_tags(NULL, depth, alloc_policy);
-}
-EXPORT_SYMBOL(blk_init_tags);
-
-/**
- * blk_queue_init_tags - initialize the queue tag info
- * @q: the request queue for the device
- * @depth: the maximum queue depth supported
- * @tags: the tag to use
- * @alloc_policy: tag allocation policy
- *
- * Queue lock must be held here if the function is called to resize an
- * existing map.
- **/
-int blk_queue_init_tags(struct request_queue *q, int depth,
- struct blk_queue_tag *tags, int alloc_policy)
-{
- int rc;
-
- BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
-
- if (!tags && !q->queue_tags) {
- tags = __blk_queue_init_tags(q, depth, alloc_policy);
-
- if (!tags)
- return -ENOMEM;
-
- } else if (q->queue_tags) {
- rc = blk_queue_resize_tags(q, depth);
- if (rc)
- return rc;
- queue_flag_set(QUEUE_FLAG_QUEUED, q);
- return 0;
- } else
- atomic_inc(&tags->refcnt);
-
- /*
- * assign it, all done
- */
- q->queue_tags = tags;
- queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
- return 0;
-}
-EXPORT_SYMBOL(blk_queue_init_tags);
-
-/**
- * blk_queue_resize_tags - change the queueing depth
- * @q: the request queue for the device
- * @new_depth: the new max command queueing depth
- *
- * Notes:
- * Must be called with the queue lock held.
- **/
-int blk_queue_resize_tags(struct request_queue *q, int new_depth)
-{
- struct blk_queue_tag *bqt = q->queue_tags;
- struct request **tag_index;
- unsigned long *tag_map;
- int max_depth, nr_ulongs;
-
- if (!bqt)
- return -ENXIO;
-
- /*
- * if we already have large enough real_max_depth. just
- * adjust max_depth. *NOTE* as requests with tag value
- * between new_depth and real_max_depth can be in-flight, tag
- * map can not be shrunk blindly here.
- */
- if (new_depth <= bqt->real_max_depth) {
- bqt->max_depth = new_depth;
- return 0;
- }
-
- /*
- * Currently cannot replace a shared tag map with a new
- * one, so error out if this is the case
- */
- if (atomic_read(&bqt->refcnt) != 1)
- return -EBUSY;
-
- /*
- * save the old state info, so we can copy it back
- */
- tag_index = bqt->tag_index;
- tag_map = bqt->tag_map;
- max_depth = bqt->real_max_depth;
-
- if (init_tag_map(q, bqt, new_depth))
- return -ENOMEM;
-
- memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
- nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
- memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
-
- kfree(tag_index);
- kfree(tag_map);
- return 0;
-}
-EXPORT_SYMBOL(blk_queue_resize_tags);
-
-/**
- * blk_queue_end_tag - end tag operations for a request
- * @q: the request queue for the device
- * @rq: the request that has completed
- *
- * Description:
- * Typically called when end_that_request_first() returns %0, meaning
- * all transfers have been done for a request. It's important to call
- * this function before end_that_request_last(), as that will put the
- * request back on the free list thus corrupting the internal tag list.
- **/
-void blk_queue_end_tag(struct request_queue *q, struct request *rq)
-{
- struct blk_queue_tag *bqt = q->queue_tags;
- unsigned tag = rq->tag; /* negative tags invalid */
-
- lockdep_assert_held(q->queue_lock);
-
- BUG_ON(tag >= bqt->real_max_depth);
-
- list_del_init(&rq->queuelist);
- rq->rq_flags &= ~RQF_QUEUED;
- rq->tag = -1;
- rq->internal_tag = -1;
-
- if (unlikely(bqt->tag_index[tag] == NULL))
- printk(KERN_ERR "%s: tag %d is missing\n",
- __func__, tag);
-
- bqt->tag_index[tag] = NULL;
-
- if (unlikely(!test_bit(tag, bqt->tag_map))) {
- printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
- __func__, tag);
- return;
- }
- /*
- * The tag_map bit acts as a lock for tag_index[bit], so we need
- * unlock memory barrier semantics.
- */
- clear_bit_unlock(tag, bqt->tag_map);
-}
-
-/**
- * blk_queue_start_tag - find a free tag and assign it
- * @q: the request queue for the device
- * @rq: the block request that needs tagging
- *
- * Description:
- * This can either be used as a stand-alone helper, or possibly be
- * assigned as the queue &prep_rq_fn (in which case &struct request
- * automagically gets a tag assigned). Note that this function
- * assumes that any type of request can be queued! if this is not
- * true for your device, you must check the request type before
- * calling this function. The request will also be removed from
- * the request queue, so it's the drivers responsibility to readd
- * it if it should need to be restarted for some reason.
- **/
-int blk_queue_start_tag(struct request_queue *q, struct request *rq)
-{
- struct blk_queue_tag *bqt = q->queue_tags;
- unsigned max_depth;
- int tag;
-
- lockdep_assert_held(q->queue_lock);
-
- if (unlikely((rq->rq_flags & RQF_QUEUED))) {
- printk(KERN_ERR
- "%s: request %p for device [%s] already tagged %d",
- __func__, rq,
- rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
- BUG();
- }
-
- /*
- * Protect against shared tag maps, as we may not have exclusive
- * access to the tag map.
- *
- * We reserve a few tags just for sync IO, since we don't want
- * to starve sync IO on behalf of flooding async IO.
- */
- max_depth = bqt->max_depth;
- if (!rq_is_sync(rq) && max_depth > 1) {
- switch (max_depth) {
- case 2:
- max_depth = 1;
- break;
- case 3:
- max_depth = 2;
- break;
- default:
- max_depth -= 2;
- }
- if (q->in_flight[BLK_RW_ASYNC] > max_depth)
- return 1;
- }
-
- do {
- if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
- tag = find_first_zero_bit(bqt->tag_map, max_depth);
- if (tag >= max_depth)
- return 1;
- } else {
- int start = bqt->next_tag;
- int size = min_t(int, bqt->max_depth, max_depth + start);
- tag = find_next_zero_bit(bqt->tag_map, size, start);
- if (tag >= size && start + size > bqt->max_depth) {
- size = start + size - bqt->max_depth;
- tag = find_first_zero_bit(bqt->tag_map, size);
- }
- if (tag >= size)
- return 1;
- }
-
- } while (test_and_set_bit_lock(tag, bqt->tag_map));
- /*
- * We need lock ordering semantics given by test_and_set_bit_lock.
- * See blk_queue_end_tag for details.
- */
-
- bqt->next_tag = (tag + 1) % bqt->max_depth;
- rq->rq_flags |= RQF_QUEUED;
- rq->tag = tag;
- bqt->tag_index[tag] = rq;
- blk_start_request(rq);
- return 0;
-}
-EXPORT_SYMBOL(blk_queue_start_tag);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 01d0620..18f773e 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -84,8 +84,7 @@
* RB tree of active children throtl_grp's, which are sorted by
* their ->disptime.
*/
- struct rb_root pending_tree; /* RB tree of active tgs */
- struct rb_node *first_pending; /* first node in the tree */
+ struct rb_root_cached pending_tree; /* RB tree of active tgs */
unsigned int nr_pending; /* # queued in the tree */
unsigned long first_pending_disptime; /* disptime of the first tg */
struct timer_list pending_timer; /* fires on first_pending_disptime */
@@ -475,16 +474,18 @@
{
INIT_LIST_HEAD(&sq->queued[0]);
INIT_LIST_HEAD(&sq->queued[1]);
- sq->pending_tree = RB_ROOT;
+ sq->pending_tree = RB_ROOT_CACHED;
timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}
-static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
+static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
+ struct request_queue *q,
+ struct blkcg *blkcg)
{
struct throtl_grp *tg;
int rw;
- tg = kzalloc_node(sizeof(*tg), gfp, node);
+ tg = kzalloc_node(sizeof(*tg), gfp, q->node);
if (!tg)
return NULL;
@@ -616,31 +617,23 @@
static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
+ struct rb_node *n;
/* Service tree is empty */
if (!parent_sq->nr_pending)
return NULL;
- if (!parent_sq->first_pending)
- parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
-
- if (parent_sq->first_pending)
- return rb_entry_tg(parent_sq->first_pending);
-
- return NULL;
-}
-
-static void rb_erase_init(struct rb_node *n, struct rb_root *root)
-{
- rb_erase(n, root);
- RB_CLEAR_NODE(n);
+ n = rb_first_cached(&parent_sq->pending_tree);
+ WARN_ON_ONCE(!n);
+ if (!n)
+ return NULL;
+ return rb_entry_tg(n);
}
static void throtl_rb_erase(struct rb_node *n,
struct throtl_service_queue *parent_sq)
{
- if (parent_sq->first_pending == n)
- parent_sq->first_pending = NULL;
- rb_erase_init(n, &parent_sq->pending_tree);
+ rb_erase_cached(n, &parent_sq->pending_tree);
+ RB_CLEAR_NODE(n);
--parent_sq->nr_pending;
}
@@ -658,11 +651,11 @@
static void tg_service_queue_add(struct throtl_grp *tg)
{
struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
- struct rb_node **node = &parent_sq->pending_tree.rb_node;
+ struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
struct rb_node *parent = NULL;
struct throtl_grp *__tg;
unsigned long key = tg->disptime;
- int left = 1;
+ bool leftmost = true;
while (*node != NULL) {
parent = *node;
@@ -672,15 +665,13 @@
node = &parent->rb_left;
else {
node = &parent->rb_right;
- left = 0;
+ leftmost = false;
}
}
- if (left)
- parent_sq->first_pending = &tg->rb_node;
-
rb_link_node(&tg->rb_node, parent, node);
- rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
+ rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
+ leftmost);
}
static void __throtl_enqueue_tg(struct throtl_grp *tg)
@@ -892,13 +883,10 @@
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
u64 tmp;
- jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
+ jiffy_elapsed = jiffies - tg->slice_start[rw];
- /* Slice has just started. Consider one slice interval */
- if (!jiffy_elapsed)
- jiffy_elapsed_rnd = tg->td->throtl_slice;
-
- jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
+ /* Round up to the next throttle slice, wait time must be nonzero */
+ jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
/*
* jiffy_elapsed_rnd should not be a big value as minimum iops can be
@@ -1231,7 +1219,7 @@
struct throtl_grp *this_tg);
/**
* throtl_pending_timer_fn - timer function for service_queue->pending_timer
- * @arg: the throtl_service_queue being serviced
+ * @t: the pending_timer member of the throtl_service_queue being serviced
*
* This timer is armed when a child throtl_grp with active bios becomes
* pending and queued on the service_queue's pending_tree and expires when
@@ -1254,7 +1242,7 @@
bool dispatched;
int ret;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (throtl_can_upgrade(td, NULL))
throtl_upgrade_state(td);
@@ -1277,9 +1265,9 @@
break;
/* this dispatch windows is still open, relax and repeat */
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
cpu_relax();
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
}
if (!dispatched)
@@ -1301,7 +1289,7 @@
queue_work(kthrotld_workqueue, &td->dispatch_work);
}
out_unlock:
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
/**
@@ -1325,11 +1313,11 @@
bio_list_init(&bio_list_on_stack);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
for (rw = READ; rw <= WRITE; rw++)
while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
bio_list_add(&bio_list_on_stack, bio);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (!bio_list_empty(&bio_list_on_stack)) {
blk_start_plug(&plug);
@@ -2126,16 +2114,6 @@
}
#endif
-static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
-{
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- /* fallback to root_blkg if we fail to get a blkg ref */
- if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
- bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-#endif
-}
-
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
struct bio *bio)
{
@@ -2152,14 +2130,10 @@
if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
goto out;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
throtl_update_latency_buckets(td);
- if (unlikely(blk_queue_bypass(q)))
- goto out_unlock;
-
- blk_throtl_assoc_bio(tg, bio);
blk_throtl_update_idletime(tg);
sq = &tg->service_queue;
@@ -2238,7 +2212,7 @@
}
out_unlock:
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
out:
bio_set_flag(bio, BIO_THROTTLED);
@@ -2274,7 +2248,8 @@
struct request_queue *q = rq->q;
struct throtl_data *td = q->td;
- throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
+ throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
+ time_ns >> 10);
}
void blk_throtl_bio_endio(struct bio *bio)
@@ -2359,7 +2334,7 @@
* Dispatch all currently throttled bios on @q through ->make_request_fn().
*/
void blk_throtl_drain(struct request_queue *q)
- __releases(q->queue_lock) __acquires(q->queue_lock)
+ __releases(&q->queue_lock) __acquires(&q->queue_lock)
{
struct throtl_data *td = q->td;
struct blkcg_gq *blkg;
@@ -2367,7 +2342,6 @@
struct bio *bio;
int rw;
- queue_lockdep_assert_held(q);
rcu_read_lock();
/*
@@ -2383,7 +2357,7 @@
tg_drain_bios(&td->service_queue);
rcu_read_unlock();
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
/* all bios now should be in td->service_queue, issue them */
for (rw = READ; rw <= WRITE; rw++)
@@ -2391,7 +2365,7 @@
NULL)))
generic_make_request(bio);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
}
int blk_throtl_init(struct request_queue *q)
@@ -2471,7 +2445,7 @@
td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif
- td->track_bio_latency = !queue_is_rq_based(q);
+ td->track_bio_latency = !queue_is_mq(q);
if (!td->track_bio_latency)
blk_stat_enable_accounting(q);
}
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index f2cfd56..8aa68fa 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions related to generic timeout handling of requests.
*/
@@ -68,80 +69,6 @@
#endif /* CONFIG_FAIL_IO_TIMEOUT */
-/*
- * blk_delete_timer - Delete/cancel timer for a given function.
- * @req: request that we are canceling timer for
- *
- */
-void blk_delete_timer(struct request *req)
-{
- list_del_init(&req->timeout_list);
-}
-
-static void blk_rq_timed_out(struct request *req)
-{
- struct request_queue *q = req->q;
- enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
-
- if (q->rq_timed_out_fn)
- ret = q->rq_timed_out_fn(req);
- switch (ret) {
- case BLK_EH_RESET_TIMER:
- blk_add_timer(req);
- blk_clear_rq_complete(req);
- break;
- case BLK_EH_DONE:
- /*
- * LLD handles this for now but in the future
- * we can send a request msg to abort the command
- * and we can move more of the generic scsi eh code to
- * the blk layer.
- */
- break;
- default:
- printk(KERN_ERR "block: bad eh return: %d\n", ret);
- break;
- }
-}
-
-static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
- unsigned int *next_set)
-{
- const unsigned long deadline = blk_rq_deadline(rq);
-
- if (time_after_eq(jiffies, deadline)) {
- list_del_init(&rq->timeout_list);
-
- /*
- * Check if we raced with end io completion
- */
- if (!blk_mark_rq_complete(rq))
- blk_rq_timed_out(rq);
- } else if (!*next_set || time_after(*next_timeout, deadline)) {
- *next_timeout = deadline;
- *next_set = 1;
- }
-}
-
-void blk_timeout_work(struct work_struct *work)
-{
- struct request_queue *q =
- container_of(work, struct request_queue, timeout_work);
- unsigned long flags, next = 0;
- struct request *rq, *tmp;
- int next_set = 0;
-
- spin_lock_irqsave(q->queue_lock, flags);
-
- list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
- blk_rq_check_expired(rq, &next, &next_set);
-
- if (next_set)
- mod_timer(&q->timeout, round_jiffies_up(next));
-
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
/**
* blk_abort_request -- Request request recovery for the specified command
* @req: pointer to the request of interest
@@ -149,24 +76,17 @@
* This function requests that the block layer start recovery for the
* request by deleting the timer and calling the q's timeout function.
* LLDDs who implement their own error recovery MAY ignore the timeout
- * event if they generated blk_abort_req. Must hold queue lock.
+ * event if they generated blk_abort_request.
*/
void blk_abort_request(struct request *req)
{
- if (req->q->mq_ops) {
- /*
- * All we need to ensure is that timeout scan takes place
- * immediately and that scan sees the new timeout value.
- * No need for fancy synchronizations.
- */
- blk_rq_set_deadline(req, jiffies);
- kblockd_schedule_work(&req->q->timeout_work);
- } else {
- if (blk_mark_rq_complete(req))
- return;
- blk_delete_timer(req);
- blk_rq_timed_out(req);
- }
+ /*
+ * All we need to ensure is that timeout scan takes place
+ * immediately and that scan sees the new timeout value.
+ * No need for fancy synchronizations.
+ */
+ WRITE_ONCE(req->deadline, jiffies);
+ kblockd_schedule_work(&req->q->timeout_work);
}
EXPORT_SYMBOL_GPL(blk_abort_request);
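
The abort path now just publishes an immediate deadline and kicks the timeout worker. WRITE_ONCE() is used because the blk-mq timeout scan reads the deadline without the lock; a simplified, editorial sketch of the reader side:

    /* Simplified sketch: how the timeout scan observes the new deadline. */
    static bool example_req_expired(struct request *rq)
    {
        unsigned long deadline = READ_ONCE(rq->deadline);

        return time_after_eq(jiffies, deadline);
    }
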
@@ -194,15 +114,6 @@
struct request_queue *q = req->q;
unsigned long expiry;
- if (!q->mq_ops)
- lockdep_assert_held(q->queue_lock);
-
- /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
- if (!q->mq_ops && !q->rq_timed_out_fn)
- return;
-
- BUG_ON(!list_empty(&req->timeout_list));
-
/*
* Some LLDs, like scsi, peek at the timeout to prevent a
* command from being retried forever.
@@ -211,21 +122,16 @@
req->timeout = q->rq_timeout;
req->rq_flags &= ~RQF_TIMED_OUT;
- blk_rq_set_deadline(req, jiffies + req->timeout);
- /*
- * Only the non-mq case needs to add the request to a protected list.
- * For the mq case we simply scan the tag map.
- */
- if (!q->mq_ops)
- list_add_tail(&req->timeout_list, &req->q->timeout_list);
+ expiry = jiffies + req->timeout;
+ WRITE_ONCE(req->deadline, expiry);
/*
* If the timer isn't already pending or this timeout is earlier
* than an existing one, modify the timer. Round up to next nearest
* second.
*/
- expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));
+ expiry = blk_rq_timeout(round_jiffies_up(expiry));
if (!timer_pending(&q->timeout) ||
time_before(expiry, q->timeout.expires)) {
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 8ac93fc..8641ba9 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* buffered writeback throttling. loosely based on CoDel. We can't drop
* packets for IO scheduling, so the logic is something like this:
@@ -307,7 +308,8 @@
static void scale_up(struct rq_wb *rwb)
{
- rq_depth_scale_up(&rwb->rq_depth);
+ if (!rq_depth_scale_up(&rwb->rq_depth))
+ return;
calc_wb_limits(rwb);
rwb->unknown_cnt = 0;
rwb_wake_all(rwb);
@@ -316,7 +318,8 @@
static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
- rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
+ if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
+ return;
calc_wb_limits(rwb);
rwb->unknown_cnt = 0;
rwb_trace_step(rwb, "scale down");
@@ -489,31 +492,21 @@
}
struct wbt_wait_data {
- struct wait_queue_entry wq;
- struct task_struct *task;
struct rq_wb *rwb;
- struct rq_wait *rqw;
+ enum wbt_flags wb_acct;
unsigned long rw;
- bool got_token;
};
-static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
- int wake_flags, void *key)
+static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
- struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
- wq);
+ struct wbt_wait_data *data = private_data;
+ return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
+}
- /*
- * If we fail to get a budget, return -1 to interrupt the wake up
- * loop in __wake_up_common.
- */
- if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
- return -1;
-
- data->got_token = true;
- list_del_init(&curr->entry);
- wake_up_process(data->task);
- return 1;
+static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
+{
+ struct wbt_wait_data *data = private_data;
+ wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}
/*
@@ -521,57 +514,16 @@
* the timer to kick off queuing again.
*/
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
- unsigned long rw, spinlock_t *lock)
- __releases(lock)
- __acquires(lock)
+ unsigned long rw)
{
struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
struct wbt_wait_data data = {
- .wq = {
- .func = wbt_wake_function,
- .entry = LIST_HEAD_INIT(data.wq.entry),
- },
- .task = current,
.rwb = rwb,
- .rqw = rqw,
+ .wb_acct = wb_acct,
.rw = rw,
};
- bool has_sleeper;
- has_sleeper = wq_has_sleeper(&rqw->wait);
- if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
- return;
-
- prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
- do {
- if (data.got_token)
- break;
-
- if (!has_sleeper &&
- rq_wait_inc_below(rqw, get_limit(rwb, rw))) {
- finish_wait(&rqw->wait, &data.wq);
-
- /*
- * We raced with wbt_wake_function() getting a token,
- * which means we now have two. Put our local token
- * and wake anyone else potentially waiting for one.
- */
- if (data.got_token)
- wbt_rqw_done(rwb, rqw, wb_acct);
- break;
- }
-
- if (lock) {
- spin_unlock_irq(lock);
- io_schedule();
- spin_lock_irq(lock);
- } else
- io_schedule();
-
- has_sleeper = false;
- } while (1);
-
- finish_wait(&rqw->wait, &data.wq);
+ rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}
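
The open-coded wait loop and wake function deleted above now live, in generalized form, in rq_qos_wait(), which is shared by wbt and the other rq_qos policies. Its callback contract, roughly as declared in block/blk-rq-qos.h in this kernel (the comments are editorial):

    /* Try to take an inflight budget. Must not sleep: it is called both on
     * the submission fast path and from the wake-up path. True on success. */
    typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);

    /* Return a budget a woken waiter ended up not needing, e.g. after
     * racing and acquiring one through both paths. */
    typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

    void rq_qos_wait(struct rq_wait *rqw, void *private_data,
                     acquire_inflight_cb_t *acquire_inflight_cb,
                     cleanup_cb_t *cleanup_cb);
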
static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
@@ -624,7 +576,7 @@
* in an irq held spinlock, if it holds one when calling this function.
* If we do sleep, we'll release and re-grab it.
*/
-static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
+static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
struct rq_wb *rwb = RQWB(rqos);
enum wbt_flags flags;
@@ -636,7 +588,7 @@
return;
}
- __wbt_wait(rwb, flags, bio->bi_opf, lock);
+ __wbt_wait(rwb, flags, bio->bi_opf);
if (!blk_stat_is_active(rwb->cb))
rwb_arm_timer(rwb);
@@ -648,7 +600,7 @@
rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}
-void wbt_issue(struct rq_qos *rqos, struct request *rq)
+static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
struct rq_wb *rwb = RQWB(rqos);
@@ -668,7 +620,7 @@
}
}
-void wbt_requeue(struct rq_qos *rqos, struct request *rq)
+static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
struct rq_wb *rwb = RQWB(rqos);
if (!rwb_enabled(rwb))
@@ -679,15 +631,6 @@
}
}
-void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
-{
- struct rq_qos *rqos = wbt_rq_qos(q);
- if (rqos) {
- RQWB(rqos)->rq_depth.queue_depth = depth;
- __wbt_update_limits(RQWB(rqos));
- }
-}
-
void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
struct rq_qos *rqos = wbt_rq_qos(q);
@@ -706,11 +649,10 @@
return;
/* Queue not registered? Maybe shutting down... */
- if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
+ if (!blk_queue_registered(q))
return;
- if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
- (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ)))
+ if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);
@@ -740,6 +682,12 @@
return -1;
}
+static void wbt_queue_depth_changed(struct rq_qos *rqos)
+{
+ RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
+ __wbt_update_limits(RQWB(rqos));
+}
+
static void wbt_exit(struct rq_qos *rqos)
{
struct rq_wb *rwb = RQWB(rqos);
@@ -760,11 +708,100 @@
if (!rqos)
return;
rwb = RQWB(rqos);
- if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
+ if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
+ blk_stat_deactivate(rwb->cb);
rwb->wb_normal = 0;
+ }
}
EXPORT_SYMBOL_GPL(wbt_disable_default);
+#ifdef CONFIG_BLK_DEBUG_FS
+static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+
+ seq_printf(m, "%llu\n", rwb->cur_win_nsec);
+ return 0;
+}
+
+static int wbt_enabled_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+
+ seq_printf(m, "%d\n", rwb->enable_state);
+ return 0;
+}
+
+static int wbt_id_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+
+ seq_printf(m, "%u\n", rqos->id);
+ return 0;
+}
+
+static int wbt_inflight_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+ int i;
+
+ for (i = 0; i < WBT_NUM_RWQ; i++)
+ seq_printf(m, "%d: inflight %d\n", i,
+ atomic_read(&rwb->rq_wait[i].inflight));
+ return 0;
+}
+
+static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+
+ seq_printf(m, "%lu\n", rwb->min_lat_nsec);
+ return 0;
+}
+
+static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+
+ seq_printf(m, "%u\n", rwb->unknown_cnt);
+ return 0;
+}
+
+static int wbt_normal_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+
+ seq_printf(m, "%u\n", rwb->wb_normal);
+ return 0;
+}
+
+static int wbt_background_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+
+ seq_printf(m, "%u\n", rwb->wb_background);
+ return 0;
+}
+
+static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
+ {"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
+ {"enabled", 0400, wbt_enabled_show},
+ {"id", 0400, wbt_id_show},
+ {"inflight", 0400, wbt_inflight_show},
+ {"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
+ {"unknown_cnt", 0400, wbt_unknown_cnt_show},
+ {"wb_normal", 0400, wbt_normal_show},
+ {"wb_background", 0400, wbt_background_show},
+ {},
+};
+#endif
static struct rq_qos_ops wbt_rqos_ops = {
.throttle = wbt_wait,
@@ -773,7 +810,11 @@
.requeue = wbt_requeue,
.done = wbt_done,
.cleanup = wbt_cleanup,
+ .queue_depth_changed = wbt_queue_depth_changed,
.exit = wbt_exit,
+#ifdef CONFIG_BLK_DEBUG_FS
+ .debugfs_attrs = wbt_debugfs_attrs,
+#endif
};
int wbt_init(struct request_queue *q)
@@ -812,7 +853,7 @@
rwb->min_lat_nsec = wbt_default_latency_nsec(q);
- wbt_set_queue_depth(q, blk_queue_depth(q));
+ wbt_queue_depth_changed(&rwb->rqos);
wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
return 0;
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index f47218d..8e4e376 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -95,7 +95,6 @@
u64 wbt_get_min_lat(struct request_queue *q);
void wbt_set_min_lat(struct request_queue *q, u64 val);
-void wbt_set_queue_depth(struct request_queue *, unsigned int);
void wbt_set_write_cache(struct request_queue *, bool);
u64 wbt_default_latency_nsec(struct request_queue *);
@@ -118,9 +117,6 @@
static inline void wbt_enable_default(struct request_queue *q)
{
}
-static inline void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
-{
-}
static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
{
}
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index c461cf6..4bc5f26 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Zoned block device handling
*
@@ -12,6 +13,12 @@
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
+
+#include "blk.h"
static inline sector_t blk_zone_start(struct request_queue *q,
sector_t sector)
@@ -63,14 +70,38 @@
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
-/*
- * Check that a zone report belongs to the partition.
- * If yes, fix its start sector and write pointer, copy it in the
- * zone information array and return true. Return false otherwise.
+static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
+ sector_t nr_sectors)
+{
+ sector_t zone_sectors = blk_queue_zone_sectors(q);
+
+ return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
+}
+
+/**
+ * blkdev_nr_zones - Get number of zones
+ * @bdev: Target block device
+ *
+ * Description:
+ * Return the total number of zones of a zoned block device.
+ * For a regular block device, the number of zones is always 0.
*/
-static bool blkdev_report_zone(struct block_device *bdev,
- struct blk_zone *rep,
- struct blk_zone *zone)
+unsigned int blkdev_nr_zones(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (!blk_queue_is_zoned(q))
+ return 0;
+
+ return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
+}
+EXPORT_SYMBOL_GPL(blkdev_nr_zones);
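
__blkdev_nr_zones() relies on zone_sectors being a power of two (the block layer requires this for zoned devices), so the round-up division is a shift, and a trailing partial zone counts as a full zone. A worked example with assumed geometry:

    /* Worked example (assumed geometry: 256 MiB zones, 512 B sectors). */
    static unsigned int example_nr_zones(void)
    {
        sector_t zone_sectors = 524288;        /* 2^19 sectors per zone */
        sector_t nr_sectors = 10000000;        /* ~4.8 GiB partition */

        /* (10000000 + 524287) >> 19 = 20: 19 full zones plus one partial */
        return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
    }
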
+
+/*
+ * Check that a zone report belongs to this partition, and if yes, fix its start
+ * sector and write pointer and return true. Return false otherwise.
+ */
+static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
{
sector_t offset = get_start_sect(bdev);
@@ -85,151 +116,127 @@
rep->wp = rep->start + rep->len;
else
rep->wp -= offset;
- memcpy(zone, rep, sizeof(struct blk_zone));
-
return true;
}
+static int blk_report_zones(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones)
+{
+ struct request_queue *q = disk->queue;
+ unsigned int z = 0, n, nrz = *nr_zones;
+ sector_t capacity = get_capacity(disk);
+ int ret;
+
+ while (z < nrz && sector < capacity) {
+ n = nrz - z;
+ ret = disk->fops->report_zones(disk, sector, &zones[z], &n);
+ if (ret)
+ return ret;
+ if (!n)
+ break;
+ sector += blk_queue_zone_sectors(q) * n;
+ z += n;
+ }
+
+ WARN_ON(z > *nr_zones);
+ *nr_zones = z;
+
+ return 0;
+}
+
/**
* blkdev_report_zones - Get zones information
* @bdev: Target block device
* @sector: Sector from which to report zones
* @zones: Array of zone structures where to return the zones information
* @nr_zones: Number of zone structures in the zone array
- * @gfp_mask: Memory allocation flags (for bio_alloc)
*
* Description:
* Get zone information starting from the zone containing @sector.
* The number of zone information reported may be less than the number
* requested by @nr_zones. The number of zones actually reported is
* returned in @nr_zones.
+ * The caller must use memalloc_noXX_save/restore() calls to control
+ * memory allocations done within this function (zone array and command
+ * buffer allocation by the device driver).
*/
-int blkdev_report_zones(struct block_device *bdev,
- sector_t sector,
- struct blk_zone *zones,
- unsigned int *nr_zones,
- gfp_t gfp_mask)
+int blkdev_report_zones(struct block_device *bdev, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones)
{
struct request_queue *q = bdev_get_queue(bdev);
- struct blk_zone_report_hdr *hdr;
- unsigned int nrz = *nr_zones;
- struct page *page;
- unsigned int nr_rep;
- size_t rep_bytes;
- unsigned int nr_pages;
- struct bio *bio;
- struct bio_vec *bv;
- unsigned int i, n, nz;
- unsigned int ofst;
- void *addr;
+ unsigned int i, nrz;
int ret;
- if (!q)
- return -ENXIO;
-
if (!blk_queue_is_zoned(q))
return -EOPNOTSUPP;
- if (!nrz)
- return 0;
+ /*
+ * A block device that advertised itself as zoned must have a
+ * report_zones method. If it does not have one defined, the device
+ * driver has a bug. So warn about that.
+ */
+ if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones))
+ return -EOPNOTSUPP;
- if (sector > bdev->bd_part->nr_sects) {
+ if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
*nr_zones = 0;
return 0;
}
- /*
- * The zone report has a header. So make room for it in the
- * payload. Also make sure that the report fits in a single BIO
- * that will not be split down the stack.
- */
- rep_bytes = sizeof(struct blk_zone_report_hdr) +
- sizeof(struct blk_zone) * nrz;
- rep_bytes = (rep_bytes + PAGE_SIZE - 1) & PAGE_MASK;
- if (rep_bytes > (queue_max_sectors(q) << 9))
- rep_bytes = queue_max_sectors(q) << 9;
-
- nr_pages = min_t(unsigned int, BIO_MAX_PAGES,
- rep_bytes >> PAGE_SHIFT);
- nr_pages = min_t(unsigned int, nr_pages,
- queue_max_segments(q));
-
- bio = bio_alloc(gfp_mask, nr_pages);
- if (!bio)
- return -ENOMEM;
-
- bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector = blk_zone_start(q, sector);
- bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0);
-
- for (i = 0; i < nr_pages; i++) {
- page = alloc_page(gfp_mask);
- if (!page) {
- ret = -ENOMEM;
- goto out;
- }
- if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
- __free_page(page);
- break;
- }
- }
-
- if (i == 0)
- ret = -ENOMEM;
- else
- ret = submit_bio_wait(bio);
+ nrz = min(*nr_zones,
+ __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
+ ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector,
+ zones, &nrz);
if (ret)
- goto out;
+ return ret;
- /*
- * Process the report result: skip the header and go through the
- * reported zones to fixup and fixup the zone information for
- * partitions. At the same time, return the zone information into
- * the zone array.
- */
- n = 0;
- nz = 0;
- nr_rep = 0;
- bio_for_each_segment_all(bv, bio, i) {
-
- if (!bv->bv_page)
+ for (i = 0; i < nrz; i++) {
+ if (!blkdev_report_zone(bdev, zones))
break;
-
- addr = kmap_atomic(bv->bv_page);
-
- /* Get header in the first page */
- ofst = 0;
- if (!nr_rep) {
- hdr = addr;
- nr_rep = hdr->nr_zones;
- ofst = sizeof(struct blk_zone_report_hdr);
- }
-
- /* Fixup and report zones */
- while (ofst < bv->bv_len &&
- n < nr_rep && nz < nrz) {
- if (blkdev_report_zone(bdev, addr + ofst, &zones[nz]))
- nz++;
- ofst += sizeof(struct blk_zone);
- n++;
- }
-
- kunmap_atomic(addr);
-
- if (n >= nr_rep || nz >= nrz)
- break;
-
+ zones++;
}
- *nr_zones = nz;
-out:
- bio_for_each_segment_all(bv, bio, i)
- __free_page(bv->bv_page);
+ *nr_zones = i;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blkdev_report_zones);
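
Note the new calling convention: the gfp_mask argument is gone and the caller scopes allocations instead, as the updated kernel-doc above requires. A hypothetical caller honoring that rule (the function name is illustrative):

    /* Report the first zones of a device without recursing into I/O. */
    static int example_report_first_zones(struct block_device *bdev)
    {
        struct blk_zone zones[16];
        unsigned int nr_zones = ARRAY_SIZE(zones);
        unsigned int noio_flag;
        int ret;

        noio_flag = memalloc_noio_save();    /* allocations act as GFP_NOIO */
        ret = blkdev_report_zones(bdev, 0, zones, &nr_zones);
        memalloc_noio_restore(noio_flag);

        if (ret)
            return ret;
        /* nr_zones now holds the number of entries actually filled */
        return 0;
    }
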
+
+/*
+ * Special case of zone reset operation to reset all zones in one command,
+ * useful for applications like mkfs.
+ */
+static int __blkdev_reset_all_zones(struct block_device *bdev, gfp_t gfp_mask)
+{
+ struct bio *bio = bio_alloc(gfp_mask, 0);
+ int ret;
+
+ /* a reset-all operation applies to every zone, so no sector is needed */
+ bio_set_dev(bio, bdev);
+ bio_set_op_attrs(bio, REQ_OP_ZONE_RESET_ALL, 0);
+
+ ret = submit_bio_wait(bio);
bio_put(bio);
return ret;
}
-EXPORT_SYMBOL_GPL(blkdev_report_zones);
+
+static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
+ sector_t nr_sectors)
+{
+ if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
+ return false;
+
+ if (nr_sectors != part_nr_sects_read(bdev->bd_part))
+ return false;
+ /*
+ * REQ_OP_ZONE_RESET_ALL can be executed only if the block device is
+ * the entire disk, that is, if the block device's start offset is 0 and
+ * its capacity is the same as that of the entire disk.
+ */
+ return get_start_sect(bdev) == 0 &&
+ part_nr_sects_read(bdev->bd_part) == get_capacity(bdev->bd_disk);
+}
/**
* blkdev_reset_zones - Reset zones write pointer
@@ -250,19 +257,23 @@
struct request_queue *q = bdev_get_queue(bdev);
sector_t zone_sectors;
sector_t end_sector = sector + nr_sectors;
- struct bio *bio;
+ struct bio *bio = NULL;
+ struct blk_plug plug;
int ret;
- if (!q)
- return -ENXIO;
-
if (!blk_queue_is_zoned(q))
return -EOPNOTSUPP;
- if (end_sector > bdev->bd_part->nr_sects)
+ if (bdev_read_only(bdev))
+ return -EPERM;
+
+ if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
/* Out of range */
return -EINVAL;
+ if (blkdev_allow_reset_all_zones(bdev, nr_sectors))
+ return __blkdev_reset_all_zones(bdev, gfp_mask);
+
/* Check alignment (handle eventual smaller last zone) */
zone_sectors = blk_queue_zone_sectors(q);
if (sector & (zone_sectors - 1))
@@ -272,19 +283,14 @@
end_sector != bdev->bd_part->nr_sects)
return -EINVAL;
+ blk_start_plug(&plug);
while (sector < end_sector) {
- bio = bio_alloc(gfp_mask, 0);
+ bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
- ret = submit_bio_wait(bio);
- bio_put(bio);
-
- if (ret)
- return ret;
-
sector += zone_sectors;
/* This may take a while, so be nice to others */
@@ -292,7 +298,12 @@
}
- return 0;
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+
+ blk_finish_plug(&plug);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(blkdev_reset_zones);
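
Instead of allocating and waiting on one bio per zone, the rewritten loop chains the per-zone reset bios and waits once on the last one, with the plug batching the submissions. The blk_next_bio() helper (declared in block/blk.h later in this patch) behaves roughly as sketched below, based on its upstream block/blk-lib.c implementation:

    struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
    {
        struct bio *new = bio_alloc(gfp, nr_pages);

        if (bio) {
            bio_chain(bio, new);    /* @new completes only after @bio does */
            submit_bio(bio);
        }

        return new;
    }

So the final submit_bio_wait() returns only once every chained predecessor has completed.
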
@@ -328,17 +339,14 @@
if (!rep.nr_zones)
return -EINVAL;
- if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone))
- return -ERANGE;
+ rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);
zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
GFP_KERNEL | __GFP_ZERO);
if (!zones)
return -ENOMEM;
- ret = blkdev_report_zones(bdev, rep.sector,
- zones, &rep.nr_zones,
- GFP_KERNEL);
+ ret = blkdev_report_zones(bdev, rep.sector, zones, &rep.nr_zones);
if (ret)
goto out;
@@ -392,3 +400,149 @@
return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
GFP_KERNEL);
}
+
+static inline unsigned long *blk_alloc_zone_bitmap(int node,
+ unsigned int nr_zones)
+{
+ return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
+ GFP_NOIO, node);
+}
+
+/*
+ * Allocate an array of struct blk_zone to hold the zone information for up
+ * to nr_zones zones. The allocated array may be smaller than nr_zones.
+ */
+static struct blk_zone *blk_alloc_zones(unsigned int *nr_zones)
+{
+ struct blk_zone *zones;
+ size_t nrz = min(*nr_zones, BLK_ZONED_REPORT_MAX_ZONES);
+
+ /*
+ * GFP_KERNEL here is meaningless as the caller task context has
+ * the PF_MEMALLOC_NOIO flag set in blk_revalidate_disk_zones()
+ * with memalloc_noio_save().
+ */
+ zones = kvcalloc(nrz, sizeof(struct blk_zone), GFP_KERNEL);
+ if (!zones) {
+ *nr_zones = 0;
+ return NULL;
+ }
+
+ *nr_zones = nrz;
+
+ return zones;
+}
+
+void blk_queue_free_zone_bitmaps(struct request_queue *q)
+{
+ kfree(q->seq_zones_bitmap);
+ q->seq_zones_bitmap = NULL;
+ kfree(q->seq_zones_wlock);
+ q->seq_zones_wlock = NULL;
+}
+
+/**
+ * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
+ * @disk: Target disk
+ *
+ * Helper function for low-level device drivers to (re)allocate and initialize
+ * a disk's request queue zone bitmaps. This function should normally be called
+ * from within the disk ->revalidate method. For BIO-based queues, no zone
+ * bitmaps are allocated.
+ */
+int blk_revalidate_disk_zones(struct gendisk *disk)
+{
+ struct request_queue *q = disk->queue;
+ unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
+ unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
+ unsigned int i, rep_nr_zones = 0, z = 0, nrz;
+ struct blk_zone *zones = NULL;
+ unsigned int noio_flag;
+ sector_t sector = 0;
+ int ret = 0;
+
+ /*
+ * BIO-based queues do not use a scheduler, so only q->nr_zones
+ * needs to be updated to keep the sysfs-exposed value correct.
+ */
+ if (!queue_is_mq(q)) {
+ q->nr_zones = nr_zones;
+ return 0;
+ }
+
+ /*
+ * Ensure that all memory allocations in this context are done as
+ * if GFP_NOIO was specified.
+ */
+ noio_flag = memalloc_noio_save();
+
+ if (!blk_queue_is_zoned(q) || !nr_zones) {
+ nr_zones = 0;
+ goto update;
+ }
+
+ /* Allocate bitmaps */
+ ret = -ENOMEM;
+ seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
+ if (!seq_zones_wlock)
+ goto out;
+ seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
+ if (!seq_zones_bitmap)
+ goto out;
+
+ /* Get zone information and initialize seq_zones_bitmap */
+ rep_nr_zones = nr_zones;
+ zones = blk_alloc_zones(&rep_nr_zones);
+ if (!zones)
+ goto out;
+
+ while (z < nr_zones) {
+ nrz = min(nr_zones - z, rep_nr_zones);
+ ret = blk_report_zones(disk, sector, zones, &nrz);
+ if (ret)
+ goto out;
+ if (!nrz)
+ break;
+ for (i = 0; i < nrz; i++) {
+ if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
+ set_bit(z, seq_zones_bitmap);
+ z++;
+ }
+ sector += nrz * blk_queue_zone_sectors(q);
+ }
+
+ if (WARN_ON(z != nr_zones)) {
+ ret = -EIO;
+ goto out;
+ }
+
+update:
+ /*
+ * Install the new bitmaps, making sure the queue is stopped and
+ * all I/Os are completed (i.e. a scheduler is not referencing the
+ * bitmaps).
+ */
+ blk_mq_freeze_queue(q);
+ q->nr_zones = nr_zones;
+ swap(q->seq_zones_wlock, seq_zones_wlock);
+ swap(q->seq_zones_bitmap, seq_zones_bitmap);
+ blk_mq_unfreeze_queue(q);
+
+out:
+ memalloc_noio_restore(noio_flag);
+
+ kvfree(zones);
+ kfree(seq_zones_wlock);
+ kfree(seq_zones_bitmap);
+
+ if (ret) {
+ pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
+ blk_mq_freeze_queue(q);
+ blk_queue_free_zone_bitmaps(q);
+ blk_mq_unfreeze_queue(q);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
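
A zoned low-level driver is expected to call this helper whenever the capacity or zone configuration may have changed, typically from its revalidate path. A hypothetical sketch (the exampledrv_* names, including the capacity query, are illustrative):

    /* Hypothetical LLD: refresh the zone bitmaps after a capacity change. */
    static int exampledrv_revalidate_disk(struct gendisk *disk)
    {
        int ret;

        /* zone size and capacity must be up to date before this call */
        set_capacity(disk, exampledrv_get_capacity(disk));

        ret = blk_revalidate_disk_zones(disk);
        if (ret)
            pr_warn("%s: zone revalidation failed (%d)\n",
                    disk->disk_name, ret);
        return ret;
    }
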
diff --git a/block/blk.h b/block/blk.h
index 977d4b5..47fba93 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -4,13 +4,9 @@
#include <linux/idr.h>
#include <linux/blk-mq.h>
+#include <xen/xen.h>
#include "blk-mq.h"
-
-/* Amount of time in which a process may batch requests */
-#define BLK_BATCH_TIME (HZ/50UL)
-
-/* Number of requests a "batching" process may submit */
-#define BLK_BATCH_REQ 32
+#include "blk-mq-sched.h"
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
@@ -23,6 +19,7 @@
unsigned int flush_queue_delayed:1;
unsigned int flush_pending_idx:1;
unsigned int flush_running_idx:1;
+ blk_status_t rq_status;
unsigned long flush_pending_since;
struct list_head flush_queue[2];
struct list_head flush_data_in_flight;
@@ -37,85 +34,13 @@
};
extern struct kmem_cache *blk_requestq_cachep;
-extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;
-/*
- * @q->queue_lock is set while a queue is being initialized. Since we know
- * that no other threads access the queue object before @q->queue_lock has
- * been set, it is safe to manipulate queue flags without holding the
- * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
- * blk_init_allocated_queue().
- */
-static inline void queue_lockdep_assert_held(struct request_queue *q)
+static inline struct blk_flush_queue *
+blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
- if (q->queue_lock)
- lockdep_assert_held(q->queue_lock);
-}
-
-static inline void queue_flag_set_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
- kref_read(&q->kobj.kref))
- lockdep_assert_held(q->queue_lock);
- __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
- kref_read(&q->kobj.kref))
- lockdep_assert_held(q->queue_lock);
- __clear_bit(flag, &q->queue_flags);
-}
-
-static inline int queue_flag_test_and_clear(unsigned int flag,
- struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
-
- if (test_bit(flag, &q->queue_flags)) {
- __clear_bit(flag, &q->queue_flags);
- return 1;
- }
-
- return 0;
-}
-
-static inline int queue_flag_test_and_set(unsigned int flag,
- struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
-
- if (!test_bit(flag, &q->queue_flags)) {
- __set_bit(flag, &q->queue_flags);
- return 0;
- }
-
- return 1;
-}
-
-static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
- __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
- __clear_bit(flag, &q->queue_flags);
-}
-
-static inline struct blk_flush_queue *blk_get_flush_queue(
- struct request_queue *q, struct blk_mq_ctx *ctx)
-{
- if (q->mq_ops)
- return blk_mq_map_queue(q, ctx->cpu)->fq;
- return q->fq;
+ return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}
static inline void __blk_get_queue(struct request_queue *q)
@@ -123,19 +48,16 @@
kobject_get(&q->kobj);
}
+static inline bool
+is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
+{
+ return hctx->fq->flush_rq == req;
+}
+
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
- int node, int cmd_size);
+ int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);
-int blk_init_rl(struct request_list *rl, struct request_queue *q,
- gfp_t gfp_mask);
-void blk_exit_rl(struct request_queue *q, struct request_list *rl);
-void blk_exit_queue(struct request_queue *q);
-void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
- struct bio *bio);
-void blk_queue_bypass_start(struct request_queue *q);
-void blk_queue_bypass_end(struct request_queue *q);
-void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);
static inline void blk_queue_enter_live(struct request_queue *q)
@@ -149,6 +71,53 @@
percpu_ref_get(&q->q_usage_counter);
}
+static inline bool biovec_phys_mergeable(struct request_queue *q,
+ struct bio_vec *vec1, struct bio_vec *vec2)
+{
+ unsigned long mask = queue_segment_boundary(q);
+ phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
+ phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;
+
+ if (addr1 + vec1->bv_len != addr2)
+ return false;
+ if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
+ return false;
+ if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
+ return false;
+ return true;
+}
+
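
The last check enforces the controller's segment boundary: even physically contiguous bvecs must not merge into a segment that straddles a boundary the hardware cannot cross. A worked example with an assumed 64 KiB boundary:

    mask  = 0xffff                          (64 KiB segment boundary)
    addr1 = 0x1f000, len1 = 0x1000   ->   addr1 + len1 == addr2 (contiguous)
    addr2 = 0x20000, len2 = 0x1000
    addr1 | mask                = 0x1ffff
    (addr2 + len2 - 1) | mask   = 0x2ffff   !=   0x1ffff   ->   not mergeable,
    since the merged segment would cross the 64 KiB boundary at 0x20000.
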
+static inline bool __bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ return (offset & queue_virt_boundary(q)) ||
+ ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ if (!queue_virt_boundary(q))
+ return false;
+ return __bvec_gap_to_prev(q, bprv, offset);
+}
+
+static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
+ unsigned int nr_segs)
+{
+ rq->nr_phys_segments = nr_segs;
+ rq->__data_len = bio->bi_iter.bi_size;
+ rq->bio = rq->biotail = bio;
+ rq->ioprio = bio_prio(bio);
+
+ if (bio->bi_disk)
+ rq->rq_disk = bio->bi_disk;
+}
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
@@ -158,7 +127,38 @@
return __bio_integrity_endio(bio);
return true;
}
-#else
+
+static inline bool integrity_req_gap_back_merge(struct request *req,
+ struct bio *next)
+{
+ struct bio_integrity_payload *bip = bio_integrity(req->bio);
+ struct bio_integrity_payload *bip_next = bio_integrity(next);
+
+ return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ bip_next->bip_vec[0].bv_offset);
+}
+
+static inline bool integrity_req_gap_front_merge(struct request *req,
+ struct bio *bio)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+
+ return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ bip_next->bip_vec[0].bv_offset);
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline bool integrity_req_gap_back_merge(struct request *req,
+ struct bio *next)
+{
+ return false;
+}
+static inline bool integrity_req_gap_front_merge(struct request *req,
+ struct bio *bio)
+{
+ return false;
+}
+
static inline void blk_flush_integrity(void)
{
}
@@ -166,80 +166,47 @@
{
return true;
}
-#endif
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
-void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
-void blk_delete_timer(struct request *);
-
-bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
- struct bio *bio);
-bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
- struct bio *bio);
+bool bio_attempt_front_merge(struct request *req, struct bio *bio,
+ unsigned int nr_segs);
+bool bio_attempt_back_merge(struct request *req, struct bio *bio,
+ unsigned int nr_segs);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
- unsigned int *request_count,
- struct request **same_queue_rq);
-unsigned int blk_plug_queued_count(struct request_queue *q);
+ unsigned int nr_segs, struct request **same_queue_rq);
void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);
/*
- * EH timer and IO completion will both attempt to 'grab' the request, make
- * sure that only one of them succeeds. Steal the bottom bit of the
- * __deadline field for this.
- */
-static inline int blk_mark_rq_complete(struct request *rq)
-{
- return test_and_set_bit(0, &rq->__deadline);
-}
-
-static inline void blk_clear_rq_complete(struct request *rq)
-{
- clear_bit(0, &rq->__deadline);
-}
-
-static inline bool blk_rq_is_complete(struct request *rq)
-{
- return test_bit(0, &rq->__deadline);
-}
-
-/*
* Internal elevator interface
*/
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
void blk_insert_flush(struct request *rq);
-static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (e->type->ops.sq.elevator_activate_req_fn)
- e->type->ops.sq.elevator_activate_req_fn(q, rq);
-}
-
-static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (e->type->ops.sq.elevator_deactivate_req_fn)
- e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
-}
-
-int elevator_init(struct request_queue *);
-int elevator_init_mq(struct request_queue *q);
+void elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e);
-void elevator_exit(struct request_queue *, struct elevator_queue *);
-int elv_register_queue(struct request_queue *q);
+void __elevator_exit(struct request_queue *, struct elevator_queue *);
+int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);
+static inline void elevator_exit(struct request_queue *q,
+ struct elevator_queue *e)
+{
+ lockdep_assert_held(&q->sysfs_lock);
+
+ blk_mq_sched_free_requests(q);
+ __elevator_exit(q, e);
+}
+
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
#ifdef CONFIG_FAIL_IO_TIMEOUT
@@ -254,44 +221,23 @@
}
#endif
-int ll_back_merge_fn(struct request_queue *q, struct request *req,
- struct bio *bio);
-int ll_front_merge_fn(struct request_queue *q, struct request *req,
- struct bio *bio);
+void __blk_queue_split(struct request_queue *q, struct bio **bio,
+ unsigned int *nr_segs);
+int ll_back_merge_fn(struct request *req, struct bio *bio,
+ unsigned int nr_segs);
+int ll_front_merge_fn(struct request *req, struct bio *bio,
+ unsigned int nr_segs);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next);
-void blk_recalc_rq_segments(struct request *rq);
+unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
-void blk_queue_congestion_threshold(struct request_queue *q);
-
int blk_dev_init(void);
-
-/*
- * Return the threshold (number of used requests) at which the queue is
- * considered to be congested. It include a little hysteresis to keep the
- * context switch rate down.
- */
-static inline int queue_congestion_on_threshold(struct request_queue *q)
-{
- return q->nr_congestion_on;
-}
-
-/*
- * The threshold at which a queue is considered to be uncongested
- */
-static inline int queue_congestion_off_threshold(struct request_queue *q)
-{
- return q->nr_congestion_off;
-}
-
-extern int blk_update_nr_requests(struct request_queue *, unsigned int);
-
/*
* Contribute to IO statistics IFF:
*
@@ -314,21 +260,6 @@
}
/*
- * Steal a bit from this field for legacy IO path atomic IO marking. Note that
- * setting the deadline clears the bottom bit, potentially clearing the
- * completed bit. The user has to be OK with this (current ones are fine).
- */
-static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
-{
- rq->__deadline = time & ~0x1UL;
-}
-
-static inline unsigned long blk_rq_deadline(struct request *rq)
-{
- return rq->__deadline & ~0x1UL;
-}
-
-/*
* The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
* is defined as 'unsigned int'; meanwhile it has to be aligned to the logical
* block size, which is the minimum unit accepted by the hardware.
@@ -350,22 +281,6 @@
int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
/**
- * rq_ioc - determine io_context for request allocation
- * @bio: request being allocated is for this bio (can be %NULL)
- *
- * Determine io_context to use for request allocation for @bio. May return
- * %NULL if %current->io_context doesn't exist.
- */
-static inline struct io_context *rq_ioc(struct bio *bio)
-{
-#ifdef CONFIG_BLK_CGROUP
- if (bio && bio->bi_ioc)
- return bio->bi_ioc;
-#endif
- return current->io_context;
-}
-
-/**
* create_io_context - try to create task->io_context
* @gfp_mask: allocation mask
* @node: allocation node
@@ -423,12 +338,18 @@
}
#endif /* CONFIG_BOUNCE */
-extern void blk_drain_queue(struct request_queue *q);
-
#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif
+struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+void blk_queue_free_zone_bitmaps(struct request_queue *q);
+#else
+static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
+#endif
+
#endif /* BLK_INTERNAL_H */
diff --git a/block/bounce.c b/block/bounce.c
index abb50e7..f8ed677 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>
@@ -163,13 +163,13 @@
{
struct bio *bio_orig = bio->bi_private;
struct bio_vec *bvec, orig_vec;
- int i;
struct bvec_iter orig_iter = bio_orig->bi_iter;
+ struct bvec_iter_all iter_all;
/*
* free up bounce indirect pages used
*/
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
orig_vec = bio_iter_iovec(bio_orig, orig_iter);
if (bvec->bv_page != orig_vec.bv_page) {
dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
@@ -277,7 +277,8 @@
}
}
- bio_clone_blkcg_association(bio, bio_src);
+ bio_clone_blkg_association(bio, bio_src);
+ blkcg_bio_issue_init(bio);
return bio;
}
@@ -312,7 +313,12 @@
bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
&bounce_bio_set);
- bio_for_each_segment_all(to, bio, i) {
+ /*
+ * Bvec table can't be updated by bio_for_each_segment_all(),
+ * so retrieve the bvecs from the table directly. This is safe
+ * because the 'bio' only contains single-page bvecs.
+ */
+ for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
struct page *page = to->bv_page;
if (page_to_pfn(page) <= q->limits.bounce_pfn)
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index f3501cd..347dda1 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -1,27 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BSG helper library
*
* Copyright (C) 2008 James Smart, Emulex Corporation
* Copyright (C) 2011 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011 Mike Christie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/slab.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
@@ -31,6 +17,12 @@
#define uptr64(val) ((void __user *)(uintptr_t)(val))
+struct bsg_set {
+ struct blk_mq_tag_set tag_set;
+ bsg_job_fn *job_fn;
+ bsg_timeout_fn *timeout_fn;
+};
+
static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
{
if (hdr->protocol != BSG_PROTOCOL_SCSI ||
@@ -45,11 +37,40 @@
fmode_t mode)
{
struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+ int ret;
job->request_len = hdr->request_len;
job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
+ if (IS_ERR(job->request))
+ return PTR_ERR(job->request);
- return PTR_ERR_OR_ZERO(job->request);
+ if (hdr->dout_xfer_len && hdr->din_xfer_len) {
+ job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
+ if (IS_ERR(job->bidi_rq)) {
+ ret = PTR_ERR(job->bidi_rq);
+ goto out;
+ }
+
+ ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
+ uptr64(hdr->din_xferp), hdr->din_xfer_len,
+ GFP_KERNEL);
+ if (ret)
+ goto out_free_bidi_rq;
+
+ job->bidi_bio = job->bidi_rq->bio;
+ } else {
+ job->bidi_rq = NULL;
+ job->bidi_bio = NULL;
+ }
+
+ return 0;
+
+out_free_bidi_rq:
+ if (job->bidi_rq)
+ blk_put_request(job->bidi_rq);
+out:
+ kfree(job->request);
+ return ret;
}
static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
@@ -87,7 +108,7 @@
/* we assume all request payload was transferred, residual == 0 */
hdr->dout_resid = 0;
- if (rq->next_rq) {
+ if (job->bidi_rq) {
unsigned int rsp_len = job->reply_payload.payload_len;
if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
@@ -105,6 +126,11 @@
{
struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+ if (job->bidi_rq) {
+ blk_rq_unmap_user(job->bidi_bio);
+ blk_put_request(job->bidi_rq);
+ }
+
kfree(job->request);
}
@@ -129,7 +155,7 @@
kfree(job->request_payload.sg_list);
kfree(job->reply_payload.sg_list);
- blk_end_request_all(rq, BLK_STS_OK);
+ blk_mq_end_request(rq, BLK_STS_OK);
}
void bsg_job_put(struct bsg_job *job)
@@ -157,15 +183,15 @@
{
job->result = result;
job->reply_payload_rcv_len = reply_payload_rcv_len;
- blk_complete_request(blk_mq_rq_from_pdu(job));
+ blk_mq_complete_request(blk_mq_rq_from_pdu(job));
}
EXPORT_SYMBOL_GPL(bsg_job_done);
/**
- * bsg_softirq_done - softirq done routine for destroying the bsg requests
+ * bsg_complete - softirq done routine for destroying the bsg requests
* @rq: BSG request that holds the job to be destroyed
*/
-static void bsg_softirq_done(struct request *rq)
+static void bsg_complete(struct request *rq)
{
struct bsg_job *job = blk_mq_rq_to_pdu(rq);
@@ -194,7 +220,6 @@
*/
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
- struct request *rsp = req->next_rq;
struct bsg_job *job = blk_mq_rq_to_pdu(req);
int ret;
@@ -205,8 +230,8 @@
if (ret)
goto failjob_rls_job;
}
- if (rsp && rsp->bio) {
- ret = bsg_map_buffer(&job->reply_payload, rsp);
+ if (job->bidi_rq) {
+ ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq);
if (ret)
goto failjob_rls_rqst_payload;
}
@@ -224,54 +249,50 @@
}
/**
- * bsg_request_fn - generic handler for bsg requests
- * @q: request queue to manage
+ * bsg_queue_rq - generic handler for bsg requests
+ * @hctx: hardware queue
+ * @bd: queue data
*
* On error the create_bsg_job function should return a -Exyz error value
* that will be set to ->result.
*
* Drivers/subsys should pass this to the queue init function.
*/
-static void bsg_request_fn(struct request_queue *q)
- __releases(q->queue_lock)
- __acquires(q->queue_lock)
+static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct request_queue *q = hctx->queue;
struct device *dev = q->queuedata;
- struct request *req;
+ struct request *req = bd->rq;
+ struct bsg_set *bset =
+ container_of(q->tag_set, struct bsg_set, tag_set);
+ int sts = BLK_STS_IOERR;
int ret;
+ blk_mq_start_request(req);
+
if (!get_device(dev))
- return;
+ return BLK_STS_IOERR;
- while (1) {
- req = blk_fetch_request(q);
- if (!req)
- break;
- spin_unlock_irq(q->queue_lock);
+ if (!bsg_prepare_job(dev, req))
+ goto out;
- if (!bsg_prepare_job(dev, req)) {
- blk_end_request_all(req, BLK_STS_OK);
- spin_lock_irq(q->queue_lock);
- continue;
- }
+ ret = bset->job_fn(blk_mq_rq_to_pdu(req));
+ if (!ret)
+ sts = BLK_STS_OK;
- ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
- spin_lock_irq(q->queue_lock);
- if (ret)
- break;
- }
-
- spin_unlock_irq(q->queue_lock);
+out:
put_device(dev);
- spin_lock_irq(q->queue_lock);
+ return sts;
}
/* called right after the request is allocated for the request_queue */
-static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
+static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx, unsigned int numa_node)
{
struct bsg_job *job = blk_mq_rq_to_pdu(req);
- job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
+ job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
if (!job->reply)
return -ENOMEM;
return 0;
@@ -289,43 +310,87 @@
job->dd_data = job + 1;
}
-static void bsg_exit_rq(struct request_queue *q, struct request *req)
+static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx)
{
struct bsg_job *job = blk_mq_rq_to_pdu(req);
kfree(job->reply);
}
+void bsg_remove_queue(struct request_queue *q)
+{
+ if (q) {
+ struct bsg_set *bset =
+ container_of(q->tag_set, struct bsg_set, tag_set);
+
+ bsg_unregister_queue(q);
+ blk_cleanup_queue(q);
+ blk_mq_free_tag_set(&bset->tag_set);
+ kfree(bset);
+ }
+}
+EXPORT_SYMBOL_GPL(bsg_remove_queue);
+
+static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
+{
+ struct bsg_set *bset =
+ container_of(rq->q->tag_set, struct bsg_set, tag_set);
+
+ if (!bset->timeout_fn)
+ return BLK_EH_DONE;
+ return bset->timeout_fn(rq);
+}
+
+static const struct blk_mq_ops bsg_mq_ops = {
+ .queue_rq = bsg_queue_rq,
+ .init_request = bsg_init_rq,
+ .exit_request = bsg_exit_rq,
+ .initialize_rq_fn = bsg_initialize_rq,
+ .complete = bsg_complete,
+ .timeout = bsg_timeout,
+};
+
/**
* bsg_setup_queue - Create and add the bsg hooks so we can receive requests
* @dev: device to attach bsg device to
* @name: device to give bsg device
* @job_fn: bsg job handler
+ * @timeout: timeout handler function pointer
* @dd_job_size: size of LLD data needed for each job
*/
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
- bsg_job_fn *job_fn, int dd_job_size)
+ bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
{
+ struct bsg_set *bset;
+ struct blk_mq_tag_set *set;
struct request_queue *q;
- int ret;
+ int ret = -ENOMEM;
- q = blk_alloc_queue(GFP_KERNEL);
- if (!q)
+ bset = kzalloc(sizeof(*bset), GFP_KERNEL);
+ if (!bset)
return ERR_PTR(-ENOMEM);
- q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
- q->init_rq_fn = bsg_init_rq;
- q->exit_rq_fn = bsg_exit_rq;
- q->initialize_rq_fn = bsg_initialize_rq;
- q->request_fn = bsg_request_fn;
- ret = blk_init_allocated_queue(q);
- if (ret)
- goto out_cleanup_queue;
+ bset->job_fn = job_fn;
+ bset->timeout_fn = timeout;
+
+ set = &bset->tag_set;
+ set->ops = &bsg_mq_ops;
+ set->nr_hw_queues = 1;
+ set->queue_depth = 128;
+ set->numa_node = NUMA_NO_NODE;
+ set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
+ set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
+ if (blk_mq_alloc_tag_set(set))
+ goto out_tag_set;
+
+ q = blk_mq_init_queue(set);
+ if (IS_ERR(q)) {
+ ret = PTR_ERR(q);
+ goto out_queue;
+ }
q->queuedata = dev;
- q->bsg_job_fn = job_fn;
- blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
- blk_queue_softirq_done(q, bsg_softirq_done);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
@@ -338,6 +403,10 @@
return q;
out_cleanup_queue:
blk_cleanup_queue(q);
+out_queue:
+ blk_mq_free_tag_set(set);
+out_tag_set:
+ kfree(bset);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
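
With the blk-mq conversion the timeout handler moves from the request queue into the tag set, so it is now passed to bsg_setup_queue() directly. A hypothetical transport driver wiring this up (all exampletr_* names are illustrative):

    static int exampletr_bsg_dispatch(struct bsg_job *job)
    {
        /* start the transport command described by job->request;
         * completion is reported later via bsg_job_done() */
        return 0;
    }

    static enum blk_eh_timer_return exampletr_bsg_timeout(struct request *rq)
    {
        return BLK_EH_RESET_TIMER;    /* give the command more time */
    }

    static int exampletr_create_queue(struct device *dev)
    {
        struct request_queue *q;

        q = bsg_setup_queue(dev, dev_name(dev), exampletr_bsg_dispatch,
                            exampletr_bsg_timeout, 0);
        return PTR_ERR_OR_ZERO(q);
    }
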
diff --git a/block/bsg.c b/block/bsg.c
index 9a442c2..833c44b 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -1,13 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bsg.c - block layer implementation of the sg v4 interface
- *
- * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
- * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License version 2. See the file "COPYING" in the main directory of this
- * archive for more details.
- *
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -74,6 +67,11 @@
{
struct scsi_request *sreq = scsi_req(rq);
+ if (hdr->dout_xfer_len && hdr->din_xfer_len) {
+ pr_warn_once("BIDI support in bsg has been removed.\n");
+ return -EOPNOTSUPP;
+ }
+
sreq->cmd_len = hdr->request_len;
if (sreq->cmd_len > BLK_MAX_CDB) {
sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
@@ -114,14 +112,10 @@
hdr->response_len = len;
}
- if (rq->next_rq) {
- hdr->dout_resid = sreq->resid_len;
- hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
- } else if (rq_data_dir(rq) == READ) {
+ if (rq_data_dir(rq) == READ)
hdr->din_resid = sreq->resid_len;
- } else {
+ else
hdr->dout_resid = sreq->resid_len;
- }
return ret;
}
@@ -138,32 +132,35 @@
.free_rq = bsg_scsi_free_rq,
};
-static struct request *
-bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
+static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
{
- struct request *rq, *next_rq = NULL;
+ struct request *rq;
+ struct bio *bio;
+ struct sg_io_v4 hdr;
int ret;
+ if (copy_from_user(&hdr, uarg, sizeof(hdr)))
+ return -EFAULT;
+
if (!q->bsg_dev.class_dev)
- return ERR_PTR(-ENXIO);
+ return -ENXIO;
- if (hdr->guard != 'Q')
- return ERR_PTR(-EINVAL);
-
- ret = q->bsg_dev.ops->check_proto(hdr);
+ if (hdr.guard != 'Q')
+ return -EINVAL;
+ ret = q->bsg_dev.ops->check_proto(&hdr);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- rq = blk_get_request(q, hdr->dout_xfer_len ?
+ rq = blk_get_request(q, hdr.dout_xfer_len ?
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
if (IS_ERR(rq))
- return rq;
+ return PTR_ERR(rq);
- ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode);
+ ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
if (ret)
- goto out;
+ return ret;
- rq->timeout = msecs_to_jiffies(hdr->timeout);
+ rq->timeout = msecs_to_jiffies(hdr.timeout);
if (!rq->timeout)
rq->timeout = q->sg_timeout;
if (!rq->timeout)
@@ -171,64 +168,28 @@
if (rq->timeout < BLK_MIN_SG_TIMEOUT)
rq->timeout = BLK_MIN_SG_TIMEOUT;
- if (hdr->dout_xfer_len && hdr->din_xfer_len) {
- if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- next_rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
- if (IS_ERR(next_rq)) {
- ret = PTR_ERR(next_rq);
- goto out;
- }
-
- rq->next_rq = next_rq;
- ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp),
- hdr->din_xfer_len, GFP_KERNEL);
- if (ret)
- goto out_free_nextrq;
- }
-
- if (hdr->dout_xfer_len) {
- ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp),
- hdr->dout_xfer_len, GFP_KERNEL);
- } else if (hdr->din_xfer_len) {
- ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
- hdr->din_xfer_len, GFP_KERNEL);
+ if (hdr.dout_xfer_len) {
+ ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
+ hdr.dout_xfer_len, GFP_KERNEL);
+ } else if (hdr.din_xfer_len) {
+ ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
+ hdr.din_xfer_len, GFP_KERNEL);
}
if (ret)
- goto out_unmap_nextrq;
- return rq;
+ goto out_free_rq;
-out_unmap_nextrq:
- if (rq->next_rq)
- blk_rq_unmap_user(rq->next_rq->bio);
-out_free_nextrq:
- if (rq->next_rq)
- blk_put_request(rq->next_rq);
-out:
- q->bsg_dev.ops->free_rq(rq);
- blk_put_request(rq);
- return ERR_PTR(ret);
-}
+ bio = rq->bio;
-static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
- struct bio *bio, struct bio *bidi_bio)
-{
- int ret;
-
- ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr);
-
- if (rq->next_rq) {
- blk_rq_unmap_user(bidi_bio);
- blk_put_request(rq->next_rq);
- }
-
+ blk_execute_rq(q, NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
+ ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
blk_rq_unmap_user(bio);
+
+out_free_rq:
rq->q->bsg_dev.ops->free_rq(rq);
blk_put_request(rq);
+ if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
+ return -EFAULT;
return ret;
}
@@ -363,31 +324,39 @@
return bsg_put_device(bd);
}
+static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
+{
+ return put_user(bd->max_queue, uarg);
+}
+
+static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
+{
+ int queue;
+
+ if (get_user(queue, uarg))
+ return -EFAULT;
+ if (queue < 1)
+ return -EINVAL;
+
+ spin_lock_irq(&bd->lock);
+ bd->max_queue = queue;
+ spin_unlock_irq(&bd->lock);
+ return 0;
+}
+
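
The max_queue value guarded here is the per-open cap on commands queued through bsg's write()/read() interface; both ioctls just move a plain int. A minimal sketch against an already opened bsg descriptor:

#include <sys/ioctl.h>
#include <scsi/sg.h>

/* Raise the per-fd queue depth, then read back what the kernel stored. */
static int bsg_resize_queue(int fd, int depth)
{
	if (ioctl(fd, SG_SET_COMMAND_Q, &depth))	/* depth < 1 => -EINVAL */
		return -1;
	return ioctl(fd, SG_GET_COMMAND_Q, &depth);	/* depth now holds bd->max_queue */
}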
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct bsg_device *bd = file->private_data;
- int __user *uarg = (int __user *) arg;
- int ret;
+ void __user *uarg = (void __user *) arg;
switch (cmd) {
- /*
- * our own ioctls
- */
+ /*
+ * Our own ioctls
+ */
case SG_GET_COMMAND_Q:
- return put_user(bd->max_queue, uarg);
- case SG_SET_COMMAND_Q: {
- int queue;
-
- if (get_user(queue, uarg))
- return -EFAULT;
- if (queue < 1)
- return -EINVAL;
-
- spin_lock_irq(&bd->lock);
- bd->max_queue = queue;
- spin_unlock_irq(&bd->lock);
- return 0;
- }
+ return bsg_get_command_q(bd, uarg);
+ case SG_SET_COMMAND_Q:
+ return bsg_set_command_q(bd, uarg);
/*
* SCSI/sg ioctls
@@ -400,36 +369,10 @@
case SG_GET_RESERVED_SIZE:
case SG_SET_RESERVED_SIZE:
case SG_EMULATED_HOST:
- case SCSI_IOCTL_SEND_COMMAND: {
- void __user *uarg = (void __user *) arg;
+ case SCSI_IOCTL_SEND_COMMAND:
return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
- }
- case SG_IO: {
- struct request *rq;
- struct bio *bio, *bidi_bio = NULL;
- struct sg_io_v4 hdr;
- int at_head;
-
- if (copy_from_user(&hdr, uarg, sizeof(hdr)))
- return -EFAULT;
-
- rq = bsg_map_hdr(bd->queue, &hdr, file->f_mode);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- bio = rq->bio;
- if (rq->next_rq)
- bidi_bio = rq->next_rq->bio;
-
- at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
- blk_execute_rq(bd->queue, NULL, rq, at_head);
- ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
-
- if (copy_to_user(uarg, &hdr, sizeof(hdr)))
- return -EFAULT;
-
- return ret;
- }
+ case SG_IO:
+ return bsg_sg_io(bd->queue, file->f_mode, uarg);
default:
return -ENOTTY;
}
@@ -471,7 +414,7 @@
/*
* we need a proper transport to send commands, not a stacked device
*/
- if (!queue_is_rq_based(q))
+ if (!queue_is_mq(q))
return 0;
bcd = &q->bsg_dev;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
deleted file mode 100644
index 2eb8744..0000000
--- a/block/cfq-iosched.c
+++ /dev/null
@@ -1,4910 +0,0 @@
-/*
- * CFQ, or complete fairness queueing, disk scheduler.
- *
- * Based on ideas from a previously unfinished io
- * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
- *
- * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched/clock.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/ktime.h>
-#include <linux/rbtree.h>
-#include <linux/ioprio.h>
-#include <linux/blktrace_api.h>
-#include <linux/blk-cgroup.h>
-#include "blk.h"
-#include "blk-wbt.h"
-
-/*
- * tunables
- */
-/* max queue in one round of service */
-static const int cfq_quantum = 8;
-static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
-/* maximum backwards seek, in KiB */
-static const int cfq_back_max = 16 * 1024;
-/* penalty of a backwards seek */
-static const int cfq_back_penalty = 2;
-static const u64 cfq_slice_sync = NSEC_PER_SEC / 10;
-static u64 cfq_slice_async = NSEC_PER_SEC / 25;
-static const int cfq_slice_async_rq = 2;
-static u64 cfq_slice_idle = NSEC_PER_SEC / 125;
-static u64 cfq_group_idle = NSEC_PER_SEC / 125;
-static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
-static const int cfq_hist_divisor = 4;
-
-/*
- * offset from end of queue service tree for idle class
- */
-#define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5)
-/* offset from end of group service tree under time slice mode */
-#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
-/* offset from end of group service under IOPS mode */
-#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
-
-/*
- * below this threshold, we consider thinktime immediate
- */
-#define CFQ_MIN_TT (2 * NSEC_PER_SEC / HZ)
-
-#define CFQ_SLICE_SCALE (5)
-#define CFQ_HW_QUEUE_MIN (5)
-#define CFQ_SERVICE_SHIFT 12
-
-#define CFQQ_SEEK_THR (sector_t)(8 * 100)
-#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
-#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
-#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
-
-#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)
-#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0])
-#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1])
-
-static struct kmem_cache *cfq_pool;
-
-#define CFQ_PRIO_LISTS IOPRIO_BE_NR
-#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
-#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
-
-#define sample_valid(samples) ((samples) > 80)
-#define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
-
-/* blkio-related constants */
-#define CFQ_WEIGHT_LEGACY_MIN 10
-#define CFQ_WEIGHT_LEGACY_DFL 500
-#define CFQ_WEIGHT_LEGACY_MAX 1000
-
-struct cfq_ttime {
- u64 last_end_request;
-
- u64 ttime_total;
- u64 ttime_mean;
- unsigned long ttime_samples;
-};
-
-/*
- * Most of our rbtree usage is for sorting with min extraction, so
- * if we cache the leftmost node we don't have to walk down the tree
- * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
- * move this into the elevator for the rq sorting as well.
- */
-struct cfq_rb_root {
- struct rb_root_cached rb;
- struct rb_node *rb_rightmost;
- unsigned count;
- u64 min_vdisktime;
- struct cfq_ttime ttime;
-};
-#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT_CACHED, \
- .rb_rightmost = NULL, \
- .ttime = {.last_end_request = ktime_get_ns(),},}
-
-/*
- * Per process-grouping structure
- */
-struct cfq_queue {
- /* reference count */
- int ref;
- /* various state flags, see below */
- unsigned int flags;
- /* parent cfq_data */
- struct cfq_data *cfqd;
- /* service_tree member */
- struct rb_node rb_node;
- /* service_tree key */
- u64 rb_key;
- /* prio tree member */
- struct rb_node p_node;
- /* prio tree root we belong to, if any */
- struct rb_root *p_root;
- /* sorted list of pending requests */
- struct rb_root sort_list;
- /* if fifo isn't expired, next request to serve */
- struct request *next_rq;
- /* requests queued in sort_list */
- int queued[2];
- /* currently allocated requests */
- int allocated[2];
- /* fifo list of requests in sort_list */
- struct list_head fifo;
-
- /* time when queue got scheduled in to dispatch first request. */
- u64 dispatch_start;
- u64 allocated_slice;
- u64 slice_dispatch;
- /* time when first request from queue completed and slice started. */
- u64 slice_start;
- u64 slice_end;
- s64 slice_resid;
-
- /* pending priority requests */
- int prio_pending;
- /* number of requests that are on the dispatch list or inside driver */
- int dispatched;
-
- /* io prio of this group */
- unsigned short ioprio, org_ioprio;
- unsigned short ioprio_class, org_ioprio_class;
-
- pid_t pid;
-
- u32 seek_history;
- sector_t last_request_pos;
-
- struct cfq_rb_root *service_tree;
- struct cfq_queue *new_cfqq;
- struct cfq_group *cfqg;
- /* Number of sectors dispatched from queue in single dispatch round */
- unsigned long nr_sectors;
-};
-
-/*
- * First index in the service_trees.
- * IDLE is handled separately, so it has negative index
- */
-enum wl_class_t {
- BE_WORKLOAD = 0,
- RT_WORKLOAD = 1,
- IDLE_WORKLOAD = 2,
- CFQ_PRIO_NR,
-};
-
-/*
- * Second index in the service_trees.
- */
-enum wl_type_t {
- ASYNC_WORKLOAD = 0,
- SYNC_NOIDLE_WORKLOAD = 1,
- SYNC_WORKLOAD = 2
-};
-
-struct cfqg_stats {
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- /* number of ios merged */
- struct blkg_rwstat merged;
- /* total time spent on device in ns, may not be accurate w/ queueing */
- struct blkg_rwstat service_time;
- /* total time spent waiting in scheduler queue in ns */
- struct blkg_rwstat wait_time;
- /* number of IOs queued up */
- struct blkg_rwstat queued;
- /* total disk time and nr sectors dispatched by this group */
- struct blkg_stat time;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- /* time not charged to this cgroup */
- struct blkg_stat unaccounted_time;
- /* sum of number of ios queued across all samples */
- struct blkg_stat avg_queue_size_sum;
- /* count of samples taken for average */
- struct blkg_stat avg_queue_size_samples;
- /* how many times this group has been removed from service tree */
- struct blkg_stat dequeue;
- /* total time spent waiting for it to be assigned a timeslice. */
- struct blkg_stat group_wait_time;
- /* time spent idling for this blkcg_gq */
- struct blkg_stat idle_time;
- /* total time with empty current active q with other requests queued */
- struct blkg_stat empty_time;
- /* fields after this shouldn't be cleared on stat reset */
- u64 start_group_wait_time;
- u64 start_idle_time;
- u64 start_empty_time;
- uint16_t flags;
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
-#endif /* CONFIG_CFQ_GROUP_IOSCHED */
-};
-
-/* Per-cgroup data */
-struct cfq_group_data {
- /* must be the first member */
- struct blkcg_policy_data cpd;
-
- unsigned int weight;
- unsigned int leaf_weight;
-};
-
-/* This is per cgroup per device grouping structure */
-struct cfq_group {
- /* must be the first member */
- struct blkg_policy_data pd;
-
- /* group service_tree member */
- struct rb_node rb_node;
-
- /* group service_tree key */
- u64 vdisktime;
-
- /*
- * The number of active cfqgs and sum of their weights under this
- * cfqg. This covers this cfqg's leaf_weight and all children's
- * weights, but does not cover weights of further descendants.
- *
- * If a cfqg is on the service tree, it's active. An active cfqg
- * also activates its parent and contributes to the children_weight
- * of the parent.
- */
- int nr_active;
- unsigned int children_weight;
-
- /*
- * vfraction is the fraction of vdisktime that the tasks in this
- * cfqg are entitled to. This is determined by compounding the
- * ratios walking up from this cfqg to the root.
- *
- * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
- * vfractions on a service tree is approximately 1. The sum may
- * deviate a bit due to rounding errors and fluctuations caused by
- * cfqgs entering and leaving the service tree.
- */
- unsigned int vfraction;
-
- /*
- * There are two weights - (internal) weight is the weight of this
-	 * cfqg against the sibling cfqgs. leaf_weight is the weight of
- * this cfqg against the child cfqgs. For the root cfqg, both
- * weights are kept in sync for backward compatibility.
- */
- unsigned int weight;
- unsigned int new_weight;
- unsigned int dev_weight;
-
- unsigned int leaf_weight;
- unsigned int new_leaf_weight;
- unsigned int dev_leaf_weight;
-
- /* number of cfqq currently on this group */
- int nr_cfqq;
-
- /*
- * Per group busy queues average. Useful for workload slice calc. We
- * create the array for each prio class but at run time it is used
-	 * only for the RT and BE classes and the slot for IDLE remains unused.
- * This is primarily done to avoid confusion and a gcc warning.
- */
- unsigned int busy_queues_avg[CFQ_PRIO_NR];
- /*
- * rr lists of queues with requests. We maintain service trees for
-	 * RT and BE classes. These trees are subdivided into subclasses
- * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
- * class there is no subclassification and all the cfq queues go on
- * a single tree service_tree_idle.
- * Counts are embedded in the cfq_rb_root
- */
- struct cfq_rb_root service_trees[2][3];
- struct cfq_rb_root service_tree_idle;
-
- u64 saved_wl_slice;
- enum wl_type_t saved_wl_type;
- enum wl_class_t saved_wl_class;
-
- /* number of requests that are on the dispatch list or inside driver */
- int dispatched;
- struct cfq_ttime ttime;
- struct cfqg_stats stats; /* stats for this cfqg */
-
- /* async queue for each priority case */
- struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
- struct cfq_queue *async_idle_cfqq;
-
-};
-
-struct cfq_io_cq {
- struct io_cq icq; /* must be the first member */
- struct cfq_queue *cfqq[2];
- struct cfq_ttime ttime;
- int ioprio; /* the current ioprio */
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- uint64_t blkcg_serial_nr; /* the current blkcg serial */
-#endif
-};
-
-/*
- * Per block device queue structure
- */
-struct cfq_data {
- struct request_queue *queue;
- /* Root service tree for cfq_groups */
- struct cfq_rb_root grp_service_tree;
- struct cfq_group *root_group;
-
- /*
- * The priority currently being served
- */
- enum wl_class_t serving_wl_class;
- enum wl_type_t serving_wl_type;
- u64 workload_expires;
- struct cfq_group *serving_group;
-
- /*
- * Each priority tree is sorted by next_request position. These
- * trees are used when determining if two or more queues are
- * interleaving requests (see cfq_close_cooperator).
- */
- struct rb_root prio_trees[CFQ_PRIO_LISTS];
-
- unsigned int busy_queues;
- unsigned int busy_sync_queues;
-
- int rq_in_driver;
- int rq_in_flight[2];
-
- /*
- * queue-depth detection
- */
- int rq_queued;
- int hw_tag;
- /*
- * hw_tag can be
-	 * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
- * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
- * 0 => no NCQ
- */
- int hw_tag_est_depth;
- unsigned int hw_tag_samples;
-
- /*
- * idle window management
- */
- struct hrtimer idle_slice_timer;
- struct work_struct unplug_work;
-
- struct cfq_queue *active_queue;
- struct cfq_io_cq *active_cic;
-
- sector_t last_position;
-
- /*
- * tunables, see top of file
- */
- unsigned int cfq_quantum;
- unsigned int cfq_back_penalty;
- unsigned int cfq_back_max;
- unsigned int cfq_slice_async_rq;
- unsigned int cfq_latency;
- u64 cfq_fifo_expire[2];
- u64 cfq_slice[2];
- u64 cfq_slice_idle;
- u64 cfq_group_idle;
- u64 cfq_target_latency;
-
- /*
- * Fallback dummy cfqq for extreme OOM conditions
- */
- struct cfq_queue oom_cfqq;
-
- u64 last_delayed_sync;
-};
-
-static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
-static void cfq_put_queue(struct cfq_queue *cfqq);
-
-static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
- enum wl_class_t class,
- enum wl_type_t type)
-{
- if (!cfqg)
- return NULL;
-
- if (class == IDLE_WORKLOAD)
- return &cfqg->service_tree_idle;
-
- return &cfqg->service_trees[class][type];
-}
-
-enum cfqq_state_flags {
- CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
- CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
- CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
- CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
- CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
- CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
- CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
- CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
- CFQ_CFQQ_FLAG_sync, /* synchronous queue */
- CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
-	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
- CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
- CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
-};
-
-#define CFQ_CFQQ_FNS(name) \
-static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
-{ \
- (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
-} \
-static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
-{ \
- (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
-} \
-static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
-{ \
- return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
-}
-
-CFQ_CFQQ_FNS(on_rr);
-CFQ_CFQQ_FNS(wait_request);
-CFQ_CFQQ_FNS(must_dispatch);
-CFQ_CFQQ_FNS(must_alloc_slice);
-CFQ_CFQQ_FNS(fifo_expire);
-CFQ_CFQQ_FNS(idle_window);
-CFQ_CFQQ_FNS(prio_changed);
-CFQ_CFQQ_FNS(slice_new);
-CFQ_CFQQ_FNS(sync);
-CFQ_CFQQ_FNS(coop);
-CFQ_CFQQ_FNS(split_coop);
-CFQ_CFQQ_FNS(deep);
-CFQ_CFQQ_FNS(wait_busy);
-#undef CFQ_CFQQ_FNS
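
Written out by hand, one instantiation of the macro above, CFQ_CFQQ_FNS(on_rr), expands to the usual mark/clear/test triple over the per-queue flags word:

static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
{
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
}
static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
{
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
}
static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
{
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
}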
-
-#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-
-/* cfqg stats flags */
-enum cfqg_stats_flags {
- CFQG_stats_waiting = 0,
- CFQG_stats_idling,
- CFQG_stats_empty,
-};
-
-#define CFQG_FLAG_FNS(name) \
-static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \
-{ \
- stats->flags |= (1 << CFQG_stats_##name); \
-} \
-static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \
-{ \
- stats->flags &= ~(1 << CFQG_stats_##name); \
-} \
-static inline int cfqg_stats_##name(struct cfqg_stats *stats) \
-{ \
- return (stats->flags & (1 << CFQG_stats_##name)) != 0; \
-} \
-
-CFQG_FLAG_FNS(waiting)
-CFQG_FLAG_FNS(idling)
-CFQG_FLAG_FNS(empty)
-#undef CFQG_FLAG_FNS
-
-/* This should be called with the queue_lock held. */
-static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
-{
- u64 now;
-
- if (!cfqg_stats_waiting(stats))
- return;
-
- now = ktime_get_ns();
- if (now > stats->start_group_wait_time)
- blkg_stat_add(&stats->group_wait_time,
- now - stats->start_group_wait_time);
- cfqg_stats_clear_waiting(stats);
-}
-
-/* This should be called with the queue_lock held. */
-static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
- struct cfq_group *curr_cfqg)
-{
- struct cfqg_stats *stats = &cfqg->stats;
-
- if (cfqg_stats_waiting(stats))
- return;
- if (cfqg == curr_cfqg)
- return;
- stats->start_group_wait_time = ktime_get_ns();
- cfqg_stats_mark_waiting(stats);
-}
-
-/* This should be called with the queue_lock held. */
-static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
-{
- u64 now;
-
- if (!cfqg_stats_empty(stats))
- return;
-
- now = ktime_get_ns();
- if (now > stats->start_empty_time)
- blkg_stat_add(&stats->empty_time,
- now - stats->start_empty_time);
- cfqg_stats_clear_empty(stats);
-}
-
-static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
-{
- blkg_stat_add(&cfqg->stats.dequeue, 1);
-}
-
-static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
-{
- struct cfqg_stats *stats = &cfqg->stats;
-
- if (blkg_rwstat_total(&stats->queued))
- return;
-
- /*
-	 * The group is already marked empty. This can happen if the cfqq got a
-	 * new request in the parent group and moved to this group while being
-	 * added to the service tree. Just ignore the event and move on.
- */
- if (cfqg_stats_empty(stats))
- return;
-
- stats->start_empty_time = ktime_get_ns();
- cfqg_stats_mark_empty(stats);
-}
-
-static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
-{
- struct cfqg_stats *stats = &cfqg->stats;
-
- if (cfqg_stats_idling(stats)) {
- u64 now = ktime_get_ns();
-
- if (now > stats->start_idle_time)
- blkg_stat_add(&stats->idle_time,
- now - stats->start_idle_time);
- cfqg_stats_clear_idling(stats);
- }
-}
-
-static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
-{
- struct cfqg_stats *stats = &cfqg->stats;
-
- BUG_ON(cfqg_stats_idling(stats));
-
- stats->start_idle_time = ktime_get_ns();
- cfqg_stats_mark_idling(stats);
-}
-
-static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
-{
- struct cfqg_stats *stats = &cfqg->stats;
-
- blkg_stat_add(&stats->avg_queue_size_sum,
- blkg_rwstat_total(&stats->queued));
- blkg_stat_add(&stats->avg_queue_size_samples, 1);
- cfqg_stats_update_group_wait_time(stats);
-}
-
-#else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
-
-static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
-static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
-static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
-static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
-static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
-static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
-static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
-
-#endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-
-static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
-{
- return pd ? container_of(pd, struct cfq_group, pd) : NULL;
-}
-
-static struct cfq_group_data *cpd_to_cfqgd(struct blkcg_policy_data *cpd)
-{
- return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
-}
-
-static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
-{
- return pd_to_blkg(&cfqg->pd);
-}
-
-static struct blkcg_policy blkcg_policy_cfq;
-
-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
-{
- return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
-}
-
-static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
-{
- return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
-}
-
-static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
-{
- struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
-
- return pblkg ? blkg_to_cfqg(pblkg) : NULL;
-}
-
-static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
- struct cfq_group *ancestor)
-{
- return cgroup_is_descendant(cfqg_to_blkg(cfqg)->blkcg->css.cgroup,
- cfqg_to_blkg(ancestor)->blkcg->css.cgroup);
-}
-
-static inline void cfqg_get(struct cfq_group *cfqg)
-{
- return blkg_get(cfqg_to_blkg(cfqg));
-}
-
-static inline void cfqg_put(struct cfq_group *cfqg)
-{
- return blkg_put(cfqg_to_blkg(cfqg));
-}
-
-#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \
- blk_add_cgroup_trace_msg((cfqd)->queue, \
- cfqg_to_blkg((cfqq)->cfqg)->blkcg, \
- "cfq%d%c%c " fmt, (cfqq)->pid, \
- cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
- cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
- ##args); \
-} while (0)
-
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \
- blk_add_cgroup_trace_msg((cfqd)->queue, \
- cfqg_to_blkg(cfqg)->blkcg, fmt, ##args); \
-} while (0)
-
-static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
- struct cfq_group *curr_cfqg,
- unsigned int op)
-{
- blkg_rwstat_add(&cfqg->stats.queued, op, 1);
- cfqg_stats_end_empty_time(&cfqg->stats);
- cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
-}
-
-static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
- uint64_t time, unsigned long unaccounted_time)
-{
- blkg_stat_add(&cfqg->stats.time, time);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
-#endif
-}
-
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
- unsigned int op)
-{
- blkg_rwstat_add(&cfqg->stats.queued, op, -1);
-}
-
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
- unsigned int op)
-{
- blkg_rwstat_add(&cfqg->stats.merged, op, 1);
-}
-
-static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
- u64 start_time_ns,
- u64 io_start_time_ns,
- unsigned int op)
-{
- struct cfqg_stats *stats = &cfqg->stats;
- u64 now = ktime_get_ns();
-
- if (now > io_start_time_ns)
- blkg_rwstat_add(&stats->service_time, op,
- now - io_start_time_ns);
- if (io_start_time_ns > start_time_ns)
- blkg_rwstat_add(&stats->wait_time, op,
- io_start_time_ns - start_time_ns);
-}
-
-/* @stats = 0 */
-static void cfqg_stats_reset(struct cfqg_stats *stats)
-{
- /* queued stats shouldn't be cleared */
- blkg_rwstat_reset(&stats->merged);
- blkg_rwstat_reset(&stats->service_time);
- blkg_rwstat_reset(&stats->wait_time);
- blkg_stat_reset(&stats->time);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg_stat_reset(&stats->unaccounted_time);
- blkg_stat_reset(&stats->avg_queue_size_sum);
- blkg_stat_reset(&stats->avg_queue_size_samples);
- blkg_stat_reset(&stats->dequeue);
- blkg_stat_reset(&stats->group_wait_time);
- blkg_stat_reset(&stats->idle_time);
- blkg_stat_reset(&stats->empty_time);
-#endif
-}
-
-/* @to += @from */
-static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
-{
- /* queued stats shouldn't be cleared */
- blkg_rwstat_add_aux(&to->merged, &from->merged);
- blkg_rwstat_add_aux(&to->service_time, &from->service_time);
- blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
-	blkg_stat_add_aux(&to->time, &from->time);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
- blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
- blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
- blkg_stat_add_aux(&to->dequeue, &from->dequeue);
- blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
- blkg_stat_add_aux(&to->idle_time, &from->idle_time);
- blkg_stat_add_aux(&to->empty_time, &from->empty_time);
-#endif
-}
-
-/*
- * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
- * recursive stats can still account for the amount used by this cfqg after
- * it's gone.
- */
-static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
-{
- struct cfq_group *parent = cfqg_parent(cfqg);
-
- lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
-
- if (unlikely(!parent))
- return;
-
- cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
- cfqg_stats_reset(&cfqg->stats);
-}
-
-#else /* CONFIG_CFQ_GROUP_IOSCHED */
-
-static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
-static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
- struct cfq_group *ancestor)
-{
- return true;
-}
-static inline void cfqg_get(struct cfq_group *cfqg) { }
-static inline void cfqg_put(struct cfq_group *cfqg) { }
-
-#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
- blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
- cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
- cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
- ##args)
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
-
-static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
- struct cfq_group *curr_cfqg, unsigned int op) { }
-static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
- uint64_t time, unsigned long unaccounted_time) { }
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
- unsigned int op) { }
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
- unsigned int op) { }
-static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
- u64 start_time_ns,
- u64 io_start_time_ns,
- unsigned int op) { }
-
-#endif /* CONFIG_CFQ_GROUP_IOSCHED */
-
-#define cfq_log(cfqd, fmt, args...) \
- blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
-
-/* Traverses through cfq group service trees */
-#define for_each_cfqg_st(cfqg, i, j, st) \
- for (i = 0; i <= IDLE_WORKLOAD; i++) \
- for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
- : &cfqg->service_tree_idle; \
- (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
- (i == IDLE_WORKLOAD && j == 0); \
- j++, st = i < IDLE_WORKLOAD ? \
- &cfqg->service_trees[i][j]: NULL) \
-
-static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
- struct cfq_ttime *ttime, bool group_idle)
-{
- u64 slice;
- if (!sample_valid(ttime->ttime_samples))
- return false;
- if (group_idle)
- slice = cfqd->cfq_group_idle;
- else
- slice = cfqd->cfq_slice_idle;
- return ttime->ttime_mean > slice;
-}
-
-static inline bool iops_mode(struct cfq_data *cfqd)
-{
- /*
-	 * If we are not idling on queues and it is an NCQ drive, parallel
- * execution of requests is on and measuring time is not possible
- * in most of the cases until and unless we drive shallower queue
- * depths and that becomes a performance bottleneck. In such cases
- * switch to start providing fairness in terms of number of IOs.
- */
- if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
- return true;
- else
- return false;
-}
-
-static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
-{
- if (cfq_class_idle(cfqq))
- return IDLE_WORKLOAD;
- if (cfq_class_rt(cfqq))
- return RT_WORKLOAD;
- return BE_WORKLOAD;
-}
-
-
-static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
-{
- if (!cfq_cfqq_sync(cfqq))
- return ASYNC_WORKLOAD;
- if (!cfq_cfqq_idle_window(cfqq))
- return SYNC_NOIDLE_WORKLOAD;
- return SYNC_WORKLOAD;
-}
-
-static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
- struct cfq_data *cfqd,
- struct cfq_group *cfqg)
-{
- if (wl_class == IDLE_WORKLOAD)
- return cfqg->service_tree_idle.count;
-
- return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
- cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
- cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
-}
-
-static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
- struct cfq_group *cfqg)
-{
- return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
- cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
-}
-
-static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
- struct cfq_io_cq *cic, struct bio *bio);
-
-static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
-{
- /* cic->icq is the first member, %NULL will convert to %NULL */
- return container_of(icq, struct cfq_io_cq, icq);
-}
-
-static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
- struct io_context *ioc)
-{
- if (ioc)
- return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
- return NULL;
-}
-
-static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
-{
- return cic->cfqq[is_sync];
-}
-
-static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
- bool is_sync)
-{
- cic->cfqq[is_sync] = cfqq;
-}
-
-static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
-{
- return cic->icq.q->elevator->elevator_data;
-}
-
-/*
- * schedule a run of the queue if there are requests pending and nothing in
- * the driver will restart queueing
- */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
-{
- if (cfqd->busy_queues) {
- cfq_log(cfqd, "schedule dispatch");
- kblockd_schedule_work(&cfqd->unplug_work);
- }
-}
-
-/*
- * Scale schedule slice based on io priority. Use the sync time slice only
- * if a queue is marked sync and has sync io queued. A sync queue with async
- * io only should not get the full sync slice length.
- */
-static inline u64 cfq_prio_slice(struct cfq_data *cfqd, bool sync,
- unsigned short prio)
-{
- u64 base_slice = cfqd->cfq_slice[sync];
- u64 slice = div_u64(base_slice, CFQ_SLICE_SCALE);
-
- WARN_ON(prio >= IOPRIO_BE_NR);
-
- return base_slice + (slice * (4 - prio));
-}
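
With the default 100 ms sync base slice and CFQ_SLICE_SCALE = 5, each priority step is worth 20 ms around the ioprio-4 midpoint; a worked evaluation:

/*
 * slice = base_slice / 5 = 20 ms per priority step:
 *   prio 0 (highest): 100 + 20 * (4 - 0) = 180 ms
 *   prio 4 (default): 100 + 20 * (4 - 4) = 100 ms
 *   prio 7 (lowest):  100 + 20 * (4 - 7) =  40 ms
 */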
-
-static inline u64
-cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
-}
-
-/**
- * cfqg_scale_charge - scale disk time charge according to cfqg weight
- * @charge: disk time being charged
- * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
- *
- * Scale @charge according to @vfraction, which is in range (0, 1]. The
- * scaling is inversely proportional.
- *
- * scaled = charge / vfraction
- *
- * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
- */
-static inline u64 cfqg_scale_charge(u64 charge,
- unsigned int vfraction)
-{
- u64 c = charge << CFQ_SERVICE_SHIFT; /* make it fixed point */
-
- /* charge / vfraction */
- c <<= CFQ_SERVICE_SHIFT;
- return div_u64(c, vfraction);
-}
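
A worked example of the fixed-point math, using CFQ_SERVICE_SHIFT = 12 from above (a vfraction of 1 << 12 == 4096 means the group owns the whole device):

/*
 * charge = 10 ms, vfraction = 2048 (half the device):
 *   c      = (10 ms << 12) << 12
 *   scaled = c / 2048 = (20 ms) << 12
 * A group entitled to half the bandwidth accrues vdisktime twice as
 * fast, which is the inverse-proportional scaling described above.
 */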
-
-static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
-{
- s64 delta = (s64)(vdisktime - min_vdisktime);
- if (delta > 0)
- min_vdisktime = vdisktime;
-
- return min_vdisktime;
-}
-
-static void update_min_vdisktime(struct cfq_rb_root *st)
-{
- if (!RB_EMPTY_ROOT(&st->rb.rb_root)) {
- struct cfq_group *cfqg = rb_entry_cfqg(st->rb.rb_leftmost);
-
- st->min_vdisktime = max_vdisktime(st->min_vdisktime,
- cfqg->vdisktime);
- }
-}
-
-/*
- * get the averaged number of queues of RT/BE priority.
- * the average is updated with a formula that gives more weight to higher
- * numbers, so it follows sudden increases quickly and decreases slowly
- */
-
-static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
- struct cfq_group *cfqg, bool rt)
-{
- unsigned min_q, max_q;
- unsigned mult = cfq_hist_divisor - 1;
- unsigned round = cfq_hist_divisor / 2;
- unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
-
- min_q = min(cfqg->busy_queues_avg[rt], busy);
- max_q = max(cfqg->busy_queues_avg[rt], busy);
- cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
- cfq_hist_divisor;
- return cfqg->busy_queues_avg[rt];
-}
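
Concretely, with cfq_hist_divisor = 4 (so mult = 3 and round = 2), the update tracks an increase in one step but bleeds off slowly:

/*
 * prev avg = 2, busy jumps to 6:  (3*6 + 2 + 2) / 4 = 5
 * prev avg = 5, busy drops to 0:  (3*5 + 0 + 2) / 4 = 4
 */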
-
-static inline u64
-cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
-{
- return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
-}
-
-static inline u64
-cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- u64 slice = cfq_prio_to_slice(cfqd, cfqq);
- if (cfqd->cfq_latency) {
- /*
- * interested queues (we consider only the ones with the same
- * priority class in the cfq group)
- */
- unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
- cfq_class_rt(cfqq));
- u64 sync_slice = cfqd->cfq_slice[1];
- u64 expect_latency = sync_slice * iq;
- u64 group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
-
- if (expect_latency > group_slice) {
- u64 base_low_slice = 2 * cfqd->cfq_slice_idle;
- u64 low_slice;
-
- /* scale low_slice according to IO priority
- * and sync vs async */
- low_slice = div64_u64(base_low_slice*slice, sync_slice);
- low_slice = min(slice, low_slice);
- /* the adapted slice value is scaled to fit all iqs
- * into the target latency */
- slice = div64_u64(slice*group_slice, expect_latency);
- slice = max(slice, low_slice);
- }
- }
- return slice;
-}
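
Plugging in the tunable defaults (sync slice 100 ms, slice_idle 8 ms, target latency 300 ms) shows the compression at work:

/*
 * iq = 5 busy sync queues in a root group (group_slice = 300 ms):
 *   expect_latency = 100 ms * 5       = 500 ms  (> group_slice)
 *   slice          = 100 ms * 300/500 =  60 ms
 *   low_slice      = min(100 ms, 2 * 8 ms * 100/100) = 16 ms (floor not hit)
 * Each queue's slice is squeezed to 60 ms so a full round of five
 * queues still fits the 300 ms target latency.
 */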
-
-static inline void
-cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- u64 slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
- u64 now = ktime_get_ns();
-
- cfqq->slice_start = now;
- cfqq->slice_end = now + slice;
- cfqq->allocated_slice = slice;
- cfq_log_cfqq(cfqd, cfqq, "set_slice=%llu", cfqq->slice_end - now);
-}
-
-/*
- * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
- * isn't valid until the first request from the dispatch is activated
- * and the slice time set.
- */
-static inline bool cfq_slice_used(struct cfq_queue *cfqq)
-{
- if (cfq_cfqq_slice_new(cfqq))
- return false;
- if (ktime_get_ns() < cfqq->slice_end)
- return false;
-
- return true;
-}
-
-/*
- * Lifted from AS - choose which of rq1 and rq2 that is best served now.
- * We choose the request that is closest to the head right now. Distance
- * behind the head is penalized and only allowed to a certain extent.
- */
-static struct request *
-cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
-{
- sector_t s1, s2, d1 = 0, d2 = 0;
- unsigned long back_max;
-#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
-#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
- unsigned wrap = 0; /* bit mask: requests behind the disk head? */
-
- if (rq1 == NULL || rq1 == rq2)
- return rq2;
- if (rq2 == NULL)
- return rq1;
-
- if (rq_is_sync(rq1) != rq_is_sync(rq2))
- return rq_is_sync(rq1) ? rq1 : rq2;
-
- if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
- return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
-
- s1 = blk_rq_pos(rq1);
- s2 = blk_rq_pos(rq2);
-
- /*
- * by definition, 1KiB is 2 sectors
- */
- back_max = cfqd->cfq_back_max * 2;
-
- /*
- * Strict one way elevator _except_ in the case where we allow
- * short backward seeks which are biased as twice the cost of a
- * similar forward seek.
- */
- if (s1 >= last)
- d1 = s1 - last;
- else if (s1 + back_max >= last)
- d1 = (last - s1) * cfqd->cfq_back_penalty;
- else
- wrap |= CFQ_RQ1_WRAP;
-
- if (s2 >= last)
- d2 = s2 - last;
- else if (s2 + back_max >= last)
- d2 = (last - s2) * cfqd->cfq_back_penalty;
- else
- wrap |= CFQ_RQ2_WRAP;
-
- /* Found required data */
-
- /*
- * By doing switch() on the bit mask "wrap" we avoid having to
- * check two variables for all permutations: --> faster!
- */
- switch (wrap) {
- case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
- if (d1 < d2)
- return rq1;
- else if (d2 < d1)
- return rq2;
- else {
- if (s1 >= s2)
- return rq1;
- else
- return rq2;
- }
-
- case CFQ_RQ2_WRAP:
- return rq1;
- case CFQ_RQ1_WRAP:
- return rq2;
- case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
- default:
- /*
- * Since both rqs are wrapped,
- * start with the one that's further behind head
- * (--> only *one* back seek required),
- * since back seek takes more time than forward.
- */
- if (s1 <= s2)
- return rq1;
- else
- return rq2;
- }
-}
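
A numeric pass through the distance logic with the defaults above (back_max = 16 * 1024 KiB as 32768 sectors, penalty 2):

/*
 * head at sector 1000, rq1 at 1200 (ahead), rq2 at 900 (behind):
 *   d1 = 1200 - 1000      = 200
 *   d2 = (1000 - 900) * 2 = 200
 * The distances tie, so the switch picks rq1 (s1 >= s2): a request
 * behind the head must be less than half as far away as the forward
 * one to be chosen.
 */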
-
-static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
-{
- /* Service tree is empty */
- if (!root->count)
- return NULL;
-
- return rb_entry(rb_first_cached(&root->rb), struct cfq_queue, rb_node);
-}
-
-static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
-{
- return rb_entry_cfqg(rb_first_cached(&root->rb));
-}
-
-static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
-{
- if (root->rb_rightmost == n)
- root->rb_rightmost = rb_prev(n);
-
- rb_erase_cached(n, &root->rb);
- RB_CLEAR_NODE(n);
-
- --root->count;
-}
-
-/*
- * would be nice to take fifo expire time into account as well
- */
-static struct request *
-cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct request *last)
-{
- struct rb_node *rbnext = rb_next(&last->rb_node);
- struct rb_node *rbprev = rb_prev(&last->rb_node);
- struct request *next = NULL, *prev = NULL;
-
- BUG_ON(RB_EMPTY_NODE(&last->rb_node));
-
- if (rbprev)
- prev = rb_entry_rq(rbprev);
-
- if (rbnext)
- next = rb_entry_rq(rbnext);
- else {
- rbnext = rb_first(&cfqq->sort_list);
- if (rbnext && rbnext != &last->rb_node)
- next = rb_entry_rq(rbnext);
- }
-
- return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
-}
-
-static u64 cfq_slice_offset(struct cfq_data *cfqd,
- struct cfq_queue *cfqq)
-{
- /*
- * just an approximation, should be ok.
- */
- return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
- cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
-}
-
-static inline s64
-cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
-{
- return cfqg->vdisktime - st->min_vdisktime;
-}
-
-static void
-__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
-{
- struct rb_node **node = &st->rb.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct cfq_group *__cfqg;
- s64 key = cfqg_key(st, cfqg);
- bool leftmost = true, rightmost = true;
-
- while (*node != NULL) {
- parent = *node;
- __cfqg = rb_entry_cfqg(parent);
-
- if (key < cfqg_key(st, __cfqg)) {
- node = &parent->rb_left;
- rightmost = false;
- } else {
- node = &parent->rb_right;
- leftmost = false;
- }
- }
-
- if (rightmost)
- st->rb_rightmost = &cfqg->rb_node;
-
- rb_link_node(&cfqg->rb_node, parent, node);
- rb_insert_color_cached(&cfqg->rb_node, &st->rb, leftmost);
-}
-
-/*
- * This has to be called only on activation of cfqg
- */
-static void
-cfq_update_group_weight(struct cfq_group *cfqg)
-{
- if (cfqg->new_weight) {
- cfqg->weight = cfqg->new_weight;
- cfqg->new_weight = 0;
- }
-}
-
-static void
-cfq_update_group_leaf_weight(struct cfq_group *cfqg)
-{
- BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-
- if (cfqg->new_leaf_weight) {
- cfqg->leaf_weight = cfqg->new_leaf_weight;
- cfqg->new_leaf_weight = 0;
- }
-}
-
-static void
-cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
-{
- unsigned int vfr = 1 << CFQ_SERVICE_SHIFT; /* start with 1 */
- struct cfq_group *pos = cfqg;
- struct cfq_group *parent;
- bool propagate;
-
- /* add to the service tree */
- BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-
- /*
- * Update leaf_weight. We cannot update weight at this point
- * because cfqg might already have been activated and is
-	 * contributing its current weight to the parent's children_weight.
- */
- cfq_update_group_leaf_weight(cfqg);
- __cfq_group_service_tree_add(st, cfqg);
-
- /*
- * Activate @cfqg and calculate the portion of vfraction @cfqg is
- * entitled to. vfraction is calculated by walking the tree
- * towards the root calculating the fraction it has at each level.
- * The compounded ratio is how much vfraction @cfqg owns.
- *
- * Start with the proportion tasks in this cfqg has against active
- * children cfqgs - its leaf_weight against children_weight.
- */
- propagate = !pos->nr_active++;
- pos->children_weight += pos->leaf_weight;
- vfr = vfr * pos->leaf_weight / pos->children_weight;
-
- /*
- * Compound ->weight walking up the tree. Both activation and
- * vfraction calculation are done in the same loop. Propagation
- * stops once an already activated node is met. vfraction
- * calculation should always continue to the root.
- */
- while ((parent = cfqg_parent(pos))) {
- if (propagate) {
- cfq_update_group_weight(pos);
- propagate = !parent->nr_active++;
- parent->children_weight += pos->weight;
- }
- vfr = vfr * pos->weight / parent->children_weight;
- pos = parent;
- }
-
- cfqg->vfraction = max_t(unsigned, vfr, 1);
-}
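
A small compounding example, again in CFQ_SERVICE_SHIFT fixed point (4096 represents the whole device):

/*
 * cfqg: leaf_weight 500, its own children_weight 1000 (one active child):
 *   vfr = 4096 * 500 / 1000 = 2048
 * parent level: this cfqg's weight 500 out of children_weight 2000:
 *   vfr = 2048 * 500 / 2000 = 512
 * The tasks in this cfqg are entitled to 512/4096 = 1/8 of vdisktime.
 */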
-
-static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
-{
- if (!iops_mode(cfqd))
- return CFQ_SLICE_MODE_GROUP_DELAY;
- else
- return CFQ_IOPS_MODE_GROUP_DELAY;
-}
-
-static void
-cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
-{
- struct cfq_rb_root *st = &cfqd->grp_service_tree;
- struct cfq_group *__cfqg;
- struct rb_node *n;
-
- cfqg->nr_cfqq++;
- if (!RB_EMPTY_NODE(&cfqg->rb_node))
- return;
-
- /*
- * Currently put the group at the end. Later implement something
-	 * so that groups get lesser vtime based on their weights, so that a
-	 * group does not lose everything if it was not continuously backlogged.
- */
- n = st->rb_rightmost;
- if (n) {
- __cfqg = rb_entry_cfqg(n);
- cfqg->vdisktime = __cfqg->vdisktime +
- cfq_get_cfqg_vdisktime_delay(cfqd);
- } else
- cfqg->vdisktime = st->min_vdisktime;
- cfq_group_service_tree_add(st, cfqg);
-}
-
-static void
-cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
-{
- struct cfq_group *pos = cfqg;
- bool propagate;
-
- /*
- * Undo activation from cfq_group_service_tree_add(). Deactivate
- * @cfqg and propagate deactivation upwards.
- */
- propagate = !--pos->nr_active;
- pos->children_weight -= pos->leaf_weight;
-
- while (propagate) {
- struct cfq_group *parent = cfqg_parent(pos);
-
- /* @pos has 0 nr_active at this point */
- WARN_ON_ONCE(pos->children_weight);
- pos->vfraction = 0;
-
- if (!parent)
- break;
-
- propagate = !--parent->nr_active;
- parent->children_weight -= pos->weight;
- pos = parent;
- }
-
- /* remove from the service tree */
- if (!RB_EMPTY_NODE(&cfqg->rb_node))
- cfq_rb_erase(&cfqg->rb_node, st);
-}
-
-static void
-cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
-{
- struct cfq_rb_root *st = &cfqd->grp_service_tree;
-
- BUG_ON(cfqg->nr_cfqq < 1);
- cfqg->nr_cfqq--;
-
- /* If there are other cfq queues under this group, don't delete it */
- if (cfqg->nr_cfqq)
- return;
-
- cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
- cfq_group_service_tree_del(st, cfqg);
- cfqg->saved_wl_slice = 0;
- cfqg_stats_update_dequeue(cfqg);
-}
-
-static inline u64 cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
- u64 *unaccounted_time)
-{
- u64 slice_used;
- u64 now = ktime_get_ns();
-
- /*
- * Queue got expired before even a single request completed or
- * got expired immediately after first request completion.
- */
- if (!cfqq->slice_start || cfqq->slice_start == now) {
- /*
- * Also charge the seek time incurred to the group, otherwise
-		 * if there are multiple queues in the group, each can dispatch
-		 * a single request on seeky media and cause lots of seek time
-		 * and the group will never know it.
- */
- slice_used = max_t(u64, (now - cfqq->dispatch_start),
- jiffies_to_nsecs(1));
- } else {
- slice_used = now - cfqq->slice_start;
- if (slice_used > cfqq->allocated_slice) {
- *unaccounted_time = slice_used - cfqq->allocated_slice;
- slice_used = cfqq->allocated_slice;
- }
- if (cfqq->slice_start > cfqq->dispatch_start)
- *unaccounted_time += cfqq->slice_start -
- cfqq->dispatch_start;
- }
-
- return slice_used;
-}
-
-static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
- struct cfq_queue *cfqq)
-{
- struct cfq_rb_root *st = &cfqd->grp_service_tree;
- u64 used_sl, charge, unaccounted_sl = 0;
- int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
- - cfqg->service_tree_idle.count;
- unsigned int vfr;
- u64 now = ktime_get_ns();
-
- BUG_ON(nr_sync < 0);
- used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
-
- if (iops_mode(cfqd))
- charge = cfqq->slice_dispatch;
- else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
- charge = cfqq->allocated_slice;
-
- /*
- * Can't update vdisktime while on service tree and cfqg->vfraction
- * is valid only while on it. Cache vfr, leave the service tree,
- * update vdisktime and go back on. The re-addition to the tree
- * will also update the weights as necessary.
- */
- vfr = cfqg->vfraction;
- cfq_group_service_tree_del(st, cfqg);
- cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
- cfq_group_service_tree_add(st, cfqg);
-
- /* This group is being expired. Save the context */
- if (cfqd->workload_expires > now) {
- cfqg->saved_wl_slice = cfqd->workload_expires - now;
- cfqg->saved_wl_type = cfqd->serving_wl_type;
- cfqg->saved_wl_class = cfqd->serving_wl_class;
- } else
- cfqg->saved_wl_slice = 0;
-
- cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
- st->min_vdisktime);
- cfq_log_cfqq(cfqq->cfqd, cfqq,
- "sl_used=%llu disp=%llu charge=%llu iops=%u sect=%lu",
- used_sl, cfqq->slice_dispatch, charge,
- iops_mode(cfqd), cfqq->nr_sectors);
- cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
- cfqg_stats_set_start_empty_time(cfqg);
-}
-
-/**
- * cfq_init_cfqg_base - initialize base part of a cfq_group
- * @cfqg: cfq_group to initialize
- *
- * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
- * is enabled or not.
- */
-static void cfq_init_cfqg_base(struct cfq_group *cfqg)
-{
- struct cfq_rb_root *st;
- int i, j;
-
- for_each_cfqg_st(cfqg, i, j, st)
- *st = CFQ_RB_ROOT;
- RB_CLEAR_NODE(&cfqg->rb_node);
-
- cfqg->ttime.last_end_request = ktime_get_ns();
-}
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
- bool on_dfl, bool reset_dev, bool is_leaf_weight);
-
-static void cfqg_stats_exit(struct cfqg_stats *stats)
-{
- blkg_rwstat_exit(&stats->merged);
- blkg_rwstat_exit(&stats->service_time);
- blkg_rwstat_exit(&stats->wait_time);
- blkg_rwstat_exit(&stats->queued);
- blkg_stat_exit(&stats->time);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg_stat_exit(&stats->unaccounted_time);
- blkg_stat_exit(&stats->avg_queue_size_sum);
- blkg_stat_exit(&stats->avg_queue_size_samples);
- blkg_stat_exit(&stats->dequeue);
- blkg_stat_exit(&stats->group_wait_time);
- blkg_stat_exit(&stats->idle_time);
- blkg_stat_exit(&stats->empty_time);
-#endif
-}
-
-static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
-{
- if (blkg_rwstat_init(&stats->merged, gfp) ||
- blkg_rwstat_init(&stats->service_time, gfp) ||
- blkg_rwstat_init(&stats->wait_time, gfp) ||
- blkg_rwstat_init(&stats->queued, gfp) ||
- blkg_stat_init(&stats->time, gfp))
- goto err;
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
- blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
- blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
- blkg_stat_init(&stats->dequeue, gfp) ||
- blkg_stat_init(&stats->group_wait_time, gfp) ||
- blkg_stat_init(&stats->idle_time, gfp) ||
- blkg_stat_init(&stats->empty_time, gfp))
- goto err;
-#endif
- return 0;
-err:
- cfqg_stats_exit(stats);
- return -ENOMEM;
-}
-
-static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
-{
- struct cfq_group_data *cgd;
-
- cgd = kzalloc(sizeof(*cgd), gfp);
- if (!cgd)
- return NULL;
- return &cgd->cpd;
-}
-
-static void cfq_cpd_init(struct blkcg_policy_data *cpd)
-{
- struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
- unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
- CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
-
- if (cpd_to_blkcg(cpd) == &blkcg_root)
- weight *= 2;
-
- cgd->weight = weight;
- cgd->leaf_weight = weight;
-}
-
-static void cfq_cpd_free(struct blkcg_policy_data *cpd)
-{
- kfree(cpd_to_cfqgd(cpd));
-}
-
-static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
-{
- struct blkcg *blkcg = cpd_to_blkcg(cpd);
- bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys);
- unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
-
- if (blkcg == &blkcg_root)
- weight *= 2;
-
- WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, false));
- WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, true));
-}
-
-static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
-{
- struct cfq_group *cfqg;
-
- cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
- if (!cfqg)
- return NULL;
-
- cfq_init_cfqg_base(cfqg);
- if (cfqg_stats_init(&cfqg->stats, gfp)) {
- kfree(cfqg);
- return NULL;
- }
-
- return &cfqg->pd;
-}
-
-static void cfq_pd_init(struct blkg_policy_data *pd)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
- struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);
-
- cfqg->weight = cgd->weight;
- cfqg->leaf_weight = cgd->leaf_weight;
-}
-
-static void cfq_pd_offline(struct blkg_policy_data *pd)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
- int i;
-
- for (i = 0; i < IOPRIO_BE_NR; i++) {
- if (cfqg->async_cfqq[0][i])
- cfq_put_queue(cfqg->async_cfqq[0][i]);
- if (cfqg->async_cfqq[1][i])
- cfq_put_queue(cfqg->async_cfqq[1][i]);
- }
-
- if (cfqg->async_idle_cfqq)
- cfq_put_queue(cfqg->async_idle_cfqq);
-
- /*
- * @blkg is going offline and will be ignored by
- * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
- * that they don't get lost. If IOs complete after this point, the
- * stats for them will be lost. Oh well...
- */
- cfqg_stats_xfer_dead(cfqg);
-}
-
-static void cfq_pd_free(struct blkg_policy_data *pd)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
-
- cfqg_stats_exit(&cfqg->stats);
- return kfree(cfqg);
-}
-
-static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
-
- cfqg_stats_reset(&cfqg->stats);
-}
-
-static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
- struct blkcg *blkcg)
-{
- struct blkcg_gq *blkg;
-
- blkg = blkg_lookup(blkcg, cfqd->queue);
- if (likely(blkg))
- return blkg_to_cfqg(blkg);
- return NULL;
-}
-
-static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
-{
- cfqq->cfqg = cfqg;
- /* cfqq reference on cfqg */
- cfqg_get(cfqg);
-}
-
-static u64 cfqg_prfill_weight_device(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
-
- if (!cfqg->dev_weight)
- return 0;
- return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
-}
-
-static int cfqg_print_weight_device(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_weight_device, &blkcg_policy_cfq,
- 0, false);
- return 0;
-}
-
-static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
-
- if (!cfqg->dev_leaf_weight)
- return 0;
- return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
-}
-
-static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
- 0, false);
- return 0;
-}
-
-static int cfq_print_weight(struct seq_file *sf, void *v)
-{
- struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
- struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
- unsigned int val = 0;
-
- if (cgd)
- val = cgd->weight;
-
- seq_printf(sf, "%u\n", val);
- return 0;
-}
-
-static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
-{
- struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
- struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
- unsigned int val = 0;
-
- if (cgd)
- val = cgd->leaf_weight;
-
- seq_printf(sf, "%u\n", val);
- return 0;
-}
-
-static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off,
- bool on_dfl, bool is_leaf_weight)
-{
- unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
- unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
- struct blkcg *blkcg = css_to_blkcg(of_css(of));
- struct blkg_conf_ctx ctx;
- struct cfq_group *cfqg;
- struct cfq_group_data *cfqgd;
- int ret;
- u64 v;
-
- ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
- if (ret)
- return ret;
-
- if (sscanf(ctx.body, "%llu", &v) == 1) {
- /* require "default" on dfl */
- ret = -ERANGE;
- if (!v && on_dfl)
- goto out_finish;
- } else if (!strcmp(strim(ctx.body), "default")) {
- v = 0;
- } else {
- ret = -EINVAL;
- goto out_finish;
- }
-
- cfqg = blkg_to_cfqg(ctx.blkg);
- cfqgd = blkcg_to_cfqgd(blkcg);
-
- ret = -ERANGE;
- if (!v || (v >= min && v <= max)) {
- if (!is_leaf_weight) {
- cfqg->dev_weight = v;
- cfqg->new_weight = v ?: cfqgd->weight;
- } else {
- cfqg->dev_leaf_weight = v;
- cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight;
- }
- ret = 0;
- }
-out_finish:
- blkg_conf_finish(&ctx);
- return ret ?: nbytes;
-}
-
-static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off)
-{
- return __cfqg_set_weight_device(of, buf, nbytes, off, false, false);
-}
-
-static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off)
-{
- return __cfqg_set_weight_device(of, buf, nbytes, off, false, true);
-}
-
-static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
- bool on_dfl, bool reset_dev, bool is_leaf_weight)
-{
- unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
- unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
- struct blkcg *blkcg = css_to_blkcg(css);
- struct blkcg_gq *blkg;
- struct cfq_group_data *cfqgd;
- int ret = 0;
-
- if (val < min || val > max)
- return -ERANGE;
-
- spin_lock_irq(&blkcg->lock);
- cfqgd = blkcg_to_cfqgd(blkcg);
- if (!cfqgd) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!is_leaf_weight)
- cfqgd->weight = val;
- else
- cfqgd->leaf_weight = val;
-
- hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
- struct cfq_group *cfqg = blkg_to_cfqg(blkg);
-
- if (!cfqg)
- continue;
-
- if (!is_leaf_weight) {
- if (reset_dev)
- cfqg->dev_weight = 0;
- if (!cfqg->dev_weight)
- cfqg->new_weight = cfqgd->weight;
- } else {
- if (reset_dev)
- cfqg->dev_leaf_weight = 0;
- if (!cfqg->dev_leaf_weight)
- cfqg->new_leaf_weight = cfqgd->leaf_weight;
- }
- }
-
-out:
- spin_unlock_irq(&blkcg->lock);
- return ret;
-}
-
-static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
- u64 val)
-{
- return __cfq_set_weight(css, val, false, false, false);
-}
-
-static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
- struct cftype *cft, u64 val)
-{
- return __cfq_set_weight(css, val, false, false, true);
-}
-
-static int cfqg_print_stat(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
- &blkcg_policy_cfq, seq_cft(sf)->private, false);
- return 0;
-}
-
-static int cfqg_print_rwstat(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
- &blkcg_policy_cfq, seq_cft(sf)->private, true);
- return 0;
-}
-
-static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
- &blkcg_policy_cfq, off);
- return __blkg_prfill_u64(sf, pd, sum);
-}
-
-static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
- &blkcg_policy_cfq, off);
- return __blkg_prfill_rwstat(sf, pd, &sum);
-}
-
-static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
- seq_cft(sf)->private, false);
- return 0;
-}
-
-static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
- seq_cft(sf)->private, true);
- return 0;
-}
-
-static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
- int off)
-{
- u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
-
- return __blkg_prfill_u64(sf, pd, sum >> 9);
-}
-
-static int cfqg_print_stat_sectors(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false);
- return 0;
-}
-
-static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
- offsetof(struct blkcg_gq, stat_bytes));
- u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
- atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
-
- return __blkg_prfill_u64(sf, pd, sum >> 9);
-}
-
-static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0,
- false);
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
- u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
- u64 v = 0;
-
- if (samples) {
- v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
- v = div64_u64(v, samples);
- }
- __blkg_prfill_u64(sf, pd, v);
- return 0;
-}
-
-/* print avg_queue_size */
-static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
- 0, false);
- return 0;
-}
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
-
-static struct cftype cfq_blkcg_legacy_files[] = {
- /* on root, weight is mapped to leaf_weight */
- {
- .name = "weight_device",
- .flags = CFTYPE_ONLY_ON_ROOT,
- .seq_show = cfqg_print_leaf_weight_device,
- .write = cfqg_set_leaf_weight_device,
- },
- {
- .name = "weight",
- .flags = CFTYPE_ONLY_ON_ROOT,
- .seq_show = cfq_print_leaf_weight,
- .write_u64 = cfq_set_leaf_weight,
- },
-
- /* no such mapping necessary for !roots */
- {
- .name = "weight_device",
- .flags = CFTYPE_NOT_ON_ROOT,
- .seq_show = cfqg_print_weight_device,
- .write = cfqg_set_weight_device,
- },
- {
- .name = "weight",
- .flags = CFTYPE_NOT_ON_ROOT,
- .seq_show = cfq_print_weight,
- .write_u64 = cfq_set_weight,
- },
-
- {
- .name = "leaf_weight_device",
- .seq_show = cfqg_print_leaf_weight_device,
- .write = cfqg_set_leaf_weight_device,
- },
- {
- .name = "leaf_weight",
- .seq_show = cfq_print_leaf_weight,
- .write_u64 = cfq_set_leaf_weight,
- },
-
- /* statistics, covers only the tasks in the cfqg */
- {
- .name = "time",
- .private = offsetof(struct cfq_group, stats.time),
- .seq_show = cfqg_print_stat,
- },
- {
- .name = "sectors",
- .seq_show = cfqg_print_stat_sectors,
- },
- {
- .name = "io_service_bytes",
- .private = (unsigned long)&blkcg_policy_cfq,
- .seq_show = blkg_print_stat_bytes,
- },
- {
- .name = "io_serviced",
- .private = (unsigned long)&blkcg_policy_cfq,
- .seq_show = blkg_print_stat_ios,
- },
- {
- .name = "io_service_time",
- .private = offsetof(struct cfq_group, stats.service_time),
- .seq_show = cfqg_print_rwstat,
- },
- {
- .name = "io_wait_time",
- .private = offsetof(struct cfq_group, stats.wait_time),
- .seq_show = cfqg_print_rwstat,
- },
- {
- .name = "io_merged",
- .private = offsetof(struct cfq_group, stats.merged),
- .seq_show = cfqg_print_rwstat,
- },
- {
- .name = "io_queued",
- .private = offsetof(struct cfq_group, stats.queued),
- .seq_show = cfqg_print_rwstat,
- },
-
-	/* the same statistics which cover the cfqg and its descendants */
- {
- .name = "time_recursive",
- .private = offsetof(struct cfq_group, stats.time),
- .seq_show = cfqg_print_stat_recursive,
- },
- {
- .name = "sectors_recursive",
- .seq_show = cfqg_print_stat_sectors_recursive,
- },
- {
- .name = "io_service_bytes_recursive",
- .private = (unsigned long)&blkcg_policy_cfq,
- .seq_show = blkg_print_stat_bytes_recursive,
- },
- {
- .name = "io_serviced_recursive",
- .private = (unsigned long)&blkcg_policy_cfq,
- .seq_show = blkg_print_stat_ios_recursive,
- },
- {
- .name = "io_service_time_recursive",
- .private = offsetof(struct cfq_group, stats.service_time),
- .seq_show = cfqg_print_rwstat_recursive,
- },
- {
- .name = "io_wait_time_recursive",
- .private = offsetof(struct cfq_group, stats.wait_time),
- .seq_show = cfqg_print_rwstat_recursive,
- },
- {
- .name = "io_merged_recursive",
- .private = offsetof(struct cfq_group, stats.merged),
- .seq_show = cfqg_print_rwstat_recursive,
- },
- {
- .name = "io_queued_recursive",
- .private = offsetof(struct cfq_group, stats.queued),
- .seq_show = cfqg_print_rwstat_recursive,
- },
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- {
- .name = "avg_queue_size",
- .seq_show = cfqg_print_avg_queue_size,
- },
- {
- .name = "group_wait_time",
- .private = offsetof(struct cfq_group, stats.group_wait_time),
- .seq_show = cfqg_print_stat,
- },
- {
- .name = "idle_time",
- .private = offsetof(struct cfq_group, stats.idle_time),
- .seq_show = cfqg_print_stat,
- },
- {
- .name = "empty_time",
- .private = offsetof(struct cfq_group, stats.empty_time),
- .seq_show = cfqg_print_stat,
- },
- {
- .name = "dequeue",
- .private = offsetof(struct cfq_group, stats.dequeue),
- .seq_show = cfqg_print_stat,
- },
- {
- .name = "unaccounted_time",
- .private = offsetof(struct cfq_group, stats.unaccounted_time),
- .seq_show = cfqg_print_stat,
- },
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
- { } /* terminate */
-};
-
-static int cfq_print_weight_on_dfl(struct seq_file *sf, void *v)
-{
- struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
- struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
-
- seq_printf(sf, "default %u\n", cgd->weight);
- blkcg_print_blkgs(sf, blkcg, cfqg_prfill_weight_device,
- &blkcg_policy_cfq, 0, false);
- return 0;
-}
-
-static ssize_t cfq_set_weight_on_dfl(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off)
-{
- char *endp;
- int ret;
- u64 v;
-
- buf = strim(buf);
-
- /* "WEIGHT" or "default WEIGHT" sets the default weight */
- v = simple_strtoull(buf, &endp, 0);
- if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
- ret = __cfq_set_weight(of_css(of), v, true, false, false);
- return ret ?: nbytes;
- }
-
- /* "MAJ:MIN WEIGHT" */
- return __cfqg_set_weight_device(of, buf, nbytes, off, true, false);
-}
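-
-/*
- * Illustrative inputs accepted by the interface above ("8:16" is a
- * hypothetical device number): writing "500" or "default 500" sets the
- * default weight, while "8:16 300" sets a per-device weight.
- */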
-
-static struct cftype cfq_blkcg_files[] = {
- {
- .name = "weight",
- .flags = CFTYPE_NOT_ON_ROOT,
- .seq_show = cfq_print_weight_on_dfl,
- .write = cfq_set_weight_on_dfl,
- },
- { } /* terminate */
-};
-
-#else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
- struct blkcg *blkcg)
-{
- return cfqd->root_group;
-}
-
-static inline void
-cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
- cfqq->cfqg = cfqg;
-}
-
-#endif /* GROUP_IOSCHED */
-
-/*
- * The cfqd->service_trees holds all pending cfq_queue's that have
- * requests waiting to be processed. It is sorted in the order that
- * we will service the queues.
- */
-static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- bool add_front)
-{
- struct rb_node **p, *parent;
- struct cfq_queue *__cfqq;
- u64 rb_key;
- struct cfq_rb_root *st;
- bool leftmost = true;
- int new_cfqq = 1;
- u64 now = ktime_get_ns();
-
- st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
- if (cfq_class_idle(cfqq)) {
- rb_key = CFQ_IDLE_DELAY;
- parent = st->rb_rightmost;
- if (parent && parent != &cfqq->rb_node) {
- __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
- rb_key += __cfqq->rb_key;
- } else
- rb_key += now;
- } else if (!add_front) {
- /*
- * Get our rb key offset. Subtract any residual slice
- * value carried from last service. A negative resid
- * count indicates slice overrun, and this should position
- * the next service time further away in the tree.
- */
- rb_key = cfq_slice_offset(cfqd, cfqq) + now;
- rb_key -= cfqq->slice_resid;
- cfqq->slice_resid = 0;
- } else {
- rb_key = -NSEC_PER_SEC;
- __cfqq = cfq_rb_first(st);
- rb_key += __cfqq ? __cfqq->rb_key : now;
- }
-
- if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
- new_cfqq = 0;
- /*
- * same position, nothing more to do
- */
- if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
- return;
-
- cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
- cfqq->service_tree = NULL;
- }
-
- parent = NULL;
- cfqq->service_tree = st;
- p = &st->rb.rb_root.rb_node;
- while (*p) {
- parent = *p;
- __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
-
- /*
- * sort by key, that represents service time.
- */
- if (rb_key < __cfqq->rb_key)
- p = &parent->rb_left;
- else {
- p = &parent->rb_right;
- leftmost = false;
- }
- }
-
- cfqq->rb_key = rb_key;
- rb_link_node(&cfqq->rb_node, parent, p);
- rb_insert_color_cached(&cfqq->rb_node, &st->rb, leftmost);
- st->count++;
- if (add_front || !new_cfqq)
- return;
- cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
-}
-
-static struct cfq_queue *
-cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
- sector_t sector, struct rb_node **ret_parent,
- struct rb_node ***rb_link)
-{
- struct rb_node **p, *parent;
- struct cfq_queue *cfqq = NULL;
-
- parent = NULL;
- p = &root->rb_node;
- while (*p) {
- struct rb_node **n;
-
- parent = *p;
- cfqq = rb_entry(parent, struct cfq_queue, p_node);
-
- /*
- * Sort strictly based on sector. Smallest to the left,
- * largest to the right.
- */
- if (sector > blk_rq_pos(cfqq->next_rq))
- n = &(*p)->rb_right;
- else if (sector < blk_rq_pos(cfqq->next_rq))
- n = &(*p)->rb_left;
- else
- break;
- p = n;
- cfqq = NULL;
- }
-
- *ret_parent = parent;
- if (rb_link)
- *rb_link = p;
- return cfqq;
-}
-
-static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- struct rb_node **p, *parent;
- struct cfq_queue *__cfqq;
-
- if (cfqq->p_root) {
- rb_erase(&cfqq->p_node, cfqq->p_root);
- cfqq->p_root = NULL;
- }
-
- if (cfq_class_idle(cfqq))
- return;
- if (!cfqq->next_rq)
- return;
-
- cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
- __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
- blk_rq_pos(cfqq->next_rq), &parent, &p);
- if (!__cfqq) {
- rb_link_node(&cfqq->p_node, parent, p);
- rb_insert_color(&cfqq->p_node, cfqq->p_root);
- } else
- cfqq->p_root = NULL;
-}
-
-/*
- * Update cfqq's position in the service tree.
- */
-static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- /*
- * Resorting requires the cfqq to be on the RR list already.
- */
- if (cfq_cfqq_on_rr(cfqq)) {
- cfq_service_tree_add(cfqd, cfqq, 0);
- cfq_prio_tree_add(cfqd, cfqq);
- }
-}
-
-/*
- * add to busy list of queues for service, trying to be fair in ordering
- * the pending list according to last request service
- */
-static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
- BUG_ON(cfq_cfqq_on_rr(cfqq));
- cfq_mark_cfqq_on_rr(cfqq);
- cfqd->busy_queues++;
- if (cfq_cfqq_sync(cfqq))
- cfqd->busy_sync_queues++;
-
- cfq_resort_rr_list(cfqd, cfqq);
-}
-
-/*
- * Called when the cfqq no longer has requests pending, remove it from
- * the service tree.
- */
-static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
- BUG_ON(!cfq_cfqq_on_rr(cfqq));
- cfq_clear_cfqq_on_rr(cfqq);
-
- if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
- cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
- cfqq->service_tree = NULL;
- }
- if (cfqq->p_root) {
- rb_erase(&cfqq->p_node, cfqq->p_root);
- cfqq->p_root = NULL;
- }
-
- cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
- BUG_ON(!cfqd->busy_queues);
- cfqd->busy_queues--;
- if (cfq_cfqq_sync(cfqq))
- cfqd->busy_sync_queues--;
-}
-
-/*
- * rb tree support functions
- */
-static void cfq_del_rq_rb(struct request *rq)
-{
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
- const int sync = rq_is_sync(rq);
-
- BUG_ON(!cfqq->queued[sync]);
- cfqq->queued[sync]--;
-
- elv_rb_del(&cfqq->sort_list, rq);
-
- if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
- /*
- * Queue will be deleted from service tree when we actually
- * expire it later. Right now just remove it from prio tree
- * as it is empty.
- */
- if (cfqq->p_root) {
- rb_erase(&cfqq->p_node, cfqq->p_root);
- cfqq->p_root = NULL;
- }
- }
-}
-
-static void cfq_add_rq_rb(struct request *rq)
-{
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
- struct cfq_data *cfqd = cfqq->cfqd;
- struct request *prev;
-
- cfqq->queued[rq_is_sync(rq)]++;
-
- elv_rb_add(&cfqq->sort_list, rq);
-
- if (!cfq_cfqq_on_rr(cfqq))
- cfq_add_cfqq_rr(cfqd, cfqq);
-
- /*
- * check if this request is a better next-serve candidate
- */
- prev = cfqq->next_rq;
- cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
-
- /*
- * adjust priority tree position, if ->next_rq changes
- */
- if (prev != cfqq->next_rq)
- cfq_prio_tree_add(cfqd, cfqq);
-
- BUG_ON(!cfqq->next_rq);
-}
-
-static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
-{
- elv_rb_del(&cfqq->sort_list, rq);
- cfqq->queued[rq_is_sync(rq)]--;
- cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
- cfq_add_rq_rb(rq);
- cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
- rq->cmd_flags);
-}
-
-static struct request *
-cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
-{
- struct task_struct *tsk = current;
- struct cfq_io_cq *cic;
- struct cfq_queue *cfqq;
-
- cic = cfq_cic_lookup(cfqd, tsk->io_context);
- if (!cic)
- return NULL;
-
- cfqq = cic_to_cfqq(cic, op_is_sync(bio->bi_opf));
- if (cfqq)
- return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
-
- return NULL;
-}
-
-static void cfq_activate_request(struct request_queue *q, struct request *rq)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
-
- cfqd->rq_in_driver++;
- cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
- cfqd->rq_in_driver);
-
- cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
-}
-
-static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
-
- WARN_ON(!cfqd->rq_in_driver);
- cfqd->rq_in_driver--;
- cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
- cfqd->rq_in_driver);
-}
-
-static void cfq_remove_request(struct request *rq)
-{
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
-
- if (cfqq->next_rq == rq)
- cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
-
- list_del_init(&rq->queuelist);
- cfq_del_rq_rb(rq);
-
- cfqq->cfqd->rq_queued--;
- cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
- if (rq->cmd_flags & REQ_PRIO) {
- WARN_ON(!cfqq->prio_pending);
- cfqq->prio_pending--;
- }
-}
-
-static enum elv_merge cfq_merge(struct request_queue *q, struct request **req,
- struct bio *bio)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct request *__rq;
-
- __rq = cfq_find_rq_fmerge(cfqd, bio);
- if (__rq && elv_bio_merge_ok(__rq, bio)) {
- *req = __rq;
- return ELEVATOR_FRONT_MERGE;
- }
-
- return ELEVATOR_NO_MERGE;
-}
-
-static void cfq_merged_request(struct request_queue *q, struct request *req,
- enum elv_merge type)
-{
- if (type == ELEVATOR_FRONT_MERGE) {
- struct cfq_queue *cfqq = RQ_CFQQ(req);
-
- cfq_reposition_rq_rb(cfqq, req);
- }
-}
-
-static void cfq_bio_merged(struct request_queue *q, struct request *req,
- struct bio *bio)
-{
- cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_opf);
-}
-
-static void
-cfq_merged_requests(struct request_queue *q, struct request *rq,
- struct request *next)
-{
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
- struct cfq_data *cfqd = q->elevator->elevator_data;
-
- /*
- * reposition in fifo if next is older than rq
- */
- if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
- next->fifo_time < rq->fifo_time &&
- cfqq == RQ_CFQQ(next)) {
- list_move(&rq->queuelist, &next->queuelist);
- rq->fifo_time = next->fifo_time;
- }
-
- if (cfqq->next_rq == next)
- cfqq->next_rq = rq;
- cfq_remove_request(next);
- cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
-
- cfqq = RQ_CFQQ(next);
- /*
- * all requests of this queue are merged to other queues, delete it
- * from the service tree. If it's the active_queue,
- * cfq_dispatch_requests() will choose to expire it or do idle
- */
- if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
- cfqq != cfqd->active_queue)
- cfq_del_cfqq_rr(cfqd, cfqq);
-}
-
-static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq,
- struct bio *bio)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- bool is_sync = op_is_sync(bio->bi_opf);
- struct cfq_io_cq *cic;
- struct cfq_queue *cfqq;
-
- /*
- * Disallow merge of a sync bio into an async request.
- */
- if (is_sync && !rq_is_sync(rq))
- return false;
-
- /*
- * Lookup the cfqq that this bio will be queued with and allow
- * merge only if rq is queued there.
- */
- cic = cfq_cic_lookup(cfqd, current->io_context);
- if (!cic)
- return false;
-
- cfqq = cic_to_cfqq(cic, is_sync);
- return cfqq == RQ_CFQQ(rq);
-}
-
-static int cfq_allow_rq_merge(struct request_queue *q, struct request *rq,
- struct request *next)
-{
- return RQ_CFQQ(rq) == RQ_CFQQ(next);
-}
-
-static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- hrtimer_try_to_cancel(&cfqd->idle_slice_timer);
- cfqg_stats_update_idle_time(cfqq->cfqg);
-}
-
-static void __cfq_set_active_queue(struct cfq_data *cfqd,
- struct cfq_queue *cfqq)
-{
- if (cfqq) {
- cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
- cfqd->serving_wl_class, cfqd->serving_wl_type);
- cfqg_stats_update_avg_queue_size(cfqq->cfqg);
- cfqq->slice_start = 0;
- cfqq->dispatch_start = ktime_get_ns();
- cfqq->allocated_slice = 0;
- cfqq->slice_end = 0;
- cfqq->slice_dispatch = 0;
- cfqq->nr_sectors = 0;
-
- cfq_clear_cfqq_wait_request(cfqq);
- cfq_clear_cfqq_must_dispatch(cfqq);
- cfq_clear_cfqq_must_alloc_slice(cfqq);
- cfq_clear_cfqq_fifo_expire(cfqq);
- cfq_mark_cfqq_slice_new(cfqq);
-
- cfq_del_timer(cfqd, cfqq);
- }
-
- cfqd->active_queue = cfqq;
-}
-
-/*
- * current cfqq expired its slice (or was too idle), select new one
- */
-static void
-__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- bool timed_out)
-{
- cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
-
- if (cfq_cfqq_wait_request(cfqq))
- cfq_del_timer(cfqd, cfqq);
-
- cfq_clear_cfqq_wait_request(cfqq);
- cfq_clear_cfqq_wait_busy(cfqq);
-
- /*
- * If this cfqq is shared between multiple processes, check to
- * make sure that those processes are still issuing I/Os within
- * the mean seek distance. If not, it may be time to break the
- * queues apart again.
- */
- if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
- cfq_mark_cfqq_split_coop(cfqq);
-
- /*
- * store what was left of this slice, if the queue idled/timed out
- */
- if (timed_out) {
- if (cfq_cfqq_slice_new(cfqq))
- cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
- else
- cfqq->slice_resid = cfqq->slice_end - ktime_get_ns();
- cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid);
- }
-
- cfq_group_served(cfqd, cfqq->cfqg, cfqq);
-
- if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
- cfq_del_cfqq_rr(cfqd, cfqq);
-
- cfq_resort_rr_list(cfqd, cfqq);
-
- if (cfqq == cfqd->active_queue)
- cfqd->active_queue = NULL;
-
- if (cfqd->active_cic) {
- put_io_context(cfqd->active_cic->icq.ioc);
- cfqd->active_cic = NULL;
- }
-}
-
-static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
-{
- struct cfq_queue *cfqq = cfqd->active_queue;
-
- if (cfqq)
- __cfq_slice_expired(cfqd, cfqq, timed_out);
-}
-
-/*
- * Get next queue for service. Unless we have a queue preemption,
- * we'll simply select the first cfqq in the service tree.
- */
-static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
-{
- struct cfq_rb_root *st = st_for(cfqd->serving_group,
- cfqd->serving_wl_class, cfqd->serving_wl_type);
-
- if (!cfqd->rq_queued)
- return NULL;
-
- /* There is nothing to dispatch */
- if (!st)
- return NULL;
- if (RB_EMPTY_ROOT(&st->rb.rb_root))
- return NULL;
- return cfq_rb_first(st);
-}
-
-static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
-{
- struct cfq_group *cfqg;
- struct cfq_queue *cfqq;
- int i, j;
- struct cfq_rb_root *st;
-
- if (!cfqd->rq_queued)
- return NULL;
-
- cfqg = cfq_get_next_cfqg(cfqd);
- if (!cfqg)
- return NULL;
-
- for_each_cfqg_st(cfqg, i, j, st) {
- cfqq = cfq_rb_first(st);
- if (cfqq)
- return cfqq;
- }
- return NULL;
-}
-
-/*
- * Get and set a new active queue for service.
- */
-static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
- struct cfq_queue *cfqq)
-{
- if (!cfqq)
- cfqq = cfq_get_next_queue(cfqd);
-
- __cfq_set_active_queue(cfqd, cfqq);
- return cfqq;
-}
-
-static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
- struct request *rq)
-{
- if (blk_rq_pos(rq) >= cfqd->last_position)
- return blk_rq_pos(rq) - cfqd->last_position;
- else
- return cfqd->last_position - blk_rq_pos(rq);
-}
-
-static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct request *rq)
-{
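-	/*
-	 * "close" means within CFQQ_CLOSE_THR sectors of the last dispatch
-	 * position (defined earlier in this file as 8 * 1024 sectors, ~4MB).
-	 */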
- return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
-}
-
-static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
- struct cfq_queue *cur_cfqq)
-{
- struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
- struct rb_node *parent, *node;
- struct cfq_queue *__cfqq;
- sector_t sector = cfqd->last_position;
-
- if (RB_EMPTY_ROOT(root))
- return NULL;
-
- /*
- * First, if we find a request starting at the end of the last
- * request, choose it.
- */
- __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
- if (__cfqq)
- return __cfqq;
-
- /*
- * If the exact sector wasn't found, the parent of the NULL leaf
- * will contain the closest sector.
- */
- __cfqq = rb_entry(parent, struct cfq_queue, p_node);
- if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
- return __cfqq;
-
- if (blk_rq_pos(__cfqq->next_rq) < sector)
- node = rb_next(&__cfqq->p_node);
- else
- node = rb_prev(&__cfqq->p_node);
- if (!node)
- return NULL;
-
- __cfqq = rb_entry(node, struct cfq_queue, p_node);
- if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
- return __cfqq;
-
- return NULL;
-}
-
-/*
- * cfqd - obvious
- * cur_cfqq - passed in so that we don't decide that the current queue is
- * closely cooperating with itself.
- *
- * So, basically we're assuming that cur_cfqq has dispatched at least
- * one request, and that cfqd->last_position reflects a position on the disk
- * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
- * assumption.
- */
-static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
- struct cfq_queue *cur_cfqq)
-{
- struct cfq_queue *cfqq;
-
- if (cfq_class_idle(cur_cfqq))
- return NULL;
- if (!cfq_cfqq_sync(cur_cfqq))
- return NULL;
- if (CFQQ_SEEKY(cur_cfqq))
- return NULL;
-
- /*
- * Don't search priority tree if it's the only queue in the group.
- */
- if (cur_cfqq->cfqg->nr_cfqq == 1)
- return NULL;
-
- /*
-	 * We should notice if some of the queues are cooperating, e.g.
-	 * working closely on the same area of the disk. In that case,
-	 * we can group them together so we don't waste time idling.
- */
- cfqq = cfqq_close(cfqd, cur_cfqq);
- if (!cfqq)
- return NULL;
-
- /* If new queue belongs to different cfq_group, don't choose it */
- if (cur_cfqq->cfqg != cfqq->cfqg)
- return NULL;
-
- /*
- * It only makes sense to merge sync queues.
- */
- if (!cfq_cfqq_sync(cfqq))
- return NULL;
- if (CFQQ_SEEKY(cfqq))
- return NULL;
-
- /*
- * Do not merge queues of different priority classes
- */
- if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
- return NULL;
-
- return cfqq;
-}
-
-/*
- * Determine whether we should enforce idle window for this queue.
- */
-
-static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- enum wl_class_t wl_class = cfqq_class(cfqq);
- struct cfq_rb_root *st = cfqq->service_tree;
-
- BUG_ON(!st);
- BUG_ON(!st->count);
-
- if (!cfqd->cfq_slice_idle)
- return false;
-
- /* We never do for idle class queues. */
- if (wl_class == IDLE_WORKLOAD)
- return false;
-
- /* We do for queues that were marked with idle window flag. */
- if (cfq_cfqq_idle_window(cfqq) &&
- !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
- return true;
-
- /*
- * Otherwise, we do only if they are the last ones
- * in their service tree.
- */
- if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
- !cfq_io_thinktime_big(cfqd, &st->ttime, false))
- return true;
- cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
- return false;
-}
-
-static void cfq_arm_slice_timer(struct cfq_data *cfqd)
-{
- struct cfq_queue *cfqq = cfqd->active_queue;
- struct cfq_rb_root *st = cfqq->service_tree;
- struct cfq_io_cq *cic;
- u64 sl, group_idle = 0;
- u64 now = ktime_get_ns();
-
- /*
- * SSD device without seek penalty, disable idling. But only do so
- * for devices that support queuing, otherwise we still have a problem
- * with sync vs async workloads.
- */
- if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
- !cfqd->cfq_group_idle)
- return;
-
- WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
- WARN_ON(cfq_cfqq_slice_new(cfqq));
-
- /*
- * idle is disabled, either manually or by past process history
- */
- if (!cfq_should_idle(cfqd, cfqq)) {
- /* no queue idling. Check for group idling */
- if (cfqd->cfq_group_idle)
- group_idle = cfqd->cfq_group_idle;
- else
- return;
- }
-
- /*
- * still active requests from this queue, don't idle
- */
- if (cfqq->dispatched)
- return;
-
- /*
- * task has exited, don't wait
- */
- cic = cfqd->active_cic;
- if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
- return;
-
- /*
- * If our average think time is larger than the remaining time
- * slice, then don't idle. This avoids overrunning the allotted
- * time slice.
- */
- if (sample_valid(cic->ttime.ttime_samples) &&
- (cfqq->slice_end - now < cic->ttime.ttime_mean)) {
- cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%llu",
- cic->ttime.ttime_mean);
- return;
- }
-
- /*
-	 * If there are other queues in the group, or this is the only group
-	 * and its think time is too big, don't do group idle.
- */
- if (group_idle &&
- (cfqq->cfqg->nr_cfqq > 1 ||
- cfq_io_thinktime_big(cfqd, &st->ttime, true)))
- return;
-
- cfq_mark_cfqq_wait_request(cfqq);
-
- if (group_idle)
- sl = cfqd->cfq_group_idle;
- else
- sl = cfqd->cfq_slice_idle;
-
- hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl),
- HRTIMER_MODE_REL);
- cfqg_stats_set_start_idle_time(cfqq->cfqg);
- cfq_log_cfqq(cfqd, cfqq, "arm_idle: %llu group_idle: %d", sl,
- group_idle ? 1 : 0);
-}
-
-/*
- * Move request from internal lists to the request queue dispatch list.
- */
-static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
-
- cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
-
- cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
- cfq_remove_request(rq);
- cfqq->dispatched++;
- (RQ_CFQG(rq))->dispatched++;
- elv_dispatch_sort(q, rq);
-
- cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
- cfqq->nr_sectors += blk_rq_sectors(rq);
-}
-
-/*
- * return expired entry, or NULL to just start from scratch in rbtree
- */
-static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
-{
- struct request *rq = NULL;
-
- if (cfq_cfqq_fifo_expire(cfqq))
- return NULL;
-
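-	/*
-	 * The fifo is checked at most once per slice; the flag set below is
-	 * cleared again in __cfq_set_active_queue() when a new slice starts.
-	 */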
- cfq_mark_cfqq_fifo_expire(cfqq);
-
- if (list_empty(&cfqq->fifo))
- return NULL;
-
- rq = rq_entry_fifo(cfqq->fifo.next);
- if (ktime_get_ns() < rq->fifo_time)
- rq = NULL;
-
- return rq;
-}
-
-static inline int
-cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- const int base_rq = cfqd->cfq_slice_async_rq;
-
- WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
-
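-	/*
-	 * e.g. with the default cfq_slice_async_rq of 2 and ioprio 4 this
-	 * allows 2 * 2 * (8 - 4) = 16 requests.
-	 */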
- return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
-}
-
-/*
- * Must be called with the queue_lock held.
- */
-static int cfqq_process_refs(struct cfq_queue *cfqq)
-{
- int process_refs, io_refs;
-
- io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
- process_refs = cfqq->ref - io_refs;
- BUG_ON(process_refs < 0);
- return process_refs;
-}
-
-static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
-{
- int process_refs, new_process_refs;
- struct cfq_queue *__cfqq;
-
- /*
- * If there are no process references on the new_cfqq, then it is
- * unsafe to follow the ->new_cfqq chain as other cfqq's in the
- * chain may have dropped their last reference (not just their
- * last process reference).
- */
- if (!cfqq_process_refs(new_cfqq))
- return;
-
- /* Avoid a circular list and skip interim queue merges */
- while ((__cfqq = new_cfqq->new_cfqq)) {
- if (__cfqq == cfqq)
- return;
- new_cfqq = __cfqq;
- }
-
- process_refs = cfqq_process_refs(cfqq);
- new_process_refs = cfqq_process_refs(new_cfqq);
- /*
- * If the process for the cfqq has gone away, there is no
- * sense in merging the queues.
- */
- if (process_refs == 0 || new_process_refs == 0)
- return;
-
- /*
- * Merge in the direction of the lesser amount of work.
- */
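-	/* e.g. process_refs = 3, new_process_refs = 5: cfqq merges into new_cfqq */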
- if (new_process_refs >= process_refs) {
- cfqq->new_cfqq = new_cfqq;
- new_cfqq->ref += process_refs;
- } else {
- new_cfqq->new_cfqq = cfqq;
- cfqq->ref += new_process_refs;
- }
-}
-
-static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
- struct cfq_group *cfqg, enum wl_class_t wl_class)
-{
- struct cfq_queue *queue;
- int i;
- bool key_valid = false;
- u64 lowest_key = 0;
- enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
-
- for (i = 0; i <= SYNC_WORKLOAD; ++i) {
- /* select the one with lowest rb_key */
- queue = cfq_rb_first(st_for(cfqg, wl_class, i));
- if (queue &&
- (!key_valid || queue->rb_key < lowest_key)) {
- lowest_key = queue->rb_key;
- cur_best = i;
- key_valid = true;
- }
- }
-
- return cur_best;
-}
-
-static void
-choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
-{
- u64 slice;
- unsigned count;
- struct cfq_rb_root *st;
- u64 group_slice;
- enum wl_class_t original_class = cfqd->serving_wl_class;
- u64 now = ktime_get_ns();
-
- /* Choose next priority. RT > BE > IDLE */
- if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
- cfqd->serving_wl_class = RT_WORKLOAD;
- else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
- cfqd->serving_wl_class = BE_WORKLOAD;
- else {
- cfqd->serving_wl_class = IDLE_WORKLOAD;
- cfqd->workload_expires = now + jiffies_to_nsecs(1);
- return;
- }
-
- if (original_class != cfqd->serving_wl_class)
- goto new_workload;
-
- /*
- * For RT and BE, we have to choose also the type
- * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
- * expiration time
- */
- st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
- count = st->count;
-
- /*
- * check workload expiration, and that we still have other queues ready
- */
- if (count && !(now > cfqd->workload_expires))
- return;
-
-new_workload:
- /* otherwise select new workload type */
- cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
- cfqd->serving_wl_class);
- st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
- count = st->count;
-
- /*
- * the workload slice is computed as a fraction of target latency
- * proportional to the number of queues in that workload, over
- * all the queues in the same priority class
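-	 *
-	 * e.g. (illustrative numbers) a 300ms group slice with 2 queues of
-	 * the chosen type out of 3 busy queues in the class yields a 200ms
-	 * workload slice.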
- */
- group_slice = cfq_group_slice(cfqd, cfqg);
-
- slice = div_u64(group_slice * count,
- max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
- cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
- cfqg)));
-
- if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
- u64 tmp;
-
- /*
- * Async queues are currently system wide. Just taking
-		 * proportion of queues within the same group will lead to a
-		 * higher async ratio system wide, as the root group generally
-		 * has a higher weight. A more accurate approach would be to
-		 * calculate the system wide async/sync ratio.
- */
- tmp = cfqd->cfq_target_latency *
- cfqg_busy_async_queues(cfqd, cfqg);
- tmp = div_u64(tmp, cfqd->busy_queues);
- slice = min_t(u64, slice, tmp);
-
- /* async workload slice is scaled down according to
- * the sync/async slice ratio. */
- slice = div64_u64(slice*cfqd->cfq_slice[0], cfqd->cfq_slice[1]);
- } else
- /* sync workload slice is at least 2 * cfq_slice_idle */
- slice = max(slice, 2 * cfqd->cfq_slice_idle);
-
- slice = max_t(u64, slice, CFQ_MIN_TT);
- cfq_log(cfqd, "workload slice:%llu", slice);
- cfqd->workload_expires = now + slice;
-}
-
-static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
-{
- struct cfq_rb_root *st = &cfqd->grp_service_tree;
- struct cfq_group *cfqg;
-
- if (RB_EMPTY_ROOT(&st->rb.rb_root))
- return NULL;
- cfqg = cfq_rb_first_group(st);
- update_min_vdisktime(st);
- return cfqg;
-}
-
-static void cfq_choose_cfqg(struct cfq_data *cfqd)
-{
- struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
- u64 now = ktime_get_ns();
-
- cfqd->serving_group = cfqg;
-
- /* Restore the workload type data */
- if (cfqg->saved_wl_slice) {
- cfqd->workload_expires = now + cfqg->saved_wl_slice;
- cfqd->serving_wl_type = cfqg->saved_wl_type;
- cfqd->serving_wl_class = cfqg->saved_wl_class;
- } else
- cfqd->workload_expires = now - 1;
-
- choose_wl_class_and_type(cfqd, cfqg);
-}
-
-/*
- * Select a queue for service. If we have a current active queue,
- * check whether to continue servicing it, or retrieve and set a new one.
- */
-static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
-{
- struct cfq_queue *cfqq, *new_cfqq = NULL;
- u64 now = ktime_get_ns();
-
- cfqq = cfqd->active_queue;
- if (!cfqq)
- goto new_queue;
-
- if (!cfqd->rq_queued)
- return NULL;
-
- /*
- * We were waiting for group to get backlogged. Expire the queue
- */
- if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
- goto expire;
-
- /*
- * The active queue has run out of time, expire it and select new.
- */
- if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
- /*
- * If slice had not expired at the completion of last request
- * we might not have turned on wait_busy flag. Don't expire
- * the queue yet. Allow the group to get backlogged.
- *
-		 * The very fact that we have used the slice means we have
-		 * been idling all along on this queue, and it should be
-		 * ok to wait for this request to complete.
- */
- if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
- && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
- cfqq = NULL;
- goto keep_queue;
- } else
- goto check_group_idle;
- }
-
- /*
- * The active queue has requests and isn't expired, allow it to
- * dispatch.
- */
- if (!RB_EMPTY_ROOT(&cfqq->sort_list))
- goto keep_queue;
-
- /*
- * If another queue has a request waiting within our mean seek
- * distance, let it run. The expire code will check for close
- * cooperators and put the close queue at the front of the service
- * tree. If possible, merge the expiring queue with the new cfqq.
- */
- new_cfqq = cfq_close_cooperator(cfqd, cfqq);
- if (new_cfqq) {
- if (!cfqq->new_cfqq)
- cfq_setup_merge(cfqq, new_cfqq);
- goto expire;
- }
-
- /*
- * No requests pending. If the active queue still has requests in
- * flight or is idling for a new request, allow either of these
- * conditions to happen (or time out) before selecting a new queue.
- */
- if (hrtimer_active(&cfqd->idle_slice_timer)) {
- cfqq = NULL;
- goto keep_queue;
- }
-
- /*
- * This is a deep seek queue, but the device is much faster than
- * the queue can deliver, don't idle
-	 */
- if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
- (cfq_cfqq_slice_new(cfqq) ||
- (cfqq->slice_end - now > now - cfqq->slice_start))) {
- cfq_clear_cfqq_deep(cfqq);
- cfq_clear_cfqq_idle_window(cfqq);
- }
-
- if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
- cfqq = NULL;
- goto keep_queue;
- }
-
- /*
- * If group idle is enabled and there are requests dispatched from
- * this group, wait for requests to complete.
- */
-check_group_idle:
- if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
- cfqq->cfqg->dispatched &&
- !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
- cfqq = NULL;
- goto keep_queue;
- }
-
-expire:
- cfq_slice_expired(cfqd, 0);
-new_queue:
- /*
- * Current queue expired. Check if we have to switch to a new
- * service tree
- */
- if (!new_cfqq)
- cfq_choose_cfqg(cfqd);
-
- cfqq = cfq_set_active_queue(cfqd, new_cfqq);
-keep_queue:
- return cfqq;
-}
-
-static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
-{
- int dispatched = 0;
-
- while (cfqq->next_rq) {
- cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
- dispatched++;
- }
-
- BUG_ON(!list_empty(&cfqq->fifo));
-
- /* By default cfqq is not expired if it is empty. Do it explicitly */
- __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
- return dispatched;
-}
-
-/*
- * Drain our current requests. Used for barriers and when switching
- * io schedulers on-the-fly.
- */
-static int cfq_forced_dispatch(struct cfq_data *cfqd)
-{
- struct cfq_queue *cfqq;
- int dispatched = 0;
-
- /* Expire the timeslice of the current active queue first */
- cfq_slice_expired(cfqd, 0);
- while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
- __cfq_set_active_queue(cfqd, cfqq);
- dispatched += __cfq_forced_dispatch_cfqq(cfqq);
- }
-
- BUG_ON(cfqd->busy_queues);
-
- cfq_log(cfqd, "forced_dispatch=%d", dispatched);
- return dispatched;
-}
-
-static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
- struct cfq_queue *cfqq)
-{
- u64 now = ktime_get_ns();
-
- /* the queue hasn't finished any request, can't estimate */
- if (cfq_cfqq_slice_new(cfqq))
- return true;
- if (now + cfqd->cfq_slice_idle * cfqq->dispatched > cfqq->slice_end)
- return true;
-
- return false;
-}
-
-static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- unsigned int max_dispatch;
-
- if (cfq_cfqq_must_dispatch(cfqq))
- return true;
-
- /*
- * Drain async requests before we start sync IO
- */
- if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
- return false;
-
- /*
- * If this is an async queue and we have sync IO in flight, let it wait
- */
- if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
- return false;
-
- max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
- if (cfq_class_idle(cfqq))
- max_dispatch = 1;
-
- /*
- * Does this cfqq already have too much IO in flight?
- */
- if (cfqq->dispatched >= max_dispatch) {
- bool promote_sync = false;
- /*
- * idle queue must always only have a single IO in flight
- */
- if (cfq_class_idle(cfqq))
- return false;
-
- /*
-		 * If there is only one sync queue
-		 * we can ignore the async queues here and give the sync
-		 * queue no dispatch limit. The reason is that a sync queue can
-		 * preempt an async queue, so limiting the sync queue doesn't
-		 * make sense. This is useful for the aiostress test.
- */
- if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
- promote_sync = true;
-
- /*
- * We have other queues, don't allow more IO from this one
- */
- if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
- !promote_sync)
- return false;
-
- /*
- * Sole queue user, no limit
- */
- if (cfqd->busy_queues == 1 || promote_sync)
- max_dispatch = -1;
- else
- /*
- * Normally we start throttling cfqq when cfq_quantum/2
-			 * requests have been dispatched. But we can drive
-			 * deeper queue depths at the beginning of the slice,
-			 * subject to the upper limit of cfq_quantum.
-			 */
- max_dispatch = cfqd->cfq_quantum;
- }
-
- /*
- * Async queues must wait a bit before being allowed dispatch.
- * We also ramp up the dispatch depth gradually for async IO,
- * based on the last sync IO we serviced
- */
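-	/*
-	 * e.g. (illustrative) if the last delayed sync completed 300ms ago
-	 * and the sync slice is 100ms, async depth is capped at 3 below.
-	 */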
- if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
- u64 last_sync = ktime_get_ns() - cfqd->last_delayed_sync;
- unsigned int depth;
-
- depth = div64_u64(last_sync, cfqd->cfq_slice[1]);
- if (!depth && !cfqq->dispatched)
- depth = 1;
- if (depth < max_dispatch)
- max_dispatch = depth;
- }
-
- /*
- * If we're below the current max, allow a dispatch
- */
- return cfqq->dispatched < max_dispatch;
-}
-
-/*
- * Dispatch a request from cfqq, moving them to the request queue
- * dispatch list.
- */
-static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- struct request *rq;
-
- BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
- rq = cfq_check_fifo(cfqq);
- if (rq)
- cfq_mark_cfqq_must_dispatch(cfqq);
-
- if (!cfq_may_dispatch(cfqd, cfqq))
- return false;
-
- /*
- * follow expired path, else get first next available
- */
- if (!rq)
- rq = cfqq->next_rq;
- else
- cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
-
- /*
- * insert request into driver dispatch list
- */
- cfq_dispatch_insert(cfqd->queue, rq);
-
- if (!cfqd->active_cic) {
- struct cfq_io_cq *cic = RQ_CIC(rq);
-
- atomic_long_inc(&cic->icq.ioc->refcount);
- cfqd->active_cic = cic;
- }
-
- return true;
-}
-
-/*
- * Find the cfqq that we need to service and move a request from that to the
- * dispatch list
- */
-static int cfq_dispatch_requests(struct request_queue *q, int force)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_queue *cfqq;
-
- if (!cfqd->busy_queues)
- return 0;
-
- if (unlikely(force))
- return cfq_forced_dispatch(cfqd);
-
- cfqq = cfq_select_queue(cfqd);
- if (!cfqq)
- return 0;
-
- /*
- * Dispatch a request from this cfqq, if it is allowed
- */
- if (!cfq_dispatch_request(cfqd, cfqq))
- return 0;
-
- cfqq->slice_dispatch++;
- cfq_clear_cfqq_must_dispatch(cfqq);
-
- /*
-	 * expire an async queue immediately if it has used up its slice. idle
-	 * queues always expire after 1 dispatch round.
- */
- if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
- cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
- cfq_class_idle(cfqq))) {
- cfqq->slice_end = ktime_get_ns() + 1;
- cfq_slice_expired(cfqd, 0);
- }
-
- cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
- return 1;
-}
-
-/*
- * task holds one reference to the queue, dropped when task exits. each rq
- * in-flight on this queue also holds a reference, dropped when rq is freed.
- *
- * Each cfq queue took a reference on the parent group. Drop it now.
- * queue lock must be held here.
- */
-static void cfq_put_queue(struct cfq_queue *cfqq)
-{
- struct cfq_data *cfqd = cfqq->cfqd;
- struct cfq_group *cfqg;
-
- BUG_ON(cfqq->ref <= 0);
-
- cfqq->ref--;
- if (cfqq->ref)
- return;
-
- cfq_log_cfqq(cfqd, cfqq, "put_queue");
- BUG_ON(rb_first(&cfqq->sort_list));
- BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
- cfqg = cfqq->cfqg;
-
- if (unlikely(cfqd->active_queue == cfqq)) {
- __cfq_slice_expired(cfqd, cfqq, 0);
- cfq_schedule_dispatch(cfqd);
- }
-
- BUG_ON(cfq_cfqq_on_rr(cfqq));
- kmem_cache_free(cfq_pool, cfqq);
- cfqg_put(cfqg);
-}
-
-static void cfq_put_cooperator(struct cfq_queue *cfqq)
-{
- struct cfq_queue *__cfqq, *next;
-
- /*
- * If this queue was scheduled to merge with another queue, be
- * sure to drop the reference taken on that queue (and others in
- * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
- */
- __cfqq = cfqq->new_cfqq;
- while (__cfqq) {
- if (__cfqq == cfqq) {
- WARN(1, "cfqq->new_cfqq loop detected\n");
- break;
- }
- next = __cfqq->new_cfqq;
- cfq_put_queue(__cfqq);
- __cfqq = next;
- }
-}
-
-static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- if (unlikely(cfqq == cfqd->active_queue)) {
- __cfq_slice_expired(cfqd, cfqq, 0);
- cfq_schedule_dispatch(cfqd);
- }
-
- cfq_put_cooperator(cfqq);
-
- cfq_put_queue(cfqq);
-}
-
-static void cfq_init_icq(struct io_cq *icq)
-{
- struct cfq_io_cq *cic = icq_to_cic(icq);
-
- cic->ttime.last_end_request = ktime_get_ns();
-}
-
-static void cfq_exit_icq(struct io_cq *icq)
-{
- struct cfq_io_cq *cic = icq_to_cic(icq);
- struct cfq_data *cfqd = cic_to_cfqd(cic);
-
- if (cic_to_cfqq(cic, false)) {
- cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
- cic_set_cfqq(cic, NULL, false);
- }
-
- if (cic_to_cfqq(cic, true)) {
- cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
- cic_set_cfqq(cic, NULL, true);
- }
-}
-
-static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
-{
- struct task_struct *tsk = current;
- int ioprio_class;
-
- if (!cfq_cfqq_prio_changed(cfqq))
- return;
-
- ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
- switch (ioprio_class) {
- default:
- printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
- /* fall through */
- case IOPRIO_CLASS_NONE:
- /*
- * no prio set, inherit CPU scheduling settings
- */
- cfqq->ioprio = task_nice_ioprio(tsk);
- cfqq->ioprio_class = task_nice_ioclass(tsk);
- break;
- case IOPRIO_CLASS_RT:
- cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
- cfqq->ioprio_class = IOPRIO_CLASS_RT;
- break;
- case IOPRIO_CLASS_BE:
- cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
- cfqq->ioprio_class = IOPRIO_CLASS_BE;
- break;
- case IOPRIO_CLASS_IDLE:
- cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
- cfqq->ioprio = 7;
- cfq_clear_cfqq_idle_window(cfqq);
- break;
- }
-
- /*
- * keep track of original prio settings in case we have to temporarily
- * elevate the priority of this queue
- */
- cfqq->org_ioprio = cfqq->ioprio;
- cfqq->org_ioprio_class = cfqq->ioprio_class;
- cfq_clear_cfqq_prio_changed(cfqq);
-}
-
-static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
-{
- int ioprio = cic->icq.ioc->ioprio;
- struct cfq_data *cfqd = cic_to_cfqd(cic);
- struct cfq_queue *cfqq;
-
- /*
- * Check whether ioprio has changed. The condition may trigger
- * spuriously on a newly created cic but there's no harm.
- */
- if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
- return;
-
- cfqq = cic_to_cfqq(cic, false);
- if (cfqq) {
- cfq_put_queue(cfqq);
- cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
- cic_set_cfqq(cic, cfqq, false);
- }
-
- cfqq = cic_to_cfqq(cic, true);
- if (cfqq)
- cfq_mark_cfqq_prio_changed(cfqq);
-
- cic->ioprio = ioprio;
-}
-
-static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- pid_t pid, bool is_sync)
-{
- RB_CLEAR_NODE(&cfqq->rb_node);
- RB_CLEAR_NODE(&cfqq->p_node);
- INIT_LIST_HEAD(&cfqq->fifo);
-
- cfqq->ref = 0;
- cfqq->cfqd = cfqd;
-
- cfq_mark_cfqq_prio_changed(cfqq);
-
- if (is_sync) {
- if (!cfq_class_idle(cfqq))
- cfq_mark_cfqq_idle_window(cfqq);
- cfq_mark_cfqq_sync(cfqq);
- }
- cfqq->pid = pid;
-}
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
-{
- struct cfq_data *cfqd = cic_to_cfqd(cic);
- struct cfq_queue *cfqq;
- uint64_t serial_nr;
-
- rcu_read_lock();
- serial_nr = bio_blkcg(bio)->css.serial_nr;
- rcu_read_unlock();
-
- /*
- * Check whether blkcg has changed. The condition may trigger
- * spuriously on a newly created cic but there's no harm.
- */
- if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
- return;
-
- /*
- * Drop reference to queues. New queues will be assigned in new
- * group upon arrival of fresh requests.
- */
- cfqq = cic_to_cfqq(cic, false);
- if (cfqq) {
- cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
- cic_set_cfqq(cic, NULL, false);
- cfq_put_queue(cfqq);
- }
-
- cfqq = cic_to_cfqq(cic, true);
- if (cfqq) {
- cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
- cic_set_cfqq(cic, NULL, true);
- cfq_put_queue(cfqq);
- }
-
- cic->blkcg_serial_nr = serial_nr;
-}
-#else
-static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
-{
-}
-#endif /* CONFIG_CFQ_GROUP_IOSCHED */
-
-static struct cfq_queue **
-cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
-{
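-	/*
-	 * async_cfqq[0][] holds the RT priority levels and async_cfqq[1][]
-	 * the BE levels; all IOPRIO_CLASS_IDLE tasks share async_idle_cfqq.
-	 */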
- switch (ioprio_class) {
- case IOPRIO_CLASS_RT:
- return &cfqg->async_cfqq[0][ioprio];
- case IOPRIO_CLASS_NONE:
- ioprio = IOPRIO_NORM;
- /* fall through */
- case IOPRIO_CLASS_BE:
- return &cfqg->async_cfqq[1][ioprio];
- case IOPRIO_CLASS_IDLE:
- return &cfqg->async_idle_cfqq;
- default:
- BUG();
- }
-}
-
-static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
- struct bio *bio)
-{
- int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
- int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
- struct cfq_queue **async_cfqq = NULL;
- struct cfq_queue *cfqq;
- struct cfq_group *cfqg;
-
- rcu_read_lock();
- cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
- if (!cfqg) {
- cfqq = &cfqd->oom_cfqq;
- goto out;
- }
-
- if (!is_sync) {
- if (!ioprio_valid(cic->ioprio)) {
- struct task_struct *tsk = current;
- ioprio = task_nice_ioprio(tsk);
- ioprio_class = task_nice_ioclass(tsk);
- }
- async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
- cfqq = *async_cfqq;
- if (cfqq)
- goto out;
- }
-
- cfqq = kmem_cache_alloc_node(cfq_pool,
- GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
- cfqd->queue->node);
- if (!cfqq) {
- cfqq = &cfqd->oom_cfqq;
- goto out;
- }
-
- /* cfq_init_cfqq() assumes cfqq->ioprio_class is initialized. */
- cfqq->ioprio_class = IOPRIO_CLASS_NONE;
- cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
- cfq_init_prio_data(cfqq, cic);
- cfq_link_cfqq_cfqg(cfqq, cfqg);
- cfq_log_cfqq(cfqd, cfqq, "alloced");
-
- if (async_cfqq) {
- /* a new async queue is created, pin and remember */
- cfqq->ref++;
- *async_cfqq = cfqq;
- }
-out:
- cfqq->ref++;
- rcu_read_unlock();
- return cfqq;
-}
-
-static void
-__cfq_update_io_thinktime(struct cfq_ttime *ttime, u64 slice_idle)
-{
- u64 elapsed = ktime_get_ns() - ttime->last_end_request;
- elapsed = min(elapsed, 2UL * slice_idle);
-
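-	/*
-	 * Fixed-point EWMA with weight 7/8: ttime_samples converges toward
-	 * 256, so ttime_mean approximates the recent mean think time in ns
-	 * (the +128 rounds the division).
-	 */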
- ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
- ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
- ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
- ttime->ttime_samples);
-}
-
-static void
-cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct cfq_io_cq *cic)
-{
- if (cfq_cfqq_sync(cfqq)) {
- __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
- __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
- cfqd->cfq_slice_idle);
- }
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
-#endif
-}
-
-static void
-cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct request *rq)
-{
- sector_t sdist = 0;
- sector_t n_sec = blk_rq_sectors(rq);
- if (cfqq->last_request_pos) {
- if (cfqq->last_request_pos < blk_rq_pos(rq))
- sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
- else
- sdist = cfqq->last_request_pos - blk_rq_pos(rq);
- }
-
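-	/*
-	 * seek_history is a 32-bit shift register over recent requests: a set
-	 * bit marks a seeky request (or, on non-rotational devices, a small
-	 * one). CFQQ_SEEKY() tests how many of these bits are set.
-	 */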
- cfqq->seek_history <<= 1;
- if (blk_queue_nonrot(cfqd->queue))
- cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
- else
- cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
-}
-
-static inline bool req_noidle(struct request *req)
-{
- return req_op(req) == REQ_OP_WRITE &&
- (req->cmd_flags & (REQ_SYNC | REQ_IDLE)) == REQ_SYNC;
-}
-
-/*
- * Disable idle window if the process thinks too long or seeks so much that
- * it doesn't matter
- */
-static void
-cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct cfq_io_cq *cic)
-{
- int old_idle, enable_idle;
-
- /*
- * Don't idle for async or idle io prio class
- */
- if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
- return;
-
- enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
-
- if (cfqq->queued[0] + cfqq->queued[1] >= 4)
- cfq_mark_cfqq_deep(cfqq);
-
- if (cfqq->next_rq && req_noidle(cfqq->next_rq))
- enable_idle = 0;
- else if (!atomic_read(&cic->icq.ioc->active_ref) ||
- !cfqd->cfq_slice_idle ||
- (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
- enable_idle = 0;
- else if (sample_valid(cic->ttime.ttime_samples)) {
- if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
- enable_idle = 0;
- else
- enable_idle = 1;
- }
-
- if (old_idle != enable_idle) {
- cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
- if (enable_idle)
- cfq_mark_cfqq_idle_window(cfqq);
- else
- cfq_clear_cfqq_idle_window(cfqq);
- }
-}
-
-/*
- * Check if new_cfqq should preempt the currently active queue. Return false
- * for no (or if we aren't sure); returning true causes a preempt.
- */
-static bool
-cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
- struct request *rq)
-{
- struct cfq_queue *cfqq;
-
- cfqq = cfqd->active_queue;
- if (!cfqq)
- return false;
-
- if (cfq_class_idle(new_cfqq))
- return false;
-
- if (cfq_class_idle(cfqq))
- return true;
-
- /*
- * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
- */
- if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
- return false;
-
- /*
- * if the new request is sync, but the currently running queue is
- * not, let the sync request have priority.
- */
- if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
- return true;
-
- /*
- * Treat ancestors of current cgroup the same way as current cgroup.
- * For anybody else we disallow preemption to guarantee service
- * fairness among cgroups.
- */
- if (!cfqg_is_descendant(cfqq->cfqg, new_cfqq->cfqg))
- return false;
-
- if (cfq_slice_used(cfqq))
- return true;
-
- /*
- * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
- */
- if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
- return true;
-
- WARN_ON_ONCE(cfqq->ioprio_class != new_cfqq->ioprio_class);
- /* Allow preemption only if we are idling on sync-noidle tree */
- if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
- cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
- RB_EMPTY_ROOT(&cfqq->sort_list))
- return true;
-
- /*
- * So both queues are sync. Let the new request get disk time if
- * it's a metadata request and the current queue is doing regular IO.
- */
- if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
- return true;
-
- /* An idle queue should not be idle now for some reason */
- if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
- return true;
-
- if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
- return false;
-
- /*
- * if this request is as-good as one we would expect from the
- * current cfqq, let it preempt
- */
- if (cfq_rq_close(cfqd, cfqq, rq))
- return true;
-
- return false;
-}
-
-/*
- * cfqq preempts the active queue. if we allowed preempt with no slice left,
- * let it have half of its nominal slice.
- */
-static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
-
- cfq_log_cfqq(cfqd, cfqq, "preempt");
- cfq_slice_expired(cfqd, 1);
-
- /*
- * workload type is changed, don't save slice, otherwise preempt
- * doesn't happen
- */
- if (old_type != cfqq_type(cfqq))
- cfqq->cfqg->saved_wl_slice = 0;
-
- /*
-	 * Put the new queue at the front of the current list,
- * so we know that it will be selected next.
- */
- BUG_ON(!cfq_cfqq_on_rr(cfqq));
-
- cfq_service_tree_add(cfqd, cfqq, 1);
-
- cfqq->slice_end = 0;
- cfq_mark_cfqq_slice_new(cfqq);
-}
-
-/*
- * Called when a new fs request (rq) is added (to cfqq). Check if there's
- * something we should do about it
- */
-static void
-cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct request *rq)
-{
- struct cfq_io_cq *cic = RQ_CIC(rq);
-
- cfqd->rq_queued++;
- if (rq->cmd_flags & REQ_PRIO)
- cfqq->prio_pending++;
-
- cfq_update_io_thinktime(cfqd, cfqq, cic);
- cfq_update_io_seektime(cfqd, cfqq, rq);
- cfq_update_idle_window(cfqd, cfqq, cic);
-
- cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
-
- if (cfqq == cfqd->active_queue) {
- /*
- * Remember that we saw a request from this process, but
- * don't start queuing just yet. Otherwise we risk seeing lots
- * of tiny requests, because we disrupt the normal plugging
- * and merging. If the request is already larger than a single
- * page, let it rip immediately. For that case we assume that
-		 * merging is already done. Ditto for a busy system that
-		 * has other work pending - don't risk waiting until the
-		 * idle timer unplugs before continuing work.
- */
- if (cfq_cfqq_wait_request(cfqq)) {
- if (blk_rq_bytes(rq) > PAGE_SIZE ||
- cfqd->busy_queues > 1) {
- cfq_del_timer(cfqd, cfqq);
- cfq_clear_cfqq_wait_request(cfqq);
- __blk_run_queue(cfqd->queue);
- } else {
- cfqg_stats_update_idle_time(cfqq->cfqg);
- cfq_mark_cfqq_must_dispatch(cfqq);
- }
- }
- } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
- /*
-		 * not the active queue - expire the current slice if it is
-		 * idle and has expired its mean think time, or this new queue
-		 * has some old slice time left and is of higher priority, or
-		 * this new queue is RT and the current one is BE
- */
- cfq_preempt_queue(cfqd, cfqq);
- __blk_run_queue(cfqd->queue);
- }
-}
-
-static void cfq_insert_request(struct request_queue *q, struct request *rq)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
-
- cfq_log_cfqq(cfqd, cfqq, "insert_request");
- cfq_init_prio_data(cfqq, RQ_CIC(rq));
-
- rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
- list_add_tail(&rq->queuelist, &cfqq->fifo);
- cfq_add_rq_rb(rq);
- cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
- rq->cmd_flags);
- cfq_rq_enqueued(cfqd, cfqq, rq);
-}
-
-/*
- * Update hw_tag based on peak queue depth over 50 samples under
- * sufficient load.
- */
-static void cfq_update_hw_tag(struct cfq_data *cfqd)
-{
- struct cfq_queue *cfqq = cfqd->active_queue;
-
- if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
- cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
-
- if (cfqd->hw_tag == 1)
- return;
-
- if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
- cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
- return;
-
- /*
-	 * If the active queue doesn't have enough requests and can idle, cfq
-	 * might not dispatch sufficient requests to the hardware. Don't zero
-	 * hw_tag in this case.
- */
- if (cfqq && cfq_cfqq_idle_window(cfqq) &&
- cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
- CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
- return;
-
- if (cfqd->hw_tag_samples++ < 50)
- return;
-
- if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
- cfqd->hw_tag = 1;
- else
- cfqd->hw_tag = 0;
-}
-
-static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- struct cfq_io_cq *cic = cfqd->active_cic;
- u64 now = ktime_get_ns();
-
- /* If the queue already has requests, don't wait */
- if (!RB_EMPTY_ROOT(&cfqq->sort_list))
- return false;
-
- /* If there are other queues in the group, don't wait */
- if (cfqq->cfqg->nr_cfqq > 1)
- return false;
-
- /* the only queue in the group, but think time is big */
- if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
- return false;
-
- if (cfq_slice_used(cfqq))
- return true;
-
- /* if slice left is less than think time, wait busy */
- if (cic && sample_valid(cic->ttime.ttime_samples)
- && (cfqq->slice_end - now < cic->ttime.ttime_mean))
- return true;
-
- /*
-	 * If think time is less than a jiffy, then ttime_mean=0 and the above
-	 * will not be true. It might happen that the slice has not expired yet
- * but will expire soon (4-5 ns) during select_queue(). To cover the
- * case where think time is less than a jiffy, mark the queue wait
- * busy if only 1 jiffy is left in the slice.
- */
- if (cfqq->slice_end - now <= jiffies_to_nsecs(1))
- return true;
-
- return false;
-}
-
-static void cfq_completed_request(struct request_queue *q, struct request *rq)
-{
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
- struct cfq_data *cfqd = cfqq->cfqd;
- const int sync = rq_is_sync(rq);
- u64 now = ktime_get_ns();
-
- cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", req_noidle(rq));
-
- cfq_update_hw_tag(cfqd);
-
- WARN_ON(!cfqd->rq_in_driver);
- WARN_ON(!cfqq->dispatched);
- cfqd->rq_in_driver--;
- cfqq->dispatched--;
- (RQ_CFQG(rq))->dispatched--;
- cfqg_stats_update_completion(cfqq->cfqg, rq->start_time_ns,
- rq->io_start_time_ns, rq->cmd_flags);
-
- cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
-
- if (sync) {
- struct cfq_rb_root *st;
-
- RQ_CIC(rq)->ttime.last_end_request = now;
-
- if (cfq_cfqq_on_rr(cfqq))
- st = cfqq->service_tree;
- else
- st = st_for(cfqq->cfqg, cfqq_class(cfqq),
- cfqq_type(cfqq));
-
- st->ttime.last_end_request = now;
- if (rq->start_time_ns + cfqd->cfq_fifo_expire[1] <= now)
- cfqd->last_delayed_sync = now;
- }
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- cfqq->cfqg->ttime.last_end_request = now;
-#endif
-
- /*
- * If this is the active queue, check if it needs to be expired,
- * or if we want to idle in case it has no pending requests.
- */
- if (cfqd->active_queue == cfqq) {
- const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
-
- if (cfq_cfqq_slice_new(cfqq)) {
- cfq_set_prio_slice(cfqd, cfqq);
- cfq_clear_cfqq_slice_new(cfqq);
- }
-
- /*
-		 * Should we wait for the next request to come in before we expire
-		 * the queue?
- */
- if (cfq_should_wait_busy(cfqd, cfqq)) {
- u64 extend_sl = cfqd->cfq_slice_idle;
- if (!cfqd->cfq_slice_idle)
- extend_sl = cfqd->cfq_group_idle;
- cfqq->slice_end = now + extend_sl;
- cfq_mark_cfqq_wait_busy(cfqq);
- cfq_log_cfqq(cfqd, cfqq, "will busy wait");
- }
-
- /*
- * Idling is not enabled on:
- * - expired queues
- * - idle-priority queues
- * - async queues
- * - queues with still some requests queued
- * - when there is a close cooperator
- */
- if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
- cfq_slice_expired(cfqd, 1);
- else if (sync && cfqq_empty &&
- !cfq_close_cooperator(cfqd, cfqq)) {
- cfq_arm_slice_timer(cfqd);
- }
- }
-
- if (!cfqd->rq_in_driver)
- cfq_schedule_dispatch(cfqd);
-}
-
-static void cfqq_boost_on_prio(struct cfq_queue *cfqq, unsigned int op)
-{
- /*
- * If REQ_PRIO is set, boost class and prio level, if it's below
- * BE/NORM. If prio is not set, restore the potentially boosted
- * class/prio level.
- */
- if (!(op & REQ_PRIO)) {
- cfqq->ioprio_class = cfqq->org_ioprio_class;
- cfqq->ioprio = cfqq->org_ioprio;
- } else {
- if (cfq_class_idle(cfqq))
- cfqq->ioprio_class = IOPRIO_CLASS_BE;
- if (cfqq->ioprio > IOPRIO_NORM)
- cfqq->ioprio = IOPRIO_NORM;
- }
-}
-
-static inline int __cfq_may_queue(struct cfq_queue *cfqq)
-{
- if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
- cfq_mark_cfqq_must_alloc_slice(cfqq);
- return ELV_MQUEUE_MUST;
- }
-
- return ELV_MQUEUE_MAY;
-}
-
-static int cfq_may_queue(struct request_queue *q, unsigned int op)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct task_struct *tsk = current;
- struct cfq_io_cq *cic;
- struct cfq_queue *cfqq;
-
- /*
- * don't force setup of a queue from here, as a call to may_queue
- * does not necessarily imply that a request actually will be queued.
- * so just lookup a possibly existing queue, or return 'may queue'
- * if that fails
- */
- cic = cfq_cic_lookup(cfqd, tsk->io_context);
- if (!cic)
- return ELV_MQUEUE_MAY;
-
- cfqq = cic_to_cfqq(cic, op_is_sync(op));
- if (cfqq) {
- cfq_init_prio_data(cfqq, cic);
- cfqq_boost_on_prio(cfqq, op);
-
- return __cfq_may_queue(cfqq);
- }
-
- return ELV_MQUEUE_MAY;
-}
-
-/*
- * queue lock held here
- */
-static void cfq_put_request(struct request *rq)
-{
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
-
- if (cfqq) {
- const int rw = rq_data_dir(rq);
-
- BUG_ON(!cfqq->allocated[rw]);
- cfqq->allocated[rw]--;
-
- /* Put down rq reference on cfqg */
- cfqg_put(RQ_CFQG(rq));
- rq->elv.priv[0] = NULL;
- rq->elv.priv[1] = NULL;
-
- cfq_put_queue(cfqq);
- }
-}
-
-static struct cfq_queue *
-cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
- struct cfq_queue *cfqq)
-{
- cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
- cic_set_cfqq(cic, cfqq->new_cfqq, 1);
- cfq_mark_cfqq_coop(cfqq->new_cfqq);
- cfq_put_queue(cfqq);
- return cic_to_cfqq(cic, 1);
-}
-
-/*
- * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
- * was the last process referring to said cfqq.
- */
-static struct cfq_queue *
-split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
-{
- if (cfqq_process_refs(cfqq) == 1) {
- cfqq->pid = current->pid;
- cfq_clear_cfqq_coop(cfqq);
- cfq_clear_cfqq_split_coop(cfqq);
- return cfqq;
- }
-
- cic_set_cfqq(cic, NULL, 1);
-
- cfq_put_cooperator(cfqq);
-
- cfq_put_queue(cfqq);
- return NULL;
-}
-/*
- * Allocate cfq data structures associated with this request.
- */
-static int
-cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
- gfp_t gfp_mask)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
- const int rw = rq_data_dir(rq);
- const bool is_sync = rq_is_sync(rq);
- struct cfq_queue *cfqq;
-
- spin_lock_irq(q->queue_lock);
-
- check_ioprio_changed(cic, bio);
- check_blkcg_changed(cic, bio);
-new_queue:
- cfqq = cic_to_cfqq(cic, is_sync);
- if (!cfqq || cfqq == &cfqd->oom_cfqq) {
- if (cfqq)
- cfq_put_queue(cfqq);
- cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
- cic_set_cfqq(cic, cfqq, is_sync);
- } else {
- /*
- * If the queue was seeky for too long, break it apart.
- */
- if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
- cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
- cfqq = split_cfqq(cic, cfqq);
- if (!cfqq)
- goto new_queue;
- }
-
- /*
- * Check to see if this queue is scheduled to merge with
- * another, closely cooperating queue. The merging of
- * queues happens here as it must be done in process context.
- * The reference on new_cfqq was taken in merge_cfqqs.
- */
- if (cfqq->new_cfqq)
- cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
- }
-
- cfqq->allocated[rw]++;
-
- cfqq->ref++;
- cfqg_get(cfqq->cfqg);
- rq->elv.priv[0] = cfqq;
- rq->elv.priv[1] = cfqq->cfqg;
- spin_unlock_irq(q->queue_lock);
-
- return 0;
-}
-
-static void cfq_kick_queue(struct work_struct *work)
-{
- struct cfq_data *cfqd =
- container_of(work, struct cfq_data, unplug_work);
- struct request_queue *q = cfqd->queue;
-
- spin_lock_irq(q->queue_lock);
- __blk_run_queue(cfqd->queue);
- spin_unlock_irq(q->queue_lock);
-}
-
-/*
- * Timer running if the active_queue is currently idling inside its time slice
- */
-static enum hrtimer_restart cfq_idle_slice_timer(struct hrtimer *timer)
-{
- struct cfq_data *cfqd = container_of(timer, struct cfq_data,
- idle_slice_timer);
- struct cfq_queue *cfqq;
- unsigned long flags;
- int timed_out = 1;
-
- cfq_log(cfqd, "idle timer fired");
-
- spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-
- cfqq = cfqd->active_queue;
- if (cfqq) {
- timed_out = 0;
-
- /*
- * We saw a request before the queue expired, let it through
- */
- if (cfq_cfqq_must_dispatch(cfqq))
- goto out_kick;
-
- /*
- * expired
- */
- if (cfq_slice_used(cfqq))
- goto expire;
-
- /*
-		 * only expire and reinvoke the request handler if there are
-		 * other queues with pending requests
- */
- if (!cfqd->busy_queues)
- goto out_cont;
-
- /*
- * not expired and it has a request pending, let it dispatch
- */
- if (!RB_EMPTY_ROOT(&cfqq->sort_list))
- goto out_kick;
-
- /*
-		 * Queue depth flag is reset only when idling didn't succeed
- */
- cfq_clear_cfqq_deep(cfqq);
- }
-expire:
- cfq_slice_expired(cfqd, timed_out);
-out_kick:
- cfq_schedule_dispatch(cfqd);
-out_cont:
- spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
- return HRTIMER_NORESTART;
-}
-
-static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
-{
- hrtimer_cancel(&cfqd->idle_slice_timer);
- cancel_work_sync(&cfqd->unplug_work);
-}
-
-static void cfq_exit_queue(struct elevator_queue *e)
-{
- struct cfq_data *cfqd = e->elevator_data;
- struct request_queue *q = cfqd->queue;
-
- cfq_shutdown_timer_wq(cfqd);
-
- spin_lock_irq(q->queue_lock);
-
- if (cfqd->active_queue)
- __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
-
- spin_unlock_irq(q->queue_lock);
-
- cfq_shutdown_timer_wq(cfqd);
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- blkcg_deactivate_policy(q, &blkcg_policy_cfq);
-#else
- kfree(cfqd->root_group);
-#endif
- kfree(cfqd);
-}
-
-static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
-{
- struct cfq_data *cfqd;
- struct blkcg_gq *blkg __maybe_unused;
- int i, ret;
- struct elevator_queue *eq;
-
- eq = elevator_alloc(q, e);
- if (!eq)
- return -ENOMEM;
-
- cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
- if (!cfqd) {
- kobject_put(&eq->kobj);
- return -ENOMEM;
- }
- eq->elevator_data = cfqd;
-
- cfqd->queue = q;
- spin_lock_irq(q->queue_lock);
- q->elevator = eq;
- spin_unlock_irq(q->queue_lock);
-
- /* Init root service tree */
- cfqd->grp_service_tree = CFQ_RB_ROOT;
-
- /* Init root group and prefer root group over other groups by default */
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
- if (ret)
- goto out_free;
-
- cfqd->root_group = blkg_to_cfqg(q->root_blkg);
-#else
- ret = -ENOMEM;
- cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
- GFP_KERNEL, cfqd->queue->node);
- if (!cfqd->root_group)
- goto out_free;
-
- cfq_init_cfqg_base(cfqd->root_group);
- cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
- cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
-#endif
-
- /*
- * Not strictly needed (since RB_ROOT just clears the node and we
- * zeroed cfqd on alloc), but better be safe in case someone decides
- * to add magic to the rb code
- */
- for (i = 0; i < CFQ_PRIO_LISTS; i++)
- cfqd->prio_trees[i] = RB_ROOT;
-
- /*
- * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
- * Grab a permanent reference to it, so that the normal code flow
- * will not attempt to free it. oom_cfqq is linked to root_group
- * but shouldn't hold a reference as it'll never be unlinked. Lose
- * the reference from linking right away.
- */
- cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
- cfqd->oom_cfqq.ref++;
-
- spin_lock_irq(q->queue_lock);
- cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
- cfqg_put(cfqd->root_group);
- spin_unlock_irq(q->queue_lock);
-
- hrtimer_init(&cfqd->idle_slice_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
-
- INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
-
- cfqd->cfq_quantum = cfq_quantum;
- cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
- cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
- cfqd->cfq_back_max = cfq_back_max;
- cfqd->cfq_back_penalty = cfq_back_penalty;
- cfqd->cfq_slice[0] = cfq_slice_async;
- cfqd->cfq_slice[1] = cfq_slice_sync;
- cfqd->cfq_target_latency = cfq_target_latency;
- cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
- cfqd->cfq_slice_idle = cfq_slice_idle;
- cfqd->cfq_group_idle = cfq_group_idle;
- cfqd->cfq_latency = 1;
- cfqd->hw_tag = -1;
- /*
- * we optimistically start assuming sync ops weren't delayed in last
- * second, in order to have larger depth for async operations.
- */
- cfqd->last_delayed_sync = ktime_get_ns() - NSEC_PER_SEC;
- return 0;
-
-out_free:
- kfree(cfqd);
- kobject_put(&eq->kobj);
- return ret;
-}
-
-static void cfq_registered_queue(struct request_queue *q)
-{
- struct elevator_queue *e = q->elevator;
- struct cfq_data *cfqd = e->elevator_data;
-
- /*
- * Default to IOPS mode with no idling for SSDs
- */
- if (blk_queue_nonrot(q))
- cfqd->cfq_slice_idle = 0;
- wbt_disable_default(q);
-}
-
-/*
- * sysfs parts below -->
- */
-static ssize_t
-cfq_var_show(unsigned int var, char *page)
-{
- return sprintf(page, "%u\n", var);
-}
-
-static void
-cfq_var_store(unsigned int *var, const char *page)
-{
- char *p = (char *) page;
-
- *var = simple_strtoul(p, &p, 10);
-}
-
-#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
-static ssize_t __FUNC(struct elevator_queue *e, char *page) \
-{ \
- struct cfq_data *cfqd = e->elevator_data; \
- u64 __data = __VAR; \
- if (__CONV) \
- __data = div_u64(__data, NSEC_PER_MSEC); \
- return cfq_var_show(__data, (page)); \
-}
-SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
-SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
-SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
-SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
-SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
-SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
-SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
-SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
-SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
-SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
-SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
-SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
-#undef SHOW_FUNCTION
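As a reading aid, this is roughly what one of the SHOW_FUNCTION invocations above expands to after preprocessing (hand-expanded, so treat it as a sketch rather than verbatim compiler output):

/* Hand expansion of SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1):
 * the stored nanosecond value is scaled to milliseconds for sysfs. */
static ssize_t cfq_slice_sync_show(struct elevator_queue *e, char *page)
{
	struct cfq_data *cfqd = e->elevator_data;
	u64 __data = cfqd->cfq_slice[1];

	if (1)					/* __CONV was 1 here */
		__data = div_u64(__data, NSEC_PER_MSEC);
	return cfq_var_show(__data, (page));
}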
-
-#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
-static ssize_t __FUNC(struct elevator_queue *e, char *page) \
-{ \
- struct cfq_data *cfqd = e->elevator_data; \
- u64 __data = __VAR; \
- __data = div_u64(__data, NSEC_PER_USEC); \
- return cfq_var_show(__data, (page)); \
-}
-USEC_SHOW_FUNCTION(cfq_slice_idle_us_show, cfqd->cfq_slice_idle);
-USEC_SHOW_FUNCTION(cfq_group_idle_us_show, cfqd->cfq_group_idle);
-USEC_SHOW_FUNCTION(cfq_slice_sync_us_show, cfqd->cfq_slice[1]);
-USEC_SHOW_FUNCTION(cfq_slice_async_us_show, cfqd->cfq_slice[0]);
-USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
-#undef USEC_SHOW_FUNCTION
-
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
-static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
-{ \
- struct cfq_data *cfqd = e->elevator_data; \
- unsigned int __data, __min = (MIN), __max = (MAX); \
- \
- cfq_var_store(&__data, (page)); \
- if (__data < __min) \
- __data = __min; \
- else if (__data > __max) \
- __data = __max; \
- if (__CONV) \
- *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
- else \
- *(__PTR) = __data; \
- return count; \
-}
-STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
- UINT_MAX, 1);
-STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
- UINT_MAX, 1);
-STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
-STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
- UINT_MAX, 0);
-STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
-STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
- UINT_MAX, 0);
-STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
-STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
-#undef STORE_FUNCTION
-
-#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
-static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
-{ \
- struct cfq_data *cfqd = e->elevator_data; \
- unsigned int __data, __min = (MIN), __max = (MAX); \
- \
- cfq_var_store(&__data, (page)); \
- if (__data < __min) \
- __data = __min; \
- else if (__data > __max) \
- __data = __max; \
- *(__PTR) = (u64)__data * NSEC_PER_USEC; \
- return count; \
-}
-USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX);
-USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX);
-USEC_STORE_FUNCTION(cfq_slice_sync_us_store, &cfqd->cfq_slice[1], 1, UINT_MAX);
-USEC_STORE_FUNCTION(cfq_slice_async_us_store, &cfqd->cfq_slice[0], 1, UINT_MAX);
-USEC_STORE_FUNCTION(cfq_target_latency_us_store, &cfqd->cfq_target_latency, 1, UINT_MAX);
-#undef USEC_STORE_FUNCTION
-
-#define CFQ_ATTR(name) \
- __ATTR(name, 0644, cfq_##name##_show, cfq_##name##_store)
-
-static struct elv_fs_entry cfq_attrs[] = {
- CFQ_ATTR(quantum),
- CFQ_ATTR(fifo_expire_sync),
- CFQ_ATTR(fifo_expire_async),
- CFQ_ATTR(back_seek_max),
- CFQ_ATTR(back_seek_penalty),
- CFQ_ATTR(slice_sync),
- CFQ_ATTR(slice_sync_us),
- CFQ_ATTR(slice_async),
- CFQ_ATTR(slice_async_us),
- CFQ_ATTR(slice_async_rq),
- CFQ_ATTR(slice_idle),
- CFQ_ATTR(slice_idle_us),
- CFQ_ATTR(group_idle),
- CFQ_ATTR(group_idle_us),
- CFQ_ATTR(low_latency),
- CFQ_ATTR(target_latency),
- CFQ_ATTR(target_latency_us),
- __ATTR_NULL
-};
-
-static struct elevator_type iosched_cfq = {
- .ops.sq = {
- .elevator_merge_fn = cfq_merge,
- .elevator_merged_fn = cfq_merged_request,
- .elevator_merge_req_fn = cfq_merged_requests,
- .elevator_allow_bio_merge_fn = cfq_allow_bio_merge,
- .elevator_allow_rq_merge_fn = cfq_allow_rq_merge,
- .elevator_bio_merged_fn = cfq_bio_merged,
- .elevator_dispatch_fn = cfq_dispatch_requests,
- .elevator_add_req_fn = cfq_insert_request,
- .elevator_activate_req_fn = cfq_activate_request,
- .elevator_deactivate_req_fn = cfq_deactivate_request,
- .elevator_completed_req_fn = cfq_completed_request,
- .elevator_former_req_fn = elv_rb_former_request,
- .elevator_latter_req_fn = elv_rb_latter_request,
- .elevator_init_icq_fn = cfq_init_icq,
- .elevator_exit_icq_fn = cfq_exit_icq,
- .elevator_set_req_fn = cfq_set_request,
- .elevator_put_req_fn = cfq_put_request,
- .elevator_may_queue_fn = cfq_may_queue,
- .elevator_init_fn = cfq_init_queue,
- .elevator_exit_fn = cfq_exit_queue,
- .elevator_registered_fn = cfq_registered_queue,
- },
- .icq_size = sizeof(struct cfq_io_cq),
- .icq_align = __alignof__(struct cfq_io_cq),
- .elevator_attrs = cfq_attrs,
- .elevator_name = "cfq",
- .elevator_owner = THIS_MODULE,
-};
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static struct blkcg_policy blkcg_policy_cfq = {
- .dfl_cftypes = cfq_blkcg_files,
- .legacy_cftypes = cfq_blkcg_legacy_files,
-
- .cpd_alloc_fn = cfq_cpd_alloc,
- .cpd_init_fn = cfq_cpd_init,
- .cpd_free_fn = cfq_cpd_free,
- .cpd_bind_fn = cfq_cpd_bind,
-
- .pd_alloc_fn = cfq_pd_alloc,
- .pd_init_fn = cfq_pd_init,
- .pd_offline_fn = cfq_pd_offline,
- .pd_free_fn = cfq_pd_free,
- .pd_reset_stats_fn = cfq_pd_reset_stats,
-};
-#endif
-
-static int __init cfq_init(void)
-{
- int ret;
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- ret = blkcg_policy_register(&blkcg_policy_cfq);
- if (ret)
- return ret;
-#else
- cfq_group_idle = 0;
-#endif
-
- ret = -ENOMEM;
- cfq_pool = KMEM_CACHE(cfq_queue, 0);
- if (!cfq_pool)
- goto err_pol_unreg;
-
- ret = elv_register(&iosched_cfq);
- if (ret)
- goto err_free_pool;
-
- return 0;
-
-err_free_pool:
- kmem_cache_destroy(cfq_pool);
-err_pol_unreg:
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- blkcg_policy_unregister(&blkcg_policy_cfq);
-#endif
- return ret;
-}
-
-static void __exit cfq_exit(void)
-{
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- blkcg_policy_unregister(&blkcg_policy_cfq);
-#endif
- elv_unregister(&iosched_cfq);
- kmem_cache_destroy(cfq_pool);
-}
-
-module_init(cfq_init);
-module_exit(cfq_exit);
-
-MODULE_AUTHOR("Jens Axboe");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
deleted file mode 100644
index ef2f1f0..0000000
--- a/block/deadline-iosched.c
+++ /dev/null
@@ -1,560 +0,0 @@
-/*
- * Deadline i/o scheduler.
- *
- * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
- */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/rbtree.h>
-
-/*
- * See Documentation/block/deadline-iosched.txt
- */
-static const int read_expire = HZ / 2; /* max time before a read is submitted. */
-static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
-static const int writes_starved = 2; /* max times reads can starve a write */
-static const int fifo_batch = 16; /* # of sequential requests treated as one
- by the above parameters. For throughput. */
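The defaults above are in jiffies, so their wall-clock value depends on CONFIG_HZ; a quick arithmetic check, assuming HZ=250 (a common but configuration-dependent choice):

/* Defaults above expressed in milliseconds, assuming CONFIG_HZ=250.
 * HZ is a build-time choice, so treat the concrete numbers as an example. */
#include <stdio.h>

int main(void)
{
	int hz = 250;					/* assumed CONFIG_HZ */

	printf("read_expire  = %d ms\n", (hz / 2) * 1000 / hz);	/* 500 ms */
	printf("write_expire = %d ms\n", (5 * hz) * 1000 / hz);	/* 5000 ms */
	return 0;
}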
-
-struct deadline_data {
- /*
- * run time data
- */
-
- /*
- * requests (deadline_rq s) are present on both sort_list and fifo_list
- */
- struct rb_root sort_list[2];
- struct list_head fifo_list[2];
-
- /*
-	 * next in sort order; read, write, or both may be NULL
- */
- struct request *next_rq[2];
- unsigned int batching; /* number of sequential requests made */
- unsigned int starved; /* times reads have starved writes */
-
- /*
- * settings that change how the i/o scheduler behaves
- */
- int fifo_expire[2];
- int fifo_batch;
- int writes_starved;
- int front_merges;
-};
-
-static inline struct rb_root *
-deadline_rb_root(struct deadline_data *dd, struct request *rq)
-{
- return &dd->sort_list[rq_data_dir(rq)];
-}
-
-/*
- * get the request after `rq' in sector-sorted order
- */
-static inline struct request *
-deadline_latter_request(struct request *rq)
-{
- struct rb_node *node = rb_next(&rq->rb_node);
-
- if (node)
- return rb_entry_rq(node);
-
- return NULL;
-}
-
-static void
-deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
-{
- struct rb_root *root = deadline_rb_root(dd, rq);
-
- elv_rb_add(root, rq);
-}
-
-static inline void
-deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
-{
- const int data_dir = rq_data_dir(rq);
-
- if (dd->next_rq[data_dir] == rq)
- dd->next_rq[data_dir] = deadline_latter_request(rq);
-
- elv_rb_del(deadline_rb_root(dd, rq), rq);
-}
-
-/*
- * add rq to rbtree and fifo
- */
-static void
-deadline_add_request(struct request_queue *q, struct request *rq)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
- const int data_dir = rq_data_dir(rq);
-
- /*
- * This may be a requeue of a write request that has locked its
- * target zone. If it is the case, this releases the zone lock.
- */
- blk_req_zone_write_unlock(rq);
-
- deadline_add_rq_rb(dd, rq);
-
- /*
- * set expire time and add to fifo list
- */
- rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
- list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
-}
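The zone write lock taken and released around dispatch and requeue above enforces one invariant: at most one write per zone is in flight, so sequential-write-required zones never see reordered writes. A toy, single-threaded model of that invariant (the kernel's real implementation is a per-queue bitmap in blk-zoned.c; everything below is illustrative):

/* Toy model of per-zone write locking: a write may dispatch only if its
 * target zone is unlocked; requeue or completion unlocks it again.
 * Single-threaded and fixed-size on purpose; illustrative only. */
#include <assert.h>
#include <stdbool.h>

#define NR_ZONES 16

static bool zone_write_locked[NR_ZONES];

static bool zone_trylock(unsigned int zno)
{
	if (zone_write_locked[zno])
		return false;		/* a write to this zone is in flight */
	zone_write_locked[zno] = true;
	return true;
}

static void zone_unlock(unsigned int zno)
{
	zone_write_locked[zno] = false;
}

int main(void)
{
	assert(zone_trylock(3));	/* first write to zone 3 dispatches */
	assert(!zone_trylock(3));	/* a second write must wait */
	zone_unlock(3);			/* completion (or requeue) unlocks */
	assert(zone_trylock(3));
	return 0;
}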
-
-/*
- * remove rq from rbtree and fifo.
- */
-static void deadline_remove_request(struct request_queue *q, struct request *rq)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
-
- rq_fifo_clear(rq);
- deadline_del_rq_rb(dd, rq);
-}
-
-static enum elv_merge
-deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
- struct request *__rq;
-
- /*
- * check for front merge
- */
- if (dd->front_merges) {
- sector_t sector = bio_end_sector(bio);
-
- __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
- if (__rq) {
- BUG_ON(sector != blk_rq_pos(__rq));
-
- if (elv_bio_merge_ok(__rq, bio)) {
- *req = __rq;
- return ELEVATOR_FRONT_MERGE;
- }
- }
- }
-
- return ELEVATOR_NO_MERGE;
-}
-
-static void deadline_merged_request(struct request_queue *q,
- struct request *req, enum elv_merge type)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
-
- /*
- * if the merge was a front merge, we need to reposition request
- */
- if (type == ELEVATOR_FRONT_MERGE) {
- elv_rb_del(deadline_rb_root(dd, req), req);
- deadline_add_rq_rb(dd, req);
- }
-}
-
-static void
-deadline_merged_requests(struct request_queue *q, struct request *req,
- struct request *next)
-{
- /*
- * if next expires before rq, assign its expire time to rq
- * and move into next position (next will be deleted) in fifo
- */
- if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
- if (time_before((unsigned long)next->fifo_time,
- (unsigned long)req->fifo_time)) {
- list_move(&req->queuelist, &next->queuelist);
- req->fifo_time = next->fifo_time;
- }
- }
-
- /*
- * kill knowledge of next, this one is a goner
- */
- deadline_remove_request(q, next);
-}
-
-/*
- * move request from sort list to dispatch queue.
- */
-static inline void
-deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
-{
- struct request_queue *q = rq->q;
-
- /*
- * For a zoned block device, write requests must write lock their
- * target zone.
- */
- blk_req_zone_write_lock(rq);
-
- deadline_remove_request(q, rq);
- elv_dispatch_add_tail(q, rq);
-}
-
-/*
- * move an entry to dispatch queue
- */
-static void
-deadline_move_request(struct deadline_data *dd, struct request *rq)
-{
- const int data_dir = rq_data_dir(rq);
-
- dd->next_rq[READ] = NULL;
- dd->next_rq[WRITE] = NULL;
- dd->next_rq[data_dir] = deadline_latter_request(rq);
-
- /*
- * take it off the sort and fifo list, move
- * to dispatch queue
- */
- deadline_move_to_dispatch(dd, rq);
-}
-
-/*
- * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
- * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
- */
-static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
-{
- struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);
-
- /*
- * rq is expired!
- */
- if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
- return 1;
-
- return 0;
-}
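deadline_check_fifo relies on time_after_eq(), which stays correct across jiffies wraparound because it compares via signed subtraction; the same trick in a standalone userspace sketch:

/* Userspace sketch of the kernel's time_after_eq(): casting the unsigned
 * difference to signed keeps comparisons correct across counter wraparound. */
#include <assert.h>

#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long near_wrap = (unsigned long)-2;	/* just before wrap */
	unsigned long after_wrap = 3;			/* just after wrap */

	assert(time_after_eq(after_wrap, near_wrap));	/* 5 ticks later */
	assert(!time_after_eq(near_wrap, after_wrap));
	return 0;
}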
-
-/*
- * For the specified data direction, return the next request to dispatch using
- * arrival ordered lists.
- */
-static struct request *
-deadline_fifo_request(struct deadline_data *dd, int data_dir)
-{
- struct request *rq;
-
- if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
- return NULL;
-
- if (list_empty(&dd->fifo_list[data_dir]))
- return NULL;
-
- rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
- if (data_dir == READ || !blk_queue_is_zoned(rq->q))
- return rq;
-
- /*
- * Look for a write request that can be dispatched, that is one with
- * an unlocked target zone.
- */
- list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
- if (blk_req_can_dispatch_to_zone(rq))
- return rq;
- }
-
- return NULL;
-}
-
-/*
- * For the specified data direction, return the next request to dispatch using
- * sector position sorted lists.
- */
-static struct request *
-deadline_next_request(struct deadline_data *dd, int data_dir)
-{
- struct request *rq;
-
- if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
- return NULL;
-
- rq = dd->next_rq[data_dir];
- if (!rq)
- return NULL;
-
- if (data_dir == READ || !blk_queue_is_zoned(rq->q))
- return rq;
-
- /*
- * Look for a write request that can be dispatched, that is one with
- * an unlocked target zone.
- */
- while (rq) {
- if (blk_req_can_dispatch_to_zone(rq))
- return rq;
- rq = deadline_latter_request(rq);
- }
-
- return NULL;
-}
-
-/*
- * deadline_dispatch_requests selects the best request according to
- * read/write expire, fifo_batch, etc
- */
-static int deadline_dispatch_requests(struct request_queue *q, int force)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
- const int reads = !list_empty(&dd->fifo_list[READ]);
- const int writes = !list_empty(&dd->fifo_list[WRITE]);
- struct request *rq, *next_rq;
- int data_dir;
-
- /*
- * batches are currently reads XOR writes
- */
- rq = deadline_next_request(dd, WRITE);
- if (!rq)
- rq = deadline_next_request(dd, READ);
-
- if (rq && dd->batching < dd->fifo_batch)
-		/* we have a next request and are still entitled to batch */
- goto dispatch_request;
-
- /*
- * at this point we are not running a batch. select the appropriate
- * data direction (read / write)
- */
-
- if (reads) {
- BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
-
- if (deadline_fifo_request(dd, WRITE) &&
- (dd->starved++ >= dd->writes_starved))
- goto dispatch_writes;
-
- data_dir = READ;
-
- goto dispatch_find_request;
- }
-
- /*
- * there are either no reads or writes have been starved
- */
-
- if (writes) {
-dispatch_writes:
- BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));
-
- dd->starved = 0;
-
- data_dir = WRITE;
-
- goto dispatch_find_request;
- }
-
- return 0;
-
-dispatch_find_request:
- /*
- * we are not running a batch, find best request for selected data_dir
- */
- next_rq = deadline_next_request(dd, data_dir);
- if (deadline_check_fifo(dd, data_dir) || !next_rq) {
- /*
- * A deadline has expired, the last request was in the other
- * direction, or we have run out of higher-sectored requests.
- * Start again from the request with the earliest expiry time.
- */
- rq = deadline_fifo_request(dd, data_dir);
- } else {
- /*
- * The last req was the same dir and we have a next request in
- * sort order. No expired requests so continue on from here.
- */
- rq = next_rq;
- }
-
- /*
- * For a zoned block device, if we only have writes queued and none of
- * them can be dispatched, rq will be NULL.
- */
- if (!rq)
- return 0;
-
- dd->batching = 0;
-
-dispatch_request:
- /*
- * rq is the selected appropriate request.
- */
- dd->batching++;
- deadline_move_request(dd, rq);
-
- return 1;
-}
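Stripped of the rbtree and zone details, the dispatch policy above is a small decision procedure; the condensed restatement below keeps the kernel's counter names but is a sketch, not the scheduler itself:

/* Condensed restatement of deadline_dispatch_requests(): stay in the current
 * batch until fifo_batch requests have been issued, then serve reads unless
 * writes have already been starved writes_starved times. Illustrative only. */
#include <stdio.h>

enum dl_action { DL_CONTINUE_BATCH, DL_START_READS, DL_START_WRITES, DL_IDLE };

struct dl_state {
	int batching, starved;		/* run-time counters */
	int fifo_batch, writes_starved;	/* tunables */
};

static enum dl_action deadline_decide(struct dl_state *s, int have_next_rq,
				      int reads_pending, int writes_pending)
{
	if (have_next_rq && s->batching < s->fifo_batch)
		return DL_CONTINUE_BATCH;

	if (reads_pending) {
		if (writes_pending && s->starved++ >= s->writes_starved)
			goto dispatch_writes;
		return DL_START_READS;
	}
	if (writes_pending) {
dispatch_writes:
		s->starved = 0;
		return DL_START_WRITES;
	}
	return DL_IDLE;
}

int main(void)
{
	struct dl_state s = { 0, 0, 16, 2 };	/* the defaults above */

	printf("%d\n", deadline_decide(&s, 1, 1, 1));	/* 0: continue batch */
	s.batching = s.fifo_batch;
	printf("%d\n", deadline_decide(&s, 1, 1, 1));	/* 1: start reads */
	return 0;
}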
-
-/*
- * For zoned block devices, write unlock the target zone of completed
- * write requests.
- */
-static void
-deadline_completed_request(struct request_queue *q, struct request *rq)
-{
- blk_req_zone_write_unlock(rq);
-}
-
-static void deadline_exit_queue(struct elevator_queue *e)
-{
- struct deadline_data *dd = e->elevator_data;
-
- BUG_ON(!list_empty(&dd->fifo_list[READ]));
- BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
-
- kfree(dd);
-}
-
-/*
- * initialize elevator private data (deadline_data).
- */
-static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
-{
- struct deadline_data *dd;
- struct elevator_queue *eq;
-
- eq = elevator_alloc(q, e);
- if (!eq)
- return -ENOMEM;
-
- dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
- if (!dd) {
- kobject_put(&eq->kobj);
- return -ENOMEM;
- }
- eq->elevator_data = dd;
-
- INIT_LIST_HEAD(&dd->fifo_list[READ]);
- INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
- dd->sort_list[READ] = RB_ROOT;
- dd->sort_list[WRITE] = RB_ROOT;
- dd->fifo_expire[READ] = read_expire;
- dd->fifo_expire[WRITE] = write_expire;
- dd->writes_starved = writes_starved;
- dd->front_merges = 1;
- dd->fifo_batch = fifo_batch;
-
- spin_lock_irq(q->queue_lock);
- q->elevator = eq;
- spin_unlock_irq(q->queue_lock);
- return 0;
-}
-
-/*
- * sysfs parts below
- */
-
-static ssize_t
-deadline_var_show(int var, char *page)
-{
- return sprintf(page, "%d\n", var);
-}
-
-static void
-deadline_var_store(int *var, const char *page)
-{
- char *p = (char *) page;
-
- *var = simple_strtol(p, &p, 10);
-}
-
-#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
-static ssize_t __FUNC(struct elevator_queue *e, char *page) \
-{ \
- struct deadline_data *dd = e->elevator_data; \
- int __data = __VAR; \
- if (__CONV) \
- __data = jiffies_to_msecs(__data); \
- return deadline_var_show(__data, (page)); \
-}
-SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
-SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
-SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
-SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
-SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
-#undef SHOW_FUNCTION
-
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
-static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
-{ \
- struct deadline_data *dd = e->elevator_data; \
- int __data; \
- deadline_var_store(&__data, (page)); \
- if (__data < (MIN)) \
- __data = (MIN); \
- else if (__data > (MAX)) \
- __data = (MAX); \
- if (__CONV) \
- *(__PTR) = msecs_to_jiffies(__data); \
- else \
- *(__PTR) = __data; \
- return count; \
-}
-STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
-STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
-STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
-#undef STORE_FUNCTION
-
-#define DD_ATTR(name) \
- __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
-
-static struct elv_fs_entry deadline_attrs[] = {
- DD_ATTR(read_expire),
- DD_ATTR(write_expire),
- DD_ATTR(writes_starved),
- DD_ATTR(front_merges),
- DD_ATTR(fifo_batch),
- __ATTR_NULL
-};
-
-static struct elevator_type iosched_deadline = {
- .ops.sq = {
- .elevator_merge_fn = deadline_merge,
- .elevator_merged_fn = deadline_merged_request,
- .elevator_merge_req_fn = deadline_merged_requests,
- .elevator_dispatch_fn = deadline_dispatch_requests,
- .elevator_completed_req_fn = deadline_completed_request,
- .elevator_add_req_fn = deadline_add_request,
- .elevator_former_req_fn = elv_rb_former_request,
- .elevator_latter_req_fn = elv_rb_latter_request,
- .elevator_init_fn = deadline_init_queue,
- .elevator_exit_fn = deadline_exit_queue,
- },
-
- .elevator_attrs = deadline_attrs,
- .elevator_name = "deadline",
- .elevator_owner = THIS_MODULE,
-};
-
-static int __init deadline_init(void)
-{
- return elv_register(&iosched_deadline);
-}
-
-static void __exit deadline_exit(void)
-{
- elv_unregister(&iosched_deadline);
-}
-
-module_init(deadline_init);
-module_exit(deadline_exit);
-
-MODULE_AUTHOR("Jens Axboe");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("deadline IO scheduler");
diff --git a/block/elevator.c b/block/elevator.c
index fae58b2..076ba73 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block device elevator/IO-scheduler.
*
@@ -41,6 +42,7 @@
#include "blk.h"
#include "blk-mq-sched.h"
+#include "blk-pm.h"
#include "blk-wbt.h"
static DEFINE_SPINLOCK(elv_list_lock);
@@ -60,10 +62,8 @@
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
- if (e->uses_mq && e->type->ops.mq.allow_merge)
- return e->type->ops.mq.allow_merge(q, rq, bio);
- else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
- return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);
+ if (e->type->ops.allow_merge)
+ return e->type->ops.allow_merge(q, rq, bio);
return 1;
}
@@ -83,8 +83,26 @@
}
EXPORT_SYMBOL(elv_bio_merge_ok);
-static bool elevator_match(const struct elevator_type *e, const char *name)
+static inline bool elv_support_features(unsigned int elv_features,
+ unsigned int required_features)
{
+ return (required_features & elv_features) == required_features;
+}
+
+/**
+ * elevator_match - Test an elevator name and features
+ * @e: Scheduler to test
+ * @name: Elevator name to test
+ * @required_features: Features that the elevator must provide
+ *
+ * Return true if the elevator @e's name matches @name and if @e provides all
+ * the features specified by @required_features.
+ */
+static bool elevator_match(const struct elevator_type *e, const char *name,
+ unsigned int required_features)
+{
+ if (!elv_support_features(e->elevator_features, required_features))
+ return false;
if (!strcmp(e->elevator_name, name))
return true;
if (e->elevator_alias && !strcmp(e->elevator_alias, name))
@@ -93,15 +111,21 @@
return false;
}
-/*
- * Return scheduler with name 'name' and with matching 'mq capability
+/**
+ * elevator_find - Find an elevator
+ * @name: Name of the elevator to find
+ * @required_features: Features that the elevator must provide
+ *
+ * Return the first registered scheduler with name @name that supports the
+ * features @required_features, or NULL otherwise.
*/
-static struct elevator_type *elevator_find(const char *name, bool mq)
+static struct elevator_type *elevator_find(const char *name,
+ unsigned int required_features)
{
struct elevator_type *e;
list_for_each_entry(e, &elv_list, list) {
- if (elevator_match(e, name) && (mq == e->uses_mq))
+ if (elevator_match(e, name, required_features))
return e;
}
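The eligibility test added above is a plain mask-subset check; here it is in isolation. ELEVATOR_F_ZBD_SEQ_WRITE is the zoned-write feature flag from elevator.h, while the second flag is invented purely for the demo:

/* Subset test behind elv_support_features(): an elevator is eligible iff its
 * feature mask contains every bit the queue requires. */
#include <assert.h>

#define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)	/* from elevator.h */
#define DEMO_F_HYPOTHETICAL		(1U << 1)	/* made up for the demo */

static int supports(unsigned int elv_features, unsigned int required)
{
	return (required & elv_features) == required;
}

int main(void)
{
	assert(supports(ELEVATOR_F_ZBD_SEQ_WRITE, 0));	/* nothing required */
	assert(supports(ELEVATOR_F_ZBD_SEQ_WRITE, ELEVATOR_F_ZBD_SEQ_WRITE));
	assert(!supports(0, ELEVATOR_F_ZBD_SEQ_WRITE));	/* filtered out */
	assert(!supports(ELEVATOR_F_ZBD_SEQ_WRITE,
			 ELEVATOR_F_ZBD_SEQ_WRITE | DEMO_F_HYPOTHETICAL));
	return 0;
}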
@@ -120,12 +144,12 @@
spin_lock(&elv_list_lock);
- e = elevator_find(name, q->mq_ops != NULL);
+ e = elevator_find(name, q->required_elevator_features);
if (!e && try_loading) {
spin_unlock(&elv_list_lock);
request_module("%s-iosched", name);
spin_lock(&elv_list_lock);
- e = elevator_find(name, q->mq_ops != NULL);
+ e = elevator_find(name, q->required_elevator_features);
}
if (e && !try_module_get(e->elevator_owner))
@@ -135,40 +159,6 @@
return e;
}
-static char chosen_elevator[ELV_NAME_MAX];
-
-static int __init elevator_setup(char *str)
-{
- /*
- * Be backwards-compatible with previous kernels, so users
- * won't get the wrong elevator.
- */
- strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
- return 1;
-}
-
-__setup("elevator=", elevator_setup);
-
-/* called during boot to load the elevator chosen by the elevator param */
-void __init load_default_elevator_module(void)
-{
- struct elevator_type *e;
-
- if (!chosen_elevator[0])
- return;
-
- /*
- * Boot parameter is deprecated, we haven't supported that for MQ.
- * Only look for non-mq schedulers from here.
- */
- spin_lock(&elv_list_lock);
- e = elevator_find(chosen_elevator, false);
- spin_unlock(&elv_list_lock);
-
- if (!e)
- request_module("%s-iosched", chosen_elevator);
-}
-
static struct kobj_type elv_ktype;
struct elevator_queue *elevator_alloc(struct request_queue *q,
@@ -184,7 +174,6 @@
kobject_init(&eq->kobj, &elv_ktype);
mutex_init(&eq->sysfs_lock);
hash_init(eq->hash);
- eq->uses_mq = e->uses_mq;
return eq;
}
@@ -199,54 +188,11 @@
kfree(e);
}
-/*
- * Use the default elevator specified by config boot param for non-mq devices,
- * or by config option. Don't try to load modules as we could be running off
- * async and request_module() isn't allowed from async.
- */
-int elevator_init(struct request_queue *q)
-{
- struct elevator_type *e = NULL;
- int err = 0;
-
- /*
- * q->sysfs_lock must be held to provide mutual exclusion between
- * elevator_switch() and here.
- */
- mutex_lock(&q->sysfs_lock);
- if (unlikely(q->elevator))
- goto out_unlock;
-
- if (*chosen_elevator) {
- e = elevator_get(q, chosen_elevator, false);
- if (!e)
- printk(KERN_ERR "I/O scheduler %s not found\n",
- chosen_elevator);
- }
-
- if (!e)
- e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
- if (!e) {
- printk(KERN_ERR
- "Default I/O scheduler not found. Using noop.\n");
- e = elevator_get(q, "noop", false);
- }
-
- err = e->ops.sq.elevator_init_fn(q, e);
- if (err)
- elevator_put(e);
-out_unlock:
- mutex_unlock(&q->sysfs_lock);
- return err;
-}
-
-void elevator_exit(struct request_queue *q, struct elevator_queue *e)
+void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
mutex_lock(&e->sysfs_lock);
- if (e->uses_mq && e->type->ops.mq.exit_sched)
+ if (e->type->ops.exit_sched)
blk_mq_exit_sched(q, e);
- else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
- e->type->ops.sq.elevator_exit_fn(e);
mutex_unlock(&e->sysfs_lock);
kobject_put(&e->kobj);
@@ -355,68 +301,6 @@
}
EXPORT_SYMBOL(elv_rb_find);
-/*
- * Insert rq into dispatch queue of q. Queue lock must be held on
- * entry. rq is sort instead into the dispatch queue. To be used by
- * specific elevators.
- */
-void elv_dispatch_sort(struct request_queue *q, struct request *rq)
-{
- sector_t boundary;
- struct list_head *entry;
-
- if (q->last_merge == rq)
- q->last_merge = NULL;
-
- elv_rqhash_del(q, rq);
-
- q->nr_sorted--;
-
- boundary = q->end_sector;
- list_for_each_prev(entry, &q->queue_head) {
- struct request *pos = list_entry_rq(entry);
-
- if (req_op(rq) != req_op(pos))
- break;
- if (rq_data_dir(rq) != rq_data_dir(pos))
- break;
- if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
- break;
- if (blk_rq_pos(rq) >= boundary) {
- if (blk_rq_pos(pos) < boundary)
- continue;
- } else {
- if (blk_rq_pos(pos) >= boundary)
- break;
- }
- if (blk_rq_pos(rq) >= blk_rq_pos(pos))
- break;
- }
-
- list_add(&rq->queuelist, entry);
-}
-EXPORT_SYMBOL(elv_dispatch_sort);
-
-/*
- * Insert rq into dispatch queue of q. Queue lock must be held on
- * entry. rq is added to the back of the dispatch queue. To be used by
- * specific elevators.
- */
-void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
-{
- if (q->last_merge == rq)
- q->last_merge = NULL;
-
- elv_rqhash_del(q, rq);
-
- q->nr_sorted--;
-
- q->end_sector = rq_end_sector(rq);
- q->boundary_rq = rq;
- list_add_tail(&rq->queuelist, &q->queue_head);
-}
-EXPORT_SYMBOL(elv_dispatch_add_tail);
-
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
struct bio *bio)
{
@@ -456,10 +340,8 @@
return ELEVATOR_BACK_MERGE;
}
- if (e->uses_mq && e->type->ops.mq.request_merge)
- return e->type->ops.mq.request_merge(q, req, bio);
- else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
- return e->type->ops.sq.elevator_merge_fn(q, req, bio);
+ if (e->type->ops.request_merge)
+ return e->type->ops.request_merge(q, req, bio);
return ELEVATOR_NO_MERGE;
}
@@ -510,10 +392,8 @@
{
struct elevator_queue *e = q->elevator;
- if (e->uses_mq && e->type->ops.mq.request_merged)
- e->type->ops.mq.request_merged(q, rq, type);
- else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
- e->type->ops.sq.elevator_merged_fn(q, rq, type);
+ if (e->type->ops.request_merged)
+ e->type->ops.request_merged(q, rq, type);
if (type == ELEVATOR_BACK_MERGE)
elv_rqhash_reposition(q, rq);
@@ -525,197 +405,20 @@
struct request *next)
{
struct elevator_queue *e = q->elevator;
- bool next_sorted = false;
- if (e->uses_mq && e->type->ops.mq.requests_merged)
- e->type->ops.mq.requests_merged(q, rq, next);
- else if (e->type->ops.sq.elevator_merge_req_fn) {
- next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
- if (next_sorted)
- e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
- }
+ if (e->type->ops.requests_merged)
+ e->type->ops.requests_merged(q, rq, next);
elv_rqhash_reposition(q, rq);
-
- if (next_sorted) {
- elv_rqhash_del(q, next);
- q->nr_sorted--;
- }
-
q->last_merge = rq;
}
-void elv_bio_merged(struct request_queue *q, struct request *rq,
- struct bio *bio)
-{
- struct elevator_queue *e = q->elevator;
-
- if (WARN_ON_ONCE(e->uses_mq))
- return;
-
- if (e->type->ops.sq.elevator_bio_merged_fn)
- e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
-}
-
-#ifdef CONFIG_PM
-static void blk_pm_requeue_request(struct request *rq)
-{
- if (rq->q->dev && !(rq->rq_flags & RQF_PM))
- rq->q->nr_pending--;
-}
-
-static void blk_pm_add_request(struct request_queue *q, struct request *rq)
-{
- if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
- (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
- pm_request_resume(q->dev);
-}
-#else
-static inline void blk_pm_requeue_request(struct request *rq) {}
-static inline void blk_pm_add_request(struct request_queue *q,
- struct request *rq)
-{
-}
-#endif
-
-void elv_requeue_request(struct request_queue *q, struct request *rq)
-{
- /*
- * it already went through dequeue, we need to decrement the
- * in_flight count again
- */
- if (blk_account_rq(rq)) {
- q->in_flight[rq_is_sync(rq)]--;
- if (rq->rq_flags & RQF_SORTED)
- elv_deactivate_rq(q, rq);
- }
-
- rq->rq_flags &= ~RQF_STARTED;
-
- blk_pm_requeue_request(rq);
-
- __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
-}
-
-void elv_drain_elevator(struct request_queue *q)
-{
- struct elevator_queue *e = q->elevator;
- static int printed;
-
- if (WARN_ON_ONCE(e->uses_mq))
- return;
-
- lockdep_assert_held(q->queue_lock);
-
- while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
- ;
- if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
- printk(KERN_ERR "%s: forced dispatching is broken "
- "(nr_sorted=%u), please report this\n",
- q->elevator->type->elevator_name, q->nr_sorted);
- }
-}
-
-void __elv_add_request(struct request_queue *q, struct request *rq, int where)
-{
- trace_block_rq_insert(q, rq);
-
- blk_pm_add_request(q, rq);
-
- rq->q = q;
-
- if (rq->rq_flags & RQF_SOFTBARRIER) {
- /* barriers are scheduling boundary, update end_sector */
- if (!blk_rq_is_passthrough(rq)) {
- q->end_sector = rq_end_sector(rq);
- q->boundary_rq = rq;
- }
- } else if (!(rq->rq_flags & RQF_ELVPRIV) &&
- (where == ELEVATOR_INSERT_SORT ||
- where == ELEVATOR_INSERT_SORT_MERGE))
- where = ELEVATOR_INSERT_BACK;
-
- switch (where) {
- case ELEVATOR_INSERT_REQUEUE:
- case ELEVATOR_INSERT_FRONT:
- rq->rq_flags |= RQF_SOFTBARRIER;
- list_add(&rq->queuelist, &q->queue_head);
- break;
-
- case ELEVATOR_INSERT_BACK:
- rq->rq_flags |= RQF_SOFTBARRIER;
- elv_drain_elevator(q);
- list_add_tail(&rq->queuelist, &q->queue_head);
- /*
- * We kick the queue here for the following reasons.
- * - The elevator might have returned NULL previously
- * to delay requests and returned them now. As the
- * queue wasn't empty before this request, ll_rw_blk
- * won't run the queue on return, resulting in hang.
- * - Usually, back inserted requests won't be merged
- * with anything. There's no point in delaying queue
- * processing.
- */
- __blk_run_queue(q);
- break;
-
- case ELEVATOR_INSERT_SORT_MERGE:
- /*
- * If we succeed in merging this request with one in the
- * queue already, we are done - rq has now been freed,
- * so no need to do anything further.
- */
- if (elv_attempt_insert_merge(q, rq))
- break;
- /* fall through */
- case ELEVATOR_INSERT_SORT:
- BUG_ON(blk_rq_is_passthrough(rq));
- rq->rq_flags |= RQF_SORTED;
- q->nr_sorted++;
- if (rq_mergeable(rq)) {
- elv_rqhash_add(q, rq);
- if (!q->last_merge)
- q->last_merge = rq;
- }
-
- /*
- * Some ioscheds (cfq) run q->request_fn directly, so
- * rq cannot be accessed after calling
- * elevator_add_req_fn.
- */
- q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
- break;
-
- case ELEVATOR_INSERT_FLUSH:
- rq->rq_flags |= RQF_SOFTBARRIER;
- blk_insert_flush(rq);
- break;
- default:
- printk(KERN_ERR "%s: bad insertion point %d\n",
- __func__, where);
- BUG();
- }
-}
-EXPORT_SYMBOL(__elv_add_request);
-
-void elv_add_request(struct request_queue *q, struct request *rq, int where)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- __elv_add_request(q, rq, where);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(elv_add_request);
-
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
- if (e->uses_mq && e->type->ops.mq.next_request)
- return e->type->ops.mq.next_request(q, rq);
- else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
- return e->type->ops.sq.elevator_latter_req_fn(q, rq);
+ if (e->type->ops.next_request)
+ return e->type->ops.next_request(q, rq);
return NULL;
}
@@ -724,68 +427,12 @@
{
struct elevator_queue *e = q->elevator;
- if (e->uses_mq && e->type->ops.mq.former_request)
- return e->type->ops.mq.former_request(q, rq);
- if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
- return e->type->ops.sq.elevator_former_req_fn(q, rq);
+ if (e->type->ops.former_request)
+ return e->type->ops.former_request(q, rq);
+
return NULL;
}
-int elv_set_request(struct request_queue *q, struct request *rq,
- struct bio *bio, gfp_t gfp_mask)
-{
- struct elevator_queue *e = q->elevator;
-
- if (WARN_ON_ONCE(e->uses_mq))
- return 0;
-
- if (e->type->ops.sq.elevator_set_req_fn)
- return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
- return 0;
-}
-
-void elv_put_request(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (WARN_ON_ONCE(e->uses_mq))
- return;
-
- if (e->type->ops.sq.elevator_put_req_fn)
- e->type->ops.sq.elevator_put_req_fn(rq);
-}
-
-int elv_may_queue(struct request_queue *q, unsigned int op)
-{
- struct elevator_queue *e = q->elevator;
-
- if (WARN_ON_ONCE(e->uses_mq))
- return 0;
-
- if (e->type->ops.sq.elevator_may_queue_fn)
- return e->type->ops.sq.elevator_may_queue_fn(q, op);
-
- return ELV_MQUEUE_MAY;
-}
-
-void elv_completed_request(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (WARN_ON_ONCE(e->uses_mq))
- return;
-
- /*
- * request is released from the driver, io must be done
- */
- if (blk_account_rq(rq)) {
- q->in_flight[rq_is_sync(rq)]--;
- if ((rq->rq_flags & RQF_SORTED) &&
- e->type->ops.sq.elevator_completed_req_fn)
- e->type->ops.sq.elevator_completed_req_fn(q, rq);
- }
-}
-
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
static ssize_t
@@ -833,13 +480,16 @@
.release = elevator_release,
};
-int elv_register_queue(struct request_queue *q)
+/*
+ * elv_register_queue is called from either blk_register_queue or
+ * elevator_switch; an elevator switch is prevented from happening
+ * in both paths, so it is safe not to hold q->sysfs_lock.
+ */
+int elv_register_queue(struct request_queue *q, bool uevent)
{
struct elevator_queue *e = q->elevator;
int error;
- lockdep_assert_held(&q->sysfs_lock);
-
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
if (!error) {
struct elv_fs_entry *attr = e->type->elevator_attrs;
@@ -850,23 +500,27 @@
attr++;
}
}
- kobject_uevent(&e->kobj, KOBJ_ADD);
+ if (uevent)
+ kobject_uevent(&e->kobj, KOBJ_ADD);
+
e->registered = 1;
- if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
- e->type->ops.sq.elevator_registered_fn(q);
}
return error;
}
+/*
+ * elv_unregister_queue is called from either blk_unregister_queue or
+ * elevator_switch; an elevator switch is prevented from happening
+ * in both paths, so it is safe not to hold q->sysfs_lock.
+ */
void elv_unregister_queue(struct request_queue *q)
{
- lockdep_assert_held(&q->sysfs_lock);
-
if (q) {
struct elevator_queue *e = q->elevator;
kobject_uevent(&e->kobj, KOBJ_REMOVE);
kobject_del(&e->kobj);
+
e->registered = 0;
/* Re-enable throttling in case elevator disabled it */
wbt_enable_default(q);
@@ -875,8 +529,6 @@
int elv_register(struct elevator_type *e)
{
- char *def = "";
-
/* create icq_cache if requested */
if (e->icq_size) {
if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
@@ -893,7 +545,7 @@
/* register, don't allow duplicate names */
spin_lock(&elv_list_lock);
- if (elevator_find(e->elevator_name, e->uses_mq)) {
+ if (elevator_find(e->elevator_name, 0)) {
spin_unlock(&elv_list_lock);
kmem_cache_destroy(e->icq_cache);
return -EBUSY;
@@ -901,14 +553,8 @@
list_add_tail(&e->list, &elv_list);
spin_unlock(&elv_list_lock);
- /* print pretty message */
- if (elevator_match(e, chosen_elevator) ||
- (!*chosen_elevator &&
- elevator_match(e, CONFIG_DEFAULT_IOSCHED)))
- def = " (default)";
+ printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
- printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
- def);
return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
@@ -942,6 +588,7 @@
if (q->elevator) {
if (q->elevator->registered)
elv_unregister_queue(q);
+
ioc_clear_queue(q);
elevator_exit(q, q->elevator);
}
@@ -951,7 +598,7 @@
goto out;
if (new_e) {
- ret = elv_register_queue(q);
+ ret = elv_register_queue(q, true);
if (ret) {
elevator_exit(q, q->elevator);
goto out;
@@ -967,37 +614,90 @@
return ret;
}
+static inline bool elv_support_iosched(struct request_queue *q)
+{
+ if (!q->mq_ops ||
+ (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
+ return false;
+ return true;
+}
+
/*
- * For blk-mq devices, we default to using mq-deadline, if available, for single
- * queue devices. If deadline isn't available OR we have multiple queues,
- * default to "none".
+ * For single queue devices, default to using mq-deadline. If we have multiple
+ * queues or mq-deadline is not available, default to "none".
*/
-int elevator_init_mq(struct request_queue *q)
+static struct elevator_type *elevator_get_default(struct request_queue *q)
+{
+ if (q->nr_hw_queues != 1)
+ return NULL;
+
+ return elevator_get(q, "mq-deadline", false);
+}
+
+/*
+ * Get the first elevator providing the features required by the request queue.
+ * Default to "none" if no matching elevator is found.
+ */
+static struct elevator_type *elevator_get_by_features(struct request_queue *q)
+{
+ struct elevator_type *e, *found = NULL;
+
+ spin_lock(&elv_list_lock);
+
+ list_for_each_entry(e, &elv_list, list) {
+ if (elv_support_features(e->elevator_features,
+ q->required_elevator_features)) {
+ found = e;
+ break;
+ }
+ }
+
+ if (found && !try_module_get(found->elevator_owner))
+ found = NULL;
+
+ spin_unlock(&elv_list_lock);
+ return found;
+}
+
+/*
+ * For a device queue that has no required features, use the default elevator
+ * settings. Otherwise, use the first elevator available matching the required
+ * features. If no suitable elevator is found or if the chosen elevator
+ * initialization fails, fall back to the "none" elevator (no elevator).
+ */
+void elevator_init_mq(struct request_queue *q)
{
struct elevator_type *e;
- int err = 0;
+ int err;
- if (q->nr_hw_queues != 1)
- return 0;
+ if (!elv_support_iosched(q))
+ return;
- /*
- * q->sysfs_lock must be held to provide mutual exclusion between
- * elevator_switch() and here.
- */
- mutex_lock(&q->sysfs_lock);
+ WARN_ON_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags));
+
if (unlikely(q->elevator))
- goto out_unlock;
+ return;
- e = elevator_get(q, "mq-deadline", false);
+ if (!q->required_elevator_features)
+ e = elevator_get_default(q);
+ else
+ e = elevator_get_by_features(q);
if (!e)
- goto out_unlock;
+ return;
+
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
err = blk_mq_init_sched(q, e);
- if (err)
+
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
+
+ if (err) {
+ pr_warn("\"%s\" elevator initialization failed, "
+ "falling back to \"none\"\n", e->elevator_name);
elevator_put(e);
-out_unlock:
- mutex_unlock(&q->sysfs_lock);
- return err;
+ }
}
@@ -1009,71 +709,17 @@
*/
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
- struct elevator_queue *old = q->elevator;
- bool old_registered = false;
int err;
lockdep_assert_held(&q->sysfs_lock);
- if (q->mq_ops) {
- blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
- err = elevator_switch_mq(q, new_e);
+ err = elevator_switch_mq(q, new_e);
- blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
-
- return err;
- }
-
- /*
- * Turn on BYPASS and drain all requests w/ elevator private data.
- * Block layer doesn't call into a quiesced elevator - all requests
- * are directly put on the dispatch list without elevator data
- * using INSERT_BACK. All requests have SOFTBARRIER set and no
- * merge happens either.
- */
- if (old) {
- old_registered = old->registered;
-
- blk_queue_bypass_start(q);
-
- /* unregister and clear all auxiliary data of the old elevator */
- if (old_registered)
- elv_unregister_queue(q);
-
- ioc_clear_queue(q);
- }
-
- /* allocate, init and register new elevator */
- err = new_e->ops.sq.elevator_init_fn(q, new_e);
- if (err)
- goto fail_init;
-
- err = elv_register_queue(q);
- if (err)
- goto fail_register;
-
- /* done, kill the old one and finish */
- if (old) {
- elevator_exit(q, old);
- blk_queue_bypass_end(q);
- }
-
- blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
-
- return 0;
-
-fail_register:
- elevator_exit(q, q->elevator);
-fail_init:
- /* switch failed, restore and re-register old elevator */
- if (old) {
- q->elevator = old;
- elv_register_queue(q);
- blk_queue_bypass_end(q);
- }
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
return err;
}
@@ -1087,21 +733,25 @@
struct elevator_type *e;
/* Make sure queue is not in the middle of being removed */
- if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
+ if (!blk_queue_registered(q))
return -ENOENT;
/*
* Special case for mq, turn off scheduling
*/
- if (q->mq_ops && !strncmp(name, "none", 4))
+ if (!strncmp(name, "none", 4)) {
+ if (!q->elevator)
+ return 0;
return elevator_switch(q, NULL);
+ }
strlcpy(elevator_name, name, sizeof(elevator_name));
e = elevator_get(q, strstrip(elevator_name), true);
if (!e)
return -EINVAL;
- if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
+ if (q->elevator &&
+ elevator_match(q->elevator->type, elevator_name, 0)) {
elevator_put(e);
return 0;
}
@@ -1109,20 +759,12 @@
return elevator_switch(q, e);
}
-static inline bool elv_support_iosched(struct request_queue *q)
-{
- if (q->mq_ops && q->tag_set && (q->tag_set->flags &
- BLK_MQ_F_NO_SCHED))
- return false;
- return true;
-}
-
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
size_t count)
{
int ret;
- if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
+ if (!queue_is_mq(q) || !elv_support_iosched(q))
return count;
ret = __elevator_change(q, name);
@@ -1137,10 +779,9 @@
struct elevator_queue *e = q->elevator;
struct elevator_type *elv = NULL;
struct elevator_type *__e;
- bool uses_mq = q->mq_ops != NULL;
int len = 0;
- if (!queue_is_rq_based(q))
+ if (!queue_is_mq(q))
return sprintf(name, "none\n");
if (!q->elevator)
@@ -1150,19 +791,18 @@
spin_lock(&elv_list_lock);
list_for_each_entry(__e, &elv_list, list) {
- if (elv && elevator_match(elv, __e->elevator_name) &&
- (__e->uses_mq == uses_mq)) {
+ if (elv && elevator_match(elv, __e->elevator_name, 0)) {
len += sprintf(name+len, "[%s] ", elv->elevator_name);
continue;
}
- if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
- len += sprintf(name+len, "%s ", __e->elevator_name);
- else if (!__e->uses_mq && !q->mq_ops)
+ if (elv_support_iosched(q) &&
+ elevator_match(__e, __e->elevator_name,
+ q->required_elevator_features))
len += sprintf(name+len, "%s ", __e->elevator_name);
}
spin_unlock(&elv_list_lock);
- if (q->mq_ops && q->elevator)
+ if (q->elevator)
len += sprintf(name+len, "none");
len += sprintf(len+name, "\n");
diff --git a/block/genhd.c b/block/genhd.c
index be5bab2..26b31fc 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* gendisk handling
*/
@@ -47,51 +48,64 @@
void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
{
- if (q->mq_ops)
+ if (queue_is_mq(q))
return;
- atomic_inc(&part->in_flight[rw]);
+ part_stat_local_inc(part, in_flight[rw]);
if (part->partno)
- atomic_inc(&part_to_disk(part)->part0.in_flight[rw]);
+ part_stat_local_inc(&part_to_disk(part)->part0, in_flight[rw]);
}
void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
{
- if (q->mq_ops)
+ if (queue_is_mq(q))
return;
- atomic_dec(&part->in_flight[rw]);
+ part_stat_local_dec(part, in_flight[rw]);
if (part->partno)
- atomic_dec(&part_to_disk(part)->part0.in_flight[rw]);
+ part_stat_local_dec(&part_to_disk(part)->part0, in_flight[rw]);
}
-void part_in_flight(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2])
+unsigned int part_in_flight(struct request_queue *q, struct hd_struct *part)
{
- if (q->mq_ops) {
- blk_mq_in_flight(q, part, inflight);
- return;
+ int cpu;
+ unsigned int inflight;
+
+ if (queue_is_mq(q)) {
+ return blk_mq_in_flight(q, part);
}
- inflight[0] = atomic_read(&part->in_flight[0]) +
- atomic_read(&part->in_flight[1]);
- if (part->partno) {
- part = &part_to_disk(part)->part0;
- inflight[1] = atomic_read(&part->in_flight[0]) +
- atomic_read(&part->in_flight[1]);
+ inflight = 0;
+ for_each_possible_cpu(cpu) {
+ inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
+ part_stat_local_read_cpu(part, in_flight[1], cpu);
}
+ if ((int)inflight < 0)
+ inflight = 0;
+
+ return inflight;
}
void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2])
{
- if (q->mq_ops) {
+ int cpu;
+
+ if (queue_is_mq(q)) {
blk_mq_in_flight_rw(q, part, inflight);
return;
}
- inflight[0] = atomic_read(&part->in_flight[0]);
- inflight[1] = atomic_read(&part->in_flight[1]);
+ inflight[0] = 0;
+ inflight[1] = 0;
+ for_each_possible_cpu(cpu) {
+ inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
+ inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
+ }
+ if ((int)inflight[0] < 0)
+ inflight[0] = 0;
+ if ((int)inflight[1] < 0)
+ inflight[1] = 0;
}
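
Because an I/O can start on one CPU and complete on another, individual per-CPU in-flight counters may be negative, which is why the sums above are clamped at zero after the cast. A toy stand-alone illustration of the aggregation, with plain arrays standing in for the kernel's per-CPU stats:

#include <stdio.h>

#define NR_CPUS 4

/* Per-CPU in-flight deltas; a request may be accounted up on one CPU
 * and down on another, so individual entries can go negative. */
static int in_flight[NR_CPUS] = { 2, -1, 3, -1 };

static unsigned int sum_in_flight(void)
{
	unsigned int total = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		total += in_flight[cpu];

	/* Clamp transiently negative sums to zero, as the kernel does. */
	if ((int)total < 0)
		total = 0;
	return total;
}

int main(void)
{
	printf("in flight: %u\n", sum_in_flight()); /* prints 3 */
	return 0;
}
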
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
@@ -352,8 +366,8 @@
}
if (index == 0) {
- printk("register_blkdev: failed to get major for %s\n",
- name);
+ printk("%s: failed to get major for %s\n",
+ __func__, name);
ret = -EBUSY;
goto out;
}
@@ -362,8 +376,8 @@
}
if (major >= BLKDEV_MAJOR_MAX) {
- pr_err("register_blkdev: major requested (%u) is greater than the maximum (%u) for %s\n",
- major, BLKDEV_MAJOR_MAX-1, name);
+ pr_err("%s: major requested (%u) is greater than the maximum (%u) for %s\n",
+ __func__, major, BLKDEV_MAJOR_MAX-1, name);
ret = -EINVAL;
goto out;
@@ -518,6 +532,18 @@
}
}
+/*
+ * We invalidate devt by assigning a NULL pointer for devt in the idr.
+ */
+void blk_invalidate_devt(dev_t devt)
+{
+ if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
+ spin_lock_bh(&ext_devt_lock);
+ idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt)));
+ spin_unlock_bh(&ext_devt_lock);
+ }
+}
+
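idr_replace() with a NULL pointer makes lookups fail while keeping the ID allocated, so the devt cannot be reused before the gendisk is really freed. A toy analogue of that behaviour (an illustrative table, not the idr API):

#include <stdio.h>
#include <stddef.h>

#define MAX_IDS 8

/* Toy ID table: a slot is allocated iff used[id]; ptr[id] may be NULL. */
static void *ptr[MAX_IDS];
static int used[MAX_IDS];

static void *lookup(int id)
{
	return (id >= 0 && id < MAX_IDS && used[id]) ? ptr[id] : NULL;
}

/* Analogue of idr_replace(..., NULL, id): lookups stop resolving,
 * but the id itself stays "in use" until it is finally removed. */
static void invalidate(int id)
{
	if (id >= 0 && id < MAX_IDS && used[id])
		ptr[id] = NULL;
}

int main(void)
{
	int disk = 42; /* stand-in for a gendisk */

	used[3] = 1;
	ptr[3] = &disk;

	invalidate(3);
	printf("lookup after invalidate: %p (slot still allocated: %d)\n",
	       lookup(3), used[3]);
	return 0;
}
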
static char *bdevt_str(dev_t devt, char *buf)
{
if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
@@ -567,7 +593,8 @@
return 0;
}
-static void register_disk(struct device *parent, struct gendisk *disk)
+static void register_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups)
{
struct device *ddev = disk_to_dev(disk);
struct block_device *bdev;
@@ -582,6 +609,10 @@
/* delay uevents, until we scanned partition table */
dev_set_uevent_suppress(ddev, 1);
+ if (groups) {
+ WARN_ON(ddev->groups);
+ ddev->groups = groups;
+ }
if (device_add(ddev))
return;
if (!sysfs_deprecated) {
@@ -637,16 +668,19 @@
kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
disk_part_iter_exit(&piter);
- err = sysfs_create_link(&ddev->kobj,
- &disk->queue->backing_dev_info->dev->kobj,
- "bdi");
- WARN_ON(err);
+ if (disk->queue->backing_dev_info->dev) {
+ err = sysfs_create_link(&ddev->kobj,
+ &disk->queue->backing_dev_info->dev->kobj,
+ "bdi");
+ WARN_ON(err);
+ }
}
/**
* __device_add_disk - add disk information to kernel list
* @parent: parent device for the disk
* @disk: per-device partitioning information
+ * @groups: Additional per-device sysfs groups
* @register_queue: register the queue if set to true
*
* This function registers the partitioning information in @disk
@@ -655,11 +689,21 @@
* FIXME: error handling
*/
static void __device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
bool register_queue)
{
dev_t devt;
int retval;
+ /*
+ * The disk queue should now be all set with enough information about
+ * the device for the elevator code to pick an adequate default
+ * elevator if one is needed, that is, for devices requesting queue
+ * registration.
+ */
+ if (register_queue)
+ elevator_init_mq(disk->queue);
+
/* minors == 0 indicates to use ext devt from part0 and should
* be accompanied with EXT_DEVT flag. Make sure all
* parameters make sense.
@@ -698,7 +742,7 @@
blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk);
}
- register_disk(parent, disk);
+ register_disk(parent, disk, groups);
if (register_queue)
blk_register_queue(disk);
@@ -712,15 +756,17 @@
blk_integrity_add(disk);
}
-void device_add_disk(struct device *parent, struct gendisk *disk)
+void device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups)
{
- __device_add_disk(parent, disk, true);
+ __device_add_disk(parent, disk, groups, true);
}
EXPORT_SYMBOL(device_add_disk);
void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
{
- __device_add_disk(parent, disk, false);
+ __device_add_disk(parent, disk, NULL, false);
}
EXPORT_SYMBOL(device_add_disk_no_queue_reg);
@@ -769,6 +815,13 @@
if (!(disk->flags & GENHD_FL_HIDDEN))
blk_unregister_region(disk_devt(disk), disk->minors);
+ /*
+ * Remove the gendisk pointer from the idr so that it cannot be
+ * looked up during the RCU grace period that precedes freeing the
+ * gendisk, preventing use-after-free. Note that the device number
+ * stays "in use" until we really free the gendisk.
+ */
+ blk_invalidate_devt(disk_devt(disk));
kobject_put(disk->part0.holder_dir);
kobject_put(disk->slave_dir);
@@ -1237,7 +1290,6 @@
struct disk_part_tbl *new_ptbl;
int len = old_ptbl ? old_ptbl->len : 0;
int i, target;
- size_t size;
/*
* check for int overflow, since we can get here from blkpg_ioctl()
@@ -1254,8 +1306,8 @@
if (target <= len)
return 0;
- size = sizeof(*new_ptbl) + target * sizeof(new_ptbl->part[0]);
- new_ptbl = kzalloc_node(size, GFP_KERNEL, disk->node_id);
+ new_ptbl = kzalloc_node(struct_size(new_ptbl, part, target), GFP_KERNEL,
+ disk->node_id);
if (!new_ptbl)
return -ENOMEM;
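
struct_size() folds the header-plus-flexible-array sizing into a single overflow-checked expression. A stand-alone analogue of the computation it replaces (without the kernel helper's saturate-on-overflow behaviour):

#include <stdio.h>
#include <stdlib.h>

struct ptbl {
	int len;
	void *part[]; /* flexible array member */
};

int main(void)
{
	size_t target = 16;
	/* What struct_size(new_ptbl, part, target) evaluates to:
	 * the header plus target trailing elements. */
	size_t size = sizeof(struct ptbl) + target * sizeof(void *);
	struct ptbl *p = calloc(1, size);

	if (!p)
		return 1;
	p->len = (int)target;
	printf("allocated %zu bytes for %d slots\n", size, p->len);
	free(p);
	return 0;
}
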
@@ -1316,8 +1368,7 @@
struct disk_part_iter piter;
struct hd_struct *hd;
char buf[BDEVNAME_SIZE];
- unsigned int inflight[2];
- int cpu;
+ unsigned int inflight;
/*
if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
@@ -1329,10 +1380,7 @@
disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
while ((hd = disk_part_iter_next(&piter))) {
- cpu = part_stat_lock();
- part_round_stats(gp->queue, cpu, hd);
- part_stat_unlock();
- part_in_flight(gp->queue, hd, inflight);
+ inflight = part_in_flight(gp->queue, hd);
seq_printf(seqf, "%4d %7d %s "
"%lu %lu %lu %u "
"%lu %lu %lu %u "
@@ -1348,7 +1396,7 @@
part_stat_read(hd, merges[STAT_WRITE]),
part_stat_read(hd, sectors[STAT_WRITE]),
(unsigned int)part_stat_read_msecs(hd, STAT_WRITE),
- inflight[0],
+ inflight,
jiffies_to_msecs(part_stat_read(hd, io_ticks)),
jiffies_to_msecs(part_stat_read(hd, time_in_queue)),
part_stat_read(hd, ios[STAT_DISCARD]),
@@ -1608,12 +1656,11 @@
/*
* If device-specific poll interval is set, always use it. If
- * the default is being used, poll iff there are events which
- * can't be monitored asynchronously.
+ * the default is being used, poll if the POLL flag is set.
*/
if (ev->poll_msecs >= 0)
intv_msecs = ev->poll_msecs;
- else if (disk->events & ~disk->async_events)
+ else if (disk->event_flags & DISK_EVENT_FLAG_POLL)
intv_msecs = disk_events_dfl_poll_msecs;
return msecs_to_jiffies(intv_msecs);
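
The interval selection above reduces to a small decision table: a per-device setting always wins, otherwise the system default applies only when the device opted into polling. A simplified stand-alone sketch (DISK_EVENT_FLAG_POLL's value is illustrative, and the jiffies conversion is omitted):

#include <stdio.h>

#define DISK_EVENT_FLAG_POLL 0x1 /* illustrative value */

static long disk_events_dfl_poll_msecs; /* 0 = polling disabled */

static long poll_interval_msecs(long dev_poll_msecs, unsigned int event_flags)
{
	if (dev_poll_msecs >= 0)
		return dev_poll_msecs;		/* per-device setting wins */
	if (event_flags & DISK_EVENT_FLAG_POLL)
		return disk_events_dfl_poll_msecs;
	return 0;				/* no polling */
}

int main(void)
{
	disk_events_dfl_poll_msecs = 2000;
	printf("%ld\n", poll_interval_msecs(-1, DISK_EVENT_FLAG_POLL)); /* 2000 */
	printf("%ld\n", poll_interval_msecs(-1, 0));                    /* 0 */
	printf("%ld\n", poll_interval_msecs(500, 0));                   /* 500 */
	return 0;
}
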
@@ -1823,11 +1870,13 @@
/*
* Tell userland about new events. Only the events listed in
- * @disk->events are reported. Unlisted events are processed the
- * same internally but never get reported to userland.
+ * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
+ * is set. Otherwise, events are processed internally but never
+ * get reported to userland.
*/
for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
- if (events & disk->events & (1 << i))
+ if ((events & disk->events & (1 << i)) &&
+ (disk->event_flags & DISK_EVENT_FLAG_UEVENT))
envp[nr_events++] = disk_uevents[i];
if (nr_events)
@@ -1840,6 +1889,7 @@
*
* events : list of all supported events
* events_async : list of events which can be detected w/o polling
+ * (always empty, only for backwards compatibility)
* events_poll_msecs : polling interval, 0: disable, -1: system default
*/
static ssize_t __disk_events_show(unsigned int events, char *buf)
@@ -1864,15 +1914,16 @@
{
struct gendisk *disk = dev_to_disk(dev);
+ if (!(disk->event_flags & DISK_EVENT_FLAG_UEVENT))
+ return 0;
+
return __disk_events_show(disk->events, buf);
}
static ssize_t disk_events_async_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct gendisk *disk = dev_to_disk(dev);
-
- return __disk_events_show(disk->async_events, buf);
+ return 0;
}
static ssize_t disk_events_poll_msecs_show(struct device *dev,
@@ -1881,6 +1932,9 @@
{
struct gendisk *disk = dev_to_disk(dev);
+ if (!disk->ev)
+ return sprintf(buf, "-1\n");
+
return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
}
@@ -1897,6 +1951,9 @@
if (intv < 0 && intv != -1)
return -EINVAL;
+ if (!disk->ev)
+ return -ENODEV;
+
disk_block_events(disk);
disk->ev->poll_msecs = intv;
__disk_unblock_events(disk, true);
@@ -1921,7 +1978,7 @@
* The default polling interval can be specified by the kernel
* parameter block.events_dfl_poll_msecs which defaults to 0
* (disable). This can also be modified runtime by writing to
- * /sys/module/block/events_dfl_poll_msecs.
+ * /sys/module/block/parameters/events_dfl_poll_msecs.
*/
static int disk_events_set_dfl_poll_msecs(const char *val,
const struct kernel_param *kp)
@@ -1961,7 +2018,7 @@
{
struct disk_events *ev;
- if (!disk->fops->check_events)
+ if (!disk->fops->check_events || !disk->events)
return;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -1983,14 +2040,14 @@
static void disk_add_events(struct gendisk *disk)
{
- if (!disk->ev)
- return;
-
/* FIXME: error handling */
if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
pr_warn("%s: failed to create sysfs files for events\n",
disk->disk_name);
+ if (!disk->ev)
+ return;
+
mutex_lock(&disk_events_mutex);
list_add_tail(&disk->ev->node, &disk_events);
mutex_unlock(&disk_events_mutex);
@@ -2004,14 +2061,13 @@
static void disk_del_events(struct gendisk *disk)
{
- if (!disk->ev)
- return;
+ if (disk->ev) {
+ disk_block_events(disk);
- disk_block_events(disk);
-
- mutex_lock(&disk_events_mutex);
- list_del_init(&disk->ev->node);
- mutex_unlock(&disk_events_mutex);
+ mutex_lock(&disk_events_mutex);
+ list_del_init(&disk->ev->node);
+ mutex_unlock(&disk_events_mutex);
+ }
sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
}
diff --git a/block/ioctl.c b/block/ioctl.c
index 3884d81..15a0eb8 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/export.h>
@@ -532,6 +533,10 @@
return blkdev_report_zones_ioctl(bdev, mode, cmd, arg);
case BLKRESETZONE:
return blkdev_reset_zones_ioctl(bdev, mode, cmd, arg);
+ case BLKGETZONESZ:
+ return put_uint(arg, bdev_zone_sectors(bdev));
+ case BLKGETNRZONES:
+ return put_uint(arg, blkdev_nr_zones(bdev));
case HDIO_GETGEO:
return blkdev_getgeo(bdev, argp);
case BLKRAGET:
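
The two new ioctls export the zone geometry to userspace as plain unsigned ints. A hedged usage example; the request constants come from <linux/blkzoned.h>, and the device path is an assumption for illustration:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
	unsigned int zone_sectors = 0, nr_zones = 0;
	int fd = open("/dev/nullb0", O_RDONLY); /* assumed zoned device */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKGETZONESZ, &zone_sectors) == 0)
		printf("zone size: %u sectors\n", zone_sectors);
	if (ioctl(fd, BLKGETNRZONES, &nr_zones) == 0)
		printf("zones: %u\n", nr_zones);
	close(fd);
	return 0;
}
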
diff --git a/block/ioprio.c b/block/ioprio.c
index f982108..77bcab1 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/ioprio.c
*
@@ -16,7 +17,7 @@
*
* ioprio_set(PRIO_PROCESS, pid, prio);
*
- * See also Documentation/block/ioprio.txt
+ * See also Documentation/block/ioprio.rst
*
*/
#include <linux/gfp.h>
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index a1660ba..34dcea0 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* The Kyber I/O scheduler. Controls latency by throttling queue depths using
* scalable techniques.
*
* Copyright (C) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
@@ -29,19 +18,30 @@
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
-#include "blk-stat.h"
-/* Scheduling domains. */
+#define CREATE_TRACE_POINTS
+#include <trace/events/kyber.h>
+
+/*
+ * Scheduling domains: the device is divided into multiple domains based on the
+ * request type.
+ */
enum {
KYBER_READ,
- KYBER_SYNC_WRITE,
- KYBER_OTHER, /* Async writes, discard, etc. */
+ KYBER_WRITE,
+ KYBER_DISCARD,
+ KYBER_OTHER,
KYBER_NUM_DOMAINS,
};
-enum {
- KYBER_MIN_DEPTH = 256,
+static const char *kyber_domain_names[] = {
+ [KYBER_READ] = "READ",
+ [KYBER_WRITE] = "WRITE",
+ [KYBER_DISCARD] = "DISCARD",
+ [KYBER_OTHER] = "OTHER",
+};
+enum {
/*
* In order to prevent starvation of synchronous requests by a flood of
* asynchronous requests, we reserve 25% of requests for synchronous
@@ -51,25 +51,87 @@
};
/*
- * Initial device-wide depths for each scheduling domain.
+ * Maximum device-wide depth for each scheduling domain.
*
- * Even for fast devices with lots of tags like NVMe, you can saturate
- * the device with only a fraction of the maximum possible queue depth.
- * So, we cap these to a reasonable value.
+ * Even for fast devices with lots of tags like NVMe, you can saturate the
+ * device with only a fraction of the maximum possible queue depth. So, we cap
+ * these to a reasonable value.
*/
static const unsigned int kyber_depth[] = {
[KYBER_READ] = 256,
- [KYBER_SYNC_WRITE] = 128,
- [KYBER_OTHER] = 64,
+ [KYBER_WRITE] = 128,
+ [KYBER_DISCARD] = 64,
+ [KYBER_OTHER] = 16,
};
/*
- * Scheduling domain batch sizes. We favor reads.
+ * Default latency targets for each scheduling domain.
+ */
+static const u64 kyber_latency_targets[] = {
+ [KYBER_READ] = 2ULL * NSEC_PER_MSEC,
+ [KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
+ [KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
+};
+
+/*
+ * Batch size (number of requests we'll dispatch in a row) for each scheduling
+ * domain.
*/
static const unsigned int kyber_batch_size[] = {
[KYBER_READ] = 16,
- [KYBER_SYNC_WRITE] = 8,
- [KYBER_OTHER] = 8,
+ [KYBER_WRITE] = 8,
+ [KYBER_DISCARD] = 1,
+ [KYBER_OTHER] = 1,
+};
+
+/*
+ * Requests latencies are recorded in a histogram with buckets defined relative
+ * to the target latency:
+ *
+ * <= 1/4 * target latency
+ * <= 1/2 * target latency
+ * <= 3/4 * target latency
+ * <= target latency
+ * <= 1 1/4 * target latency
+ * <= 1 1/2 * target latency
+ * <= 1 3/4 * target latency
+ * > 1 3/4 * target latency
+ */
+enum {
+ /*
+ * The width of the latency histogram buckets is
+ * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
+ */
+ KYBER_LATENCY_SHIFT = 2,
+ /*
+ * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
+ * thus, "good".
+ */
+ KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
+ /* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
+ KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
+};
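
Concretely, the bucket width is target >> KYBER_LATENCY_SHIFT, i.e. a quarter of the target, so for the 2 ms read target the bucket edges land at 0.5 ms steps. A stand-alone check of the edges:

#include <stdio.h>

#define KYBER_LATENCY_SHIFT 2
#define KYBER_LATENCY_BUCKETS (2 << KYBER_LATENCY_SHIFT)
#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
	unsigned long long target = 2 * NSEC_PER_MSEC; /* read target */
	unsigned long long width = target >> KYBER_LATENCY_SHIFT;
	int b;

	/* Upper edge of each bucket; the last bucket is open-ended. */
	for (b = 0; b < KYBER_LATENCY_BUCKETS - 1; b++)
		printf("bucket %d: <= %llu ns\n", b, (b + 1) * width);
	printf("bucket %d: > %llu ns\n", b, b * width);
	return 0;
}
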
+
+/*
+ * We measure both the total latency and the I/O latency (i.e., latency after
+ * submitting to the device).
+ */
+enum {
+ KYBER_TOTAL_LATENCY,
+ KYBER_IO_LATENCY,
+};
+
+static const char *kyber_latency_type_names[] = {
+ [KYBER_TOTAL_LATENCY] = "total",
+ [KYBER_IO_LATENCY] = "I/O",
+};
+
+/*
+ * Per-cpu latency histograms: total latency and I/O latency for each scheduling
+ * domain except for KYBER_OTHER.
+ */
+struct kyber_cpu_latency {
+ atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};
/*
@@ -88,12 +150,9 @@
struct kyber_queue_data {
struct request_queue *q;
- struct blk_stat_callback *cb;
-
/*
- * The device is divided into multiple scheduling domains based on the
- * request type. Each domain has a fixed number of in-flight requests of
- * that type device-wide, limited by these tokens.
+ * Each scheduling domain has a limited number of in-flight requests
+ * device-wide, limited by these tokens.
*/
struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
@@ -103,8 +162,19 @@
*/
unsigned int async_depth;
+ struct kyber_cpu_latency __percpu *cpu_latency;
+
+ /* Timer for stats aggregation and adjusting domain tokens. */
+ struct timer_list timer;
+
+ unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
+
+ unsigned long latency_timeout[KYBER_OTHER];
+
+ int domain_p99[KYBER_OTHER];
+
/* Target latencies in nanoseconds. */
- u64 read_lat_nsec, write_lat_nsec;
+ u64 latency_targets[KYBER_OTHER];
};
struct kyber_hctx_data {
@@ -114,7 +184,7 @@
unsigned int batching;
struct kyber_ctx_queue *kcqs;
struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
- wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
+ struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
atomic_t wait_index[KYBER_NUM_DOMAINS];
};
@@ -124,233 +194,219 @@
static unsigned int kyber_sched_domain(unsigned int op)
{
- if ((op & REQ_OP_MASK) == REQ_OP_READ)
+ switch (op & REQ_OP_MASK) {
+ case REQ_OP_READ:
return KYBER_READ;
- else if ((op & REQ_OP_MASK) == REQ_OP_WRITE && op_is_sync(op))
- return KYBER_SYNC_WRITE;
- else
+ case REQ_OP_WRITE:
+ return KYBER_WRITE;
+ case REQ_OP_DISCARD:
+ return KYBER_DISCARD;
+ default:
return KYBER_OTHER;
+ }
}
-enum {
- NONE = 0,
- GOOD = 1,
- GREAT = 2,
- BAD = -1,
- AWFUL = -2,
-};
-
-#define IS_GOOD(status) ((status) > 0)
-#define IS_BAD(status) ((status) < 0)
-
-static int kyber_lat_status(struct blk_stat_callback *cb,
- unsigned int sched_domain, u64 target)
+static void flush_latency_buckets(struct kyber_queue_data *kqd,
+ struct kyber_cpu_latency *cpu_latency,
+ unsigned int sched_domain, unsigned int type)
{
- u64 latency;
+ unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
+ atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
+ unsigned int bucket;
- if (!cb->stat[sched_domain].nr_samples)
- return NONE;
-
- latency = cb->stat[sched_domain].mean;
- if (latency >= 2 * target)
- return AWFUL;
- else if (latency > target)
- return BAD;
- else if (latency <= target / 2)
- return GREAT;
- else /* (latency <= target) */
- return GOOD;
+ for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
+ buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}
/*
- * Adjust the read or synchronous write depth given the status of reads and
- * writes. The goal is that the latencies of the two domains are fair (i.e., if
- * one is good, then the other is good).
+ * Calculate the histogram bucket with the given percentile rank, or -1 if there
+ * aren't enough samples yet.
*/
-static void kyber_adjust_rw_depth(struct kyber_queue_data *kqd,
- unsigned int sched_domain, int this_status,
- int other_status)
+static int calculate_percentile(struct kyber_queue_data *kqd,
+ unsigned int sched_domain, unsigned int type,
+ unsigned int percentile)
{
- unsigned int orig_depth, depth;
+ unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
+ unsigned int bucket, samples = 0, percentile_samples;
+
+ for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
+ samples += buckets[bucket];
+
+ if (!samples)
+ return -1;
/*
- * If this domain had no samples, or reads and writes are both good or
- * both bad, don't adjust the depth.
+ * We do the calculation once we have 500 samples or one second passes
+ * since the first sample was recorded, whichever comes first.
*/
- if (this_status == NONE ||
- (IS_GOOD(this_status) && IS_GOOD(other_status)) ||
- (IS_BAD(this_status) && IS_BAD(other_status)))
- return;
-
- orig_depth = depth = kqd->domain_tokens[sched_domain].sb.depth;
-
- if (other_status == NONE) {
- depth++;
- } else {
- switch (this_status) {
- case GOOD:
- if (other_status == AWFUL)
- depth -= max(depth / 4, 1U);
- else
- depth -= max(depth / 8, 1U);
- break;
- case GREAT:
- if (other_status == AWFUL)
- depth /= 2;
- else
- depth -= max(depth / 4, 1U);
- break;
- case BAD:
- depth++;
- break;
- case AWFUL:
- if (other_status == GREAT)
- depth += 2;
- else
- depth++;
- break;
- }
+ if (!kqd->latency_timeout[sched_domain])
+ kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
+ if (samples < 500 &&
+ time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
+ return -1;
}
+ kqd->latency_timeout[sched_domain] = 0;
+ percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
+ for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
+ if (buckets[bucket] >= percentile_samples)
+ break;
+ percentile_samples -= buckets[bucket];
+ }
+ memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
+
+ trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
+ kyber_latency_type_names[type], percentile,
+ bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
+
+ return bucket;
+}
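
A self-contained rendering of the same percentile walk, with made-up sample counts for illustration (the kernel additionally defers the calculation until 500 samples or one second, which is elided here):

#include <stdio.h>

#define NBUCKETS 8

/* Find the histogram bucket containing the given percentile rank,
 * mirroring the scan in calculate_percentile(). */
static int percentile_bucket(const unsigned int *buckets,
			     unsigned int percentile)
{
	unsigned int samples = 0, need;
	int b;

	for (b = 0; b < NBUCKETS; b++)
		samples += buckets[b];
	if (!samples)
		return -1;

	need = (samples * percentile + 99) / 100; /* DIV_ROUND_UP */
	for (b = 0; b < NBUCKETS - 1; b++) {
		if (buckets[b] >= need)
			break;
		need -= buckets[b];
	}
	return b;
}

int main(void)
{
	/* 500 samples, mostly fast: p90 lands in bucket 2, p99 in bucket 6. */
	unsigned int hist[NBUCKETS] = { 200, 150, 100, 20, 10, 10, 8, 2 };

	printf("p90 bucket: %d\n", percentile_bucket(hist, 90));
	printf("p99 bucket: %d\n", percentile_bucket(hist, 99));
	return 0;
}
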
+
+static void kyber_resize_domain(struct kyber_queue_data *kqd,
+ unsigned int sched_domain, unsigned int depth)
+{
depth = clamp(depth, 1U, kyber_depth[sched_domain]);
- if (depth != orig_depth)
+ if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
+ trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
+ depth);
+ }
}
-/*
- * Adjust the depth of other requests given the status of reads and synchronous
- * writes. As long as either domain is doing fine, we don't throttle, but if
- * both domains are doing badly, we throttle heavily.
- */
-static void kyber_adjust_other_depth(struct kyber_queue_data *kqd,
- int read_status, int write_status,
- bool have_samples)
+static void kyber_timer_fn(struct timer_list *t)
{
- unsigned int orig_depth, depth;
- int status;
+ struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
+ unsigned int sched_domain;
+ int cpu;
+ bool bad = false;
- orig_depth = depth = kqd->domain_tokens[KYBER_OTHER].sb.depth;
+ /* Sum all of the per-cpu latency histograms. */
+ for_each_online_cpu(cpu) {
+ struct kyber_cpu_latency *cpu_latency;
- if (read_status == NONE && write_status == NONE) {
- depth += 2;
- } else if (have_samples) {
- if (read_status == NONE)
- status = write_status;
- else if (write_status == NONE)
- status = read_status;
- else
- status = max(read_status, write_status);
- switch (status) {
- case GREAT:
- depth += 2;
- break;
- case GOOD:
- depth++;
- break;
- case BAD:
- depth -= max(depth / 4, 1U);
- break;
- case AWFUL:
- depth /= 2;
- break;
+ cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
+ for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
+ flush_latency_buckets(kqd, cpu_latency, sched_domain,
+ KYBER_TOTAL_LATENCY);
+ flush_latency_buckets(kqd, cpu_latency, sched_domain,
+ KYBER_IO_LATENCY);
}
}
- depth = clamp(depth, 1U, kyber_depth[KYBER_OTHER]);
- if (depth != orig_depth)
- sbitmap_queue_resize(&kqd->domain_tokens[KYBER_OTHER], depth);
-}
+ /*
+ * Check if any domains have a high I/O latency, which might indicate
+ * congestion in the device. Note that we use the p90; we don't want to
+ * be too sensitive to outliers here.
+ */
+ for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
+ int p90;
-/*
- * Apply heuristics for limiting queue depths based on gathered latency
- * statistics.
- */
-static void kyber_stat_timer_fn(struct blk_stat_callback *cb)
-{
- struct kyber_queue_data *kqd = cb->data;
- int read_status, write_status;
-
- read_status = kyber_lat_status(cb, KYBER_READ, kqd->read_lat_nsec);
- write_status = kyber_lat_status(cb, KYBER_SYNC_WRITE, kqd->write_lat_nsec);
-
- kyber_adjust_rw_depth(kqd, KYBER_READ, read_status, write_status);
- kyber_adjust_rw_depth(kqd, KYBER_SYNC_WRITE, write_status, read_status);
- kyber_adjust_other_depth(kqd, read_status, write_status,
- cb->stat[KYBER_OTHER].nr_samples != 0);
+ p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
+ 90);
+ if (p90 >= KYBER_GOOD_BUCKETS)
+ bad = true;
+ }
/*
- * Continue monitoring latencies if we aren't hitting the targets or
- * we're still throttling other requests.
+ * Adjust the scheduling domain depths. If we determined that there was
+ * congestion, we throttle all domains with good latencies. Either way,
+ * we ease up on throttling domains with bad latencies.
*/
- if (!blk_stat_is_active(kqd->cb) &&
- ((IS_BAD(read_status) || IS_BAD(write_status) ||
- kqd->domain_tokens[KYBER_OTHER].sb.depth < kyber_depth[KYBER_OTHER])))
- blk_stat_activate_msecs(kqd->cb, 100);
+ for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
+ unsigned int orig_depth, depth;
+ int p99;
+
+ p99 = calculate_percentile(kqd, sched_domain,
+ KYBER_TOTAL_LATENCY, 99);
+ /*
+ * This is kind of subtle: different domains will not
+ * necessarily have enough samples to calculate the latency
+ * percentiles during the same window, so we have to remember
+ * the p99 for the next time we observe congestion; once we do,
+ * we don't want to throttle again until we get more data, so we
+ * reset it to -1.
+ */
+ if (bad) {
+ if (p99 < 0)
+ p99 = kqd->domain_p99[sched_domain];
+ kqd->domain_p99[sched_domain] = -1;
+ } else if (p99 >= 0) {
+ kqd->domain_p99[sched_domain] = p99;
+ }
+ if (p99 < 0)
+ continue;
+
+ /*
+ * If this domain has bad latency, throttle less. Otherwise,
+ * throttle more iff we determined that there is congestion.
+ *
+ * The new depth is scaled linearly with the p99 latency vs the
+ * latency target. E.g., if the p99 is 3/4 of the target, then
+ * we throttle down to 3/4 of the current depth, and if the p99
+ * is 2x the target, then we double the depth.
+ */
+ if (bad || p99 >= KYBER_GOOD_BUCKETS) {
+ orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
+ depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
+ kyber_resize_domain(kqd, sched_domain, depth);
+ }
+ }
}
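
Worked numbers for the linear scaling above, as a stand-alone sketch: a p99 in bucket 2 (latencies within about 3/4 of the target) shrinks the depth to 3/4 of its current value, while a p99 in the last bucket roughly doubles it before clamping to the domain maximum:

#include <stdio.h>

#define KYBER_LATENCY_SHIFT 2

static unsigned int clamp_depth(unsigned int d, unsigned int max)
{
	if (d < 1)
		d = 1;
	if (d > max)
		d = max;
	return d;
}

/* depth = orig * (p99 + 1) / (1 << KYBER_LATENCY_SHIFT), clamped. */
static unsigned int scale_depth(unsigned int orig, int p99, unsigned int max)
{
	return clamp_depth((orig * (p99 + 1)) >> KYBER_LATENCY_SHIFT, max);
}

int main(void)
{
	printf("%u\n", scale_depth(256, 2, 256)); /* 192: throttle down   */
	printf("%u\n", scale_depth(128, 7, 256)); /* 256: ease throttling */
	return 0;
}
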
-static unsigned int kyber_sched_tags_shift(struct kyber_queue_data *kqd)
+static unsigned int kyber_sched_tags_shift(struct request_queue *q)
{
/*
* All of the hardware queues have the same depth, so we can just grab
* the shift of the first one.
*/
- return kqd->q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift;
-}
-
-static int kyber_bucket_fn(const struct request *rq)
-{
- return kyber_sched_domain(rq->cmd_flags);
+ return q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift;
}
static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
struct kyber_queue_data *kqd;
- unsigned int max_tokens;
unsigned int shift;
int ret = -ENOMEM;
int i;
- kqd = kmalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
+ kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
if (!kqd)
goto err;
+
kqd->q = q;
- kqd->cb = blk_stat_alloc_callback(kyber_stat_timer_fn, kyber_bucket_fn,
- KYBER_NUM_DOMAINS, kqd);
- if (!kqd->cb)
+ kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!kqd->cpu_latency)
goto err_kqd;
- /*
- * The maximum number of tokens for any scheduling domain is at least
- * the queue depth of a single hardware queue. If the hardware doesn't
- * have many tags, still provide a reasonable number.
- */
- max_tokens = max_t(unsigned int, q->tag_set->queue_depth,
- KYBER_MIN_DEPTH);
+ timer_setup(&kqd->timer, kyber_timer_fn, 0);
+
for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
WARN_ON(!kyber_depth[i]);
WARN_ON(!kyber_batch_size[i]);
ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
- max_tokens, -1, false, GFP_KERNEL,
- q->node);
+ kyber_depth[i], -1, false,
+ GFP_KERNEL, q->node);
if (ret) {
while (--i >= 0)
sbitmap_queue_free(&kqd->domain_tokens[i]);
- goto err_cb;
+ goto err_buckets;
}
- sbitmap_queue_resize(&kqd->domain_tokens[i], kyber_depth[i]);
}
- shift = kyber_sched_tags_shift(kqd);
- kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
+ for (i = 0; i < KYBER_OTHER; i++) {
+ kqd->domain_p99[i] = -1;
+ kqd->latency_targets[i] = kyber_latency_targets[i];
+ }
- kqd->read_lat_nsec = 2000000ULL;
- kqd->write_lat_nsec = 10000000ULL;
+ shift = kyber_sched_tags_shift(q);
+ kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
return kqd;
-err_cb:
- blk_stat_free_callback(kqd->cb);
+err_buckets:
+ free_percpu(kqd->cpu_latency);
err_kqd:
kfree(kqd);
err:
@@ -372,25 +428,24 @@
return PTR_ERR(kqd);
}
+ blk_stat_enable_accounting(q);
+
eq->elevator_data = kqd;
q->elevator = eq;
- blk_stat_add_callback(q, kqd->cb);
-
return 0;
}
static void kyber_exit_sched(struct elevator_queue *e)
{
struct kyber_queue_data *kqd = e->elevator_data;
- struct request_queue *q = kqd->q;
int i;
- blk_stat_remove_callback(q, kqd->cb);
+ del_timer_sync(&kqd->timer);
for (i = 0; i < KYBER_NUM_DOMAINS; i++)
sbitmap_queue_free(&kqd->domain_tokens[i]);
- blk_stat_free_callback(kqd->cb);
+ free_percpu(kqd->cpu_latency);
kfree(kqd);
}
@@ -435,10 +490,11 @@
for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
INIT_LIST_HEAD(&khd->rqs[i]);
- init_waitqueue_func_entry(&khd->domain_wait[i],
+ khd->domain_wait[i].sbq = NULL;
+ init_waitqueue_func_entry(&khd->domain_wait[i].wait,
kyber_domain_wake);
- khd->domain_wait[i].private = hctx;
- INIT_LIST_HEAD(&khd->domain_wait[i].entry);
+ khd->domain_wait[i].wait.private = hctx;
+ INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
atomic_set(&khd->wait_index[i], 0);
}
@@ -506,19 +562,19 @@
}
}
-static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+ unsigned int nr_segs)
{
struct kyber_hctx_data *khd = hctx->sched_data;
struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
- struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw];
+ struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
struct list_head *rq_list = &kcq->rq_list[sched_domain];
bool merged;
spin_lock(&kcq->lock);
- merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio);
+ merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
spin_unlock(&kcq->lock);
- blk_mq_put_ctx(ctx);
return merged;
}
@@ -536,7 +592,7 @@
list_for_each_entry_safe(rq, next, rq_list, queuelist) {
unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
- struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw];
+ struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
struct list_head *head = &kcq->rq_list[sched_domain];
spin_lock(&kcq->lock);
@@ -545,7 +601,7 @@
else
list_move_tail(&rq->queuelist, head);
sbitmap_set_bit(&khd->kcq_map[sched_domain],
- rq->mq_ctx->index_hw);
+ rq->mq_ctx->index_hw[hctx->type]);
blk_mq_sched_request_inserted(rq);
spin_unlock(&kcq->lock);
}
@@ -558,41 +614,44 @@
rq_clear_domain_token(kqd, rq);
}
-static void kyber_completed_request(struct request *rq)
+static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
+ unsigned int sched_domain, unsigned int type,
+ u64 target, u64 latency)
{
- struct request_queue *q = rq->q;
- struct kyber_queue_data *kqd = q->elevator->elevator_data;
- unsigned int sched_domain;
- u64 now, latency, target;
+ unsigned int bucket;
+ u64 divisor;
- /*
- * Check if this request met our latency goal. If not, quickly gather
- * some statistics and start throttling.
- */
- sched_domain = kyber_sched_domain(rq->cmd_flags);
- switch (sched_domain) {
- case KYBER_READ:
- target = kqd->read_lat_nsec;
- break;
- case KYBER_SYNC_WRITE:
- target = kqd->write_lat_nsec;
- break;
- default:
- return;
+ if (latency > 0) {
+ divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
+ bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
+ KYBER_LATENCY_BUCKETS - 1);
+ } else {
+ bucket = 0;
}
- /* If we are already monitoring latencies, don't check again. */
- if (blk_stat_is_active(kqd->cb))
+ atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
+}
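
The bucket index is essentially ceil(latency / (target / 4)) - 1, capped at the last bucket, so with the 2 ms read target a 1.2 ms completion lands in bucket 2. A stand-alone version of the computation, with values assumed for illustration:

#include <stdio.h>

#define KYBER_LATENCY_SHIFT 2
#define KYBER_LATENCY_BUCKETS (2 << KYBER_LATENCY_SHIFT)

static unsigned int latency_bucket(unsigned long long target,
				   unsigned long long latency)
{
	unsigned long long divisor, bucket;

	if (!latency)
		return 0;
	divisor = target >> KYBER_LATENCY_SHIFT;
	if (!divisor)
		divisor = 1;
	bucket = (latency - 1) / divisor;
	if (bucket > KYBER_LATENCY_BUCKETS - 1)
		bucket = KYBER_LATENCY_BUCKETS - 1;
	return (unsigned int)bucket;
}

int main(void)
{
	unsigned long long target = 2000000; /* 2 ms read target, in ns */

	printf("%u\n", latency_bucket(target, 1200000)); /* 1.2 ms -> bucket 2 */
	printf("%u\n", latency_bucket(target, 9000000)); /* 9 ms   -> bucket 7 */
	return 0;
}
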
+
+static void kyber_completed_request(struct request *rq, u64 now)
+{
+ struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
+ struct kyber_cpu_latency *cpu_latency;
+ unsigned int sched_domain;
+ u64 target;
+
+ sched_domain = kyber_sched_domain(rq->cmd_flags);
+ if (sched_domain == KYBER_OTHER)
return;
- now = ktime_get_ns();
- if (now < rq->io_start_time_ns)
- return;
+ cpu_latency = get_cpu_ptr(kqd->cpu_latency);
+ target = kqd->latency_targets[sched_domain];
+ add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
+ target, now - rq->start_time_ns);
+ add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
+ now - rq->io_start_time_ns);
+ put_cpu_ptr(kqd->cpu_latency);
- latency = now - rq->io_start_time_ns;
-
- if (latency > target)
- blk_stat_activate_msecs(kqd->cb, 10);
+ timer_reduce(&kqd->timer, jiffies + HZ / 10);
}
struct flush_kcq_data {
@@ -629,12 +688,13 @@
flush_busy_kcq, &data);
}
-static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
+static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
void *key)
{
- struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);
+ struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
+ struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);
- list_del_init(&wait->entry);
+ sbitmap_del_wait_queue(wait);
blk_mq_run_hw_queue(hctx, true);
return 1;
}
@@ -645,7 +705,7 @@
{
unsigned int sched_domain = khd->cur_domain;
struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
- wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
+ struct sbq_wait *wait = &khd->domain_wait[sched_domain];
struct sbq_wait_state *ws;
int nr;
@@ -656,11 +716,11 @@
* run when one becomes available. Note that this is serialized on
* khd->lock, but we still need to be careful about the waker.
*/
- if (nr < 0 && list_empty_careful(&wait->entry)) {
+ if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
ws = sbq_wait_ptr(domain_tokens,
&khd->wait_index[sched_domain]);
khd->domain_ws[sched_domain] = ws;
- add_wait_queue(&ws->wait, wait);
+ sbitmap_add_wait_queue(domain_tokens, ws, wait);
/*
* Try again in case a token was freed before we got on the wait
@@ -676,10 +736,10 @@
* between the !list_empty_careful() check and us grabbing the lock, but
* list_del_init() is okay with that.
*/
- if (nr >= 0 && !list_empty_careful(&wait->entry)) {
+ if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
ws = khd->domain_ws[sched_domain];
spin_lock_irq(&ws->wait.lock);
- list_del_init(&wait->entry);
+ sbitmap_del_wait_queue(wait);
spin_unlock_irq(&ws->wait.lock);
}
@@ -713,6 +773,9 @@
rq_set_domain_token(rq, nr);
list_del_init(&rq->queuelist);
return rq;
+ } else {
+ trace_kyber_throttled(kqd->q,
+ kyber_domain_names[khd->cur_domain]);
}
} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
nr = kyber_get_domain_token(kqd, khd, hctx);
@@ -723,6 +786,9 @@
rq_set_domain_token(rq, nr);
list_del_init(&rq->queuelist);
return rq;
+ } else {
+ trace_kyber_throttled(kqd->q,
+ kyber_domain_names[khd->cur_domain]);
}
}
@@ -790,17 +856,17 @@
return false;
}
-#define KYBER_LAT_SHOW_STORE(op) \
-static ssize_t kyber_##op##_lat_show(struct elevator_queue *e, \
- char *page) \
+#define KYBER_LAT_SHOW_STORE(domain, name) \
+static ssize_t kyber_##name##_lat_show(struct elevator_queue *e, \
+ char *page) \
{ \
struct kyber_queue_data *kqd = e->elevator_data; \
\
- return sprintf(page, "%llu\n", kqd->op##_lat_nsec); \
+ return sprintf(page, "%llu\n", kqd->latency_targets[domain]); \
} \
\
-static ssize_t kyber_##op##_lat_store(struct elevator_queue *e, \
- const char *page, size_t count) \
+static ssize_t kyber_##name##_lat_store(struct elevator_queue *e, \
+ const char *page, size_t count) \
{ \
struct kyber_queue_data *kqd = e->elevator_data; \
unsigned long long nsec; \
@@ -810,12 +876,12 @@
if (ret) \
return ret; \
\
- kqd->op##_lat_nsec = nsec; \
+ kqd->latency_targets[domain] = nsec; \
\
return count; \
}
-KYBER_LAT_SHOW_STORE(read);
-KYBER_LAT_SHOW_STORE(write);
+KYBER_LAT_SHOW_STORE(KYBER_READ, read);
+KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE
#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
@@ -876,13 +942,14 @@
{ \
struct blk_mq_hw_ctx *hctx = data; \
struct kyber_hctx_data *khd = hctx->sched_data; \
- wait_queue_entry_t *wait = &khd->domain_wait[domain]; \
+ wait_queue_entry_t *wait = &khd->domain_wait[domain].wait; \
\
seq_printf(m, "%d\n", !list_empty_careful(&wait->entry)); \
return 0; \
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
-KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_SYNC_WRITE, sync_write)
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS
@@ -900,20 +967,7 @@
struct blk_mq_hw_ctx *hctx = data;
struct kyber_hctx_data *khd = hctx->sched_data;
- switch (khd->cur_domain) {
- case KYBER_READ:
- seq_puts(m, "READ\n");
- break;
- case KYBER_SYNC_WRITE:
- seq_puts(m, "SYNC_WRITE\n");
- break;
- case KYBER_OTHER:
- seq_puts(m, "OTHER\n");
- break;
- default:
- seq_printf(m, "%u\n", khd->cur_domain);
- break;
- }
+ seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
return 0;
}
@@ -930,7 +984,8 @@
{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
KYBER_QUEUE_DOMAIN_ATTRS(read),
- KYBER_QUEUE_DOMAIN_ATTRS(sync_write),
+ KYBER_QUEUE_DOMAIN_ATTRS(write),
+ KYBER_QUEUE_DOMAIN_ATTRS(discard),
KYBER_QUEUE_DOMAIN_ATTRS(other),
{"async_depth", 0400, kyber_async_depth_show},
{},
@@ -942,7 +997,8 @@
{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
KYBER_HCTX_DOMAIN_ATTRS(read),
- KYBER_HCTX_DOMAIN_ATTRS(sync_write),
+ KYBER_HCTX_DOMAIN_ATTRS(write),
+ KYBER_HCTX_DOMAIN_ATTRS(discard),
KYBER_HCTX_DOMAIN_ATTRS(other),
{"cur_domain", 0400, kyber_cur_domain_show},
{"batching", 0400, kyber_batching_show},
@@ -952,7 +1008,7 @@
#endif
static struct elevator_type kyber_sched = {
- .ops.mq = {
+ .ops = {
.init_sched = kyber_init_sched,
.exit_sched = kyber_exit_sched,
.init_hctx = kyber_init_hctx,
@@ -967,7 +1023,6 @@
.dispatch_request = kyber_dispatch_request,
.has_work = kyber_has_work,
},
- .uses_mq = true,
#ifdef CONFIG_BLK_DEBUG_FS
.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 099a9e0..b490f47 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
* for the blk-mq scheduling framework
@@ -24,7 +25,7 @@
#include "blk-mq-sched.h"
/*
- * See Documentation/block/deadline-iosched.txt
+ * See Documentation/block/deadline-iosched.rst
*/
static const int read_expire = HZ / 2; /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
@@ -373,7 +374,7 @@
/*
* One confusing aspect here is that we get called for a specific
- * hardware queue, but we return a request that may not be for a
+ * hardware queue, but we may return a request that is for a
* different hardware queue. This is because mq-deadline has shared
* state for all hardware queues, in terms of sorting, FIFOs, etc.
*/
@@ -458,7 +459,8 @@
return ELEVATOR_NO_MERGE;
}
-static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+ unsigned int nr_segs)
{
struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data;
@@ -466,7 +468,7 @@
bool ret;
spin_lock(&dd->lock);
- ret = blk_mq_sched_try_merge(q, bio, &free);
+ ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
spin_unlock(&dd->lock);
if (free)
@@ -549,6 +551,13 @@
* spinlock so that the zone is never unlocked while deadline_fifo_request()
* or deadline_next_request() are executing. This function is called for
* all requests, whether or not these requests complete successfully.
+ *
+ * For a zoned block device, __dd_dispatch_request() may have stopped
+ * dispatching requests if all the queued requests are write requests
+ * directed at zones that are already locked due to ongoing write
+ * requests. To keep write dispatch making progress in that case, mark
+ * the queue as needing a restart so that it is run again once the
+ * request completes and its target zone is unlocked.
*/
static void dd_finish_request(struct request *rq)
{
@@ -560,6 +569,8 @@
spin_lock_irqsave(&dd->zone_lock, flags);
blk_req_zone_write_unlock(rq);
+ if (!list_empty(&dd->fifo_list[WRITE]))
+ blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
spin_unlock_irqrestore(&dd->zone_lock, flags);
}
}
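
A toy model of why the restart is needed: dispatch can stall when every queued write targets a locked zone, and only a completion, which unlocks the zone, lets it make progress again. Illustrative stand-alone sketch, not the kernel's zone-locking machinery:

#include <stdio.h>

#define NR_ZONES 2

/* One in-flight write per zone; a locked zone refuses new writes. */
static int zone_locked[NR_ZONES];

static int dispatch(const int *pending_zone, int npending)
{
	int i;

	for (i = 0; i < npending; i++) {
		if (!zone_locked[pending_zone[i]]) {
			zone_locked[pending_zone[i]] = 1;
			return i; /* index of dispatched request */
		}
	}
	return -1; /* all target zones locked: stalled */
}

int main(void)
{
	int pending[2] = { 0, 0 }; /* two writes to zone 0 */

	printf("first dispatch: rq %d\n", dispatch(pending, 2));
	printf("while zone locked: rq %d\n", dispatch(pending + 1, 1));

	zone_locked[0] = 0; /* completion unlocks the zone... */
	/* ...and the marked restart re-runs dispatch, so it succeeds. */
	printf("after restart: rq %d\n", dispatch(pending + 1, 1));
	return 0;
}
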
@@ -761,7 +772,7 @@
#endif
static struct elevator_type mq_deadline = {
- .ops.mq = {
+ .ops = {
.insert_requests = dd_insert_requests,
.dispatch_request = dd_dispatch_request,
.prepare_request = dd_prepare_request,
@@ -777,13 +788,13 @@
.exit_sched = dd_exit_queue,
},
- .uses_mq = true,
#ifdef CONFIG_BLK_DEBUG_FS
.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
.elevator_attrs = deadline_attrs,
.elevator_name = "mq-deadline",
.elevator_alias = "deadline",
+ .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
deleted file mode 100644
index 2d1b15d..0000000
--- a/block/noop-iosched.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * elevator noop
- */
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-struct noop_data {
- struct list_head queue;
-};
-
-static void noop_merged_requests(struct request_queue *q, struct request *rq,
- struct request *next)
-{
- list_del_init(&next->queuelist);
-}
-
-static int noop_dispatch(struct request_queue *q, int force)
-{
- struct noop_data *nd = q->elevator->elevator_data;
- struct request *rq;
-
- rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
- if (rq) {
- list_del_init(&rq->queuelist);
- elv_dispatch_sort(q, rq);
- return 1;
- }
- return 0;
-}
-
-static void noop_add_request(struct request_queue *q, struct request *rq)
-{
- struct noop_data *nd = q->elevator->elevator_data;
-
- list_add_tail(&rq->queuelist, &nd->queue);
-}
-
-static struct request *
-noop_former_request(struct request_queue *q, struct request *rq)
-{
- struct noop_data *nd = q->elevator->elevator_data;
-
- if (rq->queuelist.prev == &nd->queue)
- return NULL;
- return list_prev_entry(rq, queuelist);
-}
-
-static struct request *
-noop_latter_request(struct request_queue *q, struct request *rq)
-{
- struct noop_data *nd = q->elevator->elevator_data;
-
- if (rq->queuelist.next == &nd->queue)
- return NULL;
- return list_next_entry(rq, queuelist);
-}
-
-static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
-{
- struct noop_data *nd;
- struct elevator_queue *eq;
-
- eq = elevator_alloc(q, e);
- if (!eq)
- return -ENOMEM;
-
- nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
- if (!nd) {
- kobject_put(&eq->kobj);
- return -ENOMEM;
- }
- eq->elevator_data = nd;
-
- INIT_LIST_HEAD(&nd->queue);
-
- spin_lock_irq(q->queue_lock);
- q->elevator = eq;
- spin_unlock_irq(q->queue_lock);
- return 0;
-}
-
-static void noop_exit_queue(struct elevator_queue *e)
-{
- struct noop_data *nd = e->elevator_data;
-
- BUG_ON(!list_empty(&nd->queue));
- kfree(nd);
-}
-
-static struct elevator_type elevator_noop = {
- .ops.sq = {
- .elevator_merge_req_fn = noop_merged_requests,
- .elevator_dispatch_fn = noop_dispatch,
- .elevator_add_req_fn = noop_add_request,
- .elevator_former_req_fn = noop_former_request,
- .elevator_latter_req_fn = noop_latter_request,
- .elevator_init_fn = noop_init_queue,
- .elevator_exit_fn = noop_exit_queue,
- },
- .elevator_name = "noop",
- .elevator_owner = THIS_MODULE,
-};
-
-static int __init noop_init(void)
-{
- return elv_register(&elevator_noop);
-}
-
-static void __exit noop_exit(void)
-{
- elv_unregister(&elevator_noop);
-}
-
-module_init(noop_init);
-module_exit(noop_exit);
-
-
-MODULE_AUTHOR("Jens Axboe");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("No-op IO scheduler");
diff --git a/block/opal_proto.h b/block/opal_proto.h
index e20be82..5532412 100644
--- a/block/opal_proto.h
+++ b/block/opal_proto.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright © 2016 Intel Corporation
*
* Authors:
* Rafael Antognolli <rafael.antognolli@intel.com>
* Scott Bauer <scott.bauer@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/types.h>
@@ -106,6 +98,7 @@
OPAL_ENTERPRISE_BANDMASTER0_UID,
OPAL_ENTERPRISE_ERASEMASTER_UID,
/* tables */
+ OPAL_TABLE_TABLE,
OPAL_LOCKINGRANGE_GLOBAL,
OPAL_LOCKINGRANGE_ACE_RDLOCKED,
OPAL_LOCKINGRANGE_ACE_WRLOCKED,
@@ -126,8 +119,6 @@
OPAL_UID_HEXFF,
};
-#define OPAL_METHOD_LENGTH 8
-
/* Enum for indexing the OPALMETHOD array */
enum opal_method {
OPAL_PROPERTIES,
@@ -160,6 +151,20 @@
OPAL_STARTCOLUMN = 0x03,
OPAL_ENDCOLUMN = 0x04,
OPAL_VALUES = 0x01,
+ /* table table */
+ OPAL_TABLE_UID = 0x00,
+ OPAL_TABLE_NAME = 0x01,
+ OPAL_TABLE_COMMON = 0x02,
+ OPAL_TABLE_TEMPLATE = 0x03,
+ OPAL_TABLE_KIND = 0x04,
+ OPAL_TABLE_COLUMN = 0x05,
+ OPAL_TABLE_COLUMNS = 0x06,
+ OPAL_TABLE_ROWS = 0x07,
+ OPAL_TABLE_ROWS_FREE = 0x08,
+ OPAL_TABLE_ROW_BYTES = 0x09,
+ OPAL_TABLE_LASTID = 0x0A,
+ OPAL_TABLE_MIN = 0x0B,
+ OPAL_TABLE_MAX = 0x0C,
/* authority table */
OPAL_PIN = 0x03,
/* locking tokens */
@@ -170,9 +175,11 @@
OPAL_READLOCKED = 0x07,
OPAL_WRITELOCKED = 0x08,
OPAL_ACTIVEKEY = 0x0A,
+ /* lockingsp table */
+ OPAL_LIFECYCLE = 0x06,
/* locking info table */
OPAL_MAXRANGES = 0x04,
- /* mbr control */
+ /* mbr control */
OPAL_MBRENABLE = 0x01,
OPAL_MBRDONE = 0x02,
/* properties */
diff --git a/block/partition-generic.c b/block/partition-generic.c
index d3d14e8..aee643c 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -120,13 +120,9 @@
{
struct hd_struct *p = dev_to_part(dev);
struct request_queue *q = part_to_disk(p)->queue;
- unsigned int inflight[2];
- int cpu;
+ unsigned int inflight;
- cpu = part_stat_lock();
- part_round_stats(q, cpu, p);
- part_stat_unlock();
- part_in_flight(q, p, inflight);
+ inflight = part_in_flight(q, p);
return sprintf(buf,
"%8lu %8lu %8llu %8u "
"%8lu %8lu %8llu %8u "
@@ -141,7 +137,7 @@
part_stat_read(p, merges[STAT_WRITE]),
(unsigned long long)part_stat_read(p, sectors[STAT_WRITE]),
(unsigned int)part_stat_read_msecs(p, STAT_WRITE),
- inflight[0],
+ inflight,
jiffies_to_msecs(part_stat_read(p, io_ticks)),
jiffies_to_msecs(part_stat_read(p, time_in_queue)),
part_stat_read(p, ios[STAT_DISCARD]),
@@ -249,9 +245,10 @@
.uevent = part_uevent,
};
-static void delete_partition_rcu_cb(struct rcu_head *head)
+static void delete_partition_work_fn(struct work_struct *work)
{
- struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
+ struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct,
+ rcu_work);
part->start_sect = 0;
part->nr_sects = 0;
@@ -262,7 +259,8 @@
void __delete_partition(struct percpu_ref *ref)
{
struct hd_struct *part = container_of(ref, struct hd_struct, ref);
- call_rcu(&part->rcu_head, delete_partition_rcu_cb);
+ INIT_RCU_WORK(&part->rcu_work, delete_partition_work_fn);
+ queue_rcu_work(system_wq, &part->rcu_work);
}
/*
@@ -287,6 +285,13 @@
kobject_put(part->holder_dir);
device_del(part_to_dev(part));
+ /*
+ * Remove the gendisk pointer from the idr so that it cannot be
+ * looked up during the RCU grace period that precedes freeing the
+ * gendisk, preventing use-after-free. Note that the device number
+ * stays "in use" until we really free the gendisk.
+ */
+ blk_invalidate_devt(part_devt(part));
hd_struct_kill(part);
}
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
index 37b9710..702689a 100644
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -194,7 +194,7 @@
Normal partitions are now called Basic Disks under Windows 2000, XP,
and Vista.
- For a fuller description read <file:Documentation/ldm.txt>.
+ For a fuller description read <file:Documentation/admin-guide/ldm.rst>.
If unsure, say N.
diff --git a/block/partitions/acorn.c b/block/partitions/acorn.c
index fbeb697..7587700 100644
--- a/block/partitions/acorn.c
+++ b/block/partitions/acorn.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/fs/partitions/acorn.c
- *
* Copyright (c) 1996-2000 Russell King.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Scan ADFS partitions on hard disk drives. Unfortunately, there
* isn't a standard for partitioning drives on Acorn machines, so
* every single manufacturer of SCSI and IDE cards created their own
diff --git a/block/partitions/aix.h b/block/partitions/aix.h
index e0c66a9..b4449f0 100644
--- a/block/partitions/aix.h
+++ b/block/partitions/aix.h
@@ -1 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
extern int aix_partition(struct parsed_partitions *state);
diff --git a/block/partitions/amiga.h b/block/partitions/amiga.h
index d094585..7e63f4d 100644
--- a/block/partitions/amiga.h
+++ b/block/partitions/amiga.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/amiga.h
*/
diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
index 60fb3df..f1edd54 100644
--- a/block/partitions/cmdline.c
+++ b/block/partitions/cmdline.c
@@ -11,7 +11,7 @@
*
* The format for the command line is just like mtdparts.
*
- * For further information, see "Documentation/block/cmdline-partition.txt"
+ * For further information, see "Documentation/block/cmdline-partition.rst"
*
*/
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 39f70d9..db2fef7 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/************************************************************
* EFI GUID Partition Table handling
*
@@ -7,21 +8,6 @@
* efi.[ch] by Matt Domsch <Matt_Domsch@dell.com>
* Copyright 2000,2001,2002,2004 Dell Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
* TODO:
*
* Changelog:
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
index abd0b19..3e85761 100644
--- a/block/partitions/efi.h
+++ b/block/partitions/efi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/************************************************************
* EFI GUID Partition Table
* Per Intel EFI Specification v1.02
@@ -5,21 +6,6 @@
*
* By Matt Domsch <Matt_Domsch@dell.com> Fri Sep 22 22:15:56 CDT 2000
* Copyright 2000,2001 Dell Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
************************************************************/
#ifndef FS_PART_EFI_H_INCLUDED
diff --git a/block/partitions/ibm.h b/block/partitions/ibm.h
index 08fb080..8bf13fe 100644
--- a/block/partitions/ibm.h
+++ b/block/partitions/ibm.h
@@ -1 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
int ibm_partition(struct parsed_partitions *);
diff --git a/block/partitions/karma.h b/block/partitions/karma.h
index c764b2e..48e074d 100644
--- a/block/partitions/karma.h
+++ b/block/partitions/karma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/karma.h
*/
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 16766f2..fe5d970 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/**
* ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
*
@@ -6,21 +7,6 @@
* Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
*
* Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program (in the main directory of the source in the file COPYING); if
- * not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
- * Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
@@ -33,7 +19,7 @@
#include "check.h"
#include "msdos.h"
-/**
+/*
* ldm_debug/info/error/crit - Output an error message
* @f: A printf format string containing the message
* @...: Variables to substitute into @f
diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h
index f4c6055..1ca63e9 100644
--- a/block/partitions/ldm.h
+++ b/block/partitions/ldm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/**
* ldm - Part of the Linux-NTFS project.
*
@@ -6,21 +7,6 @@
* Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
*
* Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program (in the main directory of the Linux-NTFS source
- * in the file COPYING); if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _FS_PT_LDM_H_
diff --git a/block/partitions/msdos.h b/block/partitions/msdos.h
index 38c781c..fcacfc4 100644
--- a/block/partitions/msdos.h
+++ b/block/partitions/msdos.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/msdos.h
*/
diff --git a/block/partitions/osf.h b/block/partitions/osf.h
index 20ed231..4d8088e 100644
--- a/block/partitions/osf.h
+++ b/block/partitions/osf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/osf.h
*/
diff --git a/block/partitions/sgi.h b/block/partitions/sgi.h
index b9553eb..a5b77c3 100644
--- a/block/partitions/sgi.h
+++ b/block/partitions/sgi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/sgi.h
*/
diff --git a/block/partitions/sun.h b/block/partitions/sun.h
index 2424baa..ae1b9ee 100644
--- a/block/partitions/sun.h
+++ b/block/partitions/sun.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/sun.h
*/
diff --git a/block/partitions/sysv68.h b/block/partitions/sysv68.h
index bf2f5ff..4fb6b8e 100644
--- a/block/partitions/sysv68.h
+++ b/block/partitions/sysv68.h
@@ -1 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
extern int sysv68_partition(struct parsed_partitions *state);
diff --git a/block/partitions/ultrix.h b/block/partitions/ultrix.h
index a3cc00b..9f676ce 100644
--- a/block/partitions/ultrix.h
+++ b/block/partitions/ultrix.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/ultrix.h
*/
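
The header conversions above replace the GPL boilerplate with SPDX identifiers per Documentation/process/license-rules.rst: the identifier sits on the first line of the file, as a `//` comment in .c sources and a `/* */` comment in headers (ldm.h above is the odd one out, keeping the `//` style of its companion .c file). The two forms, side by side:

// SPDX-License-Identifier: GPL-2.0-or-later     (first line of a .c file)
/* SPDX-License-Identifier: GPL-2.0 */           (first line of a .h file)
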
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 533f4ae..f5e0ad6 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
- *
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/block/sed-opal.c b/block/sed-opal.c
index e0de4dd..b4c7619 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2016 Intel Corporation
*
* Authors:
* Scott Bauer <scott.bauer@intel.com>
* Rafael Antognolli <rafael.antognolli@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":OPAL: " fmt
@@ -34,6 +26,9 @@
#define IO_BUFFER_LENGTH 2048
#define MAX_TOKS 64
+/* Number of bytes needed by cmd_finalize. */
+#define CMD_FINALIZE_BYTES_NEEDED 7
+
struct opal_step {
int (*fn)(struct opal_dev *dev, void *data);
void *data;
@@ -85,7 +80,6 @@
void *data;
sec_send_recv *send_recv;
- const struct opal_step *steps;
struct mutex dev_lock;
u16 comid;
u32 hsn;
@@ -135,7 +129,8 @@
{ 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x84, 0x01 },
/* tables */
-
+ [OPAL_TABLE_TABLE] =
+ { 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01 },
[OPAL_LOCKINGRANGE_GLOBAL] =
{ 0x00, 0x00, 0x08, 0x02, 0x00, 0x00, 0x00, 0x01 },
[OPAL_LOCKINGRANGE_ACE_RDLOCKED] =
@@ -156,8 +151,7 @@
{ 0x00, 0x00, 0x08, 0x01, 0x00, 0x00, 0x00, 0x00 },
/* C_PIN_TABLE object ID's */
-
- [OPAL_C_PIN_MSID] =
+ [OPAL_C_PIN_MSID] =
{ 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x84, 0x02},
[OPAL_C_PIN_SID] =
{ 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x01},
@@ -165,7 +159,6 @@
{ 0x00, 0x00, 0x00, 0x0B, 0x00, 0x01, 0x00, 0x01},
/* half UID's (only first 4 bytes used) */
-
[OPAL_HALF_UID_AUTHORITY_OBJ_REF] =
{ 0x00, 0x00, 0x0C, 0x05, 0xff, 0xff, 0xff, 0xff },
[OPAL_HALF_UID_BOOLEAN_ACE] =
@@ -181,7 +174,7 @@
* Derived from: TCG_Storage_Architecture_Core_Spec_v2.01_r1.00
* Section: 6.3 Assigned UIDs
*/
-static const u8 opalmethod[][OPAL_UID_LENGTH] = {
+static const u8 opalmethod[][OPAL_METHOD_LENGTH] = {
[OPAL_PROPERTIES] =
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01 },
[OPAL_STARTSESSION] =
@@ -217,6 +210,7 @@
};
static int end_opal_session_error(struct opal_dev *dev);
+static int opal_discovery0_step(struct opal_dev *dev);
struct opal_suspend_data {
struct opal_lock_unlock unlk;
@@ -378,41 +372,54 @@
{
const struct d0_geometry_features *geo = data;
- dev->align = geo->alignment_granularity;
- dev->lowest_lba = geo->lowest_aligned_lba;
+ dev->align = be64_to_cpu(geo->alignment_granularity);
+ dev->lowest_lba = be64_to_cpu(geo->lowest_aligned_lba);
}
-static int next(struct opal_dev *dev)
+static int execute_step(struct opal_dev *dev,
+ const struct opal_step *step, size_t stepIndex)
{
- const struct opal_step *step;
- int state = 0, error = 0;
+ int error = step->fn(dev, step->data);
- do {
- step = &dev->steps[state];
- if (!step->fn)
- break;
+ if (error) {
+ pr_debug("Step %zu (%pS) failed with error %d: %s\n",
+ stepIndex, step->fn, error,
+ opal_error_to_human(error));
+ }
- error = step->fn(dev, step->data);
- if (error) {
- pr_debug("Error on step function: %d with error %d: %s\n",
- state, error,
- opal_error_to_human(error));
+ return error;
+}
- /* For each OPAL command we do a discovery0 then we
- * start some sort of session.
- * If we haven't passed state 1 then there was an error
- * on discovery0 or during the attempt to start a
- * session. Therefore we shouldn't attempt to terminate
- * a session, as one has not yet been created.
- */
- if (state > 1) {
- end_opal_session_error(dev);
- return error;
- }
+static int execute_steps(struct opal_dev *dev,
+ const struct opal_step *steps, size_t n_steps)
+{
+ size_t state = 0;
+ int error;
- }
- state++;
- } while (!error);
+ /* first do a discovery0 */
+ error = opal_discovery0_step(dev);
+ if (error)
+ return error;
+
+ for (state = 0; state < n_steps; state++) {
+ error = execute_step(dev, &steps[state], state);
+ if (error)
+ goto out_error;
+ }
+
+ return 0;
+
+out_error:
+ /*
+ * For each OPAL command the first step in steps starts some sort of
+ * session. If an error occurred in the initial discovery0 or if an
+ * error occurred in the first step (and thus stopping the loop with
+ * state == 0) then there was an error before or during the attempt to
+ * start a session. Therefore we shouldn't attempt to terminate a
+ * session, as one has not yet been created.
+ */
+ if (state > 0)
+ end_opal_session_error(dev);
return error;
}
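
The refactor above retires the driver-global dev->steps cursor: execute_steps() takes an explicit array plus length, runs the shared discovery0 phase itself (so the step tables later in this patch all shed their leading { opal_discovery0, } entry), and tears a session down only when at least the session-starting step 0 has run. The same hunk also fixes an endianness bug, passing the big-endian discovery0 geometry fields through be64_to_cpu(). A minimal userspace model of the step-table pattern, with invented toy steps:

#include <stdio.h>
#include <stddef.h>

struct dev { int session_open; };

struct step {
	int (*fn)(struct dev *dev, void *data);
	void *data;
};

static int start_session(struct dev *dev, void *data)
{
	(void)data;
	dev->session_open = 1;
	return 0;
}

static int do_work(struct dev *dev, void *data)
{
	(void)dev;
	return *(int *)data;	/* toy step: fail if *data != 0 */
}

static void end_session(struct dev *dev)
{
	dev->session_open = 0;
}

static int execute_steps(struct dev *dev, const struct step *steps,
			 size_t n_steps)
{
	size_t state;
	int error = 0;

	for (state = 0; state < n_steps; state++) {
		error = steps[state].fn(dev, steps[state].data);
		if (error)
			break;
	}

	/* step 0 opens the session; only unwind once it has run */
	if (error && state > 0)
		end_session(dev);

	return error;
}

int main(void)
{
	struct dev dev = { 0 };
	int fail = -5;
	const struct step steps[] = {
		{ start_session, NULL },
		{ do_work, &fail },	/* fails, triggering the unwind */
	};
	int err = execute_steps(&dev, steps, sizeof(steps) / sizeof(*steps));

	printf("err=%d session_open=%d\n", err, dev.session_open);
	return 0;
}
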
@@ -507,18 +514,43 @@
ret = opal_recv_cmd(dev);
if (ret)
return ret;
+
return opal_discovery0_end(dev);
}
+static int opal_discovery0_step(struct opal_dev *dev)
+{
+ const struct opal_step discovery0_step = {
+ opal_discovery0,
+ };
+
+ return execute_step(dev, &discovery0_step, 0);
+}
+
+static size_t remaining_size(struct opal_dev *cmd)
+{
+ return IO_BUFFER_LENGTH - cmd->pos;
+}
+
+static bool can_add(int *err, struct opal_dev *cmd, size_t len)
+{
+ if (*err)
+ return false;
+
+ if (remaining_size(cmd) < len) {
+ pr_debug("Error adding %zu bytes: end of buffer.\n", len);
+ *err = -ERANGE;
+ return false;
+ }
+
+ return true;
+}
+
static void add_token_u8(int *err, struct opal_dev *cmd, u8 tok)
{
- if (*err)
+ if (!can_add(err, cmd, 1))
return;
- if (cmd->pos >= IO_BUFFER_LENGTH - 1) {
- pr_debug("Error adding u8: end of buffer.\n");
- *err = -ERANGE;
- return;
- }
+
cmd->cmd[cmd->pos++] = tok;
}
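
can_add() centralizes the capacity check that add_token_u8() and the other emitters used to open-code, while keeping the error-latch idiom: every append first consults the shared *err, so long runs of add_token_*() calls need no per-call error handling and are checked once at the end. A small standalone model of the idiom (buffer size and names illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BUF_LEN 8

struct cmd {
	unsigned char buf[BUF_LEN];
	size_t pos;
};

static size_t remaining(const struct cmd *c)
{
	return BUF_LEN - c->pos;
}

static bool can_add(int *err, struct cmd *c, size_t len)
{
	if (*err)
		return false;	/* latched: later appends become no-ops */

	if (remaining(c) < len) {
		*err = -34;	/* latch the first failure (-ERANGE) */
		return false;
	}

	return true;
}

static void add_u8(int *err, struct cmd *c, unsigned char v)
{
	if (!can_add(err, c, 1))
		return;

	c->buf[c->pos++] = v;
}

int main(void)
{
	struct cmd c = { .pos = 0 };
	int err = 0;

	/* ten appends, no per-call checks; capacity runs out at i == 8 */
	for (int i = 0; i < 10; i++)
		add_u8(&err, &c, (unsigned char)i);

	printf("wrote %zu byte(s), err = %d\n", c.pos, err);
	return 0;
}
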
@@ -545,13 +577,13 @@
header0 |= bytestring ? MEDIUM_ATOM_BYTESTRING : 0;
header0 |= has_sign ? MEDIUM_ATOM_SIGNED : 0;
header0 |= (len >> 8) & MEDIUM_ATOM_LEN_MASK;
+
cmd->cmd[cmd->pos++] = header0;
cmd->cmd[cmd->pos++] = len;
}
static void add_token_u64(int *err, struct opal_dev *cmd, u64 number)
{
-
size_t len;
int msb;
@@ -563,9 +595,8 @@
msb = fls64(number);
len = DIV_ROUND_UP(msb, 8);
- if (cmd->pos >= IO_BUFFER_LENGTH - len - 1) {
+ if (!can_add(err, cmd, len + 1)) {
pr_debug("Error adding u64: end of buffer.\n");
- *err = -ERANGE;
return;
}
add_short_atom_header(cmd, false, false, len);
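
The u64 path now reuses can_add(), but the length math is unchanged: fls64() gives the 1-based position of the highest set bit and DIV_ROUND_UP(msb, 8) the fewest bytes that can hold the value, so the short atom never carries leading zero bytes — 0x1234 has its top bit at position 13 and encodes in DIV_ROUND_UP(13, 8) = 2 bytes. Modeled standalone:

#include <stdint.h>
#include <stdio.h>

/* 1-based index of the most significant set bit (0 for 0) */
static int fls64(uint64_t x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t v = 0x1234;			/* top bit at position 13 */
	int len = DIV_ROUND_UP(fls64(v), 8);	/* -> 2 bytes */

	printf("0x%llx encodes in %d byte(s)\n", (unsigned long long)v, len);
	return 0;
}
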
@@ -573,24 +604,19 @@
add_token_u8(err, cmd, number >> (len * 8));
}
-static void add_token_bytestring(int *err, struct opal_dev *cmd,
- const u8 *bytestring, size_t len)
+static u8 *add_bytestring_header(int *err, struct opal_dev *cmd, size_t len)
{
size_t header_len = 1;
bool is_short_atom = true;
- if (*err)
- return;
-
if (len & ~SHORT_ATOM_LEN_MASK) {
header_len = 2;
is_short_atom = false;
}
- if (len >= IO_BUFFER_LENGTH - cmd->pos - header_len) {
+ if (!can_add(err, cmd, header_len + len)) {
pr_debug("Error adding bytestring: end of buffer.\n");
- *err = -ERANGE;
- return;
+ return NULL;
}
if (is_short_atom)
@@ -598,9 +624,19 @@
else
add_medium_atom_header(cmd, true, false, len);
- memcpy(&cmd->cmd[cmd->pos], bytestring, len);
- cmd->pos += len;
+ return &cmd->cmd[cmd->pos];
+}
+static void add_token_bytestring(int *err, struct opal_dev *cmd,
+ const u8 *bytestring, size_t len)
+{
+ u8 *start;
+
+ start = add_bytestring_header(err, cmd, len);
+ if (!start)
+ return;
+ memcpy(start, bytestring, len);
+ cmd->pos += len;
}
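
Splitting add_bytestring_header() out of add_token_bytestring() lets callers reserve space and fill the payload in place: the helper emits a one-byte short-atom header when the length fits the short atom's length field (SHORT_ATOM_LEN_MASK) and a two-byte medium-atom header otherwise, then returns a pointer into the command buffer. write_shadow_mbr() below relies on this to copy_from_user() directly into the command, with no bounce buffer.
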
static int build_locking_range(u8 *buffer, size_t length, u8 lr)
@@ -614,6 +650,7 @@
if (lr == 0)
return 0;
+
buffer[5] = LOCKING_RANGE_NON_GLOBAL;
buffer[7] = lr;
@@ -623,7 +660,7 @@
static int build_locking_user(u8 *buffer, size_t length, u8 lr)
{
if (length > OPAL_UID_LENGTH) {
- pr_debug("Can't build locking range user, Length OOB\n");
+ pr_debug("Can't build locking range user. Length OOB\n");
return -ERANGE;
}
@@ -649,6 +686,13 @@
struct opal_header *hdr;
int err = 0;
+ /*
+ * Close the parameter list opened from cmd_start.
+ * The number of bytes added must be equal to
+ * CMD_FINALIZE_BYTES_NEEDED.
+ */
+ add_token_u8(&err, cmd, OPAL_ENDLIST);
+
add_token_u8(&err, cmd, OPAL_ENDOFDATA);
add_token_u8(&err, cmd, OPAL_STARTLIST);
add_token_u8(&err, cmd, 0);
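
CMD_FINALIZE_BYTES_NEEDED is the size of the fixed tail cmd_finalize() now appends: the ENDLIST closing the parameter list opened by cmd_start(), ENDOFDATA, and a STARTLIST/ENDLIST pair wrapping three zero status bytes (the hunk shows only the first zero): 1 + 1 + 1 + 3 + 1 = 7 bytes. Reserving that many up front, as write_shadow_mbr() does below, guarantees finalization itself can never overrun the 2048-byte IO buffer.
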
@@ -687,6 +731,11 @@
{
const struct opal_resp_tok *tok;
+ if (!resp) {
+ pr_debug("Response is NULL\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (n >= resp->num) {
pr_debug("Token number doesn't exist: %d, resp: %d\n",
n, resp->num);
@@ -856,10 +905,6 @@
num_entries++;
}
- if (num_entries == 0) {
- pr_debug("Couldn't parse response.\n");
- return -EINVAL;
- }
resp->num = num_entries;
return 0;
@@ -869,27 +914,19 @@
const char **store)
{
u8 skip;
- const struct opal_resp_tok *token;
+ const struct opal_resp_tok *tok;
*store = NULL;
- if (!resp) {
- pr_debug("Response is NULL\n");
+ tok = response_get_token(resp, n);
+ if (IS_ERR(tok))
return 0;
- }
- if (n >= resp->num) {
- pr_debug("Response has %d tokens. Can't access %d\n",
- resp->num, n);
- return 0;
- }
-
- token = &resp->toks[n];
- if (token->type != OPAL_DTA_TOKENID_BYTESTRING) {
+ if (tok->type != OPAL_DTA_TOKENID_BYTESTRING) {
pr_debug("Token is not a byte string!\n");
return 0;
}
- switch (token->width) {
+ switch (tok->width) {
case OPAL_WIDTH_TINY:
case OPAL_WIDTH_SHORT:
skip = 1;
@@ -905,37 +942,30 @@
return 0;
}
- *store = token->pos + skip;
- return token->len - skip;
+ *store = tok->pos + skip;
+
+ return tok->len - skip;
}
static u64 response_get_u64(const struct parsed_resp *resp, int n)
{
- if (!resp) {
- pr_debug("Response is NULL\n");
+ const struct opal_resp_tok *tok;
+
+ tok = response_get_token(resp, n);
+ if (IS_ERR(tok))
+ return 0;
+
+ if (tok->type != OPAL_DTA_TOKENID_UINT) {
+ pr_debug("Token is not unsigned int: %d\n", tok->type);
return 0;
}
- if (n >= resp->num) {
- pr_debug("Response has %d tokens. Can't access %d\n",
- resp->num, n);
+ if (tok->width != OPAL_WIDTH_TINY && tok->width != OPAL_WIDTH_SHORT) {
+ pr_debug("Atom is not short or tiny: %d\n", tok->width);
return 0;
}
- if (resp->toks[n].type != OPAL_DTA_TOKENID_UINT) {
- pr_debug("Token is not unsigned it: %d\n",
- resp->toks[n].type);
- return 0;
- }
-
- if (!(resp->toks[n].width == OPAL_WIDTH_TINY ||
- resp->toks[n].width == OPAL_WIDTH_SHORT)) {
- pr_debug("Atom is not short or tiny: %d\n",
- resp->toks[n].width);
- return 0;
- }
-
- return resp->toks[n].stored.u;
+ return tok->stored.u;
}
static bool response_token_matches(const struct opal_resp_tok *token, u8 match)
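
response_get_string() and response_get_u64() now funnel their NULL and bounds checking through response_get_token(), which reports failure ERR_PTR-style: the errno is encoded in the pointer value itself, in the top, never-mappable page of the address space, so a single return carries either a token or an error. A standalone model of the idiom, with constants simplified:

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct tok {
	uint64_t val;
};

static const struct tok *get_token(const struct tok *toks, int num, int n)
{
	if (!toks || n >= num)
		return ERR_PTR(-22);	/* -EINVAL */

	return &toks[n];
}

int main(void)
{
	const struct tok toks[2] = { { 7 }, { 9 } };
	const struct tok *t = get_token(toks, 2, 5);

	if (IS_ERR(t))
		printf("error %ld\n", PTR_ERR(t));

	return 0;
}
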
@@ -991,6 +1021,27 @@
memset(dev->cmd, 0, IO_BUFFER_LENGTH);
}
+static int cmd_start(struct opal_dev *dev, const u8 *uid, const u8 *method)
+{
+ int err = 0;
+
+ clear_opal_cmd(dev);
+ set_comid(dev, dev->comid);
+
+ add_token_u8(&err, dev, OPAL_CALL);
+ add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
+ add_token_bytestring(&err, dev, method, OPAL_METHOD_LENGTH);
+
+ /*
+ * Every method call is followed by its parameters enclosed within
+ * OPAL_STARTLIST and OPAL_ENDLIST tokens. We automatically open the
+ * parameter list here and close it later in cmd_finalize.
+ */
+ add_token_u8(&err, dev, OPAL_STARTLIST);
+
+ return err;
+}
+
static int start_opal_session_cont(struct opal_dev *dev)
{
u32 hsn, tsn;
@@ -1010,6 +1061,7 @@
dev->hsn = hsn;
dev->tsn = tsn;
+
return 0;
}
@@ -1032,6 +1084,7 @@
{
dev->hsn = 0;
dev->tsn = 0;
+
return parse_and_check_status(dev);
}
@@ -1050,30 +1103,77 @@
return opal_send_recv(dev, cont);
}
+/*
+ * request @column from table @table on device @dev. On success, the column
+ * data will be available in dev->resp->tok[4]
+ */
+static int generic_get_column(struct opal_dev *dev, const u8 *table,
+ u64 column)
+{
+ int err;
+
+ err = cmd_start(dev, table, opalmethod[OPAL_GET]);
+
+ add_token_u8(&err, dev, OPAL_STARTLIST);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_STARTCOLUMN);
+ add_token_u64(&err, dev, column);
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_ENDCOLUMN);
+ add_token_u64(&err, dev, column);
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_ENDLIST);
+
+ if (err)
+ return err;
+
+ return finalize_and_send(dev, parse_and_check_status);
+}
+
+/*
+ * see TCG SAS 5.3.2.3 for a description of the available columns
+ *
+ * the result is provided in dev->resp->tok[4]
+ */
+static int generic_get_table_info(struct opal_dev *dev, enum opal_uid table,
+ u64 column)
+{
+ u8 uid[OPAL_UID_LENGTH];
+ const unsigned int half = OPAL_UID_LENGTH/2;
+
+ /* sed-opal UIDs can be split in two halves:
+ * first: actual table index
+ * second: relative index in the table
+ * so we have to get the first half of the OPAL_TABLE_TABLE and use the
+ * first part of the target table as relative index into that table
+ */
+ memcpy(uid, opaluid[OPAL_TABLE_TABLE], half);
+ memcpy(uid+half, opaluid[table], half);
+
+ return generic_get_column(dev, uid, column);
+}
+
static int gen_key(struct opal_dev *dev, void *data)
{
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len));
kfree(dev->prev_data);
dev->prev_data = NULL;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GENKEY],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+ err = cmd_start(dev, uid, opalmethod[OPAL_GENKEY]);
if (err) {
pr_debug("Error building gen key command\n");
return err;
}
+
return finalize_and_send(dev, parse_and_check_status);
}
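
generic_get_column() factors out the Get-method call that get_active_key(), get_lsp_lifecycle() and get_msid_cpin_pin() below previously open-coded: a startColumn/endColumn pair naming a single column, with the result landing in dev->resp token 4 per the comment. generic_get_table_info() then builds the UID it queries by splicing halves — the first four bytes come from OPAL_TABLE_TABLE, and the target's own table half is reused as the row half, so the Table table is asked about the row describing the target. A standalone sketch of the splice, using the OPAL_TABLE_TABLE bytes added above (the target UID is illustrative):

#include <stdio.h>
#include <string.h>

#define UID_LEN 8
#define HALF (UID_LEN / 2)

int main(void)
{
	/* OPAL_TABLE_TABLE, as added in the opaluid table above */
	const unsigned char table_table[UID_LEN] = {
		0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01
	};
	/* illustrative target table UID */
	const unsigned char target[UID_LEN] = {
		0x00, 0x00, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00
	};
	unsigned char uid[UID_LEN];

	memcpy(uid, table_table, HALF);		/* table half: the Table table */
	memcpy(uid + HALF, target, HALF);	/* row half: target's table id */

	for (int i = 0; i < UID_LEN; i++)
		printf("%02x ", uid[i]);
	printf("\n");				/* 00 00 00 01 00 00 08 04 */
	return 0;
}
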
@@ -1086,12 +1186,14 @@
error = parse_and_check_status(dev);
if (error)
return error;
+
keylen = response_get_string(&dev->parsed, 4, &activekey);
if (!activekey) {
pr_debug("%s: Couldn't extract the Activekey from the response\n",
__func__);
return OPAL_INVAL_PARAM;
}
+
dev->prev_data = kmemdup(activekey, keylen, GFP_KERNEL);
if (!dev->prev_data)
@@ -1105,62 +1207,39 @@
static int get_active_key(struct opal_dev *dev, void *data)
{
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
+ int err;
u8 *lr = data;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
err = build_locking_range(uid, sizeof(uid), *lr);
if (err)
return err;
- err = 0;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* startCloumn */
- add_token_u8(&err, dev, 10); /* ActiveKey */
- add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* endColumn */
- add_token_u8(&err, dev, 10); /* ActiveKey */
- add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
- if (err) {
- pr_debug("Error building get active key command\n");
+ err = generic_get_column(dev, uid, OPAL_ACTIVEKEY);
+ if (err)
return err;
- }
- return finalize_and_send(dev, get_active_key_cont);
+ return get_active_key_cont(dev);
}
static int generic_lr_enable_disable(struct opal_dev *dev,
u8 *uid, bool rle, bool wle,
bool rl, bool wl)
{
- int err = 0;
+ int err;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 5); /* ReadLockEnabled */
+ add_token_u8(&err, dev, OPAL_READLOCKENABLED);
add_token_u8(&err, dev, rle);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 6); /* WriteLockEnabled */
+ add_token_u8(&err, dev, OPAL_WRITELOCKENABLED);
add_token_u8(&err, dev, wle);
add_token_u8(&err, dev, OPAL_ENDNAME);
@@ -1176,7 +1255,7 @@
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+
return err;
}
@@ -1189,6 +1268,7 @@
0, 0);
if (err)
pr_debug("Failed to create enable global lr command\n");
+
return err;
}
@@ -1197,10 +1277,7 @@
u8 uid[OPAL_UID_LENGTH];
struct opal_user_lr_setup *setup = data;
u8 lr;
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
lr = setup->session.opal_key.lr;
err = build_locking_range(uid, sizeof(uid), lr);
@@ -1210,45 +1287,38 @@
if (lr == 0)
err = enable_global_lr(dev, uid, setup);
else {
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET],
- OPAL_UID_LENGTH);
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* Ranges Start */
+ add_token_u8(&err, dev, OPAL_RANGESTART);
add_token_u64(&err, dev, setup->range_start);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* Ranges length */
+ add_token_u8(&err, dev, OPAL_RANGELENGTH);
add_token_u64(&err, dev, setup->range_length);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 5); /*ReadLockEnabled */
+ add_token_u8(&err, dev, OPAL_READLOCKENABLED);
add_token_u64(&err, dev, !!setup->RLE);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 6); /*WriteLockEnabled*/
+ add_token_u8(&err, dev, OPAL_WRITELOCKENABLED);
add_token_u64(&err, dev, !!setup->WLE);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
}
if (err) {
pr_debug("Error building Setup Locking range command.\n");
return err;
-
}
return finalize_and_send(dev, parse_and_check_status);
@@ -1261,32 +1331,25 @@
u8 key_len)
{
u32 hsn;
- int err = 0;
+ int err;
if (key == NULL && auth != OPAL_ANYBODY_UID)
return OPAL_INVAL_PARAM;
- clear_opal_cmd(dev);
-
- set_comid(dev, dev->comid);
hsn = GENERIC_HOST_SESSION_NUM;
+ err = cmd_start(dev, opaluid[OPAL_SMUID_UID],
+ opalmethod[OPAL_STARTSESSION]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_SMUID_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_STARTSESSION],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u64(&err, dev, hsn);
add_token_bytestring(&err, dev, opaluid[sp_type], OPAL_UID_LENGTH);
add_token_u8(&err, dev, 1);
switch (auth) {
case OPAL_ANYBODY_UID:
- add_token_u8(&err, dev, OPAL_ENDLIST);
break;
case OPAL_ADMIN1_UID:
case OPAL_SID_UID:
+ case OPAL_PSID_UID:
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, 0); /* HostChallenge */
add_token_bytestring(&err, dev, key, key_len);
@@ -1296,7 +1359,6 @@
add_token_bytestring(&err, dev, opaluid[auth],
OPAL_UID_LENGTH);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
break;
default:
pr_debug("Cannot start Admin SP session with auth %d\n", auth);
@@ -1324,6 +1386,7 @@
if (!key) {
const struct opal_key *okey = data;
+
ret = start_generic_opal_session(dev, OPAL_SID_UID,
OPAL_ADMINSP_UID,
okey->key,
@@ -1335,17 +1398,29 @@
kfree(key);
dev->prev_data = NULL;
}
+
return ret;
}
static int start_admin1LSP_opal_session(struct opal_dev *dev, void *data)
{
struct opal_key *key = data;
+
return start_generic_opal_session(dev, OPAL_ADMIN1_UID,
OPAL_LOCKINGSP_UID,
key->key, key->key_len);
}
+static int start_PSID_opal_session(struct opal_dev *dev, void *data)
+{
+ const struct opal_key *okey = data;
+
+ return start_generic_opal_session(dev, OPAL_PSID_UID,
+ OPAL_ADMINSP_UID,
+ okey->key,
+ okey->key_len);
+}
+
static int start_auth_opal_session(struct opal_dev *dev, void *data)
{
struct opal_session_info *session = data;
@@ -1356,30 +1431,21 @@
u8 *key = session->opal_key.key;
u32 hsn = GENERIC_HOST_SESSION_NUM;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- if (session->sum) {
+ if (session->sum)
err = build_locking_user(lk_ul_user, sizeof(lk_ul_user),
session->opal_key.lr);
- if (err)
- return err;
-
- } else if (session->who != OPAL_ADMIN1 && !session->sum) {
+ else if (session->who != OPAL_ADMIN1 && !session->sum)
err = build_locking_user(lk_ul_user, sizeof(lk_ul_user),
session->who - 1);
- if (err)
- return err;
- } else
+ else
memcpy(lk_ul_user, opaluid[OPAL_ADMIN1_UID], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_SMUID_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_STARTSESSION],
- OPAL_UID_LENGTH);
+ if (err)
+ return err;
- add_token_u8(&err, dev, OPAL_STARTLIST);
+ err = cmd_start(dev, opaluid[OPAL_SMUID_UID],
+ opalmethod[OPAL_STARTSESSION]);
+
add_token_u64(&err, dev, hsn);
add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID],
OPAL_UID_LENGTH);
@@ -1392,7 +1458,6 @@
add_token_u8(&err, dev, 3);
add_token_bytestring(&err, dev, lk_ul_user, OPAL_UID_LENGTH);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building STARTSESSION command.\n");
@@ -1404,18 +1469,10 @@
static int revert_tper(struct opal_dev *dev, void *data)
{
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_ADMINSP_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_REVERT],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+ err = cmd_start(dev, opaluid[OPAL_ADMINSP_UID],
+ opalmethod[OPAL_REVERT]);
if (err) {
pr_debug("Error building REVERT TPER command.\n");
return err;
@@ -1428,18 +1485,12 @@
{
struct opal_session_info *session = data;
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
memcpy(uid, opaluid[OPAL_USER1_UID], OPAL_UID_LENGTH);
uid[7] = session->who;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
@@ -1449,7 +1500,6 @@
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building Activate UserN command.\n");
@@ -1463,51 +1513,38 @@
{
struct opal_session_info *session = data;
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
if (build_locking_range(uid, sizeof(uid), session->opal_key.lr) < 0)
return -ERANGE;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_ERASE],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+ err = cmd_start(dev, uid, opalmethod[OPAL_ERASE]);
if (err) {
pr_debug("Error building Erase Locking Range Command.\n");
return err;
}
+
return finalize_and_send(dev, parse_and_check_status);
}
static int set_mbr_done(struct opal_dev *dev, void *data)
{
u8 *mbr_done_tf = data;
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ err = cmd_start(dev, opaluid[OPAL_MBRCONTROL],
+ opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_MBRCONTROL],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 2); /* Done */
+ add_token_u8(&err, dev, OPAL_MBRDONE);
add_token_u8(&err, dev, *mbr_done_tf); /* Done T or F */
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error Building set MBR Done command\n");
@@ -1520,26 +1557,20 @@
static int set_mbr_enable_disable(struct opal_dev *dev, void *data)
{
u8 *mbr_en_dis = data;
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ err = cmd_start(dev, opaluid[OPAL_MBRCONTROL],
+ opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_MBRCONTROL],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 1);
+ add_token_u8(&err, dev, OPAL_MBRENABLE);
add_token_u8(&err, dev, *mbr_en_dis);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error Building set MBR done command\n");
@@ -1549,29 +1580,89 @@
return finalize_and_send(dev, parse_and_check_status);
}
+static int write_shadow_mbr(struct opal_dev *dev, void *data)
+{
+ struct opal_shadow_mbr *shadow = data;
+ const u8 __user *src;
+ u8 *dst;
+ size_t off = 0;
+ u64 len;
+ int err = 0;
+
+ /* do we fit in the available shadow mbr space? */
+ err = generic_get_table_info(dev, OPAL_MBR, OPAL_TABLE_ROWS);
+ if (err) {
+ pr_debug("MBR: could not get shadow size\n");
+ return err;
+ }
+
+ len = response_get_u64(&dev->parsed, 4);
+ if (shadow->size > len || shadow->offset > len - shadow->size) {
+ pr_debug("MBR: does not fit in shadow (%llu vs. %llu)\n",
+ shadow->offset + shadow->size, len);
+ return -ENOSPC;
+ }
+
+ /* do the actual transmission(s) */
+ src = (u8 __user *)(uintptr_t)shadow->data;
+ while (off < shadow->size) {
+ err = cmd_start(dev, opaluid[OPAL_MBR], opalmethod[OPAL_SET]);
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_WHERE);
+ add_token_u64(&err, dev, shadow->offset + off);
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_VALUES);
+
+ /*
+ * The bytestring header is either 1 or 2 bytes, so assume 2.
+ * There also needs to be enough space to accommodate the
+ * trailing OPAL_ENDNAME (1 byte) and tokens added by
+ * cmd_finalize.
+ */
+ len = min(remaining_size(dev) - (2+1+CMD_FINALIZE_BYTES_NEEDED),
+ (size_t)(shadow->size - off));
+ pr_debug("MBR: write bytes %zu+%llu/%llu\n",
+ off, len, shadow->size);
+
+ dst = add_bytestring_header(&err, dev, len);
+ if (!dst)
+ break;
+ if (copy_from_user(dst, src + off, len))
+ err = -EFAULT;
+ dev->pos += len;
+
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+ if (err)
+ break;
+
+ err = finalize_and_send(dev, parse_and_check_status);
+ if (err)
+ break;
+
+ off += len;
+ }
+
+ return err;
+}
+
static int generic_pw_cmd(u8 *key, size_t key_len, u8 *cpin_uid,
struct opal_dev *dev)
{
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ err = cmd_start(dev, cpin_uid, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, cpin_uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* PIN */
+ add_token_u8(&err, dev, OPAL_PIN);
add_token_bytestring(&err, dev, key, key_len);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
return err;
}
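
write_shadow_mbr() first queries how many rows the shadow-MBR table exposes (generic_get_table_info(..., OPAL_TABLE_ROWS)) and rejects images that do not fit — the bound is written as `shadow->offset > len - shadow->size` so the sum can never wrap. It then streams the user buffer in IO_BUFFER_LENGTH-sized commands, each reserving two bytes for a worst-case bytestring header, one for the closing ENDNAME, and CMD_FINALIZE_BYTES_NEEDED for the trailer. A userspace model of just the chunk-size computation (all sizes illustrative):

#include <stddef.h>
#include <stdio.h>

#define IO_BUFFER_LENGTH 2048
#define RESERVED (2 + 1 + 7)	/* header + ENDNAME + finalize trailer */

int main(void)
{
	size_t tokens_before_payload = 40;	/* illustrative */
	size_t remaining = IO_BUFFER_LENGTH - tokens_before_payload;
	size_t total = 5000;			/* shadow-MBR image size */
	size_t off = 0;

	while (off < total) {
		size_t len = remaining - RESERVED;

		if (len > total - off)
			len = total - off;

		printf("chunk at %5zu: %zu byte(s)\n", off, len);
		off += len;
	}
	return 0;
}
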
@@ -1619,10 +1710,7 @@
u8 lr_buffer[OPAL_UID_LENGTH];
u8 user_uid[OPAL_UID_LENGTH];
struct opal_lock_unlock *lkul = data;
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
memcpy(lr_buffer, opaluid[OPAL_LOCKINGRANGE_ACE_RDLOCKED],
OPAL_UID_LENGTH);
@@ -1637,12 +1725,8 @@
user_uid[7] = lkul->session.who;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, lr_buffer, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET],
- OPAL_UID_LENGTH);
+ err = cmd_start(dev, lr_buffer, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
@@ -1680,7 +1764,6 @@
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building add user to locking range command.\n");
@@ -1697,9 +1780,6 @@
u8 read_locked = 1, write_locked = 1;
int err = 0;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
if (build_locking_range(lr_buffer, sizeof(lr_buffer),
lkul->session.opal_key.lr) < 0)
return -ERANGE;
@@ -1714,17 +1794,15 @@
write_locked = 0;
break;
case OPAL_LK:
- /* vars are initalized to locked */
+ /* vars are initialized to locked */
break;
default:
pr_debug("Tried to set an invalid locking state... returning to uland\n");
return OPAL_INVAL_PARAM;
}
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, lr_buffer, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
+ err = cmd_start(dev, lr_buffer, opalmethod[OPAL_SET]);
+
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
@@ -1741,12 +1819,12 @@
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building SET command.\n");
return err;
}
+
return finalize_and_send(dev, parse_and_check_status);
}
@@ -1775,7 +1853,7 @@
write_locked = 0;
break;
case OPAL_LK:
- /* vars are initalized to locked */
+ /* vars are initialized to locked */
break;
default:
pr_debug("Tried to set an invalid locking state.\n");
@@ -1788,6 +1866,7 @@
pr_debug("Error building SET command.\n");
return ret;
}
+
return finalize_and_send(dev, parse_and_check_status);
}
@@ -1796,17 +1875,10 @@
struct opal_lr_act *opal_act = data;
u8 user_lr[OPAL_UID_LENGTH];
u8 uint_3 = 0x83;
- int err = 0, i;
+ int err, i;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_ACTIVATE],
- OPAL_UID_LENGTH);
-
+ err = cmd_start(dev, opaluid[OPAL_LOCKINGSP_UID],
+ opalmethod[OPAL_ACTIVATE]);
if (opal_act->sum) {
err = build_locking_range(user_lr, sizeof(user_lr),
@@ -1814,7 +1886,6 @@
if (err)
return err;
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, uint_3);
add_token_u8(&err, dev, 6);
@@ -1829,11 +1900,6 @@
}
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
- } else {
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
}
if (err) {
@@ -1844,17 +1910,19 @@
return finalize_and_send(dev, parse_and_check_status);
}
-static int get_lsp_lifecycle_cont(struct opal_dev *dev)
+/* Determine if we're in the Manufactured Inactive or Active state */
+static int get_lsp_lifecycle(struct opal_dev *dev, void *data)
{
u8 lc_status;
- int error = 0;
+ int err;
- error = parse_and_check_status(dev);
- if (error)
- return error;
+ err = generic_get_column(dev, opaluid[OPAL_LOCKINGSP_UID],
+ OPAL_LIFECYCLE);
+ if (err)
+ return err;
lc_status = response_get_u64(&dev->parsed, 4);
- /* 0x08 is Manufacured Inactive */
+ /* 0x08 is Manufactured Inactive */
/* 0x09 is Manufactured */
if (lc_status != OPAL_MANUFACTURED_INACTIVE) {
pr_debug("Couldn't determine the status of the Lifecycle state\n");
@@ -1864,56 +1932,19 @@
return 0;
}
-/* Determine if we're in the Manufactured Inactive or Active state */
-static int get_lsp_lifecycle(struct opal_dev *dev, void *data)
-{
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GET], OPAL_UID_LENGTH);
-
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTLIST);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* Start Column */
- add_token_u8(&err, dev, 6); /* Lifecycle Column */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* End Column */
- add_token_u8(&err, dev, 6); /* Lifecycle Column */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_ENDLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
- if (err) {
- pr_debug("Error Building GET Lifecycle Status command\n");
- return err;
- }
-
- return finalize_and_send(dev, get_lsp_lifecycle_cont);
-}
-
-static int get_msid_cpin_pin_cont(struct opal_dev *dev)
+static int get_msid_cpin_pin(struct opal_dev *dev, void *data)
{
const char *msid_pin;
size_t strlen;
- int error = 0;
+ int err;
- error = parse_and_check_status(dev);
- if (error)
- return error;
+ err = generic_get_column(dev, opaluid[OPAL_C_PIN_MSID], OPAL_PIN);
+ if (err)
+ return err;
strlen = response_get_string(&dev->parsed, 4, &msid_pin);
if (!msid_pin) {
- pr_debug("%s: Couldn't extract PIN from response\n", __func__);
+ pr_debug("Couldn't extract MSID_CPIN from response\n");
return OPAL_INVAL_PARAM;
}
@@ -1926,42 +1957,6 @@
return 0;
}
-static int get_msid_cpin_pin(struct opal_dev *dev, void *data)
-{
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_C_PIN_MSID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GET], OPAL_UID_LENGTH);
-
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTLIST);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* Start Column */
- add_token_u8(&err, dev, 3); /* PIN */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* End Column */
- add_token_u8(&err, dev, 3); /* Lifecycle Column */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_ENDLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
- if (err) {
- pr_debug("Error building Get MSID CPIN PIN command.\n");
- return err;
- }
-
- return finalize_and_send(dev, get_msid_cpin_pin_cont);
-}
-
static int end_opal_session(struct opal_dev *dev, void *data)
{
int err = 0;
@@ -1972,23 +1967,21 @@
if (err < 0)
return err;
+
return finalize_and_send(dev, end_session_cont);
}
static int end_opal_session_error(struct opal_dev *dev)
{
- const struct opal_step error_end_session[] = {
- { end_opal_session, },
- { NULL, }
+ const struct opal_step error_end_session = {
+ end_opal_session,
};
- dev->steps = error_end_session;
- return next(dev);
+
+ return execute_step(dev, &error_end_session, 0);
}
-static inline void setup_opal_dev(struct opal_dev *dev,
- const struct opal_step *steps)
+static inline void setup_opal_dev(struct opal_dev *dev)
{
- dev->steps = steps;
dev->tsn = 0;
dev->hsn = 0;
dev->prev_data = NULL;
@@ -1996,17 +1989,14 @@
static int check_opal_support(struct opal_dev *dev)
{
- const struct opal_step steps[] = {
- { opal_discovery0, },
- { NULL, }
- };
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = opal_discovery0_step(dev);
dev->supported = !ret;
mutex_unlock(&dev->dev_lock);
+
return ret;
}
@@ -2027,6 +2017,7 @@
{
if (!dev)
return;
+
clean_opal_dev(dev);
kfree(dev);
}
@@ -2049,6 +2040,7 @@
kfree(dev);
return NULL;
}
+
return dev;
}
EXPORT_SYMBOL(init_opal_dev);
@@ -2057,19 +2049,18 @@
struct opal_session_info *opal_session)
{
const struct opal_step erase_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, opal_session },
{ get_active_key, &opal_session->opal_key.lr },
{ gen_key, },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, erase_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, erase_steps, ARRAY_SIZE(erase_steps));
mutex_unlock(&dev->dev_lock);
+
return ret;
}
@@ -2077,33 +2068,33 @@
struct opal_session_info *opal_session)
{
const struct opal_step erase_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, opal_session },
{ erase_locking_range, opal_session },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, erase_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, erase_steps, ARRAY_SIZE(erase_steps));
mutex_unlock(&dev->dev_lock);
+
return ret;
}
static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
struct opal_mbr_data *opal_mbr)
{
+ u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ?
+ OPAL_TRUE : OPAL_FALSE;
+
const struct opal_step mbr_steps[] = {
- { opal_discovery0, },
{ start_admin1LSP_opal_session, &opal_mbr->key },
- { set_mbr_done, &opal_mbr->enable_disable },
+ { set_mbr_done, &enable_disable },
{ end_opal_session, },
{ start_admin1LSP_opal_session, &opal_mbr->key },
- { set_mbr_enable_disable, &opal_mbr->enable_disable },
- { end_opal_session, },
- { NULL, }
+ { set_mbr_enable_disable, &enable_disable },
+ { end_opal_session, }
};
int ret;
@@ -2112,9 +2103,56 @@
return -EINVAL;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, mbr_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, mbr_steps, ARRAY_SIZE(mbr_steps));
mutex_unlock(&dev->dev_lock);
+
+ return ret;
+}
+
+static int opal_set_mbr_done(struct opal_dev *dev,
+ struct opal_mbr_done *mbr_done)
+{
+ u8 mbr_done_tf = mbr_done->done_flag == OPAL_MBR_DONE ?
+ OPAL_TRUE : OPAL_FALSE;
+
+ const struct opal_step mbr_steps[] = {
+ { start_admin1LSP_opal_session, &mbr_done->key },
+ { set_mbr_done, &mbr_done_tf },
+ { end_opal_session, }
+ };
+ int ret;
+
+ if (mbr_done->done_flag != OPAL_MBR_DONE &&
+ mbr_done->done_flag != OPAL_MBR_NOT_DONE)
+ return -EINVAL;
+
+ mutex_lock(&dev->dev_lock);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, mbr_steps, ARRAY_SIZE(mbr_steps));
+ mutex_unlock(&dev->dev_lock);
+
+ return ret;
+}
+
+static int opal_write_shadow_mbr(struct opal_dev *dev,
+ struct opal_shadow_mbr *info)
+{
+ const struct opal_step mbr_steps[] = {
+ { start_admin1LSP_opal_session, &info->key },
+ { write_shadow_mbr, info },
+ { end_opal_session, }
+ };
+ int ret;
+
+ if (info->size == 0)
+ return 0;
+
+ mutex_lock(&dev->dev_lock);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, mbr_steps, ARRAY_SIZE(mbr_steps));
+ mutex_unlock(&dev->dev_lock);
+
return ret;
}
@@ -2130,9 +2168,10 @@
suspend->lr = lk_unlk->session.opal_key.lr;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, NULL);
+ setup_opal_dev(dev);
add_suspend_info(dev, suspend);
mutex_unlock(&dev->dev_lock);
+
return 0;
}
@@ -2140,11 +2179,9 @@
struct opal_lock_unlock *lk_unlk)
{
const struct opal_step steps[] = {
- { opal_discovery0, },
{ start_admin1LSP_opal_session, &lk_unlk->session.opal_key },
{ add_user_to_lr, lk_unlk },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2153,12 +2190,14 @@
pr_debug("Locking state was not RO or RW\n");
return -EINVAL;
}
+
if (lk_unlk->session.who < OPAL_USER1 ||
lk_unlk->session.who > OPAL_USER9) {
pr_debug("Authority was not within the range of users: %d\n",
lk_unlk->session.who);
return -EINVAL;
}
+
if (lk_unlk->session.sum) {
pr_debug("%s not supported in sum. Use setup locking range\n",
__func__);
@@ -2166,25 +2205,35 @@
}
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, steps, ARRAY_SIZE(steps));
mutex_unlock(&dev->dev_lock);
+
return ret;
}
-static int opal_reverttper(struct opal_dev *dev, struct opal_key *opal)
+static int opal_reverttper(struct opal_dev *dev, struct opal_key *opal, bool psid)
{
+ /* controller will terminate session */
const struct opal_step revert_steps[] = {
- { opal_discovery0, },
{ start_SIDASP_opal_session, opal },
- { revert_tper, }, /* controller will terminate session */
- { NULL, }
+ { revert_tper, }
};
+ const struct opal_step psid_revert_steps[] = {
+ { start_PSID_opal_session, opal },
+ { revert_tper, }
+ };
+
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, revert_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ if (psid)
+ ret = execute_steps(dev, psid_revert_steps,
+ ARRAY_SIZE(psid_revert_steps));
+ else
+ ret = execute_steps(dev, revert_steps,
+ ARRAY_SIZE(revert_steps));
mutex_unlock(&dev->dev_lock);
/*
@@ -2201,37 +2250,34 @@
struct opal_lock_unlock *lk_unlk)
{
const struct opal_step unlock_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &lk_unlk->session },
{ lock_unlock_locking_range, lk_unlk },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
const struct opal_step unlock_sum_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &lk_unlk->session },
{ lock_unlock_locking_range_sum, lk_unlk },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
- dev->steps = lk_unlk->session.sum ? unlock_sum_steps : unlock_steps;
- return next(dev);
+ if (lk_unlk->session.sum)
+ return execute_steps(dev, unlock_sum_steps,
+ ARRAY_SIZE(unlock_sum_steps));
+ else
+ return execute_steps(dev, unlock_steps,
+ ARRAY_SIZE(unlock_steps));
}
static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key)
{
- u8 mbr_done_tf = 1;
- const struct opal_step mbrdone_step [] = {
- { opal_discovery0, },
+ u8 mbr_done_tf = OPAL_TRUE;
+ const struct opal_step mbrdone_step[] = {
{ start_admin1LSP_opal_session, key },
{ set_mbr_done, &mbr_done_tf },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
- dev->steps = mbrdone_step;
- return next(dev);
+ return execute_steps(dev, mbrdone_step, ARRAY_SIZE(mbrdone_step));
}
static int opal_lock_unlock(struct opal_dev *dev,
@@ -2239,27 +2285,25 @@
{
int ret;
- if (lk_unlk->session.who < OPAL_ADMIN1 ||
- lk_unlk->session.who > OPAL_USER9)
+ if (lk_unlk->session.who > OPAL_USER9)
return -EINVAL;
mutex_lock(&dev->dev_lock);
ret = __opal_lock_unlock(dev, lk_unlk);
mutex_unlock(&dev->dev_lock);
+
return ret;
}
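
The dropped lower-bound tests here and in opal_set_new_pw() below were dead code: `session.who` is an unsigned `__u32` in the UAPI and OPAL_ADMIN1 is 0, so `who < OPAL_ADMIN1` could never be true; only the OPAL_USER9 upper bound does any work.
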
static int opal_take_ownership(struct opal_dev *dev, struct opal_key *opal)
{
const struct opal_step owner_steps[] = {
- { opal_discovery0, },
{ start_anybodyASP_opal_session, },
{ get_msid_cpin_pin, },
{ end_opal_session, },
{ start_SIDASP_opal_session, opal },
{ set_sid_cpin_pin, opal },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2267,21 +2311,21 @@
return -ENODEV;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, owner_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, owner_steps, ARRAY_SIZE(owner_steps));
mutex_unlock(&dev->dev_lock);
+
return ret;
}
-static int opal_activate_lsp(struct opal_dev *dev, struct opal_lr_act *opal_lr_act)
+static int opal_activate_lsp(struct opal_dev *dev,
+ struct opal_lr_act *opal_lr_act)
{
const struct opal_step active_steps[] = {
- { opal_discovery0, },
{ start_SIDASP_opal_session, &opal_lr_act->key },
{ get_lsp_lifecycle, },
{ activate_lsp, opal_lr_act },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2289,9 +2333,10 @@
return -EINVAL;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, active_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, active_steps, ARRAY_SIZE(active_steps));
mutex_unlock(&dev->dev_lock);
+
return ret;
}
@@ -2299,42 +2344,38 @@
struct opal_user_lr_setup *opal_lrs)
{
const struct opal_step lr_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &opal_lrs->session },
{ setup_locking_range, opal_lrs },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, lr_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, lr_steps, ARRAY_SIZE(lr_steps));
mutex_unlock(&dev->dev_lock);
+
return ret;
}
static int opal_set_new_pw(struct opal_dev *dev, struct opal_new_pw *opal_pw)
{
const struct opal_step pw_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &opal_pw->session },
{ set_new_pw, &opal_pw->new_user_pw },
- { end_opal_session, },
- { NULL }
+ { end_opal_session, }
};
int ret;
- if (opal_pw->session.who < OPAL_ADMIN1 ||
- opal_pw->session.who > OPAL_USER9 ||
- opal_pw->new_user_pw.who < OPAL_ADMIN1 ||
+ if (opal_pw->session.who > OPAL_USER9 ||
opal_pw->new_user_pw.who > OPAL_USER9)
return -EINVAL;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, pw_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, pw_steps, ARRAY_SIZE(pw_steps));
mutex_unlock(&dev->dev_lock);
+
return ret;
}
@@ -2342,11 +2383,9 @@
struct opal_session_info *opal_session)
{
const struct opal_step act_steps[] = {
- { opal_discovery0, },
{ start_admin1LSP_opal_session, &opal_session->opal_key },
{ internal_activate_user, opal_session },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2358,9 +2397,10 @@
}
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, act_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, act_steps, ARRAY_SIZE(act_steps));
mutex_unlock(&dev->dev_lock);
+
return ret;
}
@@ -2372,11 +2412,12 @@
if (!dev)
return false;
+
if (!dev->supported)
return false;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, NULL);
+ setup_opal_dev(dev);
list_for_each_entry(suspend, &dev->unlk_lst, node) {
dev->tsn = 0;
@@ -2389,6 +2430,7 @@
suspend->unlk.session.sum);
was_failure = true;
}
+
if (dev->mbr_enabled) {
ret = __opal_set_mbr_done(dev, &suspend->unlk.session.opal_key);
if (ret)
@@ -2396,6 +2438,7 @@
}
}
mutex_unlock(&dev->dev_lock);
+
return was_failure;
}
EXPORT_SYMBOL(opal_unlock_from_suspend);
@@ -2436,7 +2479,7 @@
ret = opal_activate_user(dev, p);
break;
case IOC_OPAL_REVERT_TPR:
- ret = opal_reverttper(dev, p);
+ ret = opal_reverttper(dev, p, false);
break;
case IOC_OPAL_LR_SETUP:
ret = opal_setup_locking_range(dev, p);
@@ -2447,12 +2490,21 @@
case IOC_OPAL_ENABLE_DISABLE_MBR:
ret = opal_enable_disable_shadow_mbr(dev, p);
break;
+ case IOC_OPAL_MBR_DONE:
+ ret = opal_set_mbr_done(dev, p);
+ break;
+ case IOC_OPAL_WRITE_SHADOW_MBR:
+ ret = opal_write_shadow_mbr(dev, p);
+ break;
case IOC_OPAL_ERASE_LR:
ret = opal_erase_locking_range(dev, p);
break;
case IOC_OPAL_SECURE_ERASE_LR:
ret = opal_secure_erase_locking_range(dev, p);
break;
+ case IOC_OPAL_PSID_REVERT_TPR:
+ ret = opal_reverttper(dev, p, true);
+ break;
default:
break;
}
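
Three new ioctls are dispatched here: IOC_OPAL_MBR_DONE, IOC_OPAL_WRITE_SHADOW_MBR, and IOC_OPAL_PSID_REVERT_TPR. The PSID path exists for recovery: the physical SID printed on the drive label can revert the TPer when the normal credentials are lost, destroying all data on the drive. A hedged userspace sketch of invoking it (device path and PSID value are placeholders; the ioctl and struct come from <linux/sed-opal.h>):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/sed-opal.h>

int main(void)
{
	struct opal_key key = { 0 };
	const char *psid = "EXAMPLE-PSID";	/* printed on the drive label */
	int fd = open("/dev/nvme0n1", O_RDWR);	/* placeholder device */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	key.key_len = strlen(psid);
	memcpy(key.key, psid, key.key_len);

	/* WARNING: a successful PSID revert erases the drive */
	if (ioctl(fd, IOC_OPAL_PSID_REVERT_TPR, &key))
		perror("IOC_OPAL_PSID_REVERT_TPR");

	close(fd);
	return 0;
}
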
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 62aed77..9803c7e 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -1,24 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* t10_pi.c - Functions for generating and verifying T10 Protection
* Information.
- *
- * Copyright (C) 2007, 2008, 2014 Oracle Corporation
- * Written by: Martin K. Petersen <martin.petersen@oracle.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
*/
#include <linux/t10-pi.h>
@@ -44,7 +27,7 @@
* tag.
*/
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
- csum_fn *fn, unsigned int type)
+ csum_fn *fn, enum t10_dif_type type)
{
unsigned int i;
@@ -54,7 +37,7 @@
pi->guard_tag = fn(iter->data_buf, iter->interval);
pi->app_tag = 0;
- if (type == 1)
+ if (type == T10_PI_TYPE1_PROTECTION)
pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
else
pi->ref_tag = 0;
@@ -68,17 +51,18 @@
}
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
- csum_fn *fn, unsigned int type)
+ csum_fn *fn, enum t10_dif_type type)
{
unsigned int i;
+ BUG_ON(type == T10_PI_TYPE0_PROTECTION);
+
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct t10_pi_tuple *pi = iter->prot_buf;
__be16 csum;
- switch (type) {
- case 1:
- case 2:
+ if (type == T10_PI_TYPE1_PROTECTION ||
+ type == T10_PI_TYPE2_PROTECTION) {
if (pi->app_tag == T10_PI_APP_ESCAPE)
goto next;
@@ -90,12 +74,10 @@
iter->seed, be32_to_cpu(pi->ref_tag));
return BLK_STS_PROTECTION;
}
- break;
- case 3:
+ } else if (type == T10_PI_TYPE3_PROTECTION) {
if (pi->app_tag == T10_PI_APP_ESCAPE &&
pi->ref_tag == T10_PI_REF_ESCAPE)
goto next;
- break;
}
csum = fn(iter->data_buf, iter->interval);
@@ -119,94 +101,40 @@
static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
- return t10_pi_generate(iter, t10_pi_crc_fn, 1);
+ return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
- return t10_pi_generate(iter, t10_pi_ip_fn, 1);
+ return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
- return t10_pi_verify(iter, t10_pi_crc_fn, 1);
+ return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
- return t10_pi_verify(iter, t10_pi_ip_fn, 1);
+ return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}
-static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
-{
- return t10_pi_generate(iter, t10_pi_crc_fn, 3);
-}
-
-static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
-{
- return t10_pi_generate(iter, t10_pi_ip_fn, 3);
-}
-
-static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
-{
- return t10_pi_verify(iter, t10_pi_crc_fn, 3);
-}
-
-static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
-{
- return t10_pi_verify(iter, t10_pi_ip_fn, 3);
-}
-
-const struct blk_integrity_profile t10_pi_type1_crc = {
- .name = "T10-DIF-TYPE1-CRC",
- .generate_fn = t10_pi_type1_generate_crc,
- .verify_fn = t10_pi_type1_verify_crc,
-};
-EXPORT_SYMBOL(t10_pi_type1_crc);
-
-const struct blk_integrity_profile t10_pi_type1_ip = {
- .name = "T10-DIF-TYPE1-IP",
- .generate_fn = t10_pi_type1_generate_ip,
- .verify_fn = t10_pi_type1_verify_ip,
-};
-EXPORT_SYMBOL(t10_pi_type1_ip);
-
-const struct blk_integrity_profile t10_pi_type3_crc = {
- .name = "T10-DIF-TYPE3-CRC",
- .generate_fn = t10_pi_type3_generate_crc,
- .verify_fn = t10_pi_type3_verify_crc,
-};
-EXPORT_SYMBOL(t10_pi_type3_crc);
-
-const struct blk_integrity_profile t10_pi_type3_ip = {
- .name = "T10-DIF-TYPE3-IP",
- .generate_fn = t10_pi_type3_generate_ip,
- .verify_fn = t10_pi_type3_verify_ip,
-};
-EXPORT_SYMBOL(t10_pi_type3_ip);
-
/**
- * t10_pi_prepare - prepare PI prior submitting request to device
+ * t10_pi_type1_prepare - prepare PI prior submitting request to device
* @rq: request with PI that should be prepared
- * @protection_type: PI type (Type 1/Type 2/Type 3)
*
* For Type 1/Type 2, the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage. Due to
* partitioning, MD/DM cloning, etc. the actual physical start sector is
* likely to be different. Remap protection information to match the
* physical LBA.
- *
- * Type 3 does not have a reference tag so no remapping is required.
*/
-void t10_pi_prepare(struct request *rq, u8 protection_type)
+static void t10_pi_type1_prepare(struct request *rq)
{
const int tuple_sz = rq->q->integrity.tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq);
struct bio *bio;
- if (protection_type == T10_PI_TYPE3_PROTECTION)
- return;
-
__rq_for_each_bio(bio, rq) {
struct bio_integrity_payload *bip = bio_integrity(bio);
u32 virt = bip_get_seed(bip) & 0xffffffff;
@@ -239,13 +167,11 @@
bip->bip_flags |= BIP_MAPPED_INTEGRITY;
}
}
-EXPORT_SYMBOL(t10_pi_prepare);
/**
- * t10_pi_complete - prepare PI prior returning request to the block layer
+ * t10_pi_type1_complete - prepare PI prior returning request to the blk layer
* @rq: request with PI that should be prepared
- * @protection_type: PI type (Type 1/Type 2/Type 3)
- * @intervals: total elements to prepare
+ * @nr_bytes: total bytes to prepare
*
* For Type 1/Type 2, the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage. Due to
@@ -253,19 +179,14 @@
* likely to be different. Since the physical start sector was submitted
* to the device, we should remap it back to virtual values expected by the
* block layer.
- *
- * Type 3 does not have a reference tag so no remapping is required.
*/
-void t10_pi_complete(struct request *rq, u8 protection_type,
- unsigned int intervals)
+static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
+ unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
const int tuple_sz = rq->q->integrity.tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq);
struct bio *bio;
- if (protection_type == T10_PI_TYPE3_PROTECTION)
- return;
-
__rq_for_each_bio(bio, rq) {
struct bio_integrity_payload *bip = bio_integrity(bio);
u32 virt = bip_get_seed(bip) & 0xffffffff;
@@ -293,4 +214,73 @@
}
}
}
-EXPORT_SYMBOL(t10_pi_complete);
+
+static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
+{
+ return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
+{
+ return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
+{
+ return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
+{
+ return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+/**
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+static void t10_pi_type3_prepare(struct request *rq)
+{
+}
+
+/**
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
+{
+}
+
+const struct blk_integrity_profile t10_pi_type1_crc = {
+ .name = "T10-DIF-TYPE1-CRC",
+ .generate_fn = t10_pi_type1_generate_crc,
+ .verify_fn = t10_pi_type1_verify_crc,
+ .prepare_fn = t10_pi_type1_prepare,
+ .complete_fn = t10_pi_type1_complete,
+};
+EXPORT_SYMBOL(t10_pi_type1_crc);
+
+const struct blk_integrity_profile t10_pi_type1_ip = {
+ .name = "T10-DIF-TYPE1-IP",
+ .generate_fn = t10_pi_type1_generate_ip,
+ .verify_fn = t10_pi_type1_verify_ip,
+ .prepare_fn = t10_pi_type1_prepare,
+ .complete_fn = t10_pi_type1_complete,
+};
+EXPORT_SYMBOL(t10_pi_type1_ip);
+
+const struct blk_integrity_profile t10_pi_type3_crc = {
+ .name = "T10-DIF-TYPE3-CRC",
+ .generate_fn = t10_pi_type3_generate_crc,
+ .verify_fn = t10_pi_type3_verify_crc,
+ .prepare_fn = t10_pi_type3_prepare,
+ .complete_fn = t10_pi_type3_complete,
+};
+EXPORT_SYMBOL(t10_pi_type3_crc);
+
+const struct blk_integrity_profile t10_pi_type3_ip = {
+ .name = "T10-DIF-TYPE3-IP",
+ .generate_fn = t10_pi_type3_generate_ip,
+ .verify_fn = t10_pi_type3_verify_ip,
+ .prepare_fn = t10_pi_type3_prepare,
+ .complete_fn = t10_pi_type3_complete,
+};
+EXPORT_SYMBOL(t10_pi_type3_ip);
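
The t10-pi rework replaces the exported t10_pi_prepare()/t10_pi_complete() pair with per-profile prepare_fn/complete_fn hooks: Type 1 carries a reference tag and gets the real remapping, while Type 3 has none, so its hooks are empty stubs rather than a runtime type check. The completion path now derives the interval count from bytes — with 512-byte intervals (interval_exp = 9), a 4096-byte completion covers 4096 >> 9 = 8 tuples. A standalone model of the remap itself: each tuple carries the low 32 bits of the virtual sector, is rewritten to the physical sector before reaching the device, and rewritten back on completion (numbers illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pi_tuple {
	uint32_t ref_tag;	/* big-endian in the real on-disk format */
};

/* rewrite only tags matching the expected sequence, as the kernel does */
static void remap(struct pi_tuple *t, size_t n, uint32_t from, uint32_t to)
{
	for (size_t i = 0; i < n; i++) {
		if (t[i].ref_tag == from + i)
			t[i].ref_tag = to + i;
	}
}

int main(void)
{
	uint32_t virt = 100, phys = 2148;	/* partition offset 2048 */
	struct pi_tuple t[4] = { { 100 }, { 101 }, { 102 }, { 103 } };

	remap(t, 4, virt, phys);	/* prepare: virtual -> physical */
	printf("on the wire: %u..%u\n", t[0].ref_tag, t[3].ref_tag);

	remap(t, 4, phys, virt);	/* complete: physical -> virtual */
	printf("back in blk: %u..%u\n", t[0].ref_tag, t[3].ref_tag);
	return 0;
}
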