// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct mq_sched {
	struct Qdisc		**qdiscs;
};

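/* Relay an mq command to the device driver for hardware offload via
 * ndo_setup_tc(). Returns -EOPNOTSUPP when the device cannot offload;
 * callers treat that as non-fatal, so mq behaves the same on devices
 * without offload support.
 */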
static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mq_qopt_offload opt = {
		.command = cmd,
		.handle = sch->handle,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}

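/* Ask the driver to add its hardware counters into sch->bstats and
 * sch->qstats (passed via opt.stats). qdisc_offload_dump_helper()
 * treats -EOPNOTSUPP as success and sets TCQ_F_OFFLOADED when the
 * driver accepts the command.
 */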
static int mq_offload_stats(struct Qdisc *sch)
{
	struct tc_mq_qopt_offload opt = {
		.command = TC_MQ_STATS,
		.handle = sch->handle,
		.stats = {
			.bstats = &sch->bstats,
			.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
}

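/* Notify the driver, then release the per-queue children. priv->qdiscs
 * is only non-NULL between init and attach (attach frees the array),
 * and the loop stops at the first NULL entry to cope with a partially
 * completed mq_init().
 */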
static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	mq_offload(sch, TC_MQ_DESTROY);

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_put(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

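/* Create one default child qdisc per TX queue. The children are only
 * pre-allocated here; ->attach() grafts them onto their queues later,
 * which is why attachment itself cannot fail. Class minor IDs map to
 * queue index + 1, so queue 0 is class :1.
 */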
static int mq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;

	mq_offload(sch, TC_MQ_CREATE);
	return 0;
}

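/* Graft the pre-allocated children onto their queues, dropping whatever
 * was attached before. Only qdiscs on currently active queues are
 * hashed for dumping. The temporary array is freed afterwards; from now
 * on the children are reachable through the netdev queues themselves.
 */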
static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif

	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

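/* Keep the qdisc hash in step when the driver changes the number of
 * active TX queues: unhash default children on queues that disappear,
 * re-hash them on queues that come back.
 */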
static void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
{
#ifdef CONFIG_NET_SCHED
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int i;

	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
		/* Only update the default qdiscs we created,
		 * qdiscs with handles are always hashed.
		 */
		if (qdisc != &noop_qdisc && !qdisc->handle)
			qdisc_hash_del(qdisc);
	}
	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
		if (qdisc != &noop_qdisc && !qdisc->handle)
			qdisc_hash_add(qdisc, false);
	}
#endif
}

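/* Aggregate child statistics into the root qdisc for a netlink dump.
 * Children may keep per-CPU (lockless) or locked stats, so both forms
 * are summed here; any hardware-offloaded counters are merged in last
 * via mq_offload_stats().
 */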
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;
	__u32 qlen = 0;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			qlen = qdisc_qlen_sum(qdisc);
			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
			sch->q.qlen += qlen;
		} else {
			sch->q.qlen		+= qdisc->q.qlen;
			sch->bstats.bytes	+= qdisc->bstats.bytes;
			sch->bstats.packets	+= qdisc->bstats.packets;
			sch->qstats.qlen	+= qdisc->qstats.qlen;
			sch->qstats.backlog	+= qdisc->qstats.backlog;
			sch->qstats.drops	+= qdisc->qstats.drops;
			sch->qstats.requeues	+= qdisc->qstats.requeues;
			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	return mq_offload_stats(sch);
}

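/* Class IDs are 1-based: class minor N corresponds to TX queue N - 1.
 * Returns NULL for out-of-range classes.
 */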
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

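/* Replace the child qdisc of one queue. The device is deactivated
 * around the graft so no packets are in flight while the queue's qdisc
 * pointer changes; the driver is then told about the new child for its
 * offload bookkeeping.
 */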
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct tc_mq_qopt_offload graft_offload;
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);

	graft_offload.handle = sch->handle;
	graft_offload.graft_params.queue = cl - 1;
	graft_offload.graft_params.child_handle = new ? new->handle : 0;
	graft_offload.command = TC_MQ_GRAFT;

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
				   TC_SETUP_QDISC_MQ, &graft_offload, extack);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

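/* Per-class stats are simply those of the child qdisc attached to the
 * queue: copy its basic and queue counters into the dump.
 */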
static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats,
				  &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}

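/* Walk all classes (one per TX queue), honouring the skip/count/stop
 * protocol of struct qdisc_walker.
 */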
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

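/* mq exposes each TX queue as a class so the per-queue children can be
 * inspected and replaced through the usual class operations.
 */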
static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.find		= mq_find,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.change_real_num_tx = mq_change_real_num_tx,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};
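
/* Example usage (a sketch, assuming a multiqueue device named eth0 and
 * the iproute2 tc utility):
 *
 *	tc qdisc add dev eth0 root handle 100: mq
 *	tc qdisc show dev eth0
 *
 * The kernel also attaches mq automatically as the default root qdisc
 * on multiqueue devices, giving each TX queue its own default child.
 */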