// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

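/* Some switchdev operations originate in atomic context (for example under
 * the bridge's locks) but must reach drivers in process context.  Such ops
 * are copied into a switchdev_deferred_item, queued on the list above, and
 * replayed later under rtnl_lock by a workqueue.  The flexible data[] array
 * carries a byte-for-byte copy of the attr or object being deferred.
 */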
typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 * switchdev_deferred_process - Process ops in deferred queue
 *
 * Called to flush the ops currently queued in the deferred ops queue.
 * rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

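/* Queue an op for deferred execution.  The allocation is GFP_ATOMIC since
 * callers may hold spinlocks, and the target netdev is held (dev_hold) so
 * it cannot go away before switchdev_deferred_process() replays the op.
 */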
static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

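/* Deliver an attr to the blocking notifier chain and translate the result:
 * an error returned by a handler is propagated as-is, while an attr that no
 * handler claimed at all is reported as -EOPNOTSUPP.
 */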
static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct switchdev_trans *trans)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.trans = trans,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, NULL);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr)
{
	struct switchdev_trans trans;
	int err;

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support. The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	trans.ph_prepare = true;
	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					 &trans);
	if (err)
		return err;

	/* Phase II: commit attr set. This cannot fail as a fault
	 * of driver/device. If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					 &trans);
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);

	return err;
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * the system is not left in a partially updated state due to
 * failure from the driver/device.
 *
 * Unless the SWITCHDEV_F_DEFER flag is set, rtnl_lock must be
 * held and the caller must not be in an atomic section.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);

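/* Illustrative sketch (not from this file): a caller in atomic context,
 * such as the bridge updating a port's STP state, defers the operation:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *	err = switchdev_port_attr_set(dev, &attr);
 *
 * The attr is copied on enqueue, so it may live on the stack; the optional
 * attr.complete callback receives the eventual result.
 */

/* Deferred adds and deletes copy the object by value, so the queueing code
 * needs the real size of the type behind each switchdev_obj pointer.
 */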
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

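/* Object analogue of switchdev_port_attr_notify().  The extack, when
 * provided, lets handlers report a human-readable failure reason back
 * over netlink.
 */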
static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct switchdev_trans *trans,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.trans = trans,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static int switchdev_port_obj_add_now(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack)
{
	struct switchdev_trans trans;
	int err;

	ASSERT_RTNL();

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support. The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	trans.ph_prepare = true;
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, &trans, extack);
	if (err)
		return err;

	/* Phase II: commit obj add. This cannot fail as a fault
	 * of driver/device. If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, &trans, extack);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);

	return err;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_add_now(dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 * @extack: netlink extended ack
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * the system is not left in a partially updated state due to
 * failure from the driver/device.
 *
 * Unless the SWITCHDEV_F_DEFER flag is set, rtnl_lock must be
 * held and the caller must not be in an atomic section.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_add_now(dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);

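/* Illustrative sketch (not from this file): adding VLAN 10 as untagged
 * PVID on a port, deferred so that the caller may hold spinlocks:
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.obj.flags = SWITCHDEV_F_DEFER,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid_begin = 10,
 *		.vid_end = 10,
 *	};
 *	err = switchdev_port_obj_add(dev, &vlan.obj, NULL);
 */
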
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * Unless the SWITCHDEV_F_DEFER flag is set, rtnl_lock must be
 * held and the caller must not be in an atomic section.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

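/* Two chains serve two calling contexts: the atomic chain carries events
 * raised from atomic context (e.g. FDB notifications), while the blocking
 * chain carries the port attr/obj operations, whose handlers may sleep.
 */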
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 * @extack: netlink extended ack
 *
 * Call all switchdev notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

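/* The __switchdev_handle_* helpers below recurse through the lower devices
 * of @dev (e.g. LAG members), so that a driver's callback runs on every
 * switch port sitting under a stacked upper device.
 */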
static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct switchdev_trans *trans,
				      struct netlink_ext_ack *extack))
{
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);

	if (check_cb(dev)) {
		err = add_cb(dev, port_obj_info->obj, port_obj_info->trans,
			     extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct switchdev_trans *trans,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
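
/* Illustrative sketch (not from this file; the my_* names are placeholders):
 * a driver's blocking notifier typically funnels object events through the
 * helpers above:
 *
 *	static int my_blocking_event(struct notifier_block *nb,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *		int err;
 *
 *		switch (event) {
 *		case SWITCHDEV_PORT_OBJ_ADD:
 *			err = switchdev_handle_port_obj_add(dev, ptr,
 *							    my_check_cb,
 *							    my_obj_add_cb);
 *			return notifier_from_errno(err);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */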

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct switchdev_trans *trans))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = set_cb(dev, port_attr_info->attr, port_attr_info->trans);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct switchdev_trans *trans))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);