// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "meter.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

unsigned int ovs_net_id __read_mostly;

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct nla_policy flow_policy[];

static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP,
};

/* Check if we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply.
 */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
			    unsigned int group)
{
	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
	       genl_has_listeners(family, genl_info_net(info), group);
}

static void ovs_notify(struct genl_family *family,
		       struct sk_buff *skb, struct genl_info *info)
{
	genl_notify(family, skb, info, 0, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes, i.e. writes to device state (add/remove datapath or port,
 * set operations on vports, etc.) and writes to other state (flow table
 * modifications, setting miscellaneous datapath parameters, etc.), are
 * protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}

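/* For lockdep: report whether ovs_mutex is held by the current task
 * (assumed held when lockdep checking is disabled).
 */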
#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
#endif

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
			     const struct sw_flow_key *,
			     const struct dp_upcall_info *,
			     uint32_t cutlen);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
				  const struct sw_flow_key *,
				  const struct dp_upcall_info *,
				  uint32_t cutlen);

static void ovs_dp_masks_rebalance(struct work_struct *work);

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
	return ovs_vport_name(vport);
}

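/* Return the ifindex of the datapath's local port, or 0 if there is no
 * local port.  Takes the RCU read lock internally, so it is safe to call
 * without ovs_mutex.
 */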
static int get_dpifindex(const struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = local->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

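/* RCU callback that frees a datapath's tables, stats, ports and meters
 * once the last reader is done with it.
 */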
static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	kfree(dp->ports);
	ovs_meters_exit(dp);
	kfree(dp);
}

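/* Map a port number to its bucket in the datapath's vport hash table. */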
static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node,
				 lockdep_ovsl_is_held()) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
	const struct vport *p = OVS_CB(skb)->input_vport;
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	u32 n_mask_hit;
	u32 n_cache_hit;
	int error;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
					 &n_mask_hit, &n_cache_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;

		memset(&upcall, 0, sizeof(upcall));
		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
		upcall.mru = OVS_CB(skb)->mru;
		error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
		switch (error) {
		case 0:
		case -EAGAIN:
		case -ERESTARTSYS:
		case -EINTR:
			consume_skb(skb);
			break;
		default:
			kfree_skb(skb);
			break;
		}
		stats_counter = &stats->n_missed;
		goto out;
	}

	ovs_flow_stats_update(flow, key->tp.flags, skb);
	sf_acts = rcu_dereference(flow->sf_acts);
	error = ovs_execute_actions(dp, skb, sf_acts, key);
	if (unlikely(error))
		net_dbg_ratelimited("ovs: action execution error on datapath %s: %d\n",
				    ovs_dp_name(dp), error);

	stats_counter = &stats->n_hit;

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->syncp);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	stats->n_cache_hit += n_cache_hit;
	u64_stats_update_end(&stats->syncp);
}

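/* Send 'skb' to the userspace socket named in 'upcall_info', segmenting
 * GSO packets first.  On failure, the per-CPU n_lost counter is bumped.
 */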
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct sw_flow_key *key,
		  const struct dp_upcall_info *upcall_info,
		  uint32_t cutlen)
{
	struct dp_stats_percpu *stats;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
	else
		err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->syncp);
	stats->n_lost++;
	u64_stats_update_end(&stats->syncp);

	return err;
}

static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
			     const struct sw_flow_key *key,
			     const struct dp_upcall_info *upcall_info,
			     uint32_t cutlen)
{
	unsigned int gso_type = skb_shinfo(skb)->gso_type;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_GSO_CB_OFFSET);
	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	if (gso_type & SKB_GSO_UDP) {
		/* The initial flow key extracted by ovs_flow_key_extract()
		 * in this case is for a first fragment, so we need to
		 * properly mark later fragments.
		 */
		later_key = *key;
		later_key.ip.frag = OVS_FRAG_TYPE_LATER;
	}

	/* Queue all of the segments. */
	skb_list_walk_safe(segs, skb, nskb) {
		if (gso_type & SKB_GSO_UDP && skb != segs)
			key = &later_key;

		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
		if (err)
			break;
	}

	/* Free all of the segments. */
	skb_list_walk_safe(segs, skb, nskb) {
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return err;
}

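/* Estimate the Netlink message size for an upcall so that the reply skb
 * can be sized up front.
 */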
static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
			      unsigned int hdrlen, int actions_attrlen)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
		+ nla_total_size(sizeof(unsigned int)) /* OVS_PACKET_ATTR_LEN */
		+ nla_total_size(sizeof(u64)); /* OVS_PACKET_ATTR_HASH */

	/* OVS_PACKET_ATTR_USERDATA */
	if (upcall_info->userdata)
		size += NLA_ALIGN(upcall_info->userdata->nla_len);

	/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
	if (upcall_info->egress_tun_info)
		size += nla_total_size(ovs_tun_key_attr_size());

	/* OVS_PACKET_ATTR_ACTIONS */
	if (upcall_info->actions_len)
		size += nla_total_size(actions_attrlen);

	/* OVS_PACKET_ATTR_MRU */
	if (upcall_info->mru)
		size += nla_total_size(sizeof(upcall_info->mru));

	return size;
}

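/* Zero-pad the packet attribute to NLA_ALIGNTO unless userspace asked for
 * unaligned Netlink messages via OVS_DP_F_UNALIGNED.
 */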
static void pad_packet(struct datapath *dp, struct sk_buff *skb)
{
	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
		size_t plen = NLA_ALIGN(skb->len) - skb->len;

		if (plen > 0)
			skb_put_zero(skb, plen);
	}
}

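/* Build an upcall Netlink message carrying 'skb' and unicast it to the
 * userspace socket identified by upcall_info->portid.
 */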
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
				  const struct sw_flow_key *key,
				  const struct dp_upcall_info *upcall_info,
				  uint32_t cutlen)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
	struct nlattr *nla;
	size_t len;
	unsigned int hlen;
	int err, dp_ifindex;
	u64 hash;

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex)
		return -ENODEV;

	if (skb_vlan_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_hwaccel_push_inside(nskb);
		if (!nskb)
			return -ENOMEM;

		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Complete checksum if needed */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_csum_hwoffload_help(skb, 0)))
		goto out;

	/* Older versions of OVS user space enforce alignment of the last
	 * Netlink attribute to NLA_ALIGNTO which would require extensive
	 * padding logic. Only perform zerocopy if padding is not required.
	 */
	if (dp->user_features & OVS_DP_F_UNALIGNED)
		hlen = skb_zerocopy_headlen(skb);
	else
		hlen = skb->len;

	len = upcall_msg_size(upcall_info, hlen - cutlen,
			      OVS_CB(skb)->acts_origlen);
	user_skb = genlmsg_new(len, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	if (!upcall) {
		err = -EINVAL;
		goto out;
	}
	upcall->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
	if (err)
		goto out;

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	if (upcall_info->egress_tun_info) {
		nla = nla_nest_start_noflag(user_skb,
					    OVS_PACKET_ATTR_EGRESS_TUN_KEY);
		if (!nla) {
			err = -EMSGSIZE;
			goto out;
		}
		err = ovs_nla_put_tunnel_info(user_skb,
					      upcall_info->egress_tun_info);
		if (err)
			goto out;

		nla_nest_end(user_skb, nla);
	}

	if (upcall_info->actions_len) {
		nla = nla_nest_start_noflag(user_skb, OVS_PACKET_ATTR_ACTIONS);
		if (!nla) {
			err = -EMSGSIZE;
			goto out;
		}
		err = ovs_nla_put_actions(upcall_info->actions,
					  upcall_info->actions_len,
					  user_skb);
		if (!err)
			nla_nest_end(user_skb, nla);
		else
			nla_nest_cancel(user_skb, nla);
	}

	/* Add OVS_PACKET_ATTR_MRU */
	if (upcall_info->mru &&
	    nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU, upcall_info->mru)) {
		err = -ENOBUFS;
		goto out;
	}

	/* Add OVS_PACKET_ATTR_LEN when packet is truncated */
	if (cutlen > 0 &&
	    nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN, skb->len)) {
		err = -ENOBUFS;
		goto out;
	}

	/* Add OVS_PACKET_ATTR_HASH */
	hash = skb_get_hash_raw(skb);
	if (skb->sw_hash)
		hash |= OVS_PACKET_HASH_SW_BIT;

	if (skb->l4_hash)
		hash |= OVS_PACKET_HASH_L4_BIT;

	if (nla_put(user_skb, OVS_PACKET_ATTR_HASH, sizeof(u64), &hash)) {
		err = -ENOBUFS;
		goto out;
	}

	/* Only reserve room for attribute header, packet data is added
	 * in skb_zerocopy()
	 */
	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
		err = -ENOBUFS;
		goto out;
	}
	nla->nla_len = nla_attr_size(skb->len - cutlen);

	err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
	if (err)
		goto out;

	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
	pad_packet(dp, user_skb);

	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
	user_skb = NULL;
out:
	if (err)
		skb_tx_error(skb);
	consume_skb(user_skb);
	consume_skb(nskb);

	return err;
}

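/* OVS_PACKET_CMD_EXECUTE handler: inject a userspace-supplied packet into
 * the datapath and run the supplied actions on it.
 */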
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct datapath *dp;
	struct vport *input_vport;
	u16 mru = 0;
	u64 hash;
	int len;
	int err;
	bool log = !a[OVS_PACKET_ATTR_PROBE];

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	/* Set packet's mru */
	if (a[OVS_PACKET_ATTR_MRU]) {
		mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
		packet->ignore_df = 1;
	}
	OVS_CB(packet)->mru = mru;

	if (a[OVS_PACKET_ATTR_HASH]) {
		hash = nla_get_u64(a[OVS_PACKET_ATTR_HASH]);

		__skb_set_hash(packet, hash & 0xFFFFFFFFULL,
			       !!(hash & OVS_PACKET_HASH_SW_BIT),
			       !!(hash & OVS_PACKET_HASH_L4_BIT));
	}

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
					     packet, &flow->key, log);
	if (err)
		goto err_flow_free;

	err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, &acts, log);
	if (err)
		goto err_flow_free;

	rcu_assign_pointer(flow->sf_acts, acts);
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp_rcu(net, ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
	if (!input_vport)
		input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

	if (!input_vport)
		goto err_unlock;

	packet->dev = input_vport->dev;
	OVS_CB(packet)->input_vport = input_vport;
	sf_acts = rcu_dereference(flow->sf_acts);

	local_bh_disable();
	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
	[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
	[OVS_PACKET_ATTR_HASH] = { .type = NLA_U64 },
};

static const struct genl_small_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_packet_cmd_execute
	}
};

static struct genl_family dp_packet_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
	.policy = packet_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = dp_packet_genl_ops,
	.n_small_ops = ARRAY_SIZE(dp_packet_genl_ops),
	.module = THIS_MODULE,
};

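/* Aggregate the per-CPU datapath counters into 'stats' and 'mega_stats'. */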
static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
		mega_stats->n_cache_hit += local_stats.n_cache_hit;
	}
}

static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
{
	return ovs_identifier_is_ufid(sfid) &&
	       !(ufid_flags & OVS_UFID_F_OMIT_KEY);
}

static bool should_fill_mask(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
}

static bool should_fill_actions(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
}

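/* Upper bound on the Netlink message size of a flow command reply. */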
static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
				    const struct sw_flow_id *sfid,
				    uint32_t ufid_flags)
{
	size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));

	/* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback
	 * see ovs_nla_put_identifier()
	 */
	if (sfid && ovs_identifier_is_ufid(sfid))
		len += nla_total_size(sfid->ufid_len);
	else
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_KEY */
	if (!sfid || should_fill_key(sfid, ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_MASK */
	if (should_fill_mask(ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_ACTIONS */
	if (should_fill_actions(ufid_flags))
		len += nla_total_size(acts->orig_len);

	return len
		+ nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct ovs_flow_stats stats;
	__be16 tcp_flags;
	unsigned long used;

	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

	if (used &&
	    nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
			      OVS_FLOW_ATTR_PAD))
		return -EMSGSIZE;

	if (stats.n_packets &&
	    nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
			  sizeof(struct ovs_flow_stats), &stats,
			  OVS_FLOW_ATTR_PAD))
		return -EMSGSIZE;

	if ((u8)ntohs(tcp_flags) &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
		return -EMSGSIZE;

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
				     struct sk_buff *skb, int skb_orig_len)
{
	struct nlattr *start;
	int err;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'. This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them. (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start_noflag(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);

		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				return err;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len) {
		return -EMSGSIZE;
	}

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
{
	const int skb_orig_len = skb->len;
	struct ovs_header *ovs_header;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_identifier(flow, skb);
	if (err)
		goto error;

	if (should_fill_key(&flow->id, ufid_flags)) {
		err = ovs_nla_put_masked_key(flow, skb);
		if (err)
			goto error;
	}

	if (should_fill_mask(ufid_flags)) {
		err = ovs_nla_put_mask(flow, skb);
		if (err)
			goto error;
	}

	err = ovs_flow_cmd_fill_stats(flow, skb);
	if (err)
		goto error;

	if (should_fill_actions(ufid_flags)) {
		err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
		if (err)
			goto error;
	}

	genlmsg_end(skb, ovs_header);
	return 0;

error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
					       const struct sw_flow_id *sfid,
					       struct genl_info *info,
					       bool always,
					       uint32_t ufid_flags)
{
	struct sk_buff *skb;
	size_t len;

	if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
		return NULL;

	len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
	skb = genlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
					       int dp_ifindex,
					       struct genl_info *info, u8 cmd,
					       bool always, u32 ufid_flags)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
				      &flow->id, info, always, ufid_flags);
	if (IS_ERR_OR_NULL(skb))
		return skb;

	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
					info->snd_portid, info->snd_seq, 0,
					cmd, ufid_flags);
	if (WARN_ON_ONCE(retval < 0)) {
		kfree_skb(skb);
		skb = ERR_PTR(retval);
	}
	return skb;
}

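/* OVS_FLOW_CMD_NEW handler: install a new flow, or update the actions of
 * an existing flow when the Netlink flags permit it.
 */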
static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow *flow = NULL, *new_flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_actions *acts;
	struct sw_flow_match match;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error;
	bool log = !a[OVS_FLOW_ATTR_PROBE];

	/* Must have key and actions. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR(log, "Flow key attr not present in new flow.");
		goto error;
	}
	if (!a[OVS_FLOW_ATTR_ACTIONS]) {
		OVS_NLERR(log, "Flow actions attr not present in new flow.");
		goto error;
	}

	/* Most of the time we need to allocate a new flow, do it before
	 * locking.
	 */
	new_flow = ovs_flow_alloc();
	if (IS_ERR(new_flow)) {
		error = PTR_ERR(new_flow);
		goto error;
	}

	/* Extract key. */
	ovs_match_init(&match, &new_flow->key, false, &mask);
	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
				  a[OVS_FLOW_ATTR_MASK], log);
	if (error)
		goto err_kfree_flow;

	/* Extract flow identifier. */
	error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
				       &new_flow->key, log);
	if (error)
		goto err_kfree_flow;

	/* The unmasked key is needed to match when a UFID is not used. */
	if (ovs_identifier_is_key(&new_flow->id))
		match.key = new_flow->id.unmasked_key;

	ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);

	/* Validate actions. */
	error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
				     &new_flow->key, &acts, log);
	if (error) {
		OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
		goto err_kfree_flow;
	}

	reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
					ufid_flags);
	if (IS_ERR(reply)) {
		error = PTR_ERR(reply);
		goto err_kfree_acts;
	}

	ovs_lock();
	dp = get_dp(net, ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}

	/* Check if this is a duplicate flow */
	if (ovs_identifier_is_ufid(&new_flow->id))
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
	if (!flow)
		flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
	if (likely(!flow)) {
		rcu_assign_pointer(new_flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
		if (unlikely(error)) {
			acts = NULL;
			goto err_unlock_ovs;
		}

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(new_flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
		ovs_unlock();
	} else {
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request. We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
							 | NLM_F_EXCL))) {
			error = -EEXIST;
			goto err_unlock_ovs;
		}
		/* The flow identifier has to be the same for flow updates.
		 * Look for any overlapping flow.
		 */
		if (unlikely(!ovs_flow_cmp(flow, &match))) {
			if (ovs_identifier_is_key(&flow->id))
				flow = ovs_flow_tbl_lookup_exact(&dp->table,
								 &match);
			else /* UFID matches but key is different */
				flow = NULL;
			if (!flow) {
				error = -ENOENT;
				goto err_unlock_ovs;
			}
		}
		/* Update actions. */
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
		ovs_unlock();

		ovs_nla_free_flow_actions_rcu(old_acts);
		ovs_flow_free(new_flow, false);
	}

	if (reply)
		ovs_notify(&dp_flow_genl_family, reply, info);
	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	ovs_nla_free_flow_actions(acts);
err_kfree_flow:
	ovs_flow_free(new_flow, false);
error:
	return error;
}

/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
static noinline_for_stack
struct sw_flow_actions *get_flow_actions(struct net *net,
					 const struct nlattr *a,
					 const struct sw_flow_key *key,
					 const struct sw_flow_mask *mask,
					 bool log)
{
	struct sw_flow_actions *acts;
	struct sw_flow_key masked_key;
	int error;

	ovs_flow_mask_key(&masked_key, key, true, mask);
	error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
	if (error) {
		OVS_NLERR(log,
			  "Actions may not be safe on all matching packets");
		return ERR_PTR(error);
	}

	return acts;
}

/* Factor out match-init and action-copy to avoid a
 * "Wframe-larger-than=1024" warning. Because the mask is only used to
 * get the actions, we add a new function to save some stack space.
 *
 * If neither the key nor the action attribute is present, we return 0
 * directly; in that case the caller will not use the match either. If
 * the action attribute is present, we try to get the actions and save
 * them in *acts. Before returning, we reset the match->mask pointer,
 * because we must not return a match object with a dangling reference
 * to the mask.
 */
static noinline_for_stack int
ovs_nla_init_match_and_action(struct net *net,
			      struct sw_flow_match *match,
			      struct sw_flow_key *key,
			      struct nlattr **a,
			      struct sw_flow_actions **acts,
			      bool log)
{
	struct sw_flow_mask mask;
	int error = 0;

	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(match, key, true, &mask);
		error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
					  a[OVS_FLOW_ATTR_MASK], log);
		if (error)
			goto error;
	}

	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		if (!a[OVS_FLOW_ATTR_KEY]) {
			OVS_NLERR(log,
				  "Flow key attribute not present in set flow.");
			error = -EINVAL;
			goto error;
		}

		*acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
					 &mask, log);
		if (IS_ERR(*acts)) {
			error = PTR_ERR(*acts);
			goto error;
		}
	}

	/* On success, error is 0. */
error:
	match->mask = NULL;
	return error;
}

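/* OVS_FLOW_CMD_SET handler: replace the actions and/or clear the stats of
 * an existing flow.
 */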
static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct sk_buff *reply = NULL;
	struct datapath *dp;
	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
	struct sw_flow_match match;
	struct sw_flow_id sfid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error = 0;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
	if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
		OVS_NLERR(log,
			  "Flow set message rejected, Key attribute missing.");
		return -EINVAL;
	}

	error = ovs_nla_init_match_and_action(net, &match, &key, a,
					      &acts, log);
	if (error)
		goto error;

	if (acts) {
		/* Can allocate before locking if have acts. */
		reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
						ufid_flags);
		if (IS_ERR(reply)) {
			error = PTR_ERR(reply);
			goto err_kfree_acts;
		}
	}

	ovs_lock();
	dp = get_dp(net, ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}
	/* Check that the flow exists. */
	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		error = -ENOENT;
		goto err_unlock_ovs;
	}

	/* Update actions, if present. */
	if (likely(acts)) {
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_SET,
						       ufid_flags);
			BUG_ON(error < 0);
		}
	} else {
		/* Could not alloc without acts before locking. */
		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
						info, OVS_FLOW_CMD_SET, false,
						ufid_flags);

		if (IS_ERR(reply)) {
			error = PTR_ERR(reply);
			goto err_unlock_ovs;
		}
	}

	/* Clear stats. */
	if (a[OVS_FLOW_ATTR_CLEAR])
		ovs_flow_stats_clear(flow);
	ovs_unlock();

	if (reply)
		ovs_notify(&dp_flow_genl_family, reply, info);
	if (old_acts)
		ovs_nla_free_flow_actions_rcu(old_acts);

	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	ovs_nla_free_flow_actions(acts);
error:
	return error;
}

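/* OVS_FLOW_CMD_GET handler: look up a single flow by key or UFID and
 * reply with its description.
 */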
static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err = 0;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, true, NULL);
		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
					log);
	} else if (!ufid_present) {
		OVS_NLERR(log,
			  "Flow get message rejected, Key attribute missing.");
		err = -EINVAL;
	}
	if (err)
		return err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (!flow) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
					OVS_FLOW_CMD_GET, true, ufid_flags);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);
unlock:
	ovs_unlock();
	return err;
}

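/* OVS_FLOW_CMD_DEL handler: delete one flow, or flush the whole table when
 * neither a key nor a UFID is supplied.
 */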
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow = NULL;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, true, NULL);
		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
					NULL, log);
		if (unlikely(err))
			return err;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		err = -ENODEV;
		goto unlock;
	}

	if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
		err = ovs_flow_tbl_flush(&dp->table);
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		err = -ENOENT;
		goto unlock;
	}

	ovs_flow_tbl_remove(&dp->table, flow);
	ovs_unlock();

	reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
					&flow->id, info, false, ufid_flags);
	if (likely(reply)) {
		if (!IS_ERR(reply)) {
			rcu_read_lock();	/* To keep RCU checker happy. */
			err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
						     reply, info->snd_portid,
						     info->snd_seq, 0,
						     OVS_FLOW_CMD_DEL,
						     ufid_flags);
			rcu_read_unlock();
			if (WARN_ON_ONCE(err < 0)) {
				kfree_skb(reply);
				goto out_free;
			}

			ovs_notify(&dp_flow_genl_family, reply, info);
		} else {
			netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0,
					PTR_ERR(reply));
		}
	}

out_free:
	ovs_flow_free(flow, true);
	return 0;
unlock:
	ovs_unlock();
	return err;
}

static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *a[__OVS_FLOW_ATTR_MAX];
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct table_instance *ti;
	struct datapath *dp;
	u32 ufid_flags;
	int err;

	err = genlmsg_parse_deprecated(cb->nlh, &dp_flow_genl_family, a,
				       OVS_FLOW_ATTR_MAX, flow_policy, NULL);
	if (err)
		return err;
	ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}

	ti = rcu_dereference(dp->table.ti);
	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_GET, ufid_flags) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	rcu_read_unlock();
	return skb->len;
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
	[OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
	[OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
	[OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
};

static const struct genl_small_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_flow_cmd_new
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_flow_cmd_set,
	},
};

static struct genl_family dp_flow_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX,
	.policy = flow_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = dp_flow_genl_ops,
	.n_small_ops = ARRAY_SIZE(dp_flow_genl_ops),
	.mcgrps = &ovs_dp_flow_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};

static size_t ovs_dp_cmd_msg_size(void)
{
	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

	msgsize += nla_total_size(IFNAMSIZ);
	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_MASKS_CACHE_SIZE */

	return msgsize;
}

/* Called with ovs_mutex. */
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	struct ovs_dp_megaflow_stats dp_megaflow_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
				 flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
	if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
			  &dp_stats, OVS_DP_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
			  sizeof(struct ovs_dp_megaflow_stats),
			  &dp_megaflow_stats, OVS_DP_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_DP_ATTR_MASKS_CACHE_SIZE,
			ovs_flow_tbl_masks_cache_size(&dp->table)))
		goto nla_put_failure;

	genlmsg_end(skb, ovs_header);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_alloc_info(void)
{
	return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
}

/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
					const struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(net, ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}

static void ovs_dp_reset_user_features(struct sk_buff *skb,
				       struct genl_info *info)
{
	struct datapath *dp;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr,
			     info->attrs);
	if (IS_ERR(dp))
		return;

	pr_warn("%s: Dropping previously announced user features\n",
		ovs_dp_name(dp));
	dp->user_features = 0;
}

DEFINE_STATIC_KEY_FALSE(tc_recirc_sharing_support);

static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
{
	u32 user_features = 0;

	if (a[OVS_DP_ATTR_USER_FEATURES]) {
		user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);

		if (user_features & ~(OVS_DP_F_VPORT_PIDS |
				      OVS_DP_F_UNALIGNED |
				      OVS_DP_F_TC_RECIRC_SHARING))
			return -EOPNOTSUPP;

#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
		if (user_features & OVS_DP_F_TC_RECIRC_SHARING)
			return -EOPNOTSUPP;
#endif
	}

	if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) {
		int err;
		u32 cache_size;

		cache_size = nla_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]);
		err = ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size);
		if (err)
			return err;
	}

	dp->user_features = user_features;

	if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING)
		static_branch_enable(&tc_recirc_sharing_support);
	else
		static_branch_disable(&tc_recirc_sharing_support);

	return 0;
}

static int ovs_dp_stats_init(struct datapath *dp)
{
	dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		return -ENOMEM;

	return 0;
}

static int ovs_dp_vport_init(struct datapath *dp)
{
	int i;

	dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
				  sizeof(struct hlist_head),
				  GFP_KERNEL);
	if (!dp->ports)
		return -ENOMEM;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&dp->ports[i]);

	return 0;
}

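/* OVS_DP_CMD_NEW handler: create a datapath along with its local internal
 * vport.
 */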
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001665static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1666{
1667 struct nlattr **a = info->attrs;
1668 struct vport_parms parms;
1669 struct sk_buff *reply;
1670 struct datapath *dp;
1671 struct vport *vport;
1672 struct ovs_net *ovs_net;
Olivier Deprez157378f2022-04-04 15:47:50 +02001673 int err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001674
1675 err = -EINVAL;
1676 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1677 goto err;
1678
1679 reply = ovs_dp_cmd_alloc_info();
1680 if (!reply)
1681 return -ENOMEM;
1682
1683 err = -ENOMEM;
1684 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1685 if (dp == NULL)
Olivier Deprez157378f2022-04-04 15:47:50 +02001686 goto err_destroy_reply;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001687
1688 ovs_dp_set_net(dp, sock_net(skb->sk));
1689
1690 /* Allocate table. */
1691 err = ovs_flow_tbl_init(&dp->table);
1692 if (err)
Olivier Deprez157378f2022-04-04 15:47:50 +02001693 goto err_destroy_dp;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001694
Olivier Deprez157378f2022-04-04 15:47:50 +02001695 err = ovs_dp_stats_init(dp);
1696 if (err)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001697 goto err_destroy_table;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001698
Olivier Deprez157378f2022-04-04 15:47:50 +02001699 err = ovs_dp_vport_init(dp);
1700 if (err)
1701 goto err_destroy_stats;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001702
1703 err = ovs_meters_init(dp);
1704 if (err)
Olivier Deprez157378f2022-04-04 15:47:50 +02001705 goto err_destroy_ports;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001706
1707 /* Set up our datapath device. */
1708 parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1709 parms.type = OVS_VPORT_TYPE_INTERNAL;
1710 parms.options = NULL;
1711 parms.dp = dp;
1712 parms.port_no = OVSP_LOCAL;
1713 parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1714
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001715 /* So far only local changes have been made, now need the lock. */
1716 ovs_lock();
1717
Olivier Deprez157378f2022-04-04 15:47:50 +02001718 err = ovs_dp_change(dp, a);
1719 if (err)
1720 goto err_unlock_and_destroy_meters;
1721
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001722 vport = new_vport(&parms);
1723 if (IS_ERR(vport)) {
1724 err = PTR_ERR(vport);
1725 if (err == -EBUSY)
1726 err = -EEXIST;
1727
1728 if (err == -EEXIST) {
1729 /* An outdated user space instance that does not understand
1730 * the concept of user_features has attempted to create a new
1731 * datapath and is likely to reuse it. Drop all user features.
1732 */
1733 if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1734 ovs_dp_reset_user_features(skb, info);
1735 }
1736
Olivier Deprez157378f2022-04-04 15:47:50 +02001737 goto err_unlock_and_destroy_meters;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001738 }
1739
1740 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1741 info->snd_seq, 0, OVS_DP_CMD_NEW);
1742 BUG_ON(err < 0);
1743
1744 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1745 list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1746
1747 ovs_unlock();
1748
1749 ovs_notify(&dp_datapath_genl_family, reply, info);
1750 return 0;
1751
Olivier Deprez157378f2022-04-04 15:47:50 +02001752err_unlock_and_destroy_meters:
1753 ovs_unlock();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001754 ovs_meters_exit(dp);
Olivier Deprez157378f2022-04-04 15:47:50 +02001755err_destroy_ports:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001756 kfree(dp->ports);
Olivier Deprez157378f2022-04-04 15:47:50 +02001757err_destroy_stats:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001758 free_percpu(dp->stats_percpu);
1759err_destroy_table:
1760 ovs_flow_tbl_destroy(&dp->table);
Olivier Deprez157378f2022-04-04 15:47:50 +02001761err_destroy_dp:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001762 kfree(dp);
Olivier Deprez157378f2022-04-04 15:47:50 +02001763err_destroy_reply:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001764 kfree_skb(reply);
1765err:
1766 return err;
1767}

/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
	struct flow_table *table = &dp->table;
	int i;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
			if (vport->port_no != OVSP_LOCAL)
				ovs_dp_detach_port(vport);
	}

	list_del_rcu(&dp->list_node);

	/* OVSP_LOCAL is the datapath's internal port. Make sure that all
	 * ports in the datapath are destroyed before the datapath itself
	 * is freed.
	 */
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));

	/* Flush sw_flows from the tables here, so that the RCU callback
	 * only has to release resources such as the dp, ports and tables;
	 * this avoids RCU usage warnings.
	 */
	table_instance_flow_flush(table, ovsl_dereference(table->ti),
				  ovsl_dereference(table->ufid_ti));

	/* RCU destroy the ports, meters and flow tables. */
	call_rcu(&dp->rcu, destroy_dp_rcu);
}

static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	reply = ovs_dp_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto err_unlock_free;

	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
				   info->snd_seq, 0, OVS_DP_CMD_DEL);
	BUG_ON(err < 0);

	__dp_destroy(dp);
	ovs_unlock();

	ovs_notify(&dp_datapath_genl_family, reply, info);

	return 0;

err_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}
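
/* Note the ordering in ovs_dp_cmd_del(): the notification skb is allocated
 * before ovs_lock is taken and filled in before __dp_destroy() runs, so the
 * command can still fail cleanly with -ENOMEM before any state is torn down,
 * and the reply describes the datapath as it existed at deletion time.
 */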

static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	reply = ovs_dp_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto err_unlock_free;

	err = ovs_dp_change(dp, info->attrs);
	if (err)
		goto err_unlock_free;

	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
				   info->snd_seq, 0, OVS_DP_CMD_SET);
	BUG_ON(err < 0);

	ovs_unlock();
	ovs_notify(&dp_datapath_genl_family, reply, info);

	return 0;

err_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	reply = ovs_dp_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp)) {
		err = PTR_ERR(dp);
		goto err_unlock_free;
	}
	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
				   info->snd_seq, 0, OVS_DP_CMD_GET);
	BUG_ON(err < 0);
	ovs_unlock();

	return genlmsg_reply(reply, info);

err_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct datapath *dp;
	int skip = cb->args[0];
	int i = 0;

	ovs_lock();
	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 OVS_DP_CMD_GET) < 0)
			break;
		i++;
	}
	ovs_unlock();

	cb->args[0] = i;

	return skb->len;
}
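
/* Dump state lives in cb->args[0]: the number of datapaths already emitted.
 * Each invocation skips that many list entries and stops once the skb fills
 * up, so a multi-part dump resumes where the previous part left off.
 */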

static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
	[OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0,
		PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
};
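
/* The masks cache is a per-CPU allocation, so the policy caps
 * OVS_DP_ATTR_MASKS_CACHE_SIZE at however many mask_cache_entry structs
 * fit in PCPU_MIN_UNIT_SIZE, keeping the request within what
 * alloc_percpu() can reliably satisfy; a size of 0 disables the cache.
 */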

static const struct genl_small_ops dp_datapath_genl_ops[] = {
	{ .cmd = OVS_DP_CMD_NEW,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_dp_cmd_new
	},
	{ .cmd = OVS_DP_CMD_DEL,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_dp_cmd_del
	},
	{ .cmd = OVS_DP_CMD_GET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = 0, /* OK for unprivileged users. */
	  .doit = ovs_dp_cmd_get,
	  .dumpit = ovs_dp_cmd_dump
	},
	{ .cmd = OVS_DP_CMD_SET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_dp_cmd_set,
	},
};

static struct genl_family dp_datapath_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_DATAPATH_FAMILY,
	.version = OVS_DATAPATH_VERSION,
	.maxattr = OVS_DP_ATTR_MAX,
	.policy = datapath_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = dp_datapath_genl_ops,
	.n_small_ops = ARRAY_SIZE(dp_datapath_genl_ops),
	.mcgrps = &ovs_dp_datapath_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};
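
/* For illustration only -- a minimal userspace sketch (not part of this
 * module) of how a client might resolve this family and fetch a datapath
 * with OVS_DP_CMD_GET. It assumes libnl-3 (libnl-genl-3) and a datapath
 * named "ovs-system"; error handling is omitted for brevity.
 *
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *	#include <linux/openvswitch.h>
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int family = genl_ctrl_resolve(sk, OVS_DATAPATH_FAMILY);
 *
 *	struct nl_msg *msg = nlmsg_alloc();
 *	struct ovs_header *oh;
 *	oh = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
 *			 sizeof(*oh), 0, OVS_DP_CMD_GET,
 *			 OVS_DATAPATH_VERSION);
 *	oh->dp_ifindex = 0;			// wildcard; select by name
 *	nla_put_string(msg, OVS_DP_ATTR_NAME, "ovs-system");
 *	nl_send_auto(sk, msg);
 */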

/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   struct net *net, u32 portid, u32 seq,
				   u32 flags, u8 cmd, gfp_t gfp)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME,
			   ovs_vport_name(vport)) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
		goto nla_put_failure;

	if (!net_eq(net, dev_net(vport->dev))) {
		int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);

		if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
			goto nla_put_failure;
	}

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
			  sizeof(struct ovs_vport_stats), &vport_stats,
			  OVS_VPORT_ATTR_PAD))
		goto nla_put_failure;

	if (ovs_vport_get_upcall_portids(vport, skb))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	genlmsg_end(skb, ovs_header);
	return 0;

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}
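
/* Note the error handling above: only -EMSGSIZE from
 * ovs_vport_get_options() cancels the partially built message (so a dump
 * can retry with more room); any other failure is ignored and the message
 * is simply completed without an OVS_VPORT_ATTR_OPTIONS attribute.
 */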

static struct sk_buff *ovs_vport_cmd_alloc_info(void)
{
	return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
}

/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
					 u32 portid, u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
					 GFP_KERNEL);
	BUG_ON(retval < 0);

	return skb;
}

/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
				  const struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_IFINDEX])
		return ERR_PTR(-EOPNOTSUPP);
	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(net, ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = ovs_vport_ovsl_rcu(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENODEV);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}
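
/* Lookup precedence: a vport name, if supplied, wins, and a dp_ifindex of
 * zero then acts as a wildcard (a non-zero dp_ifindex must still match the
 * vport's datapath). Otherwise the (dp_ifindex, port_no) pair is used.
 * OVS_VPORT_ATTR_IFINDEX is not supported for lookups, hence -EOPNOTSUPP.
 */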

static unsigned int ovs_get_max_headroom(struct datapath *dp)
{
	unsigned int dev_headroom, max_headroom = 0;
	struct net_device *dev;
	struct vport *vport;
	int i;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
					 lockdep_ovsl_is_held()) {
			dev = vport->dev;
			dev_headroom = netdev_get_fwd_headroom(dev);
			if (dev_headroom > max_headroom)
				max_headroom = dev_headroom;
		}
	}

	return max_headroom;
}

/* Called with ovs_mutex */
static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
{
	struct vport *vport;
	int i;

	dp->max_headroom = new_headroom;
	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
					 lockdep_ovsl_is_held())
			netdev_set_rx_headroom(vport->dev, new_headroom);
	}
}
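
/* dp->max_headroom caches the largest forwarding headroom needed by any
 * port in the datapath, and every port's RX headroom is kept at that
 * maximum so that packets forwarded between ports rarely need to be
 * reallocated to gain headroom (e.g. for tunnel encapsulation).
 */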

static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	unsigned int new_headroom;
	u32 port_no;
	int err;

	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		return -EINVAL;
	if (a[OVS_VPORT_ATTR_IFINDEX])
		return -EOPNOTSUPP;

	port_no = a[OVS_VPORT_ATTR_PORT_NO]
		? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
	if (port_no >= DP_MAX_PORTS)
		return -EFBIG;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
restart:
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock_free;

	if (port_no) {
		vport = ovs_vport_ovsl(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock_free;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock_free;
			}
			vport = ovs_vport_ovsl(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport)) {
		if (err == -EAGAIN)
			goto restart;
		goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
				      info->snd_portid, info->snd_seq, 0,
				      OVS_VPORT_CMD_NEW, GFP_KERNEL);

	new_headroom = netdev_get_fwd_headroom(vport->dev);

	if (new_headroom > dp->max_headroom)
		ovs_update_headroom(dp, new_headroom);
	else
		netdev_set_rx_headroom(vport->dev, dp->max_headroom);

	BUG_ON(err < 0);
	ovs_unlock();

	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}
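
/* new_vport() returns -EAGAIN when it had to request a vport module that
 * was not yet loaded; the "restart" label repeats the datapath lookup and
 * creation attempt once the module has had a chance to register its ops.
 */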

static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;

	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
		err = -EINVAL;
		goto exit_unlock_free;
	}

	if (a[OVS_VPORT_ATTR_OPTIONS]) {
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
		if (err)
			goto exit_unlock_free;
	}

	if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
		struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];

		err = ovs_vport_set_upcall_portids(vport, ids);
		if (err)
			goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
				      info->snd_portid, info->snd_seq, 0,
				      OVS_VPORT_CMD_SET, GFP_KERNEL);
	BUG_ON(err < 0);

	ovs_unlock();
	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	bool update_headroom = false;
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	unsigned int new_headroom;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
				      info->snd_portid, info->snd_seq, 0,
				      OVS_VPORT_CMD_DEL, GFP_KERNEL);
	BUG_ON(err < 0);

	/* The vport deletion may trigger a datapath headroom update. */
	dp = vport->dp;
	if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
		update_headroom = true;

	netdev_reset_rx_headroom(vport->dev);
	ovs_dp_detach_port(vport);

	if (update_headroom) {
		new_headroom = ovs_get_max_headroom(dp);

		if (new_headroom < dp->max_headroom)
			ovs_update_headroom(dp, new_headroom);
	}
	ovs_unlock();

	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;
	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
				      info->snd_portid, info->snd_seq, 0,
				      OVS_VPORT_CMD_GET, GFP_ATOMIC);
	BUG_ON(err < 0);
	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock_free:
	rcu_read_unlock();
	kfree_skb(reply);
	return err;
}
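
/* Read-side commands: OVS_VPORT_CMD_GET and the dump below run under the
 * RCU read lock rather than ovs_mutex, so any allocation they trigger
 * (e.g. peernet2id_alloc() via ovs_vport_cmd_fill_info()) must use
 * GFP_ATOMIC. The mutating commands hold ovs_mutex and may sleep, hence
 * GFP_KERNEL there.
 */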

static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;

		j = 0;
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    sock_net(skb->sk),
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_GET,
						    GFP_ATOMIC) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}
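
/* The vport dump keeps a two-level cursor: cb->args[0] is the hash bucket
 * to resume from and cb->args[1] is how many entries of that bucket were
 * already emitted. "skip" is reset to 0 once the first resumed bucket has
 * been walked, so later buckets are dumped from their beginning.
 */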

static void ovs_dp_masks_rebalance(struct work_struct *work)
{
	struct ovs_net *ovs_net = container_of(work, struct ovs_net,
					       masks_rebalance.work);
	struct datapath *dp;

	ovs_lock();

	list_for_each_entry(dp, &ovs_net->dps, list_node)
		ovs_flow_masks_rebalance(&dp->table);

	ovs_unlock();

	schedule_delayed_work(&ovs_net->masks_rebalance,
			      msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
}
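
/* The rebalance job is self-rearming: it is first scheduled from
 * ovs_init_net(), reschedules itself every DP_MASKS_REBALANCE_INTERVAL
 * milliseconds, and is stopped by cancel_delayed_work_sync() in
 * ovs_exit_net(). It takes ovs_lock itself, so it must never be cancelled
 * synchronously while that lock is held.
 */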

static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
	[OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
};

static const struct genl_small_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = 0, /* OK for unprivileged users. */
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .doit = ovs_vport_cmd_set,
	},
};

struct genl_family dp_vport_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
	.policy = vport_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = dp_vport_genl_ops,
	.n_small_ops = ARRAY_SIZE(dp_vport_genl_ops),
	.mcgrps = &ovs_dp_vport_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};

static struct genl_family * const dp_genl_families[] = {
	&dp_datapath_genl_family,
	&dp_vport_genl_family,
	&dp_flow_genl_family,
	&dp_packet_genl_family,
	&dp_meter_genl_family,
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	&dp_ct_limit_genl_family,
#endif
};

static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i]);
}

static int __init dp_register_genl(void)
{
	int err;
	int i;

	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		err = genl_register_family(dp_genl_families[i]);
		if (err)
			goto error;
	}

	return 0;

error:
	dp_unregister_genl(i);
	return err;
}
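
/* On a registration failure, dp_unregister_genl(i) tears down only the i
 * families that were registered before the failing one; dp_cleanup() later
 * passes ARRAY_SIZE(dp_genl_families) to unregister them all.
 */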

static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	int err;

	INIT_LIST_HEAD(&ovs_net->dps);
	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
	INIT_DELAYED_WORK(&ovs_net->masks_rebalance, ovs_dp_masks_rebalance);

	err = ovs_ct_init(net);
	if (err)
		return err;

	schedule_delayed_work(&ovs_net->masks_rebalance,
			      msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
	return 0;
}

static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
					    struct list_head *head)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct datapath *dp;

	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		int i;

		for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
			struct vport *vport;

			hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
				if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
					continue;

				if (dev_net(vport->dev) == dnet)
					list_add(&vport->detach_list, head);
			}
		}
	}
}

static void __net_exit ovs_exit_net(struct net *dnet)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
	struct vport *vport, *vport_next;
	struct net *net;
	LIST_HEAD(head);

	ovs_lock();

	ovs_ct_exit(dnet);

	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);

	down_read(&net_rwsem);
	for_each_net(net)
		list_vports_from_net(net, dnet, &head);
	up_read(&net_rwsem);

	/* Detach all vports from the given namespace. */
	list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
		list_del(&vport->detach_list);
		ovs_dp_detach_port(vport);
	}

	ovs_unlock();

	cancel_delayed_work_sync(&ovs_net->masks_rebalance);
	cancel_work_sync(&ovs_net->dp_notify_work);
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};

static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) >
		     sizeof_field(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath\n");

	err = action_fifos_init();
	if (err)
		goto error;

	err = ovs_internal_dev_rtnl_link_register();
	if (err)
		goto error_action_fifos_exit;

	err = ovs_flow_init();
	if (err)
		goto error_unreg_rtnl_link;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = ovs_netdev_init();
	if (err)
		goto error_unreg_notifier;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_netdev;

	return 0;

error_unreg_netdev:
	ovs_netdev_exit();
error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error_unreg_rtnl_link:
	ovs_internal_dev_rtnl_link_unregister();
error_action_fifos_exit:
	action_fifos_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	ovs_netdev_exit();
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
	rcu_barrier();
	ovs_vport_exit();
	ovs_flow_exit();
	ovs_internal_dev_rtnl_link_unregister();
	action_fifos_exit();
}
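
/* dp_cleanup() unwinds dp_init() in reverse. The rcu_barrier() after
 * unregister_pernet_device() waits for pending RCU callbacks such as
 * destroy_dp_rcu() to finish before the flow and vport caches they rely
 * on are torn down.
 */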

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");
MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_METER_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_CT_LIMIT_FAMILY);