// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	GRE over IPv6 protocol decoder.
 *
 *	Authors: Dmitry Kozlov (xeb@mail.ru)
 */
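/*
 * Devices handled here are typically managed with iproute2.  An
 * illustrative example (interface name, addresses and key are
 * placeholders):
 *
 *   ip link add name ip6gre1 type ip6gre \
 *           local 2001:db8::1 remote 2001:db8::2 key 42
 */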

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
#include <net/gre.h>
#include <net/erspan.h>
#include <net/dst_metadata.h>


static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define IP6_GRE_HASH_SIZE_SHIFT  5
#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)

static unsigned int ip6gre_net_id __read_mostly;
struct ip6gre_net {
	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];

	struct ip6_tnl __rcu *collect_md_tun;
	struct ip6_tnl __rcu *collect_md_tun_erspan;
	struct net_device *fb_tunnel_dev;
};

static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);

/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match: if a key is present in the packet,
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless
   tunnel, will match the fallback tunnel.
 */

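/* Fold the 32-bit GRE key down to a hash-table index; XORing the key
 * with itself shifted right by four spreads nearby key values across
 * the IP6_GRE_HASH_SIZE buckets.
 */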
#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
static u32 HASH_ADDR(const struct in6_addr *addr)
{
	u32 hash = ipv6_addr_hash(addr);

	return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
}

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
/* Given src, dst and key, find the appropriate tunnel for an incoming packet. */

static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
		const struct in6_addr *remote, const struct in6_addr *local,
		__be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH_ADDR(remote);
	unsigned int h1 = HASH_KEY(key);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB) ||
			gre_proto == htons(ETH_P_ERSPAN) ||
			gre_proto == htons(ETH_P_ERSPAN2)) ?
		       ARPHRD_ETHER : ARPHRD_IP6GRE;
	int score, cand_score = 4;
	struct net_device *ndev;

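	/* Scoring: a live tunnel that also matches the input device's
	 * ifindex and device type (score 0) wins immediately; otherwise
	 * bit 0 marks a link mismatch and bit 1 a device-type mismatch,
	 * and the lowest-scoring candidate is kept as a fallback.
	 */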
	for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
		     (!ipv6_addr_equal(local, &t->parms.raddr) ||
		      !ipv6_addr_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand)
		return cand;

	if (gre_proto == htons(ETH_P_ERSPAN) ||
	    gre_proto == htons(ETH_P_ERSPAN2))
		t = rcu_dereference(ign->collect_md_tun_erspan);
	else
		t = rcu_dereference(ign->collect_md_tun);

	if (t && t->dev->flags & IFF_UP)
		return t;

	ndev = READ_ONCE(ign->fb_tunnel_dev);
	if (ndev && ndev->flags & IFF_UP)
		return netdev_priv(ndev);

	return NULL;
}

static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
		const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = HASH_KEY(p->i_key);
	int prio = 0;

	if (!ipv6_addr_any(local))
		prio |= 1;
	if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH_ADDR(remote);
	}

	return &ign->tunnels[prio][h];
}

static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, t);
}

static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, t);
}

static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun, NULL);
}

static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
				       struct ip6_tnl *t)
{
	if (t->parms.collect_md)
		rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
}

static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
		const struct ip6_tnl *t)
{
	return __ip6gre_bucket(ign, &t->parms);
}

static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6gre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
					  const struct __ip6_tnl_parm *parms,
					  int type)
{
	const struct in6_addr *remote = &parms->raddr;
	const struct in6_addr *local = &parms->laddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip6_tnl *t;
	struct ip6_tnl __rcu **tp;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	for (tp = __ip6gre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}

static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
		const struct __ip6_tnl_parm *parms, int create)
{
	struct ip6_tnl *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
	if (t && create)
		return NULL;
	if (t || !create)
		return t;

	if (parms->name[0]) {
		if (!dev_valid_name(parms->name))
			return NULL;
		strlcpy(name, parms->name, IFNAMSIZ);
	} else {
		strcpy(name, "ip6gre%d");
	}
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6gre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ip6gre_link_ops;

	nt->dev = dev;
	nt->net = dev_net(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	ip6gre_tnl_link_config(nt, 1);

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & TUNNEL_SEQ))
		dev->features |= NETIF_F_LLTX;

	ip6gre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}

static void ip6erspan_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6erspan_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

static void ip6gre_tunnel_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	if (ign->fb_tunnel_dev == dev)
		WRITE_ONCE(ign->fb_tunnel_dev, NULL);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *ipv6h;
	struct tnl_ptk_info tpi;
	struct ip6_tnl *t;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
			     offset) < 0)
		return -EINVAL;

	ipv6h = (const struct ipv6hdr *)skb->data;
	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
				 tpi.key, tpi.proto);
	if (!t)
		return -ENOENT;

	switch (type) {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 teli;
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		if (code != ICMPV6_PORT_UNREACH)
			break;
		return 0;
	case ICMPV6_TIME_EXCEED:
		if (code == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			break;
		}
		return 0;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if (code == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == be32_to_cpu(info) - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		return 0;
	case ICMPV6_PKT_TOOBIG:
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
		return 0;
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		return 0;
	}

	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}

static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	const struct ipv6hdr *ipv6h;
	struct ip6_tnl *tunnel;

	ipv6h = ipv6_hdr(skb);
	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		if (tunnel->parms.collect_md) {
			struct metadata_dst *tun_dst;
			__be64 tun_id;
			__be16 flags;

			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}

static int ip6erspan_rcv(struct sk_buff *skb,
			 struct tnl_ptk_info *tpi,
			 int gre_hdr_len)
{
	struct erspan_base_hdr *ershdr;
	const struct ipv6hdr *ipv6h;
	struct erspan_md2 *md2;
	struct ip6_tnl *tunnel;
	u8 ver;

	ipv6h = ipv6_hdr(skb);
	ershdr = (struct erspan_base_hdr *)skb->data;
	ver = ershdr->ver;

	tunnel = ip6gre_tunnel_lookup(skb->dev,
				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
				      tpi->proto);
	if (tunnel) {
		int len = erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb, len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			return PACKET_REJECT;

		if (tunnel->parms.collect_md) {
			struct erspan_metadata *pkt_md, *md;
			struct metadata_dst *tun_dst;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
						  sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * old pkt_md is no longer valid and we need to reset
			 * it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			info = &tun_dst->u.tun_info;
			md = ip_tunnel_info_opts(info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);

			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);

		} else {
			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
		}

		return PACKET_RCVD;
	}

	return PACKET_REJECT;
}

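/* Entry point for all GRE-in-IPv6 packets: parse and strip the GRE
 * header, then hand the packet to the ERSPAN or plain GRE receive
 * path.  If nothing claims it, answer with ICMPv6 port unreachable.
 */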
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
	if (hdr_len < 0)
		goto drop;

	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
		return 0;

out:
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
				     struct net_device *dev,
				     struct flowi6 *fl6, __u8 *dsfield,
				     int *encap_limit)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct ip6_tnl *t = netdev_priv(dev);

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		*encap_limit = t->parms.encap_limit;

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv4_get_dsfield(iph);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
}

static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
				    struct net_device *dev,
				    struct flowi6 *fl6, __u8 *dsfield,
				    int *encap_limit)
{
	struct ipv6hdr *ipv6h;
	struct ip6_tnl *t = netdev_priv(dev);
	__u16 offset;

	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
	ipv6h = ipv6_hdr(skb);

	if (offset > 0) {
		struct ipv6_tlv_tnl_enc_lim *tel;

		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		*encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
		*encap_limit = t->parms.encap_limit;
	}

	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		*dsfield = ipv6_get_dsfield(ipv6h);
	else
		*dsfield = ip6_tclass(t->parms.flowinfo);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6->flowlabel |= ip6_flowlabel(ipv6h);

	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6->flowi6_mark = skb->mark;
	else
		fl6->flowi6_mark = t->parms.fwmark;

	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

	return 0;
}

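/* Common transmit path: pick the outer IPv6 destination, make room for
 * and push the GRE header (deriving flags and key from the metadata
 * dst in collect_md mode, from the tunnel parameters otherwise), then
 * hand the packet to ip6_tnl_xmit() for IPv6 encapsulation.
 */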
static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
			       struct net_device *dev, __u8 dsfield,
			       struct flowi6 *fl6, int encap_limit,
			       __u32 *pmtu, __be16 proto)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	__be16 protocol;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
		fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
	else
		fl6->daddr = tunnel->parms.raddr;

	if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
		return -ENOMEM;

	/* Push GRE header. */
	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;

	if (tunnel->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;
		__be16 flags;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info ||
			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -EINVAL;

		key = &tun_info->key;
		memset(fl6, 0, sizeof(*fl6));
		fl6->flowi6_proto = IPPROTO_GRE;
		fl6->daddr = key->u.ipv6.dst;
		fl6->flowlabel = key->label;
		fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);

		dsfield = key->tos;
		flags = key->tun_flags &
			(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
		tunnel->tun_hlen = gre_calc_hlen(flags);

		gre_build_header(skb, tunnel->tun_hlen,
				 flags, protocol,
				 tunnel_id_to_key32(tun_info->key.tun_id),
				 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
						      : 0);

	} else {
		if (tunnel->parms.o_flags & TUNNEL_SEQ)
			tunnel->o_seqno++;

		gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
				 protocol, tunnel->parms.o_key,
				 htonl(tunnel->o_seqno));
	}

	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
			    NEXTHDR_GRE);
}

static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	if (!t->parms.collect_md)
		prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
					 &dsfield, &encap_limit);

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			  skb->protocol);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		return -1;
	}

	return 0;
}

static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u8 dsfield = 0;
	__u32 mtu;
	int err;

	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
		return -1;

	if (!t->parms.collect_md &&
	    prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
		return -1;

	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
		return -1;

	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
			  &mtu, skb->protocol);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}

/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
	const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int encap_limit = -1;
	struct flowi6 fl6;
	__u32 mtu;
	int err;

	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	if (!t->parms.collect_md)
		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));

	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
	if (err)
		return err;

	err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);

	return err;
}

static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
	struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	int ret;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = ip6gre_xmit_ipv4(skb, dev);
		break;
	case htons(ETH_P_IPV6):
		ret = ip6gre_xmit_ipv6(skb, dev);
		break;
	default:
		ret = ip6gre_xmit_other(skb, dev);
		break;
	}

	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct dst_entry *dst = skb_dst(skb);
	struct net_device_stats *stats;
	bool truncate = false;
	int encap_limit = -1;
	__u8 dsfield = false;
	struct flowi6 fl6;
	int err = -EINVAL;
	__be16 proto;
	__u32 mtu;
	int nhoff;
	int thoff;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
		goto tx_err;

	if (gre_handle_offloads(skb, false))
		goto tx_err;

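	/* ERSPAN mirrors frames rather than transporting them: oversized
	 * payloads are trimmed to the device MTU (or detected as already
	 * shortened) and only marked as truncated in the ERSPAN header.
	 */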
	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
		goto tx_err;

	t->parms.o_flags &= ~TUNNEL_KEY;
	IPCB(skb)->flags = 0;

	/* For collect_md mode, derive fl6 from the tunnel key;
	 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
	 */
	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;
		struct erspan_metadata *md;
		__be32 tun_id;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info ||
			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			goto tx_err;

		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_GRE;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);

		dsfield = key->tos;
		if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
			goto tx_err;
		if (tun_info->options_len < sizeof(*md))
			goto tx_err;
		md = ip_tunnel_info_opts(tun_info);

		tun_id = tunnel_id_to_key32(key->tun_id);
		if (md->version == 1) {
			erspan_build_header(skb,
					    ntohl(tun_id),
					    ntohl(md->u.index), truncate,
					    false);
		} else if (md->version == 2) {
			erspan_build_header_v2(skb,
					       ntohl(tun_id),
					       md->u.md2.dir,
					       get_hwid(&md->u.md2),
					       truncate, false);
		} else {
			goto tx_err;
		}
	} else {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
						 &dsfield, &encap_limit);
			break;
		case htons(ETH_P_IPV6):
			if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
				goto tx_err;
			if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
						     &dsfield, &encap_limit))
				goto tx_err;
			break;
		default:
			memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
			break;
		}

		if (t->parms.erspan_ver == 1)
			erspan_build_header(skb, ntohl(t->parms.o_key),
					    t->parms.index,
					    truncate, false);
		else if (t->parms.erspan_ver == 2)
			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
					       t->parms.dir,
					       t->parms.hwid,
					       truncate, false);
		else
			goto tx_err;

		fl6.daddr = t->parms.raddr;
	}

	/* Push GRE header. */
	proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
					   : htons(ETH_P_ERSPAN2);
	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));

	/* TooBig packet may have updated dst->dev's mtu */
	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
		dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   NEXTHDR_GRE);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE) {
			if (skb->protocol == htons(ETH_P_IP))
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED, htonl(mtu));
			else
				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		}

		goto tx_err;
	}
	return NETDEV_TX_OK;

tx_err:
	stats = &t->dev->stats;
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;

	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	}

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;
	fl6->flowi6_proto = IPPROTO_GRE;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT &&
	    p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;
}

static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
					 int t_hlen)
{
	const struct __ip6_tnl_parm *p = &t->parms;
	struct net_device *dev = t->dev;

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			unsigned short dst_len = rt->dst.dev->hard_header_len +
						 t_hlen;

			if (t->dev->header_ops)
				dev->hard_header_len = dst_len;
			else
				dev->needed_headroom = dst_len;

			if (set_mtu) {
				dev->mtu = rt->dst.dev->mtu - t_hlen;
				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
					dev->mtu -= 8;
				if (dev->type == ARPHRD_ETHER)
					dev->mtu -= ETH_HLEN;

				if (dev->mtu < IPV6_MIN_MTU)
					dev->mtu = IPV6_MIN_MTU;
			}
		}
		ip6_rt_put(rt);
	}
}

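/* Header length bookkeeping: tun_hlen is the GRE header proper (derived
 * from the output flags), hlen adds any extra encapsulation configured
 * on the tunnel, and t_hlen additionally accounts for the outer IPv6
 * header.
 */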
static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
{
	int t_hlen;

	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);

	if (tunnel->dev->header_ops)
		tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	else
		tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;

	return t_hlen;
}

static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
}

static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
				     const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	t->parms.i_flags = p->i_flags;
	t->parms.o_flags = p->o_flags;
	t->parms.fwmark = p->fwmark;
	t->parms.erspan_ver = p->erspan_ver;
	t->parms.index = p->index;
	t->parms.dir = p->dir;
	t->parms.hwid = p->hwid;
	dst_cache_reset(&t->dst_cache);
}

static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
			     int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6gre_tnl_link_config(t, set_mtu);
	return 0;
}

static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
				      const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
	p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
	memcpy(p->name, u->name, sizeof(u->name));
}

static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
				    const struct __ip6_tnl_parm *p)
{
	u->proto = IPPROTO_GRE;
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	memcpy(u->name, p->name, sizeof(u->name));
}

static int ip6gre_tunnel_ioctl(struct net_device *dev,
	struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				t = netdev_priv(dev);
		}
		memset(&p, 0, sizeof(p));
		ip6gre_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
			goto done;

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		ip6gre_tnl_parm_from_user(&p1, &p);
		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				t = netdev_priv(dev);

				ip6gre_tunnel_unlink(ign, t);
				synchronize_net();
				ip6gre_tnl_change(t, &p1, 1);
				ip6gre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;

			memset(&p, 0, sizeof(p));
			ip6gre_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}

static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned int len)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h;
	__be16 *p;

	ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
	ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
						  t->fl.u.ip6.flowlabel,
						  true, &t->fl.u.ip6));
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = NEXTHDR_GRE;
	ipv6h->saddr = t->parms.laddr;
	ipv6h->daddr = t->parms.raddr;

	p = (__be16 *)(ipv6h + 1);
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
	if (!ipv6_addr_any(&ipv6h->daddr))
		return t->hlen;

	return -t->hlen;
}

static const struct header_ops ip6gre_header_ops = {
	.create	= ip6gre_header,
};

static const struct net_device_ops ip6gre_netdev_ops = {
	.ndo_init		= ip6gre_tunnel_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_do_ioctl		= ip6gre_tunnel_ioctl,
	.ndo_change_mtu		= ip6_tnl_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip6_tnl_get_iflink,
};

static void ip6gre_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}

static void ip6gre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6gre_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->type = ARPHRD_IP6GRE;

	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	netif_keep_dst(dev);
	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

#define GRE6_FEATURES (NETIF_F_SG |		\
		       NETIF_F_FRAGLIST |	\
		       NETIF_F_HIGHDMA |	\
		       NETIF_F_HW_CSUM)

static void ip6gre_tnl_init_features(struct net_device *dev)
{
	struct ip6_tnl *nt = netdev_priv(dev);

	dev->features		|= GRE6_FEATURES;
	dev->hw_features	|= GRE6_FEATURES;

	if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
		    nt->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features    |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}

static int ip6gre_tunnel_init_common(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;
	int t_hlen;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (ret)
		goto cleanup_alloc_pcpu_stats;

	ret = gro_cells_init(&tunnel->gro_cells, dev);
	if (ret)
		goto cleanup_dst_cache_init;

	t_hlen = ip6gre_calc_hlen(tunnel);
	dev->mtu = ETH_DATA_LEN - t_hlen;
	if (dev->type == ARPHRD_ETHER)
		dev->mtu -= ETH_HLEN;
	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;

	if (tunnel->parms.collect_md) {
		netif_keep_dst(dev);
	}
	ip6gre_tnl_init_features(dev);

	dev_hold(dev);
	return 0;

cleanup_dst_cache_init:
	dst_cache_destroy(&tunnel->dst_cache);
cleanup_alloc_pcpu_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;
	return ret;
}

static int ip6gre_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	tunnel = netdev_priv(dev);

	if (tunnel->parms.collect_md)
		return 0;

	memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));

	if (ipv6_addr_any(&tunnel->parms.raddr))
		dev->header_ops = &ip6gre_header_ops;

	return 0;
}

static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);

	tunnel->hlen		= sizeof(struct ipv6hdr) + 4;
}

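/* Receive and error hooks handed to the IPv6 stack for IPPROTO_GRE. */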
static struct inet6_protocol ip6gre_protocol __read_mostly = {
	.handler     = gre_rcv,
	.err_handler = ip6gre_err,
	.flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct net_device *dev, *aux;
	int prio;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6gre_link_ops ||
		    dev->rtnl_link_ops == &ip6gre_tap_ops ||
		    dev->rtnl_link_ops == &ip6erspan_tap_ops)
			unregister_netdevice_queue(dev, head);

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
			struct ip6_tnl *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t) {
				/* If dev is in the same netns, it has already
				 * been added to the list by the previous loop.
				 */
				if (!net_eq(dev_net(t->dev), net))
					unregister_netdevice_queue(t->dev,
								   head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}

static int __net_init ip6gre_init_net(struct net *net)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct net_device *ndev;
	int err;

	if (!net_has_fallback_tunnels(net))
		return 0;
	ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
			    NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
	if (!ndev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	ign->fb_tunnel_dev = ndev;
	dev_net_set(ign->fb_tunnel_dev, net);
	/* The FB netdevice is special: there is one, and only one, per
	 * netns.  Allowing it to be moved to another netns is clearly
	 * unsafe.
	 */
	ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;

	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

	err = register_netdev(ign->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	free_netdev(ndev);
err_alloc_dev:
	return err;
}

static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		ip6gre_destroy_tunnels(net, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations ip6gre_net_ops = {
	.init = ip6gre_init_net,
	.exit_batch = ip6gre_exit_batch_net,
	.id   = &ip6gre_net_id,
	.size = sizeof(struct ip6gre_net),
};

static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}

static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	struct in6_addr daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
		if (ipv6_addr_any(&daddr))
			return -EINVAL;
	}

out:
	return ip6gre_tunnel_validate(tb, data, extack);
}

static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
				  struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret, ver = 0;

	if (!data)
		return 0;

	ret = ip6gre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have GRE sequence and key flag */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits.  Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
		if (ver != 1 && ver != 2)
			return -EINVAL;
	}

	if (ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);

			if (index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);

			if (dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}

		if (data[IFLA_GRE_ERSPAN_HWID]) {
			u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);

			if (hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

static void ip6erspan_set_version(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	if (!data)
		return;

	parms->erspan_ver = 1;
	if (data[IFLA_GRE_ERSPAN_VER])
		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

	if (parms->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX])
			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
	} else if (parms->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR])
			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
		if (data[IFLA_GRE_ERSPAN_HWID])
			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
	}
}

static void ip6gre_netlink_parms(struct nlattr *data[],
				 struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(
				nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(
				nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);

	if (data[IFLA_GRE_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);

	if (data[IFLA_GRE_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);

	if (data[IFLA_GRE_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_COLLECT_METADATA])
		parms->collect_md = true;
}

static int ip6gre_tap_init(struct net_device *dev)
{
	int ret;

	ret = ip6gre_tunnel_init_common(dev);
	if (ret)
		return ret;

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static const struct net_device_ops ip6gre_tap_netdev_ops = {
	.ndo_init = ip6gre_tap_init,
	.ndo_uninit = ip6gre_tunnel_uninit,
	.ndo_start_xmit = ip6gre_tunnel_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

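/* ERSPAN always carries an 8-byte GRE header (4-byte base plus 4-byte
 * sequence number; the session ID travels in the ERSPAN header, not the
 * GRE key), followed by the version-dependent ERSPAN header itself.
 */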
1830static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
1831{
1832 int t_hlen;
1833
1834 tunnel->tun_hlen = 8;
1835 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1836 erspan_hdr_len(tunnel->parms.erspan_ver);
1837
1838 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1839 tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
1840 return t_hlen;
1841}
1842
1843static int ip6erspan_tap_init(struct net_device *dev)
1844{
1845 struct ip6_tnl *tunnel;
1846 int t_hlen;
1847 int ret;
1848
1849 tunnel = netdev_priv(dev);
1850
1851 tunnel->dev = dev;
1852 tunnel->net = dev_net(dev);
1853 strcpy(tunnel->parms.name, dev->name);
1854
1855 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1856 if (!dev->tstats)
1857 return -ENOMEM;
1858
1859 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1860 if (ret)
1861 goto cleanup_alloc_pcpu_stats;
1862
1863 ret = gro_cells_init(&tunnel->gro_cells, dev);
1864 if (ret)
1865 goto cleanup_dst_cache_init;
1866
1867 t_hlen = ip6erspan_calc_hlen(tunnel);
1868 dev->mtu = ETH_DATA_LEN - t_hlen;
1869 if (dev->type == ARPHRD_ETHER)
1870 dev->mtu -= ETH_HLEN;
1871 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1872 dev->mtu -= 8;
1873
1874 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1875 ip6erspan_tnl_link_config(tunnel, 1);
1876
Olivier Deprez0e641232021-09-23 10:07:05 +02001877 dev_hold(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001878 return 0;
1879
1880cleanup_dst_cache_init:
1881 dst_cache_destroy(&tunnel->dst_cache);
1882cleanup_alloc_pcpu_stats:
1883 free_percpu(dev->tstats);
1884 dev->tstats = NULL;
1885 return ret;
1886}
1887
1888static const struct net_device_ops ip6erspan_netdev_ops = {
1889 .ndo_init = ip6erspan_tap_init,
1890 .ndo_uninit = ip6erspan_tunnel_uninit,
1891 .ndo_start_xmit = ip6erspan_tunnel_xmit,
1892 .ndo_set_mac_address = eth_mac_addr,
1893 .ndo_validate_addr = eth_validate_addr,
1894 .ndo_change_mtu = ip6_tnl_change_mtu,
1895 .ndo_get_stats64 = ip_tunnel_get_stats64,
1896 .ndo_get_iflink = ip6_tnl_get_iflink,
1897};
1898
1899static void ip6gre_tap_setup(struct net_device *dev)
1900{
1901
1902 ether_setup(dev);
1903
1904 dev->max_mtu = 0;
1905 dev->netdev_ops = &ip6gre_tap_netdev_ops;
1906 dev->needs_free_netdev = true;
1907 dev->priv_destructor = ip6gre_dev_free;
1908
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001909 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1910 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1911 netif_keep_dst(dev);
1912}
1913
static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
				       struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

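/* Common part of newlink for ip6gre, ip6gretap and ip6erspan:
 * apply any encapsulation parameters, assign a random MAC address to
 * Ethernet-type devices when none was supplied, and register the
 * net_device.
 */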
static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
				 struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt;
	struct ip_tunnel_encap ipencap;
	int err;

	nt = netdev_priv(dev);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	nt->net = dev_net(dev);

	err = register_netdevice(dev);
	if (err)
		goto out;

	if (tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

out:
	return err;
}

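/* newlink for ip6gre/ip6gretap: reject duplicates (at most one
 * collect_md tunnel per netns, and no two tunnels with identical
 * parameters), then register the device and link the new tunnel
 * into the per-netns hash tables.
 */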
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6gre_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}

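/* Common part of changelink: parse the new parameters and locate the
 * tunnel they describe.  Changing the fallback device is not allowed,
 * and the new parameters must not collide with a different existing
 * tunnel.  Returns the tunnel to update, or an ERR_PTR.
 */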
static struct ip6_tnl *
ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
			 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t, *nt = netdev_priv(dev);
	struct net *net = nt->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ign->fb_tunnel_dev)
		return ERR_PTR(-EINVAL);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return ERR_PTR(err);
	}

	ip6gre_netlink_parms(data, p_p);

	t = ip6gre_tunnel_locate(net, p_p, 0);

	if (t) {
		if (t->dev != dev)
			return ERR_PTR(-EEXIST);
	} else {
		t = nt;
	}

	return t;
}

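/* changelink for ip6gre/ip6gretap: unlink the tunnel from the hash
 * tables, apply the new parameters, then relink it so it is hashed
 * under its (possibly changed) addresses and key.
 */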
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
	struct __ip6_tnl_parm p;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6gre_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}

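/* dellink: queue the device for unregistration.  The fallback device
 * is never removed this way; it goes away only with its netns.
 */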
static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	if (dev != ign->fb_tunnel_dev)
		unregister_netdevice_queue(dev, head);
}

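/* Netlink attribute space reserved for ip6gre_fill_info(). */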
static size_t ip6gre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_GRE_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_GRE_FLAGS */
		nla_total_size(4) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		0;
}

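/* Report the tunnel configuration via netlink.  For ERSPAN v1/v2
 * tunnels, TUNNEL_KEY is folded into the reported output flags
 * unless the tunnel runs in collect_md mode.
 */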
static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm *p = &t->parms;
	__be16 o_flags = p->o_flags;

	if (p->erspan_ver == 1 || p->erspan_ver == 2) {
		if (!p->collect_md)
			o_flags |= TUNNEL_KEY;

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
			goto nla_put_failure;

		if (p->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
				goto nla_put_failure;
		} else {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
				goto nla_put_failure;
		}
	}

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
	    nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (p->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_GRE_FLOWINFO]	= { .type = NLA_U32 },
	[IFLA_GRE_FLAGS]	= { .type = NLA_U32 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};

static void ip6erspan_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->max_mtu = 0;
	dev->netdev_ops = &ip6erspan_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}

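/* newlink for ip6erspan: like ip6gre_newlink(), but also derives the
 * ERSPAN version from the netlink attributes and uses the dedicated
 * collect_md slot for ERSPAN tunnels.
 */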
static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
			     struct nlattr *tb[], struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ip6erspan_set_version(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun_erspan))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6erspan_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}

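/* Reapply link configuration after a parameter change, accounting
 * for the ERSPAN header length rather than the plain GRE one.
 */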
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
}

static int ip6erspan_tnl_change(struct ip6_tnl *t,
				const struct __ip6_tnl_parm *p, int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6erspan_tnl_link_config(t, set_mtu);
	return 0;
}

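/* changelink for ip6erspan: the same unlink/change/relink sequence as
 * ip6gre_changelink(), with the ERSPAN version re-derived from the
 * new attributes.
 */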
static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
				struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
	struct __ip6_tnl_parm p;
	struct ip6_tnl *t;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6erspan_set_version(data, &p);
	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6erspan_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}

static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
	.kind		= "ip6gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tunnel_setup,
	.validate	= ip6gre_tunnel_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.dellink	= ip6gre_dellink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
	.kind		= "ip6gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tap_setup,
	.validate	= ip6gre_tap_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
	.kind		= "ip6erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6erspan_tap_setup,
	.validate	= ip6erspan_tap_validate,
	.newlink	= ip6erspan_newlink,
	.changelink	= ip6erspan_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};

/*
 *	And now the modules code and kernel interface.
 */

static int __init ip6gre_init(void)
{
	int err;

	pr_info("GRE over IPv6 tunneling driver\n");

	err = register_pernet_device(&ip6gre_net_ops);
	if (err < 0)
		return err;

	err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ip6gre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ip6gre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&ip6erspan_tap_ops);
	if (err < 0)
		goto erspan_link_failed;

out:
	return err;

erspan_link_failed:
	rtnl_link_unregister(&ip6gre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_device(&ip6gre_net_ops);
	goto out;
}

static void __exit ip6gre_fini(void)
{
	rtnl_link_unregister(&ip6gre_tap_ops);
	rtnl_link_unregister(&ip6gre_link_ops);
	rtnl_link_unregister(&ip6erspan_tap_ops);
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
	unregister_pernet_device(&ip6gre_net_ops);
}

module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
MODULE_ALIAS_RTNL_LINK("ip6erspan");
MODULE_ALIAS_NETDEV("ip6gre0");