// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

/* Separating two types of XDP xmit */
#define VETH_XDP_TX		BIT(0)
#define VETH_XDP_REDIR		BIT(1)

#define VETH_XDP_TX_BULK_SIZE	16

struct veth_rq_stats {
	u64			xdp_packets;
	u64			xdp_bytes;
	u64			xdp_drops;
	struct u64_stats_sync	syncp;
};

struct veth_rq {
	struct napi_struct	xdp_napi;
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	struct veth_rq_stats	stats;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};

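/*
 * Bulk queue for XDP_TX frames. Frames emitted by the local XDP program
 * are collected here (at most VETH_XDP_TX_BULK_SIZE at a time) and pushed
 * to the peer's ring in a single veth_xdp_flush_bq() call, so the
 * producer lock is taken once per batch rather than once per frame.
 */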
struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};

/*
 * ethtool interface
 */

struct veth_q_stat_desc {
	char	desc[ETH_GSTRING_LEN];
	size_t	offset;
};

#define VETH_RQ_STAT(m)	offsetof(struct veth_rq_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
	{ "xdp_packets", VETH_RQ_STAT(xdp_packets) },
	{ "xdp_bytes",   VETH_RQ_STAT(xdp_bytes) },
	{ "xdp_drops",   VETH_RQ_STAT(xdp_drops) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	char *p = (char *)buf;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		p += sizeof(ethtool_stats_keys);
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "rx_queue_%u_%.11s",
					 i, veth_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		break;
	}
}

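/*
 * The stringset built above yields per-queue keys of the form
 * "rx_queue_<i>_<stat>". For a single-queue device, "ethtool -S" output
 * would look something like this (values illustrative):
 *
 *	peer_ifindex: 5
 *	rx_queue_0_xdp_packets: 0
 *	rx_queue_0_xdp_bytes: 0
 *	rx_queue_0_xdp_drops: 0
 */
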
static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys) +
		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues;
	default:
		return -EOPNOTSUPP;
	}
}

static void veth_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int i, j, idx;

	data[0] = peer ? peer->ifindex : 0;
	idx = 1;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
		const void *stats_base = (void *)rq_stats;
		unsigned int start;
		size_t offset;

		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				offset = veth_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
		idx += VETH_RQ_STATS_LEN;
	}
}

static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
};

/* general routines */

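/*
 * Entries in the per-queue xdp_ring are either sk_buff or xdp_frame
 * pointers. Both kinds of pointer are at least word aligned, so bit 0
 * (VETH_XDP_FLAG) is free to serve as a tag: set for xdp_frame, clear
 * for sk_buff. The helpers below tag, untag and test that bit.
 */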
static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static void *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}

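/*
 * Kick the consumer's NAPI context after producing into its xdp_ring.
 * The full barrier here pairs with smp_store_mb() in veth_poll(): either
 * the producer sees rx_notify_masked cleared and schedules NAPI, or the
 * poller re-checks the ring after clearing the flag, so a newly queued
 * entry cannot be missed by both sides.
 */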
static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!rq->rx_notify_masked) {
		rq->rx_notify_masked = true;
		napi_schedule(&rq->xdp_napi);
	}
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		netif_rx(skb);
}

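/*
 * Transmit path. A veth pair has no real medium: "sending" a packet
 * means handing it straight to the peer device. If the peer has an XDP
 * program attached, the skb is queued on the peer's xdp_ring and
 * processed in its NAPI poll; otherwise it goes through plain netif_rx().
 */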
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	struct net_device *rcv;
	int length = skb->len;
	bool rcv_xdp = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];
		rcv_xdp = rcu_access_pointer(rq->xdp_prog);
		skb_record_rx_queue(skb, rxq);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
		if (!rcv_xdp) {
			struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);

			u64_stats_update_begin(&stats->syncp);
			stats->bytes += length;
			stats->packets++;
			u64_stats_update_end(&stats->syncp);
		}
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}

	if (rcv_xdp)
		__veth_xdp_flush(rq);

	rcu_read_unlock();

	return NETDEV_TX_OK;
}

static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int cpu;

	result->packets = 0;
	result->bytes = 0;
	for_each_possible_cpu(cpu) {
		struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->packets += packets;
		result->bytes += bytes;
	}
	return atomic64_read(&priv->dropped);
}

static void veth_stats_rx(struct veth_rq_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	result->xdp_packets = 0;
	result->xdp_bytes = 0;
	result->xdp_drops = 0;
	for (i = 0; i < dev->num_rx_queues; i++) {
		struct veth_rq_stats *stats = &priv->rq[i].stats;
		u64 packets, bytes, drops;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->xdp_packets;
			bytes = stats->xdp_bytes;
			drops = stats->xdp_drops;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->xdp_packets += packets;
		result->xdp_bytes += bytes;
		result->xdp_drops += drops;
	}
}

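/*
 * Fill in rtnl_link_stats64. Note the crossover: everything this device
 * transmits is received by its peer, so the peer's TX counters are folded
 * into this device's RX totals, and the peer's XDP RX counters into this
 * device's TX totals.
 */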
static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct veth_rq_stats rx;
	struct pcpu_lstats tx;

	tot->tx_dropped = veth_stats_tx(&tx, dev);
	tot->tx_bytes = tx.bytes;
	tot->tx_packets = tx.packets;

	veth_stats_rx(&rx, dev);
	tot->rx_dropped = rx.xdp_drops;
	tot->rx_bytes = rx.xdp_bytes;
	tot->rx_packets = rx.xdp_packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		tot->rx_dropped += veth_stats_tx(&tx, peer);
		tot->rx_bytes += tx.bytes;
		tot->rx_packets += tx.packets;

		veth_stats_rx(&rx, peer);
		tot->tx_bytes += rx.xdp_bytes;
		tot->tx_packets += rx.xdp_packets;
	}
	rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
				      int buflen)
{
	struct sk_buff *skb;

	if (!buflen) {
		buflen = SKB_DATA_ALIGN(headroom + len) +
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}
	skb = build_skb(head, buflen);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}

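/*
 * .ndo_xdp_xmit for redirects targeting a veth. Frames are pushed
 * directly onto the peer's xdp_ring; the return value is the number of
 * frames accepted, or a negative errno if the whole batch had to be
 * dropped (invalid flags, no peer, or no XDP program on the peer).
 */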
static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames, u32 flags)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *rcv;
	int i, ret, drops = n;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		goto drop;
	}

	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		ret = -ENXIO;
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
	 * side. This means an XDP program is loaded on the peer and the peer
	 * device is up.
	 */
	if (!rcu_access_pointer(rq->xdp_prog)) {
		ret = -ENXIO;
		goto drop;
	}

	drops = 0;
	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(frame->len > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr))) {
			xdp_return_frame_rx_napi(frame);
			drops++;
		}
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	if (likely(!drops))
		return n;

	ret = n - drops;
drop:
	atomic64_add(drops, &priv->dropped);

	return ret;
}

static void veth_xdp_flush_bq(struct net_device *dev, struct veth_xdp_tx_bq *bq)
{
	int sent, i, err = 0;

	sent = veth_xdp_xmit(dev, bq->count, bq->q, 0);
	if (sent < 0) {
		err = sent;
		sent = 0;
		for (i = 0; i < bq->count; i++)
			xdp_return_frame(bq->q[i]);
	}
	trace_xdp_bulk_tx(dev, sent, bq->count - sent, err);

	bq->count = 0;
}

static void veth_xdp_flush(struct net_device *dev, struct veth_xdp_tx_bq *bq)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *rcv;
	struct veth_rq *rq;

	rcu_read_lock();
	veth_xdp_flush_bq(dev, bq);
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* Non-NULL xdp_prog means xdp_ring is initialized on the receive side */
	if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rq);
out:
	rcu_read_unlock();
}

static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
		       struct veth_xdp_tx_bq *bq)
{
	struct xdp_frame *frame = convert_to_xdp_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		veth_xdp_flush_bq(dev, bq);

	bq->q[bq->count++] = frame;

	return 0;
}

static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
					struct xdp_frame *frame,
					unsigned int *xdp_xmit,
					struct veth_xdp_tx_bq *bq)
{
	void *hard_start = frame->data - frame->headroom;
	int len = frame->len, delta = 0;
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;
	unsigned int headroom;
	struct sk_buff *skb;

	/* bpf_xdp_adjust_head() assures BPF cannot access xdp_frame area */
	hard_start -= sizeof(struct xdp_frame);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct xdp_buff xdp;
		u32 act;

		xdp.data_hard_start = hard_start;
		xdp.data = frame->data;
		xdp.data_end = frame->data + frame->len;
		xdp.data_meta = frame->data - frame->metasize;
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			delta = frame->data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				goto err_xdp;
			}
			*xdp_xmit |= VETH_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
				frame = &orig_frame;
				goto err_xdp;
			}
			*xdp_xmit |= VETH_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			/* fall through */
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
	skb = veth_build_skb(hard_start, headroom, len, 0);
	if (!skb) {
		xdp_return_frame(frame);
		goto err;
	}

	xdp_release_frame(frame);
	xdp_scrub_frame(frame);
	skb->protocol = eth_type_trans(skb, rq->dev);
err:
	return skb;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}

static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
					unsigned int *xdp_xmit,
					struct veth_xdp_tx_bq *bq)
{
	u32 pktlen, headroom, act, metalen;
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	int mac_len, delta, off;
	struct xdp_buff xdp;

	skb_orphan(skb);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	mac_len = skb->data - skb_mac_header(skb);
	pktlen = skb->len + mac_len;
	headroom = skb_headroom(skb) - mac_len;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
		struct sk_buff *nskb;
		int size, head_off;
		void *head, *start;
		struct page *page;

		size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (size > PAGE_SIZE)
			goto drop;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			goto drop;

		head = page_address(page);
		start = head + VETH_XDP_HEADROOM;
		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
			page_frag_free(head);
			goto drop;
		}

		nskb = veth_build_skb(head,
				      VETH_XDP_HEADROOM + mac_len, skb->len,
				      PAGE_SIZE);
		if (!nskb) {
			page_frag_free(head);
			goto drop;
		}

		skb_copy_header(nskb, skb);
		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);
		consume_skb(skb);
		skb = nskb;
	}

	xdp.data_hard_start = skb->head;
	xdp.data = skb_mac_header(skb);
	xdp.data_end = xdp.data + pktlen;
	xdp.data_meta = xdp.data;
	xdp.rxq = &rq->xdp_rxq;
	orig_data = xdp.data;
	orig_data_end = xdp.data_end;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			goto err_xdp;
		}
		*xdp_xmit |= VETH_XDP_TX;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
			goto err_xdp;
		*xdp_xmit |= VETH_XDP_REDIR;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
		goto drop;
	}
	rcu_read_unlock();

	delta = orig_data - xdp.data;
	off = mac_len + delta;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);
	skb->mac_header -= delta;
	off = xdp.data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off);
	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp.data - xdp.data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	page_frag_free(xdp.data);
xdp_xmit:
	return NULL;
}

static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit,
			struct veth_xdp_tx_bq *bq)
{
	int i, done = 0, drops = 0, bytes = 0;

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);
		unsigned int xdp_xmit_one = 0;
		struct sk_buff *skb;

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

			bytes += frame->len;
			skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one, bq);
		} else {
			skb = ptr;
			bytes += skb->len;
			skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one, bq);
		}
		*xdp_xmit |= xdp_xmit_one;

		if (skb)
			napi_gro_receive(&rq->xdp_napi, skb);
		else if (!xdp_xmit_one)
			drops++;

		done++;
	}

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.xdp_packets += done;
	rq->stats.xdp_bytes += bytes;
	rq->stats.xdp_drops += drops;
	u64_stats_update_end(&rq->stats.syncp);

	return done;
}

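/*
 * NAPI poll handler. Consumes up to @budget ring entries, then either
 * completes NAPI or, if the producer raced with napi_complete_done(),
 * re-schedules itself. Any XDP_TX or XDP_REDIRECT work generated while
 * receiving is flushed once per poll, after the loop.
 */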
static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	unsigned int xdp_xmit = 0;
	struct veth_xdp_tx_bq bq;
	int done;

	bq.count = 0;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &xdp_xmit, &bq);

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			rq->rx_notify_masked = true;
			napi_schedule(&rq->xdp_napi);
		}
	}

	if (xdp_xmit & VETH_XDP_TX)
		veth_xdp_flush(rq->dev, &bq);
	if (xdp_xmit & VETH_XDP_REDIR)
		xdp_do_flush_map();
	xdp_clear_return_frame_no_direct();

	return done;
}

static int veth_napi_add(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
		napi_enable(&rq->xdp_napi);
	}

	return 0;
err_xdp_ring:
	for (i--; i >= 0; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

	return err;
}

static void veth_napi_del(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_disable(&rq->xdp_napi);
		napi_hash_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_del(&rq->xdp_napi);
		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}
}

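/*
 * Attach the XDP program to every RX queue. On first enable this also
 * registers each queue's xdp_rxq_info and brings up NAPI; the
 * registration state of queue 0 stands in for all queues, since they
 * are only ever registered and unregistered together.
 */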
static int veth_enable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			struct veth_rq *rq = &priv->rq[i];

			err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i);
			if (err < 0)
				goto err_rxq_reg;

			err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err < 0)
				goto err_reg_mem;

			/* Save original mem info as it can be overwritten */
			rq->xdp_mem = rq->xdp_rxq.mem;
		}

		err = veth_napi_add(dev);
		if (err)
			goto err_rxq_reg;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);

	return 0;
err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= 0; i--)
		xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);

	return err;
}

static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
	veth_napi_del(dev);
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);
	}
}

static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);

	return 0;
}

static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++) {
		priv->rq[i].dev = dev;
		u64_stats_init(&priv->rq[i].stats.syncp);
	}

	return 0;
}

static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
	int err;

	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	err = veth_alloc_queues(dev);
	if (err) {
		free_percpu(dev->lstats);
		return err;
	}

	return 0;
}

static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
	free_percpu(dev->lstats);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one.
	 * Since that has nothing to do with disabling irqs, we are guaranteed
	 * never to have pending data when we poll for it, so there is
	 * nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 */
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

static netdev_features_t veth_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer) {
		struct veth_priv *peer_priv = netdev_priv(peer);

		if (peer_priv->_xdp_prog)
			features &= ~NETIF_F_GSO_SOFTWARE;
	}

	return features;
}

static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}

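/*
 * Install or remove the device's XDP program. The max_mtu computation
 * below reflects the single-page receive buffer: XDP headroom, the
 * link-layer header and the trailing skb_shared_info must all fit in
 * one page. As a rough illustration only (exact sizes vary by
 * architecture and config): with 4K pages, 256 bytes of XDP headroom,
 * a 14-byte Ethernet header and a shared-info block of about 320 bytes,
 * the permissible peer MTU comes out near 3500 bytes.
 */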
static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct bpf_prog *old_prog;
	struct net_device *peer;
	unsigned int max_mtu;
	int err;

	old_prog = priv->_xdp_prog;
	priv->_xdp_prog = prog;
	peer = rtnl_dereference(priv->peer);

	if (prog) {
		if (!peer) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
			err = -ENOTCONN;
			goto err;
		}

		max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
			  peer->hard_header_len -
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (peer->mtu > max_mtu) {
			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
			err = -ERANGE;
			goto err;
		}

		if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
			NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
			err = -ENOSPC;
			goto err;
		}

		if (dev->flags & IFF_UP) {
			err = veth_enable_xdp(dev);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
				goto err;
			}
		}

		if (!old_prog) {
			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
			peer->max_mtu = max_mtu;
		}
	}

	if (old_prog) {
		if (!prog) {
			if (dev->flags & IFF_UP)
				veth_disable_xdp(dev);

			if (peer) {
				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
				peer->max_mtu = ETH_MAX_MTU;
			}
		}
		bpf_prog_put(old_prog);
	}

	if ((!!old_prog ^ !!prog) && peer)
		netdev_update_features(peer);

	return 0;
err:
	priv->_xdp_prog = old_prog;

	return err;
}

static u32 veth_xdp_query(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = priv->_xdp_prog;
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return veth_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = veth_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops veth_netdev_ops = {
	.ndo_init		= veth_dev_init,
	.ndo_open		= veth_open,
	.ndo_stop		= veth_close,
	.ndo_start_xmit		= veth_xmit,
	.ndo_get_stats64	= veth_get_stats64,
	.ndo_set_rx_mode	= veth_set_multicast_list,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= veth_poll_controller,
#endif
	.ndo_get_iflink		= veth_get_iflink,
	.ndo_fix_features	= veth_fix_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= veth_set_rx_headroom,
	.ndo_bpf		= veth_xdp,
	.ndo_xdp_xmit		= veth_xdp_xmit,
};

#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
		       NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
		       NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
		       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
		       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)

static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->needs_free_netdev = true;
	dev->priv_destructor = veth_dev_free;
	dev->max_mtu = ETH_MAX_MTU;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}

/*
 * netlink interface
 */

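/*
 * A veth pair is created over this netlink interface via the "veth" kind
 * with an optional VETH_INFO_PEER attribute, e.g. from iproute2
 * (commands shown for illustration; names are arbitrary):
 *
 *	ip link add veth0 type veth peer name veth1
 *	ip link set veth1 netns mynetns		# typical use: link two netns
 */
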
static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;

static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
					  nla_len(nla_peer) - sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL, extack);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);

	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	peer->gso_max_size = dev->gso_max_size;
	peer->gso_max_segs = dev->gso_max_segs;

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note that, since we've registered a new device, the dev's name
	 * should be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */

	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note: dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not to be freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};

static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
	.get_link_net	= veth_get_link_net,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);