1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * PACKET - implements raw packet sockets.
8 *
9 * Authors: Ross Biro
10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11 * Alan Cox, <gw4pts@gw4pts.ampr.org>
12 *
13 * Fixes:
14 * Alan Cox : verify_area() now used correctly
15 * Alan Cox : new skbuff lists, look ma no backlogs!
16 * Alan Cox : tidied skbuff lists.
17 * Alan Cox : Now uses generic datagram routines I
18 * added. Also fixed the peek/read crash
19 * from all old Linux datagram code.
20 * Alan Cox : Uses the improved datagram code.
21 * Alan Cox : Added NULL's for socket options.
22 * Alan Cox : Re-commented the code.
23 * Alan Cox : Use new kernel side addressing
24 * Rob Janssen : Correct MTU usage.
25 * Dave Platt : Counter leaks caused by incorrect
26 * interrupt locking and some slightly
27 * dubious gcc output. Can you read
28 * compiler: it said _VOLATILE_
29 * Richard Kooijman : Timestamp fixes.
30 * Alan Cox : New buffers. Use sk->mac.raw.
31 * Alan Cox : sendmsg/recvmsg support.
32 * Alan Cox : Protocol setting support
33 * Alexey Kuznetsov : Untied from IPv4 stack.
34 * Cyrus Durgin : Fixed kerneld for kmod.
35 * Michal Ostrowski : Module initialization cleanup.
36 * Ulises Alonso : Frame number limit removal and
37 * packet_set_ring memory leak.
38 * Eric Biederman : Allow for > 8 byte hardware addresses.
39 * The convention is that longer addresses
40 * will simply extend the hardware address
41 * byte arrays at the end of sockaddr_ll
42 * and packet_mreq.
43 * Johann Baudy : Added TX RING.
44 * Chetan Loke : Implemented TPACKET_V3 block abstraction
45 * layer.
46 * Copyright (C) 2011, <lokec@ccs.neu.edu>
47 */
48
49#include <linux/types.h>
50#include <linux/mm.h>
51#include <linux/capability.h>
52#include <linux/fcntl.h>
53#include <linux/socket.h>
54#include <linux/in.h>
55#include <linux/inet.h>
56#include <linux/netdevice.h>
57#include <linux/if_packet.h>
58#include <linux/wireless.h>
59#include <linux/kernel.h>
60#include <linux/kmod.h>
61#include <linux/slab.h>
62#include <linux/vmalloc.h>
63#include <net/net_namespace.h>
64#include <net/ip.h>
65#include <net/protocol.h>
66#include <linux/skbuff.h>
67#include <net/sock.h>
68#include <linux/errno.h>
69#include <linux/timer.h>
70#include <linux/uaccess.h>
71#include <asm/ioctls.h>
72#include <asm/page.h>
73#include <asm/cacheflush.h>
74#include <asm/io.h>
75#include <linux/proc_fs.h>
76#include <linux/seq_file.h>
77#include <linux/poll.h>
78#include <linux/module.h>
79#include <linux/init.h>
80#include <linux/mutex.h>
81#include <linux/if_vlan.h>
82#include <linux/virtio_net.h>
83#include <linux/errqueue.h>
84#include <linux/net_tstamp.h>
85#include <linux/percpu.h>
86#ifdef CONFIG_INET
87#include <net/inet_common.h>
88#endif
89#include <linux/bpf.h>
90#include <net/compat.h>
91
92#include "internal.h"
93
94/*
95 Assumptions:
96 - If the device has no dev->header_ops->create, there is no LL header
97 visible above the device. In this case, its hard_header_len should be 0.
98 The device may prepend its own header internally. In this case, its
99 needed_headroom should be set to the space needed for it to add its
100 internal header.
101 For example, a WiFi driver pretending to be an Ethernet driver should
102 set its hard_header_len to be the Ethernet header length, and set its
103 needed_headroom to be (the real WiFi header length - the fake Ethernet
104 header length).
105 - packet socket receives packets with pulled ll header,
106 so that SOCK_RAW should push it back.
107
108On receive:
109-----------
110
111Incoming, dev_has_header(dev) == true
112 mac_header -> ll header
113 data -> data
114
115Outgoing, dev_has_header(dev) == true
116 mac_header -> ll header
117 data -> ll header
118
119Incoming, dev_has_header(dev) == false
120 mac_header -> data
121 However drivers often make it point to the ll header.
122 This is incorrect because the ll header should be invisible to us.
123 data -> data
124
125Outgoing, dev_has_header(dev) == false
126 mac_header -> data. ll header is invisible to us.
127 data -> data
128
129Resume
130 If dev_has_header(dev) == false we are unable to restore the ll header,
131 because it is invisible to us.
132
133
134On transmit:
135------------
136
137dev->header_ops != NULL
138 mac_header -> ll header
139 data -> ll header
140
141dev->header_ops == NULL (ll header is invisible to us)
142 mac_header -> data
143 data -> data
144
145 We should set network_header on output to the correct position,
146 packet classifier depends on it.
147 */
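/*
 * Illustrative sketch (not part of the kernel build): the user-space view
 * of the LL header rules above. Assumes an Ethernet device; the buffer
 * size is a placeholder.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *	#include <arpa/inet.h>
 *
 *	int raw   = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	char buf[2048];
 *
 *	recv(raw, buf, sizeof(buf), 0);   // SOCK_RAW: the pulled LL header is
 *	                                  // pushed back, so buf starts with
 *	                                  // the 14-byte Ethernet header
 *	recv(dgram, buf, sizeof(buf), 0); // SOCK_DGRAM: LL header stays
 *	                                  // invisible, buf starts at the
 *	                                  // network header
 */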
148
149/* Private packet socket structures. */
150
151/* identical to struct packet_mreq except it has
152 * a longer address field.
153 */
154struct packet_mreq_max {
155 int mr_ifindex;
156 unsigned short mr_type;
157 unsigned short mr_alen;
158 unsigned char mr_address[MAX_ADDR_LEN];
159};
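/*
 * Illustrative sketch (not part of the kernel build): the user-visible
 * counterpart, struct packet_mreq, reaches this code via setsockopt().
 * Enabling promiscuous mode on one interface, with ifindex a placeholder:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *
 * Per the convention noted in the header above, addresses longer than the
 * 8 bytes of packet_mreq simply extend the trailing byte array, which is
 * why the kernel-side copy here uses MAX_ADDR_LEN.
 */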
160
161union tpacket_uhdr {
162 struct tpacket_hdr *h1;
163 struct tpacket2_hdr *h2;
164 struct tpacket3_hdr *h3;
165 void *raw;
166};
167
168static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
169 int closing, int tx_ring);
170
171#define V3_ALIGNMENT (8)
172
173#define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
174
175#define BLK_PLUS_PRIV(sz_of_priv) \
176 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
177
178#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
179#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
180#define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
181#define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
182#define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
183#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
184
185struct packet_sock;
186static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
187 struct packet_type *pt, struct net_device *orig_dev);
188
189static void *packet_previous_frame(struct packet_sock *po,
190 struct packet_ring_buffer *rb,
191 int status);
192static void packet_increment_head(struct packet_ring_buffer *buff);
193static int prb_curr_blk_in_use(struct tpacket_block_desc *);
194static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
195 struct packet_sock *);
196static void prb_retire_current_block(struct tpacket_kbdq_core *,
197 struct packet_sock *, unsigned int status);
198static int prb_queue_frozen(struct tpacket_kbdq_core *);
199static void prb_open_block(struct tpacket_kbdq_core *,
200 struct tpacket_block_desc *);
201static void prb_retire_rx_blk_timer_expired(struct timer_list *);
202static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
203static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
204static void prb_clear_rxhash(struct tpacket_kbdq_core *,
205 struct tpacket3_hdr *);
206static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
207 struct tpacket3_hdr *);
208static void packet_flush_mclist(struct sock *sk);
209static u16 packet_pick_tx_queue(struct sk_buff *skb);
210
211struct packet_skb_cb {
212 union {
213 struct sockaddr_pkt pkt;
214 union {
215 /* Trick: alias skb original length with
216 * ll.sll_family and ll.protocol in order
217 * to save room.
218 */
219 unsigned int origlen;
220 struct sockaddr_ll ll;
221 };
222 } sa;
223};
224
225#define vio_le() virtio_legacy_is_little_endian()
226
227#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
228
229#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
230#define GET_PBLOCK_DESC(x, bid) \
231 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
232#define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
233 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
234#define GET_NEXT_PRB_BLK_NUM(x) \
235 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
236 ((x)->kactive_blk_num+1) : 0)
237
238static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
239static void __fanout_link(struct sock *sk, struct packet_sock *po);
240
241static int packet_direct_xmit(struct sk_buff *skb)
242{
243 return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
244}
245
246static struct net_device *packet_cached_dev_get(struct packet_sock *po)
247{
248 struct net_device *dev;
249
250 rcu_read_lock();
251 dev = rcu_dereference(po->cached_dev);
252 if (likely(dev))
253 dev_hold(dev);
254 rcu_read_unlock();
255
256 return dev;
257}
258
259static void packet_cached_dev_assign(struct packet_sock *po,
260 struct net_device *dev)
261{
262 rcu_assign_pointer(po->cached_dev, dev);
263}
264
265static void packet_cached_dev_reset(struct packet_sock *po)
266{
267 RCU_INIT_POINTER(po->cached_dev, NULL);
268}
269
270static bool packet_use_direct_xmit(const struct packet_sock *po)
271{
272 return po->xmit == packet_direct_xmit;
273}
274
275static u16 packet_pick_tx_queue(struct sk_buff *skb)
276{
277 struct net_device *dev = skb->dev;
278 const struct net_device_ops *ops = dev->netdev_ops;
279 int cpu = raw_smp_processor_id();
280 u16 queue_index;
281
282#ifdef CONFIG_XPS
283 skb->sender_cpu = cpu + 1;
284#endif
285 skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
286 if (ops->ndo_select_queue) {
287 queue_index = ops->ndo_select_queue(dev, skb, NULL);
288 queue_index = netdev_cap_txqueue(dev, queue_index);
289 } else {
290 queue_index = netdev_pick_tx(dev, skb, NULL);
291 }
292
293 return queue_index;
294}
295
296/* __register_prot_hook must be invoked through register_prot_hook
297 * or from a context in which asynchronous accesses to the packet
298 * socket is not possible (packet_create()).
299 */
300static void __register_prot_hook(struct sock *sk)
301{
302 struct packet_sock *po = pkt_sk(sk);
303
304 if (!po->running) {
305 if (po->fanout)
306 __fanout_link(sk, po);
307 else
308 dev_add_pack(&po->prot_hook);
309
310 sock_hold(sk);
311 po->running = 1;
312 }
313}
314
315static void register_prot_hook(struct sock *sk)
316{
317 lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
318 __register_prot_hook(sk);
319}
320
321/* If the sync parameter is true, we will temporarily drop
322 * the po->bind_lock and do a synchronize_net to make sure no
323 * asynchronous packet processing paths still refer to the elements
324 * of po->prot_hook. If the sync parameter is false, it is the
325 * callers responsibility to take care of this.
326 */
327static void __unregister_prot_hook(struct sock *sk, bool sync)
328{
329 struct packet_sock *po = pkt_sk(sk);
330
331 lockdep_assert_held_once(&po->bind_lock);
332
333 po->running = 0;
334
335 if (po->fanout)
336 __fanout_unlink(sk, po);
337 else
338 __dev_remove_pack(&po->prot_hook);
339
340 __sock_put(sk);
341
342 if (sync) {
343 spin_unlock(&po->bind_lock);
344 synchronize_net();
345 spin_lock(&po->bind_lock);
346 }
347}
348
349static void unregister_prot_hook(struct sock *sk, bool sync)
350{
351 struct packet_sock *po = pkt_sk(sk);
352
353 if (po->running)
354 __unregister_prot_hook(sk, sync);
355}
356
357static inline struct page * __pure pgv_to_page(void *addr)
358{
359 if (is_vmalloc_addr(addr))
360 return vmalloc_to_page(addr);
361 return virt_to_page(addr);
362}
363
364static void __packet_set_status(struct packet_sock *po, void *frame, int status)
365{
366 union tpacket_uhdr h;
367
368 h.raw = frame;
369 switch (po->tp_version) {
370 case TPACKET_V1:
371 h.h1->tp_status = status;
372 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
373 break;
374 case TPACKET_V2:
375 h.h2->tp_status = status;
376 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
377 break;
378 case TPACKET_V3:
379 h.h3->tp_status = status;
380 flush_dcache_page(pgv_to_page(&h.h3->tp_status));
381 break;
382 default:
383 WARN(1, "TPACKET version not supported.\n");
384 BUG();
385 }
386
387 smp_wmb();
388}
389
390static int __packet_get_status(const struct packet_sock *po, void *frame)
391{
392 union tpacket_uhdr h;
393
394 smp_rmb();
395
396 h.raw = frame;
397 switch (po->tp_version) {
398 case TPACKET_V1:
399 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
400 return h.h1->tp_status;
401 case TPACKET_V2:
402 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
403 return h.h2->tp_status;
404 case TPACKET_V3:
405 flush_dcache_page(pgv_to_page(&h.h3->tp_status));
406 return h.h3->tp_status;
407 default:
408 WARN(1, "TPACKET version not supported.\n");
409 BUG();
410 return 0;
411 }
412}
413
414static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
415 unsigned int flags)
416{
417 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
418
419 if (shhwtstamps &&
420 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
421 ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
422 return TP_STATUS_TS_RAW_HARDWARE;
423
424 if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
425 ktime_to_timespec64_cond(skb->tstamp, ts))
426 return TP_STATUS_TS_SOFTWARE;
427
428 return 0;
429}
430
431static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
432 struct sk_buff *skb)
433{
434 union tpacket_uhdr h;
435 struct timespec64 ts;
436 __u32 ts_status;
437
438 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
439 return 0;
440
441 h.raw = frame;
442 /*
443 * versions 1 through 3 overflow the timestamps in y2106, since they
444 * all store the seconds in a 32-bit unsigned integer.
445 * If we create a version 4, that should have a 64-bit timestamp,
446 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
447 * nanoseconds.
448 */
449 switch (po->tp_version) {
450 case TPACKET_V1:
451 h.h1->tp_sec = ts.tv_sec;
452 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
453 break;
454 case TPACKET_V2:
455 h.h2->tp_sec = ts.tv_sec;
456 h.h2->tp_nsec = ts.tv_nsec;
457 break;
458 case TPACKET_V3:
459 h.h3->tp_sec = ts.tv_sec;
460 h.h3->tp_nsec = ts.tv_nsec;
461 break;
462 default:
463 WARN(1, "TPACKET version not supported.\n");
464 BUG();
465 }
466
467 /* one flush is safe, as both fields always lie on the same cacheline */
468 flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
469 smp_wmb();
470
471 return ts_status;
472}
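/*
 * Illustrative sketch (not part of the kernel build): po->tp_tstamp used
 * above is configured from user space; requesting raw hardware timestamps
 * for ring frames might look like this (the flag choice is an example only):
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 *
 * tpacket_get_timestamp() then reports TP_STATUS_TS_RAW_HARDWARE in
 * tp_status whenever a hardware timestamp is available for the frame.
 */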
473
474static void *packet_lookup_frame(const struct packet_sock *po,
475 const struct packet_ring_buffer *rb,
476 unsigned int position,
477 int status)
478{
479 unsigned int pg_vec_pos, frame_offset;
480 union tpacket_uhdr h;
481
482 pg_vec_pos = position / rb->frames_per_block;
483 frame_offset = position % rb->frames_per_block;
484
485 h.raw = rb->pg_vec[pg_vec_pos].buffer +
486 (frame_offset * rb->frame_size);
487
488 if (status != __packet_get_status(po, h.raw))
489 return NULL;
490
491 return h.raw;
492}
493
494static void *packet_current_frame(struct packet_sock *po,
495 struct packet_ring_buffer *rb,
496 int status)
497{
498 return packet_lookup_frame(po, rb, rb->head, status);
499}
500
501static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
502{
503 del_timer_sync(&pkc->retire_blk_timer);
504}
505
506static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
507 struct sk_buff_head *rb_queue)
508{
509 struct tpacket_kbdq_core *pkc;
510
511 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
512
513 spin_lock_bh(&rb_queue->lock);
514 pkc->delete_blk_timer = 1;
515 spin_unlock_bh(&rb_queue->lock);
516
517 prb_del_retire_blk_timer(pkc);
518}
519
520static void prb_setup_retire_blk_timer(struct packet_sock *po)
521{
522 struct tpacket_kbdq_core *pkc;
523
524 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
525 timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
526 0);
527 pkc->retire_blk_timer.expires = jiffies;
528}
529
530static int prb_calc_retire_blk_tmo(struct packet_sock *po,
531 int blk_size_in_bytes)
532{
533 struct net_device *dev;
534 unsigned int mbits, div;
535 struct ethtool_link_ksettings ecmd;
536 int err;
537
538 rtnl_lock();
539 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
540 if (unlikely(!dev)) {
541 rtnl_unlock();
542 return DEFAULT_PRB_RETIRE_TOV;
543 }
544 err = __ethtool_get_link_ksettings(dev, &ecmd);
545 rtnl_unlock();
546 if (err)
547 return DEFAULT_PRB_RETIRE_TOV;
548
549 /* If the link speed is so slow you don't really
550 * need to worry about perf anyways
551 */
552 if (ecmd.base.speed < SPEED_1000 ||
553 ecmd.base.speed == SPEED_UNKNOWN)
554 return DEFAULT_PRB_RETIRE_TOV;
555
556 div = ecmd.base.speed / 1000;
557 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
558
559 if (div)
560 mbits /= div;
561
562 if (div)
563 return mbits + 1;
564 return mbits;
565}
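/*
 * Worked example for the calculation above, assuming a 1 MiB block:
 * mbits = (1048576 * 8) / (1024 * 1024) = 8. On a 1 Gbps link,
 * div = 1000 / 1000 = 1, so the timeout is 8 / 1 + 1 = 9 ms, i.e. roughly
 * the time needed to fill the block plus one millisecond of slack.
 * On a 10 Gbps link, div = 10 and the result is 8 / 10 + 1 = 1 ms.
 */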
566
567static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
568 union tpacket_req_u *req_u)
569{
570 p1->feature_req_word = req_u->req3.tp_feature_req_word;
571}
572
573static void init_prb_bdqc(struct packet_sock *po,
574 struct packet_ring_buffer *rb,
575 struct pgv *pg_vec,
576 union tpacket_req_u *req_u)
577{
578 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
579 struct tpacket_block_desc *pbd;
580
581 memset(p1, 0x0, sizeof(*p1));
582
583 p1->knxt_seq_num = 1;
584 p1->pkbdq = pg_vec;
585 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
586 p1->pkblk_start = pg_vec[0].buffer;
587 p1->kblk_size = req_u->req3.tp_block_size;
588 p1->knum_blocks = req_u->req3.tp_block_nr;
589 p1->hdrlen = po->tp_hdrlen;
590 p1->version = po->tp_version;
591 p1->last_kactive_blk_num = 0;
592 po->stats.stats3.tp_freeze_q_cnt = 0;
593 if (req_u->req3.tp_retire_blk_tov)
594 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
595 else
596 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
597 req_u->req3.tp_block_size);
598 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
599 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
600 rwlock_init(&p1->blk_fill_in_prog_lock);
601
602 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
603 prb_init_ft_ops(p1, req_u);
604 prb_setup_retire_blk_timer(po);
605 prb_open_block(p1, pbd);
606}
607
608/* Do NOT update the last_blk_num first.
609 * Assumes sk_buff_head lock is held.
610 */
611static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
612{
613 mod_timer(&pkc->retire_blk_timer,
614 jiffies + pkc->tov_in_jiffies);
615 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
616}
617
618/*
619 * Timer logic:
620 * 1) We refresh the timer only when we open a block.
621 * By doing this we don't waste cycles refreshing the timer
622 * on packet-by-packet basis.
623 *
624 * With a 1MB block-size, on a 1Gbps line, it will take
625 * i) ~8 ms to fill a block + ii) memcpy etc.
626 * In this cut we are not accounting for the memcpy time.
627 *
628 * So, if the user sets the 'tmo' to 10ms then the timer
629 * will never fire while the block is still getting filled
630 * (which is what we want). However, the user could choose
631 * to close a block early and that's fine.
632 *
633 * But when the timer does fire, we check whether or not to refresh it.
634 * Since the tmo granularity is in msecs, it is not too expensive
635 * to refresh the timer, lets say every '8' msecs.
636 * Either the user can set the 'tmo' or we can derive it based on
637 * a) line-speed and b) block-size.
638 * prb_calc_retire_blk_tmo() calculates the tmo.
639 *
640 */
641static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
642{
643 struct packet_sock *po =
644 from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
645 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
646 unsigned int frozen;
647 struct tpacket_block_desc *pbd;
648
649 spin_lock(&po->sk.sk_receive_queue.lock);
650
651 frozen = prb_queue_frozen(pkc);
652 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
653
654 if (unlikely(pkc->delete_blk_timer))
655 goto out;
656
657 /* We only need to plug the race when the block is partially filled.
658 * tpacket_rcv:
659 * lock(); increment BLOCK_NUM_PKTS; unlock()
660 * copy_bits() is in progress ...
661 * timer fires on other cpu:
662 * we can't retire the current block because copy_bits
663 * is in progress.
664 *
665 */
666 if (BLOCK_NUM_PKTS(pbd)) {
667 /* Waiting for skb_copy_bits to finish... */
668 write_lock(&pkc->blk_fill_in_prog_lock);
669 write_unlock(&pkc->blk_fill_in_prog_lock);
670 }
671
672 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
673 if (!frozen) {
674 if (!BLOCK_NUM_PKTS(pbd)) {
675 /* An empty block. Just refresh the timer. */
676 goto refresh_timer;
677 }
678 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
679 if (!prb_dispatch_next_block(pkc, po))
680 goto refresh_timer;
681 else
682 goto out;
683 } else {
684 /* Case 1. Queue was frozen because user-space was
685 * lagging behind.
686 */
687 if (prb_curr_blk_in_use(pbd)) {
688 /*
689 * Ok, user-space is still behind.
690 * So just refresh the timer.
691 */
692 goto refresh_timer;
693 } else {
694 /* Case 2. queue was frozen,user-space caught up,
695 * now the link went idle && the timer fired.
696 * We don't have a block to close.So we open this
697 * block and restart the timer.
698 * opening a block thaws the queue,restarts timer
699 * Thawing/timer-refresh is a side effect.
700 */
701 prb_open_block(pkc, pbd);
702 goto out;
703 }
704 }
705 }
706
707refresh_timer:
708 _prb_refresh_rx_retire_blk_timer(pkc);
709
710out:
711 spin_unlock(&po->sk.sk_receive_queue.lock);
712}
713
714static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
715 struct tpacket_block_desc *pbd1, __u32 status)
716{
717 /* Flush everything minus the block header */
718
719#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
720 u8 *start, *end;
721
722 start = (u8 *)pbd1;
723
724 /* Skip the block header(we know header WILL fit in 4K) */
725 start += PAGE_SIZE;
726
727 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
728 for (; start < end; start += PAGE_SIZE)
729 flush_dcache_page(pgv_to_page(start));
730
731 smp_wmb();
732#endif
733
734 /* Now update the block status. */
735
736 BLOCK_STATUS(pbd1) = status;
737
738 /* Flush the block header */
739
740#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
741 start = (u8 *)pbd1;
742 flush_dcache_page(pgv_to_page(start));
743
744 smp_wmb();
745#endif
746}
747
748/*
749 * Side effect:
750 *
751 * 1) flush the block
752 * 2) Increment active_blk_num
753 *
754 * Note:We DONT refresh the timer on purpose.
755 * Because almost always the next block will be opened.
756 */
757static void prb_close_block(struct tpacket_kbdq_core *pkc1,
758 struct tpacket_block_desc *pbd1,
759 struct packet_sock *po, unsigned int stat)
760{
761 __u32 status = TP_STATUS_USER | stat;
762
763 struct tpacket3_hdr *last_pkt;
764 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
765 struct sock *sk = &po->sk;
766
767 if (atomic_read(&po->tp_drops))
768 status |= TP_STATUS_LOSING;
769
770 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
771 last_pkt->tp_next_offset = 0;
772
773 /* Get the ts of the last pkt */
774 if (BLOCK_NUM_PKTS(pbd1)) {
775 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
776 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
777 } else {
778 /* Ok, we tmo'd - so get the current time.
779 *
780 * It shouldn't really happen as we don't close empty
781 * blocks. See prb_retire_rx_blk_timer_expired().
782 */
783 struct timespec64 ts;
784 ktime_get_real_ts64(&ts);
785 h1->ts_last_pkt.ts_sec = ts.tv_sec;
786 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
787 }
788
789 smp_wmb();
790
791 /* Flush the block */
792 prb_flush_block(pkc1, pbd1, status);
793
794 sk->sk_data_ready(sk);
795
796 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
797}
798
799static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
800{
801 pkc->reset_pending_on_curr_blk = 0;
802}
803
804/*
805 * Side effect of opening a block:
806 *
807 * 1) prb_queue is thawed.
808 * 2) retire_blk_timer is refreshed.
809 *
810 */
811static void prb_open_block(struct tpacket_kbdq_core *pkc1,
812 struct tpacket_block_desc *pbd1)
813{
814 struct timespec64 ts;
815 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
816
817 smp_rmb();
818
819 /* We could have just memset this but we will lose the
820 * flexibility of making the priv area sticky
821 */
822
823 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
824 BLOCK_NUM_PKTS(pbd1) = 0;
825 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
826
827 ktime_get_real_ts64(&ts);
828
829 h1->ts_first_pkt.ts_sec = ts.tv_sec;
830 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
831
832 pkc1->pkblk_start = (char *)pbd1;
833 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
834
835 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
836 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
837
838 pbd1->version = pkc1->version;
839 pkc1->prev = pkc1->nxt_offset;
840 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
841
842 prb_thaw_queue(pkc1);
843 _prb_refresh_rx_retire_blk_timer(pkc1);
844
845 smp_wmb();
846}
847
848/*
849 * Queue freeze logic:
850 * 1) Assume tp_block_nr = 8 blocks.
851 * 2) At time 't0', user opens Rx ring.
852 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
853 * 4) user-space is either sleeping or processing block '0'.
854 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
855 * it will close block-7,loop around and try to fill block '0'.
856 * call-flow:
857 * __packet_lookup_frame_in_block
858 * prb_retire_current_block()
859 * prb_dispatch_next_block()
860 * |->(BLOCK_STATUS == USER) evaluates to true
861 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
862 * 6) Now there are two cases:
863 * 6.1) Link goes idle right after the queue is frozen.
864 * But remember, the last open_block() refreshed the timer.
865 * When this timer expires,it will refresh itself so that we can
866 * re-open block-0 in near future.
867 * 6.2) Link is busy and keeps on receiving packets. This is a simple
868 * case and __packet_lookup_frame_in_block will check if block-0
869 * is free and can now be re-used.
870 */
871static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
872 struct packet_sock *po)
873{
874 pkc->reset_pending_on_curr_blk = 1;
875 po->stats.stats3.tp_freeze_q_cnt++;
876}
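/*
 * Illustrative sketch (not part of the kernel build): the user-space side
 * of the block lifecycle described above, a minimal TPACKET_V3 RX ring
 * setup. All sizes and the timeout are placeholder values.
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size     = 1 << 20,	// 1 MiB per block
 *		.tp_block_nr       = 8,		// 8 blocks, as in the example
 *		.tp_frame_size     = 2048,
 *		.tp_frame_nr       = ((1 << 20) / 2048) * 8,
 *		.tp_retire_blk_tov = 60,	// ms; 0 lets the kernel derive it
 *	};
 *	int ver = TPACKET_V3;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 * User space mmap()s the ring and releases each block by writing
 * TP_STATUS_KERNEL into its block_status once processed; a frozen queue
 * thaws when the blocking block is handed back this way.
 */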
877
878#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
879
880/*
881 * If the next block is free then we will dispatch it
882 * and return a good offset.
883 * Else, we will freeze the queue.
884 * So, caller must check the return value.
885 */
886static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
887 struct packet_sock *po)
888{
889 struct tpacket_block_desc *pbd;
890
891 smp_rmb();
892
893 /* 1. Get current block num */
894 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
895
896 /* 2. If this block is currently in_use then freeze the queue */
897 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
898 prb_freeze_queue(pkc, po);
899 return NULL;
900 }
901
902 /*
903 * 3.
904 * open this block and return the offset where the first packet
905 * needs to get stored.
906 */
907 prb_open_block(pkc, pbd);
908 return (void *)pkc->nxt_offset;
909}
910
911static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
912 struct packet_sock *po, unsigned int status)
913{
914 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
915
916 /* retire/close the current block */
917 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
918 /*
919 * Plug the case where copy_bits() is in progress on
920 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
921 * have space to copy the pkt in the current block and
922 * called prb_retire_current_block()
923 *
924 * We don't need to worry about the TMO case because
925 * the timer-handler already handled this case.
926 */
927 if (!(status & TP_STATUS_BLK_TMO)) {
928 /* Waiting for skb_copy_bits to finish... */
929 write_lock(&pkc->blk_fill_in_prog_lock);
930 write_unlock(&pkc->blk_fill_in_prog_lock);
931 }
932 prb_close_block(pkc, pbd, po, status);
933 return;
934 }
935}
936
937static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
938{
939 return TP_STATUS_USER & BLOCK_STATUS(pbd);
940}
941
942static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
943{
944 return pkc->reset_pending_on_curr_blk;
945}
946
947static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
948 __releases(&pkc->blk_fill_in_prog_lock)
949{
950 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
951
952 read_unlock(&pkc->blk_fill_in_prog_lock);
953}
954
955static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
956 struct tpacket3_hdr *ppd)
957{
958 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
959}
960
961static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
962 struct tpacket3_hdr *ppd)
963{
964 ppd->hv1.tp_rxhash = 0;
965}
966
967static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
968 struct tpacket3_hdr *ppd)
969{
970 if (skb_vlan_tag_present(pkc->skb)) {
971 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
972 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
973 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
974 } else {
975 ppd->hv1.tp_vlan_tci = 0;
976 ppd->hv1.tp_vlan_tpid = 0;
977 ppd->tp_status = TP_STATUS_AVAILABLE;
978 }
979}
980
981static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
982 struct tpacket3_hdr *ppd)
983{
984 ppd->hv1.tp_padding = 0;
985 prb_fill_vlan_info(pkc, ppd);
986
987 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
988 prb_fill_rxhash(pkc, ppd);
989 else
990 prb_clear_rxhash(pkc, ppd);
991}
992
993static void prb_fill_curr_block(char *curr,
994 struct tpacket_kbdq_core *pkc,
995 struct tpacket_block_desc *pbd,
996 unsigned int len)
997 __acquires(&pkc->blk_fill_in_prog_lock)
998{
999 struct tpacket3_hdr *ppd;
1000
1001 ppd = (struct tpacket3_hdr *)curr;
1002 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1003 pkc->prev = curr;
1004 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1005 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1006 BLOCK_NUM_PKTS(pbd) += 1;
1007 read_lock(&pkc->blk_fill_in_prog_lock);
1008 prb_run_all_ft_ops(pkc, ppd);
1009}
1010
1011/* Assumes caller has the sk->rx_queue.lock */
1012static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1013 struct sk_buff *skb,
1014 unsigned int len
1015 )
1016{
1017 struct tpacket_kbdq_core *pkc;
1018 struct tpacket_block_desc *pbd;
1019 char *curr, *end;
1020
1021 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1022 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1023
1024 /* Queue is frozen when user space is lagging behind */
1025 if (prb_queue_frozen(pkc)) {
1026 /*
1027 * Check if that last block which caused the queue to freeze,
1028 * is still in_use by user-space.
1029 */
1030 if (prb_curr_blk_in_use(pbd)) {
1031 /* Can't record this packet */
1032 return NULL;
1033 } else {
1034 /*
1035 * Ok, the block was released by user-space.
1036 * Now let's open that block.
1037 * opening a block also thaws the queue.
1038 * Thawing is a side effect.
1039 */
1040 prb_open_block(pkc, pbd);
1041 }
1042 }
1043
1044 smp_mb();
1045 curr = pkc->nxt_offset;
1046 pkc->skb = skb;
1047 end = (char *)pbd + pkc->kblk_size;
1048
1049 /* first try the current block */
1050 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1051 prb_fill_curr_block(curr, pkc, pbd, len);
1052 return (void *)curr;
1053 }
1054
1055 /* Ok, close the current block */
1056 prb_retire_current_block(pkc, po, 0);
1057
1058 /* Now, try to dispatch the next block */
1059 curr = (char *)prb_dispatch_next_block(pkc, po);
1060 if (curr) {
1061 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1062 prb_fill_curr_block(curr, pkc, pbd, len);
1063 return (void *)curr;
1064 }
1065
1066 /*
1067 * No free blocks are available.user_space hasn't caught up yet.
1068 * Queue was just frozen and now this packet will get dropped.
1069 */
1070 return NULL;
1071}
1072
1073static void *packet_current_rx_frame(struct packet_sock *po,
1074 struct sk_buff *skb,
1075 int status, unsigned int len)
1076{
1077 char *curr = NULL;
1078 switch (po->tp_version) {
1079 case TPACKET_V1:
1080 case TPACKET_V2:
1081 curr = packet_lookup_frame(po, &po->rx_ring,
1082 po->rx_ring.head, status);
1083 return curr;
1084 case TPACKET_V3:
1085 return __packet_lookup_frame_in_block(po, skb, len);
1086 default:
1087 WARN(1, "TPACKET version not supported\n");
1088 BUG();
1089 return NULL;
1090 }
1091}
1092
1093static void *prb_lookup_block(const struct packet_sock *po,
1094 const struct packet_ring_buffer *rb,
1095 unsigned int idx,
1096 int status)
1097{
1098 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
1099 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1100
1101 if (status != BLOCK_STATUS(pbd))
1102 return NULL;
1103 return pbd;
1104}
1105
1106static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1107{
1108 unsigned int prev;
1109 if (rb->prb_bdqc.kactive_blk_num)
1110 prev = rb->prb_bdqc.kactive_blk_num-1;
1111 else
1112 prev = rb->prb_bdqc.knum_blocks-1;
1113 return prev;
1114}
1115
1116/* Assumes caller has held the rx_queue.lock */
1117static void *__prb_previous_block(struct packet_sock *po,
1118 struct packet_ring_buffer *rb,
1119 int status)
1120{
1121 unsigned int previous = prb_previous_blk_num(rb);
1122 return prb_lookup_block(po, rb, previous, status);
1123}
1124
1125static void *packet_previous_rx_frame(struct packet_sock *po,
1126 struct packet_ring_buffer *rb,
1127 int status)
1128{
1129 if (po->tp_version <= TPACKET_V2)
1130 return packet_previous_frame(po, rb, status);
1131
1132 return __prb_previous_block(po, rb, status);
1133}
1134
1135static void packet_increment_rx_head(struct packet_sock *po,
1136 struct packet_ring_buffer *rb)
1137{
1138 switch (po->tp_version) {
1139 case TPACKET_V1:
1140 case TPACKET_V2:
1141 return packet_increment_head(rb);
1142 case TPACKET_V3:
1143 default:
1144 WARN(1, "TPACKET version not supported.\n");
1145 BUG();
1146 return;
1147 }
1148}
1149
1150static void *packet_previous_frame(struct packet_sock *po,
1151 struct packet_ring_buffer *rb,
1152 int status)
1153{
1154 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1155 return packet_lookup_frame(po, rb, previous, status);
1156}
1157
1158static void packet_increment_head(struct packet_ring_buffer *buff)
1159{
1160 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1161}
1162
1163static void packet_inc_pending(struct packet_ring_buffer *rb)
1164{
1165 this_cpu_inc(*rb->pending_refcnt);
1166}
1167
1168static void packet_dec_pending(struct packet_ring_buffer *rb)
1169{
1170 this_cpu_dec(*rb->pending_refcnt);
1171}
1172
1173static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1174{
1175 unsigned int refcnt = 0;
1176 int cpu;
1177
1178 /* We don't use pending refcount in rx_ring. */
1179 if (rb->pending_refcnt == NULL)
1180 return 0;
1181
1182 for_each_possible_cpu(cpu)
1183 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1184
1185 return refcnt;
1186}
1187
1188static int packet_alloc_pending(struct packet_sock *po)
1189{
1190 po->rx_ring.pending_refcnt = NULL;
1191
1192 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1193 if (unlikely(po->tx_ring.pending_refcnt == NULL))
1194 return -ENOBUFS;
1195
1196 return 0;
1197}
1198
1199static void packet_free_pending(struct packet_sock *po)
1200{
1201 free_percpu(po->tx_ring.pending_refcnt);
1202}
1203
1204#define ROOM_POW_OFF 2
1205#define ROOM_NONE 0x0
1206#define ROOM_LOW 0x1
1207#define ROOM_NORMAL 0x2
1208
1209static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1210{
1211 int idx, len;
1212
1213 len = READ_ONCE(po->rx_ring.frame_max) + 1;
1214 idx = READ_ONCE(po->rx_ring.head);
1215 if (pow_off)
1216 idx += len >> pow_off;
1217 if (idx >= len)
1218 idx -= len;
1219 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1220}
1221
1222static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1223{
1224 int idx, len;
1225
1226 len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1227 idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1228 if (pow_off)
1229 idx += len >> pow_off;
1230 if (idx >= len)
1231 idx -= len;
1232 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1233}
1234
1235static int __packet_rcv_has_room(const struct packet_sock *po,
1236 const struct sk_buff *skb)
1237{
1238 const struct sock *sk = &po->sk;
1239 int ret = ROOM_NONE;
1240
1241 if (po->prot_hook.func != tpacket_rcv) {
1242 int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1243 int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1244 - (skb ? skb->truesize : 0);
1245
1246 if (avail > (rcvbuf >> ROOM_POW_OFF))
1247 return ROOM_NORMAL;
1248 else if (avail > 0)
1249 return ROOM_LOW;
1250 else
1251 return ROOM_NONE;
1252 }
1253
1254 if (po->tp_version == TPACKET_V3) {
1255 if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1256 ret = ROOM_NORMAL;
1257 else if (__tpacket_v3_has_room(po, 0))
1258 ret = ROOM_LOW;
1259 } else {
1260 if (__tpacket_has_room(po, ROOM_POW_OFF))
1261 ret = ROOM_NORMAL;
1262 else if (__tpacket_has_room(po, 0))
1263 ret = ROOM_LOW;
1264 }
1265
1266 return ret;
1267}
1268
1269static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1270{
1271 int pressure, ret;
1272
1273 ret = __packet_rcv_has_room(po, skb);
1274 pressure = ret != ROOM_NORMAL;
1275
1276 if (READ_ONCE(po->pressure) != pressure)
1277 WRITE_ONCE(po->pressure, pressure);
1278
1279 return ret;
1280}
1281
1282static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1283{
1284 if (READ_ONCE(po->pressure) &&
1285 __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1286 WRITE_ONCE(po->pressure, 0);
1287}
1288
1289static void packet_sock_destruct(struct sock *sk)
1290{
1291 skb_queue_purge(&sk->sk_error_queue);
1292
1293 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1294 WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1295
1296 if (!sock_flag(sk, SOCK_DEAD)) {
1297 pr_err("Attempt to release alive packet socket: %p\n", sk);
1298 return;
1299 }
1300
1301 sk_refcnt_debug_dec(sk);
1302}
1303
1304static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1305{
1306 u32 *history = po->rollover->history;
1307 u32 victim, rxhash;
1308 int i, count = 0;
1309
1310 rxhash = skb_get_hash(skb);
1311 for (i = 0; i < ROLLOVER_HLEN; i++)
1312 if (READ_ONCE(history[i]) == rxhash)
1313 count++;
1314
1315 victim = prandom_u32() % ROLLOVER_HLEN;
1316
1317 /* Avoid dirtying the cache line if possible */
1318 if (READ_ONCE(history[victim]) != rxhash)
1319 WRITE_ONCE(history[victim], rxhash);
1320
1321 return count > (ROLLOVER_HLEN >> 1);
1322}
1323
1324static unsigned int fanout_demux_hash(struct packet_fanout *f,
1325 struct sk_buff *skb,
1326 unsigned int num)
1327{
1328 return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1329}
1330
1331static unsigned int fanout_demux_lb(struct packet_fanout *f,
1332 struct sk_buff *skb,
1333 unsigned int num)
1334{
1335 unsigned int val = atomic_inc_return(&f->rr_cur);
1336
1337 return val % num;
1338}
1339
1340static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1341 struct sk_buff *skb,
1342 unsigned int num)
1343{
1344 return smp_processor_id() % num;
1345}
1346
1347static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1348 struct sk_buff *skb,
1349 unsigned int num)
1350{
1351 return prandom_u32_max(num);
1352}
1353
1354static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1355 struct sk_buff *skb,
1356 unsigned int idx, bool try_self,
1357 unsigned int num)
1358{
1359 struct packet_sock *po, *po_next, *po_skip = NULL;
1360 unsigned int i, j, room = ROOM_NONE;
1361
1362 po = pkt_sk(rcu_dereference(f->arr[idx]));
1363
1364 if (try_self) {
1365 room = packet_rcv_has_room(po, skb);
1366 if (room == ROOM_NORMAL ||
1367 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1368 return idx;
1369 po_skip = po;
1370 }
1371
1372 i = j = min_t(int, po->rollover->sock, num - 1);
1373 do {
1374 po_next = pkt_sk(rcu_dereference(f->arr[i]));
1375 if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
1376 packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1377 if (i != j)
1378 po->rollover->sock = i;
1379 atomic_long_inc(&po->rollover->num);
1380 if (room == ROOM_LOW)
1381 atomic_long_inc(&po->rollover->num_huge);
1382 return i;
1383 }
1384
1385 if (++i == num)
1386 i = 0;
1387 } while (i != j);
1388
1389 atomic_long_inc(&po->rollover->num_failed);
1390 return idx;
1391}
1392
1393static unsigned int fanout_demux_qm(struct packet_fanout *f,
1394 struct sk_buff *skb,
1395 unsigned int num)
1396{
1397 return skb_get_queue_mapping(skb) % num;
1398}
1399
1400static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1401 struct sk_buff *skb,
1402 unsigned int num)
1403{
1404 struct bpf_prog *prog;
1405 unsigned int ret = 0;
1406
1407 rcu_read_lock();
1408 prog = rcu_dereference(f->bpf_prog);
1409 if (prog)
1410 ret = bpf_prog_run_clear_cb(prog, skb) % num;
1411 rcu_read_unlock();
1412
1413 return ret;
1414}
1415
1416static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1417{
1418 return f->flags & (flag >> 8);
1419}
1420
1421static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1422 struct packet_type *pt, struct net_device *orig_dev)
1423{
1424 struct packet_fanout *f = pt->af_packet_priv;
1425 unsigned int num = READ_ONCE(f->num_members);
1426 struct net *net = read_pnet(&f->net);
1427 struct packet_sock *po;
1428 unsigned int idx;
1429
1430 if (!net_eq(dev_net(dev), net) || !num) {
1431 kfree_skb(skb);
1432 return 0;
1433 }
1434
1435 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1436 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1437 if (!skb)
1438 return 0;
1439 }
1440 switch (f->type) {
1441 case PACKET_FANOUT_HASH:
1442 default:
1443 idx = fanout_demux_hash(f, skb, num);
1444 break;
1445 case PACKET_FANOUT_LB:
1446 idx = fanout_demux_lb(f, skb, num);
1447 break;
1448 case PACKET_FANOUT_CPU:
1449 idx = fanout_demux_cpu(f, skb, num);
1450 break;
1451 case PACKET_FANOUT_RND:
1452 idx = fanout_demux_rnd(f, skb, num);
1453 break;
1454 case PACKET_FANOUT_QM:
1455 idx = fanout_demux_qm(f, skb, num);
1456 break;
1457 case PACKET_FANOUT_ROLLOVER:
1458 idx = fanout_demux_rollover(f, skb, 0, false, num);
1459 break;
1460 case PACKET_FANOUT_CBPF:
1461 case PACKET_FANOUT_EBPF:
1462 idx = fanout_demux_bpf(f, skb, num);
1463 break;
1464 }
1465
1466 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1467 idx = fanout_demux_rollover(f, skb, idx, true, num);
1468
1469 po = pkt_sk(rcu_dereference(f->arr[idx]));
1470 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1471}
1472
1473DEFINE_MUTEX(fanout_mutex);
1474EXPORT_SYMBOL_GPL(fanout_mutex);
1475static LIST_HEAD(fanout_list);
1476static u16 fanout_next_id;
1477
1478static void __fanout_link(struct sock *sk, struct packet_sock *po)
1479{
1480 struct packet_fanout *f = po->fanout;
1481
1482 spin_lock(&f->lock);
1483 rcu_assign_pointer(f->arr[f->num_members], sk);
1484 smp_wmb();
1485 f->num_members++;
1486 if (f->num_members == 1)
1487 dev_add_pack(&f->prot_hook);
1488 spin_unlock(&f->lock);
1489}
1490
1491static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1492{
1493 struct packet_fanout *f = po->fanout;
1494 int i;
1495
1496 spin_lock(&f->lock);
1497 for (i = 0; i < f->num_members; i++) {
1498 if (rcu_dereference_protected(f->arr[i],
1499 lockdep_is_held(&f->lock)) == sk)
1500 break;
1501 }
1502 BUG_ON(i >= f->num_members);
1503 rcu_assign_pointer(f->arr[i],
1504 rcu_dereference_protected(f->arr[f->num_members - 1],
1505 lockdep_is_held(&f->lock)));
1506 f->num_members--;
1507 if (f->num_members == 0)
1508 __dev_remove_pack(&f->prot_hook);
1509 spin_unlock(&f->lock);
1510}
1511
1512static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1513{
1514 if (sk->sk_family != PF_PACKET)
1515 return false;
1516
1517 return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1518}
1519
1520static void fanout_init_data(struct packet_fanout *f)
1521{
1522 switch (f->type) {
1523 case PACKET_FANOUT_LB:
1524 atomic_set(&f->rr_cur, 0);
1525 break;
1526 case PACKET_FANOUT_CBPF:
1527 case PACKET_FANOUT_EBPF:
1528 RCU_INIT_POINTER(f->bpf_prog, NULL);
1529 break;
1530 }
1531}
1532
1533static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1534{
1535 struct bpf_prog *old;
1536
1537 spin_lock(&f->lock);
1538 old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1539 rcu_assign_pointer(f->bpf_prog, new);
1540 spin_unlock(&f->lock);
1541
1542 if (old) {
1543 synchronize_net();
1544 bpf_prog_destroy(old);
1545 }
1546}
1547
1548static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1549 unsigned int len)
1550{
1551 struct bpf_prog *new;
1552 struct sock_fprog fprog;
1553 int ret;
1554
1555 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1556 return -EPERM;
1557
1558 ret = copy_bpf_fprog_from_user(&fprog, data, len);
1559 if (ret)
1560 return ret;
1561
1562 ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1563 if (ret)
1564 return ret;
1565
1566 __fanout_set_data_bpf(po->fanout, new);
1567 return 0;
1568}
1569
1570static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1571 unsigned int len)
1572{
1573 struct bpf_prog *new;
1574 u32 fd;
1575
1576 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1577 return -EPERM;
1578 if (len != sizeof(fd))
1579 return -EINVAL;
1580 if (copy_from_sockptr(&fd, data, len))
1581 return -EFAULT;
1582
1583 new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1584 if (IS_ERR(new))
1585 return PTR_ERR(new);
1586
1587 __fanout_set_data_bpf(po->fanout, new);
1588 return 0;
1589}
1590
1591static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1592 unsigned int len)
1593{
1594 switch (po->fanout->type) {
1595 case PACKET_FANOUT_CBPF:
1596 return fanout_set_data_cbpf(po, data, len);
1597 case PACKET_FANOUT_EBPF:
1598 return fanout_set_data_ebpf(po, data, len);
1599 default:
1600 return -EINVAL;
1601 }
1602}
1603
1604static void fanout_release_data(struct packet_fanout *f)
1605{
1606 switch (f->type) {
1607 case PACKET_FANOUT_CBPF:
1608 case PACKET_FANOUT_EBPF:
1609 __fanout_set_data_bpf(f, NULL);
1610 }
1611}
1612
1613static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1614{
1615 struct packet_fanout *f;
1616
1617 list_for_each_entry(f, &fanout_list, list) {
1618 if (f->id == candidate_id &&
1619 read_pnet(&f->net) == sock_net(sk)) {
1620 return false;
1621 }
1622 }
1623 return true;
1624}
1625
1626static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1627{
1628 u16 id = fanout_next_id;
1629
1630 do {
1631 if (__fanout_id_is_free(sk, id)) {
1632 *new_id = id;
1633 fanout_next_id = id + 1;
1634 return true;
1635 }
1636
1637 id++;
1638 } while (id != fanout_next_id);
1639
1640 return false;
1641}
1642
1643static int fanout_add(struct sock *sk, struct fanout_args *args)
1644{
1645 struct packet_rollover *rollover = NULL;
1646 struct packet_sock *po = pkt_sk(sk);
1647 u16 type_flags = args->type_flags;
1648 struct packet_fanout *f, *match;
1649 u8 type = type_flags & 0xff;
1650 u8 flags = type_flags >> 8;
1651 u16 id = args->id;
1652 int err;
1653
1654 switch (type) {
1655 case PACKET_FANOUT_ROLLOVER:
1656 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1657 return -EINVAL;
1658 case PACKET_FANOUT_HASH:
1659 case PACKET_FANOUT_LB:
1660 case PACKET_FANOUT_CPU:
1661 case PACKET_FANOUT_RND:
1662 case PACKET_FANOUT_QM:
1663 case PACKET_FANOUT_CBPF:
1664 case PACKET_FANOUT_EBPF:
1665 break;
1666 default:
1667 return -EINVAL;
1668 }
1669
1670 mutex_lock(&fanout_mutex);
1671
1672 err = -EALREADY;
1673 if (po->fanout)
1674 goto out;
1675
1676 if (type == PACKET_FANOUT_ROLLOVER ||
1677 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1678 err = -ENOMEM;
1679 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1680 if (!rollover)
1681 goto out;
1682 atomic_long_set(&rollover->num, 0);
1683 atomic_long_set(&rollover->num_huge, 0);
1684 atomic_long_set(&rollover->num_failed, 0);
1685 }
1686
1687 if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1688 if (id != 0) {
1689 err = -EINVAL;
1690 goto out;
1691 }
1692 if (!fanout_find_new_id(sk, &id)) {
1693 err = -ENOMEM;
1694 goto out;
1695 }
1696 /* ephemeral flag for the first socket in the group: drop it */
1697 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1698 }
1699
1700 match = NULL;
1701 list_for_each_entry(f, &fanout_list, list) {
1702 if (f->id == id &&
1703 read_pnet(&f->net) == sock_net(sk)) {
1704 match = f;
1705 break;
1706 }
1707 }
1708 err = -EINVAL;
1709 if (match) {
1710 if (match->flags != flags)
1711 goto out;
1712 if (args->max_num_members &&
1713 args->max_num_members != match->max_num_members)
1714 goto out;
1715 } else {
1716 if (args->max_num_members > PACKET_FANOUT_MAX)
1717 goto out;
1718 if (!args->max_num_members)
1719 /* legacy PACKET_FANOUT_MAX */
1720 args->max_num_members = 256;
1721 err = -ENOMEM;
1722 match = kvzalloc(struct_size(match, arr, args->max_num_members),
1723 GFP_KERNEL);
1724 if (!match)
1725 goto out;
1726 write_pnet(&match->net, sock_net(sk));
1727 match->id = id;
1728 match->type = type;
1729 match->flags = flags;
1730 INIT_LIST_HEAD(&match->list);
1731 spin_lock_init(&match->lock);
1732 refcount_set(&match->sk_ref, 0);
1733 fanout_init_data(match);
1734 match->prot_hook.type = po->prot_hook.type;
1735 match->prot_hook.dev = po->prot_hook.dev;
1736 match->prot_hook.func = packet_rcv_fanout;
1737 match->prot_hook.af_packet_priv = match;
1738 match->prot_hook.af_packet_net = read_pnet(&match->net);
1739 match->prot_hook.id_match = match_fanout_group;
1740 match->max_num_members = args->max_num_members;
1741 list_add(&match->list, &fanout_list);
1742 }
1743 err = -EINVAL;
1744
1745 spin_lock(&po->bind_lock);
1746 if (po->running &&
1747 match->type == type &&
1748 match->prot_hook.type == po->prot_hook.type &&
1749 match->prot_hook.dev == po->prot_hook.dev) {
1750 err = -ENOSPC;
1751 if (refcount_read(&match->sk_ref) < match->max_num_members) {
1752 __dev_remove_pack(&po->prot_hook);
1753
1754 /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
1755 WRITE_ONCE(po->fanout, match);
1756
1757 po->rollover = rollover;
1758 rollover = NULL;
1759 refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1760 __fanout_link(sk, po);
1761 err = 0;
1762 }
1763 }
1764 spin_unlock(&po->bind_lock);
1765
1766 if (err && !refcount_read(&match->sk_ref)) {
1767 list_del(&match->list);
Olivier Deprez157378f2022-04-04 15:47:50 +02001768 kvfree(match);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001769 }
1770
1771out:
1772 kfree(rollover);
1773 mutex_unlock(&fanout_mutex);
1774 return err;
1775}
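
/* A rough userspace sketch of joining the fanout group handled by
 * fanout_add() above: sockets passing the same 16-bit group id and mode
 * in setsockopt(PACKET_FANOUT) end up sharing one struct packet_fanout.
 * "fd" is an already-created packet socket and error handling is omitted
 * (illustrative only):
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	int fanout_arg = 42 | (PACKET_FANOUT_HASH << 16);
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		   &fanout_arg, sizeof(fanout_arg));
 */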
1776
1777/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1778 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1779 * It is the responsibility of the caller to call fanout_release_data() and
1780 * free the returned packet_fanout (after synchronize_net())
1781 */
1782static struct packet_fanout *fanout_release(struct sock *sk)
1783{
1784 struct packet_sock *po = pkt_sk(sk);
1785 struct packet_fanout *f;
1786
1787 mutex_lock(&fanout_mutex);
1788 f = po->fanout;
1789 if (f) {
1790 po->fanout = NULL;
1791
1792 if (refcount_dec_and_test(&f->sk_ref))
1793 list_del(&f->list);
1794 else
1795 f = NULL;
1796 }
1797 mutex_unlock(&fanout_mutex);
1798
1799 return f;
1800}
1801
1802static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1803 struct sk_buff *skb)
1804{
1805 /* Earlier code assumed this would be a VLAN pkt, double-check
1806 * this now that we have the actual packet in hand. We can only
1807 * do this check on Ethernet devices.
1808 */
1809 if (unlikely(dev->type != ARPHRD_ETHER))
1810 return false;
1811
1812 skb_reset_mac_header(skb);
1813 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1814}
1815
1816static const struct proto_ops packet_ops;
1817
1818static const struct proto_ops packet_ops_spkt;
1819
1820static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1821 struct packet_type *pt, struct net_device *orig_dev)
1822{
1823 struct sock *sk;
1824 struct sockaddr_pkt *spkt;
1825
1826 /*
1827 * When we registered the protocol we saved the socket in the data
1828 * field for just this event.
1829 */
1830
1831 sk = pt->af_packet_priv;
1832
1833 /*
1834 * Yank back the headers [hope the device set this
1835 * right or kerboom...]
1836 *
1837 * Incoming packets have ll header pulled,
1838 * push it back.
1839 *
1840 * For outgoing ones skb->data == skb_mac_header(skb)
1841	 * so that this procedure is a no-op.
1842 */
1843
1844 if (skb->pkt_type == PACKET_LOOPBACK)
1845 goto out;
1846
1847 if (!net_eq(dev_net(dev), sock_net(sk)))
1848 goto out;
1849
1850 skb = skb_share_check(skb, GFP_ATOMIC);
1851 if (skb == NULL)
1852 goto oom;
1853
1854 /* drop any routing info */
1855 skb_dst_drop(skb);
1856
1857 /* drop conntrack reference */
David Brazdil0f672f62019-12-10 10:32:29 +00001858 nf_reset_ct(skb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001859
1860 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1861
1862 skb_push(skb, skb->data - skb_mac_header(skb));
1863
1864 /*
1865 * The SOCK_PACKET socket receives _all_ frames.
1866 */
1867
1868 spkt->spkt_family = dev->type;
1869 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1870 spkt->spkt_protocol = skb->protocol;
1871
1872 /*
1873 * Charge the memory to the socket. This is done specifically
1874	 * to prevent sockets from using up all the memory.
1875 */
1876
1877 if (sock_queue_rcv_skb(sk, skb) == 0)
1878 return 0;
1879
1880out:
1881 kfree_skb(skb);
1882oom:
1883 return 0;
1884}
1885
David Brazdil0f672f62019-12-10 10:32:29 +00001886static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1887{
1888 if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1889 sock->type == SOCK_RAW) {
1890 skb_reset_mac_header(skb);
1891 skb->protocol = dev_parse_header_protocol(skb);
1892 }
1893
1894 skb_probe_transport_header(skb);
1895}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001896
1897/*
1898 * Output a raw packet to a device layer. This bypasses all the other
1899 * protocol layers and you must therefore supply it with a complete frame
1900 */
1901
1902static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1903 size_t len)
1904{
1905 struct sock *sk = sock->sk;
1906 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1907 struct sk_buff *skb = NULL;
1908 struct net_device *dev;
1909 struct sockcm_cookie sockc;
1910 __be16 proto = 0;
1911 int err;
1912 int extra_len = 0;
1913
1914 /*
1915 * Get and verify the address.
1916 */
1917
1918 if (saddr) {
1919 if (msg->msg_namelen < sizeof(struct sockaddr))
1920 return -EINVAL;
1921 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1922 proto = saddr->spkt_protocol;
1923 } else
1924		return -ENOTCONN; /* SOCK_PACKET must be sent with an address */
1925
1926 /*
1927 * Find the device first to size check it
1928 */
1929
1930 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1931retry:
1932 rcu_read_lock();
1933 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1934 err = -ENODEV;
1935 if (dev == NULL)
1936 goto out_unlock;
1937
1938 err = -ENETDOWN;
1939 if (!(dev->flags & IFF_UP))
1940 goto out_unlock;
1941
1942 /*
1943 * You may not queue a frame bigger than the mtu. This is the lowest level
1944 * raw protocol and you must do your own fragmentation at this level.
1945 */
1946
1947 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1948 if (!netif_supports_nofcs(dev)) {
1949 err = -EPROTONOSUPPORT;
1950 goto out_unlock;
1951 }
1952 extra_len = 4; /* We're doing our own CRC */
1953 }
1954
1955 err = -EMSGSIZE;
1956 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1957 goto out_unlock;
1958
1959 if (!skb) {
1960 size_t reserved = LL_RESERVED_SPACE(dev);
1961 int tlen = dev->needed_tailroom;
1962 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1963
1964 rcu_read_unlock();
1965 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1966 if (skb == NULL)
1967 return -ENOBUFS;
1968 /* FIXME: Save some space for broken drivers that write a hard
1969 * header at transmission time by themselves. PPP is the notable
1970 * one here. This should really be fixed at the driver level.
1971 */
1972 skb_reserve(skb, reserved);
1973 skb_reset_network_header(skb);
1974
1975 /* Try to align data part correctly */
1976 if (hhlen) {
1977 skb->data -= hhlen;
1978 skb->tail -= hhlen;
1979 if (len < hhlen)
1980 skb_reset_network_header(skb);
1981 }
1982 err = memcpy_from_msg(skb_put(skb, len), msg, len);
1983 if (err)
1984 goto out_free;
1985 goto retry;
1986 }
1987
1988 if (!dev_validate_header(dev, skb->data, len)) {
1989 err = -EINVAL;
1990 goto out_unlock;
1991 }
1992 if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1993 !packet_extra_vlan_len_allowed(dev, skb)) {
1994 err = -EMSGSIZE;
1995 goto out_unlock;
1996 }
1997
1998 sockcm_init(&sockc, sk);
1999 if (msg->msg_controllen) {
2000 err = sock_cmsg_send(sk, msg, &sockc);
2001 if (unlikely(err))
2002 goto out_unlock;
2003 }
2004
2005 skb->protocol = proto;
2006 skb->dev = dev;
2007 skb->priority = sk->sk_priority;
2008 skb->mark = sk->sk_mark;
2009 skb->tstamp = sockc.transmit_time;
2010
David Brazdil0f672f62019-12-10 10:32:29 +00002011 skb_setup_tx_timestamp(skb, sockc.tsflags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002012
2013 if (unlikely(extra_len == 4))
2014 skb->no_fcs = 1;
2015
David Brazdil0f672f62019-12-10 10:32:29 +00002016 packet_parse_headers(skb, sock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002017
2018 dev_queue_xmit(skb);
2019 rcu_read_unlock();
2020 return len;
2021
2022out_unlock:
2023 rcu_read_unlock();
2024out_free:
2025 kfree_skb(skb);
2026 return err;
2027}
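
/* A rough userspace sketch of the legacy SOCK_PACKET transmit path served
 * by packet_sendmsg_spkt(): the caller names the device in a
 * struct sockaddr_pkt and supplies a complete link-layer frame.  "frame"
 * and "frame_len" are assumed to be prepared by the caller; error handling
 * is omitted (illustrative only):
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */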
2028
2029static unsigned int run_filter(struct sk_buff *skb,
2030 const struct sock *sk,
2031 unsigned int res)
2032{
2033 struct sk_filter *filter;
2034
2035 rcu_read_lock();
2036 filter = rcu_dereference(sk->sk_filter);
2037 if (filter != NULL)
2038 res = bpf_prog_run_clear_cb(filter->prog, skb);
2039 rcu_read_unlock();
2040
2041 return res;
2042}
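
/* run_filter() runs whatever BPF program the owner attached to the socket;
 * the returned value caps how many bytes of each packet are kept (snaplen),
 * and 0 drops the packet.  A rough userspace sketch of attaching a
 * one-instruction classic BPF filter that accepts every packet but keeps at
 * most 96 bytes; "fd" is an existing packet socket (illustrative only):
 *
 *	#include <sys/socket.h>
 *	#include <linux/filter.h>
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 96 },	// return 96: accept, truncate
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */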
2043
2044static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2045 size_t *len)
2046{
2047 struct virtio_net_hdr vnet_hdr;
2048
2049 if (*len < sizeof(vnet_hdr))
2050 return -EINVAL;
2051 *len -= sizeof(vnet_hdr);
2052
2053 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2054 return -EINVAL;
2055
2056 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2057}
2058
2059/*
2060 * This function makes lazy skb cloning in the hope that most packets
2061 * are discarded by BPF.
2062 *
2063 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
2064 * and skb->cb are mangled. It works because (and until) packets
2065 * falling here are owned by current CPU. Output packets are cloned
2066 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2067 * sequentially, so that if we return skb to original state on exit,
2068 * we will not harm anyone.
2069 */
2070
2071static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2072 struct packet_type *pt, struct net_device *orig_dev)
2073{
2074 struct sock *sk;
2075 struct sockaddr_ll *sll;
2076 struct packet_sock *po;
2077 u8 *skb_head = skb->data;
2078 int skb_len = skb->len;
2079 unsigned int snaplen, res;
2080 bool is_drop_n_account = false;
2081
2082 if (skb->pkt_type == PACKET_LOOPBACK)
2083 goto drop;
2084
2085 sk = pt->af_packet_priv;
2086 po = pkt_sk(sk);
2087
2088 if (!net_eq(dev_net(dev), sock_net(sk)))
2089 goto drop;
2090
2091 skb->dev = dev;
2092
Olivier Deprez157378f2022-04-04 15:47:50 +02002093 if (dev_has_header(dev)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002094 /* The device has an explicit notion of ll header,
2095 * exported to higher levels.
2096 *
2097 * Otherwise, the device hides details of its frame
2098		 * structure, so that the corresponding packet header is
2099		 * never delivered to the user.
2100 */
2101 if (sk->sk_type != SOCK_DGRAM)
2102 skb_push(skb, skb->data - skb_mac_header(skb));
2103 else if (skb->pkt_type == PACKET_OUTGOING) {
2104 /* Special case: outgoing packets have ll header at head */
2105 skb_pull(skb, skb_network_offset(skb));
2106 }
2107 }
2108
2109 snaplen = skb->len;
2110
2111 res = run_filter(skb, sk, snaplen);
2112 if (!res)
2113 goto drop_n_restore;
2114 if (snaplen > res)
2115 snaplen = res;
2116
2117 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2118 goto drop_n_acct;
2119
2120 if (skb_shared(skb)) {
2121 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2122 if (nskb == NULL)
2123 goto drop_n_acct;
2124
2125 if (skb_head != skb->data) {
2126 skb->data = skb_head;
2127 skb->len = skb_len;
2128 }
2129 consume_skb(skb);
2130 skb = nskb;
2131 }
2132
2133 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2134
2135 sll = &PACKET_SKB_CB(skb)->sa.ll;
2136 sll->sll_hatype = dev->type;
2137 sll->sll_pkttype = skb->pkt_type;
2138 if (unlikely(po->origdev))
2139 sll->sll_ifindex = orig_dev->ifindex;
2140 else
2141 sll->sll_ifindex = dev->ifindex;
2142
2143 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2144
2145 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2146 * Use their space for storing the original skb length.
2147 */
2148 PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2149
2150 if (pskb_trim(skb, snaplen))
2151 goto drop_n_acct;
2152
2153 skb_set_owner_r(skb, sk);
2154 skb->dev = NULL;
2155 skb_dst_drop(skb);
2156
2157 /* drop conntrack reference */
David Brazdil0f672f62019-12-10 10:32:29 +00002158 nf_reset_ct(skb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002159
2160 spin_lock(&sk->sk_receive_queue.lock);
2161 po->stats.stats1.tp_packets++;
2162 sock_skb_set_dropcount(sk, skb);
2163 __skb_queue_tail(&sk->sk_receive_queue, skb);
2164 spin_unlock(&sk->sk_receive_queue.lock);
2165 sk->sk_data_ready(sk);
2166 return 0;
2167
2168drop_n_acct:
2169 is_drop_n_account = true;
David Brazdil0f672f62019-12-10 10:32:29 +00002170 atomic_inc(&po->tp_drops);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002171 atomic_inc(&sk->sk_drops);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002172
2173drop_n_restore:
2174 if (skb_head != skb->data && skb_shared(skb)) {
2175 skb->data = skb_head;
2176 skb->len = skb_len;
2177 }
2178drop:
2179 if (!is_drop_n_account)
2180 consume_skb(skb);
2181 else
2182 kfree_skb(skb);
2183 return 0;
2184}
2185
2186static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2187 struct packet_type *pt, struct net_device *orig_dev)
2188{
2189 struct sock *sk;
2190 struct packet_sock *po;
2191 struct sockaddr_ll *sll;
2192 union tpacket_uhdr h;
2193 u8 *skb_head = skb->data;
2194 int skb_len = skb->len;
2195 unsigned int snaplen, res;
2196 unsigned long status = TP_STATUS_USER;
Olivier Deprez0e641232021-09-23 10:07:05 +02002197 unsigned short macoff, hdrlen;
2198 unsigned int netoff;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002199 struct sk_buff *copy_skb = NULL;
Olivier Deprez157378f2022-04-04 15:47:50 +02002200 struct timespec64 ts;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002201 __u32 ts_status;
2202 bool is_drop_n_account = false;
Olivier Deprez0e641232021-09-23 10:07:05 +02002203 unsigned int slot_id = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002204 bool do_vnet = false;
2205
2206 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2207	 * We may add members to them up to the current aligned size without forcing
2208 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2209 */
2210 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2211 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2212
2213 if (skb->pkt_type == PACKET_LOOPBACK)
2214 goto drop;
2215
2216 sk = pt->af_packet_priv;
2217 po = pkt_sk(sk);
2218
2219 if (!net_eq(dev_net(dev), sock_net(sk)))
2220 goto drop;
2221
Olivier Deprez157378f2022-04-04 15:47:50 +02002222 if (dev_has_header(dev)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002223 if (sk->sk_type != SOCK_DGRAM)
2224 skb_push(skb, skb->data - skb_mac_header(skb));
2225 else if (skb->pkt_type == PACKET_OUTGOING) {
2226 /* Special case: outgoing packets have ll header at head */
2227 skb_pull(skb, skb_network_offset(skb));
2228 }
2229 }
2230
2231 snaplen = skb->len;
2232
2233 res = run_filter(skb, sk, snaplen);
2234 if (!res)
2235 goto drop_n_restore;
2236
David Brazdil0f672f62019-12-10 10:32:29 +00002237 /* If we are flooded, just give up */
2238 if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2239 atomic_inc(&po->tp_drops);
2240 goto drop_n_restore;
2241 }
2242
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002243 if (skb->ip_summed == CHECKSUM_PARTIAL)
2244 status |= TP_STATUS_CSUMNOTREADY;
2245 else if (skb->pkt_type != PACKET_OUTGOING &&
2246 (skb->ip_summed == CHECKSUM_COMPLETE ||
2247 skb_csum_unnecessary(skb)))
2248 status |= TP_STATUS_CSUM_VALID;
2249
2250 if (snaplen > res)
2251 snaplen = res;
2252
2253 if (sk->sk_type == SOCK_DGRAM) {
2254 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2255 po->tp_reserve;
2256 } else {
2257 unsigned int maclen = skb_network_offset(skb);
2258 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2259 (maclen < 16 ? 16 : maclen)) +
2260 po->tp_reserve;
2261 if (po->has_vnet_hdr) {
2262 netoff += sizeof(struct virtio_net_hdr);
2263 do_vnet = true;
2264 }
2265 macoff = netoff - maclen;
2266 }
Olivier Deprez0e641232021-09-23 10:07:05 +02002267 if (netoff > USHRT_MAX) {
2268 atomic_inc(&po->tp_drops);
2269 goto drop_n_restore;
2270 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002271 if (po->tp_version <= TPACKET_V2) {
2272 if (macoff + snaplen > po->rx_ring.frame_size) {
2273 if (po->copy_thresh &&
2274 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2275 if (skb_shared(skb)) {
2276 copy_skb = skb_clone(skb, GFP_ATOMIC);
2277 } else {
2278 copy_skb = skb_get(skb);
2279 skb_head = skb->data;
2280 }
Olivier Deprez157378f2022-04-04 15:47:50 +02002281 if (copy_skb) {
2282 memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
2283 sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002284 skb_set_owner_r(copy_skb, sk);
Olivier Deprez157378f2022-04-04 15:47:50 +02002285 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002286 }
2287 snaplen = po->rx_ring.frame_size - macoff;
2288 if ((int)snaplen < 0) {
2289 snaplen = 0;
2290 do_vnet = false;
2291 }
2292 }
2293 } else if (unlikely(macoff + snaplen >
2294 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2295 u32 nval;
2296
2297 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2298 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2299 snaplen, nval, macoff);
2300 snaplen = nval;
2301 if (unlikely((int)snaplen < 0)) {
2302 snaplen = 0;
2303 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2304 do_vnet = false;
2305 }
2306 }
2307 spin_lock(&sk->sk_receive_queue.lock);
2308 h.raw = packet_current_rx_frame(po, skb,
2309 TP_STATUS_KERNEL, (macoff+snaplen));
2310 if (!h.raw)
2311 goto drop_n_account;
Olivier Deprez0e641232021-09-23 10:07:05 +02002312
2313 if (po->tp_version <= TPACKET_V2) {
2314 slot_id = po->rx_ring.head;
2315 if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2316 goto drop_n_account;
2317 __set_bit(slot_id, po->rx_ring.rx_owner_map);
2318 }
2319
2320 if (do_vnet &&
2321 virtio_net_hdr_from_skb(skb, h.raw + macoff -
2322 sizeof(struct virtio_net_hdr),
2323 vio_le(), true, 0)) {
2324 if (po->tp_version == TPACKET_V3)
2325 prb_clear_blk_fill_status(&po->rx_ring);
2326 goto drop_n_account;
2327 }
2328
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002329 if (po->tp_version <= TPACKET_V2) {
2330 packet_increment_rx_head(po, &po->rx_ring);
2331 /*
2332		 * LOSING will be reported until you read the stats,
2333		 * because it's COR - Clear On Read.
2334		 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2335		 * at the packet level.
2336 */
David Brazdil0f672f62019-12-10 10:32:29 +00002337 if (atomic_read(&po->tp_drops))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002338 status |= TP_STATUS_LOSING;
2339 }
2340
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002341 po->stats.stats1.tp_packets++;
2342 if (copy_skb) {
2343 status |= TP_STATUS_COPY;
2344 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2345 }
2346 spin_unlock(&sk->sk_receive_queue.lock);
2347
2348 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2349
Olivier Deprez157378f2022-04-04 15:47:50 +02002350 /* Always timestamp; prefer an existing software timestamp taken
2351 * closer to the time of capture.
2352 */
2353 ts_status = tpacket_get_timestamp(skb, &ts,
2354 po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
2355 if (!ts_status)
2356 ktime_get_real_ts64(&ts);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002357
2358 status |= ts_status;
2359
2360 switch (po->tp_version) {
2361 case TPACKET_V1:
2362 h.h1->tp_len = skb->len;
2363 h.h1->tp_snaplen = snaplen;
2364 h.h1->tp_mac = macoff;
2365 h.h1->tp_net = netoff;
2366 h.h1->tp_sec = ts.tv_sec;
2367 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2368 hdrlen = sizeof(*h.h1);
2369 break;
2370 case TPACKET_V2:
2371 h.h2->tp_len = skb->len;
2372 h.h2->tp_snaplen = snaplen;
2373 h.h2->tp_mac = macoff;
2374 h.h2->tp_net = netoff;
2375 h.h2->tp_sec = ts.tv_sec;
2376 h.h2->tp_nsec = ts.tv_nsec;
2377 if (skb_vlan_tag_present(skb)) {
2378 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2379 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2380 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2381 } else {
2382 h.h2->tp_vlan_tci = 0;
2383 h.h2->tp_vlan_tpid = 0;
2384 }
2385 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2386 hdrlen = sizeof(*h.h2);
2387 break;
2388 case TPACKET_V3:
2389		/* tp_next_offset and the vlan fields are already populated above,
2390		 * so do NOT clear those fields here.
2391 */
2392 h.h3->tp_status |= status;
2393 h.h3->tp_len = skb->len;
2394 h.h3->tp_snaplen = snaplen;
2395 h.h3->tp_mac = macoff;
2396 h.h3->tp_net = netoff;
2397 h.h3->tp_sec = ts.tv_sec;
2398 h.h3->tp_nsec = ts.tv_nsec;
2399 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2400 hdrlen = sizeof(*h.h3);
2401 break;
2402 default:
2403 BUG();
2404 }
2405
2406 sll = h.raw + TPACKET_ALIGN(hdrlen);
2407 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2408 sll->sll_family = AF_PACKET;
2409 sll->sll_hatype = dev->type;
2410 sll->sll_protocol = skb->protocol;
2411 sll->sll_pkttype = skb->pkt_type;
2412 if (unlikely(po->origdev))
2413 sll->sll_ifindex = orig_dev->ifindex;
2414 else
2415 sll->sll_ifindex = dev->ifindex;
2416
2417 smp_mb();
2418
2419#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2420 if (po->tp_version <= TPACKET_V2) {
2421 u8 *start, *end;
2422
2423 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2424 macoff + snaplen);
2425
2426 for (start = h.raw; start < end; start += PAGE_SIZE)
2427 flush_dcache_page(pgv_to_page(start));
2428 }
2429 smp_wmb();
2430#endif
2431
2432 if (po->tp_version <= TPACKET_V2) {
Olivier Deprez0e641232021-09-23 10:07:05 +02002433 spin_lock(&sk->sk_receive_queue.lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002434 __packet_set_status(po, h.raw, status);
Olivier Deprez0e641232021-09-23 10:07:05 +02002435 __clear_bit(slot_id, po->rx_ring.rx_owner_map);
2436 spin_unlock(&sk->sk_receive_queue.lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002437 sk->sk_data_ready(sk);
Olivier Deprez0e641232021-09-23 10:07:05 +02002438 } else if (po->tp_version == TPACKET_V3) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002439 prb_clear_blk_fill_status(&po->rx_ring);
2440 }
2441
2442drop_n_restore:
2443 if (skb_head != skb->data && skb_shared(skb)) {
2444 skb->data = skb_head;
2445 skb->len = skb_len;
2446 }
2447drop:
2448 if (!is_drop_n_account)
2449 consume_skb(skb);
2450 else
2451 kfree_skb(skb);
2452 return 0;
2453
2454drop_n_account:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002455 spin_unlock(&sk->sk_receive_queue.lock);
David Brazdil0f672f62019-12-10 10:32:29 +00002456 atomic_inc(&po->tp_drops);
2457 is_drop_n_account = true;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002458
2459 sk->sk_data_ready(sk);
2460 kfree_skb(copy_skb);
2461 goto drop_n_restore;
2462}
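
/* tpacket_rcv() publishes frames into the mmap()ed ring configured with
 * PACKET_RX_RING.  A rough userspace sketch of the matching TPACKET_V2
 * receive loop; "ring" is the mmap()ed area, "req" the tpacket_req used
 * to size it, and "frame_nr"/"handle()" are placeholders (illustrative
 * only):
 *
 *	struct tpacket2_hdr *hdr =
 *		(void *)(ring + frame_nr * req.tp_frame_size);
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER)) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *		poll(&pfd, 1, -1);
 *	}
 *	// payload starts tp_mac bytes into the slot
 *	handle((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back
 *	frame_nr = (frame_nr + 1) % req.tp_frame_nr;
 */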
2463
2464static void tpacket_destruct_skb(struct sk_buff *skb)
2465{
2466 struct packet_sock *po = pkt_sk(skb->sk);
2467
2468 if (likely(po->tx_ring.pg_vec)) {
2469 void *ph;
2470 __u32 ts;
2471
2472 ph = skb_zcopy_get_nouarg(skb);
2473 packet_dec_pending(&po->tx_ring);
2474
2475 ts = __packet_set_timestamp(po, ph, skb);
2476 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
David Brazdil0f672f62019-12-10 10:32:29 +00002477
2478 if (!packet_read_pending(&po->tx_ring))
2479 complete(&po->skb_completion);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002480 }
2481
2482 sock_wfree(skb);
2483}
2484
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002485static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2486{
2487 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2488 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2489 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2490 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2491 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2492 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2493 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2494
2495 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2496 return -EINVAL;
2497
2498 return 0;
2499}
2500
2501static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2502 struct virtio_net_hdr *vnet_hdr)
2503{
2504 if (*len < sizeof(*vnet_hdr))
2505 return -EINVAL;
2506 *len -= sizeof(*vnet_hdr);
2507
2508 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2509 return -EFAULT;
2510
2511 return __packet_snd_vnet_parse(vnet_hdr, *len);
2512}
2513
2514static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2515 void *frame, struct net_device *dev, void *data, int tp_len,
2516 __be16 proto, unsigned char *addr, int hlen, int copylen,
2517 const struct sockcm_cookie *sockc)
2518{
2519 union tpacket_uhdr ph;
2520 int to_write, offset, len, nr_frags, len_max;
2521 struct socket *sock = po->sk.sk_socket;
2522 struct page *page;
2523 int err;
2524
2525 ph.raw = frame;
2526
2527 skb->protocol = proto;
2528 skb->dev = dev;
2529 skb->priority = po->sk.sk_priority;
2530 skb->mark = po->sk.sk_mark;
2531 skb->tstamp = sockc->transmit_time;
David Brazdil0f672f62019-12-10 10:32:29 +00002532 skb_setup_tx_timestamp(skb, sockc->tsflags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002533 skb_zcopy_set_nouarg(skb, ph.raw);
2534
2535 skb_reserve(skb, hlen);
2536 skb_reset_network_header(skb);
2537
2538 to_write = tp_len;
2539
2540 if (sock->type == SOCK_DGRAM) {
2541 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2542 NULL, tp_len);
2543 if (unlikely(err < 0))
2544 return -EINVAL;
2545 } else if (copylen) {
2546 int hdrlen = min_t(int, copylen, tp_len);
2547
2548 skb_push(skb, dev->hard_header_len);
2549 skb_put(skb, copylen - dev->hard_header_len);
2550 err = skb_store_bits(skb, 0, data, hdrlen);
2551 if (unlikely(err))
2552 return err;
2553 if (!dev_validate_header(dev, skb->data, hdrlen))
2554 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002555
2556 data += hdrlen;
2557 to_write -= hdrlen;
2558 }
2559
2560 offset = offset_in_page(data);
2561 len_max = PAGE_SIZE - offset;
2562 len = ((to_write > len_max) ? len_max : to_write);
2563
2564 skb->data_len = to_write;
2565 skb->len += to_write;
2566 skb->truesize += to_write;
2567 refcount_add(to_write, &po->sk.sk_wmem_alloc);
2568
2569 while (likely(to_write)) {
2570 nr_frags = skb_shinfo(skb)->nr_frags;
2571
2572 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2573			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2574 MAX_SKB_FRAGS);
2575 return -EFAULT;
2576 }
2577
2578 page = pgv_to_page(data);
2579 data += len;
2580 flush_dcache_page(page);
2581 get_page(page);
2582 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2583 to_write -= len;
2584 offset = 0;
2585 len_max = PAGE_SIZE;
2586 len = ((to_write > len_max) ? len_max : to_write);
2587 }
2588
David Brazdil0f672f62019-12-10 10:32:29 +00002589 packet_parse_headers(skb, sock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002590
2591 return tp_len;
2592}
2593
2594static int tpacket_parse_header(struct packet_sock *po, void *frame,
2595 int size_max, void **data)
2596{
2597 union tpacket_uhdr ph;
2598 int tp_len, off;
2599
2600 ph.raw = frame;
2601
2602 switch (po->tp_version) {
2603 case TPACKET_V3:
2604 if (ph.h3->tp_next_offset != 0) {
2605 pr_warn_once("variable sized slot not supported");
2606 return -EINVAL;
2607 }
2608 tp_len = ph.h3->tp_len;
2609 break;
2610 case TPACKET_V2:
2611 tp_len = ph.h2->tp_len;
2612 break;
2613 default:
2614 tp_len = ph.h1->tp_len;
2615 break;
2616 }
2617 if (unlikely(tp_len > size_max)) {
2618 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2619 return -EMSGSIZE;
2620 }
2621
2622 if (unlikely(po->tp_tx_has_off)) {
2623 int off_min, off_max;
2624
2625 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2626 off_max = po->tx_ring.frame_size - tp_len;
2627 if (po->sk.sk_type == SOCK_DGRAM) {
2628 switch (po->tp_version) {
2629 case TPACKET_V3:
2630 off = ph.h3->tp_net;
2631 break;
2632 case TPACKET_V2:
2633 off = ph.h2->tp_net;
2634 break;
2635 default:
2636 off = ph.h1->tp_net;
2637 break;
2638 }
2639 } else {
2640 switch (po->tp_version) {
2641 case TPACKET_V3:
2642 off = ph.h3->tp_mac;
2643 break;
2644 case TPACKET_V2:
2645 off = ph.h2->tp_mac;
2646 break;
2647 default:
2648 off = ph.h1->tp_mac;
2649 break;
2650 }
2651 }
2652 if (unlikely((off < off_min) || (off_max < off)))
2653 return -EINVAL;
2654 } else {
2655 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2656 }
2657
2658 *data = frame + off;
2659 return tp_len;
2660}
2661
2662static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2663{
David Brazdil0f672f62019-12-10 10:32:29 +00002664 struct sk_buff *skb = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002665 struct net_device *dev;
2666 struct virtio_net_hdr *vnet_hdr = NULL;
2667 struct sockcm_cookie sockc;
2668 __be16 proto;
2669 int err, reserve = 0;
2670 void *ph;
2671 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2672 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
David Brazdil0f672f62019-12-10 10:32:29 +00002673 unsigned char *addr = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002674 int tp_len, size_max;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002675 void *data;
2676 int len_sum = 0;
2677 int status = TP_STATUS_AVAILABLE;
2678 int hlen, tlen, copylen = 0;
David Brazdil0f672f62019-12-10 10:32:29 +00002679 long timeo = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002680
2681 mutex_lock(&po->pg_vec_lock);
2682
David Brazdil0f672f62019-12-10 10:32:29 +00002683 /* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2684 * we need to confirm it under protection of pg_vec_lock.
2685 */
2686 if (unlikely(!po->tx_ring.pg_vec)) {
2687 err = -EBUSY;
2688 goto out;
2689 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002690 if (likely(saddr == NULL)) {
2691 dev = packet_cached_dev_get(po);
Olivier Deprez0e641232021-09-23 10:07:05 +02002692 proto = READ_ONCE(po->num);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002693 } else {
2694 err = -EINVAL;
2695 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2696 goto out;
2697 if (msg->msg_namelen < (saddr->sll_halen
2698 + offsetof(struct sockaddr_ll,
2699 sll_addr)))
2700 goto out;
2701 proto = saddr->sll_protocol;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002702 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
David Brazdil0f672f62019-12-10 10:32:29 +00002703 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2704 if (dev && msg->msg_namelen < dev->addr_len +
2705 offsetof(struct sockaddr_ll, sll_addr))
2706 goto out_put;
2707 addr = saddr->sll_addr;
2708 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002709 }
2710
2711 err = -ENXIO;
2712 if (unlikely(dev == NULL))
2713 goto out;
2714 err = -ENETDOWN;
2715 if (unlikely(!(dev->flags & IFF_UP)))
2716 goto out_put;
2717
2718 sockcm_init(&sockc, &po->sk);
2719 if (msg->msg_controllen) {
2720 err = sock_cmsg_send(&po->sk, msg, &sockc);
2721 if (unlikely(err))
2722 goto out_put;
2723 }
2724
2725 if (po->sk.sk_socket->type == SOCK_RAW)
2726 reserve = dev->hard_header_len;
2727 size_max = po->tx_ring.frame_size
2728 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2729
2730 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2731 size_max = dev->mtu + reserve + VLAN_HLEN;
2732
David Brazdil0f672f62019-12-10 10:32:29 +00002733 reinit_completion(&po->skb_completion);
2734
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002735 do {
2736 ph = packet_current_frame(po, &po->tx_ring,
2737 TP_STATUS_SEND_REQUEST);
2738 if (unlikely(ph == NULL)) {
David Brazdil0f672f62019-12-10 10:32:29 +00002739 if (need_wait && skb) {
2740 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2741 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2742 if (timeo <= 0) {
2743 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2744 goto out_put;
2745 }
2746 }
2747 /* check for additional frames */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002748 continue;
2749 }
2750
2751 skb = NULL;
2752 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2753 if (tp_len < 0)
2754 goto tpacket_error;
2755
2756 status = TP_STATUS_SEND_REQUEST;
2757 hlen = LL_RESERVED_SPACE(dev);
2758 tlen = dev->needed_tailroom;
2759 if (po->has_vnet_hdr) {
2760 vnet_hdr = data;
2761 data += sizeof(*vnet_hdr);
2762 tp_len -= sizeof(*vnet_hdr);
2763 if (tp_len < 0 ||
2764 __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2765 tp_len = -EINVAL;
2766 goto tpacket_error;
2767 }
2768 copylen = __virtio16_to_cpu(vio_le(),
2769 vnet_hdr->hdr_len);
2770 }
2771 copylen = max_t(int, copylen, dev->hard_header_len);
2772 skb = sock_alloc_send_skb(&po->sk,
2773 hlen + tlen + sizeof(struct sockaddr_ll) +
2774 (copylen - dev->hard_header_len),
2775 !need_wait, &err);
2776
2777 if (unlikely(skb == NULL)) {
2778 /* we assume the socket was initially writeable ... */
2779 if (likely(len_sum > 0))
2780 err = len_sum;
2781 goto out_status;
2782 }
2783 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2784 addr, hlen, copylen, &sockc);
2785 if (likely(tp_len >= 0) &&
2786 tp_len > dev->mtu + reserve &&
2787 !po->has_vnet_hdr &&
2788 !packet_extra_vlan_len_allowed(dev, skb))
2789 tp_len = -EMSGSIZE;
2790
2791 if (unlikely(tp_len < 0)) {
2792tpacket_error:
2793 if (po->tp_loss) {
2794 __packet_set_status(po, ph,
2795 TP_STATUS_AVAILABLE);
2796 packet_increment_head(&po->tx_ring);
2797 kfree_skb(skb);
2798 continue;
2799 } else {
2800 status = TP_STATUS_WRONG_FORMAT;
2801 err = tp_len;
2802 goto out_status;
2803 }
2804 }
2805
2806 if (po->has_vnet_hdr) {
2807 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2808 tp_len = -EINVAL;
2809 goto tpacket_error;
2810 }
2811 virtio_net_hdr_set_proto(skb, vnet_hdr);
2812 }
2813
2814 skb->destructor = tpacket_destruct_skb;
2815 __packet_set_status(po, ph, TP_STATUS_SENDING);
2816 packet_inc_pending(&po->tx_ring);
2817
2818 status = TP_STATUS_SEND_REQUEST;
2819 err = po->xmit(skb);
Olivier Deprez92d4c212022-12-06 15:05:30 +01002820 if (unlikely(err != 0)) {
2821 if (err > 0)
2822 err = net_xmit_errno(err);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002823 if (err && __packet_get_status(po, ph) ==
2824 TP_STATUS_AVAILABLE) {
2825 /* skb was destructed already */
2826 skb = NULL;
2827 goto out_status;
2828 }
2829 /*
2830 * skb was dropped but not destructed yet;
2831 * let's treat it like congestion or err < 0
2832 */
2833 err = 0;
2834 }
2835 packet_increment_head(&po->tx_ring);
2836 len_sum += tp_len;
2837 } while (likely((ph != NULL) ||
2838 /* Note: packet_read_pending() might be slow if we have
2839		 * to call it, as it's a per-cpu variable, but in the fast path
2840		 * we already short-circuit the loop with the first
2841		 * condition, and luckily don't have to go down that path
2842 * anyway.
2843 */
2844 (need_wait && packet_read_pending(&po->tx_ring))));
2845
2846 err = len_sum;
2847 goto out_put;
2848
2849out_status:
2850 __packet_set_status(po, ph, status);
2851 kfree_skb(skb);
2852out_put:
2853 dev_put(dev);
2854out:
2855 mutex_unlock(&po->pg_vec_lock);
2856 return err;
2857}
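
/* tpacket_snd() drains frames that userspace marked TP_STATUS_SEND_REQUEST
 * in the PACKET_TX_RING.  A rough sketch of the matching TPACKET_V2
 * transmit side; "ring", "req", "slot", "frame" and "frame_len" are
 * placeholders and the socket is assumed to be bound to an interface
 * (illustrative only):
 *
 *	struct tpacket2_hdr *hdr =
 *		(void *)(ring + slot * req.tp_frame_size);
 *	char *data = (char *)hdr + TPACKET2_HDRLEN -
 *		     sizeof(struct sockaddr_ll);
 *
 *	memcpy(data, frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);		// kick the kernel to transmit
 */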
2858
2859static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2860 size_t reserve, size_t len,
2861 size_t linear, int noblock,
2862 int *err)
2863{
2864 struct sk_buff *skb;
2865
2866 /* Under a page? Don't bother with paged skb. */
2867 if (prepad + len < PAGE_SIZE || !linear)
2868 linear = len;
2869
2870 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2871 err, 0);
2872 if (!skb)
2873 return NULL;
2874
2875 skb_reserve(skb, reserve);
2876 skb_put(skb, linear);
2877 skb->data_len = len - linear;
2878 skb->len += len - linear;
2879
2880 return skb;
2881}
2882
2883static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2884{
2885 struct sock *sk = sock->sk;
2886 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2887 struct sk_buff *skb;
2888 struct net_device *dev;
2889 __be16 proto;
David Brazdil0f672f62019-12-10 10:32:29 +00002890 unsigned char *addr = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002891 int err, reserve = 0;
2892 struct sockcm_cookie sockc;
2893 struct virtio_net_hdr vnet_hdr = { 0 };
2894 int offset = 0;
2895 struct packet_sock *po = pkt_sk(sk);
2896 bool has_vnet_hdr = false;
2897 int hlen, tlen, linear;
2898 int extra_len = 0;
2899
2900 /*
2901 * Get and verify the address.
2902 */
2903
2904 if (likely(saddr == NULL)) {
2905 dev = packet_cached_dev_get(po);
Olivier Deprez0e641232021-09-23 10:07:05 +02002906 proto = READ_ONCE(po->num);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002907 } else {
2908 err = -EINVAL;
2909 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2910 goto out;
2911 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2912 goto out;
2913 proto = saddr->sll_protocol;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002914 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
David Brazdil0f672f62019-12-10 10:32:29 +00002915 if (sock->type == SOCK_DGRAM) {
2916 if (dev && msg->msg_namelen < dev->addr_len +
2917 offsetof(struct sockaddr_ll, sll_addr))
2918 goto out_unlock;
2919 addr = saddr->sll_addr;
2920 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002921 }
2922
2923 err = -ENXIO;
2924 if (unlikely(dev == NULL))
2925 goto out_unlock;
2926 err = -ENETDOWN;
2927 if (unlikely(!(dev->flags & IFF_UP)))
2928 goto out_unlock;
2929
2930 sockcm_init(&sockc, sk);
2931 sockc.mark = sk->sk_mark;
2932 if (msg->msg_controllen) {
2933 err = sock_cmsg_send(sk, msg, &sockc);
2934 if (unlikely(err))
2935 goto out_unlock;
2936 }
2937
2938 if (sock->type == SOCK_RAW)
2939 reserve = dev->hard_header_len;
2940 if (po->has_vnet_hdr) {
2941 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2942 if (err)
2943 goto out_unlock;
2944 has_vnet_hdr = true;
2945 }
2946
2947 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2948 if (!netif_supports_nofcs(dev)) {
2949 err = -EPROTONOSUPPORT;
2950 goto out_unlock;
2951 }
2952 extra_len = 4; /* We're doing our own CRC */
2953 }
2954
2955 err = -EMSGSIZE;
2956 if (!vnet_hdr.gso_type &&
2957 (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2958 goto out_unlock;
2959
2960 err = -ENOBUFS;
2961 hlen = LL_RESERVED_SPACE(dev);
2962 tlen = dev->needed_tailroom;
2963 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2964 linear = max(linear, min_t(int, len, dev->hard_header_len));
2965 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2966 msg->msg_flags & MSG_DONTWAIT, &err);
2967 if (skb == NULL)
2968 goto out_unlock;
2969
2970 skb_reset_network_header(skb);
2971
2972 err = -EINVAL;
2973 if (sock->type == SOCK_DGRAM) {
2974 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2975 if (unlikely(offset < 0))
2976 goto out_free;
2977 } else if (reserve) {
2978 skb_reserve(skb, -reserve);
David Brazdil0f672f62019-12-10 10:32:29 +00002979 if (len < reserve + sizeof(struct ipv6hdr) &&
2980 dev->min_header_len != dev->hard_header_len)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002981 skb_reset_network_header(skb);
2982 }
2983
2984 /* Returns -EFAULT on error */
2985 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2986 if (err)
2987 goto out_free;
2988
Olivier Deprez92d4c212022-12-06 15:05:30 +01002989 if ((sock->type == SOCK_RAW &&
2990 !dev_validate_header(dev, skb->data, len)) || !skb->len) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002991 err = -EINVAL;
2992 goto out_free;
2993 }
2994
David Brazdil0f672f62019-12-10 10:32:29 +00002995 skb_setup_tx_timestamp(skb, sockc.tsflags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002996
2997 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2998 !packet_extra_vlan_len_allowed(dev, skb)) {
2999 err = -EMSGSIZE;
3000 goto out_free;
3001 }
3002
3003 skb->protocol = proto;
3004 skb->dev = dev;
3005 skb->priority = sk->sk_priority;
3006 skb->mark = sockc.mark;
3007 skb->tstamp = sockc.transmit_time;
3008
3009 if (has_vnet_hdr) {
3010 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3011 if (err)
3012 goto out_free;
3013 len += sizeof(vnet_hdr);
3014 virtio_net_hdr_set_proto(skb, &vnet_hdr);
3015 }
3016
David Brazdil0f672f62019-12-10 10:32:29 +00003017 packet_parse_headers(skb, sock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003018
3019 if (unlikely(extra_len == 4))
3020 skb->no_fcs = 1;
3021
3022 err = po->xmit(skb);
Olivier Deprez92d4c212022-12-06 15:05:30 +01003023 if (unlikely(err != 0)) {
3024 if (err > 0)
3025 err = net_xmit_errno(err);
3026 if (err)
3027 goto out_unlock;
3028 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003029
3030 dev_put(dev);
3031
3032 return len;
3033
3034out_free:
3035 kfree_skb(skb);
3036out_unlock:
3037 if (dev)
3038 dev_put(dev);
3039out:
3040 return err;
3041}
3042
3043static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3044{
3045 struct sock *sk = sock->sk;
3046 struct packet_sock *po = pkt_sk(sk);
3047
Olivier Deprez157378f2022-04-04 15:47:50 +02003048 /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3049 * tpacket_snd() will redo the check safely.
3050 */
3051 if (data_race(po->tx_ring.pg_vec))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003052 return tpacket_snd(po, msg);
Olivier Deprez157378f2022-04-04 15:47:50 +02003053
3054 return packet_snd(sock, msg, len);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003055}
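
/* A rough userspace sketch of the non-ring transmit path taken through
 * packet_snd(): a SOCK_RAW packet socket pushes one complete link-layer
 * frame out of the interface named in struct sockaddr_ll.  "frame" and
 * "frame_len" are assumed to be a fully built Ethernet frame; error
 * handling is omitted (illustrative only):
 *
 *	#include <net/if.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */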
3056
3057/*
3058 * Close a PACKET socket. This is fairly simple. We immediately go
3059 * to 'closed' state and remove our protocol entry in the device list.
3060 */
3061
3062static int packet_release(struct socket *sock)
3063{
3064 struct sock *sk = sock->sk;
3065 struct packet_sock *po;
3066 struct packet_fanout *f;
3067 struct net *net;
3068 union tpacket_req_u req_u;
3069
3070 if (!sk)
3071 return 0;
3072
3073 net = sock_net(sk);
3074 po = pkt_sk(sk);
3075
3076 mutex_lock(&net->packet.sklist_lock);
3077 sk_del_node_init_rcu(sk);
3078 mutex_unlock(&net->packet.sklist_lock);
3079
3080 preempt_disable();
3081 sock_prot_inuse_add(net, sk->sk_prot, -1);
3082 preempt_enable();
3083
3084 spin_lock(&po->bind_lock);
3085 unregister_prot_hook(sk, false);
3086 packet_cached_dev_reset(po);
3087
3088 if (po->prot_hook.dev) {
3089 dev_put(po->prot_hook.dev);
3090 po->prot_hook.dev = NULL;
3091 }
3092 spin_unlock(&po->bind_lock);
3093
3094 packet_flush_mclist(sk);
3095
3096 lock_sock(sk);
3097 if (po->rx_ring.pg_vec) {
3098 memset(&req_u, 0, sizeof(req_u));
3099 packet_set_ring(sk, &req_u, 1, 0);
3100 }
3101
3102 if (po->tx_ring.pg_vec) {
3103 memset(&req_u, 0, sizeof(req_u));
3104 packet_set_ring(sk, &req_u, 1, 1);
3105 }
3106 release_sock(sk);
3107
3108 f = fanout_release(sk);
3109
3110 synchronize_net();
3111
David Brazdil0f672f62019-12-10 10:32:29 +00003112 kfree(po->rollover);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003113 if (f) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003114 fanout_release_data(f);
Olivier Deprez157378f2022-04-04 15:47:50 +02003115 kvfree(f);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003116 }
3117 /*
3118 * Now the socket is dead. No more input will appear.
3119 */
3120 sock_orphan(sk);
3121 sock->sk = NULL;
3122
3123 /* Purge queues */
3124
3125 skb_queue_purge(&sk->sk_receive_queue);
3126 packet_free_pending(po);
3127 sk_refcnt_debug_release(sk);
3128
3129 sock_put(sk);
3130 return 0;
3131}
3132
3133/*
3134 * Attach a packet hook.
3135 */
3136
3137static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3138 __be16 proto)
3139{
3140 struct packet_sock *po = pkt_sk(sk);
3141 struct net_device *dev_curr;
3142 __be16 proto_curr;
3143 bool need_rehook;
3144 struct net_device *dev = NULL;
3145 int ret = 0;
3146 bool unlisted = false;
3147
3148 lock_sock(sk);
3149 spin_lock(&po->bind_lock);
3150 rcu_read_lock();
3151
3152 if (po->fanout) {
3153 ret = -EINVAL;
3154 goto out_unlock;
3155 }
3156
3157 if (name) {
3158 dev = dev_get_by_name_rcu(sock_net(sk), name);
3159 if (!dev) {
3160 ret = -ENODEV;
3161 goto out_unlock;
3162 }
3163 } else if (ifindex) {
3164 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3165 if (!dev) {
3166 ret = -ENODEV;
3167 goto out_unlock;
3168 }
3169 }
3170
3171 if (dev)
3172 dev_hold(dev);
3173
3174 proto_curr = po->prot_hook.type;
3175 dev_curr = po->prot_hook.dev;
3176
3177 need_rehook = proto_curr != proto || dev_curr != dev;
3178
3179 if (need_rehook) {
3180 if (po->running) {
3181 rcu_read_unlock();
3182 /* prevents packet_notifier() from calling
3183 * register_prot_hook()
3184 */
Olivier Deprez0e641232021-09-23 10:07:05 +02003185 WRITE_ONCE(po->num, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003186 __unregister_prot_hook(sk, true);
3187 rcu_read_lock();
3188 dev_curr = po->prot_hook.dev;
3189 if (dev)
3190 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3191 dev->ifindex);
3192 }
3193
3194 BUG_ON(po->running);
Olivier Deprez0e641232021-09-23 10:07:05 +02003195 WRITE_ONCE(po->num, proto);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003196 po->prot_hook.type = proto;
3197
3198 if (unlikely(unlisted)) {
3199 dev_put(dev);
3200 po->prot_hook.dev = NULL;
Olivier Deprez0e641232021-09-23 10:07:05 +02003201 WRITE_ONCE(po->ifindex, -1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003202 packet_cached_dev_reset(po);
3203 } else {
3204 po->prot_hook.dev = dev;
Olivier Deprez0e641232021-09-23 10:07:05 +02003205 WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003206 packet_cached_dev_assign(po, dev);
3207 }
3208 }
3209 if (dev_curr)
3210 dev_put(dev_curr);
3211
3212 if (proto == 0 || !need_rehook)
3213 goto out_unlock;
3214
3215 if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3216 register_prot_hook(sk);
3217 } else {
3218 sk->sk_err = ENETDOWN;
3219 if (!sock_flag(sk, SOCK_DEAD))
3220 sk->sk_error_report(sk);
3221 }
3222
3223out_unlock:
3224 rcu_read_unlock();
3225 spin_unlock(&po->bind_lock);
3226 release_sock(sk);
3227 return ret;
3228}
3229
3230/*
3231 * Bind a packet socket to a device
3232 */
3233
3234static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3235 int addr_len)
3236{
3237 struct sock *sk = sock->sk;
3238 char name[sizeof(uaddr->sa_data) + 1];
3239
3240 /*
3241 * Check legality
3242 */
3243
3244 if (addr_len != sizeof(struct sockaddr))
3245 return -EINVAL;
3246	/* uaddr->sa_data comes from userspace; it's not guaranteed to be
3247 * zero-terminated.
3248 */
3249 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3250 name[sizeof(uaddr->sa_data)] = 0;
3251
3252 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3253}
3254
3255static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3256{
3257 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3258 struct sock *sk = sock->sk;
3259
3260 /*
3261 * Check legality
3262 */
3263
3264 if (addr_len < sizeof(struct sockaddr_ll))
3265 return -EINVAL;
3266 if (sll->sll_family != AF_PACKET)
3267 return -EINVAL;
3268
3269 return packet_do_bind(sk, NULL, sll->sll_ifindex,
3270 sll->sll_protocol ? : pkt_sk(sk)->num);
3271}
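
/* A rough userspace sketch of what lands in packet_do_bind(): binding an
 * AF_PACKET socket to a single interface so it only sees (and sends on)
 * that device.  "eth0" is a placeholder name; error handling is omitted
 * (illustrative only):
 *
 *	#include <net/if.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */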
3272
3273static struct proto packet_proto = {
3274 .name = "PACKET",
3275 .owner = THIS_MODULE,
3276 .obj_size = sizeof(struct packet_sock),
3277};
3278
3279/*
3280 * Create a packet of type SOCK_PACKET.
3281 */
3282
3283static int packet_create(struct net *net, struct socket *sock, int protocol,
3284 int kern)
3285{
3286 struct sock *sk;
3287 struct packet_sock *po;
3288 __be16 proto = (__force __be16)protocol; /* weird, but documented */
3289 int err;
3290
3291 if (!ns_capable(net->user_ns, CAP_NET_RAW))
3292 return -EPERM;
3293 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3294 sock->type != SOCK_PACKET)
3295 return -ESOCKTNOSUPPORT;
3296
3297 sock->state = SS_UNCONNECTED;
3298
3299 err = -ENOBUFS;
3300 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3301 if (sk == NULL)
3302 goto out;
3303
3304 sock->ops = &packet_ops;
3305 if (sock->type == SOCK_PACKET)
3306 sock->ops = &packet_ops_spkt;
3307
3308 sock_init_data(sock, sk);
3309
3310 po = pkt_sk(sk);
David Brazdil0f672f62019-12-10 10:32:29 +00003311 init_completion(&po->skb_completion);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003312 sk->sk_family = PF_PACKET;
3313 po->num = proto;
3314 po->xmit = dev_queue_xmit;
3315
3316 err = packet_alloc_pending(po);
3317 if (err)
3318 goto out2;
3319
3320 packet_cached_dev_reset(po);
3321
3322 sk->sk_destruct = packet_sock_destruct;
3323 sk_refcnt_debug_inc(sk);
3324
3325 /*
3326 * Attach a protocol block
3327 */
3328
3329 spin_lock_init(&po->bind_lock);
3330 mutex_init(&po->pg_vec_lock);
3331 po->rollover = NULL;
3332 po->prot_hook.func = packet_rcv;
3333
3334 if (sock->type == SOCK_PACKET)
3335 po->prot_hook.func = packet_rcv_spkt;
3336
3337 po->prot_hook.af_packet_priv = sk;
Olivier Deprez157378f2022-04-04 15:47:50 +02003338 po->prot_hook.af_packet_net = sock_net(sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003339
3340 if (proto) {
3341 po->prot_hook.type = proto;
3342 __register_prot_hook(sk);
3343 }
3344
3345 mutex_lock(&net->packet.sklist_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00003346 sk_add_node_tail_rcu(sk, &net->packet.sklist);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003347 mutex_unlock(&net->packet.sklist_lock);
3348
3349 preempt_disable();
3350 sock_prot_inuse_add(net, &packet_proto, 1);
3351 preempt_enable();
3352
3353 return 0;
3354out2:
3355 sk_free(sk);
3356out:
3357 return err;
3358}
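
/* A rough userspace sketch of the socket() call that reaches
 * packet_create(); CAP_NET_RAW is required (checked against the user
 * namespace owning the network namespace), and a nonzero protocol
 * registers the receive hook immediately (illustrative only):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 */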
3359
3360/*
3361 * Pull a packet from our receive queue and hand it to the user.
3362 * If necessary we block.
3363 */
3364
3365static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3366 int flags)
3367{
3368 struct sock *sk = sock->sk;
3369 struct sk_buff *skb;
3370 int copied, err;
3371 int vnet_hdr_len = 0;
3372 unsigned int origlen = 0;
3373
3374 err = -EINVAL;
3375 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3376 goto out;
3377
3378#if 0
3379 /* What error should we return now? EUNATTACH? */
3380 if (pkt_sk(sk)->ifindex < 0)
3381 return -ENODEV;
3382#endif
3383
3384 if (flags & MSG_ERRQUEUE) {
3385 err = sock_recv_errqueue(sk, msg, len,
3386 SOL_PACKET, PACKET_TX_TIMESTAMP);
3387 goto out;
3388 }
3389
3390 /*
3391 * Call the generic datagram receiver. This handles all sorts
3392 * of horrible races and re-entrancy so we can forget about it
3393 * in the protocol layers.
3394 *
3395	 * Now it will return ENETDOWN if the device has just gone down,
3396 * but then it will block.
3397 */
3398
3399 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3400
3401 /*
3402	 * If an error occurred, return it. skb_recv_datagram() handles
3403	 * the blocking, so we don't have to see or worry about blocking
3404	 * retries.
3405 */
3406
3407 if (skb == NULL)
3408 goto out;
3409
David Brazdil0f672f62019-12-10 10:32:29 +00003410 packet_rcv_try_clear_pressure(pkt_sk(sk));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003411
3412 if (pkt_sk(sk)->has_vnet_hdr) {
3413 err = packet_rcv_vnet(msg, skb, &len);
3414 if (err)
3415 goto out_free;
3416 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3417 }
3418
3419	/* You lose any data beyond the buffer you gave. If that worries
3420	 * a user program, it can ask the device for its MTU
3421 * anyway.
3422 */
3423 copied = skb->len;
3424 if (copied > len) {
3425 copied = len;
3426 msg->msg_flags |= MSG_TRUNC;
3427 }
3428
3429 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3430 if (err)
3431 goto out_free;
3432
3433 if (sock->type != SOCK_PACKET) {
3434 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3435
3436 /* Original length was stored in sockaddr_ll fields */
3437 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3438 sll->sll_family = AF_PACKET;
3439 sll->sll_protocol = skb->protocol;
3440 }
3441
3442 sock_recv_ts_and_drops(msg, sk, skb);
3443
3444 if (msg->msg_name) {
Olivier Deprez157378f2022-04-04 15:47:50 +02003445 const size_t max_len = min(sizeof(skb->cb),
3446 sizeof(struct sockaddr_storage));
David Brazdil0f672f62019-12-10 10:32:29 +00003447 int copy_len;
3448
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003449 /* If the address length field is there to be filled
3450 * in, we fill it in now.
3451 */
3452 if (sock->type == SOCK_PACKET) {
3453 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3454 msg->msg_namelen = sizeof(struct sockaddr_pkt);
David Brazdil0f672f62019-12-10 10:32:29 +00003455 copy_len = msg->msg_namelen;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003456 } else {
3457 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3458
3459 msg->msg_namelen = sll->sll_halen +
3460 offsetof(struct sockaddr_ll, sll_addr);
David Brazdil0f672f62019-12-10 10:32:29 +00003461 copy_len = msg->msg_namelen;
3462 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3463 memset(msg->msg_name +
3464 offsetof(struct sockaddr_ll, sll_addr),
3465 0, sizeof(sll->sll_addr));
3466 msg->msg_namelen = sizeof(struct sockaddr_ll);
3467 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003468 }
Olivier Deprez157378f2022-04-04 15:47:50 +02003469 if (WARN_ON_ONCE(copy_len > max_len)) {
3470 copy_len = max_len;
3471 msg->msg_namelen = copy_len;
3472 }
David Brazdil0f672f62019-12-10 10:32:29 +00003473 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003474 }
3475
3476 if (pkt_sk(sk)->auxdata) {
3477 struct tpacket_auxdata aux;
3478
3479 aux.tp_status = TP_STATUS_USER;
3480 if (skb->ip_summed == CHECKSUM_PARTIAL)
3481 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3482 else if (skb->pkt_type != PACKET_OUTGOING &&
3483 (skb->ip_summed == CHECKSUM_COMPLETE ||
3484 skb_csum_unnecessary(skb)))
3485 aux.tp_status |= TP_STATUS_CSUM_VALID;
3486
3487 aux.tp_len = origlen;
3488 aux.tp_snaplen = skb->len;
3489 aux.tp_mac = 0;
3490 aux.tp_net = skb_network_offset(skb);
3491 if (skb_vlan_tag_present(skb)) {
3492 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3493 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3494 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3495 } else {
3496 aux.tp_vlan_tci = 0;
3497 aux.tp_vlan_tpid = 0;
3498 }
3499 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3500 }
3501
3502 /*
3503 * Free or return the buffer as appropriate. Again this
3504 * hides all the races and re-entrancy issues from us.
3505 */
3506 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3507
3508out_free:
3509 skb_free_datagram(sk, skb);
3510out:
3511 return err;
3512}
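
/* A rough userspace sketch of the receive path through packet_recvmsg()
 * with PACKET_AUXDATA enabled, so each datagram carries a tpacket_auxdata
 * control message.  "use()" is a placeholder for the consumer; error
 * handling is omitted (illustrative only):
 *
 *	int one = 1;
 *	char buf[2048], cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *c;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_PACKET && c->cmsg_type == PACKET_AUXDATA)
 *			use((struct tpacket_auxdata *)CMSG_DATA(c));
 */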
3513
3514static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3515 int peer)
3516{
3517 struct net_device *dev;
3518 struct sock *sk = sock->sk;
3519
3520 if (peer)
3521 return -EOPNOTSUPP;
3522
3523 uaddr->sa_family = AF_PACKET;
3524 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3525 rcu_read_lock();
Olivier Deprez0e641232021-09-23 10:07:05 +02003526 dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003527 if (dev)
3528 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3529 rcu_read_unlock();
3530
3531 return sizeof(*uaddr);
3532}
3533
3534static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3535 int peer)
3536{
3537 struct net_device *dev;
3538 struct sock *sk = sock->sk;
3539 struct packet_sock *po = pkt_sk(sk);
3540 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
Olivier Deprez0e641232021-09-23 10:07:05 +02003541 int ifindex;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003542
3543 if (peer)
3544 return -EOPNOTSUPP;
3545
Olivier Deprez0e641232021-09-23 10:07:05 +02003546 ifindex = READ_ONCE(po->ifindex);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003547 sll->sll_family = AF_PACKET;
Olivier Deprez0e641232021-09-23 10:07:05 +02003548 sll->sll_ifindex = ifindex;
3549 sll->sll_protocol = READ_ONCE(po->num);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003550 sll->sll_pkttype = 0;
3551 rcu_read_lock();
Olivier Deprez0e641232021-09-23 10:07:05 +02003552 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003553 if (dev) {
3554 sll->sll_hatype = dev->type;
3555 sll->sll_halen = dev->addr_len;
3556 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3557 } else {
3558 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3559 sll->sll_halen = 0;
3560 }
3561 rcu_read_unlock();
3562
3563 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3564}
3565
3566static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3567 int what)
3568{
3569 switch (i->type) {
3570 case PACKET_MR_MULTICAST:
3571 if (i->alen != dev->addr_len)
3572 return -EINVAL;
3573 if (what > 0)
3574 return dev_mc_add(dev, i->addr);
3575 else
3576 return dev_mc_del(dev, i->addr);
3577 break;
3578 case PACKET_MR_PROMISC:
3579 return dev_set_promiscuity(dev, what);
3580 case PACKET_MR_ALLMULTI:
3581 return dev_set_allmulti(dev, what);
3582 case PACKET_MR_UNICAST:
3583 if (i->alen != dev->addr_len)
3584 return -EINVAL;
3585 if (what > 0)
3586 return dev_uc_add(dev, i->addr);
3587 else
3588 return dev_uc_del(dev, i->addr);
3589 break;
3590 default:
3591 break;
3592 }
3593 return 0;
3594}
3595
3596static void packet_dev_mclist_delete(struct net_device *dev,
3597 struct packet_mclist **mlp)
3598{
3599 struct packet_mclist *ml;
3600
3601 while ((ml = *mlp) != NULL) {
3602 if (ml->ifindex == dev->ifindex) {
3603 packet_dev_mc(dev, ml, -1);
3604 *mlp = ml->next;
3605 kfree(ml);
3606 } else
3607 mlp = &ml->next;
3608 }
3609}
3610
3611static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3612{
3613 struct packet_sock *po = pkt_sk(sk);
3614 struct packet_mclist *ml, *i;
3615 struct net_device *dev;
3616 int err;
3617
3618 rtnl_lock();
3619
3620 err = -ENODEV;
3621 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3622 if (!dev)
3623 goto done;
3624
3625 err = -EINVAL;
3626 if (mreq->mr_alen > dev->addr_len)
3627 goto done;
3628
3629 err = -ENOBUFS;
3630 i = kmalloc(sizeof(*i), GFP_KERNEL);
3631 if (i == NULL)
3632 goto done;
3633
3634 err = 0;
3635 for (ml = po->mclist; ml; ml = ml->next) {
3636 if (ml->ifindex == mreq->mr_ifindex &&
3637 ml->type == mreq->mr_type &&
3638 ml->alen == mreq->mr_alen &&
3639 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3640 ml->count++;
3641 /* Free the new element ... */
3642 kfree(i);
3643 goto done;
3644 }
3645 }
3646
3647 i->type = mreq->mr_type;
3648 i->ifindex = mreq->mr_ifindex;
3649 i->alen = mreq->mr_alen;
3650 memcpy(i->addr, mreq->mr_address, i->alen);
3651 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3652 i->count = 1;
3653 i->next = po->mclist;
3654 po->mclist = i;
3655 err = packet_dev_mc(dev, i, 1);
3656 if (err) {
3657 po->mclist = i->next;
3658 kfree(i);
3659 }
3660
3661done:
3662 rtnl_unlock();
3663 return err;
3664}
3665
3666static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3667{
3668 struct packet_mclist *ml, **mlp;
3669
3670 rtnl_lock();
3671
3672 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3673 if (ml->ifindex == mreq->mr_ifindex &&
3674 ml->type == mreq->mr_type &&
3675 ml->alen == mreq->mr_alen &&
3676 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3677 if (--ml->count == 0) {
3678 struct net_device *dev;
3679 *mlp = ml->next;
3680 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3681 if (dev)
3682 packet_dev_mc(dev, ml, -1);
3683 kfree(ml);
3684 }
3685 break;
3686 }
3687 }
3688 rtnl_unlock();
3689 return 0;
3690}
3691
3692static void packet_flush_mclist(struct sock *sk)
3693{
3694 struct packet_sock *po = pkt_sk(sk);
3695 struct packet_mclist *ml;
3696
3697 if (!po->mclist)
3698 return;
3699
3700 rtnl_lock();
3701 while ((ml = po->mclist) != NULL) {
3702 struct net_device *dev;
3703
3704 po->mclist = ml->next;
3705 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3706 if (dev != NULL)
3707 packet_dev_mc(dev, ml, -1);
3708 kfree(ml);
3709 }
3710 rtnl_unlock();
3711}
3712
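/*
 * Socket-level option handling (SOL_PACKET). Most integer options follow
 * the same copy-and-flip pattern; an illustrative userspace sketch (not
 * part of this file) selecting TPACKET_V3 before configuring a ring:
 *
 *	int ver = TPACKET_V3;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 */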
3713static int
Olivier Deprez157378f2022-04-04 15:47:50 +02003714packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3715 unsigned int optlen)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003716{
3717 struct sock *sk = sock->sk;
3718 struct packet_sock *po = pkt_sk(sk);
3719 int ret;
3720
3721 if (level != SOL_PACKET)
3722 return -ENOPROTOOPT;
3723
3724 switch (optname) {
3725 case PACKET_ADD_MEMBERSHIP:
3726 case PACKET_DROP_MEMBERSHIP:
3727 {
3728 struct packet_mreq_max mreq;
3729 int len = optlen;
3730 memset(&mreq, 0, sizeof(mreq));
3731 if (len < sizeof(struct packet_mreq))
3732 return -EINVAL;
3733 if (len > sizeof(mreq))
3734 len = sizeof(mreq);
Olivier Deprez157378f2022-04-04 15:47:50 +02003735 if (copy_from_sockptr(&mreq, optval, len))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003736 return -EFAULT;
3737 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3738 return -EINVAL;
3739 if (optname == PACKET_ADD_MEMBERSHIP)
3740 ret = packet_mc_add(sk, &mreq);
3741 else
3742 ret = packet_mc_drop(sk, &mreq);
3743 return ret;
3744 }
3745
3746 case PACKET_RX_RING:
3747 case PACKET_TX_RING:
3748 {
3749 union tpacket_req_u req_u;
3750 int len;
3751
3752 lock_sock(sk);
3753 switch (po->tp_version) {
3754 case TPACKET_V1:
3755 case TPACKET_V2:
3756 len = sizeof(req_u.req);
3757 break;
3758 case TPACKET_V3:
3759 default:
3760 len = sizeof(req_u.req3);
3761 break;
3762 }
3763 if (optlen < len) {
3764 ret = -EINVAL;
3765 } else {
Olivier Deprez157378f2022-04-04 15:47:50 +02003766 if (copy_from_sockptr(&req_u.req, optval, len))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003767 ret = -EFAULT;
3768 else
3769 ret = packet_set_ring(sk, &req_u, 0,
3770 optname == PACKET_TX_RING);
3771 }
3772 release_sock(sk);
3773 return ret;
3774 }
3775 case PACKET_COPY_THRESH:
3776 {
3777 int val;
3778
3779 if (optlen != sizeof(val))
3780 return -EINVAL;
Olivier Deprez157378f2022-04-04 15:47:50 +02003781 if (copy_from_sockptr(&val, optval, sizeof(val)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003782 return -EFAULT;
3783
3784 pkt_sk(sk)->copy_thresh = val;
3785 return 0;
3786 }
3787 case PACKET_VERSION:
3788 {
3789 int val;
3790
3791 if (optlen != sizeof(val))
3792 return -EINVAL;
Olivier Deprez157378f2022-04-04 15:47:50 +02003793 if (copy_from_sockptr(&val, optval, sizeof(val)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003794 return -EFAULT;
3795 switch (val) {
3796 case TPACKET_V1:
3797 case TPACKET_V2:
3798 case TPACKET_V3:
3799 break;
3800 default:
3801 return -EINVAL;
3802 }
3803 lock_sock(sk);
3804 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3805 ret = -EBUSY;
3806 } else {
3807 po->tp_version = val;
3808 ret = 0;
3809 }
3810 release_sock(sk);
3811 return ret;
3812 }
3813 case PACKET_RESERVE:
3814 {
3815 unsigned int val;
3816
3817 if (optlen != sizeof(val))
3818 return -EINVAL;
Olivier Deprez157378f2022-04-04 15:47:50 +02003819 if (copy_from_sockptr(&val, optval, sizeof(val)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003820 return -EFAULT;
3821 if (val > INT_MAX)
3822 return -EINVAL;
3823 lock_sock(sk);
3824 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3825 ret = -EBUSY;
3826 } else {
3827 po->tp_reserve = val;
3828 ret = 0;
3829 }
3830 release_sock(sk);
3831 return ret;
3832 }
3833 case PACKET_LOSS:
3834 {
3835 unsigned int val;
3836
3837 if (optlen != sizeof(val))
3838 return -EINVAL;
Olivier Deprez157378f2022-04-04 15:47:50 +02003839 if (copy_from_sockptr(&val, optval, sizeof(val)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003840 return -EFAULT;
3841
3842 lock_sock(sk);
3843 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3844 ret = -EBUSY;
3845 } else {
3846 po->tp_loss = !!val;
3847 ret = 0;
3848 }
3849 release_sock(sk);
3850 return ret;
3851 }
3852 case PACKET_AUXDATA:
3853 {
3854 int val;
3855
3856 if (optlen < sizeof(val))
3857 return -EINVAL;
Olivier Deprez157378f2022-04-04 15:47:50 +02003858 if (copy_from_sockptr(&val, optval, sizeof(val)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003859 return -EFAULT;
3860
3861 lock_sock(sk);
3862 po->auxdata = !!val;
3863 release_sock(sk);
3864 return 0;
3865 }
3866 case PACKET_ORIGDEV:
3867 {
3868 int val;
3869
3870 if (optlen < sizeof(val))
3871 return -EINVAL;
Olivier Deprez157378f2022-04-04 15:47:50 +02003872 if (copy_from_sockptr(&val, optval, sizeof(val)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003873 return -EFAULT;
3874
3875 lock_sock(sk);
3876 po->origdev = !!val;
3877 release_sock(sk);
3878 return 0;
3879 }
3880 case PACKET_VNET_HDR:
3881 {
3882 int val;
3883
3884 if (sock->type != SOCK_RAW)
3885 return -EINVAL;
3886 if (optlen < sizeof(val))
3887 return -EINVAL;
Olivier Deprez157378f2022-04-04 15:47:50 +02003888 if (copy_from_sockptr(&val, optval, sizeof(val)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003889 return -EFAULT;
3890
3891 lock_sock(sk);
3892 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3893 ret = -EBUSY;
3894 } else {
3895 po->has_vnet_hdr = !!val;
3896 ret = 0;
3897 }
3898 release_sock(sk);
3899 return ret;
3900 }
3901 case PACKET_TIMESTAMP:
3902 {
3903 int val;
3904
3905 if (optlen != sizeof(val))
3906 return -EINVAL;
Olivier Deprez157378f2022-04-04 15:47:50 +02003907 if (copy_from_sockptr(&val, optval, sizeof(val)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003908 return -EFAULT;
3909
3910 po->tp_tstamp = val;
3911 return 0;
3912 }
3913 case PACKET_FANOUT:
3914 {
Olivier Deprez157378f2022-04-04 15:47:50 +02003915 struct fanout_args args = { 0 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003916
Olivier Deprez157378f2022-04-04 15:47:50 +02003917 if (optlen != sizeof(int) && optlen != sizeof(args))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003918 return -EINVAL;
Olivier Deprez157378f2022-04-04 15:47:50 +02003919 if (copy_from_sockptr(&args, optval, optlen))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003920 return -EFAULT;
3921
Olivier Deprez157378f2022-04-04 15:47:50 +02003922 return fanout_add(sk, &args);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003923 }
3924 case PACKET_FANOUT_DATA:
3925 {
Olivier Deprez157378f2022-04-04 15:47:50 +02003926 /* Paired with the WRITE_ONCE() in fanout_add() */
3927 if (!READ_ONCE(po->fanout))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003928 return -EINVAL;
3929
3930 return fanout_set_data(po, optval, optlen);
3931 }
David Brazdil0f672f62019-12-10 10:32:29 +00003932 case PACKET_IGNORE_OUTGOING:
3933 {
3934 int val;
3935
3936 if (optlen != sizeof(val))
3937 return -EINVAL;
Olivier Deprez157378f2022-04-04 15:47:50 +02003938 if (copy_from_sockptr(&val, optval, sizeof(val)))
David Brazdil0f672f62019-12-10 10:32:29 +00003939 return -EFAULT;
3940 if (val < 0 || val > 1)
3941 return -EINVAL;
3942
3943 po->prot_hook.ignore_outgoing = !!val;
3944 return 0;
3945 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003946 case PACKET_TX_HAS_OFF:
3947 {
3948 unsigned int val;
3949
3950 if (optlen != sizeof(val))
3951 return -EINVAL;
Olivier Deprez157378f2022-04-04 15:47:50 +02003952 if (copy_from_sockptr(&val, optval, sizeof(val)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003953 return -EFAULT;
3954
3955 lock_sock(sk);
3956 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3957 ret = -EBUSY;
3958 } else {
3959 po->tp_tx_has_off = !!val;
3960 ret = 0;
3961 }
3962 release_sock(sk);
3963 return ret;
3964 }
3965 case PACKET_QDISC_BYPASS:
3966 {
3967 int val;
3968
3969 if (optlen != sizeof(val))
3970 return -EINVAL;
Olivier Deprez157378f2022-04-04 15:47:50 +02003971 if (copy_from_sockptr(&val, optval, sizeof(val)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003972 return -EFAULT;
3973
3974 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3975 return 0;
3976 }
3977 default:
3978 return -ENOPROTOOPT;
3979 }
3980}
3981
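/*
 * Read side of the SOL_PACKET options. Illustrative userspace sketch (not
 * part of this file, assuming the default TPACKET_V1/V2 stats layout)
 * fetching the counters, which also resets them as done in the
 * PACKET_STATISTICS case below:
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *	printf("received %u dropped %u\n", st.tp_packets, st.tp_drops);
 */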
3982static int packet_getsockopt(struct socket *sock, int level, int optname,
3983 char __user *optval, int __user *optlen)
3984{
3985 int len;
3986 int val, lv = sizeof(val);
3987 struct sock *sk = sock->sk;
3988 struct packet_sock *po = pkt_sk(sk);
3989 void *data = &val;
3990 union tpacket_stats_u st;
3991 struct tpacket_rollover_stats rstats;
David Brazdil0f672f62019-12-10 10:32:29 +00003992 int drops;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003993
3994 if (level != SOL_PACKET)
3995 return -ENOPROTOOPT;
3996
3997 if (get_user(len, optlen))
3998 return -EFAULT;
3999
4000 if (len < 0)
4001 return -EINVAL;
4002
4003 switch (optname) {
4004 case PACKET_STATISTICS:
4005 spin_lock_bh(&sk->sk_receive_queue.lock);
4006 memcpy(&st, &po->stats, sizeof(st));
4007 memset(&po->stats, 0, sizeof(po->stats));
4008 spin_unlock_bh(&sk->sk_receive_queue.lock);
David Brazdil0f672f62019-12-10 10:32:29 +00004009 drops = atomic_xchg(&po->tp_drops, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004010
4011 if (po->tp_version == TPACKET_V3) {
4012 lv = sizeof(struct tpacket_stats_v3);
David Brazdil0f672f62019-12-10 10:32:29 +00004013 st.stats3.tp_drops = drops;
4014 st.stats3.tp_packets += drops;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004015 data = &st.stats3;
4016 } else {
4017 lv = sizeof(struct tpacket_stats);
David Brazdil0f672f62019-12-10 10:32:29 +00004018 st.stats1.tp_drops = drops;
4019 st.stats1.tp_packets += drops;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004020 data = &st.stats1;
4021 }
4022
4023 break;
4024 case PACKET_AUXDATA:
4025 val = po->auxdata;
4026 break;
4027 case PACKET_ORIGDEV:
4028 val = po->origdev;
4029 break;
4030 case PACKET_VNET_HDR:
4031 val = po->has_vnet_hdr;
4032 break;
4033 case PACKET_VERSION:
4034 val = po->tp_version;
4035 break;
4036 case PACKET_HDRLEN:
4037 if (len > sizeof(int))
4038 len = sizeof(int);
4039 if (len < sizeof(int))
4040 return -EINVAL;
4041 if (copy_from_user(&val, optval, len))
4042 return -EFAULT;
4043 switch (val) {
4044 case TPACKET_V1:
4045 val = sizeof(struct tpacket_hdr);
4046 break;
4047 case TPACKET_V2:
4048 val = sizeof(struct tpacket2_hdr);
4049 break;
4050 case TPACKET_V3:
4051 val = sizeof(struct tpacket3_hdr);
4052 break;
4053 default:
4054 return -EINVAL;
4055 }
4056 break;
4057 case PACKET_RESERVE:
4058 val = po->tp_reserve;
4059 break;
4060 case PACKET_LOSS:
4061 val = po->tp_loss;
4062 break;
4063 case PACKET_TIMESTAMP:
4064 val = po->tp_tstamp;
4065 break;
4066 case PACKET_FANOUT:
4067 val = (po->fanout ?
4068 ((u32)po->fanout->id |
4069 ((u32)po->fanout->type << 16) |
4070 ((u32)po->fanout->flags << 24)) :
4071 0);
4072 break;
David Brazdil0f672f62019-12-10 10:32:29 +00004073 case PACKET_IGNORE_OUTGOING:
4074 val = po->prot_hook.ignore_outgoing;
4075 break;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004076 case PACKET_ROLLOVER_STATS:
4077 if (!po->rollover)
4078 return -EINVAL;
4079 rstats.tp_all = atomic_long_read(&po->rollover->num);
4080 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4081 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4082 data = &rstats;
4083 lv = sizeof(rstats);
4084 break;
4085 case PACKET_TX_HAS_OFF:
4086 val = po->tp_tx_has_off;
4087 break;
4088 case PACKET_QDISC_BYPASS:
4089 val = packet_use_direct_xmit(po);
4090 break;
4091 default:
4092 return -ENOPROTOOPT;
4093 }
4094
4095 if (len > lv)
4096 len = lv;
4097 if (put_user(len, optlen))
4098 return -EFAULT;
4099 if (copy_to_user(optval, data, len))
4100 return -EFAULT;
4101 return 0;
4102}
4103
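/*
 * Netdevice notifier: on NETDEV_UNREGISTER the multicast list entries for
 * the departing device are dropped and the protocol hook is unbound; on
 * NETDEV_DOWN the hook is unregistered but the binding is kept so that a
 * later NETDEV_UP can re-register it.
 */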
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004104static int packet_notifier(struct notifier_block *this,
4105 unsigned long msg, void *ptr)
4106{
4107 struct sock *sk;
4108 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4109 struct net *net = dev_net(dev);
4110
4111 rcu_read_lock();
4112 sk_for_each_rcu(sk, &net->packet.sklist) {
4113 struct packet_sock *po = pkt_sk(sk);
4114
4115 switch (msg) {
4116 case NETDEV_UNREGISTER:
4117 if (po->mclist)
4118 packet_dev_mclist_delete(dev, &po->mclist);
Olivier Deprez157378f2022-04-04 15:47:50 +02004119 fallthrough;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004120
4121 case NETDEV_DOWN:
4122 if (dev->ifindex == po->ifindex) {
4123 spin_lock(&po->bind_lock);
4124 if (po->running) {
4125 __unregister_prot_hook(sk, false);
4126 sk->sk_err = ENETDOWN;
4127 if (!sock_flag(sk, SOCK_DEAD))
4128 sk->sk_error_report(sk);
4129 }
4130 if (msg == NETDEV_UNREGISTER) {
4131 packet_cached_dev_reset(po);
Olivier Deprez0e641232021-09-23 10:07:05 +02004132 WRITE_ONCE(po->ifindex, -1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004133 if (po->prot_hook.dev)
4134 dev_put(po->prot_hook.dev);
4135 po->prot_hook.dev = NULL;
4136 }
4137 spin_unlock(&po->bind_lock);
4138 }
4139 break;
4140 case NETDEV_UP:
4141 if (dev->ifindex == po->ifindex) {
4142 spin_lock(&po->bind_lock);
4143 if (po->num)
4144 register_prot_hook(sk);
4145 spin_unlock(&po->bind_lock);
4146 }
4147 break;
4148 }
4149 }
4150 rcu_read_unlock();
4151 return NOTIFY_DONE;
4152}
4153
4154
4155static int packet_ioctl(struct socket *sock, unsigned int cmd,
4156 unsigned long arg)
4157{
4158 struct sock *sk = sock->sk;
4159
4160 switch (cmd) {
4161 case SIOCOUTQ:
4162 {
4163 int amount = sk_wmem_alloc_get(sk);
4164
4165 return put_user(amount, (int __user *)arg);
4166 }
4167 case SIOCINQ:
4168 {
4169 struct sk_buff *skb;
4170 int amount = 0;
4171
4172 spin_lock_bh(&sk->sk_receive_queue.lock);
4173 skb = skb_peek(&sk->sk_receive_queue);
4174 if (skb)
4175 amount = skb->len;
4176 spin_unlock_bh(&sk->sk_receive_queue.lock);
4177 return put_user(amount, (int __user *)arg);
4178 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004179#ifdef CONFIG_INET
4180 case SIOCADDRT:
4181 case SIOCDELRT:
4182 case SIOCDARP:
4183 case SIOCGARP:
4184 case SIOCSARP:
4185 case SIOCGIFADDR:
4186 case SIOCSIFADDR:
4187 case SIOCGIFBRDADDR:
4188 case SIOCSIFBRDADDR:
4189 case SIOCGIFNETMASK:
4190 case SIOCSIFNETMASK:
4191 case SIOCGIFDSTADDR:
4192 case SIOCSIFDSTADDR:
4193 case SIOCSIFFLAGS:
4194 return inet_dgram_ops.ioctl(sock, cmd, arg);
4195#endif
4196
4197 default:
4198 return -ENOIOCTLCMD;
4199 }
4200 return 0;
4201}
4202
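/*
 * Ring-aware poll(): with a mapped RX ring, readability is also reported
 * when the ring already holds a frame that is no longer kernel-owned.
 * Illustrative userspace sketch (not part of this file):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	// then walk frames whose tp_status has TP_STATUS_USER set
 */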
4203static __poll_t packet_poll(struct file *file, struct socket *sock,
4204 poll_table *wait)
4205{
4206 struct sock *sk = sock->sk;
4207 struct packet_sock *po = pkt_sk(sk);
4208 __poll_t mask = datagram_poll(file, sock, wait);
4209
4210 spin_lock_bh(&sk->sk_receive_queue.lock);
4211 if (po->rx_ring.pg_vec) {
4212 if (!packet_previous_rx_frame(po, &po->rx_ring,
4213 TP_STATUS_KERNEL))
4214 mask |= EPOLLIN | EPOLLRDNORM;
4215 }
David Brazdil0f672f62019-12-10 10:32:29 +00004216 packet_rcv_try_clear_pressure(po);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004217 spin_unlock_bh(&sk->sk_receive_queue.lock);
4218 spin_lock_bh(&sk->sk_write_queue.lock);
4219 if (po->tx_ring.pg_vec) {
4220 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4221 mask |= EPOLLOUT | EPOLLWRNORM;
4222 }
4223 spin_unlock_bh(&sk->sk_write_queue.lock);
4224 return mask;
4225}
4226
4227
4228/* Dirty? Well, I still did not learn better way to account
4229 * for user mmaps.
4230 */
4231
4232static void packet_mm_open(struct vm_area_struct *vma)
4233{
4234 struct file *file = vma->vm_file;
4235 struct socket *sock = file->private_data;
4236 struct sock *sk = sock->sk;
4237
4238 if (sk)
4239 atomic_inc(&pkt_sk(sk)->mapped);
4240}
4241
4242static void packet_mm_close(struct vm_area_struct *vma)
4243{
4244 struct file *file = vma->vm_file;
4245 struct socket *sock = file->private_data;
4246 struct sock *sk = sock->sk;
4247
4248 if (sk)
4249 atomic_dec(&pkt_sk(sk)->mapped);
4250}
4251
4252static const struct vm_operations_struct packet_mmap_ops = {
4253 .open = packet_mm_open,
4254 .close = packet_mm_close,
4255};
4256
4257static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4258 unsigned int len)
4259{
4260 int i;
4261
4262 for (i = 0; i < len; i++) {
4263 if (likely(pg_vec[i].buffer)) {
4264 if (is_vmalloc_addr(pg_vec[i].buffer))
4265 vfree(pg_vec[i].buffer);
4266 else
4267 free_pages((unsigned long)pg_vec[i].buffer,
4268 order);
4269 pg_vec[i].buffer = NULL;
4270 }
4271 }
4272 kfree(pg_vec);
4273}
4274
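/*
 * Ring memory is allocated one block at a time: try physically contiguous
 * pages first, fall back to vmalloc, and finally retry the page allocator
 * without __GFP_NORETRY so it may reclaim harder.
 */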
4275static char *alloc_one_pg_vec_page(unsigned long order)
4276{
4277 char *buffer;
4278 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4279 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4280
4281 buffer = (char *) __get_free_pages(gfp_flags, order);
4282 if (buffer)
4283 return buffer;
4284
4285 /* __get_free_pages failed, fall back to vmalloc */
4286 buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4287 if (buffer)
4288 return buffer;
4289
4290 /* vmalloc failed, let's dig into swap here */
4291 gfp_flags &= ~__GFP_NORETRY;
4292 buffer = (char *) __get_free_pages(gfp_flags, order);
4293 if (buffer)
4294 return buffer;
4295
4296 /* complete and utter failure */
4297 return NULL;
4298}
4299
4300static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4301{
4302 unsigned int block_nr = req->tp_block_nr;
4303 struct pgv *pg_vec;
4304 int i;
4305
David Brazdil0f672f62019-12-10 10:32:29 +00004306 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004307 if (unlikely(!pg_vec))
4308 goto out;
4309
4310 for (i = 0; i < block_nr; i++) {
4311 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4312 if (unlikely(!pg_vec[i].buffer))
4313 goto out_free_pgvec;
4314 }
4315
4316out:
4317 return pg_vec;
4318
4319out_free_pgvec:
4320 free_pg_vec(pg_vec, order, block_nr);
4321 pg_vec = NULL;
4322 goto out;
4323}
4324
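/*
 * Ring (re)configuration. The request must satisfy the sanity checks
 * below: page-aligned block size, frame size a multiple of
 * TPACKET_ALIGNMENT, and frames_per_block * tp_block_nr == tp_frame_nr.
 * Illustrative userspace sketch for a TPACKET_V2 RX ring (the sizes are
 * an assumption, not a recommendation):
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 1 << 16,	// 64 KiB, page aligned
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 1 << 11,	// 2 KiB, 32 frames per block
 *		.tp_frame_nr   = (1 << 16) / (1 << 11) * 64,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */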
4325static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4326 int closing, int tx_ring)
4327{
4328 struct pgv *pg_vec = NULL;
4329 struct packet_sock *po = pkt_sk(sk);
Olivier Deprez0e641232021-09-23 10:07:05 +02004330 unsigned long *rx_owner_map = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004331 int was_running, order = 0;
4332 struct packet_ring_buffer *rb;
4333 struct sk_buff_head *rb_queue;
4334 __be16 num;
Olivier Deprez157378f2022-04-04 15:47:50 +02004335 int err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004336 /* Alias added to keep code churn minimal */
4337 struct tpacket_req *req = &req_u->req;
4338
4339 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4340 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4341
4342 err = -EBUSY;
4343 if (!closing) {
4344 if (atomic_read(&po->mapped))
4345 goto out;
4346 if (packet_read_pending(rb))
4347 goto out;
4348 }
4349
4350 if (req->tp_block_nr) {
4351 unsigned int min_frame_size;
4352
4353 /* Sanity tests and some calculations */
4354 err = -EBUSY;
4355 if (unlikely(rb->pg_vec))
4356 goto out;
4357
4358 switch (po->tp_version) {
4359 case TPACKET_V1:
4360 po->tp_hdrlen = TPACKET_HDRLEN;
4361 break;
4362 case TPACKET_V2:
4363 po->tp_hdrlen = TPACKET2_HDRLEN;
4364 break;
4365 case TPACKET_V3:
4366 po->tp_hdrlen = TPACKET3_HDRLEN;
4367 break;
4368 }
4369
4370 err = -EINVAL;
4371 if (unlikely((int)req->tp_block_size <= 0))
4372 goto out;
4373 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4374 goto out;
4375 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4376 if (po->tp_version >= TPACKET_V3 &&
4377 req->tp_block_size <
4378 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4379 goto out;
4380 if (unlikely(req->tp_frame_size < min_frame_size))
4381 goto out;
4382 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4383 goto out;
4384
4385 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4386 if (unlikely(rb->frames_per_block == 0))
4387 goto out;
David Brazdil0f672f62019-12-10 10:32:29 +00004388 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004389 goto out;
4390 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4391 req->tp_frame_nr))
4392 goto out;
4393
4394 err = -ENOMEM;
4395 order = get_order(req->tp_block_size);
4396 pg_vec = alloc_pg_vec(req, order);
4397 if (unlikely(!pg_vec))
4398 goto out;
4399 switch (po->tp_version) {
4400 case TPACKET_V3:
4401 /* Block transmit is not supported yet */
4402 if (!tx_ring) {
4403 init_prb_bdqc(po, rb, pg_vec, req_u);
4404 } else {
4405 struct tpacket_req3 *req3 = &req_u->req3;
4406
4407 if (req3->tp_retire_blk_tov ||
4408 req3->tp_sizeof_priv ||
4409 req3->tp_feature_req_word) {
4410 err = -EINVAL;
David Brazdil0f672f62019-12-10 10:32:29 +00004411 goto out_free_pg_vec;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004412 }
4413 }
4414 break;
4415 default:
Olivier Deprez0e641232021-09-23 10:07:05 +02004416 if (!tx_ring) {
4417 rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4418 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4419 if (!rx_owner_map)
4420 goto out_free_pg_vec;
4421 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004422 break;
4423 }
4424 }
4425 /* Done */
4426 else {
4427 err = -EINVAL;
4428 if (unlikely(req->tp_frame_nr))
4429 goto out;
4430 }
4431
4432
4433 /* Detach socket from network */
4434 spin_lock(&po->bind_lock);
4435 was_running = po->running;
4436 num = po->num;
4437 if (was_running) {
Olivier Deprez0e641232021-09-23 10:07:05 +02004438 WRITE_ONCE(po->num, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004439 __unregister_prot_hook(sk, false);
4440 }
4441 spin_unlock(&po->bind_lock);
4442
4443 synchronize_net();
4444
4445 err = -EBUSY;
4446 mutex_lock(&po->pg_vec_lock);
4447 if (closing || atomic_read(&po->mapped) == 0) {
4448 err = 0;
4449 spin_lock_bh(&rb_queue->lock);
4450 swap(rb->pg_vec, pg_vec);
Olivier Deprez0e641232021-09-23 10:07:05 +02004451 if (po->tp_version <= TPACKET_V2)
4452 swap(rb->rx_owner_map, rx_owner_map);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004453 rb->frame_max = (req->tp_frame_nr - 1);
4454 rb->head = 0;
4455 rb->frame_size = req->tp_frame_size;
4456 spin_unlock_bh(&rb_queue->lock);
4457
4458 swap(rb->pg_vec_order, order);
4459 swap(rb->pg_vec_len, req->tp_block_nr);
4460
4461 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4462 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4463 tpacket_rcv : packet_rcv;
4464 skb_queue_purge(rb_queue);
4465 if (atomic_read(&po->mapped))
4466 pr_err("packet_mmap: vma is busy: %d\n",
4467 atomic_read(&po->mapped));
4468 }
4469 mutex_unlock(&po->pg_vec_lock);
4470
4471 spin_lock(&po->bind_lock);
4472 if (was_running) {
Olivier Deprez0e641232021-09-23 10:07:05 +02004473 WRITE_ONCE(po->num, num);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004474 register_prot_hook(sk);
4475 }
4476 spin_unlock(&po->bind_lock);
4477 if (pg_vec && (po->tp_version > TPACKET_V2)) {
4478 /* Because we don't support block-based V3 on tx-ring */
4479 if (!tx_ring)
4480 prb_shutdown_retire_blk_timer(po, rb_queue);
4481 }
4482
David Brazdil0f672f62019-12-10 10:32:29 +00004483out_free_pg_vec:
Olivier Deprez157378f2022-04-04 15:47:50 +02004484 if (pg_vec) {
4485 bitmap_free(rx_owner_map);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004486 free_pg_vec(pg_vec, order, req->tp_block_nr);
Olivier Deprez157378f2022-04-04 15:47:50 +02004487 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004488out:
4489 return err;
4490}
4491
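/*
 * A single mmap() with offset 0 must cover all configured rings (RX first,
 * then TX), so the requested length has to equal the combined ring size.
 * Illustrative userspace sketch (not part of this file, RX ring only):
 *
 *	size_t len = req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */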
4492static int packet_mmap(struct file *file, struct socket *sock,
4493 struct vm_area_struct *vma)
4494{
4495 struct sock *sk = sock->sk;
4496 struct packet_sock *po = pkt_sk(sk);
4497 unsigned long size, expected_size;
4498 struct packet_ring_buffer *rb;
4499 unsigned long start;
4500 int err = -EINVAL;
4501 int i;
4502
4503 if (vma->vm_pgoff)
4504 return -EINVAL;
4505
4506 mutex_lock(&po->pg_vec_lock);
4507
4508 expected_size = 0;
4509 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4510 if (rb->pg_vec) {
4511 expected_size += rb->pg_vec_len
4512 * rb->pg_vec_pages
4513 * PAGE_SIZE;
4514 }
4515 }
4516
4517 if (expected_size == 0)
4518 goto out;
4519
4520 size = vma->vm_end - vma->vm_start;
4521 if (size != expected_size)
4522 goto out;
4523
4524 start = vma->vm_start;
4525 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4526 if (rb->pg_vec == NULL)
4527 continue;
4528
4529 for (i = 0; i < rb->pg_vec_len; i++) {
4530 struct page *page;
4531 void *kaddr = rb->pg_vec[i].buffer;
4532 int pg_num;
4533
4534 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4535 page = pgv_to_page(kaddr);
4536 err = vm_insert_page(vma, start, page);
4537 if (unlikely(err))
4538 goto out;
4539 start += PAGE_SIZE;
4540 kaddr += PAGE_SIZE;
4541 }
4542 }
4543 }
4544
4545 atomic_inc(&po->mapped);
4546 vma->vm_ops = &packet_mmap_ops;
4547 err = 0;
4548
4549out:
4550 mutex_unlock(&po->pg_vec_lock);
4551 return err;
4552}
4553
4554static const struct proto_ops packet_ops_spkt = {
4555 .family = PF_PACKET,
4556 .owner = THIS_MODULE,
4557 .release = packet_release,
4558 .bind = packet_bind_spkt,
4559 .connect = sock_no_connect,
4560 .socketpair = sock_no_socketpair,
4561 .accept = sock_no_accept,
4562 .getname = packet_getname_spkt,
4563 .poll = datagram_poll,
4564 .ioctl = packet_ioctl,
David Brazdil0f672f62019-12-10 10:32:29 +00004565 .gettstamp = sock_gettstamp,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004566 .listen = sock_no_listen,
4567 .shutdown = sock_no_shutdown,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004568 .sendmsg = packet_sendmsg_spkt,
4569 .recvmsg = packet_recvmsg,
4570 .mmap = sock_no_mmap,
4571 .sendpage = sock_no_sendpage,
4572};
4573
4574static const struct proto_ops packet_ops = {
4575 .family = PF_PACKET,
4576 .owner = THIS_MODULE,
4577 .release = packet_release,
4578 .bind = packet_bind,
4579 .connect = sock_no_connect,
4580 .socketpair = sock_no_socketpair,
4581 .accept = sock_no_accept,
4582 .getname = packet_getname,
4583 .poll = packet_poll,
4584 .ioctl = packet_ioctl,
David Brazdil0f672f62019-12-10 10:32:29 +00004585 .gettstamp = sock_gettstamp,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004586 .listen = sock_no_listen,
4587 .shutdown = sock_no_shutdown,
4588 .setsockopt = packet_setsockopt,
4589 .getsockopt = packet_getsockopt,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004590 .sendmsg = packet_sendmsg,
4591 .recvmsg = packet_recvmsg,
4592 .mmap = packet_mmap,
4593 .sendpage = sock_no_sendpage,
4594};
4595
4596static const struct net_proto_family packet_family_ops = {
4597 .family = PF_PACKET,
4598 .create = packet_create,
4599 .owner = THIS_MODULE,
4600};
4601
4602static struct notifier_block packet_netdev_notifier = {
4603 .notifier_call = packet_notifier,
4604};
4605
4606#ifdef CONFIG_PROC_FS
4607
4608static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4609 __acquires(RCU)
4610{
4611 struct net *net = seq_file_net(seq);
4612
4613 rcu_read_lock();
4614 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4615}
4616
4617static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4618{
4619 struct net *net = seq_file_net(seq);
4620 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4621}
4622
4623static void packet_seq_stop(struct seq_file *seq, void *v)
4624 __releases(RCU)
4625{
4626 rcu_read_unlock();
4627}
4628
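/*
 * One line per packet socket in /proc/net/packet; the columns printed
 * below are: sk address, refcount, socket type, protocol (hex), ifindex,
 * running flag, receive queue memory, owning uid and inode.
 */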
4629static int packet_seq_show(struct seq_file *seq, void *v)
4630{
4631 if (v == SEQ_START_TOKEN)
4632 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
4633 else {
4634 struct sock *s = sk_entry(v);
4635 const struct packet_sock *po = pkt_sk(s);
4636
4637 seq_printf(seq,
4638 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
4639 s,
4640 refcount_read(&s->sk_refcnt),
4641 s->sk_type,
Olivier Deprez0e641232021-09-23 10:07:05 +02004642 ntohs(READ_ONCE(po->num)),
4643 READ_ONCE(po->ifindex),
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004644 po->running,
4645 atomic_read(&s->sk_rmem_alloc),
4646 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4647 sock_i_ino(s));
4648 }
4649
4650 return 0;
4651}
4652
4653static const struct seq_operations packet_seq_ops = {
4654 .start = packet_seq_start,
4655 .next = packet_seq_next,
4656 .stop = packet_seq_stop,
4657 .show = packet_seq_show,
4658};
4659#endif
4660
4661static int __net_init packet_net_init(struct net *net)
4662{
4663 mutex_init(&net->packet.sklist_lock);
4664 INIT_HLIST_HEAD(&net->packet.sklist);
4665
Olivier Deprez157378f2022-04-04 15:47:50 +02004666#ifdef CONFIG_PROC_FS
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004667 if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4668 sizeof(struct seq_net_private)))
4669 return -ENOMEM;
Olivier Deprez157378f2022-04-04 15:47:50 +02004670#endif /* CONFIG_PROC_FS */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004671
4672 return 0;
4673}
4674
4675static void __net_exit packet_net_exit(struct net *net)
4676{
4677 remove_proc_entry("packet", net->proc_net);
4678 WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4679}
4680
4681static struct pernet_operations packet_net_ops = {
4682 .init = packet_net_init,
4683 .exit = packet_net_exit,
4684};
4685
4686
4687static void __exit packet_exit(void)
4688{
4689 unregister_netdevice_notifier(&packet_netdev_notifier);
4690 unregister_pernet_subsys(&packet_net_ops);
4691 sock_unregister(PF_PACKET);
4692 proto_unregister(&packet_proto);
4693}
4694
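/*
 * Module init registers, in order: the proto, the PF_PACKET socket family,
 * the per-netns /proc hooks and the netdevice notifier, unwinding in
 * reverse on failure; packet_exit() tears the same things down.
 */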
4695static int __init packet_init(void)
4696{
David Brazdil0f672f62019-12-10 10:32:29 +00004697 int rc;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004698
David Brazdil0f672f62019-12-10 10:32:29 +00004699 rc = proto_register(&packet_proto, 0);
4700 if (rc)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004701 goto out;
David Brazdil0f672f62019-12-10 10:32:29 +00004702 rc = sock_register(&packet_family_ops);
4703 if (rc)
4704 goto out_proto;
4705 rc = register_pernet_subsys(&packet_net_ops);
4706 if (rc)
4707 goto out_sock;
4708 rc = register_netdevice_notifier(&packet_netdev_notifier);
4709 if (rc)
4710 goto out_pernet;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004711
David Brazdil0f672f62019-12-10 10:32:29 +00004712 return 0;
4713
4714out_pernet:
4715 unregister_pernet_subsys(&packet_net_ops);
4716out_sock:
4717 sock_unregister(PF_PACKET);
4718out_proto:
4719 proto_unregister(&packet_proto);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004720out:
4721 return rc;
4722}
4723
4724module_init(packet_init);
4725module_exit(packet_exit);
4726MODULE_LICENSE("GPL");
4727MODULE_ALIAS_NETPROTO(PF_PACKET);