// SPDX-License-Identifier: GPL-2.0-only
/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * The Internet Protocol (IP) output module.
8 *
9 * Authors: Ross Biro
10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11 * Donald Becker, <becker@super.org>
12 * Alan Cox, <Alan.Cox@linux.org>
13 * Richard Underwood
14 * Stefan Becker, <stefanb@yello.ping.de>
15 * Jorge Cwik, <jorge@laser.satlink.net>
16 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
17 * Hirokazu Takahashi, <taka@valinux.co.jp>
18 *
19 * See ip_input.c for original log
20 *
21 * Fixes:
22 * Alan Cox : Missing nonblock feature in ip_build_xmit.
23 * Mike Kilburn : htons() missing in ip_build_xmit.
24 * Bradford Johnson: Fix faulty handling of some frames when
25 * no route is found.
26 * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
27 * (in case if packet not accepted by
28 * output firewall rules)
29 * Mike McLagan : Routing by source
30 * Alexey Kuznetsov: use new route cache
31 * Andi Kleen: Fix broken PMTU recovery and remove
32 * some redundant tests.
33 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
34 * Andi Kleen : Replace ip_reply with ip_send_reply.
35 * Andi Kleen : Split fast and slow ip_build_xmit path
36 * for decreased register pressure on x86
 * and more readability.
38 * Marc Boucher : When call_out_firewall returns FW_QUEUE,
39 * silently drop skb instead of failing with -EPERM.
40 * Detlev Wengorz : Copy protocol for fragments.
41 * Hirokazu Takahashi: HW checksumming for outgoing UDP
42 * datagrams.
43 * Hirokazu Takahashi: sendfile() on UDP works now.
44 */
45
46#include <linux/uaccess.h>
47#include <linux/module.h>
48#include <linux/types.h>
49#include <linux/kernel.h>
50#include <linux/mm.h>
51#include <linux/string.h>
52#include <linux/errno.h>
53#include <linux/highmem.h>
54#include <linux/slab.h>
55
56#include <linux/socket.h>
57#include <linux/sockios.h>
58#include <linux/in.h>
59#include <linux/inet.h>
60#include <linux/netdevice.h>
61#include <linux/etherdevice.h>
62#include <linux/proc_fs.h>
63#include <linux/stat.h>
64#include <linux/init.h>
65
66#include <net/snmp.h>
67#include <net/ip.h>
68#include <net/protocol.h>
69#include <net/route.h>
70#include <net/xfrm.h>
71#include <linux/skbuff.h>
72#include <net/sock.h>
73#include <net/arp.h>
74#include <net/icmp.h>
75#include <net/checksum.h>
76#include <net/inetpeer.h>
#include <net/inet_ecn.h>
#include <net/lwtunnel.h>
79#include <linux/bpf-cgroup.h>
80#include <linux/igmp.h>
81#include <linux/netfilter_ipv4.h>
82#include <linux/netfilter_bridge.h>
83#include <linux/netlink.h>
84#include <linux/tcp.h>
85
86static int
87ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
88 unsigned int mtu,
89 int (*output)(struct net *, struct sock *, struct sk_buff *));
90
91/* Generate a checksum for an outgoing IP datagram. */
92void ip_send_check(struct iphdr *iph)
93{
94 iph->check = 0;
95 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
96}
97EXPORT_SYMBOL(ip_send_check);
98
99int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
100{
101 struct iphdr *iph = ip_hdr(skb);
102
103 iph->tot_len = htons(skb->len);
104 ip_send_check(iph);
105
106 /* if egress device is enslaved to an L3 master device pass the
107 * skb to its handler for processing
108 */
109 skb = l3mdev_ip_out(sk, skb);
110 if (unlikely(!skb))
111 return 0;
112
113 skb->protocol = htons(ETH_P_IP);
114
115 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
116 net, sk, skb, NULL, skb_dst(skb)->dev,
117 dst_output);
118}
119
120int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
121{
122 int err;
123
124 err = __ip_local_out(net, sk, skb);
125 if (likely(err == 1))
126 err = dst_output(net, sk, skb);
127
128 return err;
129}
130EXPORT_SYMBOL_GPL(ip_local_out);
131
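/* Pick the TTL for an outgoing unicast packet: use the socket's unicast TTL
 * when it has been set (e.g. via the IP_TTL socket option), otherwise fall
 * back to the route's default hop limit.
 */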
132static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
133{
134 int ttl = inet->uc_ttl;
135
136 if (ttl < 0)
137 ttl = ip4_dst_hoplimit(dst);
138 return ttl;
139}
140
141/*
142 * Add an ip header to a skbuff and send it out.
143 *
144 */
145int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
146 __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
147{
148 struct inet_sock *inet = inet_sk(sk);
149 struct rtable *rt = skb_rtable(skb);
150 struct net *net = sock_net(sk);
151 struct iphdr *iph;
152
153 /* Build the IP header. */
154 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
155 skb_reset_network_header(skb);
156 iph = ip_hdr(skb);
157 iph->version = 4;
158 iph->ihl = 5;
159 iph->tos = inet->tos;
160 iph->ttl = ip_select_ttl(inet, &rt->dst);
161 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
162 iph->saddr = saddr;
163 iph->protocol = sk->sk_protocol;
164 if (ip_dont_fragment(sk, &rt->dst)) {
165 iph->frag_off = htons(IP_DF);
166 iph->id = 0;
167 } else {
168 iph->frag_off = 0;
169 __ip_select_ident(net, iph, 1);
170 }
171
172 if (opt && opt->opt.optlen) {
173 iph->ihl += opt->opt.optlen>>2;
174 ip_options_build(skb, &opt->opt, daddr, rt, 0);
175 }
176
177 skb->priority = sk->sk_priority;
178 if (!skb->mark)
179 skb->mark = sk->sk_mark;
180
181 /* Send it out. */
182 return ip_local_out(net, skb->sk, skb);
183}
184EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
185
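/* Final transmit step: resolve the neighbour entry for the next hop
 * (handling an IPv6 gateway of a v4-via-v6 route, see is_v6gw) and hand the
 * skb to the device layer, expanding headroom first if the link-layer header
 * does not fit.
 */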
186static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
187{
188 struct dst_entry *dst = skb_dst(skb);
189 struct rtable *rt = (struct rtable *)dst;
190 struct net_device *dev = dst->dev;
191 unsigned int hh_len = LL_RESERVED_SPACE(dev);
192 struct neighbour *neigh;
	bool is_v6gw = false;

195 if (rt->rt_type == RTN_MULTICAST) {
196 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
197 } else if (rt->rt_type == RTN_BROADCAST)
198 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);
199
200 /* Be paranoid, rather than too clever. */
201 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
202 struct sk_buff *skb2;
203
204 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
205 if (!skb2) {
206 kfree_skb(skb);
207 return -ENOMEM;
208 }
209 if (skb->sk)
210 skb_set_owner_w(skb2, skb->sk);
211 consume_skb(skb);
212 skb = skb2;
213 }
214
215 if (lwtunnel_xmit_redirect(dst->lwtstate)) {
216 int res = lwtunnel_xmit(skb);
217
218 if (res < 0 || res == LWTUNNEL_XMIT_DONE)
219 return res;
220 }
221
	rcu_read_lock_bh();
	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, cannot use the cached header */
		res = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();
234
235 net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
236 __func__);
237 kfree_skb(skb);
238 return -EINVAL;
239}
240
241static int ip_finish_output_gso(struct net *net, struct sock *sk,
242 struct sk_buff *skb, unsigned int mtu)
243{
244 netdev_features_t features;
245 struct sk_buff *segs;
246 int ret = 0;
247
248 /* common case: seglen is <= mtu
249 */
250 if (skb_gso_validate_network_len(skb, mtu))
251 return ip_finish_output2(net, sk, skb);
252
253 /* Slowpath - GSO segment length exceeds the egress MTU.
254 *
255 * This can happen in several cases:
256 * - Forwarding of a TCP GRO skb, when DF flag is not set.
257 * - Forwarding of an skb that arrived on a virtualization interface
258 * (virtio-net/vhost/tap) with TSO/GSO size set by other network
259 * stack.
260 * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
261 * interface with a smaller MTU.
262 * - Arriving GRO skb (or GSO skb in a virtualized environment) that is
263 * bridged to a NETIF_F_TSO tunnel stacked over an interface with an
 * insufficient MTU.
265 */
266 features = netif_skb_features(skb);
267 BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
268 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
269 if (IS_ERR_OR_NULL(segs)) {
270 kfree_skb(skb);
271 return -ENOMEM;
272 }
273
274 consume_skb(skb);
275
276 do {
277 struct sk_buff *nskb = segs->next;
278 int err;
279
David Brazdil0f672f62019-12-10 10:32:29 +0000280 skb_mark_not_on_list(segs);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000281 err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
282
283 if (err && ret == 0)
284 ret = err;
285 segs = nskb;
286 } while (segs);
287
288 return ret;
289}
290
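/* Post-routing work common to unicast and multicast output: re-run
 * dst_output() if an xfrm policy rerouted the packet after SNAT, segment GSO
 * packets, and fragment anything still larger than the path MTU.
 */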
static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || IPCB(skb)->frag_max_size)
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}
311
David Brazdil0f672f62019-12-10 10:32:29 +0000312static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000313{
314 int ret;
315
316 ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
David Brazdil0f672f62019-12-10 10:32:29 +0000317 switch (ret) {
318 case NET_XMIT_SUCCESS:
319 return __ip_finish_output(net, sk, skb);
320 case NET_XMIT_CN:
321 return __ip_finish_output(net, sk, skb) ? : ret;
322 default:
323 kfree_skb(skb);
324 return ret;
325 }
326}
327
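/* Loopback path for multicast/broadcast copies: run the cgroup egress BPF
 * program, then deliver the clone to local listeners via the loopback device.
 */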
328static int ip_mc_finish_output(struct net *net, struct sock *sk,
329 struct sk_buff *skb)
330{
331 struct rtable *new_rt;
332 bool do_cn = false;
333 int ret, err;
334
335 ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
336 switch (ret) {
337 case NET_XMIT_CN:
338 do_cn = true;
339 /* fall through */
340 case NET_XMIT_SUCCESS:
341 break;
342 default:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000343 kfree_skb(skb);
344 return ret;
345 }
346
David Brazdil0f672f62019-12-10 10:32:29 +0000347 /* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
348 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
349 * see ipv4_pktinfo_prepare().
350 */
351 new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
352 if (new_rt) {
353 new_rt->rt_iif = 0;
354 skb_dst_drop(skb);
355 skb_dst_set(skb, &new_rt->dst);
356 }
357
358 err = dev_loopback_xmit(net, sk, skb);
359 return (do_cn && err) ? ret : err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000360}
361
362int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
363{
364 struct rtable *rt = skb_rtable(skb);
365 struct net_device *dev = rt->dst.dev;
366
367 /*
368 * If the indicated interface is up and running, send the packet.
369 */
370 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
371
372 skb->dev = dev;
373 skb->protocol = htons(ETH_P_IP);
374
375 /*
376 * Multicasts are looped back for other local users
377 */
378
379 if (rt->rt_flags&RTCF_MULTICAST) {
380 if (sk_mc_loop(sk)
381#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that were returned after forwarding; they will be
		   dropped by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
390 &&
391 ((rt->rt_flags & RTCF_LOCAL) ||
392 !(IPCB(skb)->flags & IPSKB_FORWARDED))
393#endif
394 ) {
395 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
396 if (newskb)
397 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
398 net, sk, newskb, NULL, newskb->dev,
399 ip_mc_finish_output);
400 }
401
402 /* Multicasts with ttl 0 must not go beyond the host */
403
404 if (ip_hdr(skb)->ttl == 0) {
405 kfree_skb(skb);
406 return 0;
407 }
408 }
409
410 if (rt->rt_flags&RTCF_BROADCAST) {
411 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
412 if (newskb)
413 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
414 net, sk, newskb, NULL, newskb->dev,
415 ip_mc_finish_output);
416 }
417
418 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
419 net, sk, skb, NULL, skb->dev,
420 ip_finish_output,
421 !(IPCB(skb)->flags & IPSKB_REROUTED));
422}
423
424int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
425{
426 struct net_device *dev = skb_dst(skb)->dev;
427
428 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
429
430 skb->dev = dev;
431 skb->protocol = htons(ETH_P_IP);
432
433 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
434 net, sk, skb, NULL, dev,
435 ip_finish_output,
436 !(IPCB(skb)->flags & IPSKB_REROUTED));
437}
438
439/*
440 * copy saddr and daddr, possibly using 64bit load/stores
441 * Equivalent to :
442 * iph->saddr = fl4->saddr;
443 * iph->daddr = fl4->daddr;
444 */
445static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
446{
447 BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
448 offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
Olivier Deprez0e641232021-09-23 10:07:05 +0200449
450 iph->saddr = fl4->saddr;
451 iph->daddr = fl4->daddr;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000452}
453
454/* Note: skb->sk can be different from sk, in case of tunnels */
455int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
456 __u8 tos)
457{
458 struct inet_sock *inet = inet_sk(sk);
459 struct net *net = sock_net(sk);
460 struct ip_options_rcu *inet_opt;
461 struct flowi4 *fl4;
462 struct rtable *rt;
463 struct iphdr *iph;
464 int res;
465
466 /* Skip all of this if the packet is already routed,
467 * f.e. by something like SCTP.
468 */
469 rcu_read_lock();
470 inet_opt = rcu_dereference(inet->inet_opt);
471 fl4 = &fl->u.ip4;
472 rt = skb_rtable(skb);
473 if (rt)
474 goto packet_routed;
475
476 /* Make sure we can route this packet. */
477 rt = (struct rtable *)__sk_dst_check(sk, 0);
478 if (!rt) {
479 __be32 daddr;
480
481 /* Use correct destination address if we have options. */
482 daddr = inet->inet_daddr;
483 if (inet_opt && inet_opt->opt.srr)
484 daddr = inet_opt->opt.faddr;
485
486 /* If this fails, retransmit mechanism of transport layer will
487 * keep trying until route appears or the connection times
488 * itself out.
489 */
490 rt = ip_route_output_ports(net, fl4, sk,
491 daddr, inet->inet_saddr,
492 inet->inet_dport,
493 inet->inet_sport,
494 sk->sk_protocol,
495 RT_CONN_FLAGS_TOS(sk, tos),
496 sk->sk_bound_dev_if);
497 if (IS_ERR(rt))
498 goto no_route;
499 sk_setup_caps(sk, &rt->dst);
500 }
501 skb_dst_set_noref(skb, &rt->dst);
502
503packet_routed:
504 if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
505 goto no_route;
506
507 /* OK, we know where to send it, allocate and build IP header. */
508 skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
509 skb_reset_network_header(skb);
510 iph = ip_hdr(skb);
511 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
512 if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
513 iph->frag_off = htons(IP_DF);
514 else
515 iph->frag_off = 0;
516 iph->ttl = ip_select_ttl(inet, &rt->dst);
517 iph->protocol = sk->sk_protocol;
518 ip_copy_addrs(iph, fl4);
519
	/* The transport layer sets skb->h.foo itself. */
521
522 if (inet_opt && inet_opt->opt.optlen) {
523 iph->ihl += inet_opt->opt.optlen >> 2;
524 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
525 }
526
527 ip_select_ident_segs(net, skb, sk,
528 skb_shinfo(skb)->gso_segs ?: 1);
529
530 /* TODO : should we use skb->sk here instead of sk ? */
531 skb->priority = sk->sk_priority;
532 skb->mark = sk->sk_mark;
533
534 res = ip_local_out(net, sk, skb);
535 rcu_read_unlock();
536 return res;
537
538no_route:
539 rcu_read_unlock();
540 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
541 kfree_skb(skb);
542 return -EHOSTUNREACH;
543}
544EXPORT_SYMBOL(__ip_queue_xmit);
545
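/* Propagate per-packet metadata (priority, mark, dst, netfilter and extension
 * state, ...) from the original skb to a freshly allocated fragment.
 */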
546static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
547{
548 to->pkt_type = from->pkt_type;
549 to->priority = from->priority;
550 to->protocol = from->protocol;
David Brazdil0f672f62019-12-10 10:32:29 +0000551 to->skb_iif = from->skb_iif;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000552 skb_dst_drop(to);
553 skb_dst_copy(to, from);
554 to->dev = from->dev;
555 to->mark = from->mark;
556
557 skb_copy_hash(to, from);
558
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000559#ifdef CONFIG_NET_SCHED
560 to->tc_index = from->tc_index;
561#endif
562 nf_copy(to, from);
David Brazdil0f672f62019-12-10 10:32:29 +0000563 skb_ext_copy(to, from);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000564#if IS_ENABLED(CONFIG_IP_VS)
565 to->ipvs_property = from->ipvs_property;
566#endif
567 skb_copy_secmark(to, from);
568}
569
570static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
571 unsigned int mtu,
572 int (*output)(struct net *, struct sock *, struct sk_buff *))
573{
574 struct iphdr *iph = ip_hdr(skb);
575
576 if ((iph->frag_off & htons(IP_DF)) == 0)
577 return ip_do_fragment(net, sk, skb, output);
578
579 if (unlikely(!skb->ignore_df ||
580 (IPCB(skb)->frag_max_size &&
581 IPCB(skb)->frag_max_size > mtu))) {
582 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
583 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
584 htonl(mtu));
585 kfree_skb(skb);
586 return -EMSGSIZE;
587 }
588
589 return ip_do_fragment(net, sk, skb, output);
590}
591
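/* Fast-path fragmentation helpers for skbs that already carry a frag_list.
 * ip_do_fragment() below uses them roughly like this (sketch):
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */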
David Brazdil0f672f62019-12-10 10:32:29 +0000592void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
593 unsigned int hlen, struct ip_fraglist_iter *iter)
594{
595 unsigned int first_len = skb_pagelen(skb);
596
597 iter->frag = skb_shinfo(skb)->frag_list;
598 skb_frag_list_init(skb);
599
600 iter->offset = 0;
601 iter->iph = iph;
602 iter->hlen = hlen;
603
604 skb->data_len = first_len - skb_headlen(skb);
605 skb->len = first_len;
606 iph->tot_len = htons(first_len);
607 iph->frag_off = htons(IP_MF);
608 ip_send_check(iph);
609}
610EXPORT_SYMBOL(ip_fraglist_init);
611
612static void ip_fraglist_ipcb_prepare(struct sk_buff *skb,
613 struct ip_fraglist_iter *iter)
614{
615 struct sk_buff *to = iter->frag;
616
617 /* Copy the flags to each fragment. */
618 IPCB(to)->flags = IPCB(skb)->flags;
619
620 if (iter->offset == 0)
621 ip_options_fragment(to);
622}
623
624void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
625{
626 unsigned int hlen = iter->hlen;
627 struct iphdr *iph = iter->iph;
628 struct sk_buff *frag;
629
630 frag = iter->frag;
631 frag->ip_summed = CHECKSUM_NONE;
632 skb_reset_transport_header(frag);
633 __skb_push(frag, hlen);
634 skb_reset_network_header(frag);
635 memcpy(skb_network_header(frag), iph, hlen);
636 iter->iph = ip_hdr(frag);
637 iph = iter->iph;
638 iph->tot_len = htons(frag->len);
639 ip_copy_metadata(frag, skb);
640 iter->offset += skb->len - hlen;
641 iph->frag_off = htons(iter->offset >> 3);
642 if (frag->next)
643 iph->frag_off |= htons(IP_MF);
644 /* Ready, complete checksum */
645 ip_send_check(iph);
646}
647EXPORT_SYMBOL(ip_fraglist_prepare);
648
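/* Slow-path fragmentation state machine. A caller (see the slow path of
 * ip_do_fragment() below) initializes the state once and then pulls out one
 * newly allocated fragment at a time, roughly (sketch):
 *
 *	ip_frag_init(skb, hlen, ll_rs, mtu, DF, &state);
 *	while (state.left > 0) {
 *		skb2 = ip_frag_next(skb, &state);
 *		if (IS_ERR(skb2))
 *			break;
 *		err = output(net, sk, skb2);
 *	}
 */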
649void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
650 unsigned int ll_rs, unsigned int mtu, bool DF,
651 struct ip_frag_state *state)
652{
653 struct iphdr *iph = ip_hdr(skb);
654
655 state->DF = DF;
656 state->hlen = hlen;
657 state->ll_rs = ll_rs;
658 state->mtu = mtu;
659
660 state->left = skb->len - hlen; /* Space per frame */
661 state->ptr = hlen; /* Where to start from */
662
663 state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
664 state->not_last_frag = iph->frag_off & htons(IP_MF);
665}
666EXPORT_SYMBOL(ip_frag_init);
667
668static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
669 bool first_frag, struct ip_frag_state *state)
670{
671 /* Copy the flags to each fragment. */
672 IPCB(to)->flags = IPCB(from)->flags;
673
674 /* ANK: dirty, but effective trick. Upgrade options only if
675 * the segment to be fragmented was THE FIRST (otherwise,
676 * options are already fixed) and make it ONCE
677 * on the initial skb, so that all the following fragments
678 * will inherit fixed options.
679 */
680 if (first_frag)
681 ip_options_fragment(from);
682}
683
684struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
685{
686 unsigned int len = state->left;
687 struct sk_buff *skb2;
688 struct iphdr *iph;
689
690 len = state->left;
691 /* IF: it doesn't fit, use 'mtu' - the data space left */
692 if (len > state->mtu)
693 len = state->mtu;
694 /* IF: we are not sending up to and including the packet end
695 then align the next start on an eight byte boundary */
696 if (len < state->left) {
697 len &= ~7;
698 }
699
700 /* Allocate buffer */
701 skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
702 if (!skb2)
703 return ERR_PTR(-ENOMEM);
704
705 /*
706 * Set up data on packet
707 */
708
709 ip_copy_metadata(skb2, skb);
710 skb_reserve(skb2, state->ll_rs);
711 skb_put(skb2, len + state->hlen);
712 skb_reset_network_header(skb2);
713 skb2->transport_header = skb2->network_header + state->hlen;
714
715 /*
716 * Charge the memory for the fragment to any owner
717 * it might possess
718 */
719
720 if (skb->sk)
721 skb_set_owner_w(skb2, skb->sk);
722
723 /*
724 * Copy the packet header into the new buffer.
725 */
726
727 skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);
728
729 /*
730 * Copy a block of the IP datagram.
731 */
732 if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
733 BUG();
734 state->left -= len;
735
736 /*
737 * Fill in the new header fields.
738 */
739 iph = ip_hdr(skb2);
740 iph->frag_off = htons((state->offset >> 3));
741 if (state->DF)
742 iph->frag_off |= htons(IP_DF);
743
	/*
	 *	Added AC: if we are fragmenting a fragment that's not the
	 *	last fragment then keep the MF flag set on each fragment.
	 */
748 if (state->left > 0 || state->not_last_frag)
749 iph->frag_off |= htons(IP_MF);
750 state->ptr += len;
751 state->offset += len;
752
753 iph->tot_len = htons(len + state->hlen);
754
755 ip_send_check(iph);
756
757 return skb2;
758}
759EXPORT_SYMBOL(ip_frag_next);
760
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000761/*
762 * This IP datagram is too large to be sent in one piece. Break it up into
763 * smaller pieces (each of size equal to IP header plus
764 * a block of the data of the original IP data part) that will yet fit in a
765 * single device frame, and queue such a frame for sending.
766 */
767
768int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
769 int (*output)(struct net *, struct sock *, struct sk_buff *))
770{
771 struct iphdr *iph;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000772 struct sk_buff *skb2;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000773 struct rtable *rt = skb_rtable(skb);
David Brazdil0f672f62019-12-10 10:32:29 +0000774 unsigned int mtu, hlen, ll_rs;
775 struct ip_fraglist_iter iter;
776 ktime_t tstamp = skb->tstamp;
777 struct ip_frag_state state;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000778 int err = 0;
779
780 /* for offloaded checksums cleanup checksum before fragmentation */
781 if (skb->ip_summed == CHECKSUM_PARTIAL &&
782 (err = skb_checksum_help(skb)))
783 goto fail;
784
785 /*
786 * Point into the IP datagram header.
787 */
788
789 iph = ip_hdr(skb);
790
791 mtu = ip_skb_dst_mtu(sk, skb);
792 if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
793 mtu = IPCB(skb)->frag_max_size;
794
795 /*
796 * Setup starting values.
797 */
798
799 hlen = iph->ihl * 4;
800 mtu = mtu - hlen; /* Size of data space */
801 IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
802 ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
803
	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited. In this case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
811 if (skb_has_frag_list(skb)) {
812 struct sk_buff *frag, *frag2;
813 unsigned int first_len = skb_pagelen(skb);
814
815 if (first_len - hlen > mtu ||
816 ((first_len - hlen) & 7) ||
817 ip_is_fragment(iph) ||
818 skb_cloned(skb) ||
819 skb_headroom(skb) < ll_rs)
820 goto slow_path;
821
822 skb_walk_frags(skb, frag) {
823 /* Correct geometry. */
824 if (frag->len > mtu ||
825 ((frag->len & 7) && frag->next) ||
826 skb_headroom(frag) < hlen + ll_rs)
827 goto slow_path_clean;
828
829 /* Partially cloned skb? */
830 if (skb_shared(frag))
831 goto slow_path_clean;
832
833 BUG_ON(frag->sk);
834 if (skb->sk) {
835 frag->sk = skb->sk;
836 frag->destructor = sock_wfree;
837 }
838 skb->truesize -= frag->truesize;
839 }
840
841 /* Everything is OK. Generate! */
David Brazdil0f672f62019-12-10 10:32:29 +0000842 ip_fraglist_init(skb, iph, hlen, &iter);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000843
844 for (;;) {
845 /* Prepare header of the next frame,
846 * before previous one went down. */
David Brazdil0f672f62019-12-10 10:32:29 +0000847 if (iter.frag) {
848 ip_fraglist_ipcb_prepare(skb, &iter);
849 ip_fraglist_prepare(skb, &iter);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000850 }
851
David Brazdil0f672f62019-12-10 10:32:29 +0000852 skb->tstamp = tstamp;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000853 err = output(net, sk, skb);
854
855 if (!err)
856 IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
David Brazdil0f672f62019-12-10 10:32:29 +0000857 if (err || !iter.frag)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000858 break;
859
David Brazdil0f672f62019-12-10 10:32:29 +0000860 skb = ip_fraglist_next(&iter);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000861 }
862
863 if (err == 0) {
864 IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
865 return 0;
866 }
867
David Brazdil0f672f62019-12-10 10:32:29 +0000868 kfree_skb_list(iter.frag);
869
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000870 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
871 return err;
872
873slow_path_clean:
874 skb_walk_frags(skb, frag2) {
875 if (frag2 == frag)
876 break;
877 frag2->sk = NULL;
878 frag2->destructor = NULL;
879 skb->truesize += frag2->truesize;
880 }
881 }
882
883slow_path:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000884 /*
885 * Fragment the datagram.
886 */
887
David Brazdil0f672f62019-12-10 10:32:29 +0000888 ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
889 &state);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000890
891 /*
892 * Keep copying data until we run out.
893 */
894
David Brazdil0f672f62019-12-10 10:32:29 +0000895 while (state.left > 0) {
896 bool first_frag = (state.offset == 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000897
David Brazdil0f672f62019-12-10 10:32:29 +0000898 skb2 = ip_frag_next(skb, &state);
899 if (IS_ERR(skb2)) {
900 err = PTR_ERR(skb2);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000901 goto fail;
902 }
David Brazdil0f672f62019-12-10 10:32:29 +0000903 ip_frag_ipcb(skb, skb2, first_frag, &state);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000904
905 /*
906 * Put this fragment into the sending queue.
907 */
David Brazdil0f672f62019-12-10 10:32:29 +0000908 skb2->tstamp = tstamp;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000909 err = output(net, sk, skb2);
910 if (err)
911 goto fail;
912
913 IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
914 }
915 consume_skb(skb);
916 IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
917 return err;
918
919fail:
920 kfree_skb(skb);
921 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
922 return err;
923}
924EXPORT_SYMBOL(ip_do_fragment);
925
926int
927ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
928{
929 struct msghdr *msg = from;
930
931 if (skb->ip_summed == CHECKSUM_PARTIAL) {
932 if (!copy_from_iter_full(to, len, &msg->msg_iter))
933 return -EFAULT;
934 } else {
935 __wsum csum = 0;
936 if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
937 return -EFAULT;
938 skb->csum = csum_block_add(skb->csum, csum, odd);
939 }
940 return 0;
941}
942EXPORT_SYMBOL(ip_generic_getfrag);
943
944static inline __wsum
945csum_page(struct page *page, int offset, int copy)
946{
947 char *kaddr;
948 __wsum csum;
949 kaddr = kmap(page);
950 csum = csum_partial(kaddr + offset, copy, 0);
951 kunmap(page);
952 return csum;
953}
954
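/* Core of ip_append_data()/ip_make_skb(): append 'length' bytes obtained via
 * getfrag() to the cork queue, growing the tail skb or allocating new
 * MTU-sized skbs so that each queued skb becomes one IP fragment when the
 * pending data is finally pushed out.
 */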
955static int __ip_append_data(struct sock *sk,
956 struct flowi4 *fl4,
957 struct sk_buff_head *queue,
958 struct inet_cork *cork,
959 struct page_frag *pfrag,
960 int getfrag(void *from, char *to, int offset,
961 int len, int odd, struct sk_buff *skb),
962 void *from, int length, int transhdrlen,
963 unsigned int flags)
964{
965 struct inet_sock *inet = inet_sk(sk);
David Brazdil0f672f62019-12-10 10:32:29 +0000966 struct ubuf_info *uarg = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000967 struct sk_buff *skb;
968
969 struct ip_options *opt = cork->opt;
970 int hh_len;
971 int exthdrlen;
972 int mtu;
973 int copy;
974 int err;
975 int offset = 0;
976 unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
977 int csummode = CHECKSUM_NONE;
978 struct rtable *rt = (struct rtable *)cork->dst;
979 unsigned int wmem_alloc_delta = 0;
David Brazdil0f672f62019-12-10 10:32:29 +0000980 bool paged, extra_uref = false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000981 u32 tskey = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000982
983 skb = skb_peek_tail(queue);
984
985 exthdrlen = !skb ? rt->dst.header_len : 0;
986 mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
987 paged = !!cork->gso_size;
988
989 if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
990 sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
991 tskey = sk->sk_tskey++;
992
993 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
994
995 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
996 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
997 maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
998
999 if (cork->length + length > maxnonfragsize - fragheaderlen) {
1000 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
1001 mtu - (opt ? opt->optlen : 0));
1002 return -EMSGSIZE;
1003 }
1004
	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later on.
	 */
1009 if (transhdrlen &&
1010 length + fragheaderlen <= mtu &&
1011 rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
1012 (!(flags & MSG_MORE) || cork->gso_size) &&
1013 (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
1014 csummode = CHECKSUM_PARTIAL;
1015
David Brazdil0f672f62019-12-10 10:32:29 +00001016 if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
1017 uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
1018 if (!uarg)
1019 return -ENOBUFS;
1020 extra_uref = !skb_zcopy(skb); /* only ref on new uarg */
1021 if (rt->dst.dev->features & NETIF_F_SG &&
1022 csummode == CHECKSUM_PARTIAL) {
1023 paged = true;
1024 } else {
1025 uarg->zerocopy = 0;
1026 skb_zcopy_set(skb, uarg, &extra_uref);
1027 }
1028 }
1029
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001030 cork->length += length;
1031
	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate chained skbs;
	 * each of the segments is an IP fragment ready for sending to the
	 * network after adding the appropriate IP header.
	 */
1038
1039 if (!skb)
1040 goto alloc_new_skb;
1041
1042 while (length > 0) {
1043 /* Check if the remaining data fits into current packet. */
1044 copy = mtu - skb->len;
1045 if (copy < length)
1046 copy = maxfraglen - skb->len;
1047 if (copy <= 0) {
1048 char *data;
1049 unsigned int datalen;
1050 unsigned int fraglen;
1051 unsigned int fraggap;
Olivier Deprez0e641232021-09-23 10:07:05 +02001052 unsigned int alloclen, alloc_extra;
David Brazdil0f672f62019-12-10 10:32:29 +00001053 unsigned int pagedlen;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001054 struct sk_buff *skb_prev;
1055alloc_new_skb:
1056 skb_prev = skb;
1057 if (skb_prev)
1058 fraggap = skb_prev->len - maxfraglen;
1059 else
1060 fraggap = 0;
1061
1062 /*
1063 * If remaining data exceeds the mtu,
1064 * we know we need more fragment(s).
1065 */
1066 datalen = length + fraggap;
1067 if (datalen > mtu - fragheaderlen)
1068 datalen = maxfraglen - fragheaderlen;
1069 fraglen = datalen + fragheaderlen;
David Brazdil0f672f62019-12-10 10:32:29 +00001070 pagedlen = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001071
Olivier Deprez0e641232021-09-23 10:07:05 +02001072 alloc_extra = hh_len + 15;
1073 alloc_extra += exthdrlen;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001074
1075 /* The last fragment gets additional space at tail.
1076 * Note, with MSG_MORE we overallocate on fragments,
1077 * because we have no idea what fragment will be
1078 * the last.
1079 */
1080 if (datalen == length + fraggap)
Olivier Deprez0e641232021-09-23 10:07:05 +02001081 alloc_extra += rt->dst.trailer_len;
1082
1083 if ((flags & MSG_MORE) &&
1084 !(rt->dst.dev->features&NETIF_F_SG))
1085 alloclen = mtu;
1086 else if (!paged &&
1087 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
1088 !(rt->dst.dev->features & NETIF_F_SG)))
1089 alloclen = fraglen;
1090 else {
1091 alloclen = min_t(int, fraglen, MAX_HEADER);
1092 pagedlen = fraglen - alloclen;
1093 }
1094
1095 alloclen += alloc_extra;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001096
1097 if (transhdrlen) {
Olivier Deprez0e641232021-09-23 10:07:05 +02001098 skb = sock_alloc_send_skb(sk, alloclen,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001099 (flags & MSG_DONTWAIT), &err);
1100 } else {
1101 skb = NULL;
1102 if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
1103 2 * sk->sk_sndbuf)
Olivier Deprez0e641232021-09-23 10:07:05 +02001104 skb = alloc_skb(alloclen,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001105 sk->sk_allocation);
1106 if (unlikely(!skb))
1107 err = -ENOBUFS;
1108 }
1109 if (!skb)
1110 goto error;
1111
1112 /*
1113 * Fill in the control structures
1114 */
1115 skb->ip_summed = csummode;
1116 skb->csum = 0;
1117 skb_reserve(skb, hh_len);
1118
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001119 /*
1120 * Find where to start putting bytes.
1121 */
1122 data = skb_put(skb, fraglen + exthdrlen - pagedlen);
1123 skb_set_network_header(skb, exthdrlen);
1124 skb->transport_header = (skb->network_header +
1125 fragheaderlen);
1126 data += fragheaderlen + exthdrlen;
1127
1128 if (fraggap) {
1129 skb->csum = skb_copy_and_csum_bits(
1130 skb_prev, maxfraglen,
1131 data + transhdrlen, fraggap, 0);
1132 skb_prev->csum = csum_sub(skb_prev->csum,
1133 skb->csum);
1134 data += fraggap;
1135 pskb_trim_unique(skb_prev, maxfraglen);
1136 }
1137
1138 copy = datalen - transhdrlen - fraggap - pagedlen;
1139 if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1140 err = -EFAULT;
1141 kfree_skb(skb);
1142 goto error;
1143 }
1144
1145 offset += copy;
1146 length -= copy + transhdrlen;
1147 transhdrlen = 0;
1148 exthdrlen = 0;
1149 csummode = CHECKSUM_NONE;
1150
David Brazdil0f672f62019-12-10 10:32:29 +00001151 /* only the initial fragment is time stamped */
1152 skb_shinfo(skb)->tx_flags = cork->tx_flags;
1153 cork->tx_flags = 0;
1154 skb_shinfo(skb)->tskey = tskey;
1155 tskey = 0;
1156 skb_zcopy_set(skb, uarg, &extra_uref);
1157
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001158 if ((flags & MSG_CONFIRM) && !skb_prev)
1159 skb_set_dst_pending_confirm(skb, 1);
1160
1161 /*
1162 * Put the packet on the pending queue.
1163 */
1164 if (!skb->destructor) {
1165 skb->destructor = sock_wfree;
1166 skb->sk = sk;
1167 wmem_alloc_delta += skb->truesize;
1168 }
1169 __skb_queue_tail(queue, skb);
1170 continue;
1171 }
1172
1173 if (copy > length)
1174 copy = length;
1175
1176 if (!(rt->dst.dev->features&NETIF_F_SG) &&
1177 skb_tailroom(skb) >= copy) {
1178 unsigned int off;
1179
1180 off = skb->len;
1181 if (getfrag(from, skb_put(skb, copy),
1182 offset, copy, off, skb) < 0) {
1183 __skb_trim(skb, off);
1184 err = -EFAULT;
1185 goto error;
1186 }
David Brazdil0f672f62019-12-10 10:32:29 +00001187 } else if (!uarg || !uarg->zerocopy) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001188 int i = skb_shinfo(skb)->nr_frags;
1189
1190 err = -ENOMEM;
1191 if (!sk_page_frag_refill(sk, pfrag))
1192 goto error;
1193
1194 if (!skb_can_coalesce(skb, i, pfrag->page,
1195 pfrag->offset)) {
1196 err = -EMSGSIZE;
1197 if (i == MAX_SKB_FRAGS)
1198 goto error;
1199
1200 __skb_fill_page_desc(skb, i, pfrag->page,
1201 pfrag->offset, 0);
1202 skb_shinfo(skb)->nr_frags = ++i;
1203 get_page(pfrag->page);
1204 }
1205 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1206 if (getfrag(from,
1207 page_address(pfrag->page) + pfrag->offset,
1208 offset, copy, skb->len, skb) < 0)
1209 goto error_efault;
1210
1211 pfrag->offset += copy;
1212 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1213 skb->len += copy;
1214 skb->data_len += copy;
1215 skb->truesize += copy;
1216 wmem_alloc_delta += copy;
David Brazdil0f672f62019-12-10 10:32:29 +00001217 } else {
1218 err = skb_zerocopy_iter_dgram(skb, from, copy);
1219 if (err < 0)
1220 goto error;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001221 }
1222 offset += copy;
1223 length -= copy;
1224 }
1225
1226 if (wmem_alloc_delta)
1227 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1228 return 0;
1229
1230error_efault:
1231 err = -EFAULT;
1232error:
David Brazdil0f672f62019-12-10 10:32:29 +00001233 if (uarg)
1234 sock_zerocopy_put_abort(uarg, extra_uref);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001235 cork->length -= length;
1236 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1237 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1238 return err;
1239}
1240
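/* Prime the cork with the options, route, fragment size and ancillary data
 * that subsequent __ip_append_data() calls and __ip_make_skb() will use.
 */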
1241static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1242 struct ipcm_cookie *ipc, struct rtable **rtp)
1243{
1244 struct ip_options_rcu *opt;
1245 struct rtable *rt;
1246
1247 rt = *rtp;
1248 if (unlikely(!rt))
1249 return -EFAULT;
1250
1251 /*
1252 * setup for corking.
1253 */
1254 opt = ipc->opt;
1255 if (opt) {
1256 if (!cork->opt) {
1257 cork->opt = kmalloc(sizeof(struct ip_options) + 40,
1258 sk->sk_allocation);
1259 if (unlikely(!cork->opt))
1260 return -ENOBUFS;
1261 }
1262 memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
1263 cork->flags |= IPCORK_OPT;
1264 cork->addr = ipc->addr;
1265 }
1266
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001267 cork->fragsize = ip_sk_use_pmtu(sk) ?
Olivier Deprez0e641232021-09-23 10:07:05 +02001268 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
1269
1270 if (!inetdev_valid_mtu(cork->fragsize))
1271 return -ENETUNREACH;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001272
1273 cork->gso_size = ipc->gso_size;
Olivier Deprez0e641232021-09-23 10:07:05 +02001274
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001275 cork->dst = &rt->dst;
Olivier Deprez0e641232021-09-23 10:07:05 +02001276 /* We stole this route, caller should not release it. */
1277 *rtp = NULL;
1278
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001279 cork->length = 0;
1280 cork->ttl = ipc->ttl;
1281 cork->tos = ipc->tos;
David Brazdil0f672f62019-12-10 10:32:29 +00001282 cork->mark = ipc->sockc.mark;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001283 cork->priority = ipc->priority;
1284 cork->transmit_time = ipc->sockc.transmit_time;
1285 cork->tx_flags = 0;
1286 sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);
1287
1288 return 0;
1289}
1290
/*
 * ip_append_data() and ip_append_page() can make one large IP datagram
 * from many pieces of data. Each piece will be held on the socket
 * until ip_push_pending_frames() is called. Each piece can be a page
 * or non-page data.
 *
 * Not only UDP, other transport protocols - e.g. raw sockets - can
 * potentially use this interface.
 *
 * LATER: length must be adjusted by pad at tail, when it is required.
 */
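/* Typical corked-send usage, as ip_send_unicast_reply() below does it
 * (sketch):
 *
 *	err = ip_append_data(sk, &fl4, getfrag, from, len, 0,
 *			     &ipc, &rt, MSG_DONTWAIT);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else
 *		ip_push_pending_frames(sk, &fl4);
 */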
1302int ip_append_data(struct sock *sk, struct flowi4 *fl4,
1303 int getfrag(void *from, char *to, int offset, int len,
1304 int odd, struct sk_buff *skb),
1305 void *from, int length, int transhdrlen,
1306 struct ipcm_cookie *ipc, struct rtable **rtp,
1307 unsigned int flags)
1308{
1309 struct inet_sock *inet = inet_sk(sk);
1310 int err;
1311
1312 if (flags&MSG_PROBE)
1313 return 0;
1314
1315 if (skb_queue_empty(&sk->sk_write_queue)) {
1316 err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
1317 if (err)
1318 return err;
1319 } else {
1320 transhdrlen = 0;
1321 }
1322
1323 return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
1324 sk_page_frag(sk), getfrag,
1325 from, length, transhdrlen, flags);
1326}
1327
1328ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
1329 int offset, size_t size, int flags)
1330{
1331 struct inet_sock *inet = inet_sk(sk);
1332 struct sk_buff *skb;
1333 struct rtable *rt;
1334 struct ip_options *opt = NULL;
1335 struct inet_cork *cork;
1336 int hh_len;
1337 int mtu;
1338 int len;
1339 int err;
1340 unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;
1341
1342 if (inet->hdrincl)
1343 return -EPERM;
1344
1345 if (flags&MSG_PROBE)
1346 return 0;
1347
1348 if (skb_queue_empty(&sk->sk_write_queue))
1349 return -EINVAL;
1350
1351 cork = &inet->cork.base;
1352 rt = (struct rtable *)cork->dst;
1353 if (cork->flags & IPCORK_OPT)
1354 opt = cork->opt;
1355
1356 if (!(rt->dst.dev->features&NETIF_F_SG))
1357 return -EOPNOTSUPP;
1358
1359 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1360 mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
1361
1362 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
1363 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
1364 maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
1365
1366 if (cork->length + size > maxnonfragsize - fragheaderlen) {
1367 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
1368 mtu - (opt ? opt->optlen : 0));
1369 return -EMSGSIZE;
1370 }
1371
1372 skb = skb_peek_tail(&sk->sk_write_queue);
1373 if (!skb)
1374 return -EINVAL;
1375
1376 cork->length += size;
1377
1378 while (size > 0) {
1379 /* Check if the remaining data fits into current packet. */
1380 len = mtu - skb->len;
1381 if (len < size)
1382 len = maxfraglen - skb->len;
1383
1384 if (len <= 0) {
1385 struct sk_buff *skb_prev;
1386 int alloclen;
1387
1388 skb_prev = skb;
1389 fraggap = skb_prev->len - maxfraglen;
1390
1391 alloclen = fragheaderlen + hh_len + fraggap + 15;
1392 skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
1393 if (unlikely(!skb)) {
1394 err = -ENOBUFS;
1395 goto error;
1396 }
1397
1398 /*
1399 * Fill in the control structures
1400 */
1401 skb->ip_summed = CHECKSUM_NONE;
1402 skb->csum = 0;
1403 skb_reserve(skb, hh_len);
1404
1405 /*
1406 * Find where to start putting bytes.
1407 */
1408 skb_put(skb, fragheaderlen + fraggap);
1409 skb_reset_network_header(skb);
1410 skb->transport_header = (skb->network_header +
1411 fragheaderlen);
1412 if (fraggap) {
1413 skb->csum = skb_copy_and_csum_bits(skb_prev,
1414 maxfraglen,
1415 skb_transport_header(skb),
1416 fraggap, 0);
1417 skb_prev->csum = csum_sub(skb_prev->csum,
1418 skb->csum);
1419 pskb_trim_unique(skb_prev, maxfraglen);
1420 }
1421
1422 /*
1423 * Put the packet on the pending queue.
1424 */
1425 __skb_queue_tail(&sk->sk_write_queue, skb);
1426 continue;
1427 }
1428
1429 if (len > size)
1430 len = size;
1431
1432 if (skb_append_pagefrags(skb, page, offset, len)) {
1433 err = -EMSGSIZE;
1434 goto error;
1435 }
1436
1437 if (skb->ip_summed == CHECKSUM_NONE) {
1438 __wsum csum;
1439 csum = csum_page(page, offset, len);
1440 skb->csum = csum_block_add(skb->csum, csum, skb->len);
1441 }
1442
1443 skb->len += len;
1444 skb->data_len += len;
1445 skb->truesize += len;
1446 refcount_add(len, &sk->sk_wmem_alloc);
1447 offset += len;
1448 size -= len;
1449 }
1450 return 0;
1451
1452error:
1453 cork->length -= size;
1454 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1455 return err;
1456}
1457
1458static void ip_cork_release(struct inet_cork *cork)
1459{
1460 cork->flags &= ~IPCORK_OPT;
1461 kfree(cork->opt);
1462 cork->opt = NULL;
1463 dst_release(cork->dst);
1464 cork->dst = NULL;
1465}
1466
1467/*
1468 * Combined all pending IP fragments on the socket as one IP datagram
1469 * and push them out.
1470 */
1471struct sk_buff *__ip_make_skb(struct sock *sk,
1472 struct flowi4 *fl4,
1473 struct sk_buff_head *queue,
1474 struct inet_cork *cork)
1475{
1476 struct sk_buff *skb, *tmp_skb;
1477 struct sk_buff **tail_skb;
1478 struct inet_sock *inet = inet_sk(sk);
1479 struct net *net = sock_net(sk);
1480 struct ip_options *opt = NULL;
1481 struct rtable *rt = (struct rtable *)cork->dst;
1482 struct iphdr *iph;
1483 __be16 df = 0;
1484 __u8 ttl;
1485
1486 skb = __skb_dequeue(queue);
1487 if (!skb)
1488 goto out;
1489 tail_skb = &(skb_shinfo(skb)->frag_list);
1490
1491 /* move skb->data to ip header from ext header */
1492 if (skb->data < skb_network_header(skb))
1493 __skb_pull(skb, skb_network_offset(skb));
1494 while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1495 __skb_pull(tmp_skb, skb_network_header_len(skb));
1496 *tail_skb = tmp_skb;
1497 tail_skb = &(tmp_skb->next);
1498 skb->len += tmp_skb->len;
1499 skb->data_len += tmp_skb->len;
1500 skb->truesize += tmp_skb->truesize;
1501 tmp_skb->destructor = NULL;
1502 tmp_skb->sk = NULL;
1503 }
1504
	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
	 * allow fragmenting the frame generated here. No matter how the
	 * transforms change the size of the packet, it will come out.
	 */
1509 skb->ignore_df = ip_sk_ignore_df(sk);
1510
	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow this frame to be
	 * fragmented locally. */
1514 if (inet->pmtudisc == IP_PMTUDISC_DO ||
1515 inet->pmtudisc == IP_PMTUDISC_PROBE ||
1516 (skb->len <= dst_mtu(&rt->dst) &&
1517 ip_dont_fragment(sk, &rt->dst)))
1518 df = htons(IP_DF);
1519
1520 if (cork->flags & IPCORK_OPT)
1521 opt = cork->opt;
1522
1523 if (cork->ttl != 0)
1524 ttl = cork->ttl;
1525 else if (rt->rt_type == RTN_MULTICAST)
1526 ttl = inet->mc_ttl;
1527 else
1528 ttl = ip_select_ttl(inet, &rt->dst);
1529
1530 iph = ip_hdr(skb);
1531 iph->version = 4;
1532 iph->ihl = 5;
1533 iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
1534 iph->frag_off = df;
1535 iph->ttl = ttl;
1536 iph->protocol = sk->sk_protocol;
1537 ip_copy_addrs(iph, fl4);
1538 ip_select_ident(net, skb, sk);
1539
1540 if (opt) {
1541 iph->ihl += opt->optlen>>2;
1542 ip_options_build(skb, opt, cork->addr, rt, 0);
1543 }
1544
	skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
David Brazdil0f672f62019-12-10 10:32:29 +00001546 skb->mark = cork->mark;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001547 skb->tstamp = cork->transmit_time;
1548 /*
1549 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1550 * on dst refcount
1551 */
1552 cork->dst = NULL;
1553 skb_dst_set(skb, &rt->dst);
1554
1555 if (iph->protocol == IPPROTO_ICMP)
1556 icmp_out_count(net, ((struct icmphdr *)
1557 skb_transport_header(skb))->type);
1558
1559 ip_cork_release(cork);
1560out:
1561 return skb;
1562}
1563
1564int ip_send_skb(struct net *net, struct sk_buff *skb)
1565{
1566 int err;
1567
1568 err = ip_local_out(net, skb->sk, skb);
1569 if (err) {
1570 if (err > 0)
1571 err = net_xmit_errno(err);
1572 if (err)
1573 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
1574 }
1575
1576 return err;
1577}
1578
1579int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
1580{
1581 struct sk_buff *skb;
1582
1583 skb = ip_finish_skb(sk, fl4);
1584 if (!skb)
1585 return 0;
1586
	/* Netfilter gets the whole, not yet fragmented skb. */
1588 return ip_send_skb(sock_net(sk), skb);
1589}
1590
1591/*
1592 * Throw away all pending data on the socket.
1593 */
1594static void __ip_flush_pending_frames(struct sock *sk,
1595 struct sk_buff_head *queue,
1596 struct inet_cork *cork)
1597{
1598 struct sk_buff *skb;
1599
1600 while ((skb = __skb_dequeue_tail(queue)) != NULL)
1601 kfree_skb(skb);
1602
1603 ip_cork_release(cork);
1604}
1605
1606void ip_flush_pending_frames(struct sock *sk)
1607{
1608 __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
1609}
1610
1611struct sk_buff *ip_make_skb(struct sock *sk,
1612 struct flowi4 *fl4,
1613 int getfrag(void *from, char *to, int offset,
1614 int len, int odd, struct sk_buff *skb),
1615 void *from, int length, int transhdrlen,
1616 struct ipcm_cookie *ipc, struct rtable **rtp,
1617 struct inet_cork *cork, unsigned int flags)
1618{
1619 struct sk_buff_head queue;
1620 int err;
1621
1622 if (flags & MSG_PROBE)
1623 return NULL;
1624
1625 __skb_queue_head_init(&queue);
1626
1627 cork->flags = 0;
1628 cork->addr = 0;
1629 cork->opt = NULL;
1630 err = ip_setup_cork(sk, cork, ipc, rtp);
1631 if (err)
1632 return ERR_PTR(err);
1633
1634 err = __ip_append_data(sk, fl4, &queue, cork,
1635 &current->task_frag, getfrag,
1636 from, length, transhdrlen, flags);
1637 if (err) {
1638 __ip_flush_pending_frames(sk, &queue, cork);
1639 return ERR_PTR(err);
1640 }
1641
1642 return __ip_make_skb(sk, fl4, &queue, cork);
1643}
1644
1645/*
1646 * Fetch data from kernel space and fill in checksum if needed.
1647 */
1648static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1649 int len, int odd, struct sk_buff *skb)
1650{
1651 __wsum csum;
1652
1653 csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1654 skb->csum = csum_block_add(skb->csum, csum, odd);
1655 return 0;
1656}
1657
1658/*
1659 * Generic function to send a packet as reply to another packet.
1660 * Used to send some TCP resets/acks so far.
1661 */
1662void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
1663 const struct ip_options *sopt,
1664 __be32 daddr, __be32 saddr,
1665 const struct ip_reply_arg *arg,
David Brazdil0f672f62019-12-10 10:32:29 +00001666 unsigned int len, u64 transmit_time)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001667{
1668 struct ip_options_data replyopts;
1669 struct ipcm_cookie ipc;
1670 struct flowi4 fl4;
1671 struct rtable *rt = skb_rtable(skb);
1672 struct net *net = sock_net(sk);
1673 struct sk_buff *nskb;
1674 int err;
1675 int oif;
1676
1677 if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
1678 return;
1679
1680 ipcm_init(&ipc);
1681 ipc.addr = daddr;
David Brazdil0f672f62019-12-10 10:32:29 +00001682 ipc.sockc.transmit_time = transmit_time;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001683
1684 if (replyopts.opt.opt.optlen) {
1685 ipc.opt = &replyopts.opt;
1686
1687 if (replyopts.opt.opt.srr)
1688 daddr = replyopts.opt.opt.faddr;
1689 }
1690
1691 oif = arg->bound_dev_if;
1692 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
1693 oif = skb->skb_iif;
1694
1695 flowi4_init_output(&fl4, oif,
1696 IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
1697 RT_TOS(arg->tos),
1698 RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
1699 ip_reply_arg_flowi_flags(arg),
1700 daddr, saddr,
1701 tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
1702 arg->uid);
1703 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1704 rt = ip_route_output_key(net, &fl4);
1705 if (IS_ERR(rt))
1706 return;
1707
Olivier Deprez0e641232021-09-23 10:07:05 +02001708 inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001709
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001710 sk->sk_protocol = ip_hdr(skb)->protocol;
1711 sk->sk_bound_dev_if = arg->bound_dev_if;
1712 sk->sk_sndbuf = sysctl_wmem_default;
Olivier Deprez0e641232021-09-23 10:07:05 +02001713 ipc.sockc.mark = fl4.flowi4_mark;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001714 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1715 len, 0, &ipc, &rt, MSG_DONTWAIT);
1716 if (unlikely(err)) {
1717 ip_flush_pending_frames(sk);
1718 goto out;
1719 }
1720
1721 nskb = skb_peek(&sk->sk_write_queue);
1722 if (nskb) {
1723 if (arg->csumoffset >= 0)
1724 *((__sum16 *)skb_transport_header(nskb) +
1725 arg->csumoffset) = csum_fold(csum_add(nskb->csum,
1726 arg->csum));
1727 nskb->ip_summed = CHECKSUM_NONE;
1728 ip_push_pending_frames(sk, &fl4);
1729 }
1730out:
1731 ip_rt_put(rt);
1732}
1733
1734void __init ip_init(void)
1735{
1736 ip_rt_init();
1737 inet_initpeers();
1738
1739#if defined(CONFIG_IP_MULTICAST)
1740 igmp_mc_init();
1741#endif
1742}