/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan	:	Routing by source
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU		68		/* RFC 791 */

extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options		*/
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)
#define IPSKB_NOPOLICY		BIT(8)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}
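
/* Note: iphdr->ihl counts 32-bit words, so a header without IP options
 * (ihl == 5) gives ip_hdrlen() == 20 bytes.
 */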

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.mark = inet->sk.sk_mark;
	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = inet->sk.sk_bound_dev_if;
	ipcm->addr = inet->inet_saddr;
}

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
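
/* IPCB() reinterprets the skb->cb[] scratch area (48 bytes) as the IPv4
 * control block; it is only meaningful while the skb is owned by the IP
 * layer, since other layers reuse cb[] for their own state.
 */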

/* return enslaved device index if relevant */
static inline int inet_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}

/* Special input handler for packets caught by the router alert option.
   They are selected only by the protocol field and then processed like
   local ones, but only if someone wants them!  Otherwise, a router
   not running rsvpd would kill RSVP.

   What to do with them is a user-level problem.
   I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
   but the receiver should be clever enough, e.g., to forward mtrace requests
   sent to a multicast group so that they reach the destination's designated
   router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};

/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/
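
/* iphdr->frag_off packs these three flag bits together with the 13-bit
 * fragment offset; the offset is in 8-byte units, so the byte offset of a
 * fragment is (ntohs(iph->frag_off) & IP_OFFSET) * 8.
 */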

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime	*/

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));

struct ip_fraglist_iter {
	struct sk_buff	*frag;
	struct iphdr	*iph;
	int		offset;
	unsigned int	hlen;
};

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);

static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
	struct sk_buff *skb = iter->frag;

	iter->frag = skb->next;
	skb_mark_not_on_list(skb);

	return skb;
}

struct ip_frag_state {
	bool		DF;
	unsigned int	hlen;
	unsigned int	ll_rs;
	unsigned int	mtu;
	unsigned int	left;
	int		offset;
	int		ptr;
	__be16		not_last_frag;
};

void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
		  unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
			     struct ip_frag_state *state);

void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}
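
/* In both helpers, ipc->tos == -1 is the ipcm_init() default, meaning no
 * per-packet TOS was supplied (e.g. via an IP_TOS cmsg), so the socket's
 * own TOS is used instead.
 */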

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}

void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return false;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
}

#else
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	return false;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	(READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}
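
/* True for any fragment: either MF is set (first or middle fragment) or the
 * fragment offset is non-zero (last fragment).
 */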

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check>=0xFFFF));
	return --iph->ttl;
}
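
/* Incremental checksum update (RFC 1624): decrementing TTL, the high byte of
 * a 16-bit header word, is compensated by adding htons(0x0100) to the stored
 * one's-complement checksum, with the wrap-around handled by the
 * (check >= 0xFFFF) term.
 */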

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	struct net *net = dev_net(dst->dev);
	unsigned int mtu;

	if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
	    ip_mtu_locked(dst) ||
	    !forwarding)
		return dst_mtu(dst);

	/* 'forwarding = true' case should always honour route mtu */
	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (!mtu)
		mtu = min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	unsigned int mtu;

	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
					int fc_mx_len,
					struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}

/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
	dst_init_metrics(dst, fib_metrics->metrics, true);

	if (fib_metrics != &dst_default_metrics) {
		dst->_metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&fib_metrics->refcnt);
	}
}

static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);
}

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	/* We had many attacks based on IPID, use the private
	 * generator as much as we can.
	 */
	if (sk && inet_sk(sk)->inet_daddr) {
		iph->id = htons(inet_sk(sk)->inet_id);
		inet_sk(sk)->inet_id += segs;
		return;
	}
	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		iph->id = 0;
	} else {
		/* Unfortunately we need the big hammer to get a suitable IPID */
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}
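
/* ip_select_ident() is the common single-segment case; GSO paths pass
 * segs > 1 so that, for connected sockets, the per-socket inet_id counter
 * advances by the number of on-the-wire segments the skb will become.
 */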

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}
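
/* csum_tcpudp_nofold() returns the unfolded 32-bit partial checksum over the
 * TCP/UDP pseudo-header (saddr, daddr, length, protocol), used as the seed
 * when verifying or computing the transport checksum.
 */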

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr=ntohl(naddr);
	buf[0]=0x01;
	buf[1]=0x00;
	buf[2]=0x5e;
	buf[5]=addr&0xFF;
	addr>>=8;
	buf[4]=addr&0xFF;
	addr>>=8;
	buf[3]=addr&0x7F;
}
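
/* Per RFC 1112, the low 23 bits of the group address go into the
 * 01:00:5e:00:00:00/25 block (hence the & 0x7F), e.g. 224.1.2.3 maps to
 * 01:00:5e:01:02:03.
 */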

/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];	/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr >>= 8;
	buf[18] = addr & 0xff;
	addr >>= 8;
	buf[17] = addr & 0xff;
	addr >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}
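
/* net_hash_mix() salts the hash per network namespace, so the bucket
 * distribution differs between namespaces; the port is XORed in afterwards.
 */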

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN	= IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusively.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bond,
					     enum ip_defrag_users upper_bond)
{
	return user >= lower_bond && user <= upper_bond;
}

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

static inline bool inetdev_valid_mtu(unsigned int mtu)
{
	return likely(mtu >= IPV4_MIN_MTU);
}

void ip_sock_set_freebind(struct sock *sk);
int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);

#endif	/* _IP_H */