blob: 2146372adff43fc4e2092f6072014eaaec9fe3d1 [file] [log] [blame]
David Brazdil0f672f62019-12-10 10:32:29 +00001// SPDX-License-Identifier: GPL-2.0-or-later
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002/* SCTP kernel implementation
3 * (C) Copyright IBM Corp. 2001, 2004
4 * Copyright (c) 1999-2000 Cisco, Inc.
5 * Copyright (c) 1999-2001 Motorola, Inc.
6 * Copyright (c) 2001-2003 Intel Corp.
7 * Copyright (c) 2001-2002 Nokia, Inc.
8 * Copyright (c) 2001 La Monte H.P. Yarroll
9 *
10 * This file is part of the SCTP kernel implementation
11 *
12 * These functions interface with the sockets layer to implement the
13 * SCTP Extensions for the Sockets API.
14 *
15 * Note that the descriptions from the specification are USER level
16 * functions--this file is the functions which populate the struct proto
17 * for SCTP which is the BOTTOM of the sockets interface.
18 *
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000019 * Please send any bug reports or fixes you make to the
20 * email address(es):
21 * lksctp developers <linux-sctp@vger.kernel.org>
22 *
23 * Written or modified by:
24 * La Monte H.P. Yarroll <piggy@acm.org>
25 * Narasimha Budihal <narsi@refcode.org>
26 * Karl Knutson <karl@athena.chicago.il.us>
27 * Jon Grimm <jgrimm@us.ibm.com>
28 * Xingang Guo <xingang.guo@intel.com>
29 * Daisy Chang <daisyc@us.ibm.com>
30 * Sridhar Samudrala <samudrala@us.ibm.com>
31 * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
32 * Ardelle Fan <ardelle.fan@intel.com>
33 * Ryan Layer <rmlayer@us.ibm.com>
34 * Anup Pemmaiah <pemmaiah@cc.usu.edu>
35 * Kevin Gao <kevin.gao@intel.com>
36 */
37
38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
39
40#include <crypto/hash.h>
41#include <linux/types.h>
42#include <linux/kernel.h>
43#include <linux/wait.h>
44#include <linux/time.h>
45#include <linux/sched/signal.h>
46#include <linux/ip.h>
47#include <linux/capability.h>
48#include <linux/fcntl.h>
49#include <linux/poll.h>
50#include <linux/init.h>
51#include <linux/slab.h>
52#include <linux/file.h>
53#include <linux/compat.h>
54#include <linux/rhashtable.h>
55
56#include <net/ip.h>
57#include <net/icmp.h>
58#include <net/route.h>
59#include <net/ipv6.h>
60#include <net/inet_common.h>
61#include <net/busy_poll.h>
62
63#include <linux/socket.h> /* for sa_family_t */
64#include <linux/export.h>
65#include <net/sock.h>
66#include <net/sctp/sctp.h>
67#include <net/sctp/sm.h>
68#include <net/sctp/stream_sched.h>
69
70/* Forward declarations for internal helper functions. */
David Brazdil0f672f62019-12-10 10:32:29 +000071static bool sctp_writeable(struct sock *sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000072static void sctp_wfree(struct sk_buff *skb);
73static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
74 size_t msg_len);
75static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
76static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
77static int sctp_wait_for_accept(struct sock *sk, long timeo);
78static void sctp_wait_for_close(struct sock *sk, long timeo);
79static void sctp_destruct_sock(struct sock *sk);
80static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
81 union sctp_addr *addr, int len);
82static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
83static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
84static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
85static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
86static int sctp_send_asconf(struct sctp_association *asoc,
87 struct sctp_chunk *chunk);
88static int sctp_do_bind(struct sock *, union sctp_addr *, int);
89static int sctp_autobind(struct sock *sk);
David Brazdil0f672f62019-12-10 10:32:29 +000090static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
91 struct sctp_association *assoc,
92 enum sctp_socket_type type);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000093
94static unsigned long sctp_memory_pressure;
95static atomic_long_t sctp_memory_allocated;
96struct percpu_counter sctp_sockets_allocated;
97
98static void sctp_enter_memory_pressure(struct sock *sk)
99{
100 sctp_memory_pressure = 1;
101}
102
103
104/* Get the sndbuf space available at the time on the association. */
105static inline int sctp_wspace(struct sctp_association *asoc)
106{
David Brazdil0f672f62019-12-10 10:32:29 +0000107 struct sock *sk = asoc->base.sk;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000108
David Brazdil0f672f62019-12-10 10:32:29 +0000109 return asoc->ep->sndbuf_policy ? sk->sk_sndbuf - asoc->sndbuf_used
110 : sk_stream_wspace(sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000111}
112
113/* Increment the used sndbuf space count of the corresponding association by
114 * the size of the outgoing data chunk.
115 * Also, set the skb destructor for sndbuf accounting later.
116 *
117 * Since it is always 1-1 between chunk and skb, and also a new skb is always
118 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
119 * destructor in the data chunk skb for the purpose of the sndbuf space
120 * tracking.
121 */
122static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
123{
124 struct sctp_association *asoc = chunk->asoc;
125 struct sock *sk = asoc->base.sk;
126
127 /* The sndbuf space is tracked per association. */
128 sctp_association_hold(asoc);
129
130 if (chunk->shkey)
131 sctp_auth_shkey_hold(chunk->shkey);
132
133 skb_set_owner_w(chunk->skb, sk);
134
135 chunk->skb->destructor = sctp_wfree;
136 /* Save the chunk pointer in skb for sctp_wfree to use later. */
137 skb_shinfo(chunk->skb)->destructor_arg = chunk;
138
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000139 refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
David Brazdil0f672f62019-12-10 10:32:29 +0000140 asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
141 sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000142 sk_mem_charge(sk, chunk->skb->truesize);
143}
144
145static void sctp_clear_owner_w(struct sctp_chunk *chunk)
146{
147 skb_orphan(chunk->skb);
148}
149
Olivier Deprez0e641232021-09-23 10:07:05 +0200150#define traverse_and_process() \
151do { \
152 msg = chunk->msg; \
153 if (msg == prev_msg) \
154 continue; \
155 list_for_each_entry(c, &msg->chunks, frag_list) { \
156 if ((clear && asoc->base.sk == c->skb->sk) || \
157 (!clear && asoc->base.sk != c->skb->sk)) \
158 cb(c); \
159 } \
160 prev_msg = msg; \
161} while (0)
162
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000163static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
Olivier Deprez0e641232021-09-23 10:07:05 +0200164 bool clear,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000165 void (*cb)(struct sctp_chunk *))
166
167{
Olivier Deprez0e641232021-09-23 10:07:05 +0200168 struct sctp_datamsg *msg, *prev_msg = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000169 struct sctp_outq *q = &asoc->outqueue;
Olivier Deprez0e641232021-09-23 10:07:05 +0200170 struct sctp_chunk *chunk, *c;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000171 struct sctp_transport *t;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000172
173 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
174 list_for_each_entry(chunk, &t->transmitted, transmitted_list)
Olivier Deprez0e641232021-09-23 10:07:05 +0200175 traverse_and_process();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000176
177 list_for_each_entry(chunk, &q->retransmit, transmitted_list)
Olivier Deprez0e641232021-09-23 10:07:05 +0200178 traverse_and_process();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000179
180 list_for_each_entry(chunk, &q->sacked, transmitted_list)
Olivier Deprez0e641232021-09-23 10:07:05 +0200181 traverse_and_process();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000182
183 list_for_each_entry(chunk, &q->abandoned, transmitted_list)
Olivier Deprez0e641232021-09-23 10:07:05 +0200184 traverse_and_process();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000185
186 list_for_each_entry(chunk, &q->out_chunk_list, list)
Olivier Deprez0e641232021-09-23 10:07:05 +0200187 traverse_and_process();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000188}
189
190static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
191 void (*cb)(struct sk_buff *, struct sock *))
192
193{
194 struct sk_buff *skb, *tmp;
195
196 sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
197 cb(skb, sk);
198
199 sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
200 cb(skb, sk);
201
202 sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
203 cb(skb, sk);
204}
205
206/* Verify that this is a valid address. */
207static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
208 int len)
209{
210 struct sctp_af *af;
211
212 /* Verify basic sockaddr. */
213 af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
214 if (!af)
215 return -EINVAL;
216
217 /* Is this a valid SCTP address? */
218 if (!af->addr_valid(addr, sctp_sk(sk), NULL))
219 return -EINVAL;
220
221 if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
222 return -EINVAL;
223
224 return 0;
225}
226
227/* Look up the association by its id. If this is not a UDP-style
228 * socket, the ID field is always ignored.
229 */
230struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
231{
232 struct sctp_association *asoc = NULL;
233
234 /* If this is not a UDP-style socket, assoc id should be ignored. */
235 if (!sctp_style(sk, UDP)) {
236 /* Return NULL if the socket state is not ESTABLISHED. It
237 * could be a TCP-style listening socket or a socket which
238 * hasn't yet called connect() to establish an association.
239 */
240 if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
241 return NULL;
242
243 /* Get the first and the only association from the list. */
244 if (!list_empty(&sctp_sk(sk)->ep->asocs))
245 asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
246 struct sctp_association, asocs);
247 return asoc;
248 }
249
250 /* Otherwise this is a UDP-style socket. */
David Brazdil0f672f62019-12-10 10:32:29 +0000251 if (id <= SCTP_ALL_ASSOC)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000252 return NULL;
253
254 spin_lock_bh(&sctp_assocs_id_lock);
255 asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
256 if (asoc && (asoc->base.sk != sk || asoc->base.dead))
257 asoc = NULL;
258 spin_unlock_bh(&sctp_assocs_id_lock);
259
260 return asoc;
261}
262
263/* Look up the transport from an address and an assoc id. If both address and
264 * id are specified, the associations matching the address and the id should be
265 * the same.
266 */
267static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
268 struct sockaddr_storage *addr,
269 sctp_assoc_t id)
270{
271 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
272 struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
273 union sctp_addr *laddr = (union sctp_addr *)addr;
274 struct sctp_transport *transport;
275
276 if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
277 return NULL;
278
279 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
280 laddr,
281 &transport);
282
283 if (!addr_asoc)
284 return NULL;
285
286 id_asoc = sctp_id2assoc(sk, id);
287 if (id_asoc && (id_asoc != addr_asoc))
288 return NULL;
289
290 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
291 (union sctp_addr *)addr);
292
293 return transport;
294}
295
296/* API 3.1.2 bind() - UDP Style Syntax
297 * The syntax of bind() is,
298 *
299 * ret = bind(int sd, struct sockaddr *addr, int addrlen);
300 *
301 * sd - the socket descriptor returned by socket().
302 * addr - the address structure (struct sockaddr_in or struct
303 * sockaddr_in6 [RFC 2553]),
304 * addr_len - the size of the address structure.
305 */
306static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
307{
308 int retval = 0;
309
310 lock_sock(sk);
311
312 pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
313 addr, addr_len);
314
315 /* Disallow binding twice. */
316 if (!sctp_sk(sk)->ep->base.bind_addr.port)
317 retval = sctp_do_bind(sk, (union sctp_addr *)addr,
318 addr_len);
319 else
320 retval = -EINVAL;
321
322 release_sock(sk);
323
324 return retval;
325}
326
David Brazdil0f672f62019-12-10 10:32:29 +0000327static int sctp_get_port_local(struct sock *, union sctp_addr *);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000328
329/* Verify this is a valid sockaddr. */
330static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
331 union sctp_addr *addr, int len)
332{
333 struct sctp_af *af;
334
335 /* Check minimum size. */
336 if (len < sizeof (struct sockaddr))
337 return NULL;
338
339 if (!opt->pf->af_supported(addr->sa.sa_family, opt))
340 return NULL;
341
342 if (addr->sa.sa_family == AF_INET6) {
343 if (len < SIN6_LEN_RFC2133)
344 return NULL;
345 /* V4 mapped address are really of AF_INET family */
346 if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
347 !opt->pf->af_supported(AF_INET, opt))
348 return NULL;
349 }
350
351 /* If we get this far, af is valid. */
352 af = sctp_get_af_specific(addr->sa.sa_family);
353
354 if (len < af->sockaddr_len)
355 return NULL;
356
357 return af;
358}
359
Olivier Deprez0e641232021-09-23 10:07:05 +0200360static void sctp_auto_asconf_init(struct sctp_sock *sp)
361{
362 struct net *net = sock_net(&sp->inet.sk);
363
364 if (net->sctp.default_auto_asconf) {
365 spin_lock(&net->sctp.addr_wq_lock);
366 list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
367 spin_unlock(&net->sctp.addr_wq_lock);
368 sp->do_auto_asconf = 1;
369 }
370}
371
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000372/* Bind a local address either to an endpoint or to an association. */
373static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
374{
375 struct net *net = sock_net(sk);
376 struct sctp_sock *sp = sctp_sk(sk);
377 struct sctp_endpoint *ep = sp->ep;
378 struct sctp_bind_addr *bp = &ep->base.bind_addr;
379 struct sctp_af *af;
380 unsigned short snum;
381 int ret = 0;
382
383 /* Common sockaddr verification. */
384 af = sctp_sockaddr_af(sp, addr, len);
385 if (!af) {
386 pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
387 __func__, sk, addr, len);
388 return -EINVAL;
389 }
390
391 snum = ntohs(addr->v4.sin_port);
392
393 pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
394 __func__, sk, &addr->sa, bp->port, snum, len);
395
396 /* PF specific bind() address verification. */
397 if (!sp->pf->bind_verify(sp, addr))
398 return -EADDRNOTAVAIL;
399
400 /* We must either be unbound, or bind to the same port.
401 * It's OK to allow 0 ports if we are already bound.
402 * We'll just inhert an already bound port in this case
403 */
404 if (bp->port) {
405 if (!snum)
406 snum = bp->port;
407 else if (snum != bp->port) {
408 pr_debug("%s: new port %d doesn't match existing port "
409 "%d\n", __func__, snum, bp->port);
410 return -EINVAL;
411 }
412 }
413
414 if (snum && snum < inet_prot_sock(net) &&
415 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
416 return -EACCES;
417
418 /* See if the address matches any of the addresses we may have
419 * already bound before checking against other endpoints.
420 */
421 if (sctp_bind_addr_match(bp, addr, sp))
422 return -EINVAL;
423
424 /* Make sure we are allowed to bind here.
425 * The function sctp_get_port_local() does duplicate address
426 * detection.
427 */
428 addr->v4.sin_port = htons(snum);
David Brazdil0f672f62019-12-10 10:32:29 +0000429 if (sctp_get_port_local(sk, addr))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000430 return -EADDRINUSE;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000431
432 /* Refresh ephemeral port. */
Olivier Deprez0e641232021-09-23 10:07:05 +0200433 if (!bp->port) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000434 bp->port = inet_sk(sk)->inet_num;
Olivier Deprez0e641232021-09-23 10:07:05 +0200435 sctp_auto_asconf_init(sp);
436 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000437
438 /* Add the address to the bind address list.
439 * Use GFP_ATOMIC since BHs will be disabled.
440 */
441 ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
442 SCTP_ADDR_SRC, GFP_ATOMIC);
443
David Brazdil0f672f62019-12-10 10:32:29 +0000444 if (ret) {
445 sctp_put_port(sk);
446 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000447 }
David Brazdil0f672f62019-12-10 10:32:29 +0000448 /* Copy back into socket for getsockname() use. */
449 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
450 sp->pf->to_sk_saddr(addr, sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000451
452 return ret;
453}
454
455 /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
456 *
457 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
458 * at any one time. If a sender, after sending an ASCONF chunk, decides
459 * it needs to transfer another ASCONF Chunk, it MUST wait until the
460 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
461 * subsequent ASCONF. Note this restriction binds each side, so at any
462 * time two ASCONF may be in-transit on any given association (one sent
463 * from each endpoint).
464 */
465static int sctp_send_asconf(struct sctp_association *asoc,
466 struct sctp_chunk *chunk)
467{
468 struct net *net = sock_net(asoc->base.sk);
469 int retval = 0;
470
471 /* If there is an outstanding ASCONF chunk, queue it for later
472 * transmission.
473 */
474 if (asoc->addip_last_asconf) {
475 list_add_tail(&chunk->list, &asoc->addip_chunk_list);
476 goto out;
477 }
478
479 /* Hold the chunk until an ASCONF_ACK is received. */
480 sctp_chunk_hold(chunk);
481 retval = sctp_primitive_ASCONF(net, asoc, chunk);
482 if (retval)
483 sctp_chunk_free(chunk);
484 else
485 asoc->addip_last_asconf = chunk;
486
487out:
488 return retval;
489}
490
491/* Add a list of addresses as bind addresses to local endpoint or
492 * association.
493 *
494 * Basically run through each address specified in the addrs/addrcnt
495 * array/length pair, determine if it is IPv6 or IPv4 and call
496 * sctp_do_bind() on it.
497 *
498 * If any of them fails, then the operation will be reversed and the
499 * ones that were added will be removed.
500 *
501 * Only sctp_setsockopt_bindx() is supposed to call this function.
502 */
503static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
504{
505 int cnt;
506 int retval = 0;
507 void *addr_buf;
508 struct sockaddr *sa_addr;
509 struct sctp_af *af;
510
511 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
512 addrs, addrcnt);
513
514 addr_buf = addrs;
515 for (cnt = 0; cnt < addrcnt; cnt++) {
516 /* The list may contain either IPv4 or IPv6 address;
517 * determine the address length for walking thru the list.
518 */
519 sa_addr = addr_buf;
520 af = sctp_get_af_specific(sa_addr->sa_family);
521 if (!af) {
522 retval = -EINVAL;
523 goto err_bindx_add;
524 }
525
526 retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
527 af->sockaddr_len);
528
529 addr_buf += af->sockaddr_len;
530
531err_bindx_add:
532 if (retval < 0) {
533 /* Failed. Cleanup the ones that have been added */
534 if (cnt > 0)
535 sctp_bindx_rem(sk, addrs, cnt);
536 return retval;
537 }
538 }
539
540 return retval;
541}
542
543/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
544 * associations that are part of the endpoint indicating that a list of local
545 * addresses are added to the endpoint.
546 *
547 * If any of the addresses is already in the bind address list of the
548 * association, we do not send the chunk for that association. But it will not
549 * affect other associations.
550 *
551 * Only sctp_setsockopt_bindx() is supposed to call this function.
552 */
553static int sctp_send_asconf_add_ip(struct sock *sk,
554 struct sockaddr *addrs,
555 int addrcnt)
556{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000557 struct sctp_sock *sp;
558 struct sctp_endpoint *ep;
559 struct sctp_association *asoc;
560 struct sctp_bind_addr *bp;
561 struct sctp_chunk *chunk;
562 struct sctp_sockaddr_entry *laddr;
563 union sctp_addr *addr;
564 union sctp_addr saveaddr;
565 void *addr_buf;
566 struct sctp_af *af;
567 struct list_head *p;
568 int i;
569 int retval = 0;
570
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000571 sp = sctp_sk(sk);
572 ep = sp->ep;
573
David Brazdil0f672f62019-12-10 10:32:29 +0000574 if (!ep->asconf_enable)
575 return retval;
576
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000577 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
578 __func__, sk, addrs, addrcnt);
579
580 list_for_each_entry(asoc, &ep->asocs, asocs) {
581 if (!asoc->peer.asconf_capable)
582 continue;
583
584 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
585 continue;
586
587 if (!sctp_state(asoc, ESTABLISHED))
588 continue;
589
590 /* Check if any address in the packed array of addresses is
591 * in the bind address list of the association. If so,
592 * do not send the asconf chunk to its peer, but continue with
593 * other associations.
594 */
595 addr_buf = addrs;
596 for (i = 0; i < addrcnt; i++) {
597 addr = addr_buf;
598 af = sctp_get_af_specific(addr->v4.sin_family);
599 if (!af) {
600 retval = -EINVAL;
601 goto out;
602 }
603
604 if (sctp_assoc_lookup_laddr(asoc, addr))
605 break;
606
607 addr_buf += af->sockaddr_len;
608 }
609 if (i < addrcnt)
610 continue;
611
612 /* Use the first valid address in bind addr list of
613 * association as Address Parameter of ASCONF CHUNK.
614 */
615 bp = &asoc->base.bind_addr;
616 p = bp->address_list.next;
617 laddr = list_entry(p, struct sctp_sockaddr_entry, list);
618 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
619 addrcnt, SCTP_PARAM_ADD_IP);
620 if (!chunk) {
621 retval = -ENOMEM;
622 goto out;
623 }
624
625 /* Add the new addresses to the bind address list with
626 * use_as_src set to 0.
627 */
628 addr_buf = addrs;
629 for (i = 0; i < addrcnt; i++) {
630 addr = addr_buf;
631 af = sctp_get_af_specific(addr->v4.sin_family);
632 memcpy(&saveaddr, addr, af->sockaddr_len);
633 retval = sctp_add_bind_addr(bp, &saveaddr,
634 sizeof(saveaddr),
635 SCTP_ADDR_NEW, GFP_ATOMIC);
636 addr_buf += af->sockaddr_len;
637 }
638 if (asoc->src_out_of_asoc_ok) {
639 struct sctp_transport *trans;
640
641 list_for_each_entry(trans,
642 &asoc->peer.transport_addr_list, transports) {
643 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
644 2*asoc->pathmtu, 4380));
645 trans->ssthresh = asoc->peer.i.a_rwnd;
646 trans->rto = asoc->rto_initial;
647 sctp_max_rto(asoc, trans);
648 trans->rtt = trans->srtt = trans->rttvar = 0;
649 /* Clear the source and route cache */
650 sctp_transport_route(trans, NULL,
651 sctp_sk(asoc->base.sk));
652 }
653 }
654 retval = sctp_send_asconf(asoc, chunk);
655 }
656
657out:
658 return retval;
659}
660
661/* Remove a list of addresses from bind addresses list. Do not remove the
662 * last address.
663 *
664 * Basically run through each address specified in the addrs/addrcnt
665 * array/length pair, determine if it is IPv6 or IPv4 and call
666 * sctp_del_bind() on it.
667 *
668 * If any of them fails, then the operation will be reversed and the
669 * ones that were removed will be added back.
670 *
671 * At least one address has to be left; if only one address is
672 * available, the operation will return -EBUSY.
673 *
674 * Only sctp_setsockopt_bindx() is supposed to call this function.
675 */
676static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
677{
678 struct sctp_sock *sp = sctp_sk(sk);
679 struct sctp_endpoint *ep = sp->ep;
680 int cnt;
681 struct sctp_bind_addr *bp = &ep->base.bind_addr;
682 int retval = 0;
683 void *addr_buf;
684 union sctp_addr *sa_addr;
685 struct sctp_af *af;
686
687 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
688 __func__, sk, addrs, addrcnt);
689
690 addr_buf = addrs;
691 for (cnt = 0; cnt < addrcnt; cnt++) {
692 /* If the bind address list is empty or if there is only one
693 * bind address, there is nothing more to be removed (we need
694 * at least one address here).
695 */
696 if (list_empty(&bp->address_list) ||
697 (sctp_list_single_entry(&bp->address_list))) {
698 retval = -EBUSY;
699 goto err_bindx_rem;
700 }
701
702 sa_addr = addr_buf;
703 af = sctp_get_af_specific(sa_addr->sa.sa_family);
704 if (!af) {
705 retval = -EINVAL;
706 goto err_bindx_rem;
707 }
708
709 if (!af->addr_valid(sa_addr, sp, NULL)) {
710 retval = -EADDRNOTAVAIL;
711 goto err_bindx_rem;
712 }
713
714 if (sa_addr->v4.sin_port &&
715 sa_addr->v4.sin_port != htons(bp->port)) {
716 retval = -EINVAL;
717 goto err_bindx_rem;
718 }
719
720 if (!sa_addr->v4.sin_port)
721 sa_addr->v4.sin_port = htons(bp->port);
722
723 /* FIXME - There is probably a need to check if sk->sk_saddr and
724 * sk->sk_rcv_addr are currently set to one of the addresses to
725 * be removed. This is something which needs to be looked into
726 * when we are fixing the outstanding issues with multi-homing
727 * socket routing and failover schemes. Refer to comments in
728 * sctp_do_bind(). -daisy
729 */
730 retval = sctp_del_bind_addr(bp, sa_addr);
731
732 addr_buf += af->sockaddr_len;
733err_bindx_rem:
734 if (retval < 0) {
735 /* Failed. Add the ones that has been removed back */
736 if (cnt > 0)
737 sctp_bindx_add(sk, addrs, cnt);
738 return retval;
739 }
740 }
741
742 return retval;
743}
744
745/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
746 * the associations that are part of the endpoint indicating that a list of
747 * local addresses are removed from the endpoint.
748 *
749 * If any of the addresses is already in the bind address list of the
750 * association, we do not send the chunk for that association. But it will not
751 * affect other associations.
752 *
753 * Only sctp_setsockopt_bindx() is supposed to call this function.
754 */
755static int sctp_send_asconf_del_ip(struct sock *sk,
756 struct sockaddr *addrs,
757 int addrcnt)
758{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000759 struct sctp_sock *sp;
760 struct sctp_endpoint *ep;
761 struct sctp_association *asoc;
762 struct sctp_transport *transport;
763 struct sctp_bind_addr *bp;
764 struct sctp_chunk *chunk;
765 union sctp_addr *laddr;
766 void *addr_buf;
767 struct sctp_af *af;
768 struct sctp_sockaddr_entry *saddr;
769 int i;
770 int retval = 0;
771 int stored = 0;
772
773 chunk = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000774 sp = sctp_sk(sk);
775 ep = sp->ep;
776
David Brazdil0f672f62019-12-10 10:32:29 +0000777 if (!ep->asconf_enable)
778 return retval;
779
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000780 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
781 __func__, sk, addrs, addrcnt);
782
783 list_for_each_entry(asoc, &ep->asocs, asocs) {
784
785 if (!asoc->peer.asconf_capable)
786 continue;
787
788 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
789 continue;
790
791 if (!sctp_state(asoc, ESTABLISHED))
792 continue;
793
794 /* Check if any address in the packed array of addresses is
795 * not present in the bind address list of the association.
796 * If so, do not send the asconf chunk to its peer, but
797 * continue with other associations.
798 */
799 addr_buf = addrs;
800 for (i = 0; i < addrcnt; i++) {
801 laddr = addr_buf;
802 af = sctp_get_af_specific(laddr->v4.sin_family);
803 if (!af) {
804 retval = -EINVAL;
805 goto out;
806 }
807
808 if (!sctp_assoc_lookup_laddr(asoc, laddr))
809 break;
810
811 addr_buf += af->sockaddr_len;
812 }
813 if (i < addrcnt)
814 continue;
815
816 /* Find one address in the association's bind address list
817 * that is not in the packed array of addresses. This is to
818 * make sure that we do not delete all the addresses in the
819 * association.
820 */
821 bp = &asoc->base.bind_addr;
822 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
823 addrcnt, sp);
824 if ((laddr == NULL) && (addrcnt == 1)) {
825 if (asoc->asconf_addr_del_pending)
826 continue;
827 asoc->asconf_addr_del_pending =
828 kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
829 if (asoc->asconf_addr_del_pending == NULL) {
830 retval = -ENOMEM;
831 goto out;
832 }
833 asoc->asconf_addr_del_pending->sa.sa_family =
834 addrs->sa_family;
835 asoc->asconf_addr_del_pending->v4.sin_port =
836 htons(bp->port);
837 if (addrs->sa_family == AF_INET) {
838 struct sockaddr_in *sin;
839
840 sin = (struct sockaddr_in *)addrs;
841 asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
842 } else if (addrs->sa_family == AF_INET6) {
843 struct sockaddr_in6 *sin6;
844
845 sin6 = (struct sockaddr_in6 *)addrs;
846 asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
847 }
848
849 pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
850 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
851 asoc->asconf_addr_del_pending);
852
853 asoc->src_out_of_asoc_ok = 1;
854 stored = 1;
855 goto skip_mkasconf;
856 }
857
858 if (laddr == NULL)
859 return -EINVAL;
860
861 /* We do not need RCU protection throughout this loop
862 * because this is done under a socket lock from the
863 * setsockopt call.
864 */
865 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
866 SCTP_PARAM_DEL_IP);
867 if (!chunk) {
868 retval = -ENOMEM;
869 goto out;
870 }
871
872skip_mkasconf:
873 /* Reset use_as_src flag for the addresses in the bind address
874 * list that are to be deleted.
875 */
876 addr_buf = addrs;
877 for (i = 0; i < addrcnt; i++) {
878 laddr = addr_buf;
879 af = sctp_get_af_specific(laddr->v4.sin_family);
880 list_for_each_entry(saddr, &bp->address_list, list) {
881 if (sctp_cmp_addr_exact(&saddr->a, laddr))
882 saddr->state = SCTP_ADDR_DEL;
883 }
884 addr_buf += af->sockaddr_len;
885 }
886
887 /* Update the route and saddr entries for all the transports
888 * as some of the addresses in the bind address list are
889 * about to be deleted and cannot be used as source addresses.
890 */
891 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
892 transports) {
893 sctp_transport_route(transport, NULL,
894 sctp_sk(asoc->base.sk));
895 }
896
897 if (stored)
898 /* We don't need to transmit ASCONF */
899 continue;
900 retval = sctp_send_asconf(asoc, chunk);
901 }
902out:
903 return retval;
904}
905
906/* set addr events to assocs in the endpoint. ep and addr_wq must be locked */
907int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
908{
909 struct sock *sk = sctp_opt2sk(sp);
910 union sctp_addr *addr;
911 struct sctp_af *af;
912
913 /* It is safe to write port space in caller. */
914 addr = &addrw->a;
915 addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
916 af = sctp_get_af_specific(addr->sa.sa_family);
917 if (!af)
918 return -EINVAL;
919 if (sctp_verify_addr(sk, addr, af->sockaddr_len))
920 return -EINVAL;
921
922 if (addrw->state == SCTP_ADDR_NEW)
923 return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
924 else
925 return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
926}
927
928/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
929 *
930 * API 8.1
931 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
932 * int flags);
933 *
934 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
935 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
936 * or IPv6 addresses.
937 *
938 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
939 * Section 3.1.2 for this usage.
940 *
941 * addrs is a pointer to an array of one or more socket addresses. Each
942 * address is contained in its appropriate structure (i.e. struct
943 * sockaddr_in or struct sockaddr_in6) the family of the address type
944 * must be used to distinguish the address length (note that this
945 * representation is termed a "packed array" of addresses). The caller
946 * specifies the number of addresses in the array with addrcnt.
947 *
948 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
949 * -1, and sets errno to the appropriate error code.
950 *
951 * For SCTP, the port given in each socket address must be the same, or
952 * sctp_bindx() will fail, setting errno to EINVAL.
953 *
954 * The flags parameter is formed from the bitwise OR of zero or more of
955 * the following currently defined flags:
956 *
957 * SCTP_BINDX_ADD_ADDR
958 *
959 * SCTP_BINDX_REM_ADDR
960 *
961 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
962 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
963 * addresses from the association. The two flags are mutually exclusive;
964 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
965 * not remove all addresses from an association; sctp_bindx() will
966 * reject such an attempt with EINVAL.
967 *
968 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
969 * additional addresses with an endpoint after calling bind(). Or use
970 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
971 * socket is associated with so that no new association accepted will be
972 * associated with those addresses. If the endpoint supports dynamic
973 * address a SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause a
974 * endpoint to send the appropriate message to the peer to change the
975 * peers address lists.
976 *
977 * Adding and removing addresses from a connected association is
978 * optional functionality. Implementations that do not support this
979 * functionality should return EOPNOTSUPP.
980 *
981 * Basically do nothing but copying the addresses from user to kernel
982 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
983 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
984 * from userspace.
985 *
986 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
987 * it.
988 *
989 * sk The sk of the socket
990 * addrs The pointer to the addresses in user land
991 * addrssize Size of the addrs buffer
992 * op Operation to perform (add or remove, see the flags of
993 * sctp_bindx)
994 *
995 * Returns 0 if ok, <0 errno code on error.
996 */
997static int sctp_setsockopt_bindx(struct sock *sk,
998 struct sockaddr __user *addrs,
999 int addrs_size, int op)
1000{
1001 struct sockaddr *kaddrs;
1002 int err;
1003 int addrcnt = 0;
1004 int walk_size = 0;
1005 struct sockaddr *sa_addr;
1006 void *addr_buf;
1007 struct sctp_af *af;
1008
1009 pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
1010 __func__, sk, addrs, addrs_size, op);
1011
1012 if (unlikely(addrs_size <= 0))
1013 return -EINVAL;
1014
David Brazdil0f672f62019-12-10 10:32:29 +00001015 kaddrs = memdup_user(addrs, addrs_size);
1016 if (IS_ERR(kaddrs))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001017 return PTR_ERR(kaddrs);
1018
1019 /* Walk through the addrs buffer and count the number of addresses. */
1020 addr_buf = kaddrs;
1021 while (walk_size < addrs_size) {
1022 if (walk_size + sizeof(sa_family_t) > addrs_size) {
David Brazdil0f672f62019-12-10 10:32:29 +00001023 kfree(kaddrs);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001024 return -EINVAL;
1025 }
1026
1027 sa_addr = addr_buf;
1028 af = sctp_get_af_specific(sa_addr->sa_family);
1029
1030 /* If the address family is not supported or if this address
1031 * causes the address buffer to overflow return EINVAL.
1032 */
1033 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
David Brazdil0f672f62019-12-10 10:32:29 +00001034 kfree(kaddrs);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001035 return -EINVAL;
1036 }
1037 addrcnt++;
1038 addr_buf += af->sockaddr_len;
1039 walk_size += af->sockaddr_len;
1040 }
1041
1042 /* Do the work. */
1043 switch (op) {
1044 case SCTP_BINDX_ADD_ADDR:
1045 /* Allow security module to validate bindx addresses. */
1046 err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_BINDX_ADD,
1047 (struct sockaddr *)kaddrs,
1048 addrs_size);
1049 if (err)
1050 goto out;
1051 err = sctp_bindx_add(sk, kaddrs, addrcnt);
1052 if (err)
1053 goto out;
1054 err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
1055 break;
1056
1057 case SCTP_BINDX_REM_ADDR:
1058 err = sctp_bindx_rem(sk, kaddrs, addrcnt);
1059 if (err)
1060 goto out;
1061 err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
1062 break;
1063
1064 default:
1065 err = -EINVAL;
1066 break;
1067 }
1068
1069out:
David Brazdil0f672f62019-12-10 10:32:29 +00001070 kfree(kaddrs);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001071
1072 return err;
1073}
1074
David Brazdil0f672f62019-12-10 10:32:29 +00001075static int sctp_connect_new_asoc(struct sctp_endpoint *ep,
1076 const union sctp_addr *daddr,
1077 const struct sctp_initmsg *init,
1078 struct sctp_transport **tp)
1079{
1080 struct sctp_association *asoc;
1081 struct sock *sk = ep->base.sk;
1082 struct net *net = sock_net(sk);
1083 enum sctp_scope scope;
1084 int err;
1085
1086 if (sctp_endpoint_is_peeled_off(ep, daddr))
1087 return -EADDRNOTAVAIL;
1088
1089 if (!ep->base.bind_addr.port) {
1090 if (sctp_autobind(sk))
1091 return -EAGAIN;
1092 } else {
1093 if (ep->base.bind_addr.port < inet_prot_sock(net) &&
1094 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
1095 return -EACCES;
1096 }
1097
1098 scope = sctp_scope(daddr);
1099 asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
1100 if (!asoc)
1101 return -ENOMEM;
1102
1103 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
1104 if (err < 0)
1105 goto free;
1106
1107 *tp = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN);
1108 if (!*tp) {
1109 err = -ENOMEM;
1110 goto free;
1111 }
1112
1113 if (!init)
1114 return 0;
1115
1116 if (init->sinit_num_ostreams) {
1117 __u16 outcnt = init->sinit_num_ostreams;
1118
1119 asoc->c.sinit_num_ostreams = outcnt;
1120 /* outcnt has been changed, need to re-init stream */
1121 err = sctp_stream_init(&asoc->stream, outcnt, 0, GFP_KERNEL);
1122 if (err)
1123 goto free;
1124 }
1125
1126 if (init->sinit_max_instreams)
1127 asoc->c.sinit_max_instreams = init->sinit_max_instreams;
1128
1129 if (init->sinit_max_attempts)
1130 asoc->max_init_attempts = init->sinit_max_attempts;
1131
1132 if (init->sinit_max_init_timeo)
1133 asoc->max_init_timeo =
1134 msecs_to_jiffies(init->sinit_max_init_timeo);
1135
1136 return 0;
1137free:
1138 sctp_association_free(asoc);
1139 return err;
1140}
1141
1142static int sctp_connect_add_peer(struct sctp_association *asoc,
1143 union sctp_addr *daddr, int addr_len)
1144{
1145 struct sctp_endpoint *ep = asoc->ep;
1146 struct sctp_association *old;
1147 struct sctp_transport *t;
1148 int err;
1149
1150 err = sctp_verify_addr(ep->base.sk, daddr, addr_len);
1151 if (err)
1152 return err;
1153
1154 old = sctp_endpoint_lookup_assoc(ep, daddr, &t);
1155 if (old && old != asoc)
1156 return old->state >= SCTP_STATE_ESTABLISHED ? -EISCONN
1157 : -EALREADY;
1158
1159 if (sctp_endpoint_is_peeled_off(ep, daddr))
1160 return -EADDRNOTAVAIL;
1161
1162 t = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN);
1163 if (!t)
1164 return -ENOMEM;
1165
1166 return 0;
1167}
1168
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001169/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
1170 *
1171 * Common routine for handling connect() and sctp_connectx().
1172 * Connect will come in with just a single address.
1173 */
David Brazdil0f672f62019-12-10 10:32:29 +00001174static int __sctp_connect(struct sock *sk, struct sockaddr *kaddrs,
1175 int addrs_size, int flags, sctp_assoc_t *assoc_id)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001176{
David Brazdil0f672f62019-12-10 10:32:29 +00001177 struct sctp_sock *sp = sctp_sk(sk);
1178 struct sctp_endpoint *ep = sp->ep;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001179 struct sctp_transport *transport;
David Brazdil0f672f62019-12-10 10:32:29 +00001180 struct sctp_association *asoc;
1181 void *addr_buf = kaddrs;
1182 union sctp_addr *daddr;
1183 struct sctp_af *af;
1184 int walk_size, err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001185 long timeo;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001186
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001187 if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
David Brazdil0f672f62019-12-10 10:32:29 +00001188 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)))
1189 return -EISCONN;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001190
David Brazdil0f672f62019-12-10 10:32:29 +00001191 daddr = addr_buf;
1192 af = sctp_get_af_specific(daddr->sa.sa_family);
1193 if (!af || af->sockaddr_len > addrs_size)
1194 return -EINVAL;
1195
1196 err = sctp_verify_addr(sk, daddr, af->sockaddr_len);
1197 if (err)
1198 return err;
1199
1200 asoc = sctp_endpoint_lookup_assoc(ep, daddr, &transport);
1201 if (asoc)
1202 return asoc->state >= SCTP_STATE_ESTABLISHED ? -EISCONN
1203 : -EALREADY;
1204
1205 err = sctp_connect_new_asoc(ep, daddr, NULL, &transport);
1206 if (err)
1207 return err;
1208 asoc = transport->asoc;
1209
1210 addr_buf += af->sockaddr_len;
1211 walk_size = af->sockaddr_len;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001212 while (walk_size < addrs_size) {
David Brazdil0f672f62019-12-10 10:32:29 +00001213 err = -EINVAL;
1214 if (walk_size + sizeof(sa_family_t) > addrs_size)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001215 goto out_free;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001216
David Brazdil0f672f62019-12-10 10:32:29 +00001217 daddr = addr_buf;
1218 af = sctp_get_af_specific(daddr->sa.sa_family);
1219 if (!af || af->sockaddr_len + walk_size > addrs_size)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001220 goto out_free;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001221
David Brazdil0f672f62019-12-10 10:32:29 +00001222 if (asoc->peer.port != ntohs(daddr->v4.sin_port))
1223 goto out_free;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001224
David Brazdil0f672f62019-12-10 10:32:29 +00001225 err = sctp_connect_add_peer(asoc, daddr, af->sockaddr_len);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001226 if (err)
1227 goto out_free;
1228
David Brazdil0f672f62019-12-10 10:32:29 +00001229 addr_buf += af->sockaddr_len;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001230 walk_size += af->sockaddr_len;
1231 }
1232
1233 /* In case the user of sctp_connectx() wants an association
1234 * id back, assign one now.
1235 */
1236 if (assoc_id) {
1237 err = sctp_assoc_set_id(asoc, GFP_KERNEL);
1238 if (err < 0)
1239 goto out_free;
1240 }
1241
David Brazdil0f672f62019-12-10 10:32:29 +00001242 err = sctp_primitive_ASSOCIATE(sock_net(sk), asoc, NULL);
1243 if (err < 0)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001244 goto out_free;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001245
1246 /* Initialize sk's dport and daddr for getpeername() */
1247 inet_sk(sk)->inet_dport = htons(asoc->peer.port);
David Brazdil0f672f62019-12-10 10:32:29 +00001248 sp->pf->to_sk_daddr(daddr, sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001249 sk->sk_err = 0;
1250
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001251 if (assoc_id)
1252 *assoc_id = asoc->assoc_id;
1253
David Brazdil0f672f62019-12-10 10:32:29 +00001254 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1255 return sctp_wait_for_connect(asoc, &timeo);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001256
1257out_free:
1258 pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
1259 __func__, asoc, kaddrs, err);
David Brazdil0f672f62019-12-10 10:32:29 +00001260 sctp_association_free(asoc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001261 return err;
1262}
1263
1264/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
1265 *
1266 * API 8.9
1267 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
1268 * sctp_assoc_t *asoc);
1269 *
1270 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
1271 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
1272 * or IPv6 addresses.
1273 *
1274 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
1275 * Section 3.1.2 for this usage.
1276 *
1277 * addrs is a pointer to an array of one or more socket addresses. Each
1278 * address is contained in its appropriate structure (i.e. struct
1279 * sockaddr_in or struct sockaddr_in6) the family of the address type
1280 * must be used to distengish the address length (note that this
1281 * representation is termed a "packed array" of addresses). The caller
1282 * specifies the number of addresses in the array with addrcnt.
1283 *
1284 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
1285 * the association id of the new association. On failure, sctp_connectx()
1286 * returns -1, and sets errno to the appropriate error code. The assoc_id
1287 * is not touched by the kernel.
1288 *
1289 * For SCTP, the port given in each socket address must be the same, or
1290 * sctp_connectx() will fail, setting errno to EINVAL.
1291 *
1292 * An application can use sctp_connectx to initiate an association with
1293 * an endpoint that is multi-homed. Much like sctp_bindx() this call
1294 * allows a caller to specify multiple addresses at which a peer can be
1295 * reached. The way the SCTP stack uses the list of addresses to set up
1296 * the association is implementation dependent. This function only
1297 * specifies that the stack will try to make use of all the addresses in
1298 * the list when needed.
1299 *
1300 * Note that the list of addresses passed in is only used for setting up
1301 * the association. It does not necessarily equal the set of addresses
1302 * the peer uses for the resulting association. If the caller wants to
1303 * find out the set of peer addresses, it must use sctp_getpaddrs() to
1304 * retrieve them after the association has been set up.
1305 *
1306 * Basically do nothing but copying the addresses from user to kernel
1307 * land and invoking either sctp_connectx(). This is used for tunneling
1308 * the sctp_connectx() request through sctp_setsockopt() from userspace.
1309 *
1310 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
1311 * it.
1312 *
1313 * sk The sk of the socket
1314 * addrs The pointer to the addresses in user land
1315 * addrssize Size of the addrs buffer
1316 *
1317 * Returns >=0 if ok, <0 errno code on error.
1318 */
1319static int __sctp_setsockopt_connectx(struct sock *sk,
1320 struct sockaddr __user *addrs,
1321 int addrs_size,
1322 sctp_assoc_t *assoc_id)
1323{
1324 struct sockaddr *kaddrs;
1325 int err = 0, flags = 0;
1326
1327 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
1328 __func__, sk, addrs, addrs_size);
1329
David Brazdil0f672f62019-12-10 10:32:29 +00001330 /* make sure the 1st addr's sa_family is accessible later */
1331 if (unlikely(addrs_size < sizeof(sa_family_t)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001332 return -EINVAL;
1333
David Brazdil0f672f62019-12-10 10:32:29 +00001334 kaddrs = memdup_user(addrs, addrs_size);
1335 if (IS_ERR(kaddrs))
Olivier Deprez0e641232021-09-23 10:07:05 +02001336 return PTR_ERR(kaddrs) == -EFAULT ? -EINVAL : PTR_ERR(kaddrs);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001337
1338 /* Allow security module to validate connectx addresses. */
1339 err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_CONNECTX,
1340 (struct sockaddr *)kaddrs,
1341 addrs_size);
1342 if (err)
1343 goto out_free;
1344
1345 /* in-kernel sockets don't generally have a file allocated to them
1346 * if all they do is call sock_create_kern().
1347 */
1348 if (sk->sk_socket->file)
1349 flags = sk->sk_socket->file->f_flags;
1350
1351 err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
1352
1353out_free:
David Brazdil0f672f62019-12-10 10:32:29 +00001354 kfree(kaddrs);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001355
1356 return err;
1357}
1358
1359/*
1360 * This is an older interface. It's kept for backward compatibility
1361 * to the option that doesn't provide association id.
1362 */
1363static int sctp_setsockopt_connectx_old(struct sock *sk,
1364 struct sockaddr __user *addrs,
1365 int addrs_size)
1366{
1367 return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
1368}
1369
1370/*
1371 * New interface for the API. The since the API is done with a socket
1372 * option, to make it simple we feed back the association id is as a return
1373 * indication to the call. Error is always negative and association id is
1374 * always positive.
1375 */
1376static int sctp_setsockopt_connectx(struct sock *sk,
1377 struct sockaddr __user *addrs,
1378 int addrs_size)
1379{
1380 sctp_assoc_t assoc_id = 0;
1381 int err = 0;
1382
1383 err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
1384
1385 if (err)
1386 return err;
1387 else
1388 return assoc_id;
1389}
1390
1391/*
1392 * New (hopefully final) interface for the API.
1393 * We use the sctp_getaddrs_old structure so that use-space library
1394 * can avoid any unnecessary allocations. The only different part
1395 * is that we store the actual length of the address buffer into the
1396 * addrs_num structure member. That way we can re-use the existing
1397 * code.
1398 */
1399#ifdef CONFIG_COMPAT
1400struct compat_sctp_getaddrs_old {
1401 sctp_assoc_t assoc_id;
1402 s32 addr_num;
1403 compat_uptr_t addrs; /* struct sockaddr * */
1404};
1405#endif
1406
1407static int sctp_getsockopt_connectx3(struct sock *sk, int len,
1408 char __user *optval,
1409 int __user *optlen)
1410{
1411 struct sctp_getaddrs_old param;
1412 sctp_assoc_t assoc_id = 0;
1413 int err = 0;
1414
1415#ifdef CONFIG_COMPAT
1416 if (in_compat_syscall()) {
1417 struct compat_sctp_getaddrs_old param32;
1418
1419 if (len < sizeof(param32))
1420 return -EINVAL;
1421 if (copy_from_user(&param32, optval, sizeof(param32)))
1422 return -EFAULT;
1423
1424 param.assoc_id = param32.assoc_id;
1425 param.addr_num = param32.addr_num;
1426 param.addrs = compat_ptr(param32.addrs);
1427 } else
1428#endif
1429 {
1430 if (len < sizeof(param))
1431 return -EINVAL;
1432 if (copy_from_user(&param, optval, sizeof(param)))
1433 return -EFAULT;
1434 }
1435
1436 err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
1437 param.addrs, param.addr_num,
1438 &assoc_id);
1439 if (err == 0 || err == -EINPROGRESS) {
1440 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
1441 return -EFAULT;
1442 if (put_user(sizeof(assoc_id), optlen))
1443 return -EFAULT;
1444 }
1445
1446 return err;
1447}
1448
1449/* API 3.1.4 close() - UDP Style Syntax
1450 * Applications use close() to perform graceful shutdown (as described in
1451 * Section 10.1 of [SCTP]) on ALL the associations currently represented
1452 * by a UDP-style socket.
1453 *
1454 * The syntax is
1455 *
1456 * ret = close(int sd);
1457 *
1458 * sd - the socket descriptor of the associations to be closed.
1459 *
1460 * To gracefully shutdown a specific association represented by the
1461 * UDP-style socket, an application should use the sendmsg() call,
1462 * passing no user data, but including the appropriate flag in the
1463 * ancillary data (see Section xxxx).
1464 *
1465 * If sd in the close() call is a branched-off socket representing only
1466 * one association, the shutdown is performed on that association only.
1467 *
1468 * 4.1.6 close() - TCP Style Syntax
1469 *
1470 * Applications use close() to gracefully close down an association.
1471 *
1472 * The syntax is:
1473 *
1474 * int close(int sd);
1475 *
1476 * sd - the socket descriptor of the association to be closed.
1477 *
1478 * After an application calls close() on a socket descriptor, no further
1479 * socket operations will succeed on that descriptor.
1480 *
1481 * API 7.1.4 SO_LINGER
1482 *
1483 * An application using the TCP-style socket can use this option to
1484 * perform the SCTP ABORT primitive. The linger option structure is:
1485 *
1486 * struct linger {
1487 * int l_onoff; // option on/off
1488 * int l_linger; // linger time
1489 * };
1490 *
1491 * To enable the option, set l_onoff to 1. If the l_linger value is set
1492 * to 0, calling close() is the same as the ABORT primitive. If the
1493 * value is set to a negative value, the setsockopt() call will return
1494 * an error. If the value is set to a positive value linger_time, the
1495 * close() can be blocked for at most linger_time ms. If the graceful
1496 * shutdown phase does not finish during this period, close() will
1497 * return but the graceful shutdown phase continues in the system.
1498 */
1499static void sctp_close(struct sock *sk, long timeout)
1500{
1501 struct net *net = sock_net(sk);
1502 struct sctp_endpoint *ep;
1503 struct sctp_association *asoc;
1504 struct list_head *pos, *temp;
1505 unsigned int data_was_unread;
1506
1507 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
1508
1509 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1510 sk->sk_shutdown = SHUTDOWN_MASK;
1511 inet_sk_set_state(sk, SCTP_SS_CLOSING);
1512
1513 ep = sctp_sk(sk)->ep;
1514
1515 /* Clean up any skbs sitting on the receive queue. */
1516 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
1517 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
1518
1519 /* Walk all associations on an endpoint. */
1520 list_for_each_safe(pos, temp, &ep->asocs) {
1521 asoc = list_entry(pos, struct sctp_association, asocs);
1522
1523 if (sctp_style(sk, TCP)) {
1524 /* A closed association can still be in the list if
1525 * it belongs to a TCP-style listening socket that is
1526 * not yet accepted. If so, free it. If not, send an
1527 * ABORT or SHUTDOWN based on the linger options.
1528 */
1529 if (sctp_state(asoc, CLOSED)) {
1530 sctp_association_free(asoc);
1531 continue;
1532 }
1533 }
1534
1535 if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
1536 !skb_queue_empty(&asoc->ulpq.reasm) ||
1537 !skb_queue_empty(&asoc->ulpq.reasm_uo) ||
1538 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
1539 struct sctp_chunk *chunk;
1540
1541 chunk = sctp_make_abort_user(asoc, NULL, 0);
1542 sctp_primitive_ABORT(net, asoc, chunk);
1543 } else
1544 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1545 }
1546
1547 /* On a TCP-style socket, block for at most linger_time if set. */
1548 if (sctp_style(sk, TCP) && timeout)
1549 sctp_wait_for_close(sk, timeout);
1550
1551 /* This will run the backlog queue. */
1552 release_sock(sk);
1553
1554 /* Supposedly, no process has access to the socket, but
1555 * the net layers still may.
1556 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
1557 * held and that should be grabbed before socket lock.
1558 */
1559 spin_lock_bh(&net->sctp.addr_wq_lock);
1560 bh_lock_sock_nested(sk);
1561
1562 /* Hold the sock, since sk_common_release() will put sock_put()
1563 * and we have just a little more cleanup.
1564 */
1565 sock_hold(sk);
1566 sk_common_release(sk);
1567
1568 bh_unlock_sock(sk);
1569 spin_unlock_bh(&net->sctp.addr_wq_lock);
1570
1571 sock_put(sk);
1572
1573 SCTP_DBG_OBJCNT_DEC(sock);
1574}
1575
1576/* Handle EPIPE error. */
1577static int sctp_error(struct sock *sk, int flags, int err)
1578{
1579 if (err == -EPIPE)
1580 err = sock_error(sk) ? : -EPIPE;
1581 if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
1582 send_sig(SIGPIPE, current, 0);
1583 return err;
1584}
1585
1586/* API 3.1.3 sendmsg() - UDP Style Syntax
1587 *
1588 * An application uses sendmsg() and recvmsg() calls to transmit data to
1589 * and receive data from its peer.
1590 *
1591 * ssize_t sendmsg(int socket, const struct msghdr *message,
1592 * int flags);
1593 *
1594 * socket - the socket descriptor of the endpoint.
1595 * message - pointer to the msghdr structure which contains a single
1596 * user message and possibly some ancillary data.
1597 *
1598 * See Section 5 for complete description of the data
1599 * structures.
1600 *
1601 * flags - flags sent or received with the user message, see Section
1602 * 5 for complete description of the flags.
1603 *
1604 * Note: This function could use a rewrite especially when explicit
1605 * connect support comes in.
1606 */
1607/* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */
1608
1609static int sctp_msghdr_parse(const struct msghdr *msg,
1610 struct sctp_cmsgs *cmsgs);
1611
1612static int sctp_sendmsg_parse(struct sock *sk, struct sctp_cmsgs *cmsgs,
1613 struct sctp_sndrcvinfo *srinfo,
1614 const struct msghdr *msg, size_t msg_len)
1615{
1616 __u16 sflags;
1617 int err;
1618
1619 if (sctp_sstate(sk, LISTENING) && sctp_style(sk, TCP))
1620 return -EPIPE;
1621
1622 if (msg_len > sk->sk_sndbuf)
1623 return -EMSGSIZE;
1624
1625 memset(cmsgs, 0, sizeof(*cmsgs));
1626 err = sctp_msghdr_parse(msg, cmsgs);
1627 if (err) {
1628 pr_debug("%s: msghdr parse err:%x\n", __func__, err);
1629 return err;
1630 }
1631
1632 memset(srinfo, 0, sizeof(*srinfo));
1633 if (cmsgs->srinfo) {
1634 srinfo->sinfo_stream = cmsgs->srinfo->sinfo_stream;
1635 srinfo->sinfo_flags = cmsgs->srinfo->sinfo_flags;
1636 srinfo->sinfo_ppid = cmsgs->srinfo->sinfo_ppid;
1637 srinfo->sinfo_context = cmsgs->srinfo->sinfo_context;
1638 srinfo->sinfo_assoc_id = cmsgs->srinfo->sinfo_assoc_id;
1639 srinfo->sinfo_timetolive = cmsgs->srinfo->sinfo_timetolive;
1640 }
1641
1642 if (cmsgs->sinfo) {
1643 srinfo->sinfo_stream = cmsgs->sinfo->snd_sid;
1644 srinfo->sinfo_flags = cmsgs->sinfo->snd_flags;
1645 srinfo->sinfo_ppid = cmsgs->sinfo->snd_ppid;
1646 srinfo->sinfo_context = cmsgs->sinfo->snd_context;
1647 srinfo->sinfo_assoc_id = cmsgs->sinfo->snd_assoc_id;
1648 }
1649
1650 if (cmsgs->prinfo) {
1651 srinfo->sinfo_timetolive = cmsgs->prinfo->pr_value;
1652 SCTP_PR_SET_POLICY(srinfo->sinfo_flags,
1653 cmsgs->prinfo->pr_policy);
1654 }
1655
1656 sflags = srinfo->sinfo_flags;
1657 if (!sflags && msg_len)
1658 return 0;
1659
1660 if (sctp_style(sk, TCP) && (sflags & (SCTP_EOF | SCTP_ABORT)))
1661 return -EINVAL;
1662
1663 if (((sflags & SCTP_EOF) && msg_len > 0) ||
1664 (!(sflags & (SCTP_EOF | SCTP_ABORT)) && msg_len == 0))
1665 return -EINVAL;
1666
1667 if ((sflags & SCTP_ADDR_OVER) && !msg->msg_name)
1668 return -EINVAL;
1669
1670 return 0;
1671}
1672
1673static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags,
1674 struct sctp_cmsgs *cmsgs,
1675 union sctp_addr *daddr,
1676 struct sctp_transport **tp)
1677{
1678 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001679 struct sctp_association *asoc;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001680 struct cmsghdr *cmsg;
1681 __be32 flowinfo = 0;
1682 struct sctp_af *af;
1683 int err;
1684
1685 *tp = NULL;
1686
1687 if (sflags & (SCTP_EOF | SCTP_ABORT))
1688 return -EINVAL;
1689
1690 if (sctp_style(sk, TCP) && (sctp_sstate(sk, ESTABLISHED) ||
1691 sctp_sstate(sk, CLOSING)))
1692 return -EADDRNOTAVAIL;
1693
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001694 /* Label connection socket for first association 1-to-many
1695 * style for client sequence socket()->sendmsg(). This
1696 * needs to be done before sctp_assoc_add_peer() as that will
1697 * set up the initial packet that needs to account for any
1698 * security ip options (CIPSO/CALIPSO) added to the packet.
1699 */
1700 af = sctp_get_af_specific(daddr->sa.sa_family);
1701 if (!af)
1702 return -EINVAL;
1703 err = security_sctp_bind_connect(sk, SCTP_SENDMSG_CONNECT,
1704 (struct sockaddr *)daddr,
1705 af->sockaddr_len);
1706 if (err < 0)
1707 return err;
1708
David Brazdil0f672f62019-12-10 10:32:29 +00001709 err = sctp_connect_new_asoc(ep, daddr, cmsgs->init, tp);
1710 if (err)
1711 return err;
1712 asoc = (*tp)->asoc;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001713
1714 if (!cmsgs->addrs_msg)
1715 return 0;
1716
1717 if (daddr->sa.sa_family == AF_INET6)
1718 flowinfo = daddr->v6.sin6_flowinfo;
1719
1720 /* sendv addr list parse */
1721 for_each_cmsghdr(cmsg, cmsgs->addrs_msg) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001722 union sctp_addr _daddr;
1723 int dlen;
1724
1725 if (cmsg->cmsg_level != IPPROTO_SCTP ||
1726 (cmsg->cmsg_type != SCTP_DSTADDRV4 &&
1727 cmsg->cmsg_type != SCTP_DSTADDRV6))
1728 continue;
1729
1730 daddr = &_daddr;
1731 memset(daddr, 0, sizeof(*daddr));
1732 dlen = cmsg->cmsg_len - sizeof(struct cmsghdr);
1733 if (cmsg->cmsg_type == SCTP_DSTADDRV4) {
1734 if (dlen < sizeof(struct in_addr)) {
1735 err = -EINVAL;
1736 goto free;
1737 }
1738
1739 dlen = sizeof(struct in_addr);
1740 daddr->v4.sin_family = AF_INET;
1741 daddr->v4.sin_port = htons(asoc->peer.port);
1742 memcpy(&daddr->v4.sin_addr, CMSG_DATA(cmsg), dlen);
1743 } else {
1744 if (dlen < sizeof(struct in6_addr)) {
1745 err = -EINVAL;
1746 goto free;
1747 }
1748
1749 dlen = sizeof(struct in6_addr);
1750 daddr->v6.sin6_flowinfo = flowinfo;
1751 daddr->v6.sin6_family = AF_INET6;
1752 daddr->v6.sin6_port = htons(asoc->peer.port);
1753 memcpy(&daddr->v6.sin6_addr, CMSG_DATA(cmsg), dlen);
1754 }
David Brazdil0f672f62019-12-10 10:32:29 +00001755
1756 err = sctp_connect_add_peer(asoc, daddr, sizeof(*daddr));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001757 if (err)
1758 goto free;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001759 }
1760
1761 return 0;
1762
1763free:
1764 sctp_association_free(asoc);
1765 return err;
1766}
1767
1768static int sctp_sendmsg_check_sflags(struct sctp_association *asoc,
1769 __u16 sflags, struct msghdr *msg,
1770 size_t msg_len)
1771{
1772 struct sock *sk = asoc->base.sk;
1773 struct net *net = sock_net(sk);
1774
1775 if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP))
1776 return -EPIPE;
1777
1778 if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP) &&
1779 !sctp_state(asoc, ESTABLISHED))
1780 return 0;
1781
1782 if (sflags & SCTP_EOF) {
1783 pr_debug("%s: shutting down association:%p\n", __func__, asoc);
1784 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1785
1786 return 0;
1787 }
1788
1789 if (sflags & SCTP_ABORT) {
1790 struct sctp_chunk *chunk;
1791
1792 chunk = sctp_make_abort_user(asoc, msg, msg_len);
1793 if (!chunk)
1794 return -ENOMEM;
1795
1796 pr_debug("%s: aborting association:%p\n", __func__, asoc);
1797 sctp_primitive_ABORT(net, asoc, chunk);
David Brazdil0f672f62019-12-10 10:32:29 +00001798 iov_iter_revert(&msg->msg_iter, msg_len);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001799
1800 return 0;
1801 }
1802
1803 return 1;
1804}
1805
1806static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
1807 struct msghdr *msg, size_t msg_len,
1808 struct sctp_transport *transport,
1809 struct sctp_sndrcvinfo *sinfo)
1810{
1811 struct sock *sk = asoc->base.sk;
1812 struct sctp_sock *sp = sctp_sk(sk);
1813 struct net *net = sock_net(sk);
1814 struct sctp_datamsg *datamsg;
1815 bool wait_connect = false;
1816 struct sctp_chunk *chunk;
1817 long timeo;
1818 int err;
1819
1820 if (sinfo->sinfo_stream >= asoc->stream.outcnt) {
1821 err = -EINVAL;
1822 goto err;
1823 }
1824
1825 if (unlikely(!SCTP_SO(&asoc->stream, sinfo->sinfo_stream)->ext)) {
1826 err = sctp_stream_init_ext(&asoc->stream, sinfo->sinfo_stream);
1827 if (err)
1828 goto err;
1829 }
1830
1831 if (sp->disable_fragments && msg_len > asoc->frag_point) {
1832 err = -EMSGSIZE;
1833 goto err;
1834 }
1835
1836 if (asoc->pmtu_pending) {
1837 if (sp->param_flags & SPP_PMTUD_ENABLE)
1838 sctp_assoc_sync_pmtu(asoc);
1839 asoc->pmtu_pending = 0;
1840 }
1841
David Brazdil0f672f62019-12-10 10:32:29 +00001842 if (sctp_wspace(asoc) < (int)msg_len)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001843 sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
1844
David Brazdil0f672f62019-12-10 10:32:29 +00001845 if (sk_under_memory_pressure(sk))
1846 sk_mem_reclaim(sk);
1847
1848 if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001849 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1850 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
1851 if (err)
1852 goto err;
1853 }
1854
1855 if (sctp_state(asoc, CLOSED)) {
1856 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
1857 if (err)
1858 goto err;
1859
David Brazdil0f672f62019-12-10 10:32:29 +00001860 if (asoc->ep->intl_enable) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001861 timeo = sock_sndtimeo(sk, 0);
1862 err = sctp_wait_for_connect(asoc, &timeo);
1863 if (err) {
1864 err = -ESRCH;
1865 goto err;
1866 }
1867 } else {
1868 wait_connect = true;
1869 }
1870
1871 pr_debug("%s: we associated primitively\n", __func__);
1872 }
1873
1874 datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
1875 if (IS_ERR(datamsg)) {
1876 err = PTR_ERR(datamsg);
1877 goto err;
1878 }
1879
1880 asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
1881
1882 list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
1883 sctp_chunk_hold(chunk);
1884 sctp_set_owner_w(chunk);
1885 chunk->transport = transport;
1886 }
1887
1888 err = sctp_primitive_SEND(net, asoc, datamsg);
1889 if (err) {
1890 sctp_datamsg_free(datamsg);
1891 goto err;
1892 }
1893
1894 pr_debug("%s: we sent primitively\n", __func__);
1895
1896 sctp_datamsg_put(datamsg);
1897
1898 if (unlikely(wait_connect)) {
1899 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1900 sctp_wait_for_connect(asoc, &timeo);
1901 }
1902
1903 err = msg_len;
1904
1905err:
1906 return err;
1907}
1908
1909static union sctp_addr *sctp_sendmsg_get_daddr(struct sock *sk,
1910 const struct msghdr *msg,
1911 struct sctp_cmsgs *cmsgs)
1912{
1913 union sctp_addr *daddr = NULL;
1914 int err;
1915
1916 if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
1917 int len = msg->msg_namelen;
1918
1919 if (len > sizeof(*daddr))
1920 len = sizeof(*daddr);
1921
1922 daddr = (union sctp_addr *)msg->msg_name;
1923
1924 err = sctp_verify_addr(sk, daddr, len);
1925 if (err)
1926 return ERR_PTR(err);
1927 }
1928
1929 return daddr;
1930}
1931
1932static void sctp_sendmsg_update_sinfo(struct sctp_association *asoc,
1933 struct sctp_sndrcvinfo *sinfo,
1934 struct sctp_cmsgs *cmsgs)
1935{
1936 if (!cmsgs->srinfo && !cmsgs->sinfo) {
1937 sinfo->sinfo_stream = asoc->default_stream;
1938 sinfo->sinfo_ppid = asoc->default_ppid;
1939 sinfo->sinfo_context = asoc->default_context;
1940 sinfo->sinfo_assoc_id = sctp_assoc2id(asoc);
1941
1942 if (!cmsgs->prinfo)
1943 sinfo->sinfo_flags = asoc->default_flags;
1944 }
1945
1946 if (!cmsgs->srinfo && !cmsgs->prinfo)
1947 sinfo->sinfo_timetolive = asoc->default_timetolive;
1948
1949 if (cmsgs->authinfo) {
1950 /* Reuse sinfo_tsn to indicate that authinfo was set and
1951 * sinfo_ssn to save the keyid on tx path.
1952 */
1953 sinfo->sinfo_tsn = 1;
1954 sinfo->sinfo_ssn = cmsgs->authinfo->auth_keynumber;
1955 }
1956}
1957
1958static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1959{
1960 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
1961 struct sctp_transport *transport = NULL;
1962 struct sctp_sndrcvinfo _sinfo, *sinfo;
David Brazdil0f672f62019-12-10 10:32:29 +00001963 struct sctp_association *asoc, *tmp;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001964 struct sctp_cmsgs cmsgs;
1965 union sctp_addr *daddr;
1966 bool new = false;
1967 __u16 sflags;
1968 int err;
1969
1970 /* Parse and get snd_info */
1971 err = sctp_sendmsg_parse(sk, &cmsgs, &_sinfo, msg, msg_len);
1972 if (err)
1973 goto out;
1974
1975 sinfo = &_sinfo;
1976 sflags = sinfo->sinfo_flags;
1977
1978 /* Get daddr from msg */
1979 daddr = sctp_sendmsg_get_daddr(sk, msg, &cmsgs);
1980 if (IS_ERR(daddr)) {
1981 err = PTR_ERR(daddr);
1982 goto out;
1983 }
1984
1985 lock_sock(sk);
1986
1987 /* SCTP_SENDALL process */
1988 if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
David Brazdil0f672f62019-12-10 10:32:29 +00001989 list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001990 err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
1991 msg_len);
1992 if (err == 0)
1993 continue;
1994 if (err < 0)
1995 goto out_unlock;
1996
1997 sctp_sendmsg_update_sinfo(asoc, sinfo, &cmsgs);
1998
1999 err = sctp_sendmsg_to_asoc(asoc, msg, msg_len,
2000 NULL, sinfo);
2001 if (err < 0)
2002 goto out_unlock;
2003
2004 iov_iter_revert(&msg->msg_iter, err);
2005 }
2006
2007 goto out_unlock;
2008 }
2009
2010 /* Get and check or create asoc */
2011 if (daddr) {
2012 asoc = sctp_endpoint_lookup_assoc(ep, daddr, &transport);
2013 if (asoc) {
2014 err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
2015 msg_len);
2016 if (err <= 0)
2017 goto out_unlock;
2018 } else {
2019 err = sctp_sendmsg_new_asoc(sk, sflags, &cmsgs, daddr,
2020 &transport);
2021 if (err)
2022 goto out_unlock;
2023
2024 asoc = transport->asoc;
2025 new = true;
2026 }
2027
2028 if (!sctp_style(sk, TCP) && !(sflags & SCTP_ADDR_OVER))
2029 transport = NULL;
2030 } else {
2031 asoc = sctp_id2assoc(sk, sinfo->sinfo_assoc_id);
2032 if (!asoc) {
2033 err = -EPIPE;
2034 goto out_unlock;
2035 }
2036
2037 err = sctp_sendmsg_check_sflags(asoc, sflags, msg, msg_len);
2038 if (err <= 0)
2039 goto out_unlock;
2040 }
2041
2042 /* Update snd_info with the asoc */
2043 sctp_sendmsg_update_sinfo(asoc, sinfo, &cmsgs);
2044
2045 /* Send msg to the asoc */
2046 err = sctp_sendmsg_to_asoc(asoc, msg, msg_len, transport, sinfo);
2047 if (err < 0 && err != -ESRCH && new)
2048 sctp_association_free(asoc);
2049
2050out_unlock:
2051 release_sock(sk);
2052out:
2053 return sctp_error(sk, msg->msg_flags, err);
2054}
2055
2056/* This is an extended version of skb_pull() that removes the data from the
2057 * start of a skb even when data is spread across the list of skb's in the
2058 * frag_list. len specifies the total amount of data that needs to be removed.
2059 * when 'len' bytes could be removed from the skb, it returns 0.
2060 * If 'len' exceeds the total skb length, it returns the no. of bytes that
2061 * could not be removed.
2062 */
2063static int sctp_skb_pull(struct sk_buff *skb, int len)
2064{
2065 struct sk_buff *list;
2066 int skb_len = skb_headlen(skb);
2067 int rlen;
2068
2069 if (len <= skb_len) {
2070 __skb_pull(skb, len);
2071 return 0;
2072 }
2073 len -= skb_len;
2074 __skb_pull(skb, skb_len);
2075
2076 skb_walk_frags(skb, list) {
2077 rlen = sctp_skb_pull(list, len);
2078 skb->len -= (len-rlen);
2079 skb->data_len -= (len-rlen);
2080
2081 if (!rlen)
2082 return 0;
2083
2084 len = rlen;
2085 }
2086
2087 return len;
2088}
2089
2090/* API 3.1.3 recvmsg() - UDP Style Syntax
2091 *
2092 * ssize_t recvmsg(int socket, struct msghdr *message,
2093 * int flags);
2094 *
2095 * socket - the socket descriptor of the endpoint.
2096 * message - pointer to the msghdr structure which contains a single
2097 * user message and possibly some ancillary data.
2098 *
2099 * See Section 5 for complete description of the data
2100 * structures.
2101 *
2102 * flags - flags sent or received with the user message, see Section
2103 * 5 for complete description of the flags.
2104 */
2105static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2106 int noblock, int flags, int *addr_len)
2107{
2108 struct sctp_ulpevent *event = NULL;
2109 struct sctp_sock *sp = sctp_sk(sk);
2110 struct sk_buff *skb, *head_skb;
2111 int copied;
2112 int err = 0;
2113 int skb_len;
2114
2115 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
2116 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
2117 addr_len);
2118
2119 lock_sock(sk);
2120
2121 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
2122 !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
2123 err = -ENOTCONN;
2124 goto out;
2125 }
2126
2127 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
2128 if (!skb)
2129 goto out;
2130
2131 /* Get the total length of the skb including any skb's in the
2132 * frag_list.
2133 */
2134 skb_len = skb->len;
2135
2136 copied = skb_len;
2137 if (copied > len)
2138 copied = len;
2139
2140 err = skb_copy_datagram_msg(skb, 0, msg, copied);
2141
2142 event = sctp_skb2event(skb);
2143
2144 if (err)
2145 goto out_free;
2146
2147 if (event->chunk && event->chunk->head_skb)
2148 head_skb = event->chunk->head_skb;
2149 else
2150 head_skb = skb;
2151 sock_recv_ts_and_drops(msg, sk, head_skb);
2152 if (sctp_ulpevent_is_notification(event)) {
2153 msg->msg_flags |= MSG_NOTIFICATION;
2154 sp->pf->event_msgname(event, msg->msg_name, addr_len);
2155 } else {
2156 sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len);
2157 }
2158
2159 /* Check if we allow SCTP_NXTINFO. */
2160 if (sp->recvnxtinfo)
2161 sctp_ulpevent_read_nxtinfo(event, msg, sk);
2162 /* Check if we allow SCTP_RCVINFO. */
2163 if (sp->recvrcvinfo)
2164 sctp_ulpevent_read_rcvinfo(event, msg);
2165 /* Check if we allow SCTP_SNDRCVINFO. */
David Brazdil0f672f62019-12-10 10:32:29 +00002166 if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_DATA_IO_EVENT))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002167 sctp_ulpevent_read_sndrcvinfo(event, msg);
2168
2169 err = copied;
2170
2171 /* If skb's length exceeds the user's buffer, update the skb and
2172 * push it back to the receive_queue so that the next call to
2173 * recvmsg() will return the remaining data. Don't set MSG_EOR.
2174 */
2175 if (skb_len > copied) {
2176 msg->msg_flags &= ~MSG_EOR;
2177 if (flags & MSG_PEEK)
2178 goto out_free;
2179 sctp_skb_pull(skb, copied);
2180 skb_queue_head(&sk->sk_receive_queue, skb);
2181
2182 /* When only partial message is copied to the user, increase
2183 * rwnd by that amount. If all the data in the skb is read,
2184 * rwnd is updated when the event is freed.
2185 */
2186 if (!sctp_ulpevent_is_notification(event))
2187 sctp_assoc_rwnd_increase(event->asoc, copied);
2188 goto out;
2189 } else if ((event->msg_flags & MSG_NOTIFICATION) ||
2190 (event->msg_flags & MSG_EOR))
2191 msg->msg_flags |= MSG_EOR;
2192 else
2193 msg->msg_flags &= ~MSG_EOR;
2194
2195out_free:
2196 if (flags & MSG_PEEK) {
2197 /* Release the skb reference acquired after peeking the skb in
2198 * sctp_skb_recv_datagram().
2199 */
2200 kfree_skb(skb);
2201 } else {
2202 /* Free the event which includes releasing the reference to
2203 * the owner of the skb, freeing the skb and updating the
2204 * rwnd.
2205 */
2206 sctp_ulpevent_free(event);
2207 }
2208out:
2209 release_sock(sk);
2210 return err;
2211}
2212
2213/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
2214 *
2215 * This option is a on/off flag. If enabled no SCTP message
2216 * fragmentation will be performed. Instead if a message being sent
2217 * exceeds the current PMTU size, the message will NOT be sent and
2218 * instead a error will be indicated to the user.
2219 */
2220static int sctp_setsockopt_disable_fragments(struct sock *sk,
2221 char __user *optval,
2222 unsigned int optlen)
2223{
2224 int val;
2225
2226 if (optlen < sizeof(int))
2227 return -EINVAL;
2228
2229 if (get_user(val, (int __user *)optval))
2230 return -EFAULT;
2231
2232 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;
2233
2234 return 0;
2235}
2236
2237static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2238 unsigned int optlen)
2239{
David Brazdil0f672f62019-12-10 10:32:29 +00002240 struct sctp_event_subscribe subscribe;
2241 __u8 *sn_type = (__u8 *)&subscribe;
2242 struct sctp_sock *sp = sctp_sk(sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002243 struct sctp_association *asoc;
David Brazdil0f672f62019-12-10 10:32:29 +00002244 int i;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002245
2246 if (optlen > sizeof(struct sctp_event_subscribe))
2247 return -EINVAL;
David Brazdil0f672f62019-12-10 10:32:29 +00002248
2249 if (copy_from_user(&subscribe, optval, optlen))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002250 return -EFAULT;
2251
David Brazdil0f672f62019-12-10 10:32:29 +00002252 for (i = 0; i < optlen; i++)
2253 sctp_ulpevent_type_set(&sp->subscribe, SCTP_SN_TYPE_BASE + i,
2254 sn_type[i]);
2255
2256 list_for_each_entry(asoc, &sp->ep->asocs, asocs)
2257 asoc->subscribe = sctp_sk(sk)->subscribe;
2258
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002259 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
2260 * if there is no data to be sent or retransmit, the stack will
2261 * immediately send up this notification.
2262 */
David Brazdil0f672f62019-12-10 10:32:29 +00002263 if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_SENDER_DRY_EVENT)) {
2264 struct sctp_ulpevent *event;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002265
David Brazdil0f672f62019-12-10 10:32:29 +00002266 asoc = sctp_id2assoc(sk, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002267 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
2268 event = sctp_ulpevent_make_sender_dry_event(asoc,
2269 GFP_USER | __GFP_NOWARN);
2270 if (!event)
2271 return -ENOMEM;
2272
2273 asoc->stream.si->enqueue_event(&asoc->ulpq, event);
2274 }
2275 }
2276
2277 return 0;
2278}
2279
2280/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
2281 *
2282 * This socket option is applicable to the UDP-style socket only. When
2283 * set it will cause associations that are idle for more than the
2284 * specified number of seconds to automatically close. An association
2285 * being idle is defined an association that has NOT sent or received
2286 * user data. The special value of '0' indicates that no automatic
2287 * close of any associations should be performed. The option expects an
2288 * integer defining the number of seconds of idle time before an
2289 * association is closed.
2290 */
2291static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2292 unsigned int optlen)
2293{
2294 struct sctp_sock *sp = sctp_sk(sk);
2295 struct net *net = sock_net(sk);
2296
2297 /* Applicable to UDP-style socket only */
2298 if (sctp_style(sk, TCP))
2299 return -EOPNOTSUPP;
2300 if (optlen != sizeof(int))
2301 return -EINVAL;
2302 if (copy_from_user(&sp->autoclose, optval, optlen))
2303 return -EFAULT;
2304
2305 if (sp->autoclose > net->sctp.max_autoclose)
2306 sp->autoclose = net->sctp.max_autoclose;
2307
2308 return 0;
2309}
2310
2311/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
2312 *
2313 * Applications can enable or disable heartbeats for any peer address of
2314 * an association, modify an address's heartbeat interval, force a
2315 * heartbeat to be sent immediately, and adjust the address's maximum
2316 * number of retransmissions sent before an address is considered
2317 * unreachable. The following structure is used to access and modify an
2318 * address's parameters:
2319 *
2320 * struct sctp_paddrparams {
2321 * sctp_assoc_t spp_assoc_id;
2322 * struct sockaddr_storage spp_address;
2323 * uint32_t spp_hbinterval;
2324 * uint16_t spp_pathmaxrxt;
2325 * uint32_t spp_pathmtu;
2326 * uint32_t spp_sackdelay;
2327 * uint32_t spp_flags;
2328 * uint32_t spp_ipv6_flowlabel;
2329 * uint8_t spp_dscp;
2330 * };
2331 *
2332 * spp_assoc_id - (one-to-many style socket) This is filled in the
2333 * application, and identifies the association for
2334 * this query.
2335 * spp_address - This specifies which address is of interest.
2336 * spp_hbinterval - This contains the value of the heartbeat interval,
2337 * in milliseconds. If a value of zero
2338 * is present in this field then no changes are to
2339 * be made to this parameter.
2340 * spp_pathmaxrxt - This contains the maximum number of
2341 * retransmissions before this address shall be
2342 * considered unreachable. If a value of zero
2343 * is present in this field then no changes are to
2344 * be made to this parameter.
2345 * spp_pathmtu - When Path MTU discovery is disabled the value
2346 * specified here will be the "fixed" path mtu.
2347 * Note that if the spp_address field is empty
2348 * then all associations on this address will
2349 * have this fixed path mtu set upon them.
2350 *
2351 * spp_sackdelay - When delayed sack is enabled, this value specifies
2352 * the number of milliseconds that sacks will be delayed
2353 * for. This value will apply to all addresses of an
2354 * association if the spp_address field is empty. Note
2355 * also, that if delayed sack is enabled and this
2356 * value is set to 0, no change is made to the last
2357 * recorded delayed sack timer value.
2358 *
2359 * spp_flags - These flags are used to control various features
2360 * on an association. The flag field may contain
2361 * zero or more of the following options.
2362 *
2363 * SPP_HB_ENABLE - Enable heartbeats on the
2364 * specified address. Note that if the address
2365 * field is empty all addresses for the association
2366 * have heartbeats enabled upon them.
2367 *
2368 * SPP_HB_DISABLE - Disable heartbeats on the
2369 * speicifed address. Note that if the address
2370 * field is empty all addresses for the association
2371 * will have their heartbeats disabled. Note also
2372 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
2373 * mutually exclusive, only one of these two should
2374 * be specified. Enabling both fields will have
2375 * undetermined results.
2376 *
2377 * SPP_HB_DEMAND - Request a user initiated heartbeat
2378 * to be made immediately.
2379 *
2380 * SPP_HB_TIME_IS_ZERO - Specify's that the time for
2381 * heartbeat delayis to be set to the value of 0
2382 * milliseconds.
2383 *
2384 * SPP_PMTUD_ENABLE - This field will enable PMTU
2385 * discovery upon the specified address. Note that
2386 * if the address feild is empty then all addresses
2387 * on the association are effected.
2388 *
2389 * SPP_PMTUD_DISABLE - This field will disable PMTU
2390 * discovery upon the specified address. Note that
2391 * if the address feild is empty then all addresses
2392 * on the association are effected. Not also that
2393 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
2394 * exclusive. Enabling both will have undetermined
2395 * results.
2396 *
2397 * SPP_SACKDELAY_ENABLE - Setting this flag turns
2398 * on delayed sack. The time specified in spp_sackdelay
2399 * is used to specify the sack delay for this address. Note
2400 * that if spp_address is empty then all addresses will
2401 * enable delayed sack and take on the sack delay
2402 * value specified in spp_sackdelay.
2403 * SPP_SACKDELAY_DISABLE - Setting this flag turns
2404 * off delayed sack. If the spp_address field is blank then
2405 * delayed sack is disabled for the entire association. Note
2406 * also that this field is mutually exclusive to
2407 * SPP_SACKDELAY_ENABLE, setting both will have undefined
2408 * results.
2409 *
2410 * SPP_IPV6_FLOWLABEL: Setting this flag enables the
2411 * setting of the IPV6 flow label value. The value is
2412 * contained in the spp_ipv6_flowlabel field.
2413 * Upon retrieval, this flag will be set to indicate that
2414 * the spp_ipv6_flowlabel field has a valid value returned.
2415 * If a specific destination address is set (in the
2416 * spp_address field), then the value returned is that of
2417 * the address. If just an association is specified (and
2418 * no address), then the association's default flow label
2419 * is returned. If neither an association nor a destination
2420 * is specified, then the socket's default flow label is
2421 * returned. For non-IPv6 sockets, this flag will be left
2422 * cleared.
2423 *
2424 * SPP_DSCP: Setting this flag enables the setting of the
2425 * Differentiated Services Code Point (DSCP) value
2426 * associated with either the association or a specific
2427 * address. The value is obtained in the spp_dscp field.
2428 * Upon retrieval, this flag will be set to indicate that
2429 * the spp_dscp field has a valid value returned. If a
2430 * specific destination address is set when called (in the
2431 * spp_address field), then that specific destination
2432 * address's DSCP value is returned. If just an association
2433 * is specified, then the association's default DSCP is
2434 * returned. If neither an association nor a destination is
2435 * specified, then the socket's default DSCP is returned.
2436 *
2437 * spp_ipv6_flowlabel
2438 * - This field is used in conjunction with the
2439 * SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
2440 * The 20 least significant bits are used for the flow
2441 * label. This setting has precedence over any IPv6-layer
2442 * setting.
2443 *
2444 * spp_dscp - This field is used in conjunction with the SPP_DSCP flag
2445 * and contains the DSCP. The 6 most significant bits are
2446 * used for the DSCP. This setting has precedence over any
2447 * IPv4- or IPv6- layer setting.
2448 */
2449static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2450 struct sctp_transport *trans,
2451 struct sctp_association *asoc,
2452 struct sctp_sock *sp,
2453 int hb_change,
2454 int pmtud_change,
2455 int sackdelay_change)
2456{
2457 int error;
2458
2459 if (params->spp_flags & SPP_HB_DEMAND && trans) {
2460 struct net *net = sock_net(trans->asoc->base.sk);
2461
2462 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
2463 if (error)
2464 return error;
2465 }
2466
2467 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
2468 * this field is ignored. Note also that a value of zero indicates
2469 * the current setting should be left unchanged.
2470 */
2471 if (params->spp_flags & SPP_HB_ENABLE) {
2472
2473 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
2474 * set. This lets us use 0 value when this flag
2475 * is set.
2476 */
2477 if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
2478 params->spp_hbinterval = 0;
2479
2480 if (params->spp_hbinterval ||
2481 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
2482 if (trans) {
2483 trans->hbinterval =
2484 msecs_to_jiffies(params->spp_hbinterval);
2485 } else if (asoc) {
2486 asoc->hbinterval =
2487 msecs_to_jiffies(params->spp_hbinterval);
2488 } else {
2489 sp->hbinterval = params->spp_hbinterval;
2490 }
2491 }
2492 }
2493
2494 if (hb_change) {
2495 if (trans) {
2496 trans->param_flags =
2497 (trans->param_flags & ~SPP_HB) | hb_change;
2498 } else if (asoc) {
2499 asoc->param_flags =
2500 (asoc->param_flags & ~SPP_HB) | hb_change;
2501 } else {
2502 sp->param_flags =
2503 (sp->param_flags & ~SPP_HB) | hb_change;
2504 }
2505 }
2506
2507 /* When Path MTU discovery is disabled the value specified here will
2508 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
2509 * include the flag SPP_PMTUD_DISABLE for this field to have any
2510 * effect).
2511 */
2512 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
2513 if (trans) {
2514 trans->pathmtu = params->spp_pathmtu;
2515 sctp_assoc_sync_pmtu(asoc);
2516 } else if (asoc) {
2517 sctp_assoc_set_pmtu(asoc, params->spp_pathmtu);
2518 } else {
2519 sp->pathmtu = params->spp_pathmtu;
2520 }
2521 }
2522
2523 if (pmtud_change) {
2524 if (trans) {
2525 int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
2526 (params->spp_flags & SPP_PMTUD_ENABLE);
2527 trans->param_flags =
2528 (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
2529 if (update) {
2530 sctp_transport_pmtu(trans, sctp_opt2sk(sp));
2531 sctp_assoc_sync_pmtu(asoc);
2532 }
2533 } else if (asoc) {
2534 asoc->param_flags =
2535 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
2536 } else {
2537 sp->param_flags =
2538 (sp->param_flags & ~SPP_PMTUD) | pmtud_change;
2539 }
2540 }
2541
2542 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
2543 * value of this field is ignored. Note also that a value of zero
2544 * indicates the current setting should be left unchanged.
2545 */
2546 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
2547 if (trans) {
2548 trans->sackdelay =
2549 msecs_to_jiffies(params->spp_sackdelay);
2550 } else if (asoc) {
2551 asoc->sackdelay =
2552 msecs_to_jiffies(params->spp_sackdelay);
2553 } else {
2554 sp->sackdelay = params->spp_sackdelay;
2555 }
2556 }
2557
2558 if (sackdelay_change) {
2559 if (trans) {
2560 trans->param_flags =
2561 (trans->param_flags & ~SPP_SACKDELAY) |
2562 sackdelay_change;
2563 } else if (asoc) {
2564 asoc->param_flags =
2565 (asoc->param_flags & ~SPP_SACKDELAY) |
2566 sackdelay_change;
2567 } else {
2568 sp->param_flags =
2569 (sp->param_flags & ~SPP_SACKDELAY) |
2570 sackdelay_change;
2571 }
2572 }
2573
2574 /* Note that a value of zero indicates the current setting should be
2575 left unchanged.
2576 */
2577 if (params->spp_pathmaxrxt) {
2578 if (trans) {
2579 trans->pathmaxrxt = params->spp_pathmaxrxt;
2580 } else if (asoc) {
2581 asoc->pathmaxrxt = params->spp_pathmaxrxt;
2582 } else {
2583 sp->pathmaxrxt = params->spp_pathmaxrxt;
2584 }
2585 }
2586
2587 if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
2588 if (trans) {
2589 if (trans->ipaddr.sa.sa_family == AF_INET6) {
2590 trans->flowlabel = params->spp_ipv6_flowlabel &
2591 SCTP_FLOWLABEL_VAL_MASK;
2592 trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
2593 }
2594 } else if (asoc) {
2595 struct sctp_transport *t;
2596
2597 list_for_each_entry(t, &asoc->peer.transport_addr_list,
2598 transports) {
2599 if (t->ipaddr.sa.sa_family != AF_INET6)
2600 continue;
2601 t->flowlabel = params->spp_ipv6_flowlabel &
2602 SCTP_FLOWLABEL_VAL_MASK;
2603 t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
2604 }
2605 asoc->flowlabel = params->spp_ipv6_flowlabel &
2606 SCTP_FLOWLABEL_VAL_MASK;
2607 asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
2608 } else if (sctp_opt2sk(sp)->sk_family == AF_INET6) {
2609 sp->flowlabel = params->spp_ipv6_flowlabel &
2610 SCTP_FLOWLABEL_VAL_MASK;
2611 sp->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
2612 }
2613 }
2614
2615 if (params->spp_flags & SPP_DSCP) {
2616 if (trans) {
2617 trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
2618 trans->dscp |= SCTP_DSCP_SET_MASK;
2619 } else if (asoc) {
2620 struct sctp_transport *t;
2621
2622 list_for_each_entry(t, &asoc->peer.transport_addr_list,
2623 transports) {
2624 t->dscp = params->spp_dscp &
2625 SCTP_DSCP_VAL_MASK;
2626 t->dscp |= SCTP_DSCP_SET_MASK;
2627 }
2628 asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
2629 asoc->dscp |= SCTP_DSCP_SET_MASK;
2630 } else {
2631 sp->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
2632 sp->dscp |= SCTP_DSCP_SET_MASK;
2633 }
2634 }
2635
2636 return 0;
2637}
2638
2639static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2640 char __user *optval,
2641 unsigned int optlen)
2642{
2643 struct sctp_paddrparams params;
2644 struct sctp_transport *trans = NULL;
2645 struct sctp_association *asoc = NULL;
2646 struct sctp_sock *sp = sctp_sk(sk);
2647 int error;
2648 int hb_change, pmtud_change, sackdelay_change;
2649
2650 if (optlen == sizeof(params)) {
2651 if (copy_from_user(&params, optval, optlen))
2652 return -EFAULT;
2653 } else if (optlen == ALIGN(offsetof(struct sctp_paddrparams,
2654 spp_ipv6_flowlabel), 4)) {
2655 if (copy_from_user(&params, optval, optlen))
2656 return -EFAULT;
2657 if (params.spp_flags & (SPP_DSCP | SPP_IPV6_FLOWLABEL))
2658 return -EINVAL;
2659 } else {
2660 return -EINVAL;
2661 }
2662
2663 /* Validate flags and value parameters. */
2664 hb_change = params.spp_flags & SPP_HB;
2665 pmtud_change = params.spp_flags & SPP_PMTUD;
2666 sackdelay_change = params.spp_flags & SPP_SACKDELAY;
2667
2668 if (hb_change == SPP_HB ||
2669 pmtud_change == SPP_PMTUD ||
2670 sackdelay_change == SPP_SACKDELAY ||
2671 params.spp_sackdelay > 500 ||
2672 (params.spp_pathmtu &&
2673 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
2674 return -EINVAL;
2675
2676 /* If an address other than INADDR_ANY is specified, and
2677 * no transport is found, then the request is invalid.
2678 */
2679 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
2680 trans = sctp_addr_id2transport(sk, &params.spp_address,
2681 params.spp_assoc_id);
2682 if (!trans)
2683 return -EINVAL;
2684 }
2685
David Brazdil0f672f62019-12-10 10:32:29 +00002686 /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
2687 * socket is a one to many style socket, and an association
2688 * was not found, then the id was invalid.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002689 */
2690 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00002691 if (!asoc && params.spp_assoc_id != SCTP_FUTURE_ASSOC &&
2692 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002693 return -EINVAL;
2694
2695 /* Heartbeat demand can only be sent on a transport or
2696 * association, but not a socket.
2697 */
2698 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
2699 return -EINVAL;
2700
2701 /* Process parameters. */
2702 error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2703 hb_change, pmtud_change,
2704 sackdelay_change);
2705
2706 if (error)
2707 return error;
2708
2709 /* If changes are for association, also apply parameters to each
2710 * transport.
2711 */
2712 if (!trans && asoc) {
2713 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2714 transports) {
2715 sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2716 hb_change, pmtud_change,
2717 sackdelay_change);
2718 }
2719 }
2720
2721 return 0;
2722}
2723
2724static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
2725{
2726 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
2727}
2728
2729static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
2730{
2731 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
2732}
2733
David Brazdil0f672f62019-12-10 10:32:29 +00002734static void sctp_apply_asoc_delayed_ack(struct sctp_sack_info *params,
2735 struct sctp_association *asoc)
2736{
2737 struct sctp_transport *trans;
2738
2739 if (params->sack_delay) {
2740 asoc->sackdelay = msecs_to_jiffies(params->sack_delay);
2741 asoc->param_flags =
2742 sctp_spp_sackdelay_enable(asoc->param_flags);
2743 }
2744 if (params->sack_freq == 1) {
2745 asoc->param_flags =
2746 sctp_spp_sackdelay_disable(asoc->param_flags);
2747 } else if (params->sack_freq > 1) {
2748 asoc->sackfreq = params->sack_freq;
2749 asoc->param_flags =
2750 sctp_spp_sackdelay_enable(asoc->param_flags);
2751 }
2752
2753 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2754 transports) {
2755 if (params->sack_delay) {
2756 trans->sackdelay = msecs_to_jiffies(params->sack_delay);
2757 trans->param_flags =
2758 sctp_spp_sackdelay_enable(trans->param_flags);
2759 }
2760 if (params->sack_freq == 1) {
2761 trans->param_flags =
2762 sctp_spp_sackdelay_disable(trans->param_flags);
2763 } else if (params->sack_freq > 1) {
2764 trans->sackfreq = params->sack_freq;
2765 trans->param_flags =
2766 sctp_spp_sackdelay_enable(trans->param_flags);
2767 }
2768 }
2769}
2770
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002771/*
2772 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
2773 *
2774 * This option will effect the way delayed acks are performed. This
2775 * option allows you to get or set the delayed ack time, in
2776 * milliseconds. It also allows changing the delayed ack frequency.
2777 * Changing the frequency to 1 disables the delayed sack algorithm. If
2778 * the assoc_id is 0, then this sets or gets the endpoints default
2779 * values. If the assoc_id field is non-zero, then the set or get
2780 * effects the specified association for the one to many model (the
2781 * assoc_id field is ignored by the one to one model). Note that if
2782 * sack_delay or sack_freq are 0 when setting this option, then the
2783 * current values will remain unchanged.
2784 *
2785 * struct sctp_sack_info {
2786 * sctp_assoc_t sack_assoc_id;
2787 * uint32_t sack_delay;
2788 * uint32_t sack_freq;
2789 * };
2790 *
2791 * sack_assoc_id - This parameter, indicates which association the user
2792 * is performing an action upon. Note that if this field's value is
2793 * zero then the endpoints default value is changed (effecting future
2794 * associations only).
2795 *
2796 * sack_delay - This parameter contains the number of milliseconds that
2797 * the user is requesting the delayed ACK timer be set to. Note that
2798 * this value is defined in the standard to be between 200 and 500
2799 * milliseconds.
2800 *
2801 * sack_freq - This parameter contains the number of packets that must
2802 * be received before a sack is sent without waiting for the delay
2803 * timer to expire. The default value for this is 2, setting this
2804 * value to 1 will disable the delayed sack algorithm.
2805 */
2806
2807static int sctp_setsockopt_delayed_ack(struct sock *sk,
2808 char __user *optval, unsigned int optlen)
2809{
David Brazdil0f672f62019-12-10 10:32:29 +00002810 struct sctp_sock *sp = sctp_sk(sk);
2811 struct sctp_association *asoc;
2812 struct sctp_sack_info params;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002813
2814 if (optlen == sizeof(struct sctp_sack_info)) {
2815 if (copy_from_user(&params, optval, optlen))
2816 return -EFAULT;
2817
2818 if (params.sack_delay == 0 && params.sack_freq == 0)
2819 return 0;
2820 } else if (optlen == sizeof(struct sctp_assoc_value)) {
2821 pr_warn_ratelimited(DEPRECATED
2822 "%s (pid %d) "
2823 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
2824 "Use struct sctp_sack_info instead\n",
2825 current->comm, task_pid_nr(current));
2826 if (copy_from_user(&params, optval, optlen))
2827 return -EFAULT;
2828
2829 if (params.sack_delay == 0)
2830 params.sack_freq = 1;
2831 else
2832 params.sack_freq = 0;
2833 } else
2834 return -EINVAL;
2835
2836 /* Validate value parameter. */
2837 if (params.sack_delay > 500)
2838 return -EINVAL;
2839
David Brazdil0f672f62019-12-10 10:32:29 +00002840 /* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the
2841 * socket is a one to many style socket, and an association
2842 * was not found, then the id was invalid.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002843 */
2844 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00002845 if (!asoc && params.sack_assoc_id > SCTP_ALL_ASSOC &&
2846 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002847 return -EINVAL;
2848
David Brazdil0f672f62019-12-10 10:32:29 +00002849 if (asoc) {
2850 sctp_apply_asoc_delayed_ack(&params, asoc);
2851
2852 return 0;
2853 }
2854
2855 if (sctp_style(sk, TCP))
2856 params.sack_assoc_id = SCTP_FUTURE_ASSOC;
2857
2858 if (params.sack_assoc_id == SCTP_FUTURE_ASSOC ||
2859 params.sack_assoc_id == SCTP_ALL_ASSOC) {
2860 if (params.sack_delay) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002861 sp->sackdelay = params.sack_delay;
2862 sp->param_flags =
2863 sctp_spp_sackdelay_enable(sp->param_flags);
2864 }
David Brazdil0f672f62019-12-10 10:32:29 +00002865 if (params.sack_freq == 1) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002866 sp->param_flags =
2867 sctp_spp_sackdelay_disable(sp->param_flags);
David Brazdil0f672f62019-12-10 10:32:29 +00002868 } else if (params.sack_freq > 1) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002869 sp->sackfreq = params.sack_freq;
2870 sp->param_flags =
2871 sctp_spp_sackdelay_enable(sp->param_flags);
2872 }
2873 }
2874
David Brazdil0f672f62019-12-10 10:32:29 +00002875 if (params.sack_assoc_id == SCTP_CURRENT_ASSOC ||
2876 params.sack_assoc_id == SCTP_ALL_ASSOC)
2877 list_for_each_entry(asoc, &sp->ep->asocs, asocs)
2878 sctp_apply_asoc_delayed_ack(&params, asoc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002879
2880 return 0;
2881}
2882
2883/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
2884 *
2885 * Applications can specify protocol parameters for the default association
2886 * initialization. The option name argument to setsockopt() and getsockopt()
2887 * is SCTP_INITMSG.
2888 *
2889 * Setting initialization parameters is effective only on an unconnected
2890 * socket (for UDP-style sockets only future associations are effected
2891 * by the change). With TCP-style sockets, this option is inherited by
2892 * sockets derived from a listener socket.
2893 */
2894static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
2895{
2896 struct sctp_initmsg sinit;
2897 struct sctp_sock *sp = sctp_sk(sk);
2898
2899 if (optlen != sizeof(struct sctp_initmsg))
2900 return -EINVAL;
2901 if (copy_from_user(&sinit, optval, optlen))
2902 return -EFAULT;
2903
2904 if (sinit.sinit_num_ostreams)
2905 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
2906 if (sinit.sinit_max_instreams)
2907 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
2908 if (sinit.sinit_max_attempts)
2909 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
2910 if (sinit.sinit_max_init_timeo)
2911 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;
2912
2913 return 0;
2914}
2915
2916/*
2917 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
2918 *
2919 * Applications that wish to use the sendto() system call may wish to
2920 * specify a default set of parameters that would normally be supplied
2921 * through the inclusion of ancillary data. This socket option allows
2922 * such an application to set the default sctp_sndrcvinfo structure.
2923 * The application that wishes to use this socket option simply passes
2924 * in to this call the sctp_sndrcvinfo structure defined in Section
2925 * 5.2.2) The input parameters accepted by this call include
2926 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
2927 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
2928 * to this call if the caller is using the UDP model.
2929 */
2930static int sctp_setsockopt_default_send_param(struct sock *sk,
2931 char __user *optval,
2932 unsigned int optlen)
2933{
2934 struct sctp_sock *sp = sctp_sk(sk);
2935 struct sctp_association *asoc;
2936 struct sctp_sndrcvinfo info;
2937
2938 if (optlen != sizeof(info))
2939 return -EINVAL;
2940 if (copy_from_user(&info, optval, optlen))
2941 return -EFAULT;
2942 if (info.sinfo_flags &
2943 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
2944 SCTP_ABORT | SCTP_EOF))
2945 return -EINVAL;
2946
2947 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00002948 if (!asoc && info.sinfo_assoc_id > SCTP_ALL_ASSOC &&
2949 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002950 return -EINVAL;
David Brazdil0f672f62019-12-10 10:32:29 +00002951
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002952 if (asoc) {
2953 asoc->default_stream = info.sinfo_stream;
2954 asoc->default_flags = info.sinfo_flags;
2955 asoc->default_ppid = info.sinfo_ppid;
2956 asoc->default_context = info.sinfo_context;
2957 asoc->default_timetolive = info.sinfo_timetolive;
David Brazdil0f672f62019-12-10 10:32:29 +00002958
2959 return 0;
2960 }
2961
2962 if (sctp_style(sk, TCP))
2963 info.sinfo_assoc_id = SCTP_FUTURE_ASSOC;
2964
2965 if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC ||
2966 info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002967 sp->default_stream = info.sinfo_stream;
2968 sp->default_flags = info.sinfo_flags;
2969 sp->default_ppid = info.sinfo_ppid;
2970 sp->default_context = info.sinfo_context;
2971 sp->default_timetolive = info.sinfo_timetolive;
2972 }
2973
David Brazdil0f672f62019-12-10 10:32:29 +00002974 if (info.sinfo_assoc_id == SCTP_CURRENT_ASSOC ||
2975 info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
2976 list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
2977 asoc->default_stream = info.sinfo_stream;
2978 asoc->default_flags = info.sinfo_flags;
2979 asoc->default_ppid = info.sinfo_ppid;
2980 asoc->default_context = info.sinfo_context;
2981 asoc->default_timetolive = info.sinfo_timetolive;
2982 }
2983 }
2984
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002985 return 0;
2986}
2987
2988/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
2989 * (SCTP_DEFAULT_SNDINFO)
2990 */
2991static int sctp_setsockopt_default_sndinfo(struct sock *sk,
2992 char __user *optval,
2993 unsigned int optlen)
2994{
2995 struct sctp_sock *sp = sctp_sk(sk);
2996 struct sctp_association *asoc;
2997 struct sctp_sndinfo info;
2998
2999 if (optlen != sizeof(info))
3000 return -EINVAL;
3001 if (copy_from_user(&info, optval, optlen))
3002 return -EFAULT;
3003 if (info.snd_flags &
3004 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
3005 SCTP_ABORT | SCTP_EOF))
3006 return -EINVAL;
3007
3008 asoc = sctp_id2assoc(sk, info.snd_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00003009 if (!asoc && info.snd_assoc_id > SCTP_ALL_ASSOC &&
3010 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003011 return -EINVAL;
David Brazdil0f672f62019-12-10 10:32:29 +00003012
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003013 if (asoc) {
3014 asoc->default_stream = info.snd_sid;
3015 asoc->default_flags = info.snd_flags;
3016 asoc->default_ppid = info.snd_ppid;
3017 asoc->default_context = info.snd_context;
David Brazdil0f672f62019-12-10 10:32:29 +00003018
3019 return 0;
3020 }
3021
3022 if (sctp_style(sk, TCP))
3023 info.snd_assoc_id = SCTP_FUTURE_ASSOC;
3024
3025 if (info.snd_assoc_id == SCTP_FUTURE_ASSOC ||
3026 info.snd_assoc_id == SCTP_ALL_ASSOC) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003027 sp->default_stream = info.snd_sid;
3028 sp->default_flags = info.snd_flags;
3029 sp->default_ppid = info.snd_ppid;
3030 sp->default_context = info.snd_context;
3031 }
3032
David Brazdil0f672f62019-12-10 10:32:29 +00003033 if (info.snd_assoc_id == SCTP_CURRENT_ASSOC ||
3034 info.snd_assoc_id == SCTP_ALL_ASSOC) {
3035 list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
3036 asoc->default_stream = info.snd_sid;
3037 asoc->default_flags = info.snd_flags;
3038 asoc->default_ppid = info.snd_ppid;
3039 asoc->default_context = info.snd_context;
3040 }
3041 }
3042
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003043 return 0;
3044}
3045
3046/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
3047 *
3048 * Requests that the local SCTP stack use the enclosed peer address as
3049 * the association primary. The enclosed address must be one of the
3050 * association peer's addresses.
3051 */
3052static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
3053 unsigned int optlen)
3054{
3055 struct sctp_prim prim;
3056 struct sctp_transport *trans;
3057 struct sctp_af *af;
3058 int err;
3059
3060 if (optlen != sizeof(struct sctp_prim))
3061 return -EINVAL;
3062
3063 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
3064 return -EFAULT;
3065
3066 /* Allow security module to validate address but need address len. */
3067 af = sctp_get_af_specific(prim.ssp_addr.ss_family);
3068 if (!af)
3069 return -EINVAL;
3070
3071 err = security_sctp_bind_connect(sk, SCTP_PRIMARY_ADDR,
3072 (struct sockaddr *)&prim.ssp_addr,
3073 af->sockaddr_len);
3074 if (err)
3075 return err;
3076
3077 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
3078 if (!trans)
3079 return -EINVAL;
3080
3081 sctp_assoc_set_primary(trans->asoc, trans);
3082
3083 return 0;
3084}
3085
3086/*
3087 * 7.1.5 SCTP_NODELAY
3088 *
3089 * Turn on/off any Nagle-like algorithm. This means that packets are
3090 * generally sent as soon as possible and no unnecessary delays are
3091 * introduced, at the cost of more packets in the network. Expects an
3092 * integer boolean flag.
3093 */
3094static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
3095 unsigned int optlen)
3096{
3097 int val;
3098
3099 if (optlen < sizeof(int))
3100 return -EINVAL;
3101 if (get_user(val, (int __user *)optval))
3102 return -EFAULT;
3103
3104 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
3105 return 0;
3106}
3107
3108/*
3109 *
3110 * 7.1.1 SCTP_RTOINFO
3111 *
3112 * The protocol parameters used to initialize and bound retransmission
3113 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
3114 * and modify these parameters.
3115 * All parameters are time values, in milliseconds. A value of 0, when
3116 * modifying the parameters, indicates that the current value should not
3117 * be changed.
3118 *
3119 */
3120static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
3121{
3122 struct sctp_rtoinfo rtoinfo;
3123 struct sctp_association *asoc;
3124 unsigned long rto_min, rto_max;
3125 struct sctp_sock *sp = sctp_sk(sk);
3126
3127 if (optlen != sizeof (struct sctp_rtoinfo))
3128 return -EINVAL;
3129
3130 if (copy_from_user(&rtoinfo, optval, optlen))
3131 return -EFAULT;
3132
3133 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
3134
3135 /* Set the values to the specific association */
David Brazdil0f672f62019-12-10 10:32:29 +00003136 if (!asoc && rtoinfo.srto_assoc_id != SCTP_FUTURE_ASSOC &&
3137 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003138 return -EINVAL;
3139
3140 rto_max = rtoinfo.srto_max;
3141 rto_min = rtoinfo.srto_min;
3142
3143 if (rto_max)
3144 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
3145 else
3146 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
3147
3148 if (rto_min)
3149 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
3150 else
3151 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
3152
3153 if (rto_min > rto_max)
3154 return -EINVAL;
3155
3156 if (asoc) {
3157 if (rtoinfo.srto_initial != 0)
3158 asoc->rto_initial =
3159 msecs_to_jiffies(rtoinfo.srto_initial);
3160 asoc->rto_max = rto_max;
3161 asoc->rto_min = rto_min;
3162 } else {
3163 /* If there is no association or the association-id = 0
3164 * set the values to the endpoint.
3165 */
3166 if (rtoinfo.srto_initial != 0)
3167 sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
3168 sp->rtoinfo.srto_max = rto_max;
3169 sp->rtoinfo.srto_min = rto_min;
3170 }
3171
3172 return 0;
3173}
3174
3175/*
3176 *
3177 * 7.1.2 SCTP_ASSOCINFO
3178 *
3179 * This option is used to tune the maximum retransmission attempts
3180 * of the association.
3181 * Returns an error if the new association retransmission value is
3182 * greater than the sum of the retransmission value of the peer.
3183 * See [SCTP] for more information.
3184 *
3185 */
3186static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
3187{
3188
3189 struct sctp_assocparams assocparams;
3190 struct sctp_association *asoc;
3191
3192 if (optlen != sizeof(struct sctp_assocparams))
3193 return -EINVAL;
3194 if (copy_from_user(&assocparams, optval, optlen))
3195 return -EFAULT;
3196
3197 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
3198
David Brazdil0f672f62019-12-10 10:32:29 +00003199 if (!asoc && assocparams.sasoc_assoc_id != SCTP_FUTURE_ASSOC &&
3200 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003201 return -EINVAL;
3202
3203 /* Set the values to the specific association */
3204 if (asoc) {
3205 if (assocparams.sasoc_asocmaxrxt != 0) {
3206 __u32 path_sum = 0;
3207 int paths = 0;
3208 struct sctp_transport *peer_addr;
3209
3210 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
3211 transports) {
3212 path_sum += peer_addr->pathmaxrxt;
3213 paths++;
3214 }
3215
3216 /* Only validate asocmaxrxt if we have more than
3217 * one path/transport. We do this because path
3218 * retransmissions are only counted when we have more
3219 * then one path.
3220 */
3221 if (paths > 1 &&
3222 assocparams.sasoc_asocmaxrxt > path_sum)
3223 return -EINVAL;
3224
3225 asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
3226 }
3227
3228 if (assocparams.sasoc_cookie_life != 0)
3229 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life);
3230 } else {
3231 /* Set the values to the endpoint */
3232 struct sctp_sock *sp = sctp_sk(sk);
3233
3234 if (assocparams.sasoc_asocmaxrxt != 0)
3235 sp->assocparams.sasoc_asocmaxrxt =
3236 assocparams.sasoc_asocmaxrxt;
3237 if (assocparams.sasoc_cookie_life != 0)
3238 sp->assocparams.sasoc_cookie_life =
3239 assocparams.sasoc_cookie_life;
3240 }
3241 return 0;
3242}
3243
3244/*
3245 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
3246 *
3247 * This socket option is a boolean flag which turns on or off mapped V4
3248 * addresses. If this option is turned on and the socket is type
3249 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
3250 * If this option is turned off, then no mapping will be done of V4
3251 * addresses and a user will receive both PF_INET6 and PF_INET type
3252 * addresses on the socket.
3253 */
3254static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
3255{
3256 int val;
3257 struct sctp_sock *sp = sctp_sk(sk);
3258
3259 if (optlen < sizeof(int))
3260 return -EINVAL;
3261 if (get_user(val, (int __user *)optval))
3262 return -EFAULT;
3263 if (val)
3264 sp->v4mapped = 1;
3265 else
3266 sp->v4mapped = 0;
3267
3268 return 0;
3269}
3270
3271/*
3272 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
3273 * This option will get or set the maximum size to put in any outgoing
3274 * SCTP DATA chunk. If a message is larger than this size it will be
3275 * fragmented by SCTP into the specified size. Note that the underlying
3276 * SCTP implementation may fragment into smaller sized chunks when the
3277 * PMTU of the underlying association is smaller than the value set by
3278 * the user. The default value for this option is '0' which indicates
3279 * the user is NOT limiting fragmentation and only the PMTU will effect
3280 * SCTP's choice of DATA chunk size. Note also that values set larger
3281 * than the maximum size of an IP datagram will effectively let SCTP
3282 * control fragmentation (i.e. the same as setting this option to 0).
3283 *
3284 * The following structure is used to access and modify this parameter:
3285 *
3286 * struct sctp_assoc_value {
3287 * sctp_assoc_t assoc_id;
3288 * uint32_t assoc_value;
3289 * };
3290 *
3291 * assoc_id: This parameter is ignored for one-to-one style sockets.
3292 * For one-to-many style sockets this parameter indicates which
3293 * association the user is performing an action upon. Note that if
3294 * this field's value is zero then the endpoints default value is
3295 * changed (effecting future associations only).
3296 * assoc_value: This parameter specifies the maximum size in bytes.
3297 */
3298static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
3299{
3300 struct sctp_sock *sp = sctp_sk(sk);
3301 struct sctp_assoc_value params;
3302 struct sctp_association *asoc;
3303 int val;
3304
3305 if (optlen == sizeof(int)) {
3306 pr_warn_ratelimited(DEPRECATED
3307 "%s (pid %d) "
3308 "Use of int in maxseg socket option.\n"
3309 "Use struct sctp_assoc_value instead\n",
3310 current->comm, task_pid_nr(current));
3311 if (copy_from_user(&val, optval, optlen))
3312 return -EFAULT;
David Brazdil0f672f62019-12-10 10:32:29 +00003313 params.assoc_id = SCTP_FUTURE_ASSOC;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003314 } else if (optlen == sizeof(struct sctp_assoc_value)) {
3315 if (copy_from_user(&params, optval, optlen))
3316 return -EFAULT;
3317 val = params.assoc_value;
3318 } else {
3319 return -EINVAL;
3320 }
3321
3322 asoc = sctp_id2assoc(sk, params.assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00003323 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
3324 sctp_style(sk, UDP))
3325 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003326
3327 if (val) {
3328 int min_len, max_len;
3329 __u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) :
3330 sizeof(struct sctp_data_chunk);
3331
David Brazdil0f672f62019-12-10 10:32:29 +00003332 min_len = sctp_min_frag_point(sp, datasize);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003333 max_len = SCTP_MAX_CHUNK_LEN - datasize;
3334
3335 if (val < min_len || val > max_len)
3336 return -EINVAL;
3337 }
3338
3339 if (asoc) {
3340 asoc->user_frag = val;
3341 sctp_assoc_update_frag_point(asoc);
3342 } else {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003343 sp->user_frag = val;
3344 }
3345
3346 return 0;
3347}
3348
3349
3350/*
3351 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
3352 *
3353 * Requests that the peer mark the enclosed address as the association
3354 * primary. The enclosed address must be one of the association's
3355 * locally bound addresses. The following structure is used to make a
3356 * set primary request:
3357 */
3358static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
3359 unsigned int optlen)
3360{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003361 struct sctp_sock *sp;
3362 struct sctp_association *asoc = NULL;
3363 struct sctp_setpeerprim prim;
3364 struct sctp_chunk *chunk;
3365 struct sctp_af *af;
3366 int err;
3367
3368 sp = sctp_sk(sk);
3369
David Brazdil0f672f62019-12-10 10:32:29 +00003370 if (!sp->ep->asconf_enable)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003371 return -EPERM;
3372
3373 if (optlen != sizeof(struct sctp_setpeerprim))
3374 return -EINVAL;
3375
3376 if (copy_from_user(&prim, optval, optlen))
3377 return -EFAULT;
3378
3379 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
3380 if (!asoc)
3381 return -EINVAL;
3382
3383 if (!asoc->peer.asconf_capable)
3384 return -EPERM;
3385
3386 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
3387 return -EPERM;
3388
3389 if (!sctp_state(asoc, ESTABLISHED))
3390 return -ENOTCONN;
3391
3392 af = sctp_get_af_specific(prim.sspp_addr.ss_family);
3393 if (!af)
3394 return -EINVAL;
3395
3396 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
3397 return -EADDRNOTAVAIL;
3398
3399 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
3400 return -EADDRNOTAVAIL;
3401
3402 /* Allow security module to validate address. */
3403 err = security_sctp_bind_connect(sk, SCTP_SET_PEER_PRIMARY_ADDR,
3404 (struct sockaddr *)&prim.sspp_addr,
3405 af->sockaddr_len);
3406 if (err)
3407 return err;
3408
3409 /* Create an ASCONF chunk with SET_PRIMARY parameter */
3410 chunk = sctp_make_asconf_set_prim(asoc,
3411 (union sctp_addr *)&prim.sspp_addr);
3412 if (!chunk)
3413 return -ENOMEM;
3414
3415 err = sctp_send_asconf(asoc, chunk);
3416
3417 pr_debug("%s: we set peer primary addr primitively\n", __func__);
3418
3419 return err;
3420}
3421
3422static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
3423 unsigned int optlen)
3424{
3425 struct sctp_setadaptation adaptation;
3426
3427 if (optlen != sizeof(struct sctp_setadaptation))
3428 return -EINVAL;
3429 if (copy_from_user(&adaptation, optval, optlen))
3430 return -EFAULT;
3431
3432 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;
3433
3434 return 0;
3435}
3436
3437/*
3438 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
3439 *
3440 * The context field in the sctp_sndrcvinfo structure is normally only
3441 * used when a failed message is retrieved holding the value that was
3442 * sent down on the actual send call. This option allows the setting of
3443 * a default context on an association basis that will be received on
3444 * reading messages from the peer. This is especially helpful in the
3445 * one-2-many model for an application to keep some reference to an
3446 * internal state machine that is processing messages on the
3447 * association. Note that the setting of this value only effects
3448 * received messages from the peer and does not effect the value that is
3449 * saved with outbound messages.
3450 */
3451static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
3452 unsigned int optlen)
3453{
David Brazdil0f672f62019-12-10 10:32:29 +00003454 struct sctp_sock *sp = sctp_sk(sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003455 struct sctp_assoc_value params;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003456 struct sctp_association *asoc;
3457
3458 if (optlen != sizeof(struct sctp_assoc_value))
3459 return -EINVAL;
3460 if (copy_from_user(&params, optval, optlen))
3461 return -EFAULT;
3462
David Brazdil0f672f62019-12-10 10:32:29 +00003463 asoc = sctp_id2assoc(sk, params.assoc_id);
3464 if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
3465 sctp_style(sk, UDP))
3466 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003467
David Brazdil0f672f62019-12-10 10:32:29 +00003468 if (asoc) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003469 asoc->default_rcv_context = params.assoc_value;
David Brazdil0f672f62019-12-10 10:32:29 +00003470
3471 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003472 }
3473
David Brazdil0f672f62019-12-10 10:32:29 +00003474 if (sctp_style(sk, TCP))
3475 params.assoc_id = SCTP_FUTURE_ASSOC;
3476
3477 if (params.assoc_id == SCTP_FUTURE_ASSOC ||
3478 params.assoc_id == SCTP_ALL_ASSOC)
3479 sp->default_rcv_context = params.assoc_value;
3480
3481 if (params.assoc_id == SCTP_CURRENT_ASSOC ||
3482 params.assoc_id == SCTP_ALL_ASSOC)
3483 list_for_each_entry(asoc, &sp->ep->asocs, asocs)
3484 asoc->default_rcv_context = params.assoc_value;
3485
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003486 return 0;
3487}
3488
3489/*
3490 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
3491 *
3492 * This options will at a minimum specify if the implementation is doing
3493 * fragmented interleave. Fragmented interleave, for a one to many
3494 * socket, is when subsequent calls to receive a message may return
3495 * parts of messages from different associations. Some implementations
3496 * may allow you to turn this value on or off. If so, when turned off,
3497 * no fragment interleave will occur (which will cause a head of line
3498 * blocking amongst multiple associations sharing the same one to many
3499 * socket). When this option is turned on, then each receive call may
3500 * come from a different association (thus the user must receive data
3501 * with the extended calls (e.g. sctp_recvmsg) to keep track of which
3502 * association each receive belongs to.
3503 *
3504 * This option takes a boolean value. A non-zero value indicates that
3505 * fragmented interleave is on. A value of zero indicates that
3506 * fragmented interleave is off.
3507 *
3508 * Note that it is important that an implementation that allows this
3509 * option to be turned on, have it off by default. Otherwise an unaware
3510 * application using the one to many model may become confused and act
3511 * incorrectly.
3512 */
3513static int sctp_setsockopt_fragment_interleave(struct sock *sk,
3514 char __user *optval,
3515 unsigned int optlen)
3516{
3517 int val;
3518
3519 if (optlen != sizeof(int))
3520 return -EINVAL;
3521 if (get_user(val, (int __user *)optval))
3522 return -EFAULT;
3523
3524 sctp_sk(sk)->frag_interleave = !!val;
3525
3526 if (!sctp_sk(sk)->frag_interleave)
David Brazdil0f672f62019-12-10 10:32:29 +00003527 sctp_sk(sk)->ep->intl_enable = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003528
3529 return 0;
3530}
3531
3532/*
3533 * 8.1.21. Set or Get the SCTP Partial Delivery Point
3534 * (SCTP_PARTIAL_DELIVERY_POINT)
3535 *
3536 * This option will set or get the SCTP partial delivery point. This
3537 * point is the size of a message where the partial delivery API will be
3538 * invoked to help free up rwnd space for the peer. Setting this to a
3539 * lower value will cause partial deliveries to happen more often. The
3540 * calls argument is an integer that sets or gets the partial delivery
3541 * point. Note also that the call will fail if the user attempts to set
3542 * this value larger than the socket receive buffer size.
3543 *
3544 * Note that any single message having a length smaller than or equal to
3545 * the SCTP partial delivery point will be delivered in one single read
3546 * call as long as the user provided buffer is large enough to hold the
3547 * message.
3548 */
3549static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
3550 char __user *optval,
3551 unsigned int optlen)
3552{
3553 u32 val;
3554
3555 if (optlen != sizeof(u32))
3556 return -EINVAL;
3557 if (get_user(val, (int __user *)optval))
3558 return -EFAULT;
3559
3560 /* Note: We double the receive buffer from what the user sets
3561 * it to be, also initial rwnd is based on rcvbuf/2.
3562 */
3563 if (val > (sk->sk_rcvbuf >> 1))
3564 return -EINVAL;
3565
3566 sctp_sk(sk)->pd_point = val;
3567
3568 return 0; /* is this the right error code? */
3569}
3570
3571/*
3572 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
3573 *
3574 * This option will allow a user to change the maximum burst of packets
3575 * that can be emitted by this association. Note that the default value
3576 * is 4, and some implementations may restrict this setting so that it
3577 * can only be lowered.
3578 *
3579 * NOTE: This text doesn't seem right. Do this on a socket basis with
3580 * future associations inheriting the socket value.
3581 */
3582static int sctp_setsockopt_maxburst(struct sock *sk,
3583 char __user *optval,
3584 unsigned int optlen)
3585{
David Brazdil0f672f62019-12-10 10:32:29 +00003586 struct sctp_sock *sp = sctp_sk(sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003587 struct sctp_assoc_value params;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003588 struct sctp_association *asoc;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003589
3590 if (optlen == sizeof(int)) {
3591 pr_warn_ratelimited(DEPRECATED
3592 "%s (pid %d) "
3593 "Use of int in max_burst socket option deprecated.\n"
3594 "Use struct sctp_assoc_value instead\n",
3595 current->comm, task_pid_nr(current));
David Brazdil0f672f62019-12-10 10:32:29 +00003596 if (copy_from_user(&params.assoc_value, optval, optlen))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003597 return -EFAULT;
David Brazdil0f672f62019-12-10 10:32:29 +00003598 params.assoc_id = SCTP_FUTURE_ASSOC;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003599 } else if (optlen == sizeof(struct sctp_assoc_value)) {
3600 if (copy_from_user(&params, optval, optlen))
3601 return -EFAULT;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003602 } else
3603 return -EINVAL;
3604
David Brazdil0f672f62019-12-10 10:32:29 +00003605 asoc = sctp_id2assoc(sk, params.assoc_id);
3606 if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
3607 sctp_style(sk, UDP))
3608 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003609
David Brazdil0f672f62019-12-10 10:32:29 +00003610 if (asoc) {
3611 asoc->max_burst = params.assoc_value;
3612
3613 return 0;
3614 }
3615
3616 if (sctp_style(sk, TCP))
3617 params.assoc_id = SCTP_FUTURE_ASSOC;
3618
3619 if (params.assoc_id == SCTP_FUTURE_ASSOC ||
3620 params.assoc_id == SCTP_ALL_ASSOC)
3621 sp->max_burst = params.assoc_value;
3622
3623 if (params.assoc_id == SCTP_CURRENT_ASSOC ||
3624 params.assoc_id == SCTP_ALL_ASSOC)
3625 list_for_each_entry(asoc, &sp->ep->asocs, asocs)
3626 asoc->max_burst = params.assoc_value;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003627
3628 return 0;
3629}
3630
3631/*
3632 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
3633 *
3634 * This set option adds a chunk type that the user is requesting to be
3635 * received only in an authenticated way. Changes to the list of chunks
3636 * will only effect future associations on the socket.
3637 */
3638static int sctp_setsockopt_auth_chunk(struct sock *sk,
3639 char __user *optval,
3640 unsigned int optlen)
3641{
3642 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3643 struct sctp_authchunk val;
3644
3645 if (!ep->auth_enable)
3646 return -EACCES;
3647
3648 if (optlen != sizeof(struct sctp_authchunk))
3649 return -EINVAL;
3650 if (copy_from_user(&val, optval, optlen))
3651 return -EFAULT;
3652
3653 switch (val.sauth_chunk) {
3654 case SCTP_CID_INIT:
3655 case SCTP_CID_INIT_ACK:
3656 case SCTP_CID_SHUTDOWN_COMPLETE:
3657 case SCTP_CID_AUTH:
3658 return -EINVAL;
3659 }
3660
3661 /* add this chunk id to the endpoint */
3662 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
3663}
3664
3665/*
3666 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
3667 *
3668 * This option gets or sets the list of HMAC algorithms that the local
3669 * endpoint requires the peer to use.
3670 */
3671static int sctp_setsockopt_hmac_ident(struct sock *sk,
3672 char __user *optval,
3673 unsigned int optlen)
3674{
3675 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3676 struct sctp_hmacalgo *hmacs;
3677 u32 idents;
3678 int err;
3679
3680 if (!ep->auth_enable)
3681 return -EACCES;
3682
3683 if (optlen < sizeof(struct sctp_hmacalgo))
3684 return -EINVAL;
3685 optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
3686 SCTP_AUTH_NUM_HMACS * sizeof(u16));
3687
3688 hmacs = memdup_user(optval, optlen);
3689 if (IS_ERR(hmacs))
3690 return PTR_ERR(hmacs);
3691
3692 idents = hmacs->shmac_num_idents;
3693 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
3694 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
3695 err = -EINVAL;
3696 goto out;
3697 }
3698
3699 err = sctp_auth_ep_set_hmacs(ep, hmacs);
3700out:
3701 kfree(hmacs);
3702 return err;
3703}
3704
3705/*
3706 * 7.1.20. Set a shared key (SCTP_AUTH_KEY)
3707 *
3708 * This option will set a shared secret key which is used to build an
3709 * association shared key.
3710 */
3711static int sctp_setsockopt_auth_key(struct sock *sk,
3712 char __user *optval,
3713 unsigned int optlen)
3714{
3715 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3716 struct sctp_authkey *authkey;
3717 struct sctp_association *asoc;
David Brazdil0f672f62019-12-10 10:32:29 +00003718 int ret = -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003719
3720 if (optlen <= sizeof(struct sctp_authkey))
3721 return -EINVAL;
3722 /* authkey->sca_keylength is u16, so optlen can't be bigger than
3723 * this.
3724 */
David Brazdil0f672f62019-12-10 10:32:29 +00003725 optlen = min_t(unsigned int, optlen, USHRT_MAX + sizeof(*authkey));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003726
3727 authkey = memdup_user(optval, optlen);
3728 if (IS_ERR(authkey))
3729 return PTR_ERR(authkey);
3730
David Brazdil0f672f62019-12-10 10:32:29 +00003731 if (authkey->sca_keylength > optlen - sizeof(*authkey))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003732 goto out;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003733
3734 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00003735 if (!asoc && authkey->sca_assoc_id > SCTP_ALL_ASSOC &&
3736 sctp_style(sk, UDP))
3737 goto out;
3738
3739 if (asoc) {
3740 ret = sctp_auth_set_key(ep, asoc, authkey);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003741 goto out;
3742 }
3743
David Brazdil0f672f62019-12-10 10:32:29 +00003744 if (sctp_style(sk, TCP))
3745 authkey->sca_assoc_id = SCTP_FUTURE_ASSOC;
3746
3747 if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC ||
3748 authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
3749 ret = sctp_auth_set_key(ep, asoc, authkey);
3750 if (ret)
3751 goto out;
3752 }
3753
3754 ret = 0;
3755
3756 if (authkey->sca_assoc_id == SCTP_CURRENT_ASSOC ||
3757 authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
3758 list_for_each_entry(asoc, &ep->asocs, asocs) {
3759 int res = sctp_auth_set_key(ep, asoc, authkey);
3760
3761 if (res && !ret)
3762 ret = res;
3763 }
3764 }
3765
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003766out:
3767 kzfree(authkey);
3768 return ret;
3769}
3770
3771/*
3772 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
3773 *
3774 * This option will get or set the active shared key to be used to build
3775 * the association shared key.
3776 */
3777static int sctp_setsockopt_active_key(struct sock *sk,
3778 char __user *optval,
3779 unsigned int optlen)
3780{
3781 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003782 struct sctp_association *asoc;
David Brazdil0f672f62019-12-10 10:32:29 +00003783 struct sctp_authkeyid val;
3784 int ret = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003785
3786 if (optlen != sizeof(struct sctp_authkeyid))
3787 return -EINVAL;
3788 if (copy_from_user(&val, optval, optlen))
3789 return -EFAULT;
3790
3791 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00003792 if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
3793 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003794 return -EINVAL;
3795
David Brazdil0f672f62019-12-10 10:32:29 +00003796 if (asoc)
3797 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
3798
3799 if (sctp_style(sk, TCP))
3800 val.scact_assoc_id = SCTP_FUTURE_ASSOC;
3801
3802 if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
3803 val.scact_assoc_id == SCTP_ALL_ASSOC) {
3804 ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
3805 if (ret)
3806 return ret;
3807 }
3808
3809 if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
3810 val.scact_assoc_id == SCTP_ALL_ASSOC) {
3811 list_for_each_entry(asoc, &ep->asocs, asocs) {
3812 int res = sctp_auth_set_active_key(ep, asoc,
3813 val.scact_keynumber);
3814
3815 if (res && !ret)
3816 ret = res;
3817 }
3818 }
3819
3820 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003821}
3822
3823/*
3824 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
3825 *
3826 * This set option will delete a shared secret key from use.
3827 */
3828static int sctp_setsockopt_del_key(struct sock *sk,
3829 char __user *optval,
3830 unsigned int optlen)
3831{
3832 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003833 struct sctp_association *asoc;
David Brazdil0f672f62019-12-10 10:32:29 +00003834 struct sctp_authkeyid val;
3835 int ret = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003836
3837 if (optlen != sizeof(struct sctp_authkeyid))
3838 return -EINVAL;
3839 if (copy_from_user(&val, optval, optlen))
3840 return -EFAULT;
3841
3842 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00003843 if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
3844 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003845 return -EINVAL;
3846
David Brazdil0f672f62019-12-10 10:32:29 +00003847 if (asoc)
3848 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003849
David Brazdil0f672f62019-12-10 10:32:29 +00003850 if (sctp_style(sk, TCP))
3851 val.scact_assoc_id = SCTP_FUTURE_ASSOC;
3852
3853 if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
3854 val.scact_assoc_id == SCTP_ALL_ASSOC) {
3855 ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
3856 if (ret)
3857 return ret;
3858 }
3859
3860 if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
3861 val.scact_assoc_id == SCTP_ALL_ASSOC) {
3862 list_for_each_entry(asoc, &ep->asocs, asocs) {
3863 int res = sctp_auth_del_key_id(ep, asoc,
3864 val.scact_keynumber);
3865
3866 if (res && !ret)
3867 ret = res;
3868 }
3869 }
3870
3871 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003872}
3873
3874/*
3875 * 8.3.4 Deactivate a Shared Key (SCTP_AUTH_DEACTIVATE_KEY)
3876 *
3877 * This set option will deactivate a shared secret key.
3878 */
3879static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
3880 unsigned int optlen)
3881{
3882 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003883 struct sctp_association *asoc;
David Brazdil0f672f62019-12-10 10:32:29 +00003884 struct sctp_authkeyid val;
3885 int ret = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003886
3887 if (optlen != sizeof(struct sctp_authkeyid))
3888 return -EINVAL;
3889 if (copy_from_user(&val, optval, optlen))
3890 return -EFAULT;
3891
3892 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00003893 if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
3894 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003895 return -EINVAL;
3896
David Brazdil0f672f62019-12-10 10:32:29 +00003897 if (asoc)
3898 return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
3899
3900 if (sctp_style(sk, TCP))
3901 val.scact_assoc_id = SCTP_FUTURE_ASSOC;
3902
3903 if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
3904 val.scact_assoc_id == SCTP_ALL_ASSOC) {
3905 ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
3906 if (ret)
3907 return ret;
3908 }
3909
3910 if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
3911 val.scact_assoc_id == SCTP_ALL_ASSOC) {
3912 list_for_each_entry(asoc, &ep->asocs, asocs) {
3913 int res = sctp_auth_deact_key_id(ep, asoc,
3914 val.scact_keynumber);
3915
3916 if (res && !ret)
3917 ret = res;
3918 }
3919 }
3920
3921 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003922}
3923
3924/*
3925 * 8.1.23 SCTP_AUTO_ASCONF
3926 *
3927 * This option will enable or disable the use of the automatic generation of
3928 * ASCONF chunks to add and delete addresses to an existing association. Note
3929 * that this option has two caveats namely: a) it only affects sockets that
3930 * are bound to all addresses available to the SCTP stack, and b) the system
3931 * administrator may have an overriding control that turns the ASCONF feature
3932 * off no matter what setting the socket option may have.
3933 * This option expects an integer boolean flag, where a non-zero value turns on
3934 * the option, and a zero value turns off the option.
3935 * Note. In this implementation, socket operation overrides default parameter
3936 * being set by sysctl as well as FreeBSD implementation
3937 */
3938static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
3939 unsigned int optlen)
3940{
3941 int val;
3942 struct sctp_sock *sp = sctp_sk(sk);
3943
3944 if (optlen < sizeof(int))
3945 return -EINVAL;
3946 if (get_user(val, (int __user *)optval))
3947 return -EFAULT;
3948 if (!sctp_is_ep_boundall(sk) && val)
3949 return -EINVAL;
3950 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
3951 return 0;
3952
3953 spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
3954 if (val == 0 && sp->do_auto_asconf) {
3955 list_del(&sp->auto_asconf_list);
3956 sp->do_auto_asconf = 0;
3957 } else if (val && !sp->do_auto_asconf) {
3958 list_add_tail(&sp->auto_asconf_list,
3959 &sock_net(sk)->sctp.auto_asconf_splist);
3960 sp->do_auto_asconf = 1;
3961 }
3962 spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
3963 return 0;
3964}
3965
3966/*
3967 * SCTP_PEER_ADDR_THLDS
3968 *
3969 * This option allows us to alter the partially failed threshold for one or all
3970 * transports in an association. See Section 6.1 of:
3971 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
3972 */
3973static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
3974 char __user *optval,
3975 unsigned int optlen)
3976{
3977 struct sctp_paddrthlds val;
3978 struct sctp_transport *trans;
3979 struct sctp_association *asoc;
3980
3981 if (optlen < sizeof(struct sctp_paddrthlds))
3982 return -EINVAL;
3983 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
3984 sizeof(struct sctp_paddrthlds)))
3985 return -EFAULT;
3986
David Brazdil0f672f62019-12-10 10:32:29 +00003987 if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
3988 trans = sctp_addr_id2transport(sk, &val.spt_address,
3989 val.spt_assoc_id);
3990 if (!trans)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003991 return -ENOENT;
David Brazdil0f672f62019-12-10 10:32:29 +00003992
3993 if (val.spt_pathmaxrxt)
3994 trans->pathmaxrxt = val.spt_pathmaxrxt;
3995 trans->pf_retrans = val.spt_pathpfthld;
3996
3997 return 0;
3998 }
3999
4000 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
4001 if (!asoc && val.spt_assoc_id != SCTP_FUTURE_ASSOC &&
4002 sctp_style(sk, UDP))
4003 return -EINVAL;
4004
4005 if (asoc) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004006 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
4007 transports) {
4008 if (val.spt_pathmaxrxt)
4009 trans->pathmaxrxt = val.spt_pathmaxrxt;
4010 trans->pf_retrans = val.spt_pathpfthld;
4011 }
4012
4013 if (val.spt_pathmaxrxt)
4014 asoc->pathmaxrxt = val.spt_pathmaxrxt;
4015 asoc->pf_retrans = val.spt_pathpfthld;
4016 } else {
David Brazdil0f672f62019-12-10 10:32:29 +00004017 struct sctp_sock *sp = sctp_sk(sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004018
4019 if (val.spt_pathmaxrxt)
David Brazdil0f672f62019-12-10 10:32:29 +00004020 sp->pathmaxrxt = val.spt_pathmaxrxt;
4021 sp->pf_retrans = val.spt_pathpfthld;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004022 }
4023
4024 return 0;
4025}
4026
4027static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
4028 char __user *optval,
4029 unsigned int optlen)
4030{
4031 int val;
4032
4033 if (optlen < sizeof(int))
4034 return -EINVAL;
4035 if (get_user(val, (int __user *) optval))
4036 return -EFAULT;
4037
4038 sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;
4039
4040 return 0;
4041}
4042
4043static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
4044 char __user *optval,
4045 unsigned int optlen)
4046{
4047 int val;
4048
4049 if (optlen < sizeof(int))
4050 return -EINVAL;
4051 if (get_user(val, (int __user *) optval))
4052 return -EFAULT;
4053
4054 sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;
4055
4056 return 0;
4057}
4058
4059static int sctp_setsockopt_pr_supported(struct sock *sk,
4060 char __user *optval,
4061 unsigned int optlen)
4062{
4063 struct sctp_assoc_value params;
David Brazdil0f672f62019-12-10 10:32:29 +00004064 struct sctp_association *asoc;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004065
4066 if (optlen != sizeof(params))
4067 return -EINVAL;
4068
4069 if (copy_from_user(&params, optval, optlen))
4070 return -EFAULT;
4071
David Brazdil0f672f62019-12-10 10:32:29 +00004072 asoc = sctp_id2assoc(sk, params.assoc_id);
4073 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
4074 sctp_style(sk, UDP))
4075 return -EINVAL;
4076
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004077 sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
4078
4079 return 0;
4080}
4081
4082static int sctp_setsockopt_default_prinfo(struct sock *sk,
4083 char __user *optval,
4084 unsigned int optlen)
4085{
David Brazdil0f672f62019-12-10 10:32:29 +00004086 struct sctp_sock *sp = sctp_sk(sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004087 struct sctp_default_prinfo info;
4088 struct sctp_association *asoc;
4089 int retval = -EINVAL;
4090
4091 if (optlen != sizeof(info))
4092 goto out;
4093
4094 if (copy_from_user(&info, optval, sizeof(info))) {
4095 retval = -EFAULT;
4096 goto out;
4097 }
4098
4099 if (info.pr_policy & ~SCTP_PR_SCTP_MASK)
4100 goto out;
4101
4102 if (info.pr_policy == SCTP_PR_SCTP_NONE)
4103 info.pr_value = 0;
4104
4105 asoc = sctp_id2assoc(sk, info.pr_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00004106 if (!asoc && info.pr_assoc_id > SCTP_ALL_ASSOC &&
4107 sctp_style(sk, UDP))
4108 goto out;
4109
4110 retval = 0;
4111
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004112 if (asoc) {
4113 SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy);
4114 asoc->default_timetolive = info.pr_value;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004115 goto out;
4116 }
4117
David Brazdil0f672f62019-12-10 10:32:29 +00004118 if (sctp_style(sk, TCP))
4119 info.pr_assoc_id = SCTP_FUTURE_ASSOC;
4120
4121 if (info.pr_assoc_id == SCTP_FUTURE_ASSOC ||
4122 info.pr_assoc_id == SCTP_ALL_ASSOC) {
4123 SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
4124 sp->default_timetolive = info.pr_value;
4125 }
4126
4127 if (info.pr_assoc_id == SCTP_CURRENT_ASSOC ||
4128 info.pr_assoc_id == SCTP_ALL_ASSOC) {
4129 list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
4130 SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy);
4131 asoc->default_timetolive = info.pr_value;
4132 }
4133 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004134
4135out:
4136 return retval;
4137}
4138
4139static int sctp_setsockopt_reconfig_supported(struct sock *sk,
4140 char __user *optval,
4141 unsigned int optlen)
4142{
4143 struct sctp_assoc_value params;
4144 struct sctp_association *asoc;
4145 int retval = -EINVAL;
4146
4147 if (optlen != sizeof(params))
4148 goto out;
4149
4150 if (copy_from_user(&params, optval, optlen)) {
4151 retval = -EFAULT;
4152 goto out;
4153 }
4154
4155 asoc = sctp_id2assoc(sk, params.assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00004156 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
4157 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004158 goto out;
David Brazdil0f672f62019-12-10 10:32:29 +00004159
4160 sctp_sk(sk)->ep->reconf_enable = !!params.assoc_value;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004161
4162 retval = 0;
4163
4164out:
4165 return retval;
4166}
4167
4168static int sctp_setsockopt_enable_strreset(struct sock *sk,
4169 char __user *optval,
4170 unsigned int optlen)
4171{
David Brazdil0f672f62019-12-10 10:32:29 +00004172 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004173 struct sctp_assoc_value params;
4174 struct sctp_association *asoc;
4175 int retval = -EINVAL;
4176
4177 if (optlen != sizeof(params))
4178 goto out;
4179
4180 if (copy_from_user(&params, optval, optlen)) {
4181 retval = -EFAULT;
4182 goto out;
4183 }
4184
4185 if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK))
4186 goto out;
4187
4188 asoc = sctp_id2assoc(sk, params.assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00004189 if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
4190 sctp_style(sk, UDP))
4191 goto out;
4192
4193 retval = 0;
4194
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004195 if (asoc) {
4196 asoc->strreset_enable = params.assoc_value;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004197 goto out;
4198 }
4199
David Brazdil0f672f62019-12-10 10:32:29 +00004200 if (sctp_style(sk, TCP))
4201 params.assoc_id = SCTP_FUTURE_ASSOC;
4202
4203 if (params.assoc_id == SCTP_FUTURE_ASSOC ||
4204 params.assoc_id == SCTP_ALL_ASSOC)
4205 ep->strreset_enable = params.assoc_value;
4206
4207 if (params.assoc_id == SCTP_CURRENT_ASSOC ||
4208 params.assoc_id == SCTP_ALL_ASSOC)
4209 list_for_each_entry(asoc, &ep->asocs, asocs)
4210 asoc->strreset_enable = params.assoc_value;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004211
4212out:
4213 return retval;
4214}
4215
4216static int sctp_setsockopt_reset_streams(struct sock *sk,
4217 char __user *optval,
4218 unsigned int optlen)
4219{
4220 struct sctp_reset_streams *params;
4221 struct sctp_association *asoc;
4222 int retval = -EINVAL;
4223
4224 if (optlen < sizeof(*params))
4225 return -EINVAL;
4226 /* srs_number_streams is u16, so optlen can't be bigger than this. */
4227 optlen = min_t(unsigned int, optlen, USHRT_MAX +
4228 sizeof(__u16) * sizeof(*params));
4229
4230 params = memdup_user(optval, optlen);
4231 if (IS_ERR(params))
4232 return PTR_ERR(params);
4233
4234 if (params->srs_number_streams * sizeof(__u16) >
4235 optlen - sizeof(*params))
4236 goto out;
4237
4238 asoc = sctp_id2assoc(sk, params->srs_assoc_id);
4239 if (!asoc)
4240 goto out;
4241
4242 retval = sctp_send_reset_streams(asoc, params);
4243
4244out:
4245 kfree(params);
4246 return retval;
4247}
4248
4249static int sctp_setsockopt_reset_assoc(struct sock *sk,
4250 char __user *optval,
4251 unsigned int optlen)
4252{
4253 struct sctp_association *asoc;
4254 sctp_assoc_t associd;
4255 int retval = -EINVAL;
4256
4257 if (optlen != sizeof(associd))
4258 goto out;
4259
4260 if (copy_from_user(&associd, optval, optlen)) {
4261 retval = -EFAULT;
4262 goto out;
4263 }
4264
4265 asoc = sctp_id2assoc(sk, associd);
4266 if (!asoc)
4267 goto out;
4268
4269 retval = sctp_send_reset_assoc(asoc);
4270
4271out:
4272 return retval;
4273}
4274
4275static int sctp_setsockopt_add_streams(struct sock *sk,
4276 char __user *optval,
4277 unsigned int optlen)
4278{
4279 struct sctp_association *asoc;
4280 struct sctp_add_streams params;
4281 int retval = -EINVAL;
4282
4283 if (optlen != sizeof(params))
4284 goto out;
4285
4286 if (copy_from_user(&params, optval, optlen)) {
4287 retval = -EFAULT;
4288 goto out;
4289 }
4290
4291 asoc = sctp_id2assoc(sk, params.sas_assoc_id);
4292 if (!asoc)
4293 goto out;
4294
4295 retval = sctp_send_add_streams(asoc, &params);
4296
4297out:
4298 return retval;
4299}
4300
4301static int sctp_setsockopt_scheduler(struct sock *sk,
4302 char __user *optval,
4303 unsigned int optlen)
4304{
David Brazdil0f672f62019-12-10 10:32:29 +00004305 struct sctp_sock *sp = sctp_sk(sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004306 struct sctp_association *asoc;
4307 struct sctp_assoc_value params;
David Brazdil0f672f62019-12-10 10:32:29 +00004308 int retval = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004309
4310 if (optlen < sizeof(params))
David Brazdil0f672f62019-12-10 10:32:29 +00004311 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004312
4313 optlen = sizeof(params);
David Brazdil0f672f62019-12-10 10:32:29 +00004314 if (copy_from_user(&params, optval, optlen))
4315 return -EFAULT;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004316
4317 if (params.assoc_value > SCTP_SS_MAX)
David Brazdil0f672f62019-12-10 10:32:29 +00004318 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004319
4320 asoc = sctp_id2assoc(sk, params.assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00004321 if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
4322 sctp_style(sk, UDP))
4323 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004324
David Brazdil0f672f62019-12-10 10:32:29 +00004325 if (asoc)
4326 return sctp_sched_set_sched(asoc, params.assoc_value);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004327
David Brazdil0f672f62019-12-10 10:32:29 +00004328 if (sctp_style(sk, TCP))
4329 params.assoc_id = SCTP_FUTURE_ASSOC;
4330
4331 if (params.assoc_id == SCTP_FUTURE_ASSOC ||
4332 params.assoc_id == SCTP_ALL_ASSOC)
4333 sp->default_ss = params.assoc_value;
4334
4335 if (params.assoc_id == SCTP_CURRENT_ASSOC ||
4336 params.assoc_id == SCTP_ALL_ASSOC) {
4337 list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
4338 int ret = sctp_sched_set_sched(asoc,
4339 params.assoc_value);
4340
4341 if (ret && !retval)
4342 retval = ret;
4343 }
4344 }
4345
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004346 return retval;
4347}
4348
4349static int sctp_setsockopt_scheduler_value(struct sock *sk,
4350 char __user *optval,
4351 unsigned int optlen)
4352{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004353 struct sctp_stream_value params;
David Brazdil0f672f62019-12-10 10:32:29 +00004354 struct sctp_association *asoc;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004355 int retval = -EINVAL;
4356
4357 if (optlen < sizeof(params))
4358 goto out;
4359
4360 optlen = sizeof(params);
4361 if (copy_from_user(&params, optval, optlen)) {
4362 retval = -EFAULT;
4363 goto out;
4364 }
4365
4366 asoc = sctp_id2assoc(sk, params.assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00004367 if (!asoc && params.assoc_id != SCTP_CURRENT_ASSOC &&
4368 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004369 goto out;
4370
David Brazdil0f672f62019-12-10 10:32:29 +00004371 if (asoc) {
4372 retval = sctp_sched_set_value(asoc, params.stream_id,
4373 params.stream_value, GFP_KERNEL);
4374 goto out;
4375 }
4376
4377 retval = 0;
4378
4379 list_for_each_entry(asoc, &sctp_sk(sk)->ep->asocs, asocs) {
4380 int ret = sctp_sched_set_value(asoc, params.stream_id,
4381 params.stream_value, GFP_KERNEL);
4382 if (ret && !retval) /* try to return the 1st error. */
4383 retval = ret;
4384 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004385
4386out:
4387 return retval;
4388}
4389
4390static int sctp_setsockopt_interleaving_supported(struct sock *sk,
4391 char __user *optval,
4392 unsigned int optlen)
4393{
4394 struct sctp_sock *sp = sctp_sk(sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004395 struct sctp_assoc_value params;
David Brazdil0f672f62019-12-10 10:32:29 +00004396 struct sctp_association *asoc;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004397 int retval = -EINVAL;
4398
4399 if (optlen < sizeof(params))
4400 goto out;
4401
4402 optlen = sizeof(params);
4403 if (copy_from_user(&params, optval, optlen)) {
4404 retval = -EFAULT;
4405 goto out;
4406 }
4407
David Brazdil0f672f62019-12-10 10:32:29 +00004408 asoc = sctp_id2assoc(sk, params.assoc_id);
4409 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
4410 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004411 goto out;
4412
David Brazdil0f672f62019-12-10 10:32:29 +00004413 if (!sock_net(sk)->sctp.intl_enable || !sp->frag_interleave) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004414 retval = -EPERM;
4415 goto out;
4416 }
4417
David Brazdil0f672f62019-12-10 10:32:29 +00004418 sp->ep->intl_enable = !!params.assoc_value;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004419
4420 retval = 0;
4421
4422out:
4423 return retval;
4424}
4425
4426static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval,
4427 unsigned int optlen)
4428{
4429 int val;
4430
4431 if (!sctp_style(sk, TCP))
4432 return -EOPNOTSUPP;
4433
4434 if (sctp_sk(sk)->ep->base.bind_addr.port)
4435 return -EFAULT;
4436
4437 if (optlen < sizeof(int))
4438 return -EINVAL;
4439
4440 if (get_user(val, (int __user *)optval))
4441 return -EFAULT;
4442
4443 sctp_sk(sk)->reuse = !!val;
4444
4445 return 0;
4446}
4447
David Brazdil0f672f62019-12-10 10:32:29 +00004448static int sctp_assoc_ulpevent_type_set(struct sctp_event *param,
4449 struct sctp_association *asoc)
4450{
4451 struct sctp_ulpevent *event;
4452
4453 sctp_ulpevent_type_set(&asoc->subscribe, param->se_type, param->se_on);
4454
4455 if (param->se_type == SCTP_SENDER_DRY_EVENT && param->se_on) {
4456 if (sctp_outq_is_empty(&asoc->outqueue)) {
4457 event = sctp_ulpevent_make_sender_dry_event(asoc,
4458 GFP_USER | __GFP_NOWARN);
4459 if (!event)
4460 return -ENOMEM;
4461
4462 asoc->stream.si->enqueue_event(&asoc->ulpq, event);
4463 }
4464 }
4465
4466 return 0;
4467}
4468
4469static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
4470 unsigned int optlen)
4471{
4472 struct sctp_sock *sp = sctp_sk(sk);
4473 struct sctp_association *asoc;
4474 struct sctp_event param;
4475 int retval = 0;
4476
4477 if (optlen < sizeof(param))
4478 return -EINVAL;
4479
4480 optlen = sizeof(param);
4481 if (copy_from_user(&param, optval, optlen))
4482 return -EFAULT;
4483
4484 if (param.se_type < SCTP_SN_TYPE_BASE ||
4485 param.se_type > SCTP_SN_TYPE_MAX)
4486 return -EINVAL;
4487
4488 asoc = sctp_id2assoc(sk, param.se_assoc_id);
4489 if (!asoc && param.se_assoc_id > SCTP_ALL_ASSOC &&
4490 sctp_style(sk, UDP))
4491 return -EINVAL;
4492
4493 if (asoc)
4494 return sctp_assoc_ulpevent_type_set(&param, asoc);
4495
4496 if (sctp_style(sk, TCP))
4497 param.se_assoc_id = SCTP_FUTURE_ASSOC;
4498
4499 if (param.se_assoc_id == SCTP_FUTURE_ASSOC ||
4500 param.se_assoc_id == SCTP_ALL_ASSOC)
4501 sctp_ulpevent_type_set(&sp->subscribe,
4502 param.se_type, param.se_on);
4503
4504 if (param.se_assoc_id == SCTP_CURRENT_ASSOC ||
4505 param.se_assoc_id == SCTP_ALL_ASSOC) {
4506 list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
4507 int ret = sctp_assoc_ulpevent_type_set(&param, asoc);
4508
4509 if (ret && !retval)
4510 retval = ret;
4511 }
4512 }
4513
4514 return retval;
4515}
4516
4517static int sctp_setsockopt_asconf_supported(struct sock *sk,
4518 char __user *optval,
4519 unsigned int optlen)
4520{
4521 struct sctp_assoc_value params;
4522 struct sctp_association *asoc;
4523 struct sctp_endpoint *ep;
4524 int retval = -EINVAL;
4525
4526 if (optlen != sizeof(params))
4527 goto out;
4528
4529 if (copy_from_user(&params, optval, optlen)) {
4530 retval = -EFAULT;
4531 goto out;
4532 }
4533
4534 asoc = sctp_id2assoc(sk, params.assoc_id);
4535 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
4536 sctp_style(sk, UDP))
4537 goto out;
4538
4539 ep = sctp_sk(sk)->ep;
4540 ep->asconf_enable = !!params.assoc_value;
4541
4542 if (ep->asconf_enable && ep->auth_enable) {
4543 sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF);
4544 sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK);
4545 }
4546
4547 retval = 0;
4548
4549out:
4550 return retval;
4551}
4552
4553static int sctp_setsockopt_auth_supported(struct sock *sk,
4554 char __user *optval,
4555 unsigned int optlen)
4556{
4557 struct sctp_assoc_value params;
4558 struct sctp_association *asoc;
4559 struct sctp_endpoint *ep;
4560 int retval = -EINVAL;
4561
4562 if (optlen != sizeof(params))
4563 goto out;
4564
4565 if (copy_from_user(&params, optval, optlen)) {
4566 retval = -EFAULT;
4567 goto out;
4568 }
4569
4570 asoc = sctp_id2assoc(sk, params.assoc_id);
4571 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
4572 sctp_style(sk, UDP))
4573 goto out;
4574
4575 ep = sctp_sk(sk)->ep;
4576 if (params.assoc_value) {
4577 retval = sctp_auth_init(ep, GFP_KERNEL);
4578 if (retval)
4579 goto out;
4580 if (ep->asconf_enable) {
4581 sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF);
4582 sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK);
4583 }
4584 }
4585
4586 ep->auth_enable = !!params.assoc_value;
4587 retval = 0;
4588
4589out:
4590 return retval;
4591}
4592
4593static int sctp_setsockopt_ecn_supported(struct sock *sk,
4594 char __user *optval,
4595 unsigned int optlen)
4596{
4597 struct sctp_assoc_value params;
4598 struct sctp_association *asoc;
4599 int retval = -EINVAL;
4600
4601 if (optlen != sizeof(params))
4602 goto out;
4603
4604 if (copy_from_user(&params, optval, optlen)) {
4605 retval = -EFAULT;
4606 goto out;
4607 }
4608
4609 asoc = sctp_id2assoc(sk, params.assoc_id);
4610 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
4611 sctp_style(sk, UDP))
4612 goto out;
4613
4614 sctp_sk(sk)->ep->ecn_enable = !!params.assoc_value;
4615 retval = 0;
4616
4617out:
4618 return retval;
4619}
4620
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004621/* API 6.2 setsockopt(), getsockopt()
4622 *
4623 * Applications use setsockopt() and getsockopt() to set or retrieve
4624 * socket options. Socket options are used to change the default
4625 * behavior of sockets calls. They are described in Section 7.
4626 *
4627 * The syntax is:
4628 *
4629 * ret = getsockopt(int sd, int level, int optname, void __user *optval,
4630 * int __user *optlen);
4631 * ret = setsockopt(int sd, int level, int optname, const void __user *optval,
4632 * int optlen);
4633 *
4634 * sd - the socket descript.
4635 * level - set to IPPROTO_SCTP for all SCTP options.
4636 * optname - the option name.
4637 * optval - the buffer to store the value of the option.
4638 * optlen - the size of the buffer.
4639 */
4640static int sctp_setsockopt(struct sock *sk, int level, int optname,
4641 char __user *optval, unsigned int optlen)
4642{
4643 int retval = 0;
4644
4645 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
4646
4647 /* I can hardly begin to describe how wrong this is. This is
4648 * so broken as to be worse than useless. The API draft
4649 * REALLY is NOT helpful here... I am not convinced that the
4650 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP
4651 * are at all well-founded.
4652 */
4653 if (level != SOL_SCTP) {
4654 struct sctp_af *af = sctp_sk(sk)->pf->af;
4655 retval = af->setsockopt(sk, level, optname, optval, optlen);
4656 goto out_nounlock;
4657 }
4658
4659 lock_sock(sk);
4660
4661 switch (optname) {
4662 case SCTP_SOCKOPT_BINDX_ADD:
4663 /* 'optlen' is the size of the addresses buffer. */
4664 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
4665 optlen, SCTP_BINDX_ADD_ADDR);
4666 break;
4667
4668 case SCTP_SOCKOPT_BINDX_REM:
4669 /* 'optlen' is the size of the addresses buffer. */
4670 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
4671 optlen, SCTP_BINDX_REM_ADDR);
4672 break;
4673
4674 case SCTP_SOCKOPT_CONNECTX_OLD:
4675 /* 'optlen' is the size of the addresses buffer. */
4676 retval = sctp_setsockopt_connectx_old(sk,
4677 (struct sockaddr __user *)optval,
4678 optlen);
4679 break;
4680
4681 case SCTP_SOCKOPT_CONNECTX:
4682 /* 'optlen' is the size of the addresses buffer. */
4683 retval = sctp_setsockopt_connectx(sk,
4684 (struct sockaddr __user *)optval,
4685 optlen);
4686 break;
4687
4688 case SCTP_DISABLE_FRAGMENTS:
4689 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
4690 break;
4691
4692 case SCTP_EVENTS:
4693 retval = sctp_setsockopt_events(sk, optval, optlen);
4694 break;
4695
4696 case SCTP_AUTOCLOSE:
4697 retval = sctp_setsockopt_autoclose(sk, optval, optlen);
4698 break;
4699
4700 case SCTP_PEER_ADDR_PARAMS:
4701 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
4702 break;
4703
4704 case SCTP_DELAYED_SACK:
4705 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
4706 break;
4707 case SCTP_PARTIAL_DELIVERY_POINT:
4708 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
4709 break;
4710
4711 case SCTP_INITMSG:
4712 retval = sctp_setsockopt_initmsg(sk, optval, optlen);
4713 break;
4714 case SCTP_DEFAULT_SEND_PARAM:
4715 retval = sctp_setsockopt_default_send_param(sk, optval,
4716 optlen);
4717 break;
4718 case SCTP_DEFAULT_SNDINFO:
4719 retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen);
4720 break;
4721 case SCTP_PRIMARY_ADDR:
4722 retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
4723 break;
4724 case SCTP_SET_PEER_PRIMARY_ADDR:
4725 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
4726 break;
4727 case SCTP_NODELAY:
4728 retval = sctp_setsockopt_nodelay(sk, optval, optlen);
4729 break;
4730 case SCTP_RTOINFO:
4731 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
4732 break;
4733 case SCTP_ASSOCINFO:
4734 retval = sctp_setsockopt_associnfo(sk, optval, optlen);
4735 break;
4736 case SCTP_I_WANT_MAPPED_V4_ADDR:
4737 retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
4738 break;
4739 case SCTP_MAXSEG:
4740 retval = sctp_setsockopt_maxseg(sk, optval, optlen);
4741 break;
4742 case SCTP_ADAPTATION_LAYER:
4743 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
4744 break;
4745 case SCTP_CONTEXT:
4746 retval = sctp_setsockopt_context(sk, optval, optlen);
4747 break;
4748 case SCTP_FRAGMENT_INTERLEAVE:
4749 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
4750 break;
4751 case SCTP_MAX_BURST:
4752 retval = sctp_setsockopt_maxburst(sk, optval, optlen);
4753 break;
4754 case SCTP_AUTH_CHUNK:
4755 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
4756 break;
4757 case SCTP_HMAC_IDENT:
4758 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
4759 break;
4760 case SCTP_AUTH_KEY:
4761 retval = sctp_setsockopt_auth_key(sk, optval, optlen);
4762 break;
4763 case SCTP_AUTH_ACTIVE_KEY:
4764 retval = sctp_setsockopt_active_key(sk, optval, optlen);
4765 break;
4766 case SCTP_AUTH_DELETE_KEY:
4767 retval = sctp_setsockopt_del_key(sk, optval, optlen);
4768 break;
4769 case SCTP_AUTH_DEACTIVATE_KEY:
4770 retval = sctp_setsockopt_deactivate_key(sk, optval, optlen);
4771 break;
4772 case SCTP_AUTO_ASCONF:
4773 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
4774 break;
4775 case SCTP_PEER_ADDR_THLDS:
4776 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
4777 break;
4778 case SCTP_RECVRCVINFO:
4779 retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
4780 break;
4781 case SCTP_RECVNXTINFO:
4782 retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen);
4783 break;
4784 case SCTP_PR_SUPPORTED:
4785 retval = sctp_setsockopt_pr_supported(sk, optval, optlen);
4786 break;
4787 case SCTP_DEFAULT_PRINFO:
4788 retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
4789 break;
4790 case SCTP_RECONFIG_SUPPORTED:
4791 retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen);
4792 break;
4793 case SCTP_ENABLE_STREAM_RESET:
4794 retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
4795 break;
4796 case SCTP_RESET_STREAMS:
4797 retval = sctp_setsockopt_reset_streams(sk, optval, optlen);
4798 break;
4799 case SCTP_RESET_ASSOC:
4800 retval = sctp_setsockopt_reset_assoc(sk, optval, optlen);
4801 break;
4802 case SCTP_ADD_STREAMS:
4803 retval = sctp_setsockopt_add_streams(sk, optval, optlen);
4804 break;
4805 case SCTP_STREAM_SCHEDULER:
4806 retval = sctp_setsockopt_scheduler(sk, optval, optlen);
4807 break;
4808 case SCTP_STREAM_SCHEDULER_VALUE:
4809 retval = sctp_setsockopt_scheduler_value(sk, optval, optlen);
4810 break;
4811 case SCTP_INTERLEAVING_SUPPORTED:
4812 retval = sctp_setsockopt_interleaving_supported(sk, optval,
4813 optlen);
4814 break;
4815 case SCTP_REUSE_PORT:
4816 retval = sctp_setsockopt_reuse_port(sk, optval, optlen);
4817 break;
David Brazdil0f672f62019-12-10 10:32:29 +00004818 case SCTP_EVENT:
4819 retval = sctp_setsockopt_event(sk, optval, optlen);
4820 break;
4821 case SCTP_ASCONF_SUPPORTED:
4822 retval = sctp_setsockopt_asconf_supported(sk, optval, optlen);
4823 break;
4824 case SCTP_AUTH_SUPPORTED:
4825 retval = sctp_setsockopt_auth_supported(sk, optval, optlen);
4826 break;
4827 case SCTP_ECN_SUPPORTED:
4828 retval = sctp_setsockopt_ecn_supported(sk, optval, optlen);
4829 break;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004830 default:
4831 retval = -ENOPROTOOPT;
4832 break;
4833 }
4834
4835 release_sock(sk);
4836
4837out_nounlock:
4838 return retval;
4839}
4840
4841/* API 3.1.6 connect() - UDP Style Syntax
4842 *
4843 * An application may use the connect() call in the UDP model to initiate an
4844 * association without sending data.
4845 *
4846 * The syntax is:
4847 *
4848 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
4849 *
4850 * sd: the socket descriptor to have a new association added to.
4851 *
4852 * nam: the address structure (either struct sockaddr_in or struct
4853 * sockaddr_in6 defined in RFC2553 [7]).
4854 *
4855 * len: the size of the address.
4856 */
4857static int sctp_connect(struct sock *sk, struct sockaddr *addr,
4858 int addr_len, int flags)
4859{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004860 struct sctp_af *af;
David Brazdil0f672f62019-12-10 10:32:29 +00004861 int err = -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004862
4863 lock_sock(sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004864 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
4865 addr, addr_len);
4866
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004867 /* Validate addr_len before calling common connect/connectx routine. */
4868 af = sctp_get_af_specific(addr->sa_family);
David Brazdil0f672f62019-12-10 10:32:29 +00004869 if (af && addr_len >= af->sockaddr_len)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004870 err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004871
4872 release_sock(sk);
4873 return err;
4874}
4875
4876int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
4877 int addr_len, int flags)
4878{
4879 if (addr_len < sizeof(uaddr->sa_family))
4880 return -EINVAL;
4881
4882 if (uaddr->sa_family == AF_UNSPEC)
4883 return -EOPNOTSUPP;
4884
4885 return sctp_connect(sock->sk, uaddr, addr_len, flags);
4886}
4887
4888/* FIXME: Write comments. */
4889static int sctp_disconnect(struct sock *sk, int flags)
4890{
4891 return -EOPNOTSUPP; /* STUB */
4892}
4893
4894/* 4.1.4 accept() - TCP Style Syntax
4895 *
4896 * Applications use accept() call to remove an established SCTP
4897 * association from the accept queue of the endpoint. A new socket
4898 * descriptor will be returned from accept() to represent the newly
4899 * formed association.
4900 */
4901static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
4902{
4903 struct sctp_sock *sp;
4904 struct sctp_endpoint *ep;
4905 struct sock *newsk = NULL;
4906 struct sctp_association *asoc;
4907 long timeo;
4908 int error = 0;
4909
4910 lock_sock(sk);
4911
4912 sp = sctp_sk(sk);
4913 ep = sp->ep;
4914
4915 if (!sctp_style(sk, TCP)) {
4916 error = -EOPNOTSUPP;
4917 goto out;
4918 }
4919
4920 if (!sctp_sstate(sk, LISTENING)) {
4921 error = -EINVAL;
4922 goto out;
4923 }
4924
4925 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
4926
4927 error = sctp_wait_for_accept(sk, timeo);
4928 if (error)
4929 goto out;
4930
4931 /* We treat the list of associations on the endpoint as the accept
4932 * queue and pick the first association on the list.
4933 */
4934 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
4935
4936 newsk = sp->pf->create_accept_sk(sk, asoc, kern);
4937 if (!newsk) {
4938 error = -ENOMEM;
4939 goto out;
4940 }
4941
4942 /* Populate the fields of the newsk from the oldsk and migrate the
4943 * asoc to the newsk.
4944 */
David Brazdil0f672f62019-12-10 10:32:29 +00004945 error = sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
4946 if (error) {
4947 sk_common_release(newsk);
4948 newsk = NULL;
4949 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004950
4951out:
4952 release_sock(sk);
4953 *err = error;
4954 return newsk;
4955}
4956
4957/* The SCTP ioctl handler. */
4958static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
4959{
4960 int rc = -ENOTCONN;
4961
4962 lock_sock(sk);
4963
4964 /*
4965 * SEQPACKET-style sockets in LISTENING state are valid, for
4966 * SCTP, so only discard TCP-style sockets in LISTENING state.
4967 */
4968 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
4969 goto out;
4970
4971 switch (cmd) {
4972 case SIOCINQ: {
4973 struct sk_buff *skb;
4974 unsigned int amount = 0;
4975
4976 skb = skb_peek(&sk->sk_receive_queue);
4977 if (skb != NULL) {
4978 /*
4979 * We will only return the amount of this packet since
4980 * that is all that will be read.
4981 */
4982 amount = skb->len;
4983 }
4984 rc = put_user(amount, (int __user *)arg);
4985 break;
4986 }
4987 default:
4988 rc = -ENOIOCTLCMD;
4989 break;
4990 }
4991out:
4992 release_sock(sk);
4993 return rc;
4994}
4995
4996/* This is the function which gets called during socket creation to
4997 * initialized the SCTP-specific portion of the sock.
4998 * The sock structure should already be zero-filled memory.
4999 */
5000static int sctp_init_sock(struct sock *sk)
5001{
5002 struct net *net = sock_net(sk);
5003 struct sctp_sock *sp;
5004
5005 pr_debug("%s: sk:%p\n", __func__, sk);
5006
5007 sp = sctp_sk(sk);
5008
5009 /* Initialize the SCTP per socket area. */
5010 switch (sk->sk_type) {
5011 case SOCK_SEQPACKET:
5012 sp->type = SCTP_SOCKET_UDP;
5013 break;
5014 case SOCK_STREAM:
5015 sp->type = SCTP_SOCKET_TCP;
5016 break;
5017 default:
5018 return -ESOCKTNOSUPPORT;
5019 }
5020
5021 sk->sk_gso_type = SKB_GSO_SCTP;
5022
5023 /* Initialize default send parameters. These parameters can be
5024 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
5025 */
5026 sp->default_stream = 0;
5027 sp->default_ppid = 0;
5028 sp->default_flags = 0;
5029 sp->default_context = 0;
5030 sp->default_timetolive = 0;
5031
5032 sp->default_rcv_context = 0;
5033 sp->max_burst = net->sctp.max_burst;
5034
5035 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
5036
5037 /* Initialize default setup parameters. These parameters
5038 * can be modified with the SCTP_INITMSG socket option or
5039 * overridden by the SCTP_INIT CMSG.
5040 */
5041 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
5042 sp->initmsg.sinit_max_instreams = sctp_max_instreams;
5043 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init;
5044 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
5045
5046 /* Initialize default RTO related parameters. These parameters can
5047 * be modified for with the SCTP_RTOINFO socket option.
5048 */
5049 sp->rtoinfo.srto_initial = net->sctp.rto_initial;
5050 sp->rtoinfo.srto_max = net->sctp.rto_max;
5051 sp->rtoinfo.srto_min = net->sctp.rto_min;
5052
5053 /* Initialize default association related parameters. These parameters
5054 * can be modified with the SCTP_ASSOCINFO socket option.
5055 */
5056 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
5057 sp->assocparams.sasoc_number_peer_destinations = 0;
5058 sp->assocparams.sasoc_peer_rwnd = 0;
5059 sp->assocparams.sasoc_local_rwnd = 0;
5060 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
5061
5062 /* Initialize default event subscriptions. By default, all the
5063 * options are off.
5064 */
David Brazdil0f672f62019-12-10 10:32:29 +00005065 sp->subscribe = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005066
5067 /* Default Peer Address Parameters. These defaults can
5068 * be modified via SCTP_PEER_ADDR_PARAMS
5069 */
5070 sp->hbinterval = net->sctp.hb_interval;
5071 sp->pathmaxrxt = net->sctp.max_retrans_path;
David Brazdil0f672f62019-12-10 10:32:29 +00005072 sp->pf_retrans = net->sctp.pf_retrans;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005073 sp->pathmtu = 0; /* allow default discovery */
5074 sp->sackdelay = net->sctp.sack_timeout;
5075 sp->sackfreq = 2;
5076 sp->param_flags = SPP_HB_ENABLE |
5077 SPP_PMTUD_ENABLE |
5078 SPP_SACKDELAY_ENABLE;
David Brazdil0f672f62019-12-10 10:32:29 +00005079 sp->default_ss = SCTP_SS_DEFAULT;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005080
5081 /* If enabled no SCTP message fragmentation will be performed.
5082 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
5083 */
5084 sp->disable_fragments = 0;
5085
5086 /* Enable Nagle algorithm by default. */
5087 sp->nodelay = 0;
5088
5089 sp->recvrcvinfo = 0;
5090 sp->recvnxtinfo = 0;
5091
5092 /* Enable by default. */
5093 sp->v4mapped = 1;
5094
5095 /* Auto-close idle associations after the configured
5096 * number of seconds. A value of 0 disables this
5097 * feature. Configure through the SCTP_AUTOCLOSE socket option,
5098 * for UDP-style sockets only.
5099 */
5100 sp->autoclose = 0;
5101
5102 /* User specified fragmentation limit. */
5103 sp->user_frag = 0;
5104
5105 sp->adaptation_ind = 0;
5106
5107 sp->pf = sctp_get_pf_specific(sk->sk_family);
5108
5109 /* Control variables for partial data delivery. */
5110 atomic_set(&sp->pd_mode, 0);
5111 skb_queue_head_init(&sp->pd_lobby);
5112 sp->frag_interleave = 0;
5113
5114 /* Create a per socket endpoint structure. Even if we
5115 * change the data structure relationships, this may still
5116 * be useful for storing pre-connect address information.
5117 */
5118 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
5119 if (!sp->ep)
5120 return -ENOMEM;
5121
5122 sp->hmac = NULL;
5123
5124 sk->sk_destruct = sctp_destruct_sock;
5125
5126 SCTP_DBG_OBJCNT_INC(sock);
5127
5128 local_bh_disable();
5129 sk_sockets_allocated_inc(sk);
5130 sock_prot_inuse_add(net, sk->sk_prot, 1);
5131
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005132 local_bh_enable();
5133
5134 return 0;
5135}
5136
5137/* Cleanup any SCTP per socket resources. Must be called with
5138 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
5139 */
5140static void sctp_destroy_sock(struct sock *sk)
5141{
5142 struct sctp_sock *sp;
5143
5144 pr_debug("%s: sk:%p\n", __func__, sk);
5145
5146 /* Release our hold on the endpoint. */
5147 sp = sctp_sk(sk);
5148 /* This could happen during socket init, thus we bail out
5149 * early, since the rest of the below is not setup either.
5150 */
5151 if (sp->ep == NULL)
5152 return;
5153
5154 if (sp->do_auto_asconf) {
5155 sp->do_auto_asconf = 0;
5156 list_del(&sp->auto_asconf_list);
5157 }
5158 sctp_endpoint_free(sp->ep);
5159 local_bh_disable();
5160 sk_sockets_allocated_dec(sk);
5161 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
5162 local_bh_enable();
5163}
5164
5165/* Triggered when there are no references on the socket anymore */
5166static void sctp_destruct_sock(struct sock *sk)
5167{
5168 struct sctp_sock *sp = sctp_sk(sk);
5169
5170 /* Free up the HMAC transform. */
5171 crypto_free_shash(sp->hmac);
5172
5173 inet_sock_destruct(sk);
5174}
5175
5176/* API 4.1.7 shutdown() - TCP Style Syntax
5177 * int shutdown(int socket, int how);
5178 *
5179 * sd - the socket descriptor of the association to be closed.
5180 * how - Specifies the type of shutdown. The values are
5181 * as follows:
5182 * SHUT_RD
5183 * Disables further receive operations. No SCTP
5184 * protocol action is taken.
5185 * SHUT_WR
5186 * Disables further send operations, and initiates
5187 * the SCTP shutdown sequence.
5188 * SHUT_RDWR
5189 * Disables further send and receive operations
5190 * and initiates the SCTP shutdown sequence.
5191 */
5192static void sctp_shutdown(struct sock *sk, int how)
5193{
5194 struct net *net = sock_net(sk);
5195 struct sctp_endpoint *ep;
5196
5197 if (!sctp_style(sk, TCP))
5198 return;
5199
5200 ep = sctp_sk(sk)->ep;
5201 if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
5202 struct sctp_association *asoc;
5203
5204 inet_sk_set_state(sk, SCTP_SS_CLOSING);
5205 asoc = list_entry(ep->asocs.next,
5206 struct sctp_association, asocs);
5207 sctp_primitive_SHUTDOWN(net, asoc, NULL);
5208 }
5209}
5210
5211int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
5212 struct sctp_info *info)
5213{
5214 struct sctp_transport *prim;
5215 struct list_head *pos;
5216 int mask;
5217
5218 memset(info, 0, sizeof(*info));
5219 if (!asoc) {
5220 struct sctp_sock *sp = sctp_sk(sk);
5221
5222 info->sctpi_s_autoclose = sp->autoclose;
5223 info->sctpi_s_adaptation_ind = sp->adaptation_ind;
5224 info->sctpi_s_pd_point = sp->pd_point;
5225 info->sctpi_s_nodelay = sp->nodelay;
5226 info->sctpi_s_disable_fragments = sp->disable_fragments;
5227 info->sctpi_s_v4mapped = sp->v4mapped;
5228 info->sctpi_s_frag_interleave = sp->frag_interleave;
5229 info->sctpi_s_type = sp->type;
5230
5231 return 0;
5232 }
5233
5234 info->sctpi_tag = asoc->c.my_vtag;
5235 info->sctpi_state = asoc->state;
5236 info->sctpi_rwnd = asoc->a_rwnd;
5237 info->sctpi_unackdata = asoc->unack_data;
5238 info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
5239 info->sctpi_instrms = asoc->stream.incnt;
5240 info->sctpi_outstrms = asoc->stream.outcnt;
5241 list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
5242 info->sctpi_inqueue++;
5243 list_for_each(pos, &asoc->outqueue.out_chunk_list)
5244 info->sctpi_outqueue++;
5245 info->sctpi_overall_error = asoc->overall_error_count;
5246 info->sctpi_max_burst = asoc->max_burst;
5247 info->sctpi_maxseg = asoc->frag_point;
5248 info->sctpi_peer_rwnd = asoc->peer.rwnd;
5249 info->sctpi_peer_tag = asoc->c.peer_vtag;
5250
5251 mask = asoc->peer.ecn_capable << 1;
5252 mask = (mask | asoc->peer.ipv4_address) << 1;
5253 mask = (mask | asoc->peer.ipv6_address) << 1;
5254 mask = (mask | asoc->peer.hostname_address) << 1;
5255 mask = (mask | asoc->peer.asconf_capable) << 1;
5256 mask = (mask | asoc->peer.prsctp_capable) << 1;
5257 mask = (mask | asoc->peer.auth_capable);
5258 info->sctpi_peer_capable = mask;
5259 mask = asoc->peer.sack_needed << 1;
5260 mask = (mask | asoc->peer.sack_generation) << 1;
5261 mask = (mask | asoc->peer.zero_window_announced);
5262 info->sctpi_peer_sack = mask;
5263
5264 info->sctpi_isacks = asoc->stats.isacks;
5265 info->sctpi_osacks = asoc->stats.osacks;
5266 info->sctpi_opackets = asoc->stats.opackets;
5267 info->sctpi_ipackets = asoc->stats.ipackets;
5268 info->sctpi_rtxchunks = asoc->stats.rtxchunks;
5269 info->sctpi_outofseqtsns = asoc->stats.outofseqtsns;
5270 info->sctpi_idupchunks = asoc->stats.idupchunks;
5271 info->sctpi_gapcnt = asoc->stats.gapcnt;
5272 info->sctpi_ouodchunks = asoc->stats.ouodchunks;
5273 info->sctpi_iuodchunks = asoc->stats.iuodchunks;
5274 info->sctpi_oodchunks = asoc->stats.oodchunks;
5275 info->sctpi_iodchunks = asoc->stats.iodchunks;
5276 info->sctpi_octrlchunks = asoc->stats.octrlchunks;
5277 info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;
5278
5279 prim = asoc->peer.primary_path;
5280 memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
5281 info->sctpi_p_state = prim->state;
5282 info->sctpi_p_cwnd = prim->cwnd;
5283 info->sctpi_p_srtt = prim->srtt;
5284 info->sctpi_p_rto = jiffies_to_msecs(prim->rto);
5285 info->sctpi_p_hbinterval = prim->hbinterval;
5286 info->sctpi_p_pathmaxrxt = prim->pathmaxrxt;
5287 info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay);
5288 info->sctpi_p_ssthresh = prim->ssthresh;
5289 info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked;
5290 info->sctpi_p_flight_size = prim->flight_size;
5291 info->sctpi_p_error = prim->error_count;
5292
5293 return 0;
5294}
5295EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
5296
5297/* use callback to avoid exporting the core structure */
5298void sctp_transport_walk_start(struct rhashtable_iter *iter)
5299{
5300 rhltable_walk_enter(&sctp_transport_hashtable, iter);
5301
5302 rhashtable_walk_start(iter);
5303}
5304
5305void sctp_transport_walk_stop(struct rhashtable_iter *iter)
5306{
5307 rhashtable_walk_stop(iter);
5308 rhashtable_walk_exit(iter);
5309}
5310
5311struct sctp_transport *sctp_transport_get_next(struct net *net,
5312 struct rhashtable_iter *iter)
5313{
5314 struct sctp_transport *t;
5315
5316 t = rhashtable_walk_next(iter);
5317 for (; t; t = rhashtable_walk_next(iter)) {
5318 if (IS_ERR(t)) {
5319 if (PTR_ERR(t) == -EAGAIN)
5320 continue;
5321 break;
5322 }
5323
5324 if (!sctp_transport_hold(t))
5325 continue;
5326
5327 if (net_eq(sock_net(t->asoc->base.sk), net) &&
5328 t->asoc->peer.primary_path == t)
5329 break;
5330
5331 sctp_transport_put(t);
5332 }
5333
5334 return t;
5335}
5336
5337struct sctp_transport *sctp_transport_get_idx(struct net *net,
5338 struct rhashtable_iter *iter,
5339 int pos)
5340{
5341 struct sctp_transport *t;
5342
5343 if (!pos)
5344 return SEQ_START_TOKEN;
5345
5346 while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
5347 if (!--pos)
5348 break;
5349 sctp_transport_put(t);
5350 }
5351
5352 return t;
5353}
5354
5355int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
5356 void *p) {
5357 int err = 0;
5358 int hash = 0;
5359 struct sctp_ep_common *epb;
5360 struct sctp_hashbucket *head;
5361
5362 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
5363 hash++, head++) {
5364 read_lock_bh(&head->lock);
5365 sctp_for_each_hentry(epb, &head->chain) {
5366 err = cb(sctp_ep(epb), p);
5367 if (err)
5368 break;
5369 }
5370 read_unlock_bh(&head->lock);
5371 }
5372
5373 return err;
5374}
5375EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
5376
5377int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
5378 struct net *net,
5379 const union sctp_addr *laddr,
5380 const union sctp_addr *paddr, void *p)
5381{
5382 struct sctp_transport *transport;
5383 int err;
5384
5385 rcu_read_lock();
5386 transport = sctp_addrs_lookup_transport(net, laddr, paddr);
5387 rcu_read_unlock();
5388 if (!transport)
5389 return -ENOENT;
5390
5391 err = cb(transport, p);
5392 sctp_transport_put(transport);
5393
5394 return err;
5395}
5396EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
5397
5398int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
5399 int (*cb_done)(struct sctp_transport *, void *),
5400 struct net *net, int *pos, void *p) {
5401 struct rhashtable_iter hti;
5402 struct sctp_transport *tsp;
5403 int ret;
5404
5405again:
5406 ret = 0;
5407 sctp_transport_walk_start(&hti);
5408
5409 tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
5410 for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
5411 ret = cb(tsp, p);
5412 if (ret)
5413 break;
5414 (*pos)++;
5415 sctp_transport_put(tsp);
5416 }
5417 sctp_transport_walk_stop(&hti);
5418
5419 if (ret) {
5420 if (cb_done && !cb_done(tsp, p)) {
5421 (*pos)++;
5422 sctp_transport_put(tsp);
5423 goto again;
5424 }
5425 sctp_transport_put(tsp);
5426 }
5427
5428 return ret;
5429}
5430EXPORT_SYMBOL_GPL(sctp_for_each_transport);
5431
5432/* 7.2.1 Association Status (SCTP_STATUS)
5433
5434 * Applications can retrieve current status information about an
5435 * association, including association state, peer receiver window size,
5436 * number of unacked data chunks, and number of data chunks pending
5437 * receipt. This information is read-only.
5438 */
5439static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
5440 char __user *optval,
5441 int __user *optlen)
5442{
5443 struct sctp_status status;
5444 struct sctp_association *asoc = NULL;
5445 struct sctp_transport *transport;
5446 sctp_assoc_t associd;
5447 int retval = 0;
5448
5449 if (len < sizeof(status)) {
5450 retval = -EINVAL;
5451 goto out;
5452 }
5453
5454 len = sizeof(status);
5455 if (copy_from_user(&status, optval, len)) {
5456 retval = -EFAULT;
5457 goto out;
5458 }
5459
5460 associd = status.sstat_assoc_id;
5461 asoc = sctp_id2assoc(sk, associd);
5462 if (!asoc) {
5463 retval = -EINVAL;
5464 goto out;
5465 }
5466
5467 transport = asoc->peer.primary_path;
5468
5469 status.sstat_assoc_id = sctp_assoc2id(asoc);
5470 status.sstat_state = sctp_assoc_to_state(asoc);
5471 status.sstat_rwnd = asoc->peer.rwnd;
5472 status.sstat_unackdata = asoc->unack_data;
5473
5474 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
5475 status.sstat_instrms = asoc->stream.incnt;
5476 status.sstat_outstrms = asoc->stream.outcnt;
5477 status.sstat_fragmentation_point = asoc->frag_point;
5478 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
5479 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
5480 transport->af_specific->sockaddr_len);
5481 /* Map ipv4 address into v4-mapped-on-v6 address. */
5482 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
5483 (union sctp_addr *)&status.sstat_primary.spinfo_address);
5484 status.sstat_primary.spinfo_state = transport->state;
5485 status.sstat_primary.spinfo_cwnd = transport->cwnd;
5486 status.sstat_primary.spinfo_srtt = transport->srtt;
5487 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
5488 status.sstat_primary.spinfo_mtu = transport->pathmtu;
5489
5490 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
5491 status.sstat_primary.spinfo_state = SCTP_ACTIVE;
5492
5493 if (put_user(len, optlen)) {
5494 retval = -EFAULT;
5495 goto out;
5496 }
5497
5498 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
5499 __func__, len, status.sstat_state, status.sstat_rwnd,
5500 status.sstat_assoc_id);
5501
5502 if (copy_to_user(optval, &status, len)) {
5503 retval = -EFAULT;
5504 goto out;
5505 }
5506
5507out:
5508 return retval;
5509}
5510
5511
5512/* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
5513 *
5514 * Applications can retrieve information about a specific peer address
5515 * of an association, including its reachability state, congestion
5516 * window, and retransmission timer values. This information is
5517 * read-only.
5518 */
5519static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
5520 char __user *optval,
5521 int __user *optlen)
5522{
5523 struct sctp_paddrinfo pinfo;
5524 struct sctp_transport *transport;
5525 int retval = 0;
5526
5527 if (len < sizeof(pinfo)) {
5528 retval = -EINVAL;
5529 goto out;
5530 }
5531
5532 len = sizeof(pinfo);
5533 if (copy_from_user(&pinfo, optval, len)) {
5534 retval = -EFAULT;
5535 goto out;
5536 }
5537
5538 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
5539 pinfo.spinfo_assoc_id);
5540 if (!transport)
5541 return -EINVAL;
5542
5543 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
5544 pinfo.spinfo_state = transport->state;
5545 pinfo.spinfo_cwnd = transport->cwnd;
5546 pinfo.spinfo_srtt = transport->srtt;
5547 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
5548 pinfo.spinfo_mtu = transport->pathmtu;
5549
5550 if (pinfo.spinfo_state == SCTP_UNKNOWN)
5551 pinfo.spinfo_state = SCTP_ACTIVE;
5552
5553 if (put_user(len, optlen)) {
5554 retval = -EFAULT;
5555 goto out;
5556 }
5557
5558 if (copy_to_user(optval, &pinfo, len)) {
5559 retval = -EFAULT;
5560 goto out;
5561 }
5562
5563out:
5564 return retval;
5565}
5566
5567/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
5568 *
5569 * This option is a on/off flag. If enabled no SCTP message
5570 * fragmentation will be performed. Instead if a message being sent
5571 * exceeds the current PMTU size, the message will NOT be sent and
5572 * instead a error will be indicated to the user.
5573 */
5574static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
5575 char __user *optval, int __user *optlen)
5576{
5577 int val;
5578
5579 if (len < sizeof(int))
5580 return -EINVAL;
5581
5582 len = sizeof(int);
5583 val = (sctp_sk(sk)->disable_fragments == 1);
5584 if (put_user(len, optlen))
5585 return -EFAULT;
5586 if (copy_to_user(optval, &val, len))
5587 return -EFAULT;
5588 return 0;
5589}
5590
5591/* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
5592 *
5593 * This socket option is used to specify various notifications and
5594 * ancillary data the user wishes to receive.
5595 */
5596static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
5597 int __user *optlen)
5598{
David Brazdil0f672f62019-12-10 10:32:29 +00005599 struct sctp_event_subscribe subscribe;
5600 __u8 *sn_type = (__u8 *)&subscribe;
5601 int i;
5602
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005603 if (len == 0)
5604 return -EINVAL;
5605 if (len > sizeof(struct sctp_event_subscribe))
5606 len = sizeof(struct sctp_event_subscribe);
5607 if (put_user(len, optlen))
5608 return -EFAULT;
David Brazdil0f672f62019-12-10 10:32:29 +00005609
5610 for (i = 0; i < len; i++)
5611 sn_type[i] = sctp_ulpevent_type_enabled(sctp_sk(sk)->subscribe,
5612 SCTP_SN_TYPE_BASE + i);
5613
5614 if (copy_to_user(optval, &subscribe, len))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005615 return -EFAULT;
David Brazdil0f672f62019-12-10 10:32:29 +00005616
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005617 return 0;
5618}
5619
5620/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
5621 *
5622 * This socket option is applicable to the UDP-style socket only. When
5623 * set it will cause associations that are idle for more than the
5624 * specified number of seconds to automatically close. An association
5625 * being idle is defined an association that has NOT sent or received
5626 * user data. The special value of '0' indicates that no automatic
5627 * close of any associations should be performed. The option expects an
5628 * integer defining the number of seconds of idle time before an
5629 * association is closed.
5630 */
5631static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
5632{
5633 /* Applicable to UDP-style socket only */
5634 if (sctp_style(sk, TCP))
5635 return -EOPNOTSUPP;
5636 if (len < sizeof(int))
5637 return -EINVAL;
5638 len = sizeof(int);
5639 if (put_user(len, optlen))
5640 return -EFAULT;
5641 if (put_user(sctp_sk(sk)->autoclose, (int __user *)optval))
5642 return -EFAULT;
5643 return 0;
5644}
5645
5646/* Helper routine to branch off an association to a new socket. */
5647int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
5648{
5649 struct sctp_association *asoc = sctp_id2assoc(sk, id);
5650 struct sctp_sock *sp = sctp_sk(sk);
5651 struct socket *sock;
5652 int err = 0;
5653
5654 /* Do not peel off from one netns to another one. */
5655 if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
5656 return -EINVAL;
5657
5658 if (!asoc)
5659 return -EINVAL;
5660
5661 /* An association cannot be branched off from an already peeled-off
5662 * socket, nor is this supported for tcp style sockets.
5663 */
5664 if (!sctp_style(sk, UDP))
5665 return -EINVAL;
5666
5667 /* Create a new socket. */
5668 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
5669 if (err < 0)
5670 return err;
5671
5672 sctp_copy_sock(sock->sk, sk, asoc);
5673
5674 /* Make peeled-off sockets more like 1-1 accepted sockets.
5675 * Set the daddr and initialize id to something more random and also
5676 * copy over any ip options.
5677 */
5678 sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
5679 sp->pf->copy_ip_options(sk, sock->sk);
5680
5681 /* Populate the fields of the newsk from the oldsk and migrate the
5682 * asoc to the newsk.
5683 */
David Brazdil0f672f62019-12-10 10:32:29 +00005684 err = sctp_sock_migrate(sk, sock->sk, asoc,
5685 SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
5686 if (err) {
5687 sock_release(sock);
5688 sock = NULL;
5689 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005690
5691 *sockp = sock;
5692
5693 return err;
5694}
5695EXPORT_SYMBOL(sctp_do_peeloff);
5696
5697static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *peeloff,
5698 struct file **newfile, unsigned flags)
5699{
5700 struct socket *newsock;
5701 int retval;
5702
5703 retval = sctp_do_peeloff(sk, peeloff->associd, &newsock);
5704 if (retval < 0)
5705 goto out;
5706
5707 /* Map the socket to an unused fd that can be returned to the user. */
5708 retval = get_unused_fd_flags(flags & SOCK_CLOEXEC);
5709 if (retval < 0) {
5710 sock_release(newsock);
5711 goto out;
5712 }
5713
5714 *newfile = sock_alloc_file(newsock, 0, NULL);
5715 if (IS_ERR(*newfile)) {
5716 put_unused_fd(retval);
5717 retval = PTR_ERR(*newfile);
5718 *newfile = NULL;
5719 return retval;
5720 }
5721
5722 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
5723 retval);
5724
5725 peeloff->sd = retval;
5726
5727 if (flags & SOCK_NONBLOCK)
5728 (*newfile)->f_flags |= O_NONBLOCK;
5729out:
5730 return retval;
5731}
5732
5733static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
5734{
5735 sctp_peeloff_arg_t peeloff;
5736 struct file *newfile = NULL;
5737 int retval = 0;
5738
5739 if (len < sizeof(sctp_peeloff_arg_t))
5740 return -EINVAL;
5741 len = sizeof(sctp_peeloff_arg_t);
5742 if (copy_from_user(&peeloff, optval, len))
5743 return -EFAULT;
5744
5745 retval = sctp_getsockopt_peeloff_common(sk, &peeloff, &newfile, 0);
5746 if (retval < 0)
5747 goto out;
5748
5749 /* Return the fd mapped to the new socket. */
5750 if (put_user(len, optlen)) {
5751 fput(newfile);
5752 put_unused_fd(retval);
5753 return -EFAULT;
5754 }
5755
5756 if (copy_to_user(optval, &peeloff, len)) {
5757 fput(newfile);
5758 put_unused_fd(retval);
5759 return -EFAULT;
5760 }
5761 fd_install(retval, newfile);
5762out:
5763 return retval;
5764}
5765
5766static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len,
5767 char __user *optval, int __user *optlen)
5768{
5769 sctp_peeloff_flags_arg_t peeloff;
5770 struct file *newfile = NULL;
5771 int retval = 0;
5772
5773 if (len < sizeof(sctp_peeloff_flags_arg_t))
5774 return -EINVAL;
5775 len = sizeof(sctp_peeloff_flags_arg_t);
5776 if (copy_from_user(&peeloff, optval, len))
5777 return -EFAULT;
5778
5779 retval = sctp_getsockopt_peeloff_common(sk, &peeloff.p_arg,
5780 &newfile, peeloff.flags);
5781 if (retval < 0)
5782 goto out;
5783
5784 /* Return the fd mapped to the new socket. */
5785 if (put_user(len, optlen)) {
5786 fput(newfile);
5787 put_unused_fd(retval);
5788 return -EFAULT;
5789 }
5790
5791 if (copy_to_user(optval, &peeloff, len)) {
5792 fput(newfile);
5793 put_unused_fd(retval);
5794 return -EFAULT;
5795 }
5796 fd_install(retval, newfile);
5797out:
5798 return retval;
5799}
5800
5801/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
5802 *
5803 * Applications can enable or disable heartbeats for any peer address of
5804 * an association, modify an address's heartbeat interval, force a
5805 * heartbeat to be sent immediately, and adjust the address's maximum
5806 * number of retransmissions sent before an address is considered
5807 * unreachable. The following structure is used to access and modify an
5808 * address's parameters:
5809 *
5810 * struct sctp_paddrparams {
5811 * sctp_assoc_t spp_assoc_id;
5812 * struct sockaddr_storage spp_address;
5813 * uint32_t spp_hbinterval;
5814 * uint16_t spp_pathmaxrxt;
5815 * uint32_t spp_pathmtu;
5816 * uint32_t spp_sackdelay;
5817 * uint32_t spp_flags;
5818 * };
5819 *
5820 * spp_assoc_id - (one-to-many style socket) This is filled in the
5821 * application, and identifies the association for
5822 * this query.
5823 * spp_address - This specifies which address is of interest.
5824 * spp_hbinterval - This contains the value of the heartbeat interval,
5825 * in milliseconds. If a value of zero
5826 * is present in this field then no changes are to
5827 * be made to this parameter.
5828 * spp_pathmaxrxt - This contains the maximum number of
5829 * retransmissions before this address shall be
5830 * considered unreachable. If a value of zero
5831 * is present in this field then no changes are to
5832 * be made to this parameter.
5833 * spp_pathmtu - When Path MTU discovery is disabled the value
5834 * specified here will be the "fixed" path mtu.
5835 * Note that if the spp_address field is empty
5836 * then all associations on this address will
5837 * have this fixed path mtu set upon them.
5838 *
5839 * spp_sackdelay - When delayed sack is enabled, this value specifies
5840 * the number of milliseconds that sacks will be delayed
5841 * for. This value will apply to all addresses of an
5842 * association if the spp_address field is empty. Note
5843 * also, that if delayed sack is enabled and this
5844 * value is set to 0, no change is made to the last
5845 * recorded delayed sack timer value.
5846 *
5847 * spp_flags - These flags are used to control various features
5848 * on an association. The flag field may contain
5849 * zero or more of the following options.
5850 *
5851 * SPP_HB_ENABLE - Enable heartbeats on the
5852 * specified address. Note that if the address
5853 * field is empty all addresses for the association
5854 * have heartbeats enabled upon them.
5855 *
5856 * SPP_HB_DISABLE - Disable heartbeats on the
5857 * speicifed address. Note that if the address
5858 * field is empty all addresses for the association
5859 * will have their heartbeats disabled. Note also
5860 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
5861 * mutually exclusive, only one of these two should
5862 * be specified. Enabling both fields will have
5863 * undetermined results.
5864 *
5865 * SPP_HB_DEMAND - Request a user initiated heartbeat
5866 * to be made immediately.
5867 *
5868 * SPP_PMTUD_ENABLE - This field will enable PMTU
5869 * discovery upon the specified address. Note that
5870 * if the address feild is empty then all addresses
5871 * on the association are effected.
5872 *
5873 * SPP_PMTUD_DISABLE - This field will disable PMTU
5874 * discovery upon the specified address. Note that
5875 * if the address feild is empty then all addresses
5876 * on the association are effected. Not also that
5877 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
5878 * exclusive. Enabling both will have undetermined
5879 * results.
5880 *
5881 * SPP_SACKDELAY_ENABLE - Setting this flag turns
5882 * on delayed sack. The time specified in spp_sackdelay
5883 * is used to specify the sack delay for this address. Note
5884 * that if spp_address is empty then all addresses will
5885 * enable delayed sack and take on the sack delay
5886 * value specified in spp_sackdelay.
5887 * SPP_SACKDELAY_DISABLE - Setting this flag turns
5888 * off delayed sack. If the spp_address field is blank then
5889 * delayed sack is disabled for the entire association. Note
5890 * also that this field is mutually exclusive to
5891 * SPP_SACKDELAY_ENABLE, setting both will have undefined
5892 * results.
5893 *
5894 * SPP_IPV6_FLOWLABEL: Setting this flag enables the
5895 * setting of the IPV6 flow label value. The value is
5896 * contained in the spp_ipv6_flowlabel field.
5897 * Upon retrieval, this flag will be set to indicate that
5898 * the spp_ipv6_flowlabel field has a valid value returned.
5899 * If a specific destination address is set (in the
5900 * spp_address field), then the value returned is that of
5901 * the address. If just an association is specified (and
5902 * no address), then the association's default flow label
5903 * is returned. If neither an association nor a destination
5904 * is specified, then the socket's default flow label is
5905 * returned. For non-IPv6 sockets, this flag will be left
5906 * cleared.
5907 *
5908 * SPP_DSCP: Setting this flag enables the setting of the
5909 * Differentiated Services Code Point (DSCP) value
5910 * associated with either the association or a specific
5911 * address. The value is obtained in the spp_dscp field.
5912 * Upon retrieval, this flag will be set to indicate that
5913 * the spp_dscp field has a valid value returned. If a
5914 * specific destination address is set when called (in the
5915 * spp_address field), then that specific destination
5916 * address's DSCP value is returned. If just an association
5917 * is specified, then the association's default DSCP is
5918 * returned. If neither an association nor a destination is
5919 * specified, then the socket's default DSCP is returned.
5920 *
5921 * spp_ipv6_flowlabel
5922 * - This field is used in conjunction with the
5923 * SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
5924 * The 20 least significant bits are used for the flow
5925 * label. This setting has precedence over any IPv6-layer
5926 * setting.
5927 *
5928 * spp_dscp - This field is used in conjunction with the SPP_DSCP flag
5929 * and contains the DSCP. The 6 most significant bits are
5930 * used for the DSCP. This setting has precedence over any
5931 * IPv4- or IPv6- layer setting.
5932 */
5933static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
5934 char __user *optval, int __user *optlen)
5935{
5936 struct sctp_paddrparams params;
5937 struct sctp_transport *trans = NULL;
5938 struct sctp_association *asoc = NULL;
5939 struct sctp_sock *sp = sctp_sk(sk);
5940
5941 if (len >= sizeof(params))
5942 len = sizeof(params);
5943 else if (len >= ALIGN(offsetof(struct sctp_paddrparams,
5944 spp_ipv6_flowlabel), 4))
5945 len = ALIGN(offsetof(struct sctp_paddrparams,
5946 spp_ipv6_flowlabel), 4);
5947 else
5948 return -EINVAL;
5949
5950 if (copy_from_user(&params, optval, len))
5951 return -EFAULT;
5952
5953 /* If an address other than INADDR_ANY is specified, and
5954 * no transport is found, then the request is invalid.
5955 */
5956 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
5957 trans = sctp_addr_id2transport(sk, &params.spp_address,
5958 params.spp_assoc_id);
5959 if (!trans) {
5960 pr_debug("%s: failed no transport\n", __func__);
5961 return -EINVAL;
5962 }
5963 }
5964
David Brazdil0f672f62019-12-10 10:32:29 +00005965 /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
5966 * socket is a one to many style socket, and an association
5967 * was not found, then the id was invalid.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005968 */
5969 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00005970 if (!asoc && params.spp_assoc_id != SCTP_FUTURE_ASSOC &&
5971 sctp_style(sk, UDP)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005972 pr_debug("%s: failed no association\n", __func__);
5973 return -EINVAL;
5974 }
5975
5976 if (trans) {
5977 /* Fetch transport values. */
5978 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
5979 params.spp_pathmtu = trans->pathmtu;
5980 params.spp_pathmaxrxt = trans->pathmaxrxt;
5981 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);
5982
5983 /*draft-11 doesn't say what to return in spp_flags*/
5984 params.spp_flags = trans->param_flags;
5985 if (trans->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
5986 params.spp_ipv6_flowlabel = trans->flowlabel &
5987 SCTP_FLOWLABEL_VAL_MASK;
5988 params.spp_flags |= SPP_IPV6_FLOWLABEL;
5989 }
5990 if (trans->dscp & SCTP_DSCP_SET_MASK) {
5991 params.spp_dscp = trans->dscp & SCTP_DSCP_VAL_MASK;
5992 params.spp_flags |= SPP_DSCP;
5993 }
5994 } else if (asoc) {
5995 /* Fetch association values. */
5996 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
5997 params.spp_pathmtu = asoc->pathmtu;
5998 params.spp_pathmaxrxt = asoc->pathmaxrxt;
5999 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay);
6000
6001 /*draft-11 doesn't say what to return in spp_flags*/
6002 params.spp_flags = asoc->param_flags;
6003 if (asoc->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
6004 params.spp_ipv6_flowlabel = asoc->flowlabel &
6005 SCTP_FLOWLABEL_VAL_MASK;
6006 params.spp_flags |= SPP_IPV6_FLOWLABEL;
6007 }
6008 if (asoc->dscp & SCTP_DSCP_SET_MASK) {
6009 params.spp_dscp = asoc->dscp & SCTP_DSCP_VAL_MASK;
6010 params.spp_flags |= SPP_DSCP;
6011 }
6012 } else {
6013 /* Fetch socket values. */
6014 params.spp_hbinterval = sp->hbinterval;
6015 params.spp_pathmtu = sp->pathmtu;
6016 params.spp_sackdelay = sp->sackdelay;
6017 params.spp_pathmaxrxt = sp->pathmaxrxt;
6018
6019 /*draft-11 doesn't say what to return in spp_flags*/
6020 params.spp_flags = sp->param_flags;
6021 if (sp->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
6022 params.spp_ipv6_flowlabel = sp->flowlabel &
6023 SCTP_FLOWLABEL_VAL_MASK;
6024 params.spp_flags |= SPP_IPV6_FLOWLABEL;
6025 }
6026 if (sp->dscp & SCTP_DSCP_SET_MASK) {
6027 params.spp_dscp = sp->dscp & SCTP_DSCP_VAL_MASK;
6028 params.spp_flags |= SPP_DSCP;
6029 }
6030 }
6031
6032 if (copy_to_user(optval, &params, len))
6033 return -EFAULT;
6034
6035 if (put_user(len, optlen))
6036 return -EFAULT;
6037
6038 return 0;
6039}
6040
6041/*
6042 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
6043 *
6044 * This option will effect the way delayed acks are performed. This
6045 * option allows you to get or set the delayed ack time, in
6046 * milliseconds. It also allows changing the delayed ack frequency.
6047 * Changing the frequency to 1 disables the delayed sack algorithm. If
6048 * the assoc_id is 0, then this sets or gets the endpoints default
6049 * values. If the assoc_id field is non-zero, then the set or get
6050 * effects the specified association for the one to many model (the
6051 * assoc_id field is ignored by the one to one model). Note that if
6052 * sack_delay or sack_freq are 0 when setting this option, then the
6053 * current values will remain unchanged.
6054 *
6055 * struct sctp_sack_info {
6056 * sctp_assoc_t sack_assoc_id;
6057 * uint32_t sack_delay;
6058 * uint32_t sack_freq;
6059 * };
6060 *
6061 * sack_assoc_id - This parameter, indicates which association the user
6062 * is performing an action upon. Note that if this field's value is
6063 * zero then the endpoints default value is changed (effecting future
6064 * associations only).
6065 *
6066 * sack_delay - This parameter contains the number of milliseconds that
6067 * the user is requesting the delayed ACK timer be set to. Note that
6068 * this value is defined in the standard to be between 200 and 500
6069 * milliseconds.
6070 *
6071 * sack_freq - This parameter contains the number of packets that must
6072 * be received before a sack is sent without waiting for the delay
6073 * timer to expire. The default value for this is 2, setting this
6074 * value to 1 will disable the delayed sack algorithm.
6075 */
6076static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
6077 char __user *optval,
6078 int __user *optlen)
6079{
6080 struct sctp_sack_info params;
6081 struct sctp_association *asoc = NULL;
6082 struct sctp_sock *sp = sctp_sk(sk);
6083
6084 if (len >= sizeof(struct sctp_sack_info)) {
6085 len = sizeof(struct sctp_sack_info);
6086
6087 if (copy_from_user(&params, optval, len))
6088 return -EFAULT;
6089 } else if (len == sizeof(struct sctp_assoc_value)) {
6090 pr_warn_ratelimited(DEPRECATED
6091 "%s (pid %d) "
6092 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
6093 "Use struct sctp_sack_info instead\n",
6094 current->comm, task_pid_nr(current));
6095 if (copy_from_user(&params, optval, len))
6096 return -EFAULT;
6097 } else
6098 return -EINVAL;
6099
David Brazdil0f672f62019-12-10 10:32:29 +00006100 /* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the
6101 * socket is a one to many style socket, and an association
6102 * was not found, then the id was invalid.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006103 */
6104 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00006105 if (!asoc && params.sack_assoc_id != SCTP_FUTURE_ASSOC &&
6106 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006107 return -EINVAL;
6108
6109 if (asoc) {
6110 /* Fetch association values. */
6111 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
David Brazdil0f672f62019-12-10 10:32:29 +00006112 params.sack_delay = jiffies_to_msecs(asoc->sackdelay);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006113 params.sack_freq = asoc->sackfreq;
6114
6115 } else {
6116 params.sack_delay = 0;
6117 params.sack_freq = 1;
6118 }
6119 } else {
6120 /* Fetch socket values. */
6121 if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
6122 params.sack_delay = sp->sackdelay;
6123 params.sack_freq = sp->sackfreq;
6124 } else {
6125 params.sack_delay = 0;
6126 params.sack_freq = 1;
6127 }
6128 }
6129
6130 if (copy_to_user(optval, &params, len))
6131 return -EFAULT;
6132
6133 if (put_user(len, optlen))
6134 return -EFAULT;
6135
6136 return 0;
6137}
6138
6139/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
6140 *
6141 * Applications can specify protocol parameters for the default association
6142 * initialization. The option name argument to setsockopt() and getsockopt()
6143 * is SCTP_INITMSG.
6144 *
6145 * Setting initialization parameters is effective only on an unconnected
6146 * socket (for UDP-style sockets only future associations are effected
6147 * by the change). With TCP-style sockets, this option is inherited by
6148 * sockets derived from a listener socket.
6149 */
6150static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
6151{
6152 if (len < sizeof(struct sctp_initmsg))
6153 return -EINVAL;
6154 len = sizeof(struct sctp_initmsg);
6155 if (put_user(len, optlen))
6156 return -EFAULT;
6157 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
6158 return -EFAULT;
6159 return 0;
6160}
6161
6162
6163static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
6164 char __user *optval, int __user *optlen)
6165{
6166 struct sctp_association *asoc;
6167 int cnt = 0;
6168 struct sctp_getaddrs getaddrs;
6169 struct sctp_transport *from;
6170 void __user *to;
6171 union sctp_addr temp;
6172 struct sctp_sock *sp = sctp_sk(sk);
6173 int addrlen;
6174 size_t space_left;
6175 int bytes_copied;
6176
6177 if (len < sizeof(struct sctp_getaddrs))
6178 return -EINVAL;
6179
6180 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
6181 return -EFAULT;
6182
6183 /* For UDP-style sockets, id specifies the association to query. */
6184 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
6185 if (!asoc)
6186 return -EINVAL;
6187
6188 to = optval + offsetof(struct sctp_getaddrs, addrs);
6189 space_left = len - offsetof(struct sctp_getaddrs, addrs);
6190
6191 list_for_each_entry(from, &asoc->peer.transport_addr_list,
6192 transports) {
6193 memcpy(&temp, &from->ipaddr, sizeof(temp));
6194 addrlen = sctp_get_pf_specific(sk->sk_family)
6195 ->addr_to_user(sp, &temp);
6196 if (space_left < addrlen)
6197 return -ENOMEM;
6198 if (copy_to_user(to, &temp, addrlen))
6199 return -EFAULT;
6200 to += addrlen;
6201 cnt++;
6202 space_left -= addrlen;
6203 }
6204
6205 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
6206 return -EFAULT;
6207 bytes_copied = ((char __user *)to) - optval;
6208 if (put_user(bytes_copied, optlen))
6209 return -EFAULT;
6210
6211 return 0;
6212}
6213
6214static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
6215 size_t space_left, int *bytes_copied)
6216{
6217 struct sctp_sockaddr_entry *addr;
6218 union sctp_addr temp;
6219 int cnt = 0;
6220 int addrlen;
6221 struct net *net = sock_net(sk);
6222
6223 rcu_read_lock();
6224 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
6225 if (!addr->valid)
6226 continue;
6227
6228 if ((PF_INET == sk->sk_family) &&
6229 (AF_INET6 == addr->a.sa.sa_family))
6230 continue;
6231 if ((PF_INET6 == sk->sk_family) &&
6232 inet_v6_ipv6only(sk) &&
6233 (AF_INET == addr->a.sa.sa_family))
6234 continue;
6235 memcpy(&temp, &addr->a, sizeof(temp));
6236 if (!temp.v4.sin_port)
6237 temp.v4.sin_port = htons(port);
6238
6239 addrlen = sctp_get_pf_specific(sk->sk_family)
6240 ->addr_to_user(sctp_sk(sk), &temp);
6241
6242 if (space_left < addrlen) {
6243 cnt = -ENOMEM;
6244 break;
6245 }
6246 memcpy(to, &temp, addrlen);
6247
6248 to += addrlen;
6249 cnt++;
6250 space_left -= addrlen;
6251 *bytes_copied += addrlen;
6252 }
6253 rcu_read_unlock();
6254
6255 return cnt;
6256}
6257
6258
6259static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
6260 char __user *optval, int __user *optlen)
6261{
6262 struct sctp_bind_addr *bp;
6263 struct sctp_association *asoc;
6264 int cnt = 0;
6265 struct sctp_getaddrs getaddrs;
6266 struct sctp_sockaddr_entry *addr;
6267 void __user *to;
6268 union sctp_addr temp;
6269 struct sctp_sock *sp = sctp_sk(sk);
6270 int addrlen;
6271 int err = 0;
6272 size_t space_left;
6273 int bytes_copied = 0;
6274 void *addrs;
6275 void *buf;
6276
6277 if (len < sizeof(struct sctp_getaddrs))
6278 return -EINVAL;
6279
6280 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
6281 return -EFAULT;
6282
6283 /*
6284 * For UDP-style sockets, id specifies the association to query.
6285 * If the id field is set to the value '0' then the locally bound
6286 * addresses are returned without regard to any particular
6287 * association.
6288 */
6289 if (0 == getaddrs.assoc_id) {
6290 bp = &sctp_sk(sk)->ep->base.bind_addr;
6291 } else {
6292 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
6293 if (!asoc)
6294 return -EINVAL;
6295 bp = &asoc->base.bind_addr;
6296 }
6297
6298 to = optval + offsetof(struct sctp_getaddrs, addrs);
6299 space_left = len - offsetof(struct sctp_getaddrs, addrs);
6300
6301 addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
6302 if (!addrs)
6303 return -ENOMEM;
6304
6305 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
6306 * addresses from the global local address list.
6307 */
6308 if (sctp_list_single_entry(&bp->address_list)) {
6309 addr = list_entry(bp->address_list.next,
6310 struct sctp_sockaddr_entry, list);
6311 if (sctp_is_any(sk, &addr->a)) {
6312 cnt = sctp_copy_laddrs(sk, bp->port, addrs,
6313 space_left, &bytes_copied);
6314 if (cnt < 0) {
6315 err = cnt;
6316 goto out;
6317 }
6318 goto copy_getaddrs;
6319 }
6320 }
6321
6322 buf = addrs;
6323 /* Protection on the bound address list is not needed since
6324 * in the socket option context we hold a socket lock and
6325 * thus the bound address list can't change.
6326 */
6327 list_for_each_entry(addr, &bp->address_list, list) {
6328 memcpy(&temp, &addr->a, sizeof(temp));
6329 addrlen = sctp_get_pf_specific(sk->sk_family)
6330 ->addr_to_user(sp, &temp);
6331 if (space_left < addrlen) {
6332 err = -ENOMEM; /*fixme: right error?*/
6333 goto out;
6334 }
6335 memcpy(buf, &temp, addrlen);
6336 buf += addrlen;
6337 bytes_copied += addrlen;
6338 cnt++;
6339 space_left -= addrlen;
6340 }
6341
6342copy_getaddrs:
6343 if (copy_to_user(to, addrs, bytes_copied)) {
6344 err = -EFAULT;
6345 goto out;
6346 }
6347 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
6348 err = -EFAULT;
6349 goto out;
6350 }
6351 /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
6352 * but we can't change it anymore.
6353 */
6354 if (put_user(bytes_copied, optlen))
6355 err = -EFAULT;
6356out:
6357 kfree(addrs);
6358 return err;
6359}
6360
6361/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
6362 *
6363 * Requests that the local SCTP stack use the enclosed peer address as
6364 * the association primary. The enclosed address must be one of the
6365 * association peer's addresses.
6366 */
6367static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
6368 char __user *optval, int __user *optlen)
6369{
6370 struct sctp_prim prim;
6371 struct sctp_association *asoc;
6372 struct sctp_sock *sp = sctp_sk(sk);
6373
6374 if (len < sizeof(struct sctp_prim))
6375 return -EINVAL;
6376
6377 len = sizeof(struct sctp_prim);
6378
6379 if (copy_from_user(&prim, optval, len))
6380 return -EFAULT;
6381
6382 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
6383 if (!asoc)
6384 return -EINVAL;
6385
6386 if (!asoc->peer.primary_path)
6387 return -ENOTCONN;
6388
6389 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
6390 asoc->peer.primary_path->af_specific->sockaddr_len);
6391
6392 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
6393 (union sctp_addr *)&prim.ssp_addr);
6394
6395 if (put_user(len, optlen))
6396 return -EFAULT;
6397 if (copy_to_user(optval, &prim, len))
6398 return -EFAULT;
6399
6400 return 0;
6401}
6402
6403/*
6404 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
6405 *
6406 * Requests that the local endpoint set the specified Adaptation Layer
6407 * Indication parameter for all future INIT and INIT-ACK exchanges.
6408 */
6409static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
6410 char __user *optval, int __user *optlen)
6411{
6412 struct sctp_setadaptation adaptation;
6413
6414 if (len < sizeof(struct sctp_setadaptation))
6415 return -EINVAL;
6416
6417 len = sizeof(struct sctp_setadaptation);
6418
6419 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
6420
6421 if (put_user(len, optlen))
6422 return -EFAULT;
6423 if (copy_to_user(optval, &adaptation, len))
6424 return -EFAULT;
6425
6426 return 0;
6427}
6428
6429/*
6430 *
6431 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
6432 *
6433 * Applications that wish to use the sendto() system call may wish to
6434 * specify a default set of parameters that would normally be supplied
6435 * through the inclusion of ancillary data. This socket option allows
6436 * such an application to set the default sctp_sndrcvinfo structure.
6437
6438
6439 * The application that wishes to use this socket option simply passes
6440 * in to this call the sctp_sndrcvinfo structure defined in Section
6441 * 5.2.2) The input parameters accepted by this call include
6442 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
6443 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
6444 * to this call if the caller is using the UDP model.
6445 *
6446 * For getsockopt, it get the default sctp_sndrcvinfo structure.
6447 */
6448static int sctp_getsockopt_default_send_param(struct sock *sk,
6449 int len, char __user *optval,
6450 int __user *optlen)
6451{
6452 struct sctp_sock *sp = sctp_sk(sk);
6453 struct sctp_association *asoc;
6454 struct sctp_sndrcvinfo info;
6455
6456 if (len < sizeof(info))
6457 return -EINVAL;
6458
6459 len = sizeof(info);
6460
6461 if (copy_from_user(&info, optval, len))
6462 return -EFAULT;
6463
6464 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00006465 if (!asoc && info.sinfo_assoc_id != SCTP_FUTURE_ASSOC &&
6466 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006467 return -EINVAL;
David Brazdil0f672f62019-12-10 10:32:29 +00006468
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006469 if (asoc) {
6470 info.sinfo_stream = asoc->default_stream;
6471 info.sinfo_flags = asoc->default_flags;
6472 info.sinfo_ppid = asoc->default_ppid;
6473 info.sinfo_context = asoc->default_context;
6474 info.sinfo_timetolive = asoc->default_timetolive;
6475 } else {
6476 info.sinfo_stream = sp->default_stream;
6477 info.sinfo_flags = sp->default_flags;
6478 info.sinfo_ppid = sp->default_ppid;
6479 info.sinfo_context = sp->default_context;
6480 info.sinfo_timetolive = sp->default_timetolive;
6481 }
6482
6483 if (put_user(len, optlen))
6484 return -EFAULT;
6485 if (copy_to_user(optval, &info, len))
6486 return -EFAULT;
6487
6488 return 0;
6489}
6490
6491/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
6492 * (SCTP_DEFAULT_SNDINFO)
6493 */
6494static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
6495 char __user *optval,
6496 int __user *optlen)
6497{
6498 struct sctp_sock *sp = sctp_sk(sk);
6499 struct sctp_association *asoc;
6500 struct sctp_sndinfo info;
6501
6502 if (len < sizeof(info))
6503 return -EINVAL;
6504
6505 len = sizeof(info);
6506
6507 if (copy_from_user(&info, optval, len))
6508 return -EFAULT;
6509
6510 asoc = sctp_id2assoc(sk, info.snd_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00006511 if (!asoc && info.snd_assoc_id != SCTP_FUTURE_ASSOC &&
6512 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006513 return -EINVAL;
David Brazdil0f672f62019-12-10 10:32:29 +00006514
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006515 if (asoc) {
6516 info.snd_sid = asoc->default_stream;
6517 info.snd_flags = asoc->default_flags;
6518 info.snd_ppid = asoc->default_ppid;
6519 info.snd_context = asoc->default_context;
6520 } else {
6521 info.snd_sid = sp->default_stream;
6522 info.snd_flags = sp->default_flags;
6523 info.snd_ppid = sp->default_ppid;
6524 info.snd_context = sp->default_context;
6525 }
6526
6527 if (put_user(len, optlen))
6528 return -EFAULT;
6529 if (copy_to_user(optval, &info, len))
6530 return -EFAULT;
6531
6532 return 0;
6533}
6534
6535/*
6536 *
6537 * 7.1.5 SCTP_NODELAY
6538 *
6539 * Turn on/off any Nagle-like algorithm. This means that packets are
6540 * generally sent as soon as possible and no unnecessary delays are
6541 * introduced, at the cost of more packets in the network. Expects an
6542 * integer boolean flag.
6543 */
6544
6545static int sctp_getsockopt_nodelay(struct sock *sk, int len,
6546 char __user *optval, int __user *optlen)
6547{
6548 int val;
6549
6550 if (len < sizeof(int))
6551 return -EINVAL;
6552
6553 len = sizeof(int);
6554 val = (sctp_sk(sk)->nodelay == 1);
6555 if (put_user(len, optlen))
6556 return -EFAULT;
6557 if (copy_to_user(optval, &val, len))
6558 return -EFAULT;
6559 return 0;
6560}
6561
6562/*
6563 *
6564 * 7.1.1 SCTP_RTOINFO
6565 *
6566 * The protocol parameters used to initialize and bound retransmission
6567 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
6568 * and modify these parameters.
6569 * All parameters are time values, in milliseconds. A value of 0, when
6570 * modifying the parameters, indicates that the current value should not
6571 * be changed.
6572 *
6573 */
6574static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
6575 char __user *optval,
6576 int __user *optlen) {
6577 struct sctp_rtoinfo rtoinfo;
6578 struct sctp_association *asoc;
6579
6580 if (len < sizeof (struct sctp_rtoinfo))
6581 return -EINVAL;
6582
6583 len = sizeof(struct sctp_rtoinfo);
6584
6585 if (copy_from_user(&rtoinfo, optval, len))
6586 return -EFAULT;
6587
6588 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
6589
David Brazdil0f672f62019-12-10 10:32:29 +00006590 if (!asoc && rtoinfo.srto_assoc_id != SCTP_FUTURE_ASSOC &&
6591 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006592 return -EINVAL;
6593
6594 /* Values corresponding to the specific association. */
6595 if (asoc) {
6596 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
6597 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
6598 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
6599 } else {
6600 /* Values corresponding to the endpoint. */
6601 struct sctp_sock *sp = sctp_sk(sk);
6602
6603 rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
6604 rtoinfo.srto_max = sp->rtoinfo.srto_max;
6605 rtoinfo.srto_min = sp->rtoinfo.srto_min;
6606 }
6607
6608 if (put_user(len, optlen))
6609 return -EFAULT;
6610
6611 if (copy_to_user(optval, &rtoinfo, len))
6612 return -EFAULT;
6613
6614 return 0;
6615}
6616
6617/*
6618 *
6619 * 7.1.2 SCTP_ASSOCINFO
6620 *
6621 * This option is used to tune the maximum retransmission attempts
6622 * of the association.
6623 * Returns an error if the new association retransmission value is
6624 * greater than the sum of the retransmission value of the peer.
6625 * See [SCTP] for more information.
6626 *
6627 */
6628static int sctp_getsockopt_associnfo(struct sock *sk, int len,
6629 char __user *optval,
6630 int __user *optlen)
6631{
6632
6633 struct sctp_assocparams assocparams;
6634 struct sctp_association *asoc;
6635 struct list_head *pos;
6636 int cnt = 0;
6637
6638 if (len < sizeof (struct sctp_assocparams))
6639 return -EINVAL;
6640
6641 len = sizeof(struct sctp_assocparams);
6642
6643 if (copy_from_user(&assocparams, optval, len))
6644 return -EFAULT;
6645
6646 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
6647
David Brazdil0f672f62019-12-10 10:32:29 +00006648 if (!asoc && assocparams.sasoc_assoc_id != SCTP_FUTURE_ASSOC &&
6649 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006650 return -EINVAL;
6651
6652 /* Values correspoinding to the specific association */
6653 if (asoc) {
6654 assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
6655 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
6656 assocparams.sasoc_local_rwnd = asoc->a_rwnd;
6657 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
6658
6659 list_for_each(pos, &asoc->peer.transport_addr_list) {
6660 cnt++;
6661 }
6662
6663 assocparams.sasoc_number_peer_destinations = cnt;
6664 } else {
6665 /* Values corresponding to the endpoint */
6666 struct sctp_sock *sp = sctp_sk(sk);
6667
6668 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
6669 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
6670 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
6671 assocparams.sasoc_cookie_life =
6672 sp->assocparams.sasoc_cookie_life;
6673 assocparams.sasoc_number_peer_destinations =
6674 sp->assocparams.
6675 sasoc_number_peer_destinations;
6676 }
6677
6678 if (put_user(len, optlen))
6679 return -EFAULT;
6680
6681 if (copy_to_user(optval, &assocparams, len))
6682 return -EFAULT;
6683
6684 return 0;
6685}
6686
6687/*
6688 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
6689 *
6690 * This socket option is a boolean flag which turns on or off mapped V4
6691 * addresses. If this option is turned on and the socket is type
6692 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
6693 * If this option is turned off, then no mapping will be done of V4
6694 * addresses and a user will receive both PF_INET6 and PF_INET type
6695 * addresses on the socket.
6696 */
6697static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
6698 char __user *optval, int __user *optlen)
6699{
6700 int val;
6701 struct sctp_sock *sp = sctp_sk(sk);
6702
6703 if (len < sizeof(int))
6704 return -EINVAL;
6705
6706 len = sizeof(int);
6707 val = sp->v4mapped;
6708 if (put_user(len, optlen))
6709 return -EFAULT;
6710 if (copy_to_user(optval, &val, len))
6711 return -EFAULT;
6712
6713 return 0;
6714}
6715
6716/*
6717 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
6718 * (chapter and verse is quoted at sctp_setsockopt_context())
6719 */
6720static int sctp_getsockopt_context(struct sock *sk, int len,
6721 char __user *optval, int __user *optlen)
6722{
6723 struct sctp_assoc_value params;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006724 struct sctp_association *asoc;
6725
6726 if (len < sizeof(struct sctp_assoc_value))
6727 return -EINVAL;
6728
6729 len = sizeof(struct sctp_assoc_value);
6730
6731 if (copy_from_user(&params, optval, len))
6732 return -EFAULT;
6733
David Brazdil0f672f62019-12-10 10:32:29 +00006734 asoc = sctp_id2assoc(sk, params.assoc_id);
6735 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
6736 sctp_style(sk, UDP))
6737 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006738
David Brazdil0f672f62019-12-10 10:32:29 +00006739 params.assoc_value = asoc ? asoc->default_rcv_context
6740 : sctp_sk(sk)->default_rcv_context;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006741
6742 if (put_user(len, optlen))
6743 return -EFAULT;
6744 if (copy_to_user(optval, &params, len))
6745 return -EFAULT;
6746
6747 return 0;
6748}
6749
6750/*
6751 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
6752 * This option will get or set the maximum size to put in any outgoing
6753 * SCTP DATA chunk. If a message is larger than this size it will be
6754 * fragmented by SCTP into the specified size. Note that the underlying
6755 * SCTP implementation may fragment into smaller sized chunks when the
6756 * PMTU of the underlying association is smaller than the value set by
6757 * the user. The default value for this option is '0' which indicates
6758 * the user is NOT limiting fragmentation and only the PMTU will effect
6759 * SCTP's choice of DATA chunk size. Note also that values set larger
6760 * than the maximum size of an IP datagram will effectively let SCTP
6761 * control fragmentation (i.e. the same as setting this option to 0).
6762 *
6763 * The following structure is used to access and modify this parameter:
6764 *
6765 * struct sctp_assoc_value {
6766 * sctp_assoc_t assoc_id;
6767 * uint32_t assoc_value;
6768 * };
6769 *
6770 * assoc_id: This parameter is ignored for one-to-one style sockets.
6771 * For one-to-many style sockets this parameter indicates which
6772 * association the user is performing an action upon. Note that if
6773 * this field's value is zero then the endpoints default value is
6774 * changed (effecting future associations only).
6775 * assoc_value: This parameter specifies the maximum size in bytes.
6776 */
6777static int sctp_getsockopt_maxseg(struct sock *sk, int len,
6778 char __user *optval, int __user *optlen)
6779{
6780 struct sctp_assoc_value params;
6781 struct sctp_association *asoc;
6782
6783 if (len == sizeof(int)) {
6784 pr_warn_ratelimited(DEPRECATED
6785 "%s (pid %d) "
6786 "Use of int in maxseg socket option.\n"
6787 "Use struct sctp_assoc_value instead\n",
6788 current->comm, task_pid_nr(current));
David Brazdil0f672f62019-12-10 10:32:29 +00006789 params.assoc_id = SCTP_FUTURE_ASSOC;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006790 } else if (len >= sizeof(struct sctp_assoc_value)) {
6791 len = sizeof(struct sctp_assoc_value);
6792 if (copy_from_user(&params, optval, len))
6793 return -EFAULT;
6794 } else
6795 return -EINVAL;
6796
6797 asoc = sctp_id2assoc(sk, params.assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00006798 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
6799 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006800 return -EINVAL;
6801
6802 if (asoc)
6803 params.assoc_value = asoc->frag_point;
6804 else
6805 params.assoc_value = sctp_sk(sk)->user_frag;
6806
6807 if (put_user(len, optlen))
6808 return -EFAULT;
6809 if (len == sizeof(int)) {
6810 if (copy_to_user(optval, &params.assoc_value, len))
6811 return -EFAULT;
6812 } else {
6813 if (copy_to_user(optval, &params, len))
6814 return -EFAULT;
6815 }
6816
6817 return 0;
6818}
6819
6820/*
6821 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
6822 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
6823 */
6824static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
6825 char __user *optval, int __user *optlen)
6826{
6827 int val;
6828
6829 if (len < sizeof(int))
6830 return -EINVAL;
6831
6832 len = sizeof(int);
6833
6834 val = sctp_sk(sk)->frag_interleave;
6835 if (put_user(len, optlen))
6836 return -EFAULT;
6837 if (copy_to_user(optval, &val, len))
6838 return -EFAULT;
6839
6840 return 0;
6841}
6842
6843/*
6844 * 7.1.25. Set or Get the sctp partial delivery point
6845 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
6846 */
6847static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
6848 char __user *optval,
6849 int __user *optlen)
6850{
6851 u32 val;
6852
6853 if (len < sizeof(u32))
6854 return -EINVAL;
6855
6856 len = sizeof(u32);
6857
6858 val = sctp_sk(sk)->pd_point;
6859 if (put_user(len, optlen))
6860 return -EFAULT;
6861 if (copy_to_user(optval, &val, len))
6862 return -EFAULT;
6863
6864 return 0;
6865}
6866
6867/*
6868 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
6869 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
6870 */
6871static int sctp_getsockopt_maxburst(struct sock *sk, int len,
6872 char __user *optval,
6873 int __user *optlen)
6874{
6875 struct sctp_assoc_value params;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006876 struct sctp_association *asoc;
6877
6878 if (len == sizeof(int)) {
6879 pr_warn_ratelimited(DEPRECATED
6880 "%s (pid %d) "
6881 "Use of int in max_burst socket option.\n"
6882 "Use struct sctp_assoc_value instead\n",
6883 current->comm, task_pid_nr(current));
David Brazdil0f672f62019-12-10 10:32:29 +00006884 params.assoc_id = SCTP_FUTURE_ASSOC;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006885 } else if (len >= sizeof(struct sctp_assoc_value)) {
6886 len = sizeof(struct sctp_assoc_value);
6887 if (copy_from_user(&params, optval, len))
6888 return -EFAULT;
6889 } else
6890 return -EINVAL;
6891
David Brazdil0f672f62019-12-10 10:32:29 +00006892 asoc = sctp_id2assoc(sk, params.assoc_id);
6893 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
6894 sctp_style(sk, UDP))
6895 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006896
David Brazdil0f672f62019-12-10 10:32:29 +00006897 params.assoc_value = asoc ? asoc->max_burst : sctp_sk(sk)->max_burst;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006898
6899 if (len == sizeof(int)) {
6900 if (copy_to_user(optval, &params.assoc_value, len))
6901 return -EFAULT;
6902 } else {
6903 if (copy_to_user(optval, &params, len))
6904 return -EFAULT;
6905 }
6906
6907 return 0;
6908
6909}
6910
6911static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
6912 char __user *optval, int __user *optlen)
6913{
6914 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6915 struct sctp_hmacalgo __user *p = (void __user *)optval;
6916 struct sctp_hmac_algo_param *hmacs;
6917 __u16 data_len = 0;
6918 u32 num_idents;
6919 int i;
6920
6921 if (!ep->auth_enable)
6922 return -EACCES;
6923
6924 hmacs = ep->auth_hmacs_list;
6925 data_len = ntohs(hmacs->param_hdr.length) -
6926 sizeof(struct sctp_paramhdr);
6927
6928 if (len < sizeof(struct sctp_hmacalgo) + data_len)
6929 return -EINVAL;
6930
6931 len = sizeof(struct sctp_hmacalgo) + data_len;
6932 num_idents = data_len / sizeof(u16);
6933
6934 if (put_user(len, optlen))
6935 return -EFAULT;
6936 if (put_user(num_idents, &p->shmac_num_idents))
6937 return -EFAULT;
6938 for (i = 0; i < num_idents; i++) {
6939 __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
6940
6941 if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
6942 return -EFAULT;
6943 }
6944 return 0;
6945}
6946
6947static int sctp_getsockopt_active_key(struct sock *sk, int len,
6948 char __user *optval, int __user *optlen)
6949{
6950 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6951 struct sctp_authkeyid val;
6952 struct sctp_association *asoc;
6953
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006954 if (len < sizeof(struct sctp_authkeyid))
6955 return -EINVAL;
6956
6957 len = sizeof(struct sctp_authkeyid);
6958 if (copy_from_user(&val, optval, len))
6959 return -EFAULT;
6960
6961 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
6962 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
6963 return -EINVAL;
6964
David Brazdil0f672f62019-12-10 10:32:29 +00006965 if (asoc) {
6966 if (!asoc->peer.auth_capable)
6967 return -EACCES;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006968 val.scact_keynumber = asoc->active_key_id;
David Brazdil0f672f62019-12-10 10:32:29 +00006969 } else {
6970 if (!ep->auth_enable)
6971 return -EACCES;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006972 val.scact_keynumber = ep->active_key_id;
David Brazdil0f672f62019-12-10 10:32:29 +00006973 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006974
6975 if (put_user(len, optlen))
6976 return -EFAULT;
6977 if (copy_to_user(optval, &val, len))
6978 return -EFAULT;
6979
6980 return 0;
6981}
6982
6983static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
6984 char __user *optval, int __user *optlen)
6985{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006986 struct sctp_authchunks __user *p = (void __user *)optval;
6987 struct sctp_authchunks val;
6988 struct sctp_association *asoc;
6989 struct sctp_chunks_param *ch;
6990 u32 num_chunks = 0;
6991 char __user *to;
6992
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006993 if (len < sizeof(struct sctp_authchunks))
6994 return -EINVAL;
6995
6996 if (copy_from_user(&val, optval, sizeof(val)))
6997 return -EFAULT;
6998
6999 to = p->gauth_chunks;
7000 asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
7001 if (!asoc)
7002 return -EINVAL;
7003
David Brazdil0f672f62019-12-10 10:32:29 +00007004 if (!asoc->peer.auth_capable)
7005 return -EACCES;
7006
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007007 ch = asoc->peer.peer_chunks;
7008 if (!ch)
7009 goto num;
7010
7011 /* See if the user provided enough room for all the data */
7012 num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
7013 if (len < num_chunks)
7014 return -EINVAL;
7015
7016 if (copy_to_user(to, ch->chunks, num_chunks))
7017 return -EFAULT;
7018num:
7019 len = sizeof(struct sctp_authchunks) + num_chunks;
7020 if (put_user(len, optlen))
7021 return -EFAULT;
7022 if (put_user(num_chunks, &p->gauth_number_of_chunks))
7023 return -EFAULT;
7024 return 0;
7025}
7026
7027static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
7028 char __user *optval, int __user *optlen)
7029{
7030 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
7031 struct sctp_authchunks __user *p = (void __user *)optval;
7032 struct sctp_authchunks val;
7033 struct sctp_association *asoc;
7034 struct sctp_chunks_param *ch;
7035 u32 num_chunks = 0;
7036 char __user *to;
7037
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007038 if (len < sizeof(struct sctp_authchunks))
7039 return -EINVAL;
7040
7041 if (copy_from_user(&val, optval, sizeof(val)))
7042 return -EFAULT;
7043
7044 to = p->gauth_chunks;
7045 asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00007046 if (!asoc && val.gauth_assoc_id != SCTP_FUTURE_ASSOC &&
7047 sctp_style(sk, UDP))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007048 return -EINVAL;
7049
David Brazdil0f672f62019-12-10 10:32:29 +00007050 if (asoc) {
7051 if (!asoc->peer.auth_capable)
7052 return -EACCES;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007053 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
David Brazdil0f672f62019-12-10 10:32:29 +00007054 } else {
7055 if (!ep->auth_enable)
7056 return -EACCES;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007057 ch = ep->auth_chunk_list;
David Brazdil0f672f62019-12-10 10:32:29 +00007058 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007059 if (!ch)
7060 goto num;
7061
7062 num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
7063 if (len < sizeof(struct sctp_authchunks) + num_chunks)
7064 return -EINVAL;
7065
7066 if (copy_to_user(to, ch->chunks, num_chunks))
7067 return -EFAULT;
7068num:
7069 len = sizeof(struct sctp_authchunks) + num_chunks;
7070 if (put_user(len, optlen))
7071 return -EFAULT;
7072 if (put_user(num_chunks, &p->gauth_number_of_chunks))
7073 return -EFAULT;
7074
7075 return 0;
7076}
7077
7078/*
7079 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
7080 * This option gets the current number of associations that are attached
7081 * to a one-to-many style socket. The option value is an uint32_t.
7082 */
7083static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
7084 char __user *optval, int __user *optlen)
7085{
7086 struct sctp_sock *sp = sctp_sk(sk);
7087 struct sctp_association *asoc;
7088 u32 val = 0;
7089
7090 if (sctp_style(sk, TCP))
7091 return -EOPNOTSUPP;
7092
7093 if (len < sizeof(u32))
7094 return -EINVAL;
7095
7096 len = sizeof(u32);
7097
7098 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
7099 val++;
7100 }
7101
7102 if (put_user(len, optlen))
7103 return -EFAULT;
7104 if (copy_to_user(optval, &val, len))
7105 return -EFAULT;
7106
7107 return 0;
7108}
7109
7110/*
7111 * 8.1.23 SCTP_AUTO_ASCONF
7112 * See the corresponding setsockopt entry as description
7113 */
7114static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
7115 char __user *optval, int __user *optlen)
7116{
7117 int val = 0;
7118
7119 if (len < sizeof(int))
7120 return -EINVAL;
7121
7122 len = sizeof(int);
7123 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
7124 val = 1;
7125 if (put_user(len, optlen))
7126 return -EFAULT;
7127 if (copy_to_user(optval, &val, len))
7128 return -EFAULT;
7129 return 0;
7130}
7131
7132/*
7133 * 8.2.6. Get the Current Identifiers of Associations
7134 * (SCTP_GET_ASSOC_ID_LIST)
7135 *
7136 * This option gets the current list of SCTP association identifiers of
7137 * the SCTP associations handled by a one-to-many style socket.
7138 */
7139static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
7140 char __user *optval, int __user *optlen)
7141{
7142 struct sctp_sock *sp = sctp_sk(sk);
7143 struct sctp_association *asoc;
7144 struct sctp_assoc_ids *ids;
7145 u32 num = 0;
7146
7147 if (sctp_style(sk, TCP))
7148 return -EOPNOTSUPP;
7149
7150 if (len < sizeof(struct sctp_assoc_ids))
7151 return -EINVAL;
7152
7153 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
7154 num++;
7155 }
7156
7157 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
7158 return -EINVAL;
7159
7160 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
7161
7162 ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
7163 if (unlikely(!ids))
7164 return -ENOMEM;
7165
7166 ids->gaids_number_of_ids = num;
7167 num = 0;
7168 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
7169 ids->gaids_assoc_id[num++] = asoc->assoc_id;
7170 }
7171
7172 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
7173 kfree(ids);
7174 return -EFAULT;
7175 }
7176
7177 kfree(ids);
7178 return 0;
7179}
7180
7181/*
7182 * SCTP_PEER_ADDR_THLDS
7183 *
7184 * This option allows us to fetch the partially failed threshold for one or all
7185 * transports in an association. See Section 6.1 of:
7186 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
7187 */
7188static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
7189 char __user *optval,
7190 int len,
7191 int __user *optlen)
7192{
7193 struct sctp_paddrthlds val;
7194 struct sctp_transport *trans;
7195 struct sctp_association *asoc;
7196
7197 if (len < sizeof(struct sctp_paddrthlds))
7198 return -EINVAL;
7199 len = sizeof(struct sctp_paddrthlds);
7200 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
7201 return -EFAULT;
7202
David Brazdil0f672f62019-12-10 10:32:29 +00007203 if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007204 trans = sctp_addr_id2transport(sk, &val.spt_address,
7205 val.spt_assoc_id);
7206 if (!trans)
7207 return -ENOENT;
7208
7209 val.spt_pathmaxrxt = trans->pathmaxrxt;
7210 val.spt_pathpfthld = trans->pf_retrans;
David Brazdil0f672f62019-12-10 10:32:29 +00007211
7212 goto out;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007213 }
7214
David Brazdil0f672f62019-12-10 10:32:29 +00007215 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
7216 if (!asoc && val.spt_assoc_id != SCTP_FUTURE_ASSOC &&
7217 sctp_style(sk, UDP))
7218 return -EINVAL;
7219
7220 if (asoc) {
7221 val.spt_pathpfthld = asoc->pf_retrans;
7222 val.spt_pathmaxrxt = asoc->pathmaxrxt;
7223 } else {
7224 struct sctp_sock *sp = sctp_sk(sk);
7225
7226 val.spt_pathpfthld = sp->pf_retrans;
7227 val.spt_pathmaxrxt = sp->pathmaxrxt;
7228 }
7229
7230out:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007231 if (put_user(len, optlen) || copy_to_user(optval, &val, len))
7232 return -EFAULT;
7233
7234 return 0;
7235}
7236
7237/*
7238 * SCTP_GET_ASSOC_STATS
7239 *
7240 * This option retrieves local per endpoint statistics. It is modeled
7241 * after OpenSolaris' implementation
7242 */
7243static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
7244 char __user *optval,
7245 int __user *optlen)
7246{
7247 struct sctp_assoc_stats sas;
7248 struct sctp_association *asoc = NULL;
7249
7250 /* User must provide at least the assoc id */
7251 if (len < sizeof(sctp_assoc_t))
7252 return -EINVAL;
7253
7254 /* Allow the struct to grow and fill in as much as possible */
7255 len = min_t(size_t, len, sizeof(sas));
7256
7257 if (copy_from_user(&sas, optval, len))
7258 return -EFAULT;
7259
7260 asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
7261 if (!asoc)
7262 return -EINVAL;
7263
7264 sas.sas_rtxchunks = asoc->stats.rtxchunks;
7265 sas.sas_gapcnt = asoc->stats.gapcnt;
7266 sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
7267 sas.sas_osacks = asoc->stats.osacks;
7268 sas.sas_isacks = asoc->stats.isacks;
7269 sas.sas_octrlchunks = asoc->stats.octrlchunks;
7270 sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
7271 sas.sas_oodchunks = asoc->stats.oodchunks;
7272 sas.sas_iodchunks = asoc->stats.iodchunks;
7273 sas.sas_ouodchunks = asoc->stats.ouodchunks;
7274 sas.sas_iuodchunks = asoc->stats.iuodchunks;
7275 sas.sas_idupchunks = asoc->stats.idupchunks;
7276 sas.sas_opackets = asoc->stats.opackets;
7277 sas.sas_ipackets = asoc->stats.ipackets;
7278
7279 /* New high max rto observed, will return 0 if not a single
7280 * RTO update took place. obs_rto_ipaddr will be bogus
7281 * in such a case
7282 */
7283 sas.sas_maxrto = asoc->stats.max_obs_rto;
7284 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
7285 sizeof(struct sockaddr_storage));
7286
7287 /* Mark beginning of a new observation period */
7288 asoc->stats.max_obs_rto = asoc->rto_min;
7289
7290 if (put_user(len, optlen))
7291 return -EFAULT;
7292
7293 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id);
7294
7295 if (copy_to_user(optval, &sas, len))
7296 return -EFAULT;
7297
7298 return 0;
7299}
7300
7301static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len,
7302 char __user *optval,
7303 int __user *optlen)
7304{
7305 int val = 0;
7306
7307 if (len < sizeof(int))
7308 return -EINVAL;
7309
7310 len = sizeof(int);
7311 if (sctp_sk(sk)->recvrcvinfo)
7312 val = 1;
7313 if (put_user(len, optlen))
7314 return -EFAULT;
7315 if (copy_to_user(optval, &val, len))
7316 return -EFAULT;
7317
7318 return 0;
7319}
7320
7321static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len,
7322 char __user *optval,
7323 int __user *optlen)
7324{
7325 int val = 0;
7326
7327 if (len < sizeof(int))
7328 return -EINVAL;
7329
7330 len = sizeof(int);
7331 if (sctp_sk(sk)->recvnxtinfo)
7332 val = 1;
7333 if (put_user(len, optlen))
7334 return -EFAULT;
7335 if (copy_to_user(optval, &val, len))
7336 return -EFAULT;
7337
7338 return 0;
7339}
7340
7341static int sctp_getsockopt_pr_supported(struct sock *sk, int len,
7342 char __user *optval,
7343 int __user *optlen)
7344{
7345 struct sctp_assoc_value params;
7346 struct sctp_association *asoc;
7347 int retval = -EFAULT;
7348
7349 if (len < sizeof(params)) {
7350 retval = -EINVAL;
7351 goto out;
7352 }
7353
7354 len = sizeof(params);
7355 if (copy_from_user(&params, optval, len))
7356 goto out;
7357
7358 asoc = sctp_id2assoc(sk, params.assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00007359 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
7360 sctp_style(sk, UDP)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007361 retval = -EINVAL;
7362 goto out;
7363 }
7364
David Brazdil0f672f62019-12-10 10:32:29 +00007365 params.assoc_value = asoc ? asoc->peer.prsctp_capable
7366 : sctp_sk(sk)->ep->prsctp_enable;
7367
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007368 if (put_user(len, optlen))
7369 goto out;
7370
7371 if (copy_to_user(optval, &params, len))
7372 goto out;
7373
7374 retval = 0;
7375
7376out:
7377 return retval;
7378}
7379
7380static int sctp_getsockopt_default_prinfo(struct sock *sk, int len,
7381 char __user *optval,
7382 int __user *optlen)
7383{
7384 struct sctp_default_prinfo info;
7385 struct sctp_association *asoc;
7386 int retval = -EFAULT;
7387
7388 if (len < sizeof(info)) {
7389 retval = -EINVAL;
7390 goto out;
7391 }
7392
7393 len = sizeof(info);
7394 if (copy_from_user(&info, optval, len))
7395 goto out;
7396
7397 asoc = sctp_id2assoc(sk, info.pr_assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00007398 if (!asoc && info.pr_assoc_id != SCTP_FUTURE_ASSOC &&
7399 sctp_style(sk, UDP)) {
7400 retval = -EINVAL;
7401 goto out;
7402 }
7403
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007404 if (asoc) {
7405 info.pr_policy = SCTP_PR_POLICY(asoc->default_flags);
7406 info.pr_value = asoc->default_timetolive;
David Brazdil0f672f62019-12-10 10:32:29 +00007407 } else {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007408 struct sctp_sock *sp = sctp_sk(sk);
7409
7410 info.pr_policy = SCTP_PR_POLICY(sp->default_flags);
7411 info.pr_value = sp->default_timetolive;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007412 }
7413
7414 if (put_user(len, optlen))
7415 goto out;
7416
7417 if (copy_to_user(optval, &info, len))
7418 goto out;
7419
7420 retval = 0;
7421
7422out:
7423 return retval;
7424}
7425
7426static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
7427 char __user *optval,
7428 int __user *optlen)
7429{
7430 struct sctp_prstatus params;
7431 struct sctp_association *asoc;
7432 int policy;
7433 int retval = -EINVAL;
7434
7435 if (len < sizeof(params))
7436 goto out;
7437
7438 len = sizeof(params);
7439 if (copy_from_user(&params, optval, len)) {
7440 retval = -EFAULT;
7441 goto out;
7442 }
7443
7444 policy = params.sprstat_policy;
7445 if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)) ||
7446 ((policy & SCTP_PR_SCTP_ALL) && (policy & SCTP_PR_SCTP_MASK)))
7447 goto out;
7448
7449 asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
7450 if (!asoc)
7451 goto out;
7452
7453 if (policy == SCTP_PR_SCTP_ALL) {
7454 params.sprstat_abandoned_unsent = 0;
7455 params.sprstat_abandoned_sent = 0;
7456 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
7457 params.sprstat_abandoned_unsent +=
7458 asoc->abandoned_unsent[policy];
7459 params.sprstat_abandoned_sent +=
7460 asoc->abandoned_sent[policy];
7461 }
7462 } else {
7463 params.sprstat_abandoned_unsent =
7464 asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)];
7465 params.sprstat_abandoned_sent =
7466 asoc->abandoned_sent[__SCTP_PR_INDEX(policy)];
7467 }
7468
7469 if (put_user(len, optlen)) {
7470 retval = -EFAULT;
7471 goto out;
7472 }
7473
7474 if (copy_to_user(optval, &params, len)) {
7475 retval = -EFAULT;
7476 goto out;
7477 }
7478
7479 retval = 0;
7480
7481out:
7482 return retval;
7483}
7484
7485static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
7486 char __user *optval,
7487 int __user *optlen)
7488{
7489 struct sctp_stream_out_ext *streamoute;
7490 struct sctp_association *asoc;
7491 struct sctp_prstatus params;
7492 int retval = -EINVAL;
7493 int policy;
7494
7495 if (len < sizeof(params))
7496 goto out;
7497
7498 len = sizeof(params);
7499 if (copy_from_user(&params, optval, len)) {
7500 retval = -EFAULT;
7501 goto out;
7502 }
7503
7504 policy = params.sprstat_policy;
7505 if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)) ||
7506 ((policy & SCTP_PR_SCTP_ALL) && (policy & SCTP_PR_SCTP_MASK)))
7507 goto out;
7508
7509 asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
7510 if (!asoc || params.sprstat_sid >= asoc->stream.outcnt)
7511 goto out;
7512
7513 streamoute = SCTP_SO(&asoc->stream, params.sprstat_sid)->ext;
7514 if (!streamoute) {
7515 /* Not allocated yet, means all stats are 0 */
7516 params.sprstat_abandoned_unsent = 0;
7517 params.sprstat_abandoned_sent = 0;
7518 retval = 0;
7519 goto out;
7520 }
7521
7522 if (policy == SCTP_PR_SCTP_ALL) {
7523 params.sprstat_abandoned_unsent = 0;
7524 params.sprstat_abandoned_sent = 0;
7525 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
7526 params.sprstat_abandoned_unsent +=
7527 streamoute->abandoned_unsent[policy];
7528 params.sprstat_abandoned_sent +=
7529 streamoute->abandoned_sent[policy];
7530 }
7531 } else {
7532 params.sprstat_abandoned_unsent =
7533 streamoute->abandoned_unsent[__SCTP_PR_INDEX(policy)];
7534 params.sprstat_abandoned_sent =
7535 streamoute->abandoned_sent[__SCTP_PR_INDEX(policy)];
7536 }
7537
7538 if (put_user(len, optlen) || copy_to_user(optval, &params, len)) {
7539 retval = -EFAULT;
7540 goto out;
7541 }
7542
7543 retval = 0;
7544
7545out:
7546 return retval;
7547}
7548
7549static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
7550 char __user *optval,
7551 int __user *optlen)
7552{
7553 struct sctp_assoc_value params;
7554 struct sctp_association *asoc;
7555 int retval = -EFAULT;
7556
7557 if (len < sizeof(params)) {
7558 retval = -EINVAL;
7559 goto out;
7560 }
7561
7562 len = sizeof(params);
7563 if (copy_from_user(&params, optval, len))
7564 goto out;
7565
7566 asoc = sctp_id2assoc(sk, params.assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00007567 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
7568 sctp_style(sk, UDP)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007569 retval = -EINVAL;
7570 goto out;
7571 }
7572
David Brazdil0f672f62019-12-10 10:32:29 +00007573 params.assoc_value = asoc ? asoc->peer.reconf_capable
7574 : sctp_sk(sk)->ep->reconf_enable;
7575
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007576 if (put_user(len, optlen))
7577 goto out;
7578
7579 if (copy_to_user(optval, &params, len))
7580 goto out;
7581
7582 retval = 0;
7583
7584out:
7585 return retval;
7586}
7587
7588static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
7589 char __user *optval,
7590 int __user *optlen)
7591{
7592 struct sctp_assoc_value params;
7593 struct sctp_association *asoc;
7594 int retval = -EFAULT;
7595
7596 if (len < sizeof(params)) {
7597 retval = -EINVAL;
7598 goto out;
7599 }
7600
7601 len = sizeof(params);
7602 if (copy_from_user(&params, optval, len))
7603 goto out;
7604
7605 asoc = sctp_id2assoc(sk, params.assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00007606 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
7607 sctp_style(sk, UDP)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007608 retval = -EINVAL;
7609 goto out;
7610 }
7611
David Brazdil0f672f62019-12-10 10:32:29 +00007612 params.assoc_value = asoc ? asoc->strreset_enable
7613 : sctp_sk(sk)->ep->strreset_enable;
7614
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007615 if (put_user(len, optlen))
7616 goto out;
7617
7618 if (copy_to_user(optval, &params, len))
7619 goto out;
7620
7621 retval = 0;
7622
7623out:
7624 return retval;
7625}
7626
7627static int sctp_getsockopt_scheduler(struct sock *sk, int len,
7628 char __user *optval,
7629 int __user *optlen)
7630{
7631 struct sctp_assoc_value params;
7632 struct sctp_association *asoc;
7633 int retval = -EFAULT;
7634
7635 if (len < sizeof(params)) {
7636 retval = -EINVAL;
7637 goto out;
7638 }
7639
7640 len = sizeof(params);
7641 if (copy_from_user(&params, optval, len))
7642 goto out;
7643
7644 asoc = sctp_id2assoc(sk, params.assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00007645 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
7646 sctp_style(sk, UDP)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007647 retval = -EINVAL;
7648 goto out;
7649 }
7650
David Brazdil0f672f62019-12-10 10:32:29 +00007651 params.assoc_value = asoc ? sctp_sched_get_sched(asoc)
7652 : sctp_sk(sk)->default_ss;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007653
7654 if (put_user(len, optlen))
7655 goto out;
7656
7657 if (copy_to_user(optval, &params, len))
7658 goto out;
7659
7660 retval = 0;
7661
7662out:
7663 return retval;
7664}
7665
7666static int sctp_getsockopt_scheduler_value(struct sock *sk, int len,
7667 char __user *optval,
7668 int __user *optlen)
7669{
7670 struct sctp_stream_value params;
7671 struct sctp_association *asoc;
7672 int retval = -EFAULT;
7673
7674 if (len < sizeof(params)) {
7675 retval = -EINVAL;
7676 goto out;
7677 }
7678
7679 len = sizeof(params);
7680 if (copy_from_user(&params, optval, len))
7681 goto out;
7682
7683 asoc = sctp_id2assoc(sk, params.assoc_id);
7684 if (!asoc) {
7685 retval = -EINVAL;
7686 goto out;
7687 }
7688
7689 retval = sctp_sched_get_value(asoc, params.stream_id,
7690 &params.stream_value);
7691 if (retval)
7692 goto out;
7693
7694 if (put_user(len, optlen)) {
7695 retval = -EFAULT;
7696 goto out;
7697 }
7698
7699 if (copy_to_user(optval, &params, len)) {
7700 retval = -EFAULT;
7701 goto out;
7702 }
7703
7704out:
7705 return retval;
7706}
7707
7708static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
7709 char __user *optval,
7710 int __user *optlen)
7711{
7712 struct sctp_assoc_value params;
7713 struct sctp_association *asoc;
7714 int retval = -EFAULT;
7715
7716 if (len < sizeof(params)) {
7717 retval = -EINVAL;
7718 goto out;
7719 }
7720
7721 len = sizeof(params);
7722 if (copy_from_user(&params, optval, len))
7723 goto out;
7724
7725 asoc = sctp_id2assoc(sk, params.assoc_id);
David Brazdil0f672f62019-12-10 10:32:29 +00007726 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
7727 sctp_style(sk, UDP)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007728 retval = -EINVAL;
7729 goto out;
7730 }
7731
David Brazdil0f672f62019-12-10 10:32:29 +00007732 params.assoc_value = asoc ? asoc->peer.intl_capable
7733 : sctp_sk(sk)->ep->intl_enable;
7734
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007735 if (put_user(len, optlen))
7736 goto out;
7737
7738 if (copy_to_user(optval, &params, len))
7739 goto out;
7740
7741 retval = 0;
7742
7743out:
7744 return retval;
7745}
7746
7747static int sctp_getsockopt_reuse_port(struct sock *sk, int len,
7748 char __user *optval,
7749 int __user *optlen)
7750{
7751 int val;
7752
7753 if (len < sizeof(int))
7754 return -EINVAL;
7755
7756 len = sizeof(int);
7757 val = sctp_sk(sk)->reuse;
7758 if (put_user(len, optlen))
7759 return -EFAULT;
7760
7761 if (copy_to_user(optval, &val, len))
7762 return -EFAULT;
7763
7764 return 0;
7765}
7766
David Brazdil0f672f62019-12-10 10:32:29 +00007767static int sctp_getsockopt_event(struct sock *sk, int len, char __user *optval,
7768 int __user *optlen)
7769{
7770 struct sctp_association *asoc;
7771 struct sctp_event param;
7772 __u16 subscribe;
7773
7774 if (len < sizeof(param))
7775 return -EINVAL;
7776
7777 len = sizeof(param);
7778 if (copy_from_user(&param, optval, len))
7779 return -EFAULT;
7780
7781 if (param.se_type < SCTP_SN_TYPE_BASE ||
7782 param.se_type > SCTP_SN_TYPE_MAX)
7783 return -EINVAL;
7784
7785 asoc = sctp_id2assoc(sk, param.se_assoc_id);
7786 if (!asoc && param.se_assoc_id != SCTP_FUTURE_ASSOC &&
7787 sctp_style(sk, UDP))
7788 return -EINVAL;
7789
7790 subscribe = asoc ? asoc->subscribe : sctp_sk(sk)->subscribe;
7791 param.se_on = sctp_ulpevent_type_enabled(subscribe, param.se_type);
7792
7793 if (put_user(len, optlen))
7794 return -EFAULT;
7795
7796 if (copy_to_user(optval, &param, len))
7797 return -EFAULT;
7798
7799 return 0;
7800}
7801
7802static int sctp_getsockopt_asconf_supported(struct sock *sk, int len,
7803 char __user *optval,
7804 int __user *optlen)
7805{
7806 struct sctp_assoc_value params;
7807 struct sctp_association *asoc;
7808 int retval = -EFAULT;
7809
7810 if (len < sizeof(params)) {
7811 retval = -EINVAL;
7812 goto out;
7813 }
7814
7815 len = sizeof(params);
7816 if (copy_from_user(&params, optval, len))
7817 goto out;
7818
7819 asoc = sctp_id2assoc(sk, params.assoc_id);
7820 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
7821 sctp_style(sk, UDP)) {
7822 retval = -EINVAL;
7823 goto out;
7824 }
7825
7826 params.assoc_value = asoc ? asoc->peer.asconf_capable
7827 : sctp_sk(sk)->ep->asconf_enable;
7828
7829 if (put_user(len, optlen))
7830 goto out;
7831
7832 if (copy_to_user(optval, &params, len))
7833 goto out;
7834
7835 retval = 0;
7836
7837out:
7838 return retval;
7839}
7840
7841static int sctp_getsockopt_auth_supported(struct sock *sk, int len,
7842 char __user *optval,
7843 int __user *optlen)
7844{
7845 struct sctp_assoc_value params;
7846 struct sctp_association *asoc;
7847 int retval = -EFAULT;
7848
7849 if (len < sizeof(params)) {
7850 retval = -EINVAL;
7851 goto out;
7852 }
7853
7854 len = sizeof(params);
7855 if (copy_from_user(&params, optval, len))
7856 goto out;
7857
7858 asoc = sctp_id2assoc(sk, params.assoc_id);
7859 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
7860 sctp_style(sk, UDP)) {
7861 retval = -EINVAL;
7862 goto out;
7863 }
7864
7865 params.assoc_value = asoc ? asoc->peer.auth_capable
7866 : sctp_sk(sk)->ep->auth_enable;
7867
7868 if (put_user(len, optlen))
7869 goto out;
7870
7871 if (copy_to_user(optval, &params, len))
7872 goto out;
7873
7874 retval = 0;
7875
7876out:
7877 return retval;
7878}
7879
7880static int sctp_getsockopt_ecn_supported(struct sock *sk, int len,
7881 char __user *optval,
7882 int __user *optlen)
7883{
7884 struct sctp_assoc_value params;
7885 struct sctp_association *asoc;
7886 int retval = -EFAULT;
7887
7888 if (len < sizeof(params)) {
7889 retval = -EINVAL;
7890 goto out;
7891 }
7892
7893 len = sizeof(params);
7894 if (copy_from_user(&params, optval, len))
7895 goto out;
7896
7897 asoc = sctp_id2assoc(sk, params.assoc_id);
7898 if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
7899 sctp_style(sk, UDP)) {
7900 retval = -EINVAL;
7901 goto out;
7902 }
7903
7904 params.assoc_value = asoc ? asoc->peer.ecn_capable
7905 : sctp_sk(sk)->ep->ecn_enable;
7906
7907 if (put_user(len, optlen))
7908 goto out;
7909
7910 if (copy_to_user(optval, &params, len))
7911 goto out;
7912
7913 retval = 0;
7914
7915out:
7916 return retval;
7917}
7918
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007919static int sctp_getsockopt(struct sock *sk, int level, int optname,
7920 char __user *optval, int __user *optlen)
7921{
7922 int retval = 0;
7923 int len;
7924
7925 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
7926
7927 /* I can hardly begin to describe how wrong this is. This is
7928 * so broken as to be worse than useless. The API draft
7929 * REALLY is NOT helpful here... I am not convinced that the
7930 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
7931 * are at all well-founded.
7932 */
7933 if (level != SOL_SCTP) {
7934 struct sctp_af *af = sctp_sk(sk)->pf->af;
7935
7936 retval = af->getsockopt(sk, level, optname, optval, optlen);
7937 return retval;
7938 }
7939
7940 if (get_user(len, optlen))
7941 return -EFAULT;
7942
7943 if (len < 0)
7944 return -EINVAL;
7945
7946 lock_sock(sk);
7947
7948 switch (optname) {
7949 case SCTP_STATUS:
7950 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
7951 break;
7952 case SCTP_DISABLE_FRAGMENTS:
7953 retval = sctp_getsockopt_disable_fragments(sk, len, optval,
7954 optlen);
7955 break;
7956 case SCTP_EVENTS:
7957 retval = sctp_getsockopt_events(sk, len, optval, optlen);
7958 break;
7959 case SCTP_AUTOCLOSE:
7960 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
7961 break;
7962 case SCTP_SOCKOPT_PEELOFF:
7963 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
7964 break;
7965 case SCTP_SOCKOPT_PEELOFF_FLAGS:
7966 retval = sctp_getsockopt_peeloff_flags(sk, len, optval, optlen);
7967 break;
7968 case SCTP_PEER_ADDR_PARAMS:
7969 retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
7970 optlen);
7971 break;
7972 case SCTP_DELAYED_SACK:
7973 retval = sctp_getsockopt_delayed_ack(sk, len, optval,
7974 optlen);
7975 break;
7976 case SCTP_INITMSG:
7977 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
7978 break;
7979 case SCTP_GET_PEER_ADDRS:
7980 retval = sctp_getsockopt_peer_addrs(sk, len, optval,
7981 optlen);
7982 break;
7983 case SCTP_GET_LOCAL_ADDRS:
7984 retval = sctp_getsockopt_local_addrs(sk, len, optval,
7985 optlen);
7986 break;
7987 case SCTP_SOCKOPT_CONNECTX3:
7988 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
7989 break;
7990 case SCTP_DEFAULT_SEND_PARAM:
7991 retval = sctp_getsockopt_default_send_param(sk, len,
7992 optval, optlen);
7993 break;
7994 case SCTP_DEFAULT_SNDINFO:
7995 retval = sctp_getsockopt_default_sndinfo(sk, len,
7996 optval, optlen);
7997 break;
7998 case SCTP_PRIMARY_ADDR:
7999 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
8000 break;
8001 case SCTP_NODELAY:
8002 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
8003 break;
8004 case SCTP_RTOINFO:
8005 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
8006 break;
8007 case SCTP_ASSOCINFO:
8008 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
8009 break;
8010 case SCTP_I_WANT_MAPPED_V4_ADDR:
8011 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
8012 break;
8013 case SCTP_MAXSEG:
8014 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
8015 break;
8016 case SCTP_GET_PEER_ADDR_INFO:
8017 retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
8018 optlen);
8019 break;
8020 case SCTP_ADAPTATION_LAYER:
8021 retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
8022 optlen);
8023 break;
8024 case SCTP_CONTEXT:
8025 retval = sctp_getsockopt_context(sk, len, optval, optlen);
8026 break;
8027 case SCTP_FRAGMENT_INTERLEAVE:
8028 retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
8029 optlen);
8030 break;
8031 case SCTP_PARTIAL_DELIVERY_POINT:
8032 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
8033 optlen);
8034 break;
8035 case SCTP_MAX_BURST:
8036 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
8037 break;
8038 case SCTP_AUTH_KEY:
8039 case SCTP_AUTH_CHUNK:
8040 case SCTP_AUTH_DELETE_KEY:
8041 case SCTP_AUTH_DEACTIVATE_KEY:
8042 retval = -EOPNOTSUPP;
8043 break;
8044 case SCTP_HMAC_IDENT:
8045 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
8046 break;
8047 case SCTP_AUTH_ACTIVE_KEY:
8048 retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
8049 break;
8050 case SCTP_PEER_AUTH_CHUNKS:
8051 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
8052 optlen);
8053 break;
8054 case SCTP_LOCAL_AUTH_CHUNKS:
8055 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
8056 optlen);
8057 break;
8058 case SCTP_GET_ASSOC_NUMBER:
8059 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
8060 break;
8061 case SCTP_GET_ASSOC_ID_LIST:
8062 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
8063 break;
8064 case SCTP_AUTO_ASCONF:
8065 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
8066 break;
8067 case SCTP_PEER_ADDR_THLDS:
8068 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
8069 break;
8070 case SCTP_GET_ASSOC_STATS:
8071 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
8072 break;
8073 case SCTP_RECVRCVINFO:
8074 retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
8075 break;
8076 case SCTP_RECVNXTINFO:
8077 retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
8078 break;
8079 case SCTP_PR_SUPPORTED:
8080 retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen);
8081 break;
8082 case SCTP_DEFAULT_PRINFO:
8083 retval = sctp_getsockopt_default_prinfo(sk, len, optval,
8084 optlen);
8085 break;
8086 case SCTP_PR_ASSOC_STATUS:
8087 retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
8088 optlen);
8089 break;
8090 case SCTP_PR_STREAM_STATUS:
8091 retval = sctp_getsockopt_pr_streamstatus(sk, len, optval,
8092 optlen);
8093 break;
8094 case SCTP_RECONFIG_SUPPORTED:
8095 retval = sctp_getsockopt_reconfig_supported(sk, len, optval,
8096 optlen);
8097 break;
8098 case SCTP_ENABLE_STREAM_RESET:
8099 retval = sctp_getsockopt_enable_strreset(sk, len, optval,
8100 optlen);
8101 break;
8102 case SCTP_STREAM_SCHEDULER:
8103 retval = sctp_getsockopt_scheduler(sk, len, optval,
8104 optlen);
8105 break;
8106 case SCTP_STREAM_SCHEDULER_VALUE:
8107 retval = sctp_getsockopt_scheduler_value(sk, len, optval,
8108 optlen);
8109 break;
8110 case SCTP_INTERLEAVING_SUPPORTED:
8111 retval = sctp_getsockopt_interleaving_supported(sk, len, optval,
8112 optlen);
8113 break;
8114 case SCTP_REUSE_PORT:
8115 retval = sctp_getsockopt_reuse_port(sk, len, optval, optlen);
8116 break;
David Brazdil0f672f62019-12-10 10:32:29 +00008117 case SCTP_EVENT:
8118 retval = sctp_getsockopt_event(sk, len, optval, optlen);
8119 break;
8120 case SCTP_ASCONF_SUPPORTED:
8121 retval = sctp_getsockopt_asconf_supported(sk, len, optval,
8122 optlen);
8123 break;
8124 case SCTP_AUTH_SUPPORTED:
8125 retval = sctp_getsockopt_auth_supported(sk, len, optval,
8126 optlen);
8127 break;
8128 case SCTP_ECN_SUPPORTED:
8129 retval = sctp_getsockopt_ecn_supported(sk, len, optval, optlen);
8130 break;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008131 default:
8132 retval = -ENOPROTOOPT;
8133 break;
8134 }
8135
8136 release_sock(sk);
8137 return retval;
8138}
8139
8140static int sctp_hash(struct sock *sk)
8141{
8142 /* STUB */
8143 return 0;
8144}
8145
8146static void sctp_unhash(struct sock *sk)
8147{
8148 /* STUB */
8149}
8150
8151/* Check if port is acceptable. Possibly find first available port.
8152 *
8153 * The port hash table (contained in the 'global' SCTP protocol storage
8154 * returned by struct sctp_protocol *sctp_get_protocol()). The hash
8155 * table is an array of 4096 lists (sctp_bind_hashbucket). Each
8156 * list (the list number is the port number hashed out, so as you
8157 * would expect from a hash function, all the ports in a given list have
8158 * such a number that hashes out to the same list number; you were
8159 * expecting that, right?); so each list has a set of ports, with a
8160 * link to the socket (struct sock) that uses it, the port number and
8161 * a fastreuse flag (FIXME: NPI ipg).
8162 */
8163static struct sctp_bind_bucket *sctp_bucket_create(
8164 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
8165
David Brazdil0f672f62019-12-10 10:32:29 +00008166static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008167{
David Brazdil0f672f62019-12-10 10:32:29 +00008168 struct sctp_sock *sp = sctp_sk(sk);
8169 bool reuse = (sk->sk_reuse || sp->reuse);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008170 struct sctp_bind_hashbucket *head; /* hash list */
David Brazdil0f672f62019-12-10 10:32:29 +00008171 kuid_t uid = sock_i_uid(sk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008172 struct sctp_bind_bucket *pp;
8173 unsigned short snum;
8174 int ret;
8175
8176 snum = ntohs(addr->v4.sin_port);
8177
8178 pr_debug("%s: begins, snum:%d\n", __func__, snum);
8179
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008180 if (snum == 0) {
8181 /* Search for an available port. */
8182 int low, high, remaining, index;
8183 unsigned int rover;
8184 struct net *net = sock_net(sk);
8185
8186 inet_get_local_port_range(net, &low, &high);
8187 remaining = (high - low) + 1;
8188 rover = prandom_u32() % remaining + low;
8189
8190 do {
8191 rover++;
8192 if ((rover < low) || (rover > high))
8193 rover = low;
8194 if (inet_is_local_reserved_port(net, rover))
8195 continue;
8196 index = sctp_phashfn(sock_net(sk), rover);
8197 head = &sctp_port_hashtable[index];
Olivier Deprez0e641232021-09-23 10:07:05 +02008198 spin_lock_bh(&head->lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008199 sctp_for_each_hentry(pp, &head->chain)
8200 if ((pp->port == rover) &&
8201 net_eq(sock_net(sk), pp->net))
8202 goto next;
8203 break;
8204 next:
Olivier Deprez0e641232021-09-23 10:07:05 +02008205 spin_unlock_bh(&head->lock);
8206 cond_resched();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008207 } while (--remaining > 0);
8208
8209 /* Exhausted local port range during search? */
8210 ret = 1;
8211 if (remaining <= 0)
Olivier Deprez0e641232021-09-23 10:07:05 +02008212 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008213
8214 /* OK, here is the one we will use. HEAD (the port
8215 * hash table list entry) is non-NULL and we hold it's
8216 * mutex.
8217 */
8218 snum = rover;
8219 } else {
8220 /* We are given an specific port number; we verify
8221 * that it is not being used. If it is used, we will
8222 * exahust the search in the hash list corresponding
8223 * to the port number (snum) - we detect that with the
8224 * port iterator, pp being NULL.
8225 */
8226 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
Olivier Deprez0e641232021-09-23 10:07:05 +02008227 spin_lock_bh(&head->lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008228 sctp_for_each_hentry(pp, &head->chain) {
8229 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
8230 goto pp_found;
8231 }
8232 }
8233 pp = NULL;
8234 goto pp_not_found;
8235pp_found:
8236 if (!hlist_empty(&pp->owner)) {
8237 /* We had a port hash table hit - there is an
8238 * available port (pp != NULL) and it is being
8239 * used by other socket (pp->owner not empty); that other
8240 * socket is going to be sk2.
8241 */
8242 struct sock *sk2;
8243
8244 pr_debug("%s: found a possible match\n", __func__);
8245
David Brazdil0f672f62019-12-10 10:32:29 +00008246 if ((pp->fastreuse && reuse &&
8247 sk->sk_state != SCTP_SS_LISTENING) ||
8248 (pp->fastreuseport && sk->sk_reuseport &&
8249 uid_eq(pp->fastuid, uid)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008250 goto success;
8251
8252 /* Run through the list of sockets bound to the port
8253 * (pp->port) [via the pointers bind_next and
8254 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
8255 * we get the endpoint they describe and run through
8256 * the endpoint's list of IP (v4 or v6) addresses,
8257 * comparing each of the addresses with the address of
8258 * the socket sk. If we find a match, then that means
8259 * that this port/socket (sk) combination are already
8260 * in an endpoint.
8261 */
8262 sk_for_each_bound(sk2, &pp->owner) {
David Brazdil0f672f62019-12-10 10:32:29 +00008263 struct sctp_sock *sp2 = sctp_sk(sk2);
8264 struct sctp_endpoint *ep2 = sp2->ep;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008265
8266 if (sk == sk2 ||
David Brazdil0f672f62019-12-10 10:32:29 +00008267 (reuse && (sk2->sk_reuse || sp2->reuse) &&
8268 sk2->sk_state != SCTP_SS_LISTENING) ||
8269 (sk->sk_reuseport && sk2->sk_reuseport &&
8270 uid_eq(uid, sock_i_uid(sk2))))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008271 continue;
8272
David Brazdil0f672f62019-12-10 10:32:29 +00008273 if (sctp_bind_addr_conflict(&ep2->base.bind_addr,
8274 addr, sp2, sp)) {
8275 ret = 1;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008276 goto fail_unlock;
8277 }
8278 }
8279
8280 pr_debug("%s: found a match\n", __func__);
8281 }
8282pp_not_found:
8283 /* If there was a hash table miss, create a new port. */
8284 ret = 1;
8285 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
8286 goto fail_unlock;
8287
8288 /* In either case (hit or miss), make sure fastreuse is 1 only
8289 * if sk->sk_reuse is too (that is, if the caller requested
8290 * SO_REUSEADDR on this socket -sk-).
8291 */
8292 if (hlist_empty(&pp->owner)) {
8293 if (reuse && sk->sk_state != SCTP_SS_LISTENING)
8294 pp->fastreuse = 1;
8295 else
8296 pp->fastreuse = 0;
David Brazdil0f672f62019-12-10 10:32:29 +00008297
8298 if (sk->sk_reuseport) {
8299 pp->fastreuseport = 1;
8300 pp->fastuid = uid;
8301 } else {
8302 pp->fastreuseport = 0;
8303 }
8304 } else {
8305 if (pp->fastreuse &&
8306 (!reuse || sk->sk_state == SCTP_SS_LISTENING))
8307 pp->fastreuse = 0;
8308
8309 if (pp->fastreuseport &&
8310 (!sk->sk_reuseport || !uid_eq(pp->fastuid, uid)))
8311 pp->fastreuseport = 0;
8312 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008313
8314 /* We are set, so fill up all the data in the hash table
8315 * entry, tie the socket list information with the rest of the
8316 * sockets FIXME: Blurry, NPI (ipg).
8317 */
8318success:
David Brazdil0f672f62019-12-10 10:32:29 +00008319 if (!sp->bind_hash) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008320 inet_sk(sk)->inet_num = snum;
8321 sk_add_bind_node(sk, &pp->owner);
David Brazdil0f672f62019-12-10 10:32:29 +00008322 sp->bind_hash = pp;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008323 }
8324 ret = 0;
8325
8326fail_unlock:
Olivier Deprez0e641232021-09-23 10:07:05 +02008327 spin_unlock_bh(&head->lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008328 return ret;
8329}
8330
8331/* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
8332 * port is requested.
8333 */
8334static int sctp_get_port(struct sock *sk, unsigned short snum)
8335{
8336 union sctp_addr addr;
8337 struct sctp_af *af = sctp_sk(sk)->pf->af;
8338
8339 /* Set up a dummy address struct from the sk. */
8340 af->from_sk(&addr, sk);
8341 addr.v4.sin_port = htons(snum);
8342
8343 /* Note: sk->sk_num gets filled in if ephemeral port request. */
David Brazdil0f672f62019-12-10 10:32:29 +00008344 return sctp_get_port_local(sk, &addr);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008345}
8346
8347/*
8348 * Move a socket to LISTENING state.
8349 */
8350static int sctp_listen_start(struct sock *sk, int backlog)
8351{
8352 struct sctp_sock *sp = sctp_sk(sk);
8353 struct sctp_endpoint *ep = sp->ep;
8354 struct crypto_shash *tfm = NULL;
8355 char alg[32];
8356
8357 /* Allocate HMAC for generating cookie. */
8358 if (!sp->hmac && sp->sctp_hmac_alg) {
8359 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
8360 tfm = crypto_alloc_shash(alg, 0, 0);
8361 if (IS_ERR(tfm)) {
8362 net_info_ratelimited("failed to load transform for %s: %ld\n",
8363 sp->sctp_hmac_alg, PTR_ERR(tfm));
8364 return -ENOSYS;
8365 }
8366 sctp_sk(sk)->hmac = tfm;
8367 }
8368
8369 /*
8370 * If a bind() or sctp_bindx() is not called prior to a listen()
8371 * call that allows new associations to be accepted, the system
8372 * picks an ephemeral port and will choose an address set equivalent
8373 * to binding with a wildcard address.
8374 *
8375 * This is not currently spelled out in the SCTP sockets
8376 * extensions draft, but follows the practice as seen in TCP
8377 * sockets.
8378 *
8379 */
8380 inet_sk_set_state(sk, SCTP_SS_LISTENING);
8381 if (!ep->base.bind_addr.port) {
8382 if (sctp_autobind(sk))
8383 return -EAGAIN;
8384 } else {
8385 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
8386 inet_sk_set_state(sk, SCTP_SS_CLOSED);
8387 return -EADDRINUSE;
8388 }
8389 }
8390
8391 sk->sk_max_ack_backlog = backlog;
David Brazdil0f672f62019-12-10 10:32:29 +00008392 return sctp_hash_endpoint(ep);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008393}
8394
8395/*
8396 * 4.1.3 / 5.1.3 listen()
8397 *
8398 * By default, new associations are not accepted for UDP style sockets.
8399 * An application uses listen() to mark a socket as being able to
8400 * accept new associations.
8401 *
8402 * On TCP style sockets, applications use listen() to ready the SCTP
8403 * endpoint for accepting inbound associations.
8404 *
8405 * On both types of endpoints a backlog of '0' disables listening.
8406 *
8407 * Move a socket to LISTENING state.
8408 */
8409int sctp_inet_listen(struct socket *sock, int backlog)
8410{
8411 struct sock *sk = sock->sk;
8412 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
8413 int err = -EINVAL;
8414
8415 if (unlikely(backlog < 0))
8416 return err;
8417
8418 lock_sock(sk);
8419
8420 /* Peeled-off sockets are not allowed to listen(). */
8421 if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
8422 goto out;
8423
8424 if (sock->state != SS_UNCONNECTED)
8425 goto out;
8426
8427 if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
8428 goto out;
8429
8430 /* If backlog is zero, disable listening. */
8431 if (!backlog) {
8432 if (sctp_sstate(sk, CLOSED))
8433 goto out;
8434
8435 err = 0;
8436 sctp_unhash_endpoint(ep);
8437 sk->sk_state = SCTP_SS_CLOSED;
8438 if (sk->sk_reuse || sctp_sk(sk)->reuse)
8439 sctp_sk(sk)->bind_hash->fastreuse = 1;
8440 goto out;
8441 }
8442
8443 /* If we are already listening, just update the backlog */
8444 if (sctp_sstate(sk, LISTENING))
8445 sk->sk_max_ack_backlog = backlog;
8446 else {
8447 err = sctp_listen_start(sk, backlog);
8448 if (err)
8449 goto out;
8450 }
8451
8452 err = 0;
8453out:
8454 release_sock(sk);
8455 return err;
8456}
8457
8458/*
8459 * This function is done by modeling the current datagram_poll() and the
8460 * tcp_poll(). Note that, based on these implementations, we don't
8461 * lock the socket in this function, even though it seems that,
8462 * ideally, locking or some other mechanisms can be used to ensure
8463 * the integrity of the counters (sndbuf and wmem_alloc) used
8464 * in this place. We assume that we don't need locks either until proven
8465 * otherwise.
8466 *
8467 * Another thing to note is that we include the Async I/O support
8468 * here, again, by modeling the current TCP/UDP code. We don't have
8469 * a good way to test with it yet.
8470 */
8471__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
8472{
8473 struct sock *sk = sock->sk;
8474 struct sctp_sock *sp = sctp_sk(sk);
8475 __poll_t mask;
8476
8477 poll_wait(file, sk_sleep(sk), wait);
8478
8479 sock_rps_record_flow(sk);
8480
8481 /* A TCP-style listening socket becomes readable when the accept queue
8482 * is not empty.
8483 */
8484 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
8485 return (!list_empty(&sp->ep->asocs)) ?
8486 (EPOLLIN | EPOLLRDNORM) : 0;
8487
8488 mask = 0;
8489
8490 /* Is there any exceptional events? */
David Brazdil0f672f62019-12-10 10:32:29 +00008491 if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008492 mask |= EPOLLERR |
8493 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
8494 if (sk->sk_shutdown & RCV_SHUTDOWN)
8495 mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
8496 if (sk->sk_shutdown == SHUTDOWN_MASK)
8497 mask |= EPOLLHUP;
8498
8499 /* Is it readable? Reconsider this code with TCP-style support. */
David Brazdil0f672f62019-12-10 10:32:29 +00008500 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008501 mask |= EPOLLIN | EPOLLRDNORM;
8502
8503 /* The association is either gone or not ready. */
8504 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
8505 return mask;
8506
8507 /* Is it writable? */
8508 if (sctp_writeable(sk)) {
8509 mask |= EPOLLOUT | EPOLLWRNORM;
8510 } else {
8511 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
8512 /*
8513 * Since the socket is not locked, the buffer
8514 * might be made available after the writeable check and
8515 * before the bit is set. This could cause a lost I/O
8516 * signal. tcp_poll() has a race breaker for this race
8517 * condition. Based on their implementation, we put
8518 * in the following code to cover it as well.
8519 */
8520 if (sctp_writeable(sk))
8521 mask |= EPOLLOUT | EPOLLWRNORM;
8522 }
8523 return mask;
8524}
8525
8526/********************************************************************
8527 * 2nd Level Abstractions
8528 ********************************************************************/
8529
8530static struct sctp_bind_bucket *sctp_bucket_create(
8531 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
8532{
8533 struct sctp_bind_bucket *pp;
8534
8535 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
8536 if (pp) {
8537 SCTP_DBG_OBJCNT_INC(bind_bucket);
8538 pp->port = snum;
8539 pp->fastreuse = 0;
8540 INIT_HLIST_HEAD(&pp->owner);
8541 pp->net = net;
8542 hlist_add_head(&pp->node, &head->chain);
8543 }
8544 return pp;
8545}
8546
8547/* Caller must hold hashbucket lock for this tb with local BH disabled */
8548static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
8549{
8550 if (pp && hlist_empty(&pp->owner)) {
8551 __hlist_del(&pp->node);
8552 kmem_cache_free(sctp_bucket_cachep, pp);
8553 SCTP_DBG_OBJCNT_DEC(bind_bucket);
8554 }
8555}
8556
8557/* Release this socket's reference to a local port. */
8558static inline void __sctp_put_port(struct sock *sk)
8559{
8560 struct sctp_bind_hashbucket *head =
8561 &sctp_port_hashtable[sctp_phashfn(sock_net(sk),
8562 inet_sk(sk)->inet_num)];
8563 struct sctp_bind_bucket *pp;
8564
8565 spin_lock(&head->lock);
8566 pp = sctp_sk(sk)->bind_hash;
8567 __sk_del_bind_node(sk);
8568 sctp_sk(sk)->bind_hash = NULL;
8569 inet_sk(sk)->inet_num = 0;
8570 sctp_bucket_destroy(pp);
8571 spin_unlock(&head->lock);
8572}
8573
8574void sctp_put_port(struct sock *sk)
8575{
8576 local_bh_disable();
8577 __sctp_put_port(sk);
8578 local_bh_enable();
8579}
8580
8581/*
8582 * The system picks an ephemeral port and choose an address set equivalent
8583 * to binding with a wildcard address.
8584 * One of those addresses will be the primary address for the association.
8585 * This automatically enables the multihoming capability of SCTP.
8586 */
8587static int sctp_autobind(struct sock *sk)
8588{
8589 union sctp_addr autoaddr;
8590 struct sctp_af *af;
8591 __be16 port;
8592
8593 /* Initialize a local sockaddr structure to INADDR_ANY. */
8594 af = sctp_sk(sk)->pf->af;
8595
8596 port = htons(inet_sk(sk)->inet_num);
8597 af->inaddr_any(&autoaddr, port);
8598
8599 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
8600}
8601
8602/* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation.
8603 *
8604 * From RFC 2292
8605 * 4.2 The cmsghdr Structure *
8606 *
8607 * When ancillary data is sent or received, any number of ancillary data
8608 * objects can be specified by the msg_control and msg_controllen members of
8609 * the msghdr structure, because each object is preceded by
8610 * a cmsghdr structure defining the object's length (the cmsg_len member).
8611 * Historically Berkeley-derived implementations have passed only one object
8612 * at a time, but this API allows multiple objects to be
8613 * passed in a single call to sendmsg() or recvmsg(). The following example
8614 * shows two ancillary data objects in a control buffer.
8615 *
8616 * |<--------------------------- msg_controllen -------------------------->|
8617 * | |
8618 *
8619 * |<----- ancillary data object ----->|<----- ancillary data object ----->|
8620 *
8621 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
8622 * | | |
8623 *
8624 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| |
8625 *
8626 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| |
8627 * | | | | |
8628 *
8629 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
8630 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX|
8631 *
8632 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX|
8633 *
8634 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
8635 * ^
8636 * |
8637 *
8638 * msg_control
8639 * points here
8640 */
8641static int sctp_msghdr_parse(const struct msghdr *msg, struct sctp_cmsgs *cmsgs)
8642{
8643 struct msghdr *my_msg = (struct msghdr *)msg;
8644 struct cmsghdr *cmsg;
8645
8646 for_each_cmsghdr(cmsg, my_msg) {
8647 if (!CMSG_OK(my_msg, cmsg))
8648 return -EINVAL;
8649
8650 /* Should we parse this header or ignore? */
8651 if (cmsg->cmsg_level != IPPROTO_SCTP)
8652 continue;
8653
8654 /* Strictly check lengths following example in SCM code. */
8655 switch (cmsg->cmsg_type) {
8656 case SCTP_INIT:
8657 /* SCTP Socket API Extension
8658 * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
8659 *
8660 * This cmsghdr structure provides information for
8661 * initializing new SCTP associations with sendmsg().
8662 * The SCTP_INITMSG socket option uses this same data
8663 * structure. This structure is not used for
8664 * recvmsg().
8665 *
8666 * cmsg_level cmsg_type cmsg_data[]
8667 * ------------ ------------ ----------------------
8668 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
8669 */
8670 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
8671 return -EINVAL;
8672
8673 cmsgs->init = CMSG_DATA(cmsg);
8674 break;
8675
8676 case SCTP_SNDRCV:
8677 /* SCTP Socket API Extension
8678 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
8679 *
8680 * This cmsghdr structure specifies SCTP options for
8681 * sendmsg() and describes SCTP header information
8682 * about a received message through recvmsg().
8683 *
8684 * cmsg_level cmsg_type cmsg_data[]
8685 * ------------ ------------ ----------------------
8686 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
8687 */
8688 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
8689 return -EINVAL;
8690
8691 cmsgs->srinfo = CMSG_DATA(cmsg);
8692
8693 if (cmsgs->srinfo->sinfo_flags &
8694 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
8695 SCTP_SACK_IMMEDIATELY | SCTP_SENDALL |
8696 SCTP_PR_SCTP_MASK | SCTP_ABORT | SCTP_EOF))
8697 return -EINVAL;
8698 break;
8699
8700 case SCTP_SNDINFO:
8701 /* SCTP Socket API Extension
8702 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
8703 *
8704 * This cmsghdr structure specifies SCTP options for
8705 * sendmsg(). This structure and SCTP_RCVINFO replaces
8706 * SCTP_SNDRCV which has been deprecated.
8707 *
8708 * cmsg_level cmsg_type cmsg_data[]
8709 * ------------ ------------ ---------------------
8710 * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo
8711 */
8712 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
8713 return -EINVAL;
8714
8715 cmsgs->sinfo = CMSG_DATA(cmsg);
8716
8717 if (cmsgs->sinfo->snd_flags &
8718 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
8719 SCTP_SACK_IMMEDIATELY | SCTP_SENDALL |
8720 SCTP_PR_SCTP_MASK | SCTP_ABORT | SCTP_EOF))
8721 return -EINVAL;
8722 break;
8723 case SCTP_PRINFO:
8724 /* SCTP Socket API Extension
8725 * 5.3.7 SCTP PR-SCTP Information Structure (SCTP_PRINFO)
8726 *
8727 * This cmsghdr structure specifies SCTP options for sendmsg().
8728 *
8729 * cmsg_level cmsg_type cmsg_data[]
8730 * ------------ ------------ ---------------------
8731 * IPPROTO_SCTP SCTP_PRINFO struct sctp_prinfo
8732 */
8733 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_prinfo)))
8734 return -EINVAL;
8735
8736 cmsgs->prinfo = CMSG_DATA(cmsg);
8737 if (cmsgs->prinfo->pr_policy & ~SCTP_PR_SCTP_MASK)
8738 return -EINVAL;
8739
8740 if (cmsgs->prinfo->pr_policy == SCTP_PR_SCTP_NONE)
8741 cmsgs->prinfo->pr_value = 0;
8742 break;
8743 case SCTP_AUTHINFO:
8744 /* SCTP Socket API Extension
8745 * 5.3.8 SCTP AUTH Information Structure (SCTP_AUTHINFO)
8746 *
8747 * This cmsghdr structure specifies SCTP options for sendmsg().
8748 *
8749 * cmsg_level cmsg_type cmsg_data[]
8750 * ------------ ------------ ---------------------
8751 * IPPROTO_SCTP SCTP_AUTHINFO struct sctp_authinfo
8752 */
8753 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_authinfo)))
8754 return -EINVAL;
8755
8756 cmsgs->authinfo = CMSG_DATA(cmsg);
8757 break;
8758 case SCTP_DSTADDRV4:
8759 case SCTP_DSTADDRV6:
8760 /* SCTP Socket API Extension
8761 * 5.3.9/10 SCTP Destination IPv4/6 Address Structure (SCTP_DSTADDRV4/6)
8762 *
8763 * This cmsghdr structure specifies SCTP options for sendmsg().
8764 *
8765 * cmsg_level cmsg_type cmsg_data[]
8766 * ------------ ------------ ---------------------
8767 * IPPROTO_SCTP SCTP_DSTADDRV4 struct in_addr
8768 * ------------ ------------ ---------------------
8769 * IPPROTO_SCTP SCTP_DSTADDRV6 struct in6_addr
8770 */
8771 cmsgs->addrs_msg = my_msg;
8772 break;
8773 default:
8774 return -EINVAL;
8775 }
8776 }
8777
8778 return 0;
8779}
8780
8781/*
8782 * Wait for a packet..
8783 * Note: This function is the same function as in core/datagram.c
8784 * with a few modifications to make lksctp work.
8785 */
8786static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
8787{
8788 int error;
8789 DEFINE_WAIT(wait);
8790
8791 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
8792
8793 /* Socket errors? */
8794 error = sock_error(sk);
8795 if (error)
8796 goto out;
8797
8798 if (!skb_queue_empty(&sk->sk_receive_queue))
8799 goto ready;
8800
8801 /* Socket shut down? */
8802 if (sk->sk_shutdown & RCV_SHUTDOWN)
8803 goto out;
8804
8805 /* Sequenced packets can come disconnected. If so we report the
8806 * problem.
8807 */
8808 error = -ENOTCONN;
8809
8810 /* Is there a good reason to think that we may receive some data? */
8811 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
8812 goto out;
8813
8814 /* Handle signals. */
8815 if (signal_pending(current))
8816 goto interrupted;
8817
8818 /* Let another process have a go. Since we are going to sleep
8819 * anyway. Note: This may cause odd behaviors if the message
8820 * does not fit in the user's buffer, but this seems to be the
8821 * only way to honor MSG_DONTWAIT realistically.
8822 */
8823 release_sock(sk);
8824 *timeo_p = schedule_timeout(*timeo_p);
8825 lock_sock(sk);
8826
8827ready:
8828 finish_wait(sk_sleep(sk), &wait);
8829 return 0;
8830
8831interrupted:
8832 error = sock_intr_errno(*timeo_p);
8833
8834out:
8835 finish_wait(sk_sleep(sk), &wait);
8836 *err = error;
8837 return error;
8838}
8839
8840/* Receive a datagram.
8841 * Note: This is pretty much the same routine as in core/datagram.c
8842 * with a few changes to make lksctp work.
8843 */
8844struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
8845 int noblock, int *err)
8846{
8847 int error;
8848 struct sk_buff *skb;
8849 long timeo;
8850
8851 timeo = sock_rcvtimeo(sk, noblock);
8852
8853 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
8854 MAX_SCHEDULE_TIMEOUT);
8855
8856 do {
8857 /* Again only user level code calls this function,
8858 * so nothing interrupt level
8859 * will suddenly eat the receive_queue.
8860 *
8861 * Look at current nfs client by the way...
8862 * However, this function was correct in any case. 8)
8863 */
8864 if (flags & MSG_PEEK) {
8865 skb = skb_peek(&sk->sk_receive_queue);
8866 if (skb)
8867 refcount_inc(&skb->users);
8868 } else {
8869 skb = __skb_dequeue(&sk->sk_receive_queue);
8870 }
8871
8872 if (skb)
8873 return skb;
8874
8875 /* Caller is allowed not to check sk->sk_err before calling. */
8876 error = sock_error(sk);
8877 if (error)
8878 goto no_packet;
8879
8880 if (sk->sk_shutdown & RCV_SHUTDOWN)
8881 break;
8882
8883 if (sk_can_busy_loop(sk)) {
8884 sk_busy_loop(sk, noblock);
8885
David Brazdil0f672f62019-12-10 10:32:29 +00008886 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008887 continue;
8888 }
8889
8890 /* User doesn't want to wait. */
8891 error = -EAGAIN;
8892 if (!timeo)
8893 goto no_packet;
8894 } while (sctp_wait_for_packet(sk, err, &timeo) == 0);
8895
8896 return NULL;
8897
8898no_packet:
8899 *err = error;
8900 return NULL;
8901}
8902
8903/* If sndbuf has changed, wake up per association sndbuf waiters. */
8904static void __sctp_write_space(struct sctp_association *asoc)
8905{
8906 struct sock *sk = asoc->base.sk;
8907
8908 if (sctp_wspace(asoc) <= 0)
8909 return;
8910
8911 if (waitqueue_active(&asoc->wait))
8912 wake_up_interruptible(&asoc->wait);
8913
8914 if (sctp_writeable(sk)) {
8915 struct socket_wq *wq;
8916
8917 rcu_read_lock();
8918 wq = rcu_dereference(sk->sk_wq);
8919 if (wq) {
8920 if (waitqueue_active(&wq->wait))
8921 wake_up_interruptible(&wq->wait);
8922
8923 /* Note that we try to include the Async I/O support
8924 * here by modeling from the current TCP/UDP code.
8925 * We have not tested with it yet.
8926 */
8927 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
8928 sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
8929 }
8930 rcu_read_unlock();
8931 }
8932}
8933
8934static void sctp_wake_up_waiters(struct sock *sk,
8935 struct sctp_association *asoc)
8936{
8937 struct sctp_association *tmp = asoc;
8938
8939 /* We do accounting for the sndbuf space per association,
8940 * so we only need to wake our own association.
8941 */
8942 if (asoc->ep->sndbuf_policy)
8943 return __sctp_write_space(asoc);
8944
8945 /* If association goes down and is just flushing its
8946 * outq, then just normally notify others.
8947 */
8948 if (asoc->base.dead)
8949 return sctp_write_space(sk);
8950
8951 /* Accounting for the sndbuf space is per socket, so we
8952 * need to wake up others, try to be fair and in case of
8953 * other associations, let them have a go first instead
8954 * of just doing a sctp_write_space() call.
8955 *
8956 * Note that we reach sctp_wake_up_waiters() only when
8957 * associations free up queued chunks, thus we are under
8958 * lock and the list of associations on a socket is
8959 * guaranteed not to change.
8960 */
8961 for (tmp = list_next_entry(tmp, asocs); 1;
8962 tmp = list_next_entry(tmp, asocs)) {
8963 /* Manually skip the head element. */
8964 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
8965 continue;
8966 /* Wake up association. */
8967 __sctp_write_space(tmp);
8968 /* We've reached the end. */
8969 if (tmp == asoc)
8970 break;
8971 }
8972}
8973
8974/* Do accounting for the sndbuf space.
8975 * Decrement the used sndbuf space of the corresponding association by the
8976 * data size which was just transmitted(freed).
8977 */
8978static void sctp_wfree(struct sk_buff *skb)
8979{
8980 struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
8981 struct sctp_association *asoc = chunk->asoc;
8982 struct sock *sk = asoc->base.sk;
8983
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008984 sk_mem_uncharge(sk, skb->truesize);
David Brazdil0f672f62019-12-10 10:32:29 +00008985 sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk);
8986 asoc->sndbuf_used -= skb->truesize + sizeof(struct sctp_chunk);
8987 WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk),
8988 &sk->sk_wmem_alloc));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008989
8990 if (chunk->shkey) {
8991 struct sctp_shared_key *shkey = chunk->shkey;
8992
8993 /* refcnt == 2 and !list_empty mean after this release, it's
8994 * not being used anywhere, and it's time to notify userland
8995 * that this shkey can be freed if it's been deactivated.
8996 */
8997 if (shkey->deactivated && !list_empty(&shkey->key_list) &&
8998 refcount_read(&shkey->refcnt) == 2) {
8999 struct sctp_ulpevent *ev;
9000
9001 ev = sctp_ulpevent_make_authkey(asoc, shkey->key_id,
9002 SCTP_AUTH_FREE_KEY,
9003 GFP_KERNEL);
9004 if (ev)
9005 asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
9006 }
9007 sctp_auth_shkey_release(chunk->shkey);
9008 }
9009
9010 sock_wfree(skb);
9011 sctp_wake_up_waiters(sk, asoc);
9012
9013 sctp_association_put(asoc);
9014}
9015
9016/* Do accounting for the receive space on the socket.
9017 * Accounting for the association is done in ulpevent.c
9018 * We set this as a destructor for the cloned data skbs so that
9019 * accounting is done at the correct time.
9020 */
9021void sctp_sock_rfree(struct sk_buff *skb)
9022{
9023 struct sock *sk = skb->sk;
9024 struct sctp_ulpevent *event = sctp_skb2event(skb);
9025
9026 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
9027
9028 /*
9029 * Mimic the behavior of sock_rfree
9030 */
9031 sk_mem_uncharge(sk, event->rmem_len);
9032}
9033
9034
9035/* Helper function to wait for space in the sndbuf. */
9036static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
9037 size_t msg_len)
9038{
9039 struct sock *sk = asoc->base.sk;
9040 long current_timeo = *timeo_p;
9041 DEFINE_WAIT(wait);
9042 int err = 0;
9043
9044 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
9045 *timeo_p, msg_len);
9046
9047 /* Increment the association's refcnt. */
9048 sctp_association_hold(asoc);
9049
9050 /* Wait on the association specific sndbuf space. */
9051 for (;;) {
9052 prepare_to_wait_exclusive(&asoc->wait, &wait,
9053 TASK_INTERRUPTIBLE);
9054 if (asoc->base.dead)
9055 goto do_dead;
9056 if (!*timeo_p)
9057 goto do_nonblock;
9058 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
9059 goto do_error;
9060 if (signal_pending(current))
9061 goto do_interrupted;
David Brazdil0f672f62019-12-10 10:32:29 +00009062 if (sk_under_memory_pressure(sk))
9063 sk_mem_reclaim(sk);
9064 if ((int)msg_len <= sctp_wspace(asoc) &&
9065 sk_wmem_schedule(sk, msg_len))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009066 break;
9067
9068 /* Let another process have a go. Since we are going
9069 * to sleep anyway.
9070 */
9071 release_sock(sk);
9072 current_timeo = schedule_timeout(current_timeo);
9073 lock_sock(sk);
9074 if (sk != asoc->base.sk)
9075 goto do_error;
9076
9077 *timeo_p = current_timeo;
9078 }
9079
9080out:
9081 finish_wait(&asoc->wait, &wait);
9082
9083 /* Release the association's refcnt. */
9084 sctp_association_put(asoc);
9085
9086 return err;
9087
9088do_dead:
9089 err = -ESRCH;
9090 goto out;
9091
9092do_error:
9093 err = -EPIPE;
9094 goto out;
9095
9096do_interrupted:
9097 err = sock_intr_errno(*timeo_p);
9098 goto out;
9099
9100do_nonblock:
9101 err = -EAGAIN;
9102 goto out;
9103}
9104
9105void sctp_data_ready(struct sock *sk)
9106{
9107 struct socket_wq *wq;
9108
9109 rcu_read_lock();
9110 wq = rcu_dereference(sk->sk_wq);
9111 if (skwq_has_sleeper(wq))
9112 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
9113 EPOLLRDNORM | EPOLLRDBAND);
9114 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
9115 rcu_read_unlock();
9116}
9117
9118/* If socket sndbuf has changed, wake up all per association waiters. */
9119void sctp_write_space(struct sock *sk)
9120{
9121 struct sctp_association *asoc;
9122
9123 /* Wake up the tasks in each wait queue. */
9124 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
9125 __sctp_write_space(asoc);
9126 }
9127}
9128
9129/* Is there any sndbuf space available on the socket?
9130 *
9131 * Note that sk_wmem_alloc is the sum of the send buffers on all of the
9132 * associations on the same socket. For a UDP-style socket with
9133 * multiple associations, it is possible for it to be "unwriteable"
9134 * prematurely. I assume that this is acceptable because
9135 * a premature "unwriteable" is better than an accidental "writeable" which
9136 * would cause an unwanted block under certain circumstances. For the 1-1
9137 * UDP-style sockets or TCP-style sockets, this code should work.
9138 * - Daisy
9139 */
David Brazdil0f672f62019-12-10 10:32:29 +00009140static bool sctp_writeable(struct sock *sk)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009141{
David Brazdil0f672f62019-12-10 10:32:29 +00009142 return sk->sk_sndbuf > sk->sk_wmem_queued;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009143}
9144
9145/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
9146 * returns immediately with EINPROGRESS.
9147 */
9148static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
9149{
9150 struct sock *sk = asoc->base.sk;
9151 int err = 0;
9152 long current_timeo = *timeo_p;
9153 DEFINE_WAIT(wait);
9154
9155 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);
9156
9157 /* Increment the association's refcnt. */
9158 sctp_association_hold(asoc);
9159
9160 for (;;) {
9161 prepare_to_wait_exclusive(&asoc->wait, &wait,
9162 TASK_INTERRUPTIBLE);
9163 if (!*timeo_p)
9164 goto do_nonblock;
9165 if (sk->sk_shutdown & RCV_SHUTDOWN)
9166 break;
9167 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
9168 asoc->base.dead)
9169 goto do_error;
9170 if (signal_pending(current))
9171 goto do_interrupted;
9172
9173 if (sctp_state(asoc, ESTABLISHED))
9174 break;
9175
9176 /* Let another process have a go. Since we are going
9177 * to sleep anyway.
9178 */
9179 release_sock(sk);
9180 current_timeo = schedule_timeout(current_timeo);
9181 lock_sock(sk);
9182
9183 *timeo_p = current_timeo;
9184 }
9185
9186out:
9187 finish_wait(&asoc->wait, &wait);
9188
9189 /* Release the association's refcnt. */
9190 sctp_association_put(asoc);
9191
9192 return err;
9193
9194do_error:
9195 if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
9196 err = -ETIMEDOUT;
9197 else
9198 err = -ECONNREFUSED;
9199 goto out;
9200
9201do_interrupted:
9202 err = sock_intr_errno(*timeo_p);
9203 goto out;
9204
9205do_nonblock:
9206 err = -EINPROGRESS;
9207 goto out;
9208}
9209
9210static int sctp_wait_for_accept(struct sock *sk, long timeo)
9211{
9212 struct sctp_endpoint *ep;
9213 int err = 0;
9214 DEFINE_WAIT(wait);
9215
9216 ep = sctp_sk(sk)->ep;
9217
9218
9219 for (;;) {
9220 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
9221 TASK_INTERRUPTIBLE);
9222
9223 if (list_empty(&ep->asocs)) {
9224 release_sock(sk);
9225 timeo = schedule_timeout(timeo);
9226 lock_sock(sk);
9227 }
9228
9229 err = -EINVAL;
9230 if (!sctp_sstate(sk, LISTENING))
9231 break;
9232
9233 err = 0;
9234 if (!list_empty(&ep->asocs))
9235 break;
9236
9237 err = sock_intr_errno(timeo);
9238 if (signal_pending(current))
9239 break;
9240
9241 err = -EAGAIN;
9242 if (!timeo)
9243 break;
9244 }
9245
9246 finish_wait(sk_sleep(sk), &wait);
9247
9248 return err;
9249}
9250
9251static void sctp_wait_for_close(struct sock *sk, long timeout)
9252{
9253 DEFINE_WAIT(wait);
9254
9255 do {
9256 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
9257 if (list_empty(&sctp_sk(sk)->ep->asocs))
9258 break;
9259 release_sock(sk);
9260 timeout = schedule_timeout(timeout);
9261 lock_sock(sk);
9262 } while (!signal_pending(current) && timeout);
9263
9264 finish_wait(sk_sleep(sk), &wait);
9265}
9266
9267static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
9268{
9269 struct sk_buff *frag;
9270
9271 if (!skb->data_len)
9272 goto done;
9273
9274 /* Don't forget the fragments. */
9275 skb_walk_frags(skb, frag)
9276 sctp_skb_set_owner_r_frag(frag, sk);
9277
9278done:
9279 sctp_skb_set_owner_r(skb, sk);
9280}
9281
9282void sctp_copy_sock(struct sock *newsk, struct sock *sk,
9283 struct sctp_association *asoc)
9284{
9285 struct inet_sock *inet = inet_sk(sk);
9286 struct inet_sock *newinet;
9287 struct sctp_sock *sp = sctp_sk(sk);
9288 struct sctp_endpoint *ep = sp->ep;
9289
9290 newsk->sk_type = sk->sk_type;
9291 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
9292 newsk->sk_flags = sk->sk_flags;
9293 newsk->sk_tsflags = sk->sk_tsflags;
9294 newsk->sk_no_check_tx = sk->sk_no_check_tx;
9295 newsk->sk_no_check_rx = sk->sk_no_check_rx;
9296 newsk->sk_reuse = sk->sk_reuse;
9297 sctp_sk(newsk)->reuse = sp->reuse;
9298
9299 newsk->sk_shutdown = sk->sk_shutdown;
9300 newsk->sk_destruct = sctp_destruct_sock;
9301 newsk->sk_family = sk->sk_family;
9302 newsk->sk_protocol = IPPROTO_SCTP;
9303 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
9304 newsk->sk_sndbuf = sk->sk_sndbuf;
9305 newsk->sk_rcvbuf = sk->sk_rcvbuf;
9306 newsk->sk_lingertime = sk->sk_lingertime;
9307 newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
9308 newsk->sk_sndtimeo = sk->sk_sndtimeo;
9309 newsk->sk_rxhash = sk->sk_rxhash;
9310
9311 newinet = inet_sk(newsk);
9312
9313 /* Initialize sk's sport, dport, rcv_saddr and daddr for
9314 * getsockname() and getpeername()
9315 */
9316 newinet->inet_sport = inet->inet_sport;
9317 newinet->inet_saddr = inet->inet_saddr;
9318 newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
9319 newinet->inet_dport = htons(asoc->peer.port);
9320 newinet->pmtudisc = inet->pmtudisc;
David Brazdil0f672f62019-12-10 10:32:29 +00009321 newinet->inet_id = prandom_u32();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009322
9323 newinet->uc_ttl = inet->uc_ttl;
9324 newinet->mc_loop = 1;
9325 newinet->mc_ttl = 1;
9326 newinet->mc_index = 0;
9327 newinet->mc_list = NULL;
9328
9329 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
9330 net_enable_timestamp();
9331
9332 /* Set newsk security attributes from orginal sk and connection
9333 * security attribute from ep.
9334 */
9335 security_sctp_sk_clone(ep, sk, newsk);
9336}
9337
9338static inline void sctp_copy_descendant(struct sock *sk_to,
9339 const struct sock *sk_from)
9340{
Olivier Deprez0e641232021-09-23 10:07:05 +02009341 size_t ancestor_size = sizeof(struct inet_sock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009342
Olivier Deprez0e641232021-09-23 10:07:05 +02009343 ancestor_size += sk_from->sk_prot->obj_size;
9344 ancestor_size -= offsetof(struct sctp_sock, pd_lobby);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009345 __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
9346}
9347
9348/* Populate the fields of the newsk from the oldsk and migrate the assoc
9349 * and its messages to the newsk.
9350 */
David Brazdil0f672f62019-12-10 10:32:29 +00009351static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
9352 struct sctp_association *assoc,
9353 enum sctp_socket_type type)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009354{
9355 struct sctp_sock *oldsp = sctp_sk(oldsk);
9356 struct sctp_sock *newsp = sctp_sk(newsk);
9357 struct sctp_bind_bucket *pp; /* hash list port iterator */
9358 struct sctp_endpoint *newep = newsp->ep;
9359 struct sk_buff *skb, *tmp;
9360 struct sctp_ulpevent *event;
9361 struct sctp_bind_hashbucket *head;
David Brazdil0f672f62019-12-10 10:32:29 +00009362 int err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009363
9364 /* Migrate socket buffer sizes and all the socket level options to the
9365 * new socket.
9366 */
9367 newsk->sk_sndbuf = oldsk->sk_sndbuf;
9368 newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
9369 /* Brute force copy old sctp opt. */
9370 sctp_copy_descendant(newsk, oldsk);
9371
9372 /* Restore the ep value that was overwritten with the above structure
9373 * copy.
9374 */
9375 newsp->ep = newep;
9376 newsp->hmac = NULL;
9377
9378 /* Hook this new socket in to the bind_hash list. */
9379 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
9380 inet_sk(oldsk)->inet_num)];
9381 spin_lock_bh(&head->lock);
9382 pp = sctp_sk(oldsk)->bind_hash;
9383 sk_add_bind_node(newsk, &pp->owner);
9384 sctp_sk(newsk)->bind_hash = pp;
9385 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
9386 spin_unlock_bh(&head->lock);
9387
9388 /* Copy the bind_addr list from the original endpoint to the new
9389 * endpoint so that we can handle restarts properly
9390 */
David Brazdil0f672f62019-12-10 10:32:29 +00009391 err = sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
9392 &oldsp->ep->base.bind_addr, GFP_KERNEL);
9393 if (err)
9394 return err;
9395
9396 /* New ep's auth_hmacs should be set if old ep's is set, in case
9397 * that net->sctp.auth_enable has been changed to 0 by users and
9398 * new ep's auth_hmacs couldn't be set in sctp_endpoint_init().
9399 */
9400 if (oldsp->ep->auth_hmacs) {
9401 err = sctp_auth_init_hmacs(newsp->ep, GFP_KERNEL);
9402 if (err)
9403 return err;
9404 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009405
Olivier Deprez0e641232021-09-23 10:07:05 +02009406 sctp_auto_asconf_init(newsp);
9407
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009408 /* Move any messages in the old socket's receive queue that are for the
9409 * peeled off association to the new socket's receive queue.
9410 */
9411 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
9412 event = sctp_skb2event(skb);
9413 if (event->asoc == assoc) {
9414 __skb_unlink(skb, &oldsk->sk_receive_queue);
9415 __skb_queue_tail(&newsk->sk_receive_queue, skb);
9416 sctp_skb_set_owner_r_frag(skb, newsk);
9417 }
9418 }
9419
9420 /* Clean up any messages pending delivery due to partial
9421 * delivery. Three cases:
9422 * 1) No partial deliver; no work.
9423 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
9424 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
9425 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009426 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
9427
9428 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
9429 struct sk_buff_head *queue;
9430
9431 /* Decide which queue to move pd_lobby skbs to. */
9432 if (assoc->ulpq.pd_mode) {
9433 queue = &newsp->pd_lobby;
9434 } else
9435 queue = &newsk->sk_receive_queue;
9436
9437 /* Walk through the pd_lobby, looking for skbs that
9438 * need moved to the new socket.
9439 */
9440 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
9441 event = sctp_skb2event(skb);
9442 if (event->asoc == assoc) {
9443 __skb_unlink(skb, &oldsp->pd_lobby);
9444 __skb_queue_tail(queue, skb);
9445 sctp_skb_set_owner_r_frag(skb, newsk);
9446 }
9447 }
9448
9449 /* Clear up any skbs waiting for the partial
9450 * delivery to finish.
9451 */
9452 if (assoc->ulpq.pd_mode)
9453 sctp_clear_pd(oldsk, NULL);
9454
9455 }
9456
9457 sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);
9458
9459 /* Set the type of socket to indicate that it is peeled off from the
9460 * original UDP-style socket or created with the accept() call on a
9461 * TCP-style socket..
9462 */
9463 newsp->type = type;
9464
9465 /* Mark the new socket "in-use" by the user so that any packets
9466 * that may arrive on the association after we've moved it are
9467 * queued to the backlog. This prevents a potential race between
9468 * backlog processing on the old socket and new-packet processing
9469 * on the new socket.
9470 *
9471 * The caller has just allocated newsk so we can guarantee that other
9472 * paths won't try to lock it and then oldsk.
9473 */
9474 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
Olivier Deprez0e641232021-09-23 10:07:05 +02009475 sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009476 sctp_assoc_migrate(assoc, newsk);
Olivier Deprez0e641232021-09-23 10:07:05 +02009477 sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009478
9479 /* If the association on the newsk is already closed before accept()
9480 * is called, set RCV_SHUTDOWN flag.
9481 */
9482 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
9483 inet_sk_set_state(newsk, SCTP_SS_CLOSED);
9484 newsk->sk_shutdown |= RCV_SHUTDOWN;
9485 } else {
9486 inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
9487 }
9488
9489 release_sock(newsk);
David Brazdil0f672f62019-12-10 10:32:29 +00009490
9491 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009492}
9493
9494
9495/* This proto struct describes the ULP interface for SCTP. */
9496struct proto sctp_prot = {
9497 .name = "SCTP",
9498 .owner = THIS_MODULE,
9499 .close = sctp_close,
9500 .disconnect = sctp_disconnect,
9501 .accept = sctp_accept,
9502 .ioctl = sctp_ioctl,
9503 .init = sctp_init_sock,
9504 .destroy = sctp_destroy_sock,
9505 .shutdown = sctp_shutdown,
9506 .setsockopt = sctp_setsockopt,
9507 .getsockopt = sctp_getsockopt,
9508 .sendmsg = sctp_sendmsg,
9509 .recvmsg = sctp_recvmsg,
9510 .bind = sctp_bind,
9511 .backlog_rcv = sctp_backlog_rcv,
9512 .hash = sctp_hash,
9513 .unhash = sctp_unhash,
David Brazdil0f672f62019-12-10 10:32:29 +00009514 .no_autobind = true,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009515 .obj_size = sizeof(struct sctp_sock),
9516 .useroffset = offsetof(struct sctp_sock, subscribe),
9517 .usersize = offsetof(struct sctp_sock, initmsg) -
9518 offsetof(struct sctp_sock, subscribe) +
9519 sizeof_field(struct sctp_sock, initmsg),
9520 .sysctl_mem = sysctl_sctp_mem,
9521 .sysctl_rmem = sysctl_sctp_rmem,
9522 .sysctl_wmem = sysctl_sctp_wmem,
9523 .memory_pressure = &sctp_memory_pressure,
9524 .enter_memory_pressure = sctp_enter_memory_pressure,
9525 .memory_allocated = &sctp_memory_allocated,
9526 .sockets_allocated = &sctp_sockets_allocated,
9527};
9528
9529#if IS_ENABLED(CONFIG_IPV6)
9530
9531#include <net/transp_v6.h>
9532static void sctp_v6_destroy_sock(struct sock *sk)
9533{
9534 sctp_destroy_sock(sk);
9535 inet6_destroy_sock(sk);
9536}
9537
9538struct proto sctpv6_prot = {
9539 .name = "SCTPv6",
9540 .owner = THIS_MODULE,
9541 .close = sctp_close,
9542 .disconnect = sctp_disconnect,
9543 .accept = sctp_accept,
9544 .ioctl = sctp_ioctl,
9545 .init = sctp_init_sock,
9546 .destroy = sctp_v6_destroy_sock,
9547 .shutdown = sctp_shutdown,
9548 .setsockopt = sctp_setsockopt,
9549 .getsockopt = sctp_getsockopt,
9550 .sendmsg = sctp_sendmsg,
9551 .recvmsg = sctp_recvmsg,
9552 .bind = sctp_bind,
9553 .backlog_rcv = sctp_backlog_rcv,
9554 .hash = sctp_hash,
9555 .unhash = sctp_unhash,
David Brazdil0f672f62019-12-10 10:32:29 +00009556 .no_autobind = true,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009557 .obj_size = sizeof(struct sctp6_sock),
9558 .useroffset = offsetof(struct sctp6_sock, sctp.subscribe),
9559 .usersize = offsetof(struct sctp6_sock, sctp.initmsg) -
9560 offsetof(struct sctp6_sock, sctp.subscribe) +
9561 sizeof_field(struct sctp6_sock, sctp.initmsg),
9562 .sysctl_mem = sysctl_sctp_mem,
9563 .sysctl_rmem = sysctl_sctp_rmem,
9564 .sysctl_wmem = sysctl_sctp_wmem,
9565 .memory_pressure = &sctp_memory_pressure,
9566 .enter_memory_pressure = sctp_enter_memory_pressure,
9567 .memory_allocated = &sctp_memory_allocated,
9568 .sockets_allocated = &sctp_sockets_allocated,
9569};
9570#endif /* IS_ENABLED(CONFIG_IPV6) */