1// SPDX-License-Identifier: GPL-2.0-or-later
2
3/*
4 * DECnet An implementation of the DECnet protocol suite for the LINUX
5 * operating system. DECnet is implemented using the BSD Socket
6 * interface as the means of communication with the user level.
7 *
8 * DECnet Socket Layer Interface
9 *
10 * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com>
11 * Patrick Caulfield <patrick@pandh.demon.co.uk>
12 *
13 * Changes:
14 * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
15 * version of the code. Original copyright preserved
16 * below.
17 * Steve Whitehouse: Some bug fixes, cleaning up some code to make it
18 * compatible with my routing layer.
19 * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
20 * Caulfield.
21 * Steve Whitehouse: Further bug fixes, checking module code still works
22 * with new routing layer.
23 * Steve Whitehouse: Additional set/get_sockopt() calls.
24 * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new
25 * code.
26 * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like
 27 * way. Didn't manage it entirely, but it's better.
28 * Steve Whitehouse: ditto for sendmsg().
29 * Steve Whitehouse: A selection of bug fixes to various things.
30 * Steve Whitehouse: Added TIOCOUTQ ioctl.
31 * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
32 * Steve Whitehouse: Fixes to connect() error returns.
33 * Patrick Caulfield: Fixes to delayed acceptance logic.
34 * David S. Miller: New socket locking
35 * Steve Whitehouse: Socket list hashing/locking
36 * Arnaldo C. Melo: use capable, not suser
37 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation
38 * when required.
39 * Patrick Caulfield: /proc/net/decnet now has object name/number
40 * Steve Whitehouse: Fixed local port allocation, hashed sk list
41 * Matthew Wilcox: Fixes for dn_ioctl()
42 * Steve Whitehouse: New connect/accept logic to allow timeouts and
43 * prepare for sendpage etc.
44 */
45
46
47/******************************************************************************
48 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
49
50
51HISTORY:
52
53Version Kernel Date Author/Comments
54------- ------ ---- ---------------
55Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat
56 (emserrat@geocities.com)
57
58 First Development of DECnet Socket La-
59 yer for Linux. Only supports outgoing
60 connections.
61
62Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
63 (patrick@pandh.demon.co.uk)
64
65 Port to new kernel development version.
66
67Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
68 (emserrat@geocities.com)
69 _
70 Added support for incoming connections
71 so we can start developing server apps
72 on Linux.
73 -
74 Module Support
75Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
76 (emserrat@geocities.com)
77 _
78 Added support for X11R6.4. Now we can
79 use DECnet transport for X on Linux!!!
80 -
81Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
82 (emserrat@geocities.com)
83 Removed bugs on flow control
84 Removed bugs on incoming accessdata
85 order
86 -
87Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
88 dn_recvmsg fixes
89
90 Patrick J. Caulfield
91 dn_bind fixes
92*******************************************************************************/
93
94#include <linux/module.h>
95#include <linux/errno.h>
96#include <linux/types.h>
97#include <linux/slab.h>
98#include <linux/socket.h>
99#include <linux/in.h>
100#include <linux/kernel.h>
101#include <linux/sched/signal.h>
102#include <linux/timer.h>
103#include <linux/string.h>
104#include <linux/sockios.h>
105#include <linux/net.h>
106#include <linux/netdevice.h>
107#include <linux/inet.h>
108#include <linux/route.h>
109#include <linux/netfilter.h>
110#include <linux/seq_file.h>
111#include <net/sock.h>
112#include <net/tcp_states.h>
113#include <net/flow.h>
114#include <asm/ioctls.h>
115#include <linux/capability.h>
116#include <linux/mm.h>
117#include <linux/interrupt.h>
118#include <linux/proc_fs.h>
119#include <linux/stat.h>
120#include <linux/init.h>
121#include <linux/poll.h>
122#include <linux/jiffies.h>
123#include <net/net_namespace.h>
124#include <net/neighbour.h>
125#include <net/dst.h>
126#include <net/fib_rules.h>
127#include <net/tcp.h>
128#include <net/dn.h>
129#include <net/dn_nsp.h>
130#include <net/dn_dev.h>
131#include <net/dn_route.h>
132#include <net/dn_fib.h>
133#include <net/dn_neigh.h>
134
135struct dn_sock {
136 struct sock sk;
137 struct dn_scp scp;
138};
139
140static void dn_keepalive(struct sock *sk);
141
142#define DN_SK_HASH_SHIFT 8
143#define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
144#define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
145
146
147static const struct proto_ops dn_proto_ops;
148static DEFINE_RWLOCK(dn_hash_lock);
149static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
150static struct hlist_head dn_wild_sk;
151static atomic_long_t decnet_memory_allocated;
152
153static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
154static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
155
156static struct hlist_head *dn_find_list(struct sock *sk)
157{
158 struct dn_scp *scp = DN_SK(sk);
159
160 if (scp->addr.sdn_flags & SDF_WILD)
161 return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;
162
163 return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK];
164}
165
166/*
167 * Valid ports are those greater than zero and not already in use.
168 */
169static int check_port(__le16 port)
170{
171 struct sock *sk;
172
173 if (port == 0)
174 return -1;
175
176 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
177 struct dn_scp *scp = DN_SK(sk);
178 if (scp->addrloc == port)
179 return -1;
180 }
181 return 0;
182}
183
184static unsigned short port_alloc(struct sock *sk)
185{
186 struct dn_scp *scp = DN_SK(sk);
187	static unsigned short port = 0x2000;
188	unsigned short i_port = port;
189
190 while(check_port(cpu_to_le16(++port)) != 0) {
191 if (port == i_port)
192 return 0;
193 }
194
195 scp->addrloc = cpu_to_le16(port);
196
197 return 1;
198}
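
/*
 * Added note (derived from the code above, not from the original authors):
 * port_alloc() walks the 16 bit local port space linearly, starting from the
 * static counter initialised to 0x2000, until check_port() reports a value
 * that is non-zero and absent from its dn_sk_hash bucket. If the scan wraps
 * all the way back to its starting point the space is exhausted, 0 is
 * returned and dn_hash_sock() turns that into -EUSERS.
 */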
199
200/*
201 * Since this is only ever called from user
202 * level, we don't need a write_lock() version
203 * of this.
204 */
205static int dn_hash_sock(struct sock *sk)
206{
207 struct dn_scp *scp = DN_SK(sk);
208 struct hlist_head *list;
209 int rv = -EUSERS;
210
211 BUG_ON(sk_hashed(sk));
212
213 write_lock_bh(&dn_hash_lock);
214
215 if (!scp->addrloc && !port_alloc(sk))
216 goto out;
217
218 rv = -EADDRINUSE;
219 if ((list = dn_find_list(sk)) == NULL)
220 goto out;
221
222 sk_add_node(sk, list);
223 rv = 0;
224out:
225 write_unlock_bh(&dn_hash_lock);
226 return rv;
227}
228
229static void dn_unhash_sock(struct sock *sk)
230{
231 write_lock(&dn_hash_lock);
232 sk_del_node_init(sk);
233 write_unlock(&dn_hash_lock);
234}
235
236static void dn_unhash_sock_bh(struct sock *sk)
237{
238 write_lock_bh(&dn_hash_lock);
239 sk_del_node_init(sk);
240 write_unlock_bh(&dn_hash_lock);
241}
242
243static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
244{
245 int i;
246 unsigned int hash = addr->sdn_objnum;
247
248 if (hash == 0) {
249 hash = addr->sdn_objnamel;
250 for(i = 0; i < le16_to_cpu(addr->sdn_objnamel); i++) {
251 hash ^= addr->sdn_objname[i];
252 hash ^= (hash << 3);
253 }
254 }
255
256 return &dn_sk_hash[hash & DN_SK_HASH_MASK];
257}
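
/*
 * Added note: listening sockets are hashed on the object number when one is
 * set, otherwise on an XOR/shift fold of the object name bytes, so that
 * dn_sklist_find_listener() can probe a single bucket for either form of
 * address before falling back to the wild socket list.
 */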
258
259/*
260 * Called to transform a socket from bound (i.e. with a local address)
261 * into a listening socket (doesn't need a local port number) and rehashes
262 * based upon the object name/number.
263 */
264static void dn_rehash_sock(struct sock *sk)
265{
266 struct hlist_head *list;
267 struct dn_scp *scp = DN_SK(sk);
268
269 if (scp->addr.sdn_flags & SDF_WILD)
270 return;
271
272 write_lock_bh(&dn_hash_lock);
273 sk_del_node_init(sk);
274 DN_SK(sk)->addrloc = 0;
275 list = listen_hash(&DN_SK(sk)->addr);
276 sk_add_node(sk, list);
277 write_unlock_bh(&dn_hash_lock);
278}
279
280int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
281{
282 int len = 2;
283
284 *buf++ = type;
285
286 switch (type) {
287 case 0:
288 *buf++ = sdn->sdn_objnum;
289 break;
290 case 1:
291 *buf++ = 0;
292 *buf++ = le16_to_cpu(sdn->sdn_objnamel);
293 memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
294 len = 3 + le16_to_cpu(sdn->sdn_objnamel);
295 break;
296 case 2:
297 memset(buf, 0, 5);
298 buf += 5;
299 *buf++ = le16_to_cpu(sdn->sdn_objnamel);
300 memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
301 len = 7 + le16_to_cpu(sdn->sdn_objnamel);
302 break;
303 }
304
305 return len;
306}
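
/*
 * Illustrative sketch (added for clarity; the byte layout follows directly
 * from dn_sockaddr2username() above):
 *
 *   format 0:  [0x00][objnum]                          2 bytes
 *   format 1:  [0x01][0x00][objnamel][objname...]      3 + objnamel bytes
 *   format 2:  [0x02][0x00 x 5][objnamel][objname...]  7 + objnamel bytes
 *
 * For example, an object named "MIRROR" in format 1 is emitted as
 * 01 00 06 'M' 'I' 'R' 'R' 'O' 'R'.
 */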
307
308/*
309 * On reception of usernames, we handle types 1 and 0 for destination
310 * addresses only. Types 2 and 4 are used for source addresses, but the
311 * UIC, GIC are ignored and they are both treated the same way. Type 3
312 * is never used as I've no idea what its purpose might be or what its
313 * format is.
314 */
315int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
316{
317 unsigned char type;
318 int size = len;
319 int namel = 12;
320
321 sdn->sdn_objnum = 0;
322 sdn->sdn_objnamel = cpu_to_le16(0);
323 memset(sdn->sdn_objname, 0, DN_MAXOBJL);
324
325 if (len < 2)
326 return -1;
327
328 len -= 2;
329 *fmt = *data++;
330 type = *data++;
331
332 switch (*fmt) {
333 case 0:
334 sdn->sdn_objnum = type;
335 return 2;
336 case 1:
337 namel = 16;
338 break;
339 case 2:
340 len -= 4;
341 data += 4;
342 break;
343 case 4:
344 len -= 8;
345 data += 8;
346 break;
347 default:
348 return -1;
349 }
350
351 len -= 1;
352
353 if (len < 0)
354 return -1;
355
356 sdn->sdn_objnamel = cpu_to_le16(*data++);
357 len -= le16_to_cpu(sdn->sdn_objnamel);
358
359 if ((len < 0) || (le16_to_cpu(sdn->sdn_objnamel) > namel))
360 return -1;
361
362 memcpy(sdn->sdn_objname, data, le16_to_cpu(sdn->sdn_objnamel));
363
364 return size - len;
365}
366
367struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
368{
369 struct hlist_head *list = listen_hash(addr);
370 struct sock *sk;
371
372 read_lock(&dn_hash_lock);
373 sk_for_each(sk, list) {
374 struct dn_scp *scp = DN_SK(sk);
375 if (sk->sk_state != TCP_LISTEN)
376 continue;
377 if (scp->addr.sdn_objnum) {
378 if (scp->addr.sdn_objnum != addr->sdn_objnum)
379 continue;
380 } else {
381 if (addr->sdn_objnum)
382 continue;
383 if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
384 continue;
385 if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, le16_to_cpu(addr->sdn_objnamel)) != 0)
386 continue;
387 }
388 sock_hold(sk);
389 read_unlock(&dn_hash_lock);
390 return sk;
391 }
392
393 sk = sk_head(&dn_wild_sk);
394 if (sk) {
395 if (sk->sk_state == TCP_LISTEN)
396 sock_hold(sk);
397 else
398 sk = NULL;
399 }
400
401 read_unlock(&dn_hash_lock);
402 return sk;
403}
404
405struct sock *dn_find_by_skb(struct sk_buff *skb)
406{
407 struct dn_skb_cb *cb = DN_SKB_CB(skb);
408 struct sock *sk;
409 struct dn_scp *scp;
410
411 read_lock(&dn_hash_lock);
412 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
413 scp = DN_SK(sk);
414 if (cb->src != dn_saddr2dn(&scp->peer))
415 continue;
416 if (cb->dst_port != scp->addrloc)
417 continue;
418 if (scp->addrrem && (cb->src_port != scp->addrrem))
419 continue;
420 sock_hold(sk);
421 goto found;
422 }
423 sk = NULL;
424found:
425 read_unlock(&dn_hash_lock);
426 return sk;
427}
428
429
430
431static void dn_destruct(struct sock *sk)
432{
433 struct dn_scp *scp = DN_SK(sk);
434
435 skb_queue_purge(&scp->data_xmit_queue);
436 skb_queue_purge(&scp->other_xmit_queue);
437 skb_queue_purge(&scp->other_receive_queue);
438
439	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
440}
441
442static unsigned long dn_memory_pressure;
443
444static void dn_enter_memory_pressure(struct sock *sk)
445{
446 if (!dn_memory_pressure) {
447 dn_memory_pressure = 1;
448 }
449}
450
451static struct proto dn_proto = {
452 .name = "NSP",
453 .owner = THIS_MODULE,
454 .enter_memory_pressure = dn_enter_memory_pressure,
455 .memory_pressure = &dn_memory_pressure,
456 .memory_allocated = &decnet_memory_allocated,
457 .sysctl_mem = sysctl_decnet_mem,
458 .sysctl_wmem = sysctl_decnet_wmem,
459 .sysctl_rmem = sysctl_decnet_rmem,
460 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
461 .obj_size = sizeof(struct dn_sock),
462};
463
464static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp, int kern)
465{
466 struct dn_scp *scp;
467 struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, kern);
468
469 if (!sk)
470 goto out;
471
472 if (sock)
473 sock->ops = &dn_proto_ops;
474 sock_init_data(sock, sk);
475
476 sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
477 sk->sk_destruct = dn_destruct;
478 sk->sk_no_check_tx = 1;
479 sk->sk_family = PF_DECnet;
480 sk->sk_protocol = 0;
481 sk->sk_allocation = gfp;
482 sk->sk_sndbuf = sysctl_decnet_wmem[1];
483 sk->sk_rcvbuf = sysctl_decnet_rmem[1];
484
485 /* Initialization of DECnet Session Control Port */
486 scp = DN_SK(sk);
487 scp->state = DN_O; /* Open */
488 scp->numdat = 1; /* Next data seg to tx */
489 scp->numoth = 1; /* Next oth data to tx */
490 scp->ackxmt_dat = 0; /* Last data seg ack'ed */
491 scp->ackxmt_oth = 0; /* Last oth data ack'ed */
492 scp->ackrcv_dat = 0; /* Highest data ack recv*/
493 scp->ackrcv_oth = 0; /* Last oth data ack rec*/
494 scp->flowrem_sw = DN_SEND;
495 scp->flowloc_sw = DN_SEND;
496 scp->flowrem_dat = 0;
497 scp->flowrem_oth = 1;
498 scp->flowloc_dat = 0;
499 scp->flowloc_oth = 1;
500 scp->services_rem = 0;
501 scp->services_loc = 1 | NSP_FC_NONE;
502 scp->info_rem = 0;
503 scp->info_loc = 0x03; /* NSP version 4.1 */
504 scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
505 scp->nonagle = 0;
506 scp->multi_ireq = 1;
507 scp->accept_mode = ACC_IMMED;
508 scp->addr.sdn_family = AF_DECnet;
509 scp->peer.sdn_family = AF_DECnet;
510 scp->accessdata.acc_accl = 5;
511 memcpy(scp->accessdata.acc_acc, "LINUX", 5);
512
513 scp->max_window = NSP_MAX_WINDOW;
514 scp->snd_window = NSP_MIN_WINDOW;
515 scp->nsp_srtt = NSP_INITIAL_SRTT;
516 scp->nsp_rttvar = NSP_INITIAL_RTTVAR;
517 scp->nsp_rxtshift = 0;
518
519 skb_queue_head_init(&scp->data_xmit_queue);
520 skb_queue_head_init(&scp->other_xmit_queue);
521 skb_queue_head_init(&scp->other_receive_queue);
522
523 scp->persist = 0;
524 scp->persist_fxn = NULL;
525 scp->keepalive = 10 * HZ;
526 scp->keepalive_fxn = dn_keepalive;
527
528 dn_start_slow_timer(sk);
529out:
530 return sk;
531}
532
533/*
534 * Keepalive timer.
535 * FIXME: Should respond to SO_KEEPALIVE etc.
536 */
537static void dn_keepalive(struct sock *sk)
538{
539 struct dn_scp *scp = DN_SK(sk);
540
541 /*
542 * By checking the other_data transmit queue is empty
543 * we are double checking that we are not sending too
544 * many of these keepalive frames.
545 */
546 if (skb_queue_empty(&scp->other_xmit_queue))
547 dn_nsp_send_link(sk, DN_NOCHANGE, 0);
548}
549
550
551/*
552 * Timer for shutdown/destroyed sockets.
553 * When socket is dead & no packets have been sent for a
554 * certain amount of time, they are removed by this
555 * routine. Also takes care of sending out DI & DC
556 * frames at correct times.
557 */
558int dn_destroy_timer(struct sock *sk)
559{
560 struct dn_scp *scp = DN_SK(sk);
561
562 scp->persist = dn_nsp_persist(sk);
563
564 switch (scp->state) {
565 case DN_DI:
566 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
567 if (scp->nsp_rxtshift >= decnet_di_count)
568 scp->state = DN_CN;
569 return 0;
570
571 case DN_DR:
572 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
573 if (scp->nsp_rxtshift >= decnet_dr_count)
574 scp->state = DN_DRC;
575 return 0;
576
577 case DN_DN:
578 if (scp->nsp_rxtshift < decnet_dn_count) {
579 /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
580 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
581 GFP_ATOMIC);
582 return 0;
583 }
584 }
585
586 scp->persist = (HZ * decnet_time_wait);
587
588 if (sk->sk_socket)
589 return 0;
590
591 if (time_after_eq(jiffies, scp->stamp + HZ * decnet_time_wait)) {
592 dn_unhash_sock(sk);
593 sock_put(sk);
594 return 1;
595 }
596
597 return 0;
598}
599
600static void dn_destroy_sock(struct sock *sk)
601{
602 struct dn_scp *scp = DN_SK(sk);
603
604 scp->nsp_rxtshift = 0; /* reset back off */
605
606 if (sk->sk_socket) {
607 if (sk->sk_socket->state != SS_UNCONNECTED)
608 sk->sk_socket->state = SS_DISCONNECTING;
609 }
610
611 sk->sk_state = TCP_CLOSE;
612
613 switch (scp->state) {
614 case DN_DN:
615 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
616 sk->sk_allocation);
617 scp->persist_fxn = dn_destroy_timer;
618 scp->persist = dn_nsp_persist(sk);
619 break;
620 case DN_CR:
621 scp->state = DN_DR;
622 goto disc_reject;
623 case DN_RUN:
624 scp->state = DN_DI;
625 /* fall through */
626 case DN_DI:
627 case DN_DR:
628disc_reject:
629 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
630 /* fall through */
631 case DN_NC:
632 case DN_NR:
633 case DN_RJ:
634 case DN_DIC:
635 case DN_CN:
636 case DN_DRC:
637 case DN_CI:
638 case DN_CD:
639 scp->persist_fxn = dn_destroy_timer;
640 scp->persist = dn_nsp_persist(sk);
641 break;
642 default:
643 printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
644 /* fall through */
645 case DN_O:
646 dn_stop_slow_timer(sk);
647
648 dn_unhash_sock_bh(sk);
649 sock_put(sk);
650
651 break;
652 }
653}
654
655char *dn_addr2asc(__u16 addr, char *buf)
656{
657 unsigned short node, area;
658
659 node = addr & 0x03ff;
660 area = addr >> 10;
661 sprintf(buf, "%hd.%hd", area, node);
662
663 return buf;
664}
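
/*
 * Worked example (added): a DECnet node address packs a 6 bit area number
 * into the top of the 16 bit word and a 10 bit node number into the bottom,
 * i.e. addr = (area << 10) | node. Area 1, node 2 therefore gives 0x0402,
 * which dn_addr2asc() renders as "1.2".
 */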
665
666
667
668static int dn_create(struct net *net, struct socket *sock, int protocol,
669 int kern)
670{
671 struct sock *sk;
672
673 if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
674 return -EINVAL;
675
676 if (!net_eq(net, &init_net))
677 return -EAFNOSUPPORT;
678
679 switch (sock->type) {
680 case SOCK_SEQPACKET:
681 if (protocol != DNPROTO_NSP)
682 return -EPROTONOSUPPORT;
683 break;
684 case SOCK_STREAM:
685 break;
686 default:
687 return -ESOCKTNOSUPPORT;
688 }
689
690
691 if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL, kern)) == NULL)
692 return -ENOBUFS;
693
694 sk->sk_protocol = protocol;
695
696 return 0;
697}
698
699
700static int
701dn_release(struct socket *sock)
702{
703 struct sock *sk = sock->sk;
704
705 if (sk) {
706 sock_orphan(sk);
707 sock_hold(sk);
708 lock_sock(sk);
709 dn_destroy_sock(sk);
710 release_sock(sk);
711 sock_put(sk);
712 }
713
714 return 0;
715}
716
717static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
718{
719 struct sock *sk = sock->sk;
720 struct dn_scp *scp = DN_SK(sk);
721 struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
722 struct net_device *dev, *ldev;
723 int rv;
724
725 if (addr_len != sizeof(struct sockaddr_dn))
726 return -EINVAL;
727
728 if (saddr->sdn_family != AF_DECnet)
729 return -EINVAL;
730
731 if (le16_to_cpu(saddr->sdn_nodeaddrl) && (le16_to_cpu(saddr->sdn_nodeaddrl) != 2))
732 return -EINVAL;
733
734 if (le16_to_cpu(saddr->sdn_objnamel) > DN_MAXOBJL)
735 return -EINVAL;
736
737 if (saddr->sdn_flags & ~SDF_WILD)
738 return -EINVAL;
739
740 if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
741 (saddr->sdn_flags & SDF_WILD)))
742 return -EACCES;
743
744 if (!(saddr->sdn_flags & SDF_WILD)) {
745 if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
746 rcu_read_lock();
747 ldev = NULL;
748 for_each_netdev_rcu(&init_net, dev) {
749 if (!dev->dn_ptr)
750 continue;
751 if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
752 ldev = dev;
753 break;
754 }
755 }
756 rcu_read_unlock();
757 if (ldev == NULL)
758 return -EADDRNOTAVAIL;
759 }
760 }
761
762 rv = -EINVAL;
763 lock_sock(sk);
764 if (sock_flag(sk, SOCK_ZAPPED)) {
765 memcpy(&scp->addr, saddr, addr_len);
766 sock_reset_flag(sk, SOCK_ZAPPED);
767
768 rv = dn_hash_sock(sk);
769 if (rv)
770 sock_set_flag(sk, SOCK_ZAPPED);
771 }
772 release_sock(sk);
773
774 return rv;
775}
776
777
778static int dn_auto_bind(struct socket *sock)
779{
780 struct sock *sk = sock->sk;
781 struct dn_scp *scp = DN_SK(sk);
782 int rv;
783
784 sock_reset_flag(sk, SOCK_ZAPPED);
785
786 scp->addr.sdn_flags = 0;
787 scp->addr.sdn_objnum = 0;
788
789 /*
790 * This stuff is to keep compatibility with Eduardo's
791 * patch. I hope I can dispense with it shortly...
792 */
793 if ((scp->accessdata.acc_accl != 0) &&
794 (scp->accessdata.acc_accl <= 12)) {
795
796 scp->addr.sdn_objnamel = cpu_to_le16(scp->accessdata.acc_accl);
797 memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, le16_to_cpu(scp->addr.sdn_objnamel));
798
799 scp->accessdata.acc_accl = 0;
800 memset(scp->accessdata.acc_acc, 0, 40);
801 }
802 /* End of compatibility stuff */
803
804 scp->addr.sdn_add.a_len = cpu_to_le16(2);
805 rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr);
806 if (rv == 0) {
807 rv = dn_hash_sock(sk);
808 if (rv)
809 sock_set_flag(sk, SOCK_ZAPPED);
810 }
811
812 return rv;
813}
814
815static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
816{
817 struct dn_scp *scp = DN_SK(sk);
818	DEFINE_WAIT_FUNC(wait, woken_wake_function);
819	int err;
820
821 if (scp->state != DN_CR)
822 return -EINVAL;
823
824 scp->state = DN_CC;
825 scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
826 dn_send_conn_conf(sk, allocation);
827
828	add_wait_queue(sk_sleep(sk), &wait);
829	for(;;) {
830 release_sock(sk);
831 if (scp->state == DN_CC)
832			*timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
833		lock_sock(sk);
834 err = 0;
835 if (scp->state == DN_RUN)
836 break;
837 err = sock_error(sk);
838 if (err)
839 break;
840 err = sock_intr_errno(*timeo);
841 if (signal_pending(current))
842 break;
843 err = -EAGAIN;
844 if (!*timeo)
845 break;
846	}
847	remove_wait_queue(sk_sleep(sk), &wait);
848	if (err == 0) {
849 sk->sk_socket->state = SS_CONNECTED;
850 } else if (scp->state != DN_CC) {
851 sk->sk_socket->state = SS_UNCONNECTED;
852 }
853 return err;
854}
855
856static int dn_wait_run(struct sock *sk, long *timeo)
857{
858 struct dn_scp *scp = DN_SK(sk);
859	DEFINE_WAIT_FUNC(wait, woken_wake_function);
860	int err = 0;
861
862 if (scp->state == DN_RUN)
863 goto out;
864
865 if (!*timeo)
866 return -EALREADY;
867
868	add_wait_queue(sk_sleep(sk), &wait);
869	for(;;) {
870 release_sock(sk);
871 if (scp->state == DN_CI || scp->state == DN_CC)
872			*timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
873		lock_sock(sk);
874 err = 0;
875 if (scp->state == DN_RUN)
876 break;
877 err = sock_error(sk);
878 if (err)
879 break;
880 err = sock_intr_errno(*timeo);
881 if (signal_pending(current))
882 break;
883 err = -ETIMEDOUT;
884 if (!*timeo)
885 break;
886	}
887	remove_wait_queue(sk_sleep(sk), &wait);
888out:
889 if (err == 0) {
890 sk->sk_socket->state = SS_CONNECTED;
891 } else if (scp->state != DN_CI && scp->state != DN_CC) {
892 sk->sk_socket->state = SS_UNCONNECTED;
893 }
894 return err;
895}
896
897static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
898{
899 struct socket *sock = sk->sk_socket;
900 struct dn_scp *scp = DN_SK(sk);
901 int err = -EISCONN;
902 struct flowidn fld;
903 struct dst_entry *dst;
904
905 if (sock->state == SS_CONNECTED)
906 goto out;
907
908 if (sock->state == SS_CONNECTING) {
909 err = 0;
910 if (scp->state == DN_RUN) {
911 sock->state = SS_CONNECTED;
912 goto out;
913 }
914 err = -ECONNREFUSED;
915 if (scp->state != DN_CI && scp->state != DN_CC) {
916 sock->state = SS_UNCONNECTED;
917 goto out;
918 }
919 return dn_wait_run(sk, timeo);
920 }
921
922 err = -EINVAL;
923 if (scp->state != DN_O)
924 goto out;
925
926 if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
927 goto out;
928 if (addr->sdn_family != AF_DECnet)
929 goto out;
930 if (addr->sdn_flags & SDF_WILD)
931 goto out;
932
933 if (sock_flag(sk, SOCK_ZAPPED)) {
934 err = dn_auto_bind(sk->sk_socket);
935 if (err)
936 goto out;
937 }
938
939 memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));
940
941 err = -EHOSTUNREACH;
942 memset(&fld, 0, sizeof(fld));
943 fld.flowidn_oif = sk->sk_bound_dev_if;
944 fld.daddr = dn_saddr2dn(&scp->peer);
945 fld.saddr = dn_saddr2dn(&scp->addr);
946 dn_sk_ports_copy(&fld, scp);
947 fld.flowidn_proto = DNPROTO_NSP;
948 if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0)
949 goto out;
950 dst = __sk_dst_get(sk);
951 sk->sk_route_caps = dst->dev->features;
952 sock->state = SS_CONNECTING;
953 scp->state = DN_CI;
954 scp->segsize_loc = dst_metric_advmss(dst);
955
956 dn_nsp_send_conninit(sk, NSP_CI);
957 err = -EINPROGRESS;
958 if (*timeo) {
959 err = dn_wait_run(sk, timeo);
960 }
961out:
962 return err;
963}
964
965static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
966{
967 struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
968 struct sock *sk = sock->sk;
969 int err;
970 long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
971
972 lock_sock(sk);
973 err = __dn_connect(sk, addr, addrlen, &timeo, 0);
974 release_sock(sk);
975
976 return err;
977}
978
979static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
980{
981 struct dn_scp *scp = DN_SK(sk);
982
983 switch (scp->state) {
984 case DN_RUN:
985 return 0;
986 case DN_CR:
987 return dn_confirm_accept(sk, timeo, sk->sk_allocation);
988 case DN_CI:
989 case DN_CC:
990 return dn_wait_run(sk, timeo);
991 case DN_O:
992 return __dn_connect(sk, addr, addrlen, timeo, flags);
993 }
994
995 return -EINVAL;
996}
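
/*
 * Added note: dn_check_state() above is the common helper used by
 * dn_sendmsg() and dn_recvmsg() to bring a socket into the DN_RUN state
 * first: a freshly opened socket (DN_O) is connected, a half-accepted one
 * (DN_CR) has its accept confirmed, and one already connecting (DN_CI/DN_CC)
 * simply waits for the run state.
 */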
997
998
999static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
1000{
1001 unsigned char *ptr = skb->data;
1002
1003 acc->acc_userl = *ptr++;
1004 memcpy(&acc->acc_user, ptr, acc->acc_userl);
1005 ptr += acc->acc_userl;
1006
1007 acc->acc_passl = *ptr++;
1008 memcpy(&acc->acc_pass, ptr, acc->acc_passl);
1009 ptr += acc->acc_passl;
1010
1011 acc->acc_accl = *ptr++;
1012 memcpy(&acc->acc_acc, ptr, acc->acc_accl);
1013
1014 skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
1015
1016}
1017
1018static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
1019{
1020 unsigned char *ptr = skb->data;
1021 u16 len = *ptr++; /* yes, it's 8bit on the wire */
1022
1023 BUG_ON(len > 16); /* we've checked the contents earlier */
1024 opt->opt_optl = cpu_to_le16(len);
1025 opt->opt_status = 0;
1026 memcpy(opt->opt_data, ptr, len);
1027 skb_pull(skb, len + 1);
1028}
1029
1030static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1031{
1032	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1033	struct sk_buff *skb = NULL;
1034 int err = 0;
1035
1036	add_wait_queue(sk_sleep(sk), &wait);
1037	for(;;) {
1038 release_sock(sk);
1039 skb = skb_dequeue(&sk->sk_receive_queue);
1040 if (skb == NULL) {
1041			*timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
1042			skb = skb_dequeue(&sk->sk_receive_queue);
1043 }
1044 lock_sock(sk);
1045 if (skb != NULL)
1046 break;
1047 err = -EINVAL;
1048 if (sk->sk_state != TCP_LISTEN)
1049 break;
1050 err = sock_intr_errno(*timeo);
1051 if (signal_pending(current))
1052 break;
1053 err = -EAGAIN;
1054 if (!*timeo)
1055 break;
1056	}
1057	remove_wait_queue(sk_sleep(sk), &wait);
1058
1059 return skb == NULL ? ERR_PTR(err) : skb;
1060}
1061
1062static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
1063 bool kern)
1064{
1065 struct sock *sk = sock->sk, *newsk;
1066 struct sk_buff *skb = NULL;
1067 struct dn_skb_cb *cb;
1068 unsigned char menuver;
1069 int err = 0;
1070 unsigned char type;
1071 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1072 struct dst_entry *dst;
1073
1074 lock_sock(sk);
1075
1076 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
1077 release_sock(sk);
1078 return -EINVAL;
1079 }
1080
1081 skb = skb_dequeue(&sk->sk_receive_queue);
1082 if (skb == NULL) {
1083 skb = dn_wait_for_connect(sk, &timeo);
1084 if (IS_ERR(skb)) {
1085 release_sock(sk);
1086 return PTR_ERR(skb);
1087 }
1088 }
1089
1090 cb = DN_SKB_CB(skb);
1091 sk->sk_ack_backlog--;
1092 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
1093 if (newsk == NULL) {
1094 release_sock(sk);
1095 kfree_skb(skb);
1096 return -ENOBUFS;
1097 }
1098 release_sock(sk);
1099
1100 dst = skb_dst(skb);
1101 sk_dst_set(newsk, dst);
1102 skb_dst_set(skb, NULL);
1103
1104 DN_SK(newsk)->state = DN_CR;
1105 DN_SK(newsk)->addrrem = cb->src_port;
1106 DN_SK(newsk)->services_rem = cb->services;
1107 DN_SK(newsk)->info_rem = cb->info;
1108 DN_SK(newsk)->segsize_rem = cb->segsize;
1109 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode;
1110
1111 if (DN_SK(newsk)->segsize_rem < 230)
1112 DN_SK(newsk)->segsize_rem = 230;
1113
1114 if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
1115 DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;
1116
1117 newsk->sk_state = TCP_LISTEN;
1118 memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
1119
1120 /*
1121 * If we are listening on a wild socket, we don't want
1122 * the newly created socket on the wrong hash queue.
1123 */
1124 DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;
1125
1126 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
1127 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
1128 *(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
1129 *(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;
1130
1131 menuver = *skb->data;
1132 skb_pull(skb, 1);
1133
1134 if (menuver & DN_MENUVER_ACC)
1135 dn_access_copy(skb, &(DN_SK(newsk)->accessdata));
1136
1137 if (menuver & DN_MENUVER_USR)
1138 dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));
1139
1140 if (menuver & DN_MENUVER_PRX)
1141 DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;
1142
1143 if (menuver & DN_MENUVER_UIC)
1144 DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;
1145
1146 kfree_skb(skb);
1147
1148 memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
1149 sizeof(struct optdata_dn));
1150 memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
1151 sizeof(struct optdata_dn));
1152
1153 lock_sock(newsk);
1154 err = dn_hash_sock(newsk);
1155 if (err == 0) {
1156 sock_reset_flag(newsk, SOCK_ZAPPED);
1157 dn_send_conn_ack(newsk);
1158
1159 /*
1160 * Here we use sk->sk_allocation since although the conn conf is
1161 * for the newsk, the context is the old socket.
1162 */
1163 if (DN_SK(newsk)->accept_mode == ACC_IMMED)
1164 err = dn_confirm_accept(newsk, &timeo,
1165 sk->sk_allocation);
1166 }
1167 release_sock(newsk);
1168 return err;
1169}
1170
1171
1172static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int peer)
1173{
1174 struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
1175 struct sock *sk = sock->sk;
1176 struct dn_scp *scp = DN_SK(sk);
1177
1178 lock_sock(sk);
1179
1180 if (peer) {
1181 if ((sock->state != SS_CONNECTED &&
1182 sock->state != SS_CONNECTING) &&
1183 scp->accept_mode == ACC_IMMED) {
1184 release_sock(sk);
1185 return -ENOTCONN;
1186 }
1187
1188 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
1189 } else {
1190 memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
1191 }
1192
1193 release_sock(sk);
1194
1195 return sizeof(struct sockaddr_dn);
1196}
1197
1198
1199static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wait)
1200{
1201 struct sock *sk = sock->sk;
1202 struct dn_scp *scp = DN_SK(sk);
1203 __poll_t mask = datagram_poll(file, sock, wait);
1204
1205	if (!skb_queue_empty_lockless(&scp->other_receive_queue))
1206		mask |= EPOLLRDBAND;
1207
1208 return mask;
1209}
1210
1211static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1212{
1213 struct sock *sk = sock->sk;
1214 struct dn_scp *scp = DN_SK(sk);
1215 int err = -EOPNOTSUPP;
1216 long amount = 0;
1217 struct sk_buff *skb;
1218 int val;
1219
1220 switch(cmd)
1221 {
1222 case SIOCGIFADDR:
1223 case SIOCSIFADDR:
1224 return dn_dev_ioctl(cmd, (void __user *)arg);
1225
1226 case SIOCATMARK:
1227 lock_sock(sk);
1228 val = !skb_queue_empty(&scp->other_receive_queue);
1229 if (scp->state != DN_RUN)
1230 val = -ENOTCONN;
1231 release_sock(sk);
1232 return val;
1233
1234 case TIOCOUTQ:
1235 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1236 if (amount < 0)
1237 amount = 0;
1238 err = put_user(amount, (int __user *)arg);
1239 break;
1240
1241 case TIOCINQ:
1242 lock_sock(sk);
1243 skb = skb_peek(&scp->other_receive_queue);
1244 if (skb) {
1245 amount = skb->len;
1246 } else {
1247 skb_queue_walk(&sk->sk_receive_queue, skb)
1248 amount += skb->len;
1249 }
1250 release_sock(sk);
1251 err = put_user(amount, (int __user *)arg);
1252 break;
1253
1254 default:
1255 err = -ENOIOCTLCMD;
1256 break;
1257 }
1258
1259 return err;
1260}
1261
1262static int dn_listen(struct socket *sock, int backlog)
1263{
1264 struct sock *sk = sock->sk;
1265 int err = -EINVAL;
1266
1267 lock_sock(sk);
1268
1269 if (sock_flag(sk, SOCK_ZAPPED))
1270 goto out;
1271
1272 if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
1273 goto out;
1274
1275 sk->sk_max_ack_backlog = backlog;
1276 sk->sk_ack_backlog = 0;
1277 sk->sk_state = TCP_LISTEN;
1278 err = 0;
1279 dn_rehash_sock(sk);
1280
1281out:
1282 release_sock(sk);
1283
1284 return err;
1285}
1286
1287
1288static int dn_shutdown(struct socket *sock, int how)
1289{
1290 struct sock *sk = sock->sk;
1291 struct dn_scp *scp = DN_SK(sk);
1292 int err = -ENOTCONN;
1293
1294 lock_sock(sk);
1295
1296 if (sock->state == SS_UNCONNECTED)
1297 goto out;
1298
1299 err = 0;
1300 if (sock->state == SS_DISCONNECTING)
1301 goto out;
1302
1303 err = -EINVAL;
1304 if (scp->state == DN_O)
1305 goto out;
1306
1307 if (how != SHUT_RDWR)
1308 goto out;
1309
1310 sk->sk_shutdown = SHUTDOWN_MASK;
1311 dn_destroy_sock(sk);
1312 err = 0;
1313
1314out:
1315 release_sock(sk);
1316
1317 return err;
1318}
1319
1320static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1321{
1322 struct sock *sk = sock->sk;
1323 int err;
1324
1325 lock_sock(sk);
1326 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
1327 release_sock(sk);
1328#ifdef CONFIG_NETFILTER
1329 /* we need to exclude all possible ENOPROTOOPTs except default case */
1330 if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
1331 optname != DSO_STREAM && optname != DSO_SEQPACKET)
1332 err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
1333#endif
1334
1335 return err;
1336}
1337
1338static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags)
1339{
1340 struct sock *sk = sock->sk;
1341 struct dn_scp *scp = DN_SK(sk);
1342 long timeo;
1343 union {
1344 struct optdata_dn opt;
1345 struct accessdata_dn acc;
1346 int mode;
1347 unsigned long win;
1348 int val;
1349 unsigned char services;
1350 unsigned char info;
1351 } u;
1352 int err;
1353
1354 if (optlen && !optval)
1355 return -EINVAL;
1356
1357 if (optlen > sizeof(u))
1358 return -EINVAL;
1359
1360 if (copy_from_user(&u, optval, optlen))
1361 return -EFAULT;
1362
1363 switch (optname) {
1364 case DSO_CONDATA:
1365 if (sock->state == SS_CONNECTED)
1366 return -EISCONN;
1367 if ((scp->state != DN_O) && (scp->state != DN_CR))
1368 return -EINVAL;
1369
1370 if (optlen != sizeof(struct optdata_dn))
1371 return -EINVAL;
1372
1373 if (le16_to_cpu(u.opt.opt_optl) > 16)
1374 return -EINVAL;
1375
1376 memcpy(&scp->conndata_out, &u.opt, optlen);
1377 break;
1378
1379 case DSO_DISDATA:
1380 if (sock->state != SS_CONNECTED &&
1381 scp->accept_mode == ACC_IMMED)
1382 return -ENOTCONN;
1383
1384 if (optlen != sizeof(struct optdata_dn))
1385 return -EINVAL;
1386
1387 if (le16_to_cpu(u.opt.opt_optl) > 16)
1388 return -EINVAL;
1389
1390 memcpy(&scp->discdata_out, &u.opt, optlen);
1391 break;
1392
1393 case DSO_CONACCESS:
1394 if (sock->state == SS_CONNECTED)
1395 return -EISCONN;
1396 if (scp->state != DN_O)
1397 return -EINVAL;
1398
1399 if (optlen != sizeof(struct accessdata_dn))
1400 return -EINVAL;
1401
1402 if ((u.acc.acc_accl > DN_MAXACCL) ||
1403 (u.acc.acc_passl > DN_MAXACCL) ||
1404 (u.acc.acc_userl > DN_MAXACCL))
1405 return -EINVAL;
1406
1407 memcpy(&scp->accessdata, &u.acc, optlen);
1408 break;
1409
1410 case DSO_ACCEPTMODE:
1411 if (sock->state == SS_CONNECTED)
1412 return -EISCONN;
1413 if (scp->state != DN_O)
1414 return -EINVAL;
1415
1416 if (optlen != sizeof(int))
1417 return -EINVAL;
1418
1419 if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
1420 return -EINVAL;
1421
1422 scp->accept_mode = (unsigned char)u.mode;
1423 break;
1424
1425 case DSO_CONACCEPT:
1426 if (scp->state != DN_CR)
1427 return -EINVAL;
1428 timeo = sock_rcvtimeo(sk, 0);
1429 err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
1430 return err;
1431
1432 case DSO_CONREJECT:
1433 if (scp->state != DN_CR)
1434 return -EINVAL;
1435
1436 scp->state = DN_DR;
1437 sk->sk_shutdown = SHUTDOWN_MASK;
1438 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
1439 break;
1440
1441 case DSO_MAXWINDOW:
1442 if (optlen != sizeof(unsigned long))
1443 return -EINVAL;
1444 if (u.win > NSP_MAX_WINDOW)
1445 u.win = NSP_MAX_WINDOW;
1446 if (u.win == 0)
1447 return -EINVAL;
1448 scp->max_window = u.win;
1449 if (scp->snd_window > u.win)
1450 scp->snd_window = u.win;
1451 break;
1452
1453 case DSO_NODELAY:
1454 if (optlen != sizeof(int))
1455 return -EINVAL;
1456 if (scp->nonagle == TCP_NAGLE_CORK)
1457 return -EINVAL;
1458 scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_OFF;
1459 /* if (scp->nonagle == 1) { Push pending frames } */
1460 break;
1461
1462 case DSO_CORK:
1463 if (optlen != sizeof(int))
1464 return -EINVAL;
1465 if (scp->nonagle == TCP_NAGLE_OFF)
1466 return -EINVAL;
1467 scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_CORK;
1468 /* if (scp->nonagle == 0) { Push pending frames } */
1469 break;
1470
1471 case DSO_SERVICES:
1472 if (optlen != sizeof(unsigned char))
1473 return -EINVAL;
1474 if ((u.services & ~NSP_FC_MASK) != 0x01)
1475 return -EINVAL;
1476 if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
1477 return -EINVAL;
1478 scp->services_loc = u.services;
1479 break;
1480
1481 case DSO_INFO:
1482 if (optlen != sizeof(unsigned char))
1483 return -EINVAL;
1484 if (u.info & 0xfc)
1485 return -EINVAL;
1486 scp->info_loc = u.info;
1487 break;
1488
1489 case DSO_LINKINFO:
1490 case DSO_STREAM:
1491 case DSO_SEQPACKET:
1492 default:
1493 return -ENOPROTOOPT;
1494 }
1495
1496 return 0;
1497}
1498
1499static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1500{
1501 struct sock *sk = sock->sk;
1502 int err;
1503
1504 lock_sock(sk);
1505 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
1506 release_sock(sk);
1507#ifdef CONFIG_NETFILTER
1508 if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
1509 optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
1510 optname != DSO_CONREJECT) {
1511 int len;
1512
1513 if (get_user(len, optlen))
1514 return -EFAULT;
1515
1516 err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
1517 if (err >= 0)
1518 err = put_user(len, optlen);
1519 }
1520#endif
1521
1522 return err;
1523}
1524
1525static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags)
1526{
1527 struct sock *sk = sock->sk;
1528 struct dn_scp *scp = DN_SK(sk);
1529 struct linkinfo_dn link;
1530 unsigned int r_len;
1531 void *r_data = NULL;
1532 unsigned int val;
1533
1534 if(get_user(r_len , optlen))
1535 return -EFAULT;
1536
1537 switch (optname) {
1538 case DSO_CONDATA:
1539 if (r_len > sizeof(struct optdata_dn))
1540 r_len = sizeof(struct optdata_dn);
1541 r_data = &scp->conndata_in;
1542 break;
1543
1544 case DSO_DISDATA:
1545 if (r_len > sizeof(struct optdata_dn))
1546 r_len = sizeof(struct optdata_dn);
1547 r_data = &scp->discdata_in;
1548 break;
1549
1550 case DSO_CONACCESS:
1551 if (r_len > sizeof(struct accessdata_dn))
1552 r_len = sizeof(struct accessdata_dn);
1553 r_data = &scp->accessdata;
1554 break;
1555
1556 case DSO_ACCEPTMODE:
1557 if (r_len > sizeof(unsigned char))
1558 r_len = sizeof(unsigned char);
1559 r_data = &scp->accept_mode;
1560 break;
1561
1562 case DSO_LINKINFO:
1563 if (r_len > sizeof(struct linkinfo_dn))
1564 r_len = sizeof(struct linkinfo_dn);
1565
1566 memset(&link, 0, sizeof(link));
1567
1568 switch (sock->state) {
1569 case SS_CONNECTING:
1570 link.idn_linkstate = LL_CONNECTING;
1571 break;
1572 case SS_DISCONNECTING:
1573 link.idn_linkstate = LL_DISCONNECTING;
1574 break;
1575 case SS_CONNECTED:
1576 link.idn_linkstate = LL_RUNNING;
1577 break;
1578 default:
1579 link.idn_linkstate = LL_INACTIVE;
1580 }
1581
1582 link.idn_segsize = scp->segsize_rem;
1583 r_data = &link;
1584 break;
1585
1586 case DSO_MAXWINDOW:
1587 if (r_len > sizeof(unsigned long))
1588 r_len = sizeof(unsigned long);
1589 r_data = &scp->max_window;
1590 break;
1591
1592 case DSO_NODELAY:
1593 if (r_len > sizeof(int))
1594 r_len = sizeof(int);
1595 val = (scp->nonagle == TCP_NAGLE_OFF);
1596 r_data = &val;
1597 break;
1598
1599 case DSO_CORK:
1600 if (r_len > sizeof(int))
1601 r_len = sizeof(int);
1602 val = (scp->nonagle == TCP_NAGLE_CORK);
1603 r_data = &val;
1604 break;
1605
1606 case DSO_SERVICES:
1607 if (r_len > sizeof(unsigned char))
1608 r_len = sizeof(unsigned char);
1609 r_data = &scp->services_rem;
1610 break;
1611
1612 case DSO_INFO:
1613 if (r_len > sizeof(unsigned char))
1614 r_len = sizeof(unsigned char);
1615 r_data = &scp->info_rem;
1616 break;
1617
1618 case DSO_STREAM:
1619 case DSO_SEQPACKET:
1620 case DSO_CONACCEPT:
1621 case DSO_CONREJECT:
1622 default:
1623 return -ENOPROTOOPT;
1624 }
1625
1626 if (r_data) {
1627 if (copy_to_user(optval, r_data, r_len))
1628 return -EFAULT;
1629 if (put_user(r_len, optlen))
1630 return -EFAULT;
1631 }
1632
1633 return 0;
1634}
1635
1636
1637static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
1638{
1639 struct sk_buff *skb;
1640 int len = 0;
1641
1642 if (flags & MSG_OOB)
1643 return !skb_queue_empty(q) ? 1 : 0;
1644
1645 skb_queue_walk(q, skb) {
1646 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1647 len += skb->len;
1648
1649 if (cb->nsp_flags & 0x40) {
1650 /* SOCK_SEQPACKET reads to EOM */
1651 if (sk->sk_type == SOCK_SEQPACKET)
1652 return 1;
1653 /* so does SOCK_STREAM unless WAITALL is specified */
1654 if (!(flags & MSG_WAITALL))
1655 return 1;
1656 }
1657
1658 /* minimum data length for read exceeded */
1659 if (len >= target)
1660 return 1;
1661 }
1662
1663 return 0;
1664}
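
/*
 * Added note: bit 0x20 in the NSP data flags marks the first segment of a
 * message and bit 0x40 the last (see dn_sendmsg() below); dn_data_ready()
 * uses the latter to decide when a SOCK_SEQPACKET read, or a SOCK_STREAM
 * read without MSG_WAITALL, has a complete record available.
 */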
1665
1666
1667static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1668 int flags)
1669{
1670 struct sock *sk = sock->sk;
1671 struct dn_scp *scp = DN_SK(sk);
1672 struct sk_buff_head *queue = &sk->sk_receive_queue;
1673 size_t target = size > 1 ? 1 : 0;
1674 size_t copied = 0;
1675 int rv = 0;
1676 struct sk_buff *skb, *n;
1677 struct dn_skb_cb *cb = NULL;
1678 unsigned char eor = 0;
1679 long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1680
1681 lock_sock(sk);
1682
1683 if (sock_flag(sk, SOCK_ZAPPED)) {
1684 rv = -EADDRNOTAVAIL;
1685 goto out;
1686 }
1687
1688 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1689 rv = 0;
1690 goto out;
1691 }
1692
1693 rv = dn_check_state(sk, NULL, 0, &timeo, flags);
1694 if (rv)
1695 goto out;
1696
1697 if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
1698 rv = -EOPNOTSUPP;
1699 goto out;
1700 }
1701
1702 if (flags & MSG_OOB)
1703 queue = &scp->other_receive_queue;
1704
1705 if (flags & MSG_WAITALL)
1706 target = size;
1707
1708
1709 /*
1710 * See if there is data ready to read, sleep if there isn't
1711 */
1712 for(;;) {
1713 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1714
1715 if (sk->sk_err)
1716 goto out;
1717
1718 if (!skb_queue_empty(&scp->other_receive_queue)) {
1719 if (!(flags & MSG_OOB)) {
1720 msg->msg_flags |= MSG_OOB;
1721 if (!scp->other_report) {
1722 scp->other_report = 1;
1723 goto out;
1724 }
1725 }
1726 }
1727
1728 if (scp->state != DN_RUN)
1729 goto out;
1730
1731 if (signal_pending(current)) {
1732 rv = sock_intr_errno(timeo);
1733 goto out;
1734 }
1735
1736 if (dn_data_ready(sk, queue, flags, target))
1737 break;
1738
1739 if (flags & MSG_DONTWAIT) {
1740 rv = -EWOULDBLOCK;
1741 goto out;
1742 }
1743
1744 add_wait_queue(sk_sleep(sk), &wait);
1745 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1746 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target), &wait);
1747 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1748 remove_wait_queue(sk_sleep(sk), &wait);
1749 }
1750
1751 skb_queue_walk_safe(queue, skb, n) {
1752 unsigned int chunk = skb->len;
1753 cb = DN_SKB_CB(skb);
1754
1755 if ((chunk + copied) > size)
1756 chunk = size - copied;
1757
1758 if (memcpy_to_msg(msg, skb->data, chunk)) {
1759 rv = -EFAULT;
1760 break;
1761 }
1762 copied += chunk;
1763
1764 if (!(flags & MSG_PEEK))
1765 skb_pull(skb, chunk);
1766
1767 eor = cb->nsp_flags & 0x40;
1768
1769 if (skb->len == 0) {
1770 skb_unlink(skb, queue);
1771 kfree_skb(skb);
1772 /*
1773 * N.B. Don't refer to skb or cb after this point
1774 * in loop.
1775 */
1776 if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
1777 scp->flowloc_sw = DN_SEND;
1778 dn_nsp_send_link(sk, DN_SEND, 0);
1779 }
1780 }
1781
1782 if (eor) {
1783 if (sk->sk_type == SOCK_SEQPACKET)
1784 break;
1785 if (!(flags & MSG_WAITALL))
1786 break;
1787 }
1788
1789 if (flags & MSG_OOB)
1790 break;
1791
1792 if (copied >= target)
1793 break;
1794 }
1795
1796 rv = copied;
1797
1798
1799 if (eor && (sk->sk_type == SOCK_SEQPACKET))
1800 msg->msg_flags |= MSG_EOR;
1801
1802out:
1803 if (rv == 0)
1804 rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);
1805
1806 if ((rv >= 0) && msg->msg_name) {
1807 __sockaddr_check_size(sizeof(struct sockaddr_dn));
1808 memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
1809 msg->msg_namelen = sizeof(struct sockaddr_dn);
1810 }
1811
1812 release_sock(sk);
1813
1814 return rv;
1815}
1816
1817
1818static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
1819{
1820 unsigned char fctype = scp->services_rem & NSP_FC_MASK;
1821 if (skb_queue_len(queue) >= scp->snd_window)
1822 return 1;
1823 if (fctype != NSP_FC_NONE) {
1824 if (flags & MSG_OOB) {
1825 if (scp->flowrem_oth == 0)
1826 return 1;
1827 } else {
1828 if (scp->flowrem_dat == 0)
1829 return 1;
1830 }
1831 }
1832 return 0;
1833}
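
/*
 * Added note: dn_queue_too_long() throttles transmission on two counts taken
 * from the code above: the per-socket send window (snd_window) and, when the
 * peer negotiated flow control, the remote request counters (flowrem_oth for
 * other-data, flowrem_dat for normal data), which dn_sendmsg() decrements
 * per segment (NSP_FC_SRC) or per complete message (NSP_FC_SCMC).
 */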
1834
1835/*
1836 * The DECnet spec requires that the "routing layer" accepts packets which
1837 * are at least 230 bytes in size. This excludes any headers which the NSP
1838 * layer might add, so we always assume that we'll be using the maximal
1839 * length header on data packets. The variation in length is due to the
1840 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
1841 * make much practical difference.
1842 */
1843unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
1844{
1845 unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;
1846 if (dev) {
1847 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1848 mtu -= LL_RESERVED_SPACE(dev);
1849 if (dn_db->use_long)
1850 mtu -= 21;
1851 else
1852 mtu -= 6;
1853 mtu -= DN_MAX_NSP_DATA_HEADER;
1854 } else {
1855 /*
1856 * 21 = long header, 16 = guess at MAC header length
1857 */
1858 mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
1859 }
1860 if (mtu > mss)
1861 mss = mtu;
1862 return mss;
1863}
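
/*
 * Worked example (added; concrete header sizes beyond those named in the
 * code are assumptions): with a 1500 byte device MTU and the long routing
 * header in use, the usable payload is roughly
 *
 *   1500 - LL_RESERVED_SPACE(dev) - 21 - DN_MAX_NSP_DATA_HEADER
 *
 * and dn_mss_from_pmtu() returns the larger of that figure and the
 * 230 - DN_MAX_NSP_DATA_HEADER floor required by the routing layer.
 */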
1864
1865static inline unsigned int dn_current_mss(struct sock *sk, int flags)
1866{
1867 struct dst_entry *dst = __sk_dst_get(sk);
1868 struct dn_scp *scp = DN_SK(sk);
1869 int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);
1870
1871 /* Other data messages are limited to 16 bytes per packet */
1872 if (flags & MSG_OOB)
1873 return 16;
1874
1875 /* This works out the maximum size of segment we can send out */
1876 if (dst) {
1877 u32 mtu = dst_mtu(dst);
1878 mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
1879 }
1880
1881 return mss_now;
1882}
1883
1884/*
1885 * N.B. We get the timeout wrong here, but then we always did get it
1886 * wrong before and this is another step along the road to correcting
1887 * it. It ought to get updated each time we pass through the routine,
1888 * but in practice it probably doesn't matter too much for now.
1889 */
1890static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
1891 unsigned long datalen, int noblock,
1892 int *errcode)
1893{
1894 struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
1895 noblock, errcode);
1896 if (skb) {
1897 skb->protocol = htons(ETH_P_DNA_RT);
1898 skb->pkt_type = PACKET_OUTGOING;
1899 }
1900 return skb;
1901}
1902
1903static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1904{
1905 struct sock *sk = sock->sk;
1906 struct dn_scp *scp = DN_SK(sk);
1907 size_t mss;
1908 struct sk_buff_head *queue = &scp->data_xmit_queue;
1909 int flags = msg->msg_flags;
1910 int err = 0;
1911 size_t sent = 0;
1912 int addr_len = msg->msg_namelen;
1913 DECLARE_SOCKADDR(struct sockaddr_dn *, addr, msg->msg_name);
1914 struct sk_buff *skb = NULL;
1915 struct dn_skb_cb *cb;
1916 size_t len;
1917 unsigned char fctype;
1918 long timeo;
1919
1920 if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
1921 return -EOPNOTSUPP;
1922
1923 if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
1924 return -EINVAL;
1925
1926 lock_sock(sk);
1927 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1928 /*
1929 * The only difference between stream sockets and sequenced packet
1930 * sockets is that the stream sockets always behave as if MSG_EOR
1931 * has been set.
1932 */
1933 if (sock->type == SOCK_STREAM) {
1934 if (flags & MSG_EOR) {
1935 err = -EINVAL;
1936 goto out;
1937 }
1938 flags |= MSG_EOR;
1939 }
1940
1941
1942 err = dn_check_state(sk, addr, addr_len, &timeo, flags);
1943 if (err)
1944 goto out_err;
1945
1946 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1947 err = -EPIPE;
1948 if (!(flags & MSG_NOSIGNAL))
1949 send_sig(SIGPIPE, current, 0);
1950 goto out_err;
1951 }
1952
1953 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
1954 dst_negative_advice(sk);
1955
1956 mss = scp->segsize_rem;
1957 fctype = scp->services_rem & NSP_FC_MASK;
1958
1959 mss = dn_current_mss(sk, flags);
1960
1961 if (flags & MSG_OOB) {
1962 queue = &scp->other_xmit_queue;
1963 if (size > mss) {
1964 err = -EMSGSIZE;
1965 goto out;
1966 }
1967 }
1968
1969 scp->persist_fxn = dn_nsp_xmit_timeout;
1970
1971 while(sent < size) {
1972 err = sock_error(sk);
1973 if (err)
1974 goto out;
1975
1976 if (signal_pending(current)) {
1977 err = sock_intr_errno(timeo);
1978 goto out;
1979 }
1980
1981 /*
1982 * Calculate size that we wish to send.
1983 */
1984 len = size - sent;
1985
1986 if (len > mss)
1987 len = mss;
1988
1989 /*
1990 * Wait for queue size to go down below the window
1991 * size.
1992 */
1993 if (dn_queue_too_long(scp, queue, flags)) {
1994 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1995
1996 if (flags & MSG_DONTWAIT) {
1997 err = -EWOULDBLOCK;
1998 goto out;
1999 }
2000
2001 add_wait_queue(sk_sleep(sk), &wait);
2002 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2003 sk_wait_event(sk, &timeo,
2004 !dn_queue_too_long(scp, queue, flags), &wait);
2005 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2006 remove_wait_queue(sk_sleep(sk), &wait);
2007 continue;
2008 }
2009
2010 /*
2011 * Get a suitably sized skb.
2012 * 64 is a bit of a hack really, but it's larger than any
2013 * link-layer headers and has served us well as a good
2014 * guess as to their real length.
2015 */
2016 skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
2017 flags & MSG_DONTWAIT, &err);
2018
2019 if (err)
2020 break;
2021
2022 if (!skb)
2023 continue;
2024
2025 cb = DN_SKB_CB(skb);
2026
2027 skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
2028
2029 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
2030 err = -EFAULT;
2031 goto out;
2032 }
2033
2034 if (flags & MSG_OOB) {
2035 cb->nsp_flags = 0x30;
2036 if (fctype != NSP_FC_NONE)
2037 scp->flowrem_oth--;
2038 } else {
2039 cb->nsp_flags = 0x00;
2040 if (scp->seg_total == 0)
2041 cb->nsp_flags |= 0x20;
2042
2043 scp->seg_total += len;
2044
2045 if (((sent + len) == size) && (flags & MSG_EOR)) {
2046 cb->nsp_flags |= 0x40;
2047 scp->seg_total = 0;
2048 if (fctype == NSP_FC_SCMC)
2049 scp->flowrem_dat--;
2050 }
2051 if (fctype == NSP_FC_SRC)
2052 scp->flowrem_dat--;
2053 }
2054
2055 sent += len;
2056 dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
2057 skb = NULL;
2058
2059 scp->persist = dn_nsp_persist(sk);
2060
2061 }
2062out:
2063
2064 kfree_skb(skb);
2065
2066 release_sock(sk);
2067
2068 return sent ? sent : err;
2069
2070out_err:
2071 err = sk_stream_error(sk, flags, err);
2072 release_sock(sk);
2073 return err;
2074}
2075
2076static int dn_device_event(struct notifier_block *this, unsigned long event,
2077 void *ptr)
2078{
2079 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2080
2081 if (!net_eq(dev_net(dev), &init_net))
2082 return NOTIFY_DONE;
2083
2084 switch (event) {
2085 case NETDEV_UP:
2086 dn_dev_up(dev);
2087 break;
2088 case NETDEV_DOWN:
2089 dn_dev_down(dev);
2090 break;
2091 default:
2092 break;
2093 }
2094
2095 return NOTIFY_DONE;
2096}
2097
2098static struct notifier_block dn_dev_notifier = {
2099 .notifier_call = dn_device_event,
2100};
2101
2102static struct packet_type dn_dix_packet_type __read_mostly = {
2103 .type = cpu_to_be16(ETH_P_DNA_RT),
2104 .func = dn_route_rcv,
2105};
2106
2107#ifdef CONFIG_PROC_FS
2108struct dn_iter_state {
2109 int bucket;
2110};
2111
2112static struct sock *dn_socket_get_first(struct seq_file *seq)
2113{
2114 struct dn_iter_state *state = seq->private;
2115 struct sock *n = NULL;
2116
2117 for(state->bucket = 0;
2118 state->bucket < DN_SK_HASH_SIZE;
2119 ++state->bucket) {
2120 n = sk_head(&dn_sk_hash[state->bucket]);
2121 if (n)
2122 break;
2123 }
2124
2125 return n;
2126}
2127
2128static struct sock *dn_socket_get_next(struct seq_file *seq,
2129 struct sock *n)
2130{
2131 struct dn_iter_state *state = seq->private;
2132
2133 n = sk_next(n);
2134try_again:
2135 if (n)
2136 goto out;
2137 if (++state->bucket >= DN_SK_HASH_SIZE)
2138 goto out;
2139 n = sk_head(&dn_sk_hash[state->bucket]);
2140 goto try_again;
2141out:
2142 return n;
2143}
2144
2145static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
2146{
2147 struct sock *sk = dn_socket_get_first(seq);
2148
2149 if (sk) {
2150 while(*pos && (sk = dn_socket_get_next(seq, sk)))
2151 --*pos;
2152 }
2153 return *pos ? NULL : sk;
2154}
2155
2156static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
2157{
2158 void *rc;
2159 read_lock_bh(&dn_hash_lock);
2160 rc = socket_get_idx(seq, &pos);
2161 if (!rc) {
2162 read_unlock_bh(&dn_hash_lock);
2163 }
2164 return rc;
2165}
2166
2167static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
2168{
2169 return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2170}
2171
2172static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2173{
2174 void *rc;
2175
2176 if (v == SEQ_START_TOKEN) {
2177 rc = dn_socket_get_idx(seq, 0);
2178 goto out;
2179 }
2180
2181 rc = dn_socket_get_next(seq, v);
2182 if (rc)
2183 goto out;
2184 read_unlock_bh(&dn_hash_lock);
2185out:
2186 ++*pos;
2187 return rc;
2188}
2189
2190static void dn_socket_seq_stop(struct seq_file *seq, void *v)
2191{
2192 if (v && v != SEQ_START_TOKEN)
2193 read_unlock_bh(&dn_hash_lock);
2194}
2195
2196#define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
2197
2198static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
2199{
2200 int i;
2201
2202 switch (le16_to_cpu(dn->sdn_objnamel)) {
2203 case 0:
2204 sprintf(buf, "%d", dn->sdn_objnum);
2205 break;
2206 default:
2207 for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) {
2208 buf[i] = dn->sdn_objname[i];
2209 if (IS_NOT_PRINTABLE(buf[i]))
2210 buf[i] = '.';
2211 }
2212 buf[i] = 0;
2213 }
2214}
2215
2216static char *dn_state2asc(unsigned char state)
2217{
2218 switch (state) {
2219 case DN_O:
2220 return "OPEN";
2221 case DN_CR:
2222 return " CR";
2223 case DN_DR:
2224 return " DR";
2225 case DN_DRC:
2226 return " DRC";
2227 case DN_CC:
2228 return " CC";
2229 case DN_CI:
2230 return " CI";
2231 case DN_NR:
2232 return " NR";
2233 case DN_NC:
2234 return " NC";
2235 case DN_CD:
2236 return " CD";
2237 case DN_RJ:
2238 return " RJ";
2239 case DN_RUN:
2240 return " RUN";
2241 case DN_DI:
2242 return " DI";
2243 case DN_DIC:
2244 return " DIC";
2245 case DN_DN:
2246 return " DN";
2247 case DN_CL:
2248 return " CL";
2249 case DN_CN:
2250 return " CN";
2251 }
2252
2253 return "????";
2254}
2255
2256static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
2257{
2258 struct dn_scp *scp = DN_SK(sk);
2259 char buf1[DN_ASCBUF_LEN];
2260 char buf2[DN_ASCBUF_LEN];
2261 char local_object[DN_MAXOBJL+3];
2262 char remote_object[DN_MAXOBJL+3];
2263
2264 dn_printable_object(&scp->addr, local_object);
2265 dn_printable_object(&scp->peer, remote_object);
2266
2267 seq_printf(seq,
2268 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
2269 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
2270 dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1),
2271 scp->addrloc,
2272 scp->numdat,
2273 scp->numoth,
2274 scp->ackxmt_dat,
2275 scp->ackxmt_oth,
2276 scp->flowloc_sw,
2277 local_object,
2278 dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2),
2279 scp->addrrem,
2280 scp->numdat_rcv,
2281 scp->numoth_rcv,
2282 scp->ackrcv_dat,
2283 scp->ackrcv_oth,
2284 scp->flowrem_sw,
2285 remote_object,
2286 dn_state2asc(scp->state),
2287 ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
2288}
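
/*
 * Added note: each /proc/net/decnet entry printed above shows the local
 * address/port, transmit sequence and ack counters (numdat:numoth,
 * ackxmt_dat:ackxmt_oth), flow-control state and printable object name,
 * followed by the same fields for the remote side (receive counters), the
 * NSP state and the accept mode (IMMED or DEFER).
 */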
2289
2290static int dn_socket_seq_show(struct seq_file *seq, void *v)
2291{
2292 if (v == SEQ_START_TOKEN) {
2293 seq_puts(seq, "Local Remote\n");
2294 } else {
2295 dn_socket_format_entry(seq, v);
2296 }
2297 return 0;
2298}
2299
2300static const struct seq_operations dn_socket_seq_ops = {
2301 .start = dn_socket_seq_start,
2302 .next = dn_socket_seq_next,
2303 .stop = dn_socket_seq_stop,
2304 .show = dn_socket_seq_show,
2305};
2306#endif
2307
2308static const struct net_proto_family dn_family_ops = {
2309 .family = AF_DECnet,
2310 .create = dn_create,
2311 .owner = THIS_MODULE,
2312};
2313
2314static const struct proto_ops dn_proto_ops = {
2315 .family = AF_DECnet,
2316 .owner = THIS_MODULE,
2317 .release = dn_release,
2318 .bind = dn_bind,
2319 .connect = dn_connect,
2320 .socketpair = sock_no_socketpair,
2321 .accept = dn_accept,
2322 .getname = dn_getname,
2323 .poll = dn_poll,
2324 .ioctl = dn_ioctl,
2325 .listen = dn_listen,
2326 .shutdown = dn_shutdown,
2327 .setsockopt = dn_setsockopt,
2328 .getsockopt = dn_getsockopt,
2329 .sendmsg = dn_sendmsg,
2330 .recvmsg = dn_recvmsg,
2331 .mmap = sock_no_mmap,
2332 .sendpage = sock_no_sendpage,
2333};
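
/*
 * Purely illustrative userspace sketch (added; not compiled as part of this
 * file, error handling and byte-order conversions omitted) showing how the
 * proto_ops above are typically exercised; the object name is an example
 * value only:
 *
 *   struct sockaddr_dn sa = { .sdn_family = AF_DECnet };
 *   int s = socket(AF_DECnet, SOCK_SEQPACKET, DNPROTO_NSP);   // dn_create()
 *
 *   sa.sdn_objnamel = 6;
 *   memcpy(sa.sdn_objname, "MIRROR", 6);
 *   bind(s, (struct sockaddr *)&sa, sizeof(sa));              // dn_bind()
 *   listen(s, 5);                      // dn_listen(), rehashes on the name
 */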
2334
2335MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
2336MODULE_AUTHOR("Linux DECnet Project Team");
2337MODULE_LICENSE("GPL");
2338MODULE_ALIAS_NETPROTO(PF_DECnet);
2339
2340static const char banner[] __initconst = KERN_INFO
2341"NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
2342
2343static int __init decnet_init(void)
2344{
2345 int rc;
2346
2347 printk(banner);
2348
2349 rc = proto_register(&dn_proto, 1);
2350 if (rc != 0)
2351 goto out;
2352
2353 dn_neigh_init();
2354 dn_dev_init();
2355 dn_route_init();
2356 dn_fib_init();
2357
2358 sock_register(&dn_family_ops);
2359 dev_add_pack(&dn_dix_packet_type);
2360 register_netdevice_notifier(&dn_dev_notifier);
2361
2362 proc_create_seq_private("decnet", 0444, init_net.proc_net,
2363 &dn_socket_seq_ops, sizeof(struct dn_iter_state),
2364 NULL);
2365 dn_register_sysctl();
2366out:
2367 return rc;
2368
2369}
2370module_init(decnet_init);
2371
2372/*
2373 * Prevent DECnet module unloading until it's fixed properly.
2374 * Requires an audit of the code to check for memory leaks and
2375 * initialisation problems etc.
2376 */
2377#if 0
2378static void __exit decnet_exit(void)
2379{
2380 sock_unregister(AF_DECnet);
2381 rtnl_unregister_all(PF_DECnet);
2382 dev_remove_pack(&dn_dix_packet_type);
2383
2384 dn_unregister_sysctl();
2385
2386 unregister_netdevice_notifier(&dn_dev_notifier);
2387
2388 dn_route_cleanup();
2389 dn_dev_cleanup();
2390 dn_neigh_cleanup();
2391 dn_fib_cleanup();
2392
2393 remove_proc_entry("decnet", init_net.proc_net);
2394
2395 proto_unregister(&dn_proto);
2396
2397	rcu_barrier(); /* Wait for completion of call_rcu()'s */
2398}
2399module_exit(decnet_exit);
2400#endif