// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
/* raw.c - Raw sockets for protocol family CAN
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/raw.h>
#include <net/sock.h>
#include <net/net_namespace.h>

#define CAN_RAW_VERSION CAN_VERSION

MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
MODULE_ALIAS("can-proto-1");

#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)

#define MASK_ALL 0

/* A raw socket has a list of can_filters attached to it, each receiving
 * the CAN frames matching that filter. If the filter list is empty,
 * no CAN frames will be received by the socket. The default after
 * opening the socket is to have one filter which receives all frames.
 * The filter list is allocated dynamically with the exception of the
 * list containing only one item. This common case is optimized by
 * storing the single filter in dfilter, to avoid using dynamic memory.
 */
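
/* Illustration (not part of this module): a minimal userspace sketch of how
 * an application typically attaches such filters to a CAN_RAW socket with
 * setsockopt(SOL_CAN_RAW, CAN_RAW_FILTER, ...). The interface name "can0"
 * and the CAN IDs are placeholder assumptions.
 *
 *	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *	struct ifreq ifr;
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *	struct can_filter rfilter[2];
 *
 *	strcpy(ifr.ifr_name, "can0");
 *	ioctl(s, SIOCGIFINDEX, &ifr);
 *	addr.can_ifindex = ifr.ifr_ifindex;
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	rfilter[0].can_id   = 0x123;		// match ID 0x123 exactly
 *	rfilter[0].can_mask = CAN_SFF_MASK;
 *	rfilter[1].can_id   = 0x200;		// match IDs 0x200..0x2ff
 *	rfilter[1].can_mask = 0x700;
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
 */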

struct uniqframe {
	int skbcnt;
	const struct sk_buff *skb;
	unsigned int join_rx_count;
};

struct raw_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct list_head notifier;
	int loopback;
	int recv_own_msgs;
	int fd_frames;
	int join_filters;
	int count;                 /* number of active filters */
	struct can_filter dfilter; /* default/single filter */
	struct can_filter *filter; /* pointer to filter(s) */
	can_err_mask_t err_mask;
	struct uniqframe __percpu *uniq;
};

static LIST_HEAD(raw_notifier_list);
static DEFINE_SPINLOCK(raw_notifier_lock);
static struct raw_sock *raw_busy_notifier;

/* Return pointer to store the extra msg flags for raw_recvmsg().
 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 * in skb->cb.
 */
static inline unsigned int *raw_flags(struct sk_buff *skb)
{
	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
			       sizeof(unsigned int));

	/* return pointer after struct sockaddr_can */
	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
}

static inline struct raw_sock *raw_sk(const struct sock *sk)
{
	return (struct raw_sock *)sk;
}

static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* do not pass non-CAN2.0 frames to a legacy socket */
	if (!ro->fd_frames && oskb->len != CAN_MTU)
		return;

	/* eliminate multiple filter matches for the same skb */
	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
		if (ro->join_filters) {
			this_cpu_inc(ro->uniq->join_rx_count);
			/* drop frame until all enabled filters matched */
			if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
				return;
		} else {
			return;
		}
	} else {
		this_cpu_ptr(ro->uniq)->skb = oskb;
		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
		/* drop first frame to check all enabled filters? */
		if (ro->join_filters && ro->count > 1)
			return;
	}

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Put the datagram to the queue so that raw_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * raw_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}

static int raw_enable_filters(struct net *net, struct net_device *dev,
			      struct sock *sk, struct can_filter *filter,
			      int count)
{
	int err = 0;
	int i;

	for (i = 0; i < count; i++) {
		err = can_rx_register(net, dev, filter[i].can_id,
				      filter[i].can_mask,
				      raw_rcv, sk, "raw", sk);
		if (err) {
			/* clean up successfully registered filters */
			while (--i >= 0)
				can_rx_unregister(net, dev, filter[i].can_id,
						  filter[i].can_mask,
						  raw_rcv, sk);
			break;
		}
	}

	return err;
}

static int raw_enable_errfilter(struct net *net, struct net_device *dev,
				struct sock *sk, can_err_mask_t err_mask)
{
	int err = 0;

	if (err_mask)
		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
				      raw_rcv, sk, "raw", sk);

	return err;
}

static void raw_disable_filters(struct net *net, struct net_device *dev,
				struct sock *sk, struct can_filter *filter,
				int count)
{
	int i;

	for (i = 0; i < count; i++)
		can_rx_unregister(net, dev, filter[i].can_id,
				  filter[i].can_mask, raw_rcv, sk);
}

static inline void raw_disable_errfilter(struct net *net,
					 struct net_device *dev,
					 struct sock *sk,
					 can_err_mask_t err_mask)

{
	if (err_mask)
		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
				  raw_rcv, sk);
}

static inline void raw_disable_allfilters(struct net *net,
					  struct net_device *dev,
					  struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
	raw_disable_errfilter(net, dev, sk, ro->err_mask);
}

static int raw_enable_allfilters(struct net *net, struct net_device *dev,
				 struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);
	int err;

	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
	if (!err) {
		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
		if (err)
			raw_disable_filters(net, dev, sk, ro->filter,
					    ro->count);
	}

	return err;
}

static void raw_notify(struct raw_sock *ro, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &ro->sk;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	if (ro->ifindex != dev->ifindex)
		return;

	switch (msg) {
	case NETDEV_UNREGISTER:
		lock_sock(sk);
		/* remove current filters & unregister */
		if (ro->bound)
			raw_disable_allfilters(dev_net(dev), dev, sk);

		if (ro->count > 1)
			kfree(ro->filter);

		ro->ifindex = 0;
		ro->bound = 0;
		ro->count = 0;
		release_sock(sk);

		sk->sk_err = ENODEV;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
		break;

	case NETDEV_DOWN:
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
		break;
	}
}

static int raw_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&raw_notifier_lock);
	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
		spin_unlock(&raw_notifier_lock);
		raw_notify(raw_busy_notifier, msg, dev);
		spin_lock(&raw_notifier_lock);
	}
	raw_busy_notifier = NULL;
	spin_unlock(&raw_notifier_lock);
	return NOTIFY_DONE;
}

static int raw_init(struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	ro->bound = 0;
	ro->ifindex = 0;

	/* set default filter to single entry dfilter */
	ro->dfilter.can_id = 0;
	ro->dfilter.can_mask = MASK_ALL;
	ro->filter = &ro->dfilter;
	ro->count = 1;

	/* set default loopback behaviour */
	ro->loopback = 1;
	ro->recv_own_msgs = 0;
	ro->fd_frames = 0;
	ro->join_filters = 0;

	/* alloc_percpu provides zero'ed memory */
	ro->uniq = alloc_percpu(struct uniqframe);
	if (unlikely(!ro->uniq))
		return -ENOMEM;

	/* set notifier */
	spin_lock(&raw_notifier_lock);
	list_add_tail(&ro->notifier, &raw_notifier_list);
	spin_unlock(&raw_notifier_lock);

	return 0;
}

static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	spin_lock(&raw_notifier_lock);
	while (raw_busy_notifier == ro) {
		spin_unlock(&raw_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&raw_notifier_lock);
	}
	list_del(&ro->notifier);
	spin_unlock(&raw_notifier_lock);

	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (dev) {
				raw_disable_allfilters(dev_net(dev), dev, sk);
				dev_put(dev);
			}
		} else {
			raw_disable_allfilters(sock_net(sk), NULL, sk);
		}
	}

	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->count = 0;
	free_percpu(ro->uniq);

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < RAW_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;

	lock_sock(sk);

	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			err = -ENODEV;
			goto out;
		}
		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), dev, sk);
		dev_put(dev);
	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->ifindex) {
				struct net_device *dev;

				dev = dev_get_by_index(sock_net(sk),
						       ro->ifindex);
				if (dev) {
					raw_disable_allfilters(dev_net(dev),
							       dev, sk);
					dev_put(dev);
				}
			} else {
				raw_disable_allfilters(sock_net(sk), NULL, sk);
			}
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
	}

 out:
	release_sock(sk);

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

	return err;
}

static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
		       int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);

	if (peer)
		return -EOPNOTSUPP;

	memset(addr, 0, RAW_MIN_NAMELEN);
	addr->can_family = AF_CAN;
	addr->can_ifindex = ro->ifindex;

	return RAW_MIN_NAMELEN;
}

static int raw_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL; /* dyn. alloc'ed filters */
	struct can_filter sfilter;        /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_user(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		rtnl_lock();
		lock_sock(sk);

		if (ro->bound && ro->ifindex) {
			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (!dev) {
				if (count > 1)
					kfree(filter);
				err = -ENODEV;
				goto out_fil;
			}
		}

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 &sfilter, 1);
			else
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 filter, count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
					    ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count = count;

 out_fil:
		if (dev)
			dev_put(dev);

		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_user(&err_mask, optval, optlen))
			return -EFAULT;

		err_mask &= CAN_ERR_MASK;

		rtnl_lock();
		lock_sock(sk);

		if (ro->bound && ro->ifindex) {
			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (!dev) {
				err = -ENODEV;
				goto out_err;
			}
		}

		/* remove current error mask */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(sock_net(sk), dev, sk,
						   err_mask);

			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(sock_net(sk), dev, sk,
					      ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

 out_err:
		if (dev)
			dev_put(dev);

		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_user(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_FD_FRAMES:
		if (optlen != sizeof(ro->fd_frames))
			return -EINVAL;

		if (copy_from_user(&ro->fd_frames, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_JOIN_FILTERS:
		if (optlen != sizeof(ro->join_filters))
			return -EINVAL;

		if (copy_from_user(&ro->join_filters, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}
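
/* Illustration (not part of this module): a userspace sketch, assuming an
 * already bound CAN_RAW socket "s", of enabling error frame reception via
 * the CAN_RAW_ERR_FILTER option handled above. The chosen error classes
 * are placeholder assumptions; see <linux/can/error.h> for the full set.
 *
 *	can_err_mask_t err_mask = CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF;
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
 *		   &err_mask, sizeof(err_mask));
 */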

static int raw_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int len;
	void *val;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;
	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		lock_sock(sk);
		if (ro->count > 0) {
			int fsize = ro->count * sizeof(struct can_filter);

			if (len > fsize)
				len = fsize;
			if (copy_to_user(optval, ro->filter, len))
				err = -EFAULT;
		} else {
			len = 0;
		}
		release_sock(sk);

		if (!err)
			err = put_user(len, optlen);
		return err;

	case CAN_RAW_ERR_FILTER:
		if (len > sizeof(can_err_mask_t))
			len = sizeof(can_err_mask_t);
		val = &ro->err_mask;
		break;

	case CAN_RAW_LOOPBACK:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->loopback;
		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->recv_own_msgs;
		break;

	case CAN_RAW_FD_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->fd_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->join_filters;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, val, len))
		return -EFAULT;
	return 0;
}

static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err;

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < RAW_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else {
		ifindex = ro->ifindex;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev)
		return -ENXIO;

	err = -EINVAL;
	if (ro->fd_frames && dev->mtu == CANFD_MTU) {
		if (unlikely(size != CANFD_MTU && size != CAN_MTU))
			goto put_dev;
	} else {
		if (unlikely(size != CAN_MTU))
			goto put_dev;
	}

	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto put_dev;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	err = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (err < 0)
		goto free_skb;

	skb_setup_tx_timestamp(skb, sk->sk_tsflags);

	skb->dev = dev;
	skb->sk = sk;
	skb->priority = sk->sk_priority;

	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}

static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err = 0;
	int noblock;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(RAW_MIN_NAMELEN);
		msg->msg_namelen = RAW_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in raw_rcv() */
	msg->msg_flags |= *(raw_flags(skb));

	skb_free_datagram(sk, skb);

	return size;
}
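
/* Illustration (not part of this module): a minimal userspace sketch of
 * sending and receiving classic CAN frames on a bound CAN_RAW socket "s"
 * (the socket setup from the filter example above is assumed). Frame ID
 * and payload are placeholders.
 *
 *	struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
 *				   .data = { 0x11, 0x22 } };
 *
 *	write(s, &frame, CAN_MTU);	// handled by raw_sendmsg()
 *	read(s, &frame, CAN_MTU);	// handled by raw_recvmsg()
 */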

static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}

static const struct proto_ops raw_ops = {
	.family		= PF_CAN,
	.release	= raw_release,
	.bind		= raw_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= raw_getname,
	.poll		= datagram_poll,
	.ioctl		= raw_sock_no_ioctlcmd,
	.gettstamp	= sock_gettstamp,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= raw_setsockopt,
	.getsockopt	= raw_getsockopt,
	.sendmsg	= raw_sendmsg,
	.recvmsg	= raw_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto raw_proto __read_mostly = {
	.name		= "CAN_RAW",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct raw_sock),
	.init		= raw_init,
};

static const struct can_proto raw_can_proto = {
	.type		= SOCK_RAW,
	.protocol	= CAN_RAW,
	.ops		= &raw_ops,
	.prot		= &raw_proto,
};

static struct notifier_block canraw_notifier = {
	.notifier_call = raw_notifier
};

static __init int raw_module_init(void)
{
	int err;

	pr_info("can: raw protocol (rev " CAN_RAW_VERSION ")\n");

	err = can_proto_register(&raw_can_proto);
	if (err < 0)
		pr_err("can: registration of raw protocol failed\n");
	else
		register_netdevice_notifier(&canraw_notifier);

	return err;
}

static __exit void raw_module_exit(void)
{
	can_proto_unregister(&raw_can_proto);
	unregister_netdevice_notifier(&canraw_notifier);
}

module_init(raw_module_init);
module_exit(raw_module_exit);