blob: 1e17778d5ceeb5394eab2d8ed30ba97b1acdd1e7 [file] [log] [blame]
David Brazdil0f672f62019-12-10 10:32:29 +00001// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002/*
3 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
4 *
5 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Volkswagen nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * Alternatively, provided that this notice is retained in full, this
21 * software may be distributed under the terms of the GNU General
22 * Public License ("GPL") version 2, in which case the provisions of the
23 * GPL apply INSTEAD OF those given above.
24 *
25 * The provided data structures and external interfaces from this code
26 * are not restricted to be used by modules with a GPL compatible license.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
39 * DAMAGE.
40 *
41 */
42
43#include <linux/module.h>
44#include <linux/init.h>
45#include <linux/interrupt.h>
46#include <linux/hrtimer.h>
47#include <linux/list.h>
48#include <linux/proc_fs.h>
49#include <linux/seq_file.h>
50#include <linux/uio.h>
51#include <linux/net.h>
52#include <linux/netdevice.h>
53#include <linux/socket.h>
54#include <linux/if_arp.h>
55#include <linux/skbuff.h>
56#include <linux/can.h>
57#include <linux/can/core.h>
58#include <linux/can/skb.h>
59#include <linux/can/bcm.h>
60#include <linux/slab.h>
61#include <net/sock.h>
62#include <net/net_namespace.h>
63
64/*
65 * To send multiple CAN frame content within TX_SETUP or to filter
66 * CAN messages with multiplex index within RX_SETUP, the number of
67 * different filters is limited to 256 due to the one byte index value.
68 */
69#define MAX_NFRAMES 256
70
David Brazdil0f672f62019-12-10 10:32:29 +000071/* limit timers to 400 days for sending/timeouts */
72#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
73
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000074/* use of last_frames[index].flags */
75#define RX_RECV 0x40 /* received data for this element */
76#define RX_THR 0x80 /* element not been sent due to throttle feature */
77#define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */
78
79/* get best masking value for can_rx_register() for a given single can_id */
80#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
81 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
82 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
83
84#define CAN_BCM_VERSION "20170425"
85
86MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
87MODULE_LICENSE("Dual BSD/GPL");
88MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
89MODULE_ALIAS("can-proto-2");
90
Olivier Deprez0e641232021-09-23 10:07:05 +020091#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
92
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000093/*
94 * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
95 * 64 bit aligned so the offset has to be multiples of 8 which is ensured
96 * by the only callers in bcm_rx_cmp_to_index() bcm_rx_handler().
97 */
98static inline u64 get_u64(const struct canfd_frame *cp, int offset)
99{
100 return *(u64 *)(cp->data + offset);
101}
102
/* one broadcast manager operation: either a cyclic tx job or a rx filter */
struct bcm_op {
	struct list_head list;		/* anchor in bcm_sock rx_ops/tx_ops */
	int ifindex;			/* bound CAN interface (0 = any) */
	canid_t can_id;			/* CAN ID this op sends / filters on */
	u32 flags;			/* BCM opcode flags (incl. CAN_FD_FRAME) */
	unsigned long frames_abs, frames_filtered; /* statistics counters */
	struct bcm_timeval ival1, ival2; /* user supplied intervals */
	struct hrtimer timer, thrtimer;	/* tx cycle / rx timeout & rx throttle */
	/* rx timestamp, ktime conversions of ival1/2, last RX_CHANGED time */
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;			/* ifindex of last received frame */
	int cfsiz;			/* element size: CAN_MTU or CANFD_MTU */
	u32 count;			/* remaining tx cycles using kt_ival1 */
	u32 nframes;			/* number of elements in frames[] */
	u32 currframe;			/* index of next frame to send */
	/* void pointers to arrays of struct can[fd]_frame */
	void *frames;
	void *last_frames;
	struct canfd_frame sframe;	/* frames storage when nframes == 1 */
	struct canfd_frame last_sframe;	/* ditto for last_frames */
	struct sock *sk;		/* owning BCM socket */
	struct net_device *rx_reg_dev;	/* device used for can_rx_register() */
};
125
/* per-socket BCM state, embedding the generic struct sock */
struct bcm_sock {
	struct sock sk;			/* must stay first: bcm_sk() casts sock* */
	int bound;			/* NOTE(review): presumably set on connect/bind - code not in view */
	int ifindex;			/* interface the socket is bound to (0 = any) */
	struct list_head notifier;	/* entry in global bcm_notifier_list */
	struct list_head rx_ops;	/* this socket's rx operations */
	struct list_head tx_ops;	/* this socket's tx operations */
	unsigned long dropped_usr_msgs;	/* msgs lost on sock_queue_rcv_skb() failure */
	struct proc_dir_entry *bcm_proc_read;	/* procfs entry of this socket */
	char procname [32]; /* inode number in decimal with \0 */
};
137
/*
 * Global list of BCM sockets for netdevice notification handling,
 * protected by bcm_notifier_lock. bcm_busy_notifier appears to mark
 * the socket currently being processed - TODO confirm: the notifier
 * code using these is not visible in this part of the file.
 */
static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;
141
/*
 * bcm_sk - cast a generic sock to the embedding bcm_sock.
 * Valid because struct sock is the first member of struct bcm_sock.
 */
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}
146
/* convert a user supplied bcm_timeval into the kernel ktime_t representation */
static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
{
	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}
151
David Brazdil0f672f62019-12-10 10:32:29 +0000152/* check limitations for timeval provided by user */
153static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
154{
155 if ((msg_head->ival1.tv_sec < 0) ||
156 (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
157 (msg_head->ival1.tv_usec < 0) ||
158 (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
159 (msg_head->ival2.tv_sec < 0) ||
160 (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
161 (msg_head->ival2.tv_usec < 0) ||
162 (msg_head->ival2.tv_usec >= USEC_PER_SEC))
163 return true;
164
165 return false;
166}
167
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000168#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
169#define OPSIZ sizeof(struct bcm_op)
170#define MHSIZ sizeof(struct bcm_msg_head)
171
172/*
173 * procfs functions
174 */
175#if IS_ENABLED(CONFIG_PROC_FS)
/*
 * bcm_proc_getifname - resolve an ifindex to a printable name for procfs.
 *
 * @result must provide at least IFNAMSIZ bytes. Returns "any" for
 * ifindex 0 and "???" when the device is no longer present.
 */
static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	/* RCU read section protects the ifindex -> device lookup */
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		strcpy(result, dev->name);
	else
		strcpy(result, "???");
	rcu_read_unlock();

	return result;
}
193
/* bcm_proc_show - dump socket info and all rx/tx ops of one BCM socket */
static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct net *net = m->private;
	struct sock *sk = (struct sock *)PDE_DATA(m->file->f_inode);
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	seq_printf(m, ">>> socket %pK", sk->sk_socket);
	seq_printf(m, " / sk %pK", sk);
	seq_printf(m, " / bo %pK", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	list_for_each_entry(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		/* (n) marks CAN FD ops, [n] classic CAN ops */
		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u)", op->nframes);
		else
			seq_printf(m, "[%u]", op->nframes);

		/* 'd' indicates enabled RX_CHECK_DLC length comparison */
		seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');

		if (op->kt_ival1)
			seq_printf(m, "timeo=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "thr=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		/* percentage of frames suppressed by content filtering */
		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100) ? "near " : "", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		seq_printf(m, "tx_op: %03X %s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u) ", op->nframes);
		else
			seq_printf(m, "[%u] ", op->nframes);

		if (op->kt_ival1)
			seq_printf(m, "t1=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "t2=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');
	return 0;
}
267#endif /* CONFIG_PROC_FS */
268
/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 * of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	/* reserve room for the CAN skb private area in front of the frame */
	skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
	if (!skb)
		goto out;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	skb_put_data(skb, cf, op->cfsiz);

	/* send with loopback */
	skb->dev = dev;
	can_skb_set_owner(skb, op->sk);
	can_send(skb, 1);

	/* update statistics */
	op->currframe++;
	op->frames_abs++;

	/* reached last frame? => wrap around for cyclic transmission */
	if (op->currframe >= op->nframes)
		op->currframe = 0;
out:
	/* drop the reference taken by dev_get_by_index() */
	dev_put(dev);
}
314
/*
 * bcm_send_to_user - send a BCM message to the userspace
 * (consisting of bcm_msg_head + x CAN frames)
 *
 * Best effort: on allocation or queue failure the message is dropped
 * and only the dropped_usr_msgs counter records the loss.
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct canfd_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct canfd_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * op->cfsiz;
	int err;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	skb_put_data(skb, head, sizeof(*head));

	if (head->nframes) {
		/* CAN frames starting here */
		firstframe = (struct canfd_frame *)skb_tail_pointer(skb);

		skb_put_data(skb, frames, datalen);

		/*
		 * the BCM uses the flags-element of the canfd_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1)
			firstframe->flags &= BCM_CAN_FLAGS_MASK;
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram to the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		kfree_skb(skb);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}
378
David Brazdil0f672f62019-12-10 10:32:29 +0000379static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000380{
David Brazdil0f672f62019-12-10 10:32:29 +0000381 ktime_t ival;
382
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000383 if (op->kt_ival1 && op->count)
David Brazdil0f672f62019-12-10 10:32:29 +0000384 ival = op->kt_ival1;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000385 else if (op->kt_ival2)
David Brazdil0f672f62019-12-10 10:32:29 +0000386 ival = op->kt_ival2;
387 else
388 return false;
389
390 hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
391 return true;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000392}
393
David Brazdil0f672f62019-12-10 10:32:29 +0000394static void bcm_tx_start_timer(struct bcm_op *op)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000395{
David Brazdil0f672f62019-12-10 10:32:29 +0000396 if (bcm_tx_set_expiry(op, &op->timer))
397 hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
398}
399
/* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	/* first phase: send op->count frames with interval kt_ival1 */
	if (op->kt_ival1 && (op->count > 0)) {
		op->count--;
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create notification to user - zero-init as the
			 * struct (incl. padding) is copied to userspace
			 */
			memset(&msg_head, 0, sizeof(msg_head));
			msg_head.opcode = TX_EXPIRED;
			msg_head.flags = op->flags;
			msg_head.count = op->count;
			msg_head.ival1 = op->ival1;
			msg_head.ival2 = op->ival2;
			msg_head.can_id = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}
		bcm_can_tx(op);

	} else if (op->kt_ival2) {
		/* second phase: send endlessly with interval kt_ival2 */
		bcm_can_tx(op);
	}

	/* rearm with the interval of the current phase - or stop */
	return bcm_tx_set_expiry(op, &op->timer) ?
		HRTIMER_RESTART : HRTIMER_NORESTART;
}
431
/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore: clear RX_THR but keep
	 * RX_RECV and the user visible flag bits
	 */
	data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);

	/* zero-init: the head is copied to userspace including padding */
	memset(&head, 0, sizeof(head));
	head.opcode = RX_CHANGED;
	head.flags = op->flags;
	head.count = op->count;
	head.ival1 = op->ival1;
	head.ival2 = op->ival2;
	head.can_id = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}
460
/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 * 1. update the last received data
 * 2. send a notification to the user (if possible)
 *
 * With throttling (kt_ival2) enabled the update may be deferred: the
 * frame stays marked RX_THR and is flushed later by bcm_rx_thr_handler().
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct canfd_frame *lastdata,
				   const struct canfd_frame *rxdata)
{
	memcpy(lastdata, rxdata, op->cfsiz);

	/* mark as used and throttled by default */
	lastdata->flags |= (RX_RECV|RX_THR);

	/* throttling mode inactive ? */
	if (!op->kt_ival2) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS_SOFT);
		return;
	}

	/* the gap was that big, that throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}
505
/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 * received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct canfd_frame *rxdata)
{
	/* cf holds the user supplied relevant-bit mask for this element,
	 * lcf the previously received payload
	 */
	struct canfd_frame *cf = op->frames + op->cfsiz * index;
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
	int i;

	/*
	 * no one uses the MSBs of flags for comparison,
	 * so we use it here to detect the first time of reception
	 */

	if (!(lcf->flags & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, lcf, rxdata);
		return;
	}

	/* do a real check in CAN frame data section (64 bit per step,
	 * alignment ensured - see get_u64())
	 */
	for (i = 0; i < rxdata->len; i += 8) {
		if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
		    (get_u64(cf, i) & get_u64(lcf, i))) {
			bcm_rx_update_and_send(op, lcf, rxdata);
			return;
		}
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in CAN frame length */
		if (rxdata->len != lcf->len) {
			bcm_rx_update_and_send(op, lcf, rxdata);
			return;
		}
	}
}
545
546/*
547 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
548 */
549static void bcm_rx_starttimer(struct bcm_op *op)
550{
551 if (op->flags & RX_NO_AUTOTIMER)
552 return;
553
554 if (op->kt_ival1)
David Brazdil0f672f62019-12-10 10:32:29 +0000555 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000556}
557
/* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	/* if user wants to be informed, when cyclic CAN-Messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received CAN frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * op->cfsiz);
	}

	/* create notification to user - zero-init as the struct
	 * (incl. padding) is copied to userspace
	 */
	memset(&msg_head, 0, sizeof(msg_head));
	msg_head.opcode = RX_TIMEOUT;
	msg_head.flags = op->flags;
	msg_head.count = op->count;
	msg_head.ival1 = op->ival1;
	msg_head.ival2 = op->ival2;
	msg_head.can_id = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);

	return HRTIMER_NORESTART;
}
584
585/*
586 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
587 */
David Brazdil0f672f62019-12-10 10:32:29 +0000588static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000589{
590 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
591
592 if ((op->last_frames) && (lcf->flags & RX_THR)) {
David Brazdil0f672f62019-12-10 10:32:29 +0000593 bcm_rx_changed(op, lcf);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000594 return 1;
595 }
596 return 0;
597}
598
599/*
600 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000601 */
David Brazdil0f672f62019-12-10 10:32:29 +0000602static int bcm_rx_thr_flush(struct bcm_op *op)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000603{
604 int updated = 0;
605
606 if (op->nframes > 1) {
607 unsigned int i;
608
609 /* for MUX filter we start at index 1 */
610 for (i = 1; i < op->nframes; i++)
David Brazdil0f672f62019-12-10 10:32:29 +0000611 updated += bcm_rx_do_flush(op, i);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000612
613 } else {
614 /* for RX_FILTER_ID and simple filter */
David Brazdil0f672f62019-12-10 10:32:29 +0000615 updated += bcm_rx_do_flush(op, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000616 }
617
618 return updated;
619}
620
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000621/*
622 * bcm_rx_thr_handler - the time for blocked content updates is over now:
623 * Check for throttled data and send it to the userspace
624 */
625static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
626{
627 struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
628
David Brazdil0f672f62019-12-10 10:32:29 +0000629 if (bcm_rx_thr_flush(op)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000630 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
631 return HRTIMER_RESTART;
632 } else {
633 /* rearm throttle handling */
634 op->kt_lastmsg = 0;
635 return HRTIMER_NORESTART;
636 }
637}
638
/*
 * bcm_rx_handler - handle a CAN frame reception
 * (runs as can_rx_register() callback, @data carries the bcm_op)
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
	unsigned int i;

	/* ignore frames not matching this op's CAN ID */
	if (op->can_id != rxframe->can_id)
		return;

	/* make sure to handle the correct frame type (CAN / CAN FD) */
	if (skb->len != op->cfsiz)
		return;

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, op->last_frames, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0 - but only the
		 * first 64 bits of the frame data[] are relevant (CAN FD)
		 */

		for (i = 1; i < op->nframes; i++) {
			if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
			    (get_u64(op->frames, 0) &
			     get_u64(op->frames + op->cfsiz * i, 0))) {
				bcm_rx_cmp_to_index(op, i, rxframe);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}
705
706/*
707 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
708 */
709static struct bcm_op *bcm_find_op(struct list_head *ops,
710 struct bcm_msg_head *mh, int ifindex)
711{
712 struct bcm_op *op;
713
714 list_for_each_entry(op, ops, list) {
715 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
716 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
717 return op;
718 }
719
720 return NULL;
721}
722
723static void bcm_remove_op(struct bcm_op *op)
724{
David Brazdil0f672f62019-12-10 10:32:29 +0000725 hrtimer_cancel(&op->timer);
726 hrtimer_cancel(&op->thrtimer);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000727
728 if ((op->frames) && (op->frames != &op->sframe))
729 kfree(op->frames);
730
731 if ((op->last_frames) && (op->last_frames != &op->last_sframe))
732 kfree(op->last_frames);
733
734 kfree(op);
735}
736
/* bcm_rx_unreg - drop the can_rx_register() subscription of an rx op */
static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev_net(dev), dev, op->can_id,
				  REGMASK(op->can_id), bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}
749
/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems) can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(sock_net(op->sk),
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(sock_net(op->sk), NULL,
						  op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del(&op->list);
			/* wait for concurrent bcm_rx_handler() calls to
			 * finish before the op memory is freed
			 */
			synchronize_rcu();
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}
798
799/*
800 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
801 */
802static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
803 int ifindex)
804{
805 struct bcm_op *op, *n;
806
807 list_for_each_entry_safe(op, n, ops, list) {
808 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
809 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
810 list_del(&op->list);
811 bcm_remove_op(op);
812 return 1; /* done */
813 }
814 }
815
816 return 0; /* not found */
817}
818
/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 *
 * Returns -EINVAL when no matching op exists, otherwise the consumed
 * message head size (MHSIZ).
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags = op->flags;
	msg_head->count = op->count;
	msg_head->ival1 = op->ival1;
	msg_head->ival2 = op->ival2;
	msg_head->nframes = op->nframes;

	/* reply with the op head and all stored CAN frames */
	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}
841
/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 *
 * Returns the number of consumed bytes (msg head + nframes CAN frames)
 * on success or a negative error code.
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	struct canfd_frame *cf;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one CAN frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update CAN frames content */
		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);

			/* the frame len must fit the frame type (CAN/CAN FD) */
			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}
		op->flags = msg_head->flags;

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;

		/* create array for CAN frames and copy the data
		 * (single frames use the embedded sframe storage)
		 */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc_array(msg_head->nframes,
						   op->cfsiz,
						   GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);

			/* the frame len must fit the frame type (CAN/CAN FD) */
			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0) {
				/* op is not linked yet - free everything */
				if (op->frames != &op->sframe)
					kfree(op->frames);
				kfree(op);
				return err;
			}

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);
		op->timer.function = bcm_tx_timeout_handler;

		/* currently unused in tx_ops */
		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->nframes != msg_head->nframes) {
		op->nframes = msg_head->nframes;
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	/* check flags */

	if (op->flags & TX_RESET_MULTI_IDX) {
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->count = msg_head->count;
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1 && !op->kt_ival2)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send CAN frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE) {
		bcm_can_tx(op);
		/* the announced frame counts towards the ival1 phase */
		if (op->count)
			op->count--;
	}

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	/* return the consumed length: msg head + nframes CAN frames */
	return msg_head->nframes * op->cfsiz + MHSIZ;
}
1016
1017/*
1018 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
1019 */
1020static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1021 int ifindex, struct sock *sk)
1022{
1023 struct bcm_sock *bo = bcm_sk(sk);
1024 struct bcm_op *op;
1025 int do_rx_register;
1026 int err = 0;
1027
1028 if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
1029 /* be robust against wrong usage ... */
1030 msg_head->flags |= RX_FILTER_ID;
1031 /* ignore trailing garbage */
1032 msg_head->nframes = 0;
1033 }
1034
1035 /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
1036 if (msg_head->nframes > MAX_NFRAMES + 1)
1037 return -EINVAL;
1038
1039 if ((msg_head->flags & RX_RTR_FRAME) &&
1040 ((msg_head->nframes != 1) ||
1041 (!(msg_head->can_id & CAN_RTR_FLAG))))
1042 return -EINVAL;
1043
David Brazdil0f672f62019-12-10 10:32:29 +00001044 /* check timeval limitations */
1045 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
1046 return -EINVAL;
1047
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001048 /* check the given can_id */
1049 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
1050 if (op) {
1051 /* update existing BCM operation */
1052
1053 /*
1054 * Do we need more space for the CAN frames than currently
1055 * allocated? -> This is a _really_ unusual use-case and
1056 * therefore (complexity / locking) it is not supported.
1057 */
1058 if (msg_head->nframes > op->nframes)
1059 return -E2BIG;
1060
1061 if (msg_head->nframes) {
1062 /* update CAN frames content */
1063 err = memcpy_from_msg(op->frames, msg,
1064 msg_head->nframes * op->cfsiz);
1065 if (err < 0)
1066 return err;
1067
1068 /* clear last_frames to indicate 'nothing received' */
1069 memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
1070 }
1071
1072 op->nframes = msg_head->nframes;
1073 op->flags = msg_head->flags;
1074
1075 /* Only an update -> do not call can_rx_register() */
1076 do_rx_register = 0;
1077
1078 } else {
1079 /* insert new BCM operation for the given can_id */
1080 op = kzalloc(OPSIZ, GFP_KERNEL);
1081 if (!op)
1082 return -ENOMEM;
1083
1084 op->can_id = msg_head->can_id;
1085 op->nframes = msg_head->nframes;
1086 op->cfsiz = CFSIZ(msg_head->flags);
1087 op->flags = msg_head->flags;
1088
1089 if (msg_head->nframes > 1) {
1090 /* create array for CAN frames and copy the data */
1091 op->frames = kmalloc_array(msg_head->nframes,
1092 op->cfsiz,
1093 GFP_KERNEL);
1094 if (!op->frames) {
1095 kfree(op);
1096 return -ENOMEM;
1097 }
1098
1099 /* create and init array for received CAN frames */
1100 op->last_frames = kcalloc(msg_head->nframes,
1101 op->cfsiz,
1102 GFP_KERNEL);
1103 if (!op->last_frames) {
1104 kfree(op->frames);
1105 kfree(op);
1106 return -ENOMEM;
1107 }
1108
1109 } else {
1110 op->frames = &op->sframe;
1111 op->last_frames = &op->last_sframe;
1112 }
1113
1114 if (msg_head->nframes) {
1115 err = memcpy_from_msg(op->frames, msg,
1116 msg_head->nframes * op->cfsiz);
1117 if (err < 0) {
1118 if (op->frames != &op->sframe)
1119 kfree(op->frames);
1120 if (op->last_frames != &op->last_sframe)
1121 kfree(op->last_frames);
1122 kfree(op);
1123 return err;
1124 }
1125 }
1126
1127 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1128 op->sk = sk;
1129 op->ifindex = ifindex;
1130
1131 /* ifindex for timeout events w/o previous frame reception */
1132 op->rx_ifindex = ifindex;
1133
1134 /* initialize uninitialized (kzalloc) structure */
David Brazdil0f672f62019-12-10 10:32:29 +00001135 hrtimer_init(&op->timer, CLOCK_MONOTONIC,
1136 HRTIMER_MODE_REL_SOFT);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001137 op->timer.function = bcm_rx_timeout_handler;
1138
David Brazdil0f672f62019-12-10 10:32:29 +00001139 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
1140 HRTIMER_MODE_REL_SOFT);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001141 op->thrtimer.function = bcm_rx_thr_handler;
1142
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001143 /* add this bcm_op to the list of the rx_ops */
1144 list_add(&op->list, &bo->rx_ops);
1145
1146 /* call can_rx_register() */
1147 do_rx_register = 1;
1148
1149 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1150
1151 /* check flags */
1152
1153 if (op->flags & RX_RTR_FRAME) {
1154 struct canfd_frame *frame0 = op->frames;
1155
1156 /* no timers in RTR-mode */
1157 hrtimer_cancel(&op->thrtimer);
1158 hrtimer_cancel(&op->timer);
1159
1160 /*
1161 * funny feature in RX(!)_SETUP only for RTR-mode:
1162 * copy can_id into frame BUT without RTR-flag to
1163 * prevent a full-load-loopback-test ... ;-]
1164 */
1165 if ((op->flags & TX_CP_CAN_ID) ||
1166 (frame0->can_id == op->can_id))
1167 frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
1168
1169 } else {
1170 if (op->flags & SETTIMER) {
1171
1172 /* set timer value */
1173 op->ival1 = msg_head->ival1;
1174 op->ival2 = msg_head->ival2;
1175 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
1176 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1177
1178 /* disable an active timer due to zero value? */
1179 if (!op->kt_ival1)
1180 hrtimer_cancel(&op->timer);
1181
1182 /*
1183 * In any case cancel the throttle timer, flush
1184 * potentially blocked msgs and reset throttle handling
1185 */
1186 op->kt_lastmsg = 0;
1187 hrtimer_cancel(&op->thrtimer);
David Brazdil0f672f62019-12-10 10:32:29 +00001188 bcm_rx_thr_flush(op);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001189 }
1190
1191 if ((op->flags & STARTTIMER) && op->kt_ival1)
1192 hrtimer_start(&op->timer, op->kt_ival1,
David Brazdil0f672f62019-12-10 10:32:29 +00001193 HRTIMER_MODE_REL_SOFT);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001194 }
1195
1196 /* now we can register for can_ids, if we added a new bcm_op */
1197 if (do_rx_register) {
1198 if (ifindex) {
1199 struct net_device *dev;
1200
1201 dev = dev_get_by_index(sock_net(sk), ifindex);
1202 if (dev) {
1203 err = can_rx_register(sock_net(sk), dev,
1204 op->can_id,
1205 REGMASK(op->can_id),
1206 bcm_rx_handler, op,
1207 "bcm", sk);
1208
1209 op->rx_reg_dev = dev;
1210 dev_put(dev);
1211 }
1212
1213 } else
1214 err = can_rx_register(sock_net(sk), NULL, op->can_id,
1215 REGMASK(op->can_id),
1216 bcm_rx_handler, op, "bcm", sk);
1217 if (err) {
1218 /* this bcm rx op is broken -> remove it */
1219 list_del(&op->list);
1220 bcm_remove_op(op);
1221 return err;
1222 }
1223 }
1224
1225 return msg_head->nframes * op->cfsiz + MHSIZ;
1226}
1227
1228/*
1229 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1230 */
1231static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
1232 int cfsiz)
1233{
1234 struct sk_buff *skb;
1235 struct net_device *dev;
1236 int err;
1237
1238 /* we need a real device to send frames */
1239 if (!ifindex)
1240 return -ENODEV;
1241
1242 skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
1243 if (!skb)
1244 return -ENOMEM;
1245
1246 can_skb_reserve(skb);
1247
1248 err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
1249 if (err < 0) {
1250 kfree_skb(skb);
1251 return err;
1252 }
1253
1254 dev = dev_get_by_index(sock_net(sk), ifindex);
1255 if (!dev) {
1256 kfree_skb(skb);
1257 return -ENODEV;
1258 }
1259
1260 can_skb_prv(skb)->ifindex = dev->ifindex;
1261 can_skb_prv(skb)->skbcnt = 0;
1262 skb->dev = dev;
1263 can_skb_set_owner(skb, sk);
1264 err = can_send(skb, 1); /* send with loopback */
1265 dev_put(dev);
1266
1267 if (err)
1268 return err;
1269
1270 return cfsiz + MHSIZ;
1271}
1272
1273/*
1274 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
1275 */
1276static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1277{
1278 struct sock *sk = sock->sk;
1279 struct bcm_sock *bo = bcm_sk(sk);
1280 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1281 struct bcm_msg_head msg_head;
1282 int cfsiz;
1283 int ret; /* read bytes or error codes as return value */
1284
1285 if (!bo->bound)
1286 return -ENOTCONN;
1287
1288 /* check for valid message length from userspace */
1289 if (size < MHSIZ)
1290 return -EINVAL;
1291
1292 /* read message head information */
1293 ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
1294 if (ret < 0)
1295 return ret;
1296
1297 cfsiz = CFSIZ(msg_head.flags);
1298 if ((size - MHSIZ) % cfsiz)
1299 return -EINVAL;
1300
1301 /* check for alternative ifindex for this bcm_op */
1302
1303 if (!ifindex && msg->msg_name) {
1304 /* no bound device as default => check msg_name */
1305 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
1306
Olivier Deprez0e641232021-09-23 10:07:05 +02001307 if (msg->msg_namelen < BCM_MIN_NAMELEN)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001308 return -EINVAL;
1309
1310 if (addr->can_family != AF_CAN)
1311 return -EINVAL;
1312
1313 /* ifindex from sendto() */
1314 ifindex = addr->can_ifindex;
1315
1316 if (ifindex) {
1317 struct net_device *dev;
1318
1319 dev = dev_get_by_index(sock_net(sk), ifindex);
1320 if (!dev)
1321 return -ENODEV;
1322
1323 if (dev->type != ARPHRD_CAN) {
1324 dev_put(dev);
1325 return -ENODEV;
1326 }
1327
1328 dev_put(dev);
1329 }
1330 }
1331
1332 lock_sock(sk);
1333
1334 switch (msg_head.opcode) {
1335
1336 case TX_SETUP:
1337 ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1338 break;
1339
1340 case RX_SETUP:
1341 ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1342 break;
1343
1344 case TX_DELETE:
1345 if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
1346 ret = MHSIZ;
1347 else
1348 ret = -EINVAL;
1349 break;
1350
1351 case RX_DELETE:
1352 if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
1353 ret = MHSIZ;
1354 else
1355 ret = -EINVAL;
1356 break;
1357
1358 case TX_READ:
1359 /* reuse msg_head for the reply to TX_READ */
1360 msg_head.opcode = TX_STATUS;
1361 ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1362 break;
1363
1364 case RX_READ:
1365 /* reuse msg_head for the reply to RX_READ */
1366 msg_head.opcode = RX_STATUS;
1367 ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1368 break;
1369
1370 case TX_SEND:
1371 /* we need exactly one CAN frame behind the msg head */
1372 if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
1373 ret = -EINVAL;
1374 else
1375 ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
1376 break;
1377
1378 default:
1379 ret = -EINVAL;
1380 break;
1381 }
1382
1383 release_sock(sk);
1384
1385 return ret;
1386}
1387
1388/*
1389 * notification handler for netdevice status changes
1390 */
Olivier Deprez0e641232021-09-23 10:07:05 +02001391static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
1392 struct net_device *dev)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001393{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001394 struct sock *sk = &bo->sk;
1395 struct bcm_op *op;
1396 int notify_enodev = 0;
1397
1398 if (!net_eq(dev_net(dev), sock_net(sk)))
Olivier Deprez0e641232021-09-23 10:07:05 +02001399 return;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001400
1401 switch (msg) {
1402
1403 case NETDEV_UNREGISTER:
1404 lock_sock(sk);
1405
1406 /* remove device specific receive entries */
1407 list_for_each_entry(op, &bo->rx_ops, list)
1408 if (op->rx_reg_dev == dev)
1409 bcm_rx_unreg(dev, op);
1410
1411 /* remove device reference, if this is our bound device */
1412 if (bo->bound && bo->ifindex == dev->ifindex) {
1413 bo->bound = 0;
1414 bo->ifindex = 0;
1415 notify_enodev = 1;
1416 }
1417
1418 release_sock(sk);
1419
1420 if (notify_enodev) {
1421 sk->sk_err = ENODEV;
1422 if (!sock_flag(sk, SOCK_DEAD))
1423 sk->sk_error_report(sk);
1424 }
1425 break;
1426
1427 case NETDEV_DOWN:
1428 if (bo->bound && bo->ifindex == dev->ifindex) {
1429 sk->sk_err = ENETDOWN;
1430 if (!sock_flag(sk, SOCK_DEAD))
1431 sk->sk_error_report(sk);
1432 }
1433 }
Olivier Deprez0e641232021-09-23 10:07:05 +02001434}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001435
Olivier Deprez0e641232021-09-23 10:07:05 +02001436static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1437 void *ptr)
1438{
1439 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1440
1441 if (dev->type != ARPHRD_CAN)
1442 return NOTIFY_DONE;
1443 if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
1444 return NOTIFY_DONE;
1445 if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
1446 return NOTIFY_DONE;
1447
1448 spin_lock(&bcm_notifier_lock);
1449 list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
1450 spin_unlock(&bcm_notifier_lock);
1451 bcm_notify(bcm_busy_notifier, msg, dev);
1452 spin_lock(&bcm_notifier_lock);
1453 }
1454 bcm_busy_notifier = NULL;
1455 spin_unlock(&bcm_notifier_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001456 return NOTIFY_DONE;
1457}
1458
1459/*
1460 * initial settings for all BCM sockets to be set at socket creation time
1461 */
1462static int bcm_init(struct sock *sk)
1463{
1464 struct bcm_sock *bo = bcm_sk(sk);
1465
1466 bo->bound = 0;
1467 bo->ifindex = 0;
1468 bo->dropped_usr_msgs = 0;
1469 bo->bcm_proc_read = NULL;
1470
1471 INIT_LIST_HEAD(&bo->tx_ops);
1472 INIT_LIST_HEAD(&bo->rx_ops);
1473
1474 /* set notifier */
Olivier Deprez0e641232021-09-23 10:07:05 +02001475 spin_lock(&bcm_notifier_lock);
1476 list_add_tail(&bo->notifier, &bcm_notifier_list);
1477 spin_unlock(&bcm_notifier_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001478
1479 return 0;
1480}
1481
1482/*
1483 * standard socket functions
1484 */
1485static int bcm_release(struct socket *sock)
1486{
1487 struct sock *sk = sock->sk;
1488 struct net *net;
1489 struct bcm_sock *bo;
1490 struct bcm_op *op, *next;
1491
1492 if (!sk)
1493 return 0;
1494
1495 net = sock_net(sk);
1496 bo = bcm_sk(sk);
1497
1498 /* remove bcm_ops, timer, rx_unregister(), etc. */
1499
Olivier Deprez0e641232021-09-23 10:07:05 +02001500 spin_lock(&bcm_notifier_lock);
1501 while (bcm_busy_notifier == bo) {
1502 spin_unlock(&bcm_notifier_lock);
1503 schedule_timeout_uninterruptible(1);
1504 spin_lock(&bcm_notifier_lock);
1505 }
1506 list_del(&bo->notifier);
1507 spin_unlock(&bcm_notifier_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001508
1509 lock_sock(sk);
1510
1511 list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1512 bcm_remove_op(op);
1513
1514 list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1515 /*
1516 * Don't care if we're bound or not (due to netdev problems)
1517 * can_rx_unregister() is always a save thing to do here.
1518 */
1519 if (op->ifindex) {
1520 /*
1521 * Only remove subscriptions that had not
1522 * been removed due to NETDEV_UNREGISTER
1523 * in bcm_notifier()
1524 */
1525 if (op->rx_reg_dev) {
1526 struct net_device *dev;
1527
1528 dev = dev_get_by_index(net, op->ifindex);
1529 if (dev) {
1530 bcm_rx_unreg(dev, op);
1531 dev_put(dev);
1532 }
1533 }
1534 } else
1535 can_rx_unregister(net, NULL, op->can_id,
1536 REGMASK(op->can_id),
1537 bcm_rx_handler, op);
1538
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001539 }
1540
Olivier Deprez0e641232021-09-23 10:07:05 +02001541 synchronize_rcu();
1542
1543 list_for_each_entry_safe(op, next, &bo->rx_ops, list)
1544 bcm_remove_op(op);
1545
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001546#if IS_ENABLED(CONFIG_PROC_FS)
1547 /* remove procfs entry */
1548 if (net->can.bcmproc_dir && bo->bcm_proc_read)
1549 remove_proc_entry(bo->procname, net->can.bcmproc_dir);
1550#endif /* CONFIG_PROC_FS */
1551
1552 /* remove device reference */
1553 if (bo->bound) {
1554 bo->bound = 0;
1555 bo->ifindex = 0;
1556 }
1557
1558 sock_orphan(sk);
1559 sock->sk = NULL;
1560
1561 release_sock(sk);
1562 sock_put(sk);
1563
1564 return 0;
1565}
1566
/*
 * bcm_connect - "bind" a BCM socket to a CAN interface (or to all, ifindex 0)
 *
 * A BCM socket is attached via connect() instead of bind(). Also creates the
 * per-socket procfs entry. Returns 0 on success or a negative error code;
 * a second connect() on a bound socket yields -EISCONN.
 */
static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	struct net *net = sock_net(sk);
	int ret = 0;

	if (len < BCM_MIN_NAMELEN)
		return -EINVAL;

	lock_sock(sk);

	if (bo->bound) {
		ret = -EISCONN;
		goto fail;
	}

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(net, addr->can_ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto fail;
		}
		/* only CAN netdevices are acceptable */
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			ret = -ENODEV;
			goto fail;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

#if IS_ENABLED(CONFIG_PROC_FS)
	if (net->can.bcmproc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%lu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
							   net->can.bcmproc_dir,
							   bcm_proc_show, sk);
		if (!bo->bcm_proc_read) {
			ret = -ENOMEM;
			goto fail;
		}
	}
#endif /* CONFIG_PROC_FS */

	bo->bound = 1;

fail:
	release_sock(sk);

	return ret;
}
1630
1631static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1632 int flags)
1633{
1634 struct sock *sk = sock->sk;
1635 struct sk_buff *skb;
1636 int error = 0;
1637 int noblock;
1638 int err;
1639
1640 noblock = flags & MSG_DONTWAIT;
1641 flags &= ~MSG_DONTWAIT;
1642 skb = skb_recv_datagram(sk, flags, noblock, &error);
1643 if (!skb)
1644 return error;
1645
1646 if (skb->len < size)
1647 size = skb->len;
1648
1649 err = memcpy_to_msg(msg, skb->data, size);
1650 if (err < 0) {
1651 skb_free_datagram(sk, skb);
1652 return err;
1653 }
1654
1655 sock_recv_ts_and_drops(msg, sk, skb);
1656
1657 if (msg->msg_name) {
Olivier Deprez0e641232021-09-23 10:07:05 +02001658 __sockaddr_check_size(BCM_MIN_NAMELEN);
1659 msg->msg_namelen = BCM_MIN_NAMELEN;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001660 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1661 }
1662
1663 skb_free_datagram(sk, skb);
1664
1665 return size;
1666}
1667
David Brazdil0f672f62019-12-10 10:32:29 +00001668static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
1669 unsigned long arg)
1670{
1671 /* no ioctls for socket layer -> hand it down to NIC layer */
1672 return -ENOIOCTLCMD;
1673}
1674
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001675static const struct proto_ops bcm_ops = {
1676 .family = PF_CAN,
1677 .release = bcm_release,
1678 .bind = sock_no_bind,
1679 .connect = bcm_connect,
1680 .socketpair = sock_no_socketpair,
1681 .accept = sock_no_accept,
1682 .getname = sock_no_getname,
1683 .poll = datagram_poll,
David Brazdil0f672f62019-12-10 10:32:29 +00001684 .ioctl = bcm_sock_no_ioctlcmd,
1685 .gettstamp = sock_gettstamp,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001686 .listen = sock_no_listen,
1687 .shutdown = sock_no_shutdown,
1688 .setsockopt = sock_no_setsockopt,
1689 .getsockopt = sock_no_getsockopt,
1690 .sendmsg = bcm_sendmsg,
1691 .recvmsg = bcm_recvmsg,
1692 .mmap = sock_no_mmap,
1693 .sendpage = sock_no_sendpage,
1694};
1695
/* protocol definition: per-socket state size and constructor */
static struct proto bcm_proto __read_mostly = {
	.name = "CAN_BCM",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct bcm_sock),
	.init = bcm_init,
};
1702
/* registration record tying CAN_BCM to the CAN protocol family core */
static const struct can_proto bcm_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_BCM,
	.ops = &bcm_ops,
	.prot = &bcm_proto,
};
1709
/* per network namespace setup; a failed (NULL) proc dir is tolerated
 * since all users check net->can.bcmproc_dir before use */
static int canbcm_pernet_init(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* create /proc/net/can-bcm directory */
	net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */

	return 0;
}
1719
/* per network namespace teardown, mirrors canbcm_pernet_init() */
static void canbcm_pernet_exit(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove /proc/net/can-bcm directory */
	if (net->can.bcmproc_dir)
		remove_proc_entry("can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */
}
1728
/* hooks run on creation/destruction of every network namespace */
static struct pernet_operations canbcm_pernet_ops __read_mostly = {
	.init = canbcm_pernet_init,
	.exit = canbcm_pernet_exit,
};
1733
Olivier Deprez0e641232021-09-23 10:07:05 +02001734static struct notifier_block canbcm_notifier = {
1735 .notifier_call = bcm_notifier
1736};
1737
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001738static int __init bcm_module_init(void)
1739{
1740 int err;
1741
1742 pr_info("can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n");
1743
1744 err = can_proto_register(&bcm_can_proto);
1745 if (err < 0) {
1746 printk(KERN_ERR "can: registration of bcm protocol failed\n");
1747 return err;
1748 }
1749
1750 register_pernet_subsys(&canbcm_pernet_ops);
Olivier Deprez0e641232021-09-23 10:07:05 +02001751 register_netdevice_notifier(&canbcm_notifier);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001752 return 0;
1753}
1754
/* module exit: tear down in reverse order of bcm_module_init();
 * unregister the protocol first so no new sockets can appear */
static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);
	unregister_netdevice_notifier(&canbcm_notifier);
	unregister_pernet_subsys(&canbcm_pernet_ops);
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);