1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
4 *
5 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Volkswagen nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * Alternatively, provided that this notice is retained in full, this
21 * software may be distributed under the terms of the GNU General
22 * Public License ("GPL") version 2, in which case the provisions of the
23 * GPL apply INSTEAD OF those given above.
24 *
25 * The provided data structures and external interfaces from this code
26 * are not restricted to be used by modules with a GPL compatible license.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
39 * DAMAGE.
40 *
41 */
42
43 #include <linux/module.h>
44 #include <linux/init.h>
45 #include <linux/interrupt.h>
46 #include <linux/hrtimer.h>
47 #include <linux/list.h>
48 #include <linux/proc_fs.h>
49 #include <linux/seq_file.h>
50 #include <linux/uio.h>
51 #include <linux/net.h>
52 #include <linux/netdevice.h>
53 #include <linux/socket.h>
54 #include <linux/if_arp.h>
55 #include <linux/skbuff.h>
56 #include <linux/can.h>
57 #include <linux/can/core.h>
58 #include <linux/can/skb.h>
59 #include <linux/can/bcm.h>
60 #include <linux/slab.h>
61 #include <linux/spinlock.h>
62 #include <net/sock.h>
63 #include <net/net_namespace.h>
64
65 /*
66 * To send multiple CAN frame contents within TX_SETUP or to filter
67 * CAN messages with a multiplex index within RX_SETUP, the number of
68 * different filters is limited to 256 due to the one byte index value.
69 */
70 #define MAX_NFRAMES 256
71
72 /* limit timers to 400 days for sending/timeouts */
73 #define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
74
75 /* use of last_frames[index].flags */
76 #define RX_LOCAL 0x10 /* frame was created on the local host */
77 #define RX_OWN 0x20 /* frame was sent via the socket it was received on */
78 #define RX_RECV 0x40 /* received data for this element */
79 #define RX_THR 0x80 /* element has not been sent due to the throttle feature */
80 #define BCM_CAN_FLAGS_MASK 0x0F /* to clean private flags after usage */
81
82 /* get best masking value for can_rx_register() for a given single can_id */
83 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
84 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
85 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
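
/*
 * Example (illustrative): REGMASK(0x123) selects
 * CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG, so a subscription for a
 * standard ID compares the full 11-bit identifier plus the EFF and RTR
 * flag bits of op->can_id, while REGMASK() of an id with CAN_EFF_FLAG
 * set compares all 29 bits of the extended identifier instead.
 */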
86
87 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
88 MODULE_LICENSE("Dual BSD/GPL");
89 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
90 MODULE_ALIAS("can-proto-2");
91
92 #define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
93
94 /*
95 * easy access to the first 64 bits of the can(fd)_frame payload. cp->data
96 * is 64 bit aligned so the offset has to be a multiple of 8, which is
97 * ensured by the only callers bcm_rx_cmp_to_index() and bcm_rx_handler().
98 */
99 static inline u64 get_u64(const struct canfd_frame *cp, int offset)
100 {
101 return *(u64 *)(cp->data + offset);
102 }
103
104 struct bcm_op {
105 struct list_head list;
106 struct rcu_head rcu;
107 int ifindex;
108 canid_t can_id;
109 u32 flags;
110 unsigned long frames_abs, frames_filtered;
111 struct bcm_timeval ival1, ival2;
112 struct hrtimer timer, thrtimer;
113 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
114 int rx_ifindex;
115 int cfsiz;
116 u32 count;
117 u32 nframes;
118 u32 currframe;
119 /* void pointers to arrays of struct can[fd]_frame */
120 void *frames;
121 void *last_frames;
122 struct canfd_frame sframe;
123 struct canfd_frame last_sframe;
124 struct sock *sk;
125 struct net_device *rx_reg_dev;
126 spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */
127 };
128
129 struct bcm_sock {
130 struct sock sk;
131 int bound;
132 int ifindex;
133 struct list_head notifier;
134 struct list_head rx_ops;
135 struct list_head tx_ops;
136 unsigned long dropped_usr_msgs;
137 struct proc_dir_entry *bcm_proc_read;
138 char procname[32]; /* inode number in decimal with \0 */
139 };
140
141 static LIST_HEAD(bcm_notifier_list);
142 static DEFINE_SPINLOCK(bcm_notifier_lock);
143 static struct bcm_sock *bcm_busy_notifier;
144
145 /* Return pointer to store the extra msg flags for bcm_recvmsg().
146 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
147 * in skb->cb.
148 */
149 static inline unsigned int *bcm_flags(struct sk_buff *skb)
150 {
151 /* return pointer after struct sockaddr_can */
152 return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
153 }
154
155 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
156 {
157 return (struct bcm_sock *)sk;
158 }
159
160 static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
161 {
162 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
163 }
164
165 /* check limitations for timeval provided by user */
166 static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
167 {
168 if ((msg_head->ival1.tv_sec < 0) ||
169 (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
170 (msg_head->ival1.tv_usec < 0) ||
171 (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
172 (msg_head->ival2.tv_sec < 0) ||
173 (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
174 (msg_head->ival2.tv_usec < 0) ||
175 (msg_head->ival2.tv_usec >= USEC_PER_SEC))
176 return true;
177
178 return false;
179 }
180
181 #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
182 #define OPSIZ sizeof(struct bcm_op)
183 #define MHSIZ sizeof(struct bcm_msg_head)
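
/*
 * Illustrative note (not part of the module): userspace passes BCM
 * commands as a struct bcm_msg_head directly followed by
 * msg_head.nframes CAN frames of the size selected via CAN_FD_FRAME,
 * for example:
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frames[2];
 *	} txmsg;
 *
 * The expected message size is therefore MHSIZ + nframes * CFSIZ(flags),
 * which is exactly what bcm_sendmsg() checks and what bcm_tx_setup() /
 * bcm_rx_setup() return on success.
 */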
184
185 /*
186 * procfs functions
187 */
188 #if IS_ENABLED(CONFIG_PROC_FS)
189 static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
190 {
191 struct net_device *dev;
192
193 if (!ifindex)
194 return "any";
195
196 rcu_read_lock();
197 dev = dev_get_by_index_rcu(net, ifindex);
198 if (dev)
199 strcpy(result, dev->name);
200 else
201 strcpy(result, "???");
202 rcu_read_unlock();
203
204 return result;
205 }
206
207 static int bcm_proc_show(struct seq_file *m, void *v)
208 {
209 char ifname[IFNAMSIZ];
210 struct net *net = m->private;
211 struct sock *sk = (struct sock *)pde_data(m->file->f_inode);
212 struct bcm_sock *bo = bcm_sk(sk);
213 struct bcm_op *op;
214
215 seq_printf(m, ">>> socket %pK", sk->sk_socket);
216 seq_printf(m, " / sk %pK", sk);
217 seq_printf(m, " / bo %pK", bo);
218 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
219 seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
220 seq_printf(m, " <<<\n");
221
222 rcu_read_lock();
223
224 list_for_each_entry_rcu(op, &bo->rx_ops, list) {
225
226 unsigned long reduction;
227
228 /* print only active entries & prevent division by zero */
229 if (!op->frames_abs)
230 continue;
231
232 seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
233 bcm_proc_getifname(net, ifname, op->ifindex));
234
235 if (op->flags & CAN_FD_FRAME)
236 seq_printf(m, "(%u)", op->nframes);
237 else
238 seq_printf(m, "[%u]", op->nframes);
239
240 seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
241
242 if (op->kt_ival1)
243 seq_printf(m, "timeo=%lld ",
244 (long long)ktime_to_us(op->kt_ival1));
245
246 if (op->kt_ival2)
247 seq_printf(m, "thr=%lld ",
248 (long long)ktime_to_us(op->kt_ival2));
249
250 seq_printf(m, "# recv %ld (%ld) => reduction: ",
251 op->frames_filtered, op->frames_abs);
252
253 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
254
255 seq_printf(m, "%s%ld%%\n",
256 (reduction == 100) ? "near " : "", reduction);
257 }
258
259 list_for_each_entry(op, &bo->tx_ops, list) {
260
261 seq_printf(m, "tx_op: %03X %s ", op->can_id,
262 bcm_proc_getifname(net, ifname, op->ifindex));
263
264 if (op->flags & CAN_FD_FRAME)
265 seq_printf(m, "(%u) ", op->nframes);
266 else
267 seq_printf(m, "[%u] ", op->nframes);
268
269 if (op->kt_ival1)
270 seq_printf(m, "t1=%lld ",
271 (long long)ktime_to_us(op->kt_ival1));
272
273 if (op->kt_ival2)
274 seq_printf(m, "t2=%lld ",
275 (long long)ktime_to_us(op->kt_ival2));
276
277 seq_printf(m, "# sent %ld\n", op->frames_abs);
278 }
279 seq_putc(m, '\n');
280
281 rcu_read_unlock();
282
283 return 0;
284 }
285 #endif /* CONFIG_PROC_FS */
286
287 /*
288 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
289 * of the given bcm tx op
290 */
291 static void bcm_can_tx(struct bcm_op *op)
292 {
293 struct sk_buff *skb;
294 struct net_device *dev;
295 struct canfd_frame *cf;
296 int err;
297
298 /* no target device? => exit */
299 if (!op->ifindex)
300 return;
301
302 /* read currframe under lock protection */
303 spin_lock_bh(&op->bcm_tx_lock);
304 cf = op->frames + op->cfsiz * op->currframe;
305 spin_unlock_bh(&op->bcm_tx_lock);
306
307 dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
308 if (!dev) {
309 /* RFC: should this bcm_op remove itself here? */
310 return;
311 }
312
313 skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
314 if (!skb)
315 goto out;
316
317 can_skb_reserve(skb);
318 can_skb_prv(skb)->ifindex = dev->ifindex;
319 can_skb_prv(skb)->skbcnt = 0;
320
321 skb_put_data(skb, cf, op->cfsiz);
322
323 /* send with loopback */
324 skb->dev = dev;
325 can_skb_set_owner(skb, op->sk);
326 err = can_send(skb, 1);
327
328 /* update currframe and count under lock protection */
329 spin_lock_bh(&op->bcm_tx_lock);
330
331 if (!err)
332 op->frames_abs++;
333
334 op->currframe++;
335
336 /* reached last frame? */
337 if (op->currframe >= op->nframes)
338 op->currframe = 0;
339
340 if (op->count > 0)
341 op->count--;
342
343 spin_unlock_bh(&op->bcm_tx_lock);
344 out:
345 dev_put(dev);
346 }
347
348 /*
349 * bcm_send_to_user - send a BCM message to the userspace
350 * (consisting of bcm_msg_head + x CAN frames)
351 */
352 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
353 struct canfd_frame *frames, int has_timestamp)
354 {
355 struct sk_buff *skb;
356 struct canfd_frame *firstframe;
357 struct sockaddr_can *addr;
358 struct sock *sk = op->sk;
359 unsigned int datalen = head->nframes * op->cfsiz;
360 int err;
361 unsigned int *pflags;
362
363 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
364 if (!skb)
365 return;
366
367 skb_put_data(skb, head, sizeof(*head));
368
369 /* ensure space for sockaddr_can and msg flags */
370 sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
371 sizeof(unsigned int));
372
373 /* initialize msg flags */
374 pflags = bcm_flags(skb);
375 *pflags = 0;
376
377 if (head->nframes) {
378 /* CAN frames starting here */
379 firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
380
381 skb_put_data(skb, frames, datalen);
382
383 /*
384 * the BCM uses the flags-element of the canfd_frame
385 * structure for internal purposes. This is only
386 * relevant for updates that are generated by the
387 * BCM, where nframes is 1
388 */
389 if (head->nframes == 1) {
390 if (firstframe->flags & RX_LOCAL)
391 *pflags |= MSG_DONTROUTE;
392 if (firstframe->flags & RX_OWN)
393 *pflags |= MSG_CONFIRM;
394
395 firstframe->flags &= BCM_CAN_FLAGS_MASK;
396 }
397 }
398
399 if (has_timestamp) {
400 /* restore rx timestamp */
401 skb->tstamp = op->rx_stamp;
402 }
403
404 /*
405 * Put the datagram into the receive queue so that bcm_recvmsg() can
406 * get it from there. To pass the interface index to bcm_recvmsg()
407 * we store a whole struct sockaddr_can containing the interface
408 * index in skb->cb.
409 */
410
411 addr = (struct sockaddr_can *)skb->cb;
412 memset(addr, 0, sizeof(*addr));
413 addr->can_family = AF_CAN;
414 addr->can_ifindex = op->rx_ifindex;
415
416 err = sock_queue_rcv_skb(sk, skb);
417 if (err < 0) {
418 struct bcm_sock *bo = bcm_sk(sk);
419
420 kfree_skb(skb);
421 /* don't care about overflows in this statistic */
422 bo->dropped_usr_msgs++;
423 }
424 }
425
426 static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
427 {
428 ktime_t ival;
429
430 if (op->kt_ival1 && op->count)
431 ival = op->kt_ival1;
432 else if (op->kt_ival2)
433 ival = op->kt_ival2;
434 else
435 return false;
436
437 hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
438 return true;
439 }
440
441 static void bcm_tx_start_timer(struct bcm_op *op)
442 {
443 if (bcm_tx_set_expiry(op, &op->timer))
444 hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
445 }
446
447 /* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
448 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
449 {
450 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
451 struct bcm_msg_head msg_head;
452
453 if (op->kt_ival1 && (op->count > 0)) {
454 bcm_can_tx(op);
455 if (!op->count && (op->flags & TX_COUNTEVT)) {
456
457 /* create notification to user */
458 memset(&msg_head, 0, sizeof(msg_head));
459 msg_head.opcode = TX_EXPIRED;
460 msg_head.flags = op->flags;
461 msg_head.count = op->count;
462 msg_head.ival1 = op->ival1;
463 msg_head.ival2 = op->ival2;
464 msg_head.can_id = op->can_id;
465 msg_head.nframes = 0;
466
467 bcm_send_to_user(op, &msg_head, NULL, 0);
468 }
469
470 } else if (op->kt_ival2) {
471 bcm_can_tx(op);
472 }
473
474 return bcm_tx_set_expiry(op, &op->timer) ?
475 HRTIMER_RESTART : HRTIMER_NORESTART;
476 }
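
/*
 * Timing sketch (illustrative): with count = 3, ival1 = 10 ms and
 * ival2 = 1 s, the handler above sends three frames spaced by ival1,
 * optionally reports TX_EXPIRED (if TX_COUNTEVT is set) once the counter
 * reaches zero, and then keeps transmitting at the slower ival2 rate
 * until the op is deleted or both intervals are cleared via SETTIMER.
 */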
477
478 /*
479 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
480 */
481 static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
482 {
483 struct bcm_msg_head head;
484
485 /* update statistics */
486 op->frames_filtered++;
487
488 /* prevent statistics overflow */
489 if (op->frames_filtered > ULONG_MAX/100)
490 op->frames_filtered = op->frames_abs = 0;
491
492 /* this element is not throttled anymore */
493 data->flags &= ~RX_THR;
494
495 memset(&head, 0, sizeof(head));
496 head.opcode = RX_CHANGED;
497 head.flags = op->flags;
498 head.count = op->count;
499 head.ival1 = op->ival1;
500 head.ival2 = op->ival2;
501 head.can_id = op->can_id;
502 head.nframes = 1;
503
504 bcm_send_to_user(op, &head, data, 1);
505 }
506
507 /*
508 * bcm_rx_update_and_send - process a detected relevant receive content change
509 * 1. update the last received data
510 * 2. send a notification to the user (if possible)
511 */
512 static void bcm_rx_update_and_send(struct bcm_op *op,
513 struct canfd_frame *lastdata,
514 const struct canfd_frame *rxdata,
515 unsigned char traffic_flags)
516 {
517 memcpy(lastdata, rxdata, op->cfsiz);
518
519 /* mark as used and throttled by default */
520 lastdata->flags |= (RX_RECV|RX_THR);
521
522 /* add own/local/remote traffic flags */
523 lastdata->flags |= traffic_flags;
524
525 /* throttling mode inactive ? */
526 if (!op->kt_ival2) {
527 /* send RX_CHANGED to the user immediately */
528 bcm_rx_changed(op, lastdata);
529 return;
530 }
531
532 /* with an active throttling timer we are just done here */
533 if (hrtimer_active(&op->thrtimer))
534 return;
535
536 /* first reception with enabled throttling mode */
537 if (!op->kt_lastmsg)
538 goto rx_changed_settime;
539
540 /* got a second frame inside a potential throttle period? */
541 if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
542 ktime_to_us(op->kt_ival2)) {
543 /* do not send the saved data - only start throttle timer */
544 hrtimer_start(&op->thrtimer,
545 ktime_add(op->kt_lastmsg, op->kt_ival2),
546 HRTIMER_MODE_ABS_SOFT);
547 return;
548 }
549
550 /* the gap was big enough that throttling was not needed here */
551 rx_changed_settime:
552 bcm_rx_changed(op, lastdata);
553 op->kt_lastmsg = ktime_get();
554 }
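
/*
 * Throttling sketch (illustrative): with kt_ival2 = 100 ms and content
 * changes arriving at t = 0, 30 and 60 ms, the user gets RX_CHANGED at
 * t = 0 immediately; the updates at 30 and 60 ms only refresh
 * last_frames[] and arm thrtimer, which then delivers the most recent
 * content at t = 100 ms via bcm_rx_thr_handler()/bcm_rx_thr_flush().
 */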
555
556 /*
557 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
558 * received data stored in op->last_frames[]
559 */
560 static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
561 const struct canfd_frame *rxdata,
562 unsigned char traffic_flags)
563 {
564 struct canfd_frame *cf = op->frames + op->cfsiz * index;
565 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
566 int i;
567
568 /*
569 * no one uses the MSBs of flags for comparison,
570 * so we use them here to detect the first time of reception
571 */
572
573 if (!(lcf->flags & RX_RECV)) {
574 /* received data for the first time => send update to user */
575 bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
576 return;
577 }
578
579 /* do a real check in CAN frame data section */
580 for (i = 0; i < rxdata->len; i += 8) {
581 if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
582 (get_u64(cf, i) & get_u64(lcf, i))) {
583 bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
584 return;
585 }
586 }
587
588 if (op->flags & RX_CHECK_DLC) {
589 /* do a real check in CAN frame length */
590 if (rxdata->len != lcf->len) {
591 bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
592 return;
593 }
594 }
595 }
596
597 /*
598 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
599 */
600 static void bcm_rx_starttimer(struct bcm_op *op)
601 {
602 if (op->flags & RX_NO_AUTOTIMER)
603 return;
604
605 if (op->kt_ival1)
606 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
607 }
608
609 /* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
610 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
611 {
612 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
613 struct bcm_msg_head msg_head;
614
615 /* if the user wants to be informed when cyclic CAN messages come back */
616 if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
617 /* clear received CAN frames to indicate 'nothing received' */
618 memset(op->last_frames, 0, op->nframes * op->cfsiz);
619 }
620
621 /* create notification to user */
622 memset(&msg_head, 0, sizeof(msg_head));
623 msg_head.opcode = RX_TIMEOUT;
624 msg_head.flags = op->flags;
625 msg_head.count = op->count;
626 msg_head.ival1 = op->ival1;
627 msg_head.ival2 = op->ival2;
628 msg_head.can_id = op->can_id;
629 msg_head.nframes = 0;
630
631 bcm_send_to_user(op, &msg_head, NULL, 0);
632
633 return HRTIMER_NORESTART;
634 }
635
636 /*
637 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
638 */
639 static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
640 {
641 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
642
643 if ((op->last_frames) && (lcf->flags & RX_THR)) {
644 bcm_rx_changed(op, lcf);
645 return 1;
646 }
647 return 0;
648 }
649
650 /*
651 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
652 */
653 static int bcm_rx_thr_flush(struct bcm_op *op)
654 {
655 int updated = 0;
656
657 if (op->nframes > 1) {
658 unsigned int i;
659
660 /* for MUX filter we start at index 1 */
661 for (i = 1; i < op->nframes; i++)
662 updated += bcm_rx_do_flush(op, i);
663
664 } else {
665 /* for RX_FILTER_ID and simple filter */
666 updated += bcm_rx_do_flush(op, 0);
667 }
668
669 return updated;
670 }
671
672 /*
673 * bcm_rx_thr_handler - the time for blocked content updates is over now:
674 * Check for throttled data and send it to the userspace
675 */
676 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
677 {
678 struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
679
680 if (bcm_rx_thr_flush(op)) {
681 hrtimer_forward_now(hrtimer, op->kt_ival2);
682 return HRTIMER_RESTART;
683 } else {
684 /* rearm throttle handling */
685 op->kt_lastmsg = 0;
686 return HRTIMER_NORESTART;
687 }
688 }
689
690 /*
691 * bcm_rx_handler - handle a CAN frame reception
692 */
693 static void bcm_rx_handler(struct sk_buff *skb, void *data)
694 {
695 struct bcm_op *op = (struct bcm_op *)data;
696 const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
697 unsigned int i;
698 unsigned char traffic_flags;
699
700 if (op->can_id != rxframe->can_id)
701 return;
702
703 /* make sure to handle the correct frame type (CAN / CAN FD) */
704 if (op->flags & CAN_FD_FRAME) {
705 if (!can_is_canfd_skb(skb))
706 return;
707 } else {
708 if (!can_is_can_skb(skb))
709 return;
710 }
711
712 /* disable timeout */
713 hrtimer_cancel(&op->timer);
714
715 /* save rx timestamp */
716 op->rx_stamp = skb->tstamp;
717 /* save originator for recvfrom() */
718 op->rx_ifindex = skb->dev->ifindex;
719 /* update statistics */
720 op->frames_abs++;
721
722 if (op->flags & RX_RTR_FRAME) {
723 /* send reply for RTR-request (placed in op->frames[0]) */
724 bcm_can_tx(op);
725 return;
726 }
727
728 /* compute flags to distinguish between own/local/remote CAN traffic */
729 traffic_flags = 0;
730 if (skb->sk) {
731 traffic_flags |= RX_LOCAL;
732 if (skb->sk == op->sk)
733 traffic_flags |= RX_OWN;
734 }
735
736 if (op->flags & RX_FILTER_ID) {
737 /* the easiest case */
738 bcm_rx_update_and_send(op, op->last_frames, rxframe,
739 traffic_flags);
740 goto rx_starttimer;
741 }
742
743 if (op->nframes == 1) {
744 /* simple compare with index 0 */
745 bcm_rx_cmp_to_index(op, 0, rxframe, traffic_flags);
746 goto rx_starttimer;
747 }
748
749 if (op->nframes > 1) {
750 /*
751 * multiplex compare
752 *
753 * find the first multiplex mask that fits.
754 * Remark: The MUX-mask is stored in index 0 - but only the
755 * first 64 bits of the frame data[] are relevant (CAN FD)
756 */
757
758 for (i = 1; i < op->nframes; i++) {
759 if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
760 (get_u64(op->frames, 0) &
761 get_u64(op->frames + op->cfsiz * i, 0))) {
762 bcm_rx_cmp_to_index(op, i, rxframe,
763 traffic_flags);
764 break;
765 }
766 }
767 }
768
769 rx_starttimer:
770 bcm_rx_starttimer(op);
771 }
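
/*
 * Multiplex filter sketch (illustrative): for an RX_SETUP with
 * nframes = 3 the user-supplied filter array looks like
 *
 *	frames[0]	MUX mask (only the first 64 data bits are used)
 *	frames[1..2]	expected content for each multiplexed message
 *
 * bcm_rx_handler() picks the first index i > 0 whose masked data matches
 * the masked received frame and lets bcm_rx_cmp_to_index() decide whether
 * the content change is worth an RX_CHANGED notification.
 */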
772
773 /*
774 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
775 */
776 static struct bcm_op *bcm_find_op(struct list_head *ops,
777 struct bcm_msg_head *mh, int ifindex)
778 {
779 struct bcm_op *op;
780
781 list_for_each_entry(op, ops, list) {
782 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
783 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
784 return op;
785 }
786
787 return NULL;
788 }
789
790 static void bcm_free_op_rcu(struct rcu_head *rcu_head)
791 {
792 struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu);
793
794 if ((op->frames) && (op->frames != &op->sframe))
795 kfree(op->frames);
796
797 if ((op->last_frames) && (op->last_frames != &op->last_sframe))
798 kfree(op->last_frames);
799
800 kfree(op);
801 }
802
803 static void bcm_remove_op(struct bcm_op *op)
804 {
805 hrtimer_cancel(&op->timer);
806 hrtimer_cancel(&op->thrtimer);
807
808 call_rcu(&op->rcu, bcm_free_op_rcu);
809 }
810
811 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
812 {
813 if (op->rx_reg_dev == dev) {
814 can_rx_unregister(dev_net(dev), dev, op->can_id,
815 REGMASK(op->can_id), bcm_rx_handler, op);
816
817 /* mark as removed subscription */
818 op->rx_reg_dev = NULL;
819 } else
820 printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
821 "mismatch %p %p\n", op->rx_reg_dev, dev);
822 }
823
824 /*
825 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
826 */
827 static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
828 int ifindex)
829 {
830 struct bcm_op *op, *n;
831
832 list_for_each_entry_safe(op, n, ops, list) {
833 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
834 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
835
836 /* disable automatic timer on frame reception */
837 op->flags |= RX_NO_AUTOTIMER;
838
839 /*
840 * Don't care if we're bound or not (due to netdev
841 * problems), can_rx_unregister() is always a safe
842 * thing to do here.
843 */
844 if (op->ifindex) {
845 /*
846 * Only remove subscriptions that had not
847 * been removed due to NETDEV_UNREGISTER
848 * in bcm_notifier()
849 */
850 if (op->rx_reg_dev) {
851 struct net_device *dev;
852
853 dev = dev_get_by_index(sock_net(op->sk),
854 op->ifindex);
855 if (dev) {
856 bcm_rx_unreg(dev, op);
857 dev_put(dev);
858 }
859 }
860 } else
861 can_rx_unregister(sock_net(op->sk), NULL,
862 op->can_id,
863 REGMASK(op->can_id),
864 bcm_rx_handler, op);
865
866 list_del_rcu(&op->list);
867 bcm_remove_op(op);
868 return 1; /* done */
869 }
870 }
871
872 return 0; /* not found */
873 }
874
875 /*
876 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
877 */
878 static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
879 int ifindex)
880 {
881 struct bcm_op *op, *n;
882
883 list_for_each_entry_safe(op, n, ops, list) {
884 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
885 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
886 list_del_rcu(&op->list);
887 bcm_remove_op(op);
888 return 1; /* done */
889 }
890 }
891
892 return 0; /* not found */
893 }
894
895 /*
896 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
897 */
898 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
899 int ifindex)
900 {
901 struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);
902
903 if (!op)
904 return -EINVAL;
905
906 /* put current values into msg_head */
907 msg_head->flags = op->flags;
908 msg_head->count = op->count;
909 msg_head->ival1 = op->ival1;
910 msg_head->ival2 = op->ival2;
911 msg_head->nframes = op->nframes;
912
913 bcm_send_to_user(op, msg_head, op->frames, 0);
914
915 return MHSIZ;
916 }
917
918 /*
919 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
920 */
921 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
922 int ifindex, struct sock *sk)
923 {
924 struct bcm_sock *bo = bcm_sk(sk);
925 struct bcm_op *op;
926 struct canfd_frame *cf;
927 unsigned int i;
928 int err;
929
930 /* we need a real device to send frames */
931 if (!ifindex)
932 return -ENODEV;
933
934 /* check nframes boundaries - we need at least one CAN frame */
935 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
936 return -EINVAL;
937
938 /* check timeval limitations */
939 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
940 return -EINVAL;
941
942 /* check the given can_id */
943 op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
944 if (op) {
945 /* update existing BCM operation */
946
947 /*
948 * Do we need more space for the CAN frames than currently
949 * allocated? -> This is a _really_ unusual use-case and
950 * therefore (for complexity and locking reasons) it is not supported.
951 */
952 if (msg_head->nframes > op->nframes)
953 return -E2BIG;
954
955 /* update CAN frames content */
956 for (i = 0; i < msg_head->nframes; i++) {
957
958 cf = op->frames + op->cfsiz * i;
959 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
960
961 if (op->flags & CAN_FD_FRAME) {
962 if (cf->len > 64)
963 err = -EINVAL;
964 } else {
965 if (cf->len > 8)
966 err = -EINVAL;
967 }
968
969 if (err < 0)
970 return err;
971
972 if (msg_head->flags & TX_CP_CAN_ID) {
973 /* copy can_id into frame */
974 cf->can_id = msg_head->can_id;
975 }
976 }
977 op->flags = msg_head->flags;
978
979 /* only lock for unlikely count/nframes/currframe changes */
980 if (op->nframes != msg_head->nframes ||
981 op->flags & TX_RESET_MULTI_IDX ||
982 op->flags & SETTIMER) {
983
984 spin_lock_bh(&op->bcm_tx_lock);
985
986 if (op->nframes != msg_head->nframes ||
987 op->flags & TX_RESET_MULTI_IDX) {
988 /* potentially update changed nframes */
989 op->nframes = msg_head->nframes;
990 /* restart multiple frame transmission */
991 op->currframe = 0;
992 }
993
994 if (op->flags & SETTIMER)
995 op->count = msg_head->count;
996
997 spin_unlock_bh(&op->bcm_tx_lock);
998 }
999
1000 } else {
1001 /* insert new BCM operation for the given can_id */
1002
1003 op = kzalloc(OPSIZ, GFP_KERNEL);
1004 if (!op)
1005 return -ENOMEM;
1006
1007 spin_lock_init(&op->bcm_tx_lock);
1008 op->can_id = msg_head->can_id;
1009 op->cfsiz = CFSIZ(msg_head->flags);
1010 op->flags = msg_head->flags;
1011 op->nframes = msg_head->nframes;
1012
1013 if (op->flags & SETTIMER)
1014 op->count = msg_head->count;
1015
1016 /* create array for CAN frames and copy the data */
1017 if (msg_head->nframes > 1) {
1018 op->frames = kmalloc_array(msg_head->nframes,
1019 op->cfsiz,
1020 GFP_KERNEL);
1021 if (!op->frames) {
1022 kfree(op);
1023 return -ENOMEM;
1024 }
1025 } else
1026 op->frames = &op->sframe;
1027
1028 for (i = 0; i < msg_head->nframes; i++) {
1029
1030 cf = op->frames + op->cfsiz * i;
1031 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
1032 if (err < 0)
1033 goto free_op;
1034
1035 if (op->flags & CAN_FD_FRAME) {
1036 if (cf->len > 64)
1037 err = -EINVAL;
1038 } else {
1039 if (cf->len > 8)
1040 err = -EINVAL;
1041 }
1042
1043 if (err < 0)
1044 goto free_op;
1045
1046 if (msg_head->flags & TX_CP_CAN_ID) {
1047 /* copy can_id into frame */
1048 cf->can_id = msg_head->can_id;
1049 }
1050 }
1051
1052 /* tx_ops never compare with previous received messages */
1053 op->last_frames = NULL;
1054
1055 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1056 op->sk = sk;
1057 op->ifindex = ifindex;
1058
1059 /* initialize uninitialized (kzalloc) structure */
1060 hrtimer_init(&op->timer, CLOCK_MONOTONIC,
1061 HRTIMER_MODE_REL_SOFT);
1062 op->timer.function = bcm_tx_timeout_handler;
1063
1064 /* currently unused in tx_ops */
1065 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
1066 HRTIMER_MODE_REL_SOFT);
1067
1068 /* add this bcm_op to the list of the tx_ops */
1069 list_add(&op->list, &bo->tx_ops);
1070
1071 } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head, ifindex))) */
1072
1073 if (op->flags & SETTIMER) {
1074 /* set timer values */
1075 op->ival1 = msg_head->ival1;
1076 op->ival2 = msg_head->ival2;
1077 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
1078 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1079
1080 /* disable an active timer due to zero values? */
1081 if (!op->kt_ival1 && !op->kt_ival2)
1082 hrtimer_cancel(&op->timer);
1083 }
1084
1085 if (op->flags & STARTTIMER) {
1086 hrtimer_cancel(&op->timer);
1087 /* spec: send CAN frame when starting timer */
1088 op->flags |= TX_ANNOUNCE;
1089 }
1090
1091 if (op->flags & TX_ANNOUNCE)
1092 bcm_can_tx(op);
1093
1094 if (op->flags & STARTTIMER)
1095 bcm_tx_start_timer(op);
1096
1097 return msg_head->nframes * op->cfsiz + MHSIZ;
1098
1099 free_op:
1100 if (op->frames != &op->sframe)
1101 kfree(op->frames);
1102 kfree(op);
1103 return err;
1104 }
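
/*
 * Userspace sketch (illustrative only; assumes a CAN interface named
 * "can0", error handling omitted): set up a cyclic transmission of one
 * classic CAN frame every second.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} msg = {
 *		.msg_head = {
 *			.opcode  = TX_SETUP,
 *			.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID,
 *			.count   = 0,	// no "fast" phase, cycle with ival2 only
 *			.ival2   = { .tv_sec = 1, .tv_usec = 0 },
 *			.can_id  = 0x123,
 *			.nframes = 1,
 *		},
 *	};
 *	struct sockaddr_can addr = {
 *		.can_family  = AF_CAN,
 *		.can_ifindex = if_nametoindex("can0"),
 *	};
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *
 *	msg.frame.len = 2;
 *	msg.frame.data[0] = 0x12;
 *	msg.frame.data[1] = 0x34;
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *	write(s, &msg, sizeof(msg));	// MHSIZ + 1 * CFSIZ(0) bytes
 *
 * TX_CP_CAN_ID copies msg_head.can_id into the frame, STARTTIMER sends
 * the first frame immediately (TX_ANNOUNCE) before the cyclic timer
 * takes over in bcm_tx_start_timer().
 */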
1105
1106 /*
1107 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
1108 */
1109 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1110 int ifindex, struct sock *sk)
1111 {
1112 struct bcm_sock *bo = bcm_sk(sk);
1113 struct bcm_op *op;
1114 int do_rx_register;
1115 int err = 0;
1116
1117 if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
1118 /* be robust against wrong usage ... */
1119 msg_head->flags |= RX_FILTER_ID;
1120 /* ignore trailing garbage */
1121 msg_head->nframes = 0;
1122 }
1123
1124 /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
1125 if (msg_head->nframes > MAX_NFRAMES + 1)
1126 return -EINVAL;
1127
1128 if ((msg_head->flags & RX_RTR_FRAME) &&
1129 ((msg_head->nframes != 1) ||
1130 (!(msg_head->can_id & CAN_RTR_FLAG))))
1131 return -EINVAL;
1132
1133 /* check timeval limitations */
1134 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
1135 return -EINVAL;
1136
1137 /* check the given can_id */
1138 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
1139 if (op) {
1140 /* update existing BCM operation */
1141
1142 /*
1143 * Do we need more space for the CAN frames than currently
1144 * allocated? -> This is a _really_ unusual use-case and
1145 * therefore (for complexity and locking reasons) it is not supported.
1146 */
1147 if (msg_head->nframes > op->nframes)
1148 return -E2BIG;
1149
1150 if (msg_head->nframes) {
1151 /* update CAN frames content */
1152 err = memcpy_from_msg(op->frames, msg,
1153 msg_head->nframes * op->cfsiz);
1154 if (err < 0)
1155 return err;
1156
1157 /* clear last_frames to indicate 'nothing received' */
1158 memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
1159 }
1160
1161 op->nframes = msg_head->nframes;
1162 op->flags = msg_head->flags;
1163
1164 /* Only an update -> do not call can_rx_register() */
1165 do_rx_register = 0;
1166
1167 } else {
1168 /* insert new BCM operation for the given can_id */
1169 op = kzalloc(OPSIZ, GFP_KERNEL);
1170 if (!op)
1171 return -ENOMEM;
1172
1173 op->can_id = msg_head->can_id;
1174 op->nframes = msg_head->nframes;
1175 op->cfsiz = CFSIZ(msg_head->flags);
1176 op->flags = msg_head->flags;
1177
1178 if (msg_head->nframes > 1) {
1179 /* create array for CAN frames and copy the data */
1180 op->frames = kmalloc_array(msg_head->nframes,
1181 op->cfsiz,
1182 GFP_KERNEL);
1183 if (!op->frames) {
1184 kfree(op);
1185 return -ENOMEM;
1186 }
1187
1188 /* create and init array for received CAN frames */
1189 op->last_frames = kcalloc(msg_head->nframes,
1190 op->cfsiz,
1191 GFP_KERNEL);
1192 if (!op->last_frames) {
1193 kfree(op->frames);
1194 kfree(op);
1195 return -ENOMEM;
1196 }
1197
1198 } else {
1199 op->frames = &op->sframe;
1200 op->last_frames = &op->last_sframe;
1201 }
1202
1203 if (msg_head->nframes) {
1204 err = memcpy_from_msg(op->frames, msg,
1205 msg_head->nframes * op->cfsiz);
1206 if (err < 0) {
1207 if (op->frames != &op->sframe)
1208 kfree(op->frames);
1209 if (op->last_frames != &op->last_sframe)
1210 kfree(op->last_frames);
1211 kfree(op);
1212 return err;
1213 }
1214 }
1215
1216 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1217 op->sk = sk;
1218 op->ifindex = ifindex;
1219
1220 /* ifindex for timeout events w/o previous frame reception */
1221 op->rx_ifindex = ifindex;
1222
1223 /* initialize uninitialized (kzalloc) structure */
1224 hrtimer_init(&op->timer, CLOCK_MONOTONIC,
1225 HRTIMER_MODE_REL_SOFT);
1226 op->timer.function = bcm_rx_timeout_handler;
1227
1228 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
1229 HRTIMER_MODE_REL_SOFT);
1230 op->thrtimer.function = bcm_rx_thr_handler;
1231
1232 /* add this bcm_op to the list of the rx_ops */
1233 list_add(&op->list, &bo->rx_ops);
1234
1235 /* call can_rx_register() */
1236 do_rx_register = 1;
1237
1238 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head, ifindex))) */
1239
1240 /* check flags */
1241
1242 if (op->flags & RX_RTR_FRAME) {
1243 struct canfd_frame *frame0 = op->frames;
1244
1245 /* no timers in RTR-mode */
1246 hrtimer_cancel(&op->thrtimer);
1247 hrtimer_cancel(&op->timer);
1248
1249 /*
1250 * funny feature in RX(!)_SETUP only for RTR-mode:
1251 * copy can_id into frame BUT without RTR-flag to
1252 * prevent a full-load-loopback-test ... ;-]
1253 */
1254 if ((op->flags & TX_CP_CAN_ID) ||
1255 (frame0->can_id == op->can_id))
1256 frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
1257
1258 } else {
1259 if (op->flags & SETTIMER) {
1260
1261 /* set timer value */
1262 op->ival1 = msg_head->ival1;
1263 op->ival2 = msg_head->ival2;
1264 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
1265 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1266
1267 /* disable an active timer due to zero value? */
1268 if (!op->kt_ival1)
1269 hrtimer_cancel(&op->timer);
1270
1271 /*
1272 * In any case cancel the throttle timer, flush
1273 * potentially blocked msgs and reset throttle handling
1274 */
1275 op->kt_lastmsg = 0;
1276 hrtimer_cancel(&op->thrtimer);
1277 bcm_rx_thr_flush(op);
1278 }
1279
1280 if ((op->flags & STARTTIMER) && op->kt_ival1)
1281 hrtimer_start(&op->timer, op->kt_ival1,
1282 HRTIMER_MODE_REL_SOFT);
1283 }
1284
1285 /* now we can register for can_ids, if we added a new bcm_op */
1286 if (do_rx_register) {
1287 if (ifindex) {
1288 struct net_device *dev;
1289
1290 dev = dev_get_by_index(sock_net(sk), ifindex);
1291 if (dev) {
1292 err = can_rx_register(sock_net(sk), dev,
1293 op->can_id,
1294 REGMASK(op->can_id),
1295 bcm_rx_handler, op,
1296 "bcm", sk);
1297
1298 op->rx_reg_dev = dev;
1299 dev_put(dev);
1300 }
1301
1302 } else
1303 err = can_rx_register(sock_net(sk), NULL, op->can_id,
1304 REGMASK(op->can_id),
1305 bcm_rx_handler, op, "bcm", sk);
1306 if (err) {
1307 /* this bcm rx op is broken -> remove it */
1308 list_del_rcu(&op->list);
1309 bcm_remove_op(op);
1310 return err;
1311 }
1312 }
1313
1314 return msg_head->nframes * op->cfsiz + MHSIZ;
1315 }
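
/*
 * Userspace sketch (illustrative only; assumes a connected CAN_BCM
 * socket s): content filter for CAN ID 0x123 with a 500 ms RX_TIMEOUT
 * notification once the cyclic traffic stops.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} msg = {
 *		.msg_head = {
 *			.opcode  = RX_SETUP,
 *			.flags   = SETTIMER,
 *			.ival1   = { .tv_usec = 500000 },
 *			.can_id  = 0x123,
 *			.nframes = 1,
 *		},
 *	};
 *
 *	// frame.data[] acts as the relevance mask: only changes within the
 *	// masked bits cause RX_CHANGED messages (see bcm_rx_cmp_to_index())
 *	memset(msg.frame.data, 0xff, 2);	// watch the first two bytes
 *	write(s, &msg, sizeof(msg));
 *
 * Using flags = RX_FILTER_ID with nframes = 0 instead bypasses the
 * content comparison and forwards every matching frame to the socket.
 */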
1316
1317 /*
1318 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1319 */
1320 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
1321 int cfsiz)
1322 {
1323 struct sk_buff *skb;
1324 struct net_device *dev;
1325 int err;
1326
1327 /* we need a real device to send frames */
1328 if (!ifindex)
1329 return -ENODEV;
1330
1331 skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
1332 if (!skb)
1333 return -ENOMEM;
1334
1335 can_skb_reserve(skb);
1336
1337 err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
1338 if (err < 0) {
1339 kfree_skb(skb);
1340 return err;
1341 }
1342
1343 dev = dev_get_by_index(sock_net(sk), ifindex);
1344 if (!dev) {
1345 kfree_skb(skb);
1346 return -ENODEV;
1347 }
1348
1349 can_skb_prv(skb)->ifindex = dev->ifindex;
1350 can_skb_prv(skb)->skbcnt = 0;
1351 skb->dev = dev;
1352 can_skb_set_owner(skb, sk);
1353 err = can_send(skb, 1); /* send with loopback */
1354 dev_put(dev);
1355
1356 if (err)
1357 return err;
1358
1359 return cfsiz + MHSIZ;
1360 }
1361
1362 /*
1363 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
1364 */
1365 static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1366 {
1367 struct sock *sk = sock->sk;
1368 struct bcm_sock *bo = bcm_sk(sk);
1369 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1370 struct bcm_msg_head msg_head;
1371 int cfsiz;
1372 int ret; /* read bytes or error codes as return value */
1373
1374 if (!bo->bound)
1375 return -ENOTCONN;
1376
1377 /* check for valid message length from userspace */
1378 if (size < MHSIZ)
1379 return -EINVAL;
1380
1381 /* read message head information */
1382 ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
1383 if (ret < 0)
1384 return ret;
1385
1386 cfsiz = CFSIZ(msg_head.flags);
1387 if ((size - MHSIZ) % cfsiz)
1388 return -EINVAL;
1389
1390 /* check for alternative ifindex for this bcm_op */
1391
1392 if (!ifindex && msg->msg_name) {
1393 /* no bound device as default => check msg_name */
1394 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
1395
1396 if (msg->msg_namelen < BCM_MIN_NAMELEN)
1397 return -EINVAL;
1398
1399 if (addr->can_family != AF_CAN)
1400 return -EINVAL;
1401
1402 /* ifindex from sendto() */
1403 ifindex = addr->can_ifindex;
1404
1405 if (ifindex) {
1406 struct net_device *dev;
1407
1408 dev = dev_get_by_index(sock_net(sk), ifindex);
1409 if (!dev)
1410 return -ENODEV;
1411
1412 if (dev->type != ARPHRD_CAN) {
1413 dev_put(dev);
1414 return -ENODEV;
1415 }
1416
1417 dev_put(dev);
1418 }
1419 }
1420
1421 lock_sock(sk);
1422
1423 switch (msg_head.opcode) {
1424
1425 case TX_SETUP:
1426 ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1427 break;
1428
1429 case RX_SETUP:
1430 ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1431 break;
1432
1433 case TX_DELETE:
1434 if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
1435 ret = MHSIZ;
1436 else
1437 ret = -EINVAL;
1438 break;
1439
1440 case RX_DELETE:
1441 if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
1442 ret = MHSIZ;
1443 else
1444 ret = -EINVAL;
1445 break;
1446
1447 case TX_READ:
1448 /* reuse msg_head for the reply to TX_READ */
1449 msg_head.opcode = TX_STATUS;
1450 ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1451 break;
1452
1453 case RX_READ:
1454 /* reuse msg_head for the reply to RX_READ */
1455 msg_head.opcode = RX_STATUS;
1456 ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1457 break;
1458
1459 case TX_SEND:
1460 /* we need exactly one CAN frame behind the msg head */
1461 if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
1462 ret = -EINVAL;
1463 else
1464 ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
1465 break;
1466
1467 default:
1468 ret = -EINVAL;
1469 break;
1470 }
1471
1472 release_sock(sk);
1473
1474 return ret;
1475 }
1476
1477 /*
1478 * notification handler for netdevice status changes
1479 */
1480 static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
1481 struct net_device *dev)
1482 {
1483 struct sock *sk = &bo->sk;
1484 struct bcm_op *op;
1485 int notify_enodev = 0;
1486
1487 if (!net_eq(dev_net(dev), sock_net(sk)))
1488 return;
1489
1490 switch (msg) {
1491
1492 case NETDEV_UNREGISTER:
1493 lock_sock(sk);
1494
1495 /* remove device specific receive entries */
1496 list_for_each_entry(op, &bo->rx_ops, list)
1497 if (op->rx_reg_dev == dev)
1498 bcm_rx_unreg(dev, op);
1499
1500 /* remove device reference, if this is our bound device */
1501 if (bo->bound && bo->ifindex == dev->ifindex) {
1502 #if IS_ENABLED(CONFIG_PROC_FS)
1503 if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
1504 remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
1505 bo->bcm_proc_read = NULL;
1506 }
1507 #endif
1508 bo->bound = 0;
1509 bo->ifindex = 0;
1510 notify_enodev = 1;
1511 }
1512
1513 release_sock(sk);
1514
1515 if (notify_enodev) {
1516 sk->sk_err = ENODEV;
1517 if (!sock_flag(sk, SOCK_DEAD))
1518 sk_error_report(sk);
1519 }
1520 break;
1521
1522 case NETDEV_DOWN:
1523 if (bo->bound && bo->ifindex == dev->ifindex) {
1524 sk->sk_err = ENETDOWN;
1525 if (!sock_flag(sk, SOCK_DEAD))
1526 sk_error_report(sk);
1527 }
1528 }
1529 }
1530
1531 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1532 void *ptr)
1533 {
1534 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1535
1536 if (dev->type != ARPHRD_CAN)
1537 return NOTIFY_DONE;
1538 if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
1539 return NOTIFY_DONE;
1540 if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
1541 return NOTIFY_DONE;
1542
1543 spin_lock(&bcm_notifier_lock);
1544 list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
1545 spin_unlock(&bcm_notifier_lock);
1546 bcm_notify(bcm_busy_notifier, msg, dev);
1547 spin_lock(&bcm_notifier_lock);
1548 }
1549 bcm_busy_notifier = NULL;
1550 spin_unlock(&bcm_notifier_lock);
1551 return NOTIFY_DONE;
1552 }
1553
1554 /*
1555 * initial settings for all BCM sockets to be set at socket creation time
1556 */
1557 static int bcm_init(struct sock *sk)
1558 {
1559 struct bcm_sock *bo = bcm_sk(sk);
1560
1561 bo->bound = 0;
1562 bo->ifindex = 0;
1563 bo->dropped_usr_msgs = 0;
1564 bo->bcm_proc_read = NULL;
1565
1566 INIT_LIST_HEAD(&bo->tx_ops);
1567 INIT_LIST_HEAD(&bo->rx_ops);
1568
1569 /* set notifier */
1570 spin_lock(&bcm_notifier_lock);
1571 list_add_tail(&bo->notifier, &bcm_notifier_list);
1572 spin_unlock(&bcm_notifier_lock);
1573
1574 return 0;
1575 }
1576
1577 /*
1578 * standard socket functions
1579 */
1580 static int bcm_release(struct socket *sock)
1581 {
1582 struct sock *sk = sock->sk;
1583 struct net *net;
1584 struct bcm_sock *bo;
1585 struct bcm_op *op, *next;
1586
1587 if (!sk)
1588 return 0;
1589
1590 net = sock_net(sk);
1591 bo = bcm_sk(sk);
1592
1593 /* remove bcm_ops, timer, rx_unregister(), etc. */
1594
1595 spin_lock(&bcm_notifier_lock);
1596 while (bcm_busy_notifier == bo) {
1597 spin_unlock(&bcm_notifier_lock);
1598 schedule_timeout_uninterruptible(1);
1599 spin_lock(&bcm_notifier_lock);
1600 }
1601 list_del(&bo->notifier);
1602 spin_unlock(&bcm_notifier_lock);
1603
1604 lock_sock(sk);
1605
1606 #if IS_ENABLED(CONFIG_PROC_FS)
1607 /* remove procfs entry */
1608 if (net->can.bcmproc_dir && bo->bcm_proc_read)
1609 remove_proc_entry(bo->procname, net->can.bcmproc_dir);
1610 #endif /* CONFIG_PROC_FS */
1611
1612 list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1613 bcm_remove_op(op);
1614
1615 list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1616 /*
1617 * Don't care if we're bound or not (due to netdev problems),
1618 * can_rx_unregister() is always a safe thing to do here.
1619 */
1620 if (op->ifindex) {
1621 /*
1622 * Only remove subscriptions that had not
1623 * been removed due to NETDEV_UNREGISTER
1624 * in bcm_notifier()
1625 */
1626 if (op->rx_reg_dev) {
1627 struct net_device *dev;
1628
1629 dev = dev_get_by_index(net, op->ifindex);
1630 if (dev) {
1631 bcm_rx_unreg(dev, op);
1632 dev_put(dev);
1633 }
1634 }
1635 } else
1636 can_rx_unregister(net, NULL, op->can_id,
1637 REGMASK(op->can_id),
1638 bcm_rx_handler, op);
1639
1640 }
1641
1642 synchronize_rcu();
1643
1644 list_for_each_entry_safe(op, next, &bo->rx_ops, list)
1645 bcm_remove_op(op);
1646
1647 /* remove device reference */
1648 if (bo->bound) {
1649 bo->bound = 0;
1650 bo->ifindex = 0;
1651 }
1652
1653 sock_orphan(sk);
1654 sock->sk = NULL;
1655
1656 release_sock(sk);
1657 sock_put(sk);
1658
1659 return 0;
1660 }
1661
1662 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1663 int flags)
1664 {
1665 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1666 struct sock *sk = sock->sk;
1667 struct bcm_sock *bo = bcm_sk(sk);
1668 struct net *net = sock_net(sk);
1669 int ret = 0;
1670
1671 if (len < BCM_MIN_NAMELEN)
1672 return -EINVAL;
1673
1674 lock_sock(sk);
1675
1676 if (bo->bound) {
1677 ret = -EISCONN;
1678 goto fail;
1679 }
1680
1681 /* bind a device to this socket */
1682 if (addr->can_ifindex) {
1683 struct net_device *dev;
1684
1685 dev = dev_get_by_index(net, addr->can_ifindex);
1686 if (!dev) {
1687 ret = -ENODEV;
1688 goto fail;
1689 }
1690 if (dev->type != ARPHRD_CAN) {
1691 dev_put(dev);
1692 ret = -ENODEV;
1693 goto fail;
1694 }
1695
1696 bo->ifindex = dev->ifindex;
1697 dev_put(dev);
1698
1699 } else {
1700 /* no interface reference for ifindex = 0 ('any' CAN device) */
1701 bo->ifindex = 0;
1702 }
1703
1704 #if IS_ENABLED(CONFIG_PROC_FS)
1705 if (net->can.bcmproc_dir) {
1706 /* unique socket address as filename */
1707 sprintf(bo->procname, "%lu", sock_i_ino(sk));
1708 bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
1709 net->can.bcmproc_dir,
1710 bcm_proc_show, sk);
1711 if (!bo->bcm_proc_read) {
1712 ret = -ENOMEM;
1713 goto fail;
1714 }
1715 }
1716 #endif /* CONFIG_PROC_FS */
1717
1718 bo->bound = 1;
1719
1720 fail:
1721 release_sock(sk);
1722
1723 return ret;
1724 }
1725
1726 static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1727 int flags)
1728 {
1729 struct sock *sk = sock->sk;
1730 struct sk_buff *skb;
1731 int error = 0;
1732 int err;
1733
1734 skb = skb_recv_datagram(sk, flags, &error);
1735 if (!skb)
1736 return error;
1737
1738 if (skb->len < size)
1739 size = skb->len;
1740
1741 err = memcpy_to_msg(msg, skb->data, size);
1742 if (err < 0) {
1743 skb_free_datagram(sk, skb);
1744 return err;
1745 }
1746
1747 sock_recv_cmsgs(msg, sk, skb);
1748
1749 if (msg->msg_name) {
1750 __sockaddr_check_size(BCM_MIN_NAMELEN);
1751 msg->msg_namelen = BCM_MIN_NAMELEN;
1752 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1753 }
1754
1755 /* assign the flags that have been recorded in bcm_send_to_user() */
1756 msg->msg_flags |= *(bcm_flags(skb));
1757
1758 skb_free_datagram(sk, skb);
1759
1760 return size;
1761 }
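
/*
 * Userspace sketch (illustrative only): messages created by
 * bcm_send_to_user() are read back as a bcm_msg_head followed by
 * msg_head.nframes CAN frames, e.g. for a classic CAN RX op:
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} rxmsg;
 *
 *	if (read(s, &rxmsg, sizeof(rxmsg)) >= (ssize_t)sizeof(rxmsg.msg_head)) {
 *		if (rxmsg.msg_head.opcode == RX_CHANGED)
 *			;	// rxmsg.frame holds the updated content
 *		else if (rxmsg.msg_head.opcode == RX_TIMEOUT)
 *			;	// cyclic reception of can_id timed out
 *	}
 *
 * MSG_DONTROUTE / MSG_CONFIRM in msg->msg_flags additionally mark frames
 * that originated on the local host or on this very socket, as recorded
 * by bcm_send_to_user() and restored above via bcm_flags().
 */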
1762
1763 static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
1764 unsigned long arg)
1765 {
1766 /* no ioctls for socket layer -> hand it down to NIC layer */
1767 return -ENOIOCTLCMD;
1768 }
1769
1770 static const struct proto_ops bcm_ops = {
1771 .family = PF_CAN,
1772 .release = bcm_release,
1773 .bind = sock_no_bind,
1774 .connect = bcm_connect,
1775 .socketpair = sock_no_socketpair,
1776 .accept = sock_no_accept,
1777 .getname = sock_no_getname,
1778 .poll = datagram_poll,
1779 .ioctl = bcm_sock_no_ioctlcmd,
1780 .gettstamp = sock_gettstamp,
1781 .listen = sock_no_listen,
1782 .shutdown = sock_no_shutdown,
1783 .sendmsg = bcm_sendmsg,
1784 .recvmsg = bcm_recvmsg,
1785 .mmap = sock_no_mmap,
1786 };
1787
1788 static struct proto bcm_proto __read_mostly = {
1789 .name = "CAN_BCM",
1790 .owner = THIS_MODULE,
1791 .obj_size = sizeof(struct bcm_sock),
1792 .init = bcm_init,
1793 };
1794
1795 static const struct can_proto bcm_can_proto = {
1796 .type = SOCK_DGRAM,
1797 .protocol = CAN_BCM,
1798 .ops = &bcm_ops,
1799 .prot = &bcm_proto,
1800 };
1801
1802 static int canbcm_pernet_init(struct net *net)
1803 {
1804 #if IS_ENABLED(CONFIG_PROC_FS)
1805 /* create /proc/net/can-bcm directory */
1806 net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
1807 #endif /* CONFIG_PROC_FS */
1808
1809 return 0;
1810 }
1811
1812 static void canbcm_pernet_exit(struct net *net)
1813 {
1814 #if IS_ENABLED(CONFIG_PROC_FS)
1815 /* remove /proc/net/can-bcm directory */
1816 if (net->can.bcmproc_dir)
1817 remove_proc_entry("can-bcm", net->proc_net);
1818 #endif /* CONFIG_PROC_FS */
1819 }
1820
1821 static struct pernet_operations canbcm_pernet_ops __read_mostly = {
1822 .init = canbcm_pernet_init,
1823 .exit = canbcm_pernet_exit,
1824 };
1825
1826 static struct notifier_block canbcm_notifier = {
1827 .notifier_call = bcm_notifier
1828 };
1829
1830 static int __init bcm_module_init(void)
1831 {
1832 int err;
1833
1834 pr_info("can: broadcast manager protocol\n");
1835
1836 err = register_pernet_subsys(&canbcm_pernet_ops);
1837 if (err)
1838 return err;
1839
1840 err = register_netdevice_notifier(&canbcm_notifier);
1841 if (err)
1842 goto register_notifier_failed;
1843
1844 err = can_proto_register(&bcm_can_proto);
1845 if (err < 0) {
1846 printk(KERN_ERR "can: registration of bcm protocol failed\n");
1847 goto register_proto_failed;
1848 }
1849
1850 return 0;
1851
1852 register_proto_failed:
1853 unregister_netdevice_notifier(&canbcm_notifier);
1854 register_notifier_failed:
1855 unregister_pernet_subsys(&canbcm_pernet_ops);
1856 return err;
1857 }
1858
1859 static void __exit bcm_module_exit(void)
1860 {
1861 can_proto_unregister(&bcm_can_proto);
1862 unregister_netdevice_notifier(&canbcm_notifier);
1863 unregister_pernet_subsys(&canbcm_pernet_ops);
1864 }
1865
1866 module_init(bcm_module_init);
1867 module_exit(bcm_module_exit);
1868