1 // SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
2 /*
3 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
4 *
5 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Volkswagen nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * Alternatively, provided that this notice is retained in full, this
21 * software may be distributed under the terms of the GNU General
22 * Public License ("GPL") version 2, in which case the provisions of the
23 * GPL apply INSTEAD OF those given above.
24 *
25 * The provided data structures and external interfaces from this code
26 * are not restricted to be used by modules with a GPL compatible license.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
39 * DAMAGE.
40 *
41 */
42
43 #include <linux/module.h>
44 #include <linux/init.h>
45 #include <linux/interrupt.h>
46 #include <linux/hrtimer.h>
47 #include <linux/list.h>
48 #include <linux/proc_fs.h>
49 #include <linux/seq_file.h>
50 #include <linux/uio.h>
51 #include <linux/net.h>
52 #include <linux/netdevice.h>
53 #include <linux/socket.h>
54 #include <linux/if_arp.h>
55 #include <linux/skbuff.h>
56 #include <linux/can.h>
57 #include <linux/can/core.h>
58 #include <linux/can/skb.h>
59 #include <linux/can/bcm.h>
60 #include <linux/slab.h>
61 #include <net/sock.h>
62 #include <net/net_namespace.h>
63
64 /*
65 * To send multiple CAN frames within TX_SETUP or to filter CAN
66 * messages with a multiplex index within RX_SETUP, the number of
67 * different frames/filters is limited to 256 due to the one-byte index value.
68 */
69 #define MAX_NFRAMES 256
70
71 /* limit timers to 400 days for sending/timeouts */
72 #define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
73
74 /* use of last_frames[index].flags */
75 #define RX_RECV 0x40 /* received data for this element */
76 #define RX_THR 0x80 /* element has not been sent due to throttle feature */
77 #define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */
78
79 /* get best masking value for can_rx_register() for a given single can_id */
80 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
81 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
82 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
83
84 #define CAN_BCM_VERSION "20170425"
85
86 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
87 MODULE_LICENSE("Dual BSD/GPL");
88 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
89 MODULE_ALIAS("can-proto-2");
90
91 #define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
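/*
 * Userspace usage sketch (not part of this module, error handling omitted):
 * a minimal cyclic transmission via TX_SETUP, assuming a CAN interface
 * "can0" and the headers linux/can.h, linux/can/bcm.h and net/if.h.
 *
 *	struct { struct bcm_msg_head head; struct can_frame frame; } tx = {};
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *
 *	addr.can_ifindex = if_nametoindex("can0");
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	tx.head.opcode = TX_SETUP;
 *	tx.head.flags = SETTIMER | STARTTIMER;	(start sending immediately)
 *	tx.head.ival2.tv_usec = 100000;		(repeat every 100 ms, forever)
 *	tx.head.can_id = 0x123;
 *	tx.head.nframes = 1;
 *	tx.frame.can_id = 0x123;
 *	tx.frame.can_dlc = 2;
 *	tx.frame.data[0] = 0x12;
 *	tx.frame.data[1] = 0x34;
 *	write(s, &tx, sizeof(tx));
 */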
92
93 /*
94 * easy access to the first 64 bits of can(fd)_frame payload. cp->data is
95 * 64 bit aligned so the offset has to be a multiple of 8 which is ensured
96 * by the only callers, bcm_rx_cmp_to_index() and bcm_rx_handler().
97 */
98 static inline u64 get_u64(const struct canfd_frame *cp, int offset)
99 {
100 return *(u64 *)(cp->data + offset);
101 }
102
103 struct bcm_op {
104 struct list_head list;
105 struct rcu_head rcu;
106 int ifindex;
107 canid_t can_id;
108 u32 flags;
109 unsigned long frames_abs, frames_filtered;
110 struct bcm_timeval ival1, ival2;
111 struct hrtimer timer, thrtimer;
112 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
113 int rx_ifindex;
114 int cfsiz;
115 u32 count;
116 u32 nframes;
117 u32 currframe;
118 /* void pointers to arrays of struct can[fd]_frame */
119 void *frames;
120 void *last_frames;
121 struct canfd_frame sframe;
122 struct canfd_frame last_sframe;
123 struct sock *sk;
124 struct net_device *rx_reg_dev;
125 };
126
127 struct bcm_sock {
128 struct sock sk;
129 int bound;
130 int ifindex;
131 struct list_head notifier;
132 struct list_head rx_ops;
133 struct list_head tx_ops;
134 unsigned long dropped_usr_msgs;
135 struct proc_dir_entry *bcm_proc_read;
136 char procname [32]; /* inode number in decimal with \0 */
137 };
138
139 static LIST_HEAD(bcm_notifier_list);
140 static DEFINE_SPINLOCK(bcm_notifier_lock);
141 static struct bcm_sock *bcm_busy_notifier;
142
143 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
144 {
145 return (struct bcm_sock *)sk;
146 }
147
148 static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
149 {
150 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
151 }
152
153 /* check limitations for timeval provided by user */
154 static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
155 {
156 if ((msg_head->ival1.tv_sec < 0) ||
157 (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
158 (msg_head->ival1.tv_usec < 0) ||
159 (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
160 (msg_head->ival2.tv_sec < 0) ||
161 (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
162 (msg_head->ival2.tv_usec < 0) ||
163 (msg_head->ival2.tv_usec >= USEC_PER_SEC))
164 return true;
165
166 return false;
167 }
168
169 #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
170 #define OPSIZ sizeof(struct bcm_op)
171 #define MHSIZ sizeof(struct bcm_msg_head)
172
173 /*
174 * procfs functions
175 */
176 #if IS_ENABLED(CONFIG_PROC_FS)
177 static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
178 {
179 struct net_device *dev;
180
181 if (!ifindex)
182 return "any";
183
184 rcu_read_lock();
185 dev = dev_get_by_index_rcu(net, ifindex);
186 if (dev)
187 strcpy(result, dev->name);
188 else
189 strcpy(result, "???");
190 rcu_read_unlock();
191
192 return result;
193 }
194
195 static int bcm_proc_show(struct seq_file *m, void *v)
196 {
197 char ifname[IFNAMSIZ];
198 struct net *net = m->private;
199 struct sock *sk = (struct sock *)PDE_DATA(m->file->f_inode);
200 struct bcm_sock *bo = bcm_sk(sk);
201 struct bcm_op *op;
202
203 seq_printf(m, ">>> socket %pK", sk->sk_socket);
204 seq_printf(m, " / sk %pK", sk);
205 seq_printf(m, " / bo %pK", bo);
206 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
207 seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
208 seq_printf(m, " <<<\n");
209
210 list_for_each_entry(op, &bo->rx_ops, list) {
211
212 unsigned long reduction;
213
214 /* print only active entries & prevent division by zero */
215 if (!op->frames_abs)
216 continue;
217
218 seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
219 bcm_proc_getifname(net, ifname, op->ifindex));
220
221 if (op->flags & CAN_FD_FRAME)
222 seq_printf(m, "(%u)", op->nframes);
223 else
224 seq_printf(m, "[%u]", op->nframes);
225
226 seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
227
228 if (op->kt_ival1)
229 seq_printf(m, "timeo=%lld ",
230 (long long)ktime_to_us(op->kt_ival1));
231
232 if (op->kt_ival2)
233 seq_printf(m, "thr=%lld ",
234 (long long)ktime_to_us(op->kt_ival2));
235
236 seq_printf(m, "# recv %ld (%ld) => reduction: ",
237 op->frames_filtered, op->frames_abs);
238
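/* percentage of received frames that did not trigger a user notification */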
239 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
240
241 seq_printf(m, "%s%ld%%\n",
242 (reduction == 100) ? "near " : "", reduction);
243 }
244
245 list_for_each_entry(op, &bo->tx_ops, list) {
246
247 seq_printf(m, "tx_op: %03X %s ", op->can_id,
248 bcm_proc_getifname(net, ifname, op->ifindex));
249
250 if (op->flags & CAN_FD_FRAME)
251 seq_printf(m, "(%u) ", op->nframes);
252 else
253 seq_printf(m, "[%u] ", op->nframes);
254
255 if (op->kt_ival1)
256 seq_printf(m, "t1=%lld ",
257 (long long)ktime_to_us(op->kt_ival1));
258
259 if (op->kt_ival2)
260 seq_printf(m, "t2=%lld ",
261 (long long)ktime_to_us(op->kt_ival2));
262
263 seq_printf(m, "# sent %ld\n", op->frames_abs);
264 }
265 seq_putc(m, '\n');
266 return 0;
267 }
268 #endif /* CONFIG_PROC_FS */
269
270 /*
271 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
272 * of the given bcm tx op
273 */
274 static void bcm_can_tx(struct bcm_op *op)
275 {
276 struct sk_buff *skb;
277 struct net_device *dev;
278 struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
279 int err;
280
281 /* no target device? => exit */
282 if (!op->ifindex)
283 return;
284
285 dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
286 if (!dev) {
287 /* RFC: should this bcm_op remove itself here? */
288 return;
289 }
290
291 skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
292 if (!skb)
293 goto out;
294
295 can_skb_reserve(skb);
296 can_skb_prv(skb)->ifindex = dev->ifindex;
297 can_skb_prv(skb)->skbcnt = 0;
298
299 skb_put_data(skb, cf, op->cfsiz);
300
301 /* send with loopback */
302 skb->dev = dev;
303 can_skb_set_owner(skb, op->sk);
304 err = can_send(skb, 1);
305 if (!err)
306 op->frames_abs++;
307
308 op->currframe++;
309
310 /* reached last frame? */
311 if (op->currframe >= op->nframes)
312 op->currframe = 0;
313 out:
314 dev_put(dev);
315 }
316
317 /*
318 * bcm_send_to_user - send a BCM message to userspace
319 * (consisting of bcm_msg_head + x CAN frames)
320 */
321 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
322 struct canfd_frame *frames, int has_timestamp)
323 {
324 struct sk_buff *skb;
325 struct canfd_frame *firstframe;
326 struct sockaddr_can *addr;
327 struct sock *sk = op->sk;
328 unsigned int datalen = head->nframes * op->cfsiz;
329 int err;
330
331 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
332 if (!skb)
333 return;
334
335 skb_put_data(skb, head, sizeof(*head));
336
337 if (head->nframes) {
338 /* CAN frames starting here */
339 firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
340
341 skb_put_data(skb, frames, datalen);
342
343 /*
344 * the BCM uses the flags-element of the canfd_frame
345 * structure for internal purposes. This is only
346 * relevant for updates that are generated by the
347 * BCM, where nframes is 1
348 */
349 if (head->nframes == 1)
350 firstframe->flags &= BCM_CAN_FLAGS_MASK;
351 }
352
353 if (has_timestamp) {
354 /* restore rx timestamp */
355 skb->tstamp = op->rx_stamp;
356 }
357
358 /*
359 * Put the datagram into the receive queue so that bcm_recvmsg() can
360 * get it from there. We need to pass the interface index to
361 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
362 * containing the interface index.
363 */
364
365 sock_skb_cb_check_size(sizeof(struct sockaddr_can));
366 addr = (struct sockaddr_can *)skb->cb;
367 memset(addr, 0, sizeof(*addr));
368 addr->can_family = AF_CAN;
369 addr->can_ifindex = op->rx_ifindex;
370
371 err = sock_queue_rcv_skb(sk, skb);
372 if (err < 0) {
373 struct bcm_sock *bo = bcm_sk(sk);
374
375 kfree_skb(skb);
376 /* don't care about overflows in this statistic */
377 bo->dropped_usr_msgs++;
378 }
379 }
380
381 static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
382 {
383 ktime_t ival;
384
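/*
 * 'count' transmissions are paced by ival1; once the count is used up,
 * transmission continues (if configured) with the ival2 period
 */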
385 if (op->kt_ival1 && op->count)
386 ival = op->kt_ival1;
387 else if (op->kt_ival2)
388 ival = op->kt_ival2;
389 else
390 return false;
391
392 hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
393 return true;
394 }
395
396 static void bcm_tx_start_timer(struct bcm_op *op)
397 {
398 if (bcm_tx_set_expiry(op, &op->timer))
399 hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
400 }
401
402 /* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
403 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
404 {
405 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
406 struct bcm_msg_head msg_head;
407
408 if (op->kt_ival1 && (op->count > 0)) {
409 op->count--;
410 if (!op->count && (op->flags & TX_COUNTEVT)) {
411
412 /* create notification to user */
413 memset(&msg_head, 0, sizeof(msg_head));
414 msg_head.opcode = TX_EXPIRED;
415 msg_head.flags = op->flags;
416 msg_head.count = op->count;
417 msg_head.ival1 = op->ival1;
418 msg_head.ival2 = op->ival2;
419 msg_head.can_id = op->can_id;
420 msg_head.nframes = 0;
421
422 bcm_send_to_user(op, &msg_head, NULL, 0);
423 }
424 bcm_can_tx(op);
425
426 } else if (op->kt_ival2) {
427 bcm_can_tx(op);
428 }
429
430 return bcm_tx_set_expiry(op, &op->timer) ?
431 HRTIMER_RESTART : HRTIMER_NORESTART;
432 }
433
434 /*
435 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
436 */
437 static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
438 {
439 struct bcm_msg_head head;
440
441 /* update statistics */
442 op->frames_filtered++;
443
444 /* prevent statistics overflow */
445 if (op->frames_filtered > ULONG_MAX/100)
446 op->frames_filtered = op->frames_abs = 0;
447
448 /* this element is not throttled anymore */
449 data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
450
451 memset(&head, 0, sizeof(head));
452 head.opcode = RX_CHANGED;
453 head.flags = op->flags;
454 head.count = op->count;
455 head.ival1 = op->ival1;
456 head.ival2 = op->ival2;
457 head.can_id = op->can_id;
458 head.nframes = 1;
459
460 bcm_send_to_user(op, &head, data, 1);
461 }
462
463 /*
464 * bcm_rx_update_and_send - process a detected relevant receive content change
465 * 1. update the last received data
466 * 2. send a notification to the user (if possible)
467 */
468 static void bcm_rx_update_and_send(struct bcm_op *op,
469 struct canfd_frame *lastdata,
470 const struct canfd_frame *rxdata)
471 {
472 memcpy(lastdata, rxdata, op->cfsiz);
473
474 /* mark as used and throttled by default */
475 lastdata->flags |= (RX_RECV|RX_THR);
476
477 /* throttling mode inactive ? */
478 if (!op->kt_ival2) {
479 /* send RX_CHANGED to the user immediately */
480 bcm_rx_changed(op, lastdata);
481 return;
482 }
483
484 /* with active throttling timer we are just done here */
485 if (hrtimer_active(&op->thrtimer))
486 return;
487
488 /* first reception with enabled throttling mode */
489 if (!op->kt_lastmsg)
490 goto rx_changed_settime;
491
492 /* got a second frame inside a potential throttle period? */
493 if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
494 ktime_to_us(op->kt_ival2)) {
495 /* do not send the saved data - only start throttle timer */
496 hrtimer_start(&op->thrtimer,
497 ktime_add(op->kt_lastmsg, op->kt_ival2),
498 HRTIMER_MODE_ABS_SOFT);
499 return;
500 }
501
502 /* the gap was big enough that throttling was not needed here */
503 rx_changed_settime:
504 bcm_rx_changed(op, lastdata);
505 op->kt_lastmsg = ktime_get();
506 }
507
508 /*
509 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
510 * received data stored in op->last_frames[]
511 */
512 static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
513 const struct canfd_frame *rxdata)
514 {
515 struct canfd_frame *cf = op->frames + op->cfsiz * index;
516 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
517 int i;
518
519 /*
520 * no one uses the MSBs of flags for comparison,
521 * so we use them here to detect the first time of reception
522 */
523
524 if (!(lcf->flags & RX_RECV)) {
525 /* received data for the first time => send update to user */
526 bcm_rx_update_and_send(op, lcf, rxdata);
527 return;
528 }
529
530 /* do a real check in CAN frame data section */
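/* op->frames[index] provides the user-supplied compare mask: only masked bits are checked */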
531 for (i = 0; i < rxdata->len; i += 8) {
532 if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
533 (get_u64(cf, i) & get_u64(lcf, i))) {
534 bcm_rx_update_and_send(op, lcf, rxdata);
535 return;
536 }
537 }
538
539 if (op->flags & RX_CHECK_DLC) {
540 /* do a real check in CAN frame length */
541 if (rxdata->len != lcf->len) {
542 bcm_rx_update_and_send(op, lcf, rxdata);
543 return;
544 }
545 }
546 }
547
548 /*
549 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
550 */
551 static void bcm_rx_starttimer(struct bcm_op *op)
552 {
553 if (op->flags & RX_NO_AUTOTIMER)
554 return;
555
556 if (op->kt_ival1)
557 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
558 }
559
560 /* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
561 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
562 {
563 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
564 struct bcm_msg_head msg_head;
565
566 /* if the user wants to be informed when cyclic CAN messages come back */
567 if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
568 /* clear received CAN frames to indicate 'nothing received' */
569 memset(op->last_frames, 0, op->nframes * op->cfsiz);
570 }
571
572 /* create notification to user */
573 memset(&msg_head, 0, sizeof(msg_head));
574 msg_head.opcode = RX_TIMEOUT;
575 msg_head.flags = op->flags;
576 msg_head.count = op->count;
577 msg_head.ival1 = op->ival1;
578 msg_head.ival2 = op->ival2;
579 msg_head.can_id = op->can_id;
580 msg_head.nframes = 0;
581
582 bcm_send_to_user(op, &msg_head, NULL, 0);
583
584 return HRTIMER_NORESTART;
585 }
586
587 /*
588 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
589 */
590 static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
591 {
592 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
593
594 if ((op->last_frames) && (lcf->flags & RX_THR)) {
595 bcm_rx_changed(op, lcf);
596 return 1;
597 }
598 return 0;
599 }
600
601 /*
602 * bcm_rx_thr_flush - Check for throttled data and send it to userspace
603 */
604 static int bcm_rx_thr_flush(struct bcm_op *op)
605 {
606 int updated = 0;
607
608 if (op->nframes > 1) {
609 unsigned int i;
610
611 /* for MUX filter we start at index 1 */
612 for (i = 1; i < op->nframes; i++)
613 updated += bcm_rx_do_flush(op, i);
614
615 } else {
616 /* for RX_FILTER_ID and simple filter */
617 updated += bcm_rx_do_flush(op, 0);
618 }
619
620 return updated;
621 }
622
623 /*
624 * bcm_rx_thr_handler - the time for blocked content updates is over now:
625 * Check for throttled data and send it to userspace
626 */
627 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
628 {
629 struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
630
631 if (bcm_rx_thr_flush(op)) {
632 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
633 return HRTIMER_RESTART;
634 } else {
635 /* rearm throttle handling */
636 op->kt_lastmsg = 0;
637 return HRTIMER_NORESTART;
638 }
639 }
640
641 /*
642 * bcm_rx_handler - handle a CAN frame reception
643 */
644 static void bcm_rx_handler(struct sk_buff *skb, void *data)
645 {
646 struct bcm_op *op = (struct bcm_op *)data;
647 const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
648 unsigned int i;
649
650 if (op->can_id != rxframe->can_id)
651 return;
652
653 /* make sure to handle the correct frame type (CAN / CAN FD) */
654 if (skb->len != op->cfsiz)
655 return;
656
657 /* disable timeout */
658 hrtimer_cancel(&op->timer);
659
660 /* save rx timestamp */
661 op->rx_stamp = skb->tstamp;
662 /* save originator for recvfrom() */
663 op->rx_ifindex = skb->dev->ifindex;
664 /* update statistics */
665 op->frames_abs++;
666
667 if (op->flags & RX_RTR_FRAME) {
668 /* send reply for RTR-request (placed in op->frames[0]) */
669 bcm_can_tx(op);
670 return;
671 }
672
673 if (op->flags & RX_FILTER_ID) {
674 /* the easiest case */
675 bcm_rx_update_and_send(op, op->last_frames, rxframe);
676 goto rx_starttimer;
677 }
678
679 if (op->nframes == 1) {
680 /* simple compare with index 0 */
681 bcm_rx_cmp_to_index(op, 0, rxframe);
682 goto rx_starttimer;
683 }
684
685 if (op->nframes > 1) {
686 /*
687 * multiplex compare
688 *
689 * find the first multiplex mask that fits.
690 * Remark: The MUX-mask is stored in index 0 - but only the
691 * first 64 bits of the frame data[] are relevant (CAN FD)
692 */
693
694 for (i = 1; i < op->nframes; i++) {
695 if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
696 (get_u64(op->frames, 0) &
697 get_u64(op->frames + op->cfsiz * i, 0))) {
698 bcm_rx_cmp_to_index(op, i, rxframe);
699 break;
700 }
701 }
702 }
703
704 rx_starttimer:
705 bcm_rx_starttimer(op);
706 }
707
708 /*
709 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
710 */
711 static struct bcm_op *bcm_find_op(struct list_head *ops,
712 struct bcm_msg_head *mh, int ifindex)
713 {
714 struct bcm_op *op;
715
716 list_for_each_entry(op, ops, list) {
717 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
718 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
719 return op;
720 }
721
722 return NULL;
723 }
724
725 static void bcm_free_op_rcu(struct rcu_head *rcu_head)
726 {
727 struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu);
728
729 if ((op->frames) && (op->frames != &op->sframe))
730 kfree(op->frames);
731
732 if ((op->last_frames) && (op->last_frames != &op->last_sframe))
733 kfree(op->last_frames);
734
735 kfree(op);
736 }
737
738 static void bcm_remove_op(struct bcm_op *op)
739 {
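/* bcm_rx_handler() may still run concurrently -> defer freeing via RCU */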
740 hrtimer_cancel(&op->timer);
741 hrtimer_cancel(&op->thrtimer);
742
743 call_rcu(&op->rcu, bcm_free_op_rcu);
744 }
745
746 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
747 {
748 if (op->rx_reg_dev == dev) {
749 can_rx_unregister(dev_net(dev), dev, op->can_id,
750 REGMASK(op->can_id), bcm_rx_handler, op);
751
752 /* mark subscription as removed */
753 op->rx_reg_dev = NULL;
754 } else
755 printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
756 "mismatch %p %p\n", op->rx_reg_dev, dev);
757 }
758
759 /*
760 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
761 */
762 static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
763 int ifindex)
764 {
765 struct bcm_op *op, *n;
766
767 list_for_each_entry_safe(op, n, ops, list) {
768 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
769 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
770
771 /* disable automatic timer on frame reception */
772 op->flags |= RX_NO_AUTOTIMER;
773
774 /*
775 * Don't care if we're bound or not (due to netdev
776 * problems) can_rx_unregister() is always a safe
777 * thing to do here.
778 */
779 if (op->ifindex) {
780 /*
781 * Only remove subscriptions that had not
782 * been removed due to NETDEV_UNREGISTER
783 * in bcm_notifier()
784 */
785 if (op->rx_reg_dev) {
786 struct net_device *dev;
787
788 dev = dev_get_by_index(sock_net(op->sk),
789 op->ifindex);
790 if (dev) {
791 bcm_rx_unreg(dev, op);
792 dev_put(dev);
793 }
794 }
795 } else
796 can_rx_unregister(sock_net(op->sk), NULL,
797 op->can_id,
798 REGMASK(op->can_id),
799 bcm_rx_handler, op);
800
801 list_del(&op->list);
802 bcm_remove_op(op);
803 return 1; /* done */
804 }
805 }
806
807 return 0; /* not found */
808 }
809
810 /*
811 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
812 */
813 static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
814 int ifindex)
815 {
816 struct bcm_op *op, *n;
817
818 list_for_each_entry_safe(op, n, ops, list) {
819 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
820 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
821 list_del(&op->list);
822 bcm_remove_op(op);
823 return 1; /* done */
824 }
825 }
826
827 return 0; /* not found */
828 }
829
830 /*
831 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
832 */
833 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
834 int ifindex)
835 {
836 struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);
837
838 if (!op)
839 return -EINVAL;
840
841 /* put current values into msg_head */
842 msg_head->flags = op->flags;
843 msg_head->count = op->count;
844 msg_head->ival1 = op->ival1;
845 msg_head->ival2 = op->ival2;
846 msg_head->nframes = op->nframes;
847
848 bcm_send_to_user(op, msg_head, op->frames, 0);
849
850 return MHSIZ;
851 }
852
853 /*
854 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
855 */
856 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
857 int ifindex, struct sock *sk)
858 {
859 struct bcm_sock *bo = bcm_sk(sk);
860 struct bcm_op *op;
861 struct canfd_frame *cf;
862 unsigned int i;
863 int err;
864
865 /* we need a real device to send frames */
866 if (!ifindex)
867 return -ENODEV;
868
869 /* check nframes boundaries - we need at least one CAN frame */
870 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
871 return -EINVAL;
872
873 /* check timeval limitations */
874 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
875 return -EINVAL;
876
877 /* check the given can_id */
878 op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
879 if (op) {
880 /* update existing BCM operation */
881
882 /*
883 * Do we need more space for the CAN frames than currently
884 * allocated? -> This is a _really_ unusual use-case and
885 * therefore (complexity / locking) it is not supported.
886 */
887 if (msg_head->nframes > op->nframes)
888 return -E2BIG;
889
890 /* update CAN frames content */
891 for (i = 0; i < msg_head->nframes; i++) {
892
893 cf = op->frames + op->cfsiz * i;
894 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
895
896 if (op->flags & CAN_FD_FRAME) {
897 if (cf->len > 64)
898 err = -EINVAL;
899 } else {
900 if (cf->len > 8)
901 err = -EINVAL;
902 }
903
904 if (err < 0)
905 return err;
906
907 if (msg_head->flags & TX_CP_CAN_ID) {
908 /* copy can_id into frame */
909 cf->can_id = msg_head->can_id;
910 }
911 }
912 op->flags = msg_head->flags;
913
914 } else {
915 /* insert new BCM operation for the given can_id */
916
917 op = kzalloc(OPSIZ, GFP_KERNEL);
918 if (!op)
919 return -ENOMEM;
920
921 op->can_id = msg_head->can_id;
922 op->cfsiz = CFSIZ(msg_head->flags);
923 op->flags = msg_head->flags;
924
925 /* create array for CAN frames and copy the data */
926 if (msg_head->nframes > 1) {
927 op->frames = kmalloc_array(msg_head->nframes,
928 op->cfsiz,
929 GFP_KERNEL);
930 if (!op->frames) {
931 kfree(op);
932 return -ENOMEM;
933 }
934 } else
935 op->frames = &op->sframe;
936
937 for (i = 0; i < msg_head->nframes; i++) {
938
939 cf = op->frames + op->cfsiz * i;
940 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
941 if (err < 0)
942 goto free_op;
943
944 if (op->flags & CAN_FD_FRAME) {
945 if (cf->len > 64)
946 err = -EINVAL;
947 } else {
948 if (cf->len > 8)
949 err = -EINVAL;
950 }
951
952 if (err < 0)
953 goto free_op;
954
955 if (msg_head->flags & TX_CP_CAN_ID) {
956 /* copy can_id into frame */
957 cf->can_id = msg_head->can_id;
958 }
959 }
960
961 /* tx_ops never compare with previous received messages */
962 op->last_frames = NULL;
963
964 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
965 op->sk = sk;
966 op->ifindex = ifindex;
967
968 /* initialize uninitialized (kzalloc) structure */
969 hrtimer_init(&op->timer, CLOCK_MONOTONIC,
970 HRTIMER_MODE_REL_SOFT);
971 op->timer.function = bcm_tx_timeout_handler;
972
973 /* currently unused in tx_ops */
974 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
975 HRTIMER_MODE_REL_SOFT);
976
977 /* add this bcm_op to the list of the tx_ops */
978 list_add(&op->list, &bo->tx_ops);
979
980 } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head, ifindex))) */
981
982 if (op->nframes != msg_head->nframes) {
983 op->nframes = msg_head->nframes;
984 /* start multiple frame transmission with index 0 */
985 op->currframe = 0;
986 }
987
988 /* check flags */
989
990 if (op->flags & TX_RESET_MULTI_IDX) {
991 /* start multiple frame transmission with index 0 */
992 op->currframe = 0;
993 }
994
995 if (op->flags & SETTIMER) {
996 /* set timer values */
997 op->count = msg_head->count;
998 op->ival1 = msg_head->ival1;
999 op->ival2 = msg_head->ival2;
1000 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
1001 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1002
1003 /* disable an active timer due to zero values? */
1004 if (!op->kt_ival1 && !op->kt_ival2)
1005 hrtimer_cancel(&op->timer);
1006 }
1007
1008 if (op->flags & STARTTIMER) {
1009 hrtimer_cancel(&op->timer);
1010 /* spec: send CAN frame when starting timer */
1011 op->flags |= TX_ANNOUNCE;
1012 }
1013
1014 if (op->flags & TX_ANNOUNCE) {
1015 bcm_can_tx(op);
1016 if (op->count)
1017 op->count--;
1018 }
1019
1020 if (op->flags & STARTTIMER)
1021 bcm_tx_start_timer(op);
1022
1023 return msg_head->nframes * op->cfsiz + MHSIZ;
1024
1025 free_op:
1026 if (op->frames != &op->sframe)
1027 kfree(op->frames);
1028 kfree(op);
1029 return err;
1030 }
1031
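/*
 * Userspace usage sketch (not part of this module, error handling omitted):
 * content filtering of CAN ID 0x123 via RX_SETUP on a connected CAN_BCM
 * socket 's' (see the TX_SETUP sketch near the top of this file). Only
 * changes in the two masked data bytes generate RX_CHANGED messages, and
 * RX_TIMEOUT is reported when the cyclic message stops for 5 seconds.
 *
 *	struct { struct bcm_msg_head head; struct can_frame frame; } rx = {};
 *
 *	rx.head.opcode = RX_SETUP;
 *	rx.head.flags = SETTIMER;
 *	rx.head.ival1.tv_sec = 5;	(timeout monitoring)
 *	rx.head.can_id = 0x123;
 *	rx.head.nframes = 1;
 *	rx.frame.can_id = 0x123;	(frame content acts as compare mask)
 *	rx.frame.data[0] = 0xff;
 *	rx.frame.data[1] = 0xff;
 *	write(s, &rx, sizeof(rx));
 *
 *	read(s, &rx, sizeof(rx));	(returns bcm_msg_head + changed frame)
 */
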
1032 /*
1033 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
1034 */
1035 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1036 int ifindex, struct sock *sk)
1037 {
1038 struct bcm_sock *bo = bcm_sk(sk);
1039 struct bcm_op *op;
1040 int do_rx_register;
1041 int err = 0;
1042
1043 if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
1044 /* be robust against wrong usage ... */
1045 msg_head->flags |= RX_FILTER_ID;
1046 /* ignore trailing garbage */
1047 msg_head->nframes = 0;
1048 }
1049
1050 /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
1051 if (msg_head->nframes > MAX_NFRAMES + 1)
1052 return -EINVAL;
1053
1054 if ((msg_head->flags & RX_RTR_FRAME) &&
1055 ((msg_head->nframes != 1) ||
1056 (!(msg_head->can_id & CAN_RTR_FLAG))))
1057 return -EINVAL;
1058
1059 /* check timeval limitations */
1060 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
1061 return -EINVAL;
1062
1063 /* check the given can_id */
1064 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
1065 if (op) {
1066 /* update existing BCM operation */
1067
1068 /*
1069 * Do we need more space for the CAN frames than currently
1070 * allocated? -> This is a _really_ unusual use-case and
1071 * therefore (complexity / locking) it is not supported.
1072 */
1073 if (msg_head->nframes > op->nframes)
1074 return -E2BIG;
1075
1076 if (msg_head->nframes) {
1077 /* update CAN frames content */
1078 err = memcpy_from_msg(op->frames, msg,
1079 msg_head->nframes * op->cfsiz);
1080 if (err < 0)
1081 return err;
1082
1083 /* clear last_frames to indicate 'nothing received' */
1084 memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
1085 }
1086
1087 op->nframes = msg_head->nframes;
1088 op->flags = msg_head->flags;
1089
1090 /* Only an update -> do not call can_rx_register() */
1091 do_rx_register = 0;
1092
1093 } else {
1094 /* insert new BCM operation for the given can_id */
1095 op = kzalloc(OPSIZ, GFP_KERNEL);
1096 if (!op)
1097 return -ENOMEM;
1098
1099 op->can_id = msg_head->can_id;
1100 op->nframes = msg_head->nframes;
1101 op->cfsiz = CFSIZ(msg_head->flags);
1102 op->flags = msg_head->flags;
1103
1104 if (msg_head->nframes > 1) {
1105 /* create array for CAN frames and copy the data */
1106 op->frames = kmalloc_array(msg_head->nframes,
1107 op->cfsiz,
1108 GFP_KERNEL);
1109 if (!op->frames) {
1110 kfree(op);
1111 return -ENOMEM;
1112 }
1113
1114 /* create and init array for received CAN frames */
1115 op->last_frames = kcalloc(msg_head->nframes,
1116 op->cfsiz,
1117 GFP_KERNEL);
1118 if (!op->last_frames) {
1119 kfree(op->frames);
1120 kfree(op);
1121 return -ENOMEM;
1122 }
1123
1124 } else {
1125 op->frames = &op->sframe;
1126 op->last_frames = &op->last_sframe;
1127 }
1128
1129 if (msg_head->nframes) {
1130 err = memcpy_from_msg(op->frames, msg,
1131 msg_head->nframes * op->cfsiz);
1132 if (err < 0) {
1133 if (op->frames != &op->sframe)
1134 kfree(op->frames);
1135 if (op->last_frames != &op->last_sframe)
1136 kfree(op->last_frames);
1137 kfree(op);
1138 return err;
1139 }
1140 }
1141
1142 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1143 op->sk = sk;
1144 op->ifindex = ifindex;
1145
1146 /* ifindex for timeout events w/o previous frame reception */
1147 op->rx_ifindex = ifindex;
1148
1149 /* initialize uninitialized (kzalloc) structure */
1150 hrtimer_init(&op->timer, CLOCK_MONOTONIC,
1151 HRTIMER_MODE_REL_SOFT);
1152 op->timer.function = bcm_rx_timeout_handler;
1153
1154 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
1155 HRTIMER_MODE_REL_SOFT);
1156 op->thrtimer.function = bcm_rx_thr_handler;
1157
1158 /* add this bcm_op to the list of the rx_ops */
1159 list_add(&op->list, &bo->rx_ops);
1160
1161 /* call can_rx_register() */
1162 do_rx_register = 1;
1163
1164 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head, ifindex))) */
1165
1166 /* check flags */
1167
1168 if (op->flags & RX_RTR_FRAME) {
1169 struct canfd_frame *frame0 = op->frames;
1170
1171 /* no timers in RTR-mode */
1172 hrtimer_cancel(&op->thrtimer);
1173 hrtimer_cancel(&op->timer);
1174
1175 /*
1176 * funny feature in RX(!)_SETUP only for RTR-mode:
1177 * copy can_id into frame BUT without RTR-flag to
1178 * prevent a full-load-loopback-test ... ;-]
1179 */
1180 if ((op->flags & TX_CP_CAN_ID) ||
1181 (frame0->can_id == op->can_id))
1182 frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
1183
1184 } else {
1185 if (op->flags & SETTIMER) {
1186
1187 /* set timer value */
1188 op->ival1 = msg_head->ival1;
1189 op->ival2 = msg_head->ival2;
1190 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
1191 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1192
1193 /* disable an active timer due to zero value? */
1194 if (!op->kt_ival1)
1195 hrtimer_cancel(&op->timer);
1196
1197 /*
1198 * In any case cancel the throttle timer, flush
1199 * potentially blocked msgs and reset throttle handling
1200 */
1201 op->kt_lastmsg = 0;
1202 hrtimer_cancel(&op->thrtimer);
1203 bcm_rx_thr_flush(op);
1204 }
1205
1206 if ((op->flags & STARTTIMER) && op->kt_ival1)
1207 hrtimer_start(&op->timer, op->kt_ival1,
1208 HRTIMER_MODE_REL_SOFT);
1209 }
1210
1211 /* now we can register for can_ids, if we added a new bcm_op */
1212 if (do_rx_register) {
1213 if (ifindex) {
1214 struct net_device *dev;
1215
1216 dev = dev_get_by_index(sock_net(sk), ifindex);
1217 if (dev) {
1218 err = can_rx_register(sock_net(sk), dev,
1219 op->can_id,
1220 REGMASK(op->can_id),
1221 bcm_rx_handler, op,
1222 "bcm", sk);
1223
1224 op->rx_reg_dev = dev;
1225 dev_put(dev);
1226 }
1227
1228 } else
1229 err = can_rx_register(sock_net(sk), NULL, op->can_id,
1230 REGMASK(op->can_id),
1231 bcm_rx_handler, op, "bcm", sk);
1232 if (err) {
1233 /* this bcm rx op is broken -> remove it */
1234 list_del(&op->list);
1235 bcm_remove_op(op);
1236 return err;
1237 }
1238 }
1239
1240 return msg_head->nframes * op->cfsiz + MHSIZ;
1241 }
1242
1243 /*
1244 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1245 */
1246 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
1247 int cfsiz)
1248 {
1249 struct sk_buff *skb;
1250 struct net_device *dev;
1251 int err;
1252
1253 /* we need a real device to send frames */
1254 if (!ifindex)
1255 return -ENODEV;
1256
1257 skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
1258 if (!skb)
1259 return -ENOMEM;
1260
1261 can_skb_reserve(skb);
1262
1263 err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
1264 if (err < 0) {
1265 kfree_skb(skb);
1266 return err;
1267 }
1268
1269 dev = dev_get_by_index(sock_net(sk), ifindex);
1270 if (!dev) {
1271 kfree_skb(skb);
1272 return -ENODEV;
1273 }
1274
1275 can_skb_prv(skb)->ifindex = dev->ifindex;
1276 can_skb_prv(skb)->skbcnt = 0;
1277 skb->dev = dev;
1278 can_skb_set_owner(skb, sk);
1279 err = can_send(skb, 1); /* send with loopback */
1280 dev_put(dev);
1281
1282 if (err)
1283 return err;
1284
1285 return cfsiz + MHSIZ;
1286 }
1287
1288 /*
1289 * bcm_sendmsg - process BCM commands (opcodes) from userspace
1290 */
1291 static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1292 {
1293 struct sock *sk = sock->sk;
1294 struct bcm_sock *bo = bcm_sk(sk);
1295 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1296 struct bcm_msg_head msg_head;
1297 int cfsiz;
1298 int ret; /* read bytes or error codes as return value */
1299
1300 if (!bo->bound)
1301 return -ENOTCONN;
1302
1303 /* check for valid message length from userspace */
1304 if (size < MHSIZ)
1305 return -EINVAL;
1306
1307 /* read message head information */
1308 ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
1309 if (ret < 0)
1310 return ret;
1311
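/* any data behind the message head must be a whole number of CAN frames */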
1312 cfsiz = CFSIZ(msg_head.flags);
1313 if ((size - MHSIZ) % cfsiz)
1314 return -EINVAL;
1315
1316 /* check for alternative ifindex for this bcm_op */
1317
1318 if (!ifindex && msg->msg_name) {
1319 /* no bound device as default => check msg_name */
1320 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
1321
1322 if (msg->msg_namelen < BCM_MIN_NAMELEN)
1323 return -EINVAL;
1324
1325 if (addr->can_family != AF_CAN)
1326 return -EINVAL;
1327
1328 /* ifindex from sendto() */
1329 ifindex = addr->can_ifindex;
1330
1331 if (ifindex) {
1332 struct net_device *dev;
1333
1334 dev = dev_get_by_index(sock_net(sk), ifindex);
1335 if (!dev)
1336 return -ENODEV;
1337
1338 if (dev->type != ARPHRD_CAN) {
1339 dev_put(dev);
1340 return -ENODEV;
1341 }
1342
1343 dev_put(dev);
1344 }
1345 }
1346
1347 lock_sock(sk);
1348
1349 switch (msg_head.opcode) {
1350
1351 case TX_SETUP:
1352 ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1353 break;
1354
1355 case RX_SETUP:
1356 ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1357 break;
1358
1359 case TX_DELETE:
1360 if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
1361 ret = MHSIZ;
1362 else
1363 ret = -EINVAL;
1364 break;
1365
1366 case RX_DELETE:
1367 if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
1368 ret = MHSIZ;
1369 else
1370 ret = -EINVAL;
1371 break;
1372
1373 case TX_READ:
1374 /* reuse msg_head for the reply to TX_READ */
1375 msg_head.opcode = TX_STATUS;
1376 ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1377 break;
1378
1379 case RX_READ:
1380 /* reuse msg_head for the reply to RX_READ */
1381 msg_head.opcode = RX_STATUS;
1382 ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1383 break;
1384
1385 case TX_SEND:
1386 /* we need exactly one CAN frame behind the msg head */
1387 if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
1388 ret = -EINVAL;
1389 else
1390 ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
1391 break;
1392
1393 default:
1394 ret = -EINVAL;
1395 break;
1396 }
1397
1398 release_sock(sk);
1399
1400 return ret;
1401 }
1402
1403 /*
1404 * notification handler for netdevice status changes
1405 */
1406 static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
1407 struct net_device *dev)
1408 {
1409 struct sock *sk = &bo->sk;
1410 struct bcm_op *op;
1411 int notify_enodev = 0;
1412
1413 if (!net_eq(dev_net(dev), sock_net(sk)))
1414 return;
1415
1416 switch (msg) {
1417
1418 case NETDEV_UNREGISTER:
1419 lock_sock(sk);
1420
1421 /* remove device specific receive entries */
1422 list_for_each_entry(op, &bo->rx_ops, list)
1423 if (op->rx_reg_dev == dev)
1424 bcm_rx_unreg(dev, op);
1425
1426 /* remove device reference, if this is our bound device */
1427 if (bo->bound && bo->ifindex == dev->ifindex) {
1428 bo->bound = 0;
1429 bo->ifindex = 0;
1430 notify_enodev = 1;
1431 }
1432
1433 release_sock(sk);
1434
1435 if (notify_enodev) {
1436 sk->sk_err = ENODEV;
1437 if (!sock_flag(sk, SOCK_DEAD))
1438 sk->sk_error_report(sk);
1439 }
1440 break;
1441
1442 case NETDEV_DOWN:
1443 if (bo->bound && bo->ifindex == dev->ifindex) {
1444 sk->sk_err = ENETDOWN;
1445 if (!sock_flag(sk, SOCK_DEAD))
1446 sk->sk_error_report(sk);
1447 }
1448 }
1449 }
1450
1451 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1452 void *ptr)
1453 {
1454 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1455
1456 if (dev->type != ARPHRD_CAN)
1457 return NOTIFY_DONE;
1458 if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
1459 return NOTIFY_DONE;
1460 if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
1461 return NOTIFY_DONE;
1462
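/*
 * Iterate with the lock dropped around bcm_notify() (which takes the
 * socket lock); bcm_busy_notifier marks the entry in flight so that
 * bcm_release() can wait for it before unlinking the socket.
 */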
1463 spin_lock(&bcm_notifier_lock);
1464 list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
1465 spin_unlock(&bcm_notifier_lock);
1466 bcm_notify(bcm_busy_notifier, msg, dev);
1467 spin_lock(&bcm_notifier_lock);
1468 }
1469 bcm_busy_notifier = NULL;
1470 spin_unlock(&bcm_notifier_lock);
1471 return NOTIFY_DONE;
1472 }
1473
1474 /*
1475 * initial settings for all BCM sockets to be set at socket creation time
1476 */
1477 static int bcm_init(struct sock *sk)
1478 {
1479 struct bcm_sock *bo = bcm_sk(sk);
1480
1481 bo->bound = 0;
1482 bo->ifindex = 0;
1483 bo->dropped_usr_msgs = 0;
1484 bo->bcm_proc_read = NULL;
1485
1486 INIT_LIST_HEAD(&bo->tx_ops);
1487 INIT_LIST_HEAD(&bo->rx_ops);
1488
1489 /* set notifier */
1490 spin_lock(&bcm_notifier_lock);
1491 list_add_tail(&bo->notifier, &bcm_notifier_list);
1492 spin_unlock(&bcm_notifier_lock);
1493
1494 return 0;
1495 }
1496
1497 /*
1498 * standard socket functions
1499 */
1500 static int bcm_release(struct socket *sock)
1501 {
1502 struct sock *sk = sock->sk;
1503 struct net *net;
1504 struct bcm_sock *bo;
1505 struct bcm_op *op, *next;
1506
1507 if (!sk)
1508 return 0;
1509
1510 net = sock_net(sk);
1511 bo = bcm_sk(sk);
1512
1513 /* remove bcm_ops, timer, rx_unregister(), etc. */
1514
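/* wait until a bcm_notify() that is currently handling this socket has finished */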
1515 spin_lock(&bcm_notifier_lock);
1516 while (bcm_busy_notifier == bo) {
1517 spin_unlock(&bcm_notifier_lock);
1518 schedule_timeout_uninterruptible(1);
1519 spin_lock(&bcm_notifier_lock);
1520 }
1521 list_del(&bo->notifier);
1522 spin_unlock(&bcm_notifier_lock);
1523
1524 lock_sock(sk);
1525
1526 #if IS_ENABLED(CONFIG_PROC_FS)
1527 /* remove procfs entry */
1528 if (net->can.bcmproc_dir && bo->bcm_proc_read)
1529 remove_proc_entry(bo->procname, net->can.bcmproc_dir);
1530 #endif /* CONFIG_PROC_FS */
1531
1532 list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1533 bcm_remove_op(op);
1534
1535 list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1536 /*
1537 * Don't care if we're bound or not (due to netdev problems)
1538 * can_rx_unregister() is always a safe thing to do here.
1539 */
1540 if (op->ifindex) {
1541 /*
1542 * Only remove subscriptions that had not
1543 * been removed due to NETDEV_UNREGISTER
1544 * in bcm_notifier()
1545 */
1546 if (op->rx_reg_dev) {
1547 struct net_device *dev;
1548
1549 dev = dev_get_by_index(net, op->ifindex);
1550 if (dev) {
1551 bcm_rx_unreg(dev, op);
1552 dev_put(dev);
1553 }
1554 }
1555 } else
1556 can_rx_unregister(net, NULL, op->can_id,
1557 REGMASK(op->can_id),
1558 bcm_rx_handler, op);
1559
1560 }
1561
1562 synchronize_rcu();
1563
1564 list_for_each_entry_safe(op, next, &bo->rx_ops, list)
1565 bcm_remove_op(op);
1566
1567 /* remove device reference */
1568 if (bo->bound) {
1569 bo->bound = 0;
1570 bo->ifindex = 0;
1571 }
1572
1573 sock_orphan(sk);
1574 sock->sk = NULL;
1575
1576 release_sock(sk);
1577 sock_put(sk);
1578
1579 return 0;
1580 }
1581
1582 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1583 int flags)
1584 {
1585 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1586 struct sock *sk = sock->sk;
1587 struct bcm_sock *bo = bcm_sk(sk);
1588 struct net *net = sock_net(sk);
1589 int ret = 0;
1590
1591 if (len < BCM_MIN_NAMELEN)
1592 return -EINVAL;
1593
1594 lock_sock(sk);
1595
1596 if (bo->bound) {
1597 ret = -EISCONN;
1598 goto fail;
1599 }
1600
1601 /* bind a device to this socket */
1602 if (addr->can_ifindex) {
1603 struct net_device *dev;
1604
1605 dev = dev_get_by_index(net, addr->can_ifindex);
1606 if (!dev) {
1607 ret = -ENODEV;
1608 goto fail;
1609 }
1610 if (dev->type != ARPHRD_CAN) {
1611 dev_put(dev);
1612 ret = -ENODEV;
1613 goto fail;
1614 }
1615
1616 bo->ifindex = dev->ifindex;
1617 dev_put(dev);
1618
1619 } else {
1620 /* no interface reference for ifindex = 0 ('any' CAN device) */
1621 bo->ifindex = 0;
1622 }
1623
1624 #if IS_ENABLED(CONFIG_PROC_FS)
1625 if (net->can.bcmproc_dir) {
1626 /* unique socket address as filename */
1627 sprintf(bo->procname, "%lu", sock_i_ino(sk));
1628 bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
1629 net->can.bcmproc_dir,
1630 bcm_proc_show, sk);
1631 if (!bo->bcm_proc_read) {
1632 ret = -ENOMEM;
1633 goto fail;
1634 }
1635 }
1636 #endif /* CONFIG_PROC_FS */
1637
1638 bo->bound = 1;
1639
1640 fail:
1641 release_sock(sk);
1642
1643 return ret;
1644 }
1645
1646 static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1647 int flags)
1648 {
1649 struct sock *sk = sock->sk;
1650 struct sk_buff *skb;
1651 int error = 0;
1652 int noblock;
1653 int err;
1654
1655 noblock = flags & MSG_DONTWAIT;
1656 flags &= ~MSG_DONTWAIT;
1657 skb = skb_recv_datagram(sk, flags, noblock, &error);
1658 if (!skb)
1659 return error;
1660
1661 if (skb->len < size)
1662 size = skb->len;
1663
1664 err = memcpy_to_msg(msg, skb->data, size);
1665 if (err < 0) {
1666 skb_free_datagram(sk, skb);
1667 return err;
1668 }
1669
1670 sock_recv_ts_and_drops(msg, sk, skb);
1671
1672 if (msg->msg_name) {
1673 __sockaddr_check_size(BCM_MIN_NAMELEN);
1674 msg->msg_namelen = BCM_MIN_NAMELEN;
1675 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1676 }
1677
1678 skb_free_datagram(sk, skb);
1679
1680 return size;
1681 }
1682
1683 static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
1684 unsigned long arg)
1685 {
1686 /* no ioctls for socket layer -> hand it down to NIC layer */
1687 return -ENOIOCTLCMD;
1688 }
1689
1690 static const struct proto_ops bcm_ops = {
1691 .family = PF_CAN,
1692 .release = bcm_release,
1693 .bind = sock_no_bind,
1694 .connect = bcm_connect,
1695 .socketpair = sock_no_socketpair,
1696 .accept = sock_no_accept,
1697 .getname = sock_no_getname,
1698 .poll = datagram_poll,
1699 .ioctl = bcm_sock_no_ioctlcmd,
1700 .gettstamp = sock_gettstamp,
1701 .listen = sock_no_listen,
1702 .shutdown = sock_no_shutdown,
1703 .setsockopt = sock_no_setsockopt,
1704 .getsockopt = sock_no_getsockopt,
1705 .sendmsg = bcm_sendmsg,
1706 .recvmsg = bcm_recvmsg,
1707 .mmap = sock_no_mmap,
1708 .sendpage = sock_no_sendpage,
1709 };
1710
1711 static struct proto bcm_proto __read_mostly = {
1712 .name = "CAN_BCM",
1713 .owner = THIS_MODULE,
1714 .obj_size = sizeof(struct bcm_sock),
1715 .init = bcm_init,
1716 };
1717
1718 static const struct can_proto bcm_can_proto = {
1719 .type = SOCK_DGRAM,
1720 .protocol = CAN_BCM,
1721 .ops = &bcm_ops,
1722 .prot = &bcm_proto,
1723 };
1724
1725 static int canbcm_pernet_init(struct net *net)
1726 {
1727 #if IS_ENABLED(CONFIG_PROC_FS)
1728 /* create /proc/net/can-bcm directory */
1729 net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
1730 #endif /* CONFIG_PROC_FS */
1731
1732 return 0;
1733 }
1734
1735 static void canbcm_pernet_exit(struct net *net)
1736 {
1737 #if IS_ENABLED(CONFIG_PROC_FS)
1738 /* remove /proc/net/can-bcm directory */
1739 if (net->can.bcmproc_dir)
1740 remove_proc_entry("can-bcm", net->proc_net);
1741 #endif /* CONFIG_PROC_FS */
1742 }
1743
1744 static struct pernet_operations canbcm_pernet_ops __read_mostly = {
1745 .init = canbcm_pernet_init,
1746 .exit = canbcm_pernet_exit,
1747 };
1748
1749 static struct notifier_block canbcm_notifier = {
1750 .notifier_call = bcm_notifier
1751 };
1752
1753 static int __init bcm_module_init(void)
1754 {
1755 int err;
1756
1757 pr_info("can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n");
1758
1759 err = can_proto_register(&bcm_can_proto);
1760 if (err < 0) {
1761 printk(KERN_ERR "can: registration of bcm protocol failed\n");
1762 return err;
1763 }
1764
1765 register_pernet_subsys(&canbcm_pernet_ops);
1766 register_netdevice_notifier(&canbcm_notifier);
1767 return 0;
1768 }
1769
1770 static void __exit bcm_module_exit(void)
1771 {
1772 can_proto_unregister(&bcm_can_proto);
1773 unregister_netdevice_notifier(&canbcm_notifier);
1774 unregister_pernet_subsys(&canbcm_pernet_ops);
1775 }
1776
1777 module_init(bcm_module_init);
1778 module_exit(bcm_module_exit);
1779