1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
4 *
5 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Volkswagen nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * Alternatively, provided that this notice is retained in full, this
21 * software may be distributed under the terms of the GNU General
22 * Public License ("GPL") version 2, in which case the provisions of the
23 * GPL apply INSTEAD OF those given above.
24 *
25 * The provided data structures and external interfaces from this code
26 * are not restricted to be used by modules with a GPL compatible license.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
39 * DAMAGE.
40 *
41 */
42
43 #include <linux/module.h>
44 #include <linux/init.h>
45 #include <linux/interrupt.h>
46 #include <linux/hrtimer.h>
47 #include <linux/list.h>
48 #include <linux/proc_fs.h>
49 #include <linux/seq_file.h>
50 #include <linux/uio.h>
51 #include <linux/net.h>
52 #include <linux/netdevice.h>
53 #include <linux/socket.h>
54 #include <linux/if_arp.h>
55 #include <linux/skbuff.h>
56 #include <linux/can.h>
57 #include <linux/can/core.h>
58 #include <linux/can/skb.h>
59 #include <linux/can/bcm.h>
60 #include <linux/slab.h>
61 #include <net/sock.h>
62 #include <net/net_namespace.h>
63
64 /*
65 * To send multiple CAN frame contents within TX_SETUP or to filter
66 * CAN messages with a multiplex index within RX_SETUP, the number of
67 * different filters is limited to 256 due to the one-byte index value.
68 */
69 #define MAX_NFRAMES 256
70
71 /* limit timers to 400 days for sending/timeouts */
72 #define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
73
74 /* use of last_frames[index].flags */
75 #define RX_RECV 0x40 /* received data for this element */
76 #define RX_THR 0x80 /* element has not been sent due to the throttle feature */
77 #define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */
78
79 /* get best masking value for can_rx_register() for a given single can_id */
80 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
81 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
82 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
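
/*
 * Illustrative (not authoritative) REGMASK examples, using the mask values
 * from <linux/can.h>:
 *
 *   REGMASK(0x123)                     -> CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG
 *                                         (0xC00007FF)
 *   REGMASK(0x12345678 | CAN_EFF_FLAG) -> CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG
 *                                         (0xDFFFFFFF)
 *
 * Including the EFF/RTR flag bits in the mask makes can_rx_register()
 * deliver only frames of exactly the requested type (SFF vs. EFF,
 * data vs. RTR as encoded in the given can_id).
 */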
83
84 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
85 MODULE_LICENSE("Dual BSD/GPL");
86 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
87 MODULE_ALIAS("can-proto-2");
88
89 #define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
90
91 /*
92 * easy access to the first 64 bits of the can(fd)_frame payload. cp->data is
93 * 64 bit aligned so the offset has to be a multiple of 8 which is ensured
94 * by the only callers, bcm_rx_cmp_to_index() and bcm_rx_handler().
95 */
96 static inline u64 get_u64(const struct canfd_frame *cp, int offset)
97 {
98 return *(u64 *)(cp->data + offset);
99 }
100
101 struct bcm_op {
102 struct list_head list;
103 struct rcu_head rcu;
104 int ifindex;
105 canid_t can_id;
106 u32 flags;
107 unsigned long frames_abs, frames_filtered;
108 struct bcm_timeval ival1, ival2;
109 struct hrtimer timer, thrtimer;
110 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
111 int rx_ifindex;
112 int cfsiz;
113 u32 count;
114 u32 nframes;
115 u32 currframe;
116 /* void pointers to arrays of struct can[fd]_frame */
117 void *frames;
118 void *last_frames;
119 struct canfd_frame sframe;
120 struct canfd_frame last_sframe;
121 struct sock *sk;
122 struct net_device *rx_reg_dev;
123 };
124
125 struct bcm_sock {
126 struct sock sk;
127 int bound;
128 int ifindex;
129 struct list_head notifier;
130 struct list_head rx_ops;
131 struct list_head tx_ops;
132 unsigned long dropped_usr_msgs;
133 struct proc_dir_entry *bcm_proc_read;
134 char procname [32]; /* inode number in decimal with \0 */
135 };
136
137 static LIST_HEAD(bcm_notifier_list);
138 static DEFINE_SPINLOCK(bcm_notifier_lock);
139 static struct bcm_sock *bcm_busy_notifier;
140
141 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
142 {
143 return (struct bcm_sock *)sk;
144 }
145
146 static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
147 {
148 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
149 }
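
/*
 * Worked example (for illustration only): a bcm_timeval of
 * { .tv_sec = 1, .tv_usec = 500000 } converts to a ktime_t of
 * 1.5 s, i.e. 1,500,000,000 ns.
 */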
150
151 /* check limitations for timeval provided by user */
152 static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
153 {
154 if ((msg_head->ival1.tv_sec < 0) ||
155 (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
156 (msg_head->ival1.tv_usec < 0) ||
157 (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
158 (msg_head->ival2.tv_sec < 0) ||
159 (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
160 (msg_head->ival2.tv_usec < 0) ||
161 (msg_head->ival2.tv_usec >= USEC_PER_SEC))
162 return true;
163
164 return false;
165 }
166
167 #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
168 #define OPSIZ sizeof(struct bcm_op)
169 #define MHSIZ sizeof(struct bcm_msg_head)
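
/*
 * Sketch of the message layout bcm_sendmsg() expects from userspace
 * (illustrative only): a struct bcm_msg_head directly followed by
 * msg_head.nframes CAN frames, each of CFSIZ(msg_head.flags) bytes -
 * struct can_frame without CAN_FD_FRAME set, struct canfd_frame with it.
 * The trailing data length therefore has to be a multiple of CFSIZ(flags),
 * which is what the "(size - MHSIZ) % cfsiz" check in bcm_sendmsg()
 * enforces.
 */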
170
171 /*
172 * procfs functions
173 */
174 #if IS_ENABLED(CONFIG_PROC_FS)
175 static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
176 {
177 struct net_device *dev;
178
179 if (!ifindex)
180 return "any";
181
182 rcu_read_lock();
183 dev = dev_get_by_index_rcu(net, ifindex);
184 if (dev)
185 strcpy(result, dev->name);
186 else
187 strcpy(result, "???");
188 rcu_read_unlock();
189
190 return result;
191 }
192
193 static int bcm_proc_show(struct seq_file *m, void *v)
194 {
195 char ifname[IFNAMSIZ];
196 struct net *net = m->private;
197 struct sock *sk = (struct sock *)PDE_DATA(m->file->f_inode);
198 struct bcm_sock *bo = bcm_sk(sk);
199 struct bcm_op *op;
200
201 seq_printf(m, ">>> socket %pK", sk->sk_socket);
202 seq_printf(m, " / sk %pK", sk);
203 seq_printf(m, " / bo %pK", bo);
204 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
205 seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
206 seq_printf(m, " <<<\n");
207
208 list_for_each_entry(op, &bo->rx_ops, list) {
209
210 unsigned long reduction;
211
212 /* print only active entries & prevent division by zero */
213 if (!op->frames_abs)
214 continue;
215
216 seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
217 bcm_proc_getifname(net, ifname, op->ifindex));
218
219 if (op->flags & CAN_FD_FRAME)
220 seq_printf(m, "(%u)", op->nframes);
221 else
222 seq_printf(m, "[%u]", op->nframes);
223
224 seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
225
226 if (op->kt_ival1)
227 seq_printf(m, "timeo=%lld ",
228 (long long)ktime_to_us(op->kt_ival1));
229
230 if (op->kt_ival2)
231 seq_printf(m, "thr=%lld ",
232 (long long)ktime_to_us(op->kt_ival2));
233
234 seq_printf(m, "# recv %ld (%ld) => reduction: ",
235 op->frames_filtered, op->frames_abs);
236
237 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
238
239 seq_printf(m, "%s%ld%%\n",
240 (reduction == 100) ? "near " : "", reduction);
241 }
242
243 list_for_each_entry(op, &bo->tx_ops, list) {
244
245 seq_printf(m, "tx_op: %03X %s ", op->can_id,
246 bcm_proc_getifname(net, ifname, op->ifindex));
247
248 if (op->flags & CAN_FD_FRAME)
249 seq_printf(m, "(%u) ", op->nframes);
250 else
251 seq_printf(m, "[%u] ", op->nframes);
252
253 if (op->kt_ival1)
254 seq_printf(m, "t1=%lld ",
255 (long long)ktime_to_us(op->kt_ival1));
256
257 if (op->kt_ival2)
258 seq_printf(m, "t2=%lld ",
259 (long long)ktime_to_us(op->kt_ival2));
260
261 seq_printf(m, "# sent %ld\n", op->frames_abs);
262 }
263 seq_putc(m, '\n');
264 return 0;
265 }
266 #endif /* CONFIG_PROC_FS */
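
/*
 * Roughly what /proc/net/can-bcm/<inode> lines produced above may look
 * like (values are made up for illustration):
 *
 *   rx_op: 123 can0  [2]d timeo=5000000 thr=100000 # recv 7 (100) => reduction: 93%
 *   tx_op: 123 can0 [1] t2=100000 # sent 42
 */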
267
268 /*
269 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
270 * of the given bcm tx op
271 */
272 static void bcm_can_tx(struct bcm_op *op)
273 {
274 struct sk_buff *skb;
275 struct net_device *dev;
276 struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
277 int err;
278
279 /* no target device? => exit */
280 if (!op->ifindex)
281 return;
282
283 dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
284 if (!dev) {
285 /* RFC: should this bcm_op remove itself here? */
286 return;
287 }
288
289 skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
290 if (!skb)
291 goto out;
292
293 can_skb_reserve(skb);
294 can_skb_prv(skb)->ifindex = dev->ifindex;
295 can_skb_prv(skb)->skbcnt = 0;
296
297 skb_put_data(skb, cf, op->cfsiz);
298
299 /* send with loopback */
300 skb->dev = dev;
301 can_skb_set_owner(skb, op->sk);
302 err = can_send(skb, 1);
303 if (!err)
304 op->frames_abs++;
305
306 op->currframe++;
307
308 /* reached last frame? */
309 if (op->currframe >= op->nframes)
310 op->currframe = 0;
311 out:
312 dev_put(dev);
313 }
314
315 /*
316 * bcm_send_to_user - send a BCM message to the userspace
317 * (consisting of bcm_msg_head + x CAN frames)
318 */
319 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
320 struct canfd_frame *frames, int has_timestamp)
321 {
322 struct sk_buff *skb;
323 struct canfd_frame *firstframe;
324 struct sockaddr_can *addr;
325 struct sock *sk = op->sk;
326 unsigned int datalen = head->nframes * op->cfsiz;
327 int err;
328
329 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
330 if (!skb)
331 return;
332
333 skb_put_data(skb, head, sizeof(*head));
334
335 if (head->nframes) {
336 /* CAN frames starting here */
337 firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
338
339 skb_put_data(skb, frames, datalen);
340
341 /*
342 * the BCM uses the flags-element of the canfd_frame
343 * structure for internal purposes. This is only
344 * relevant for updates that are generated by the
345 * BCM, where nframes is 1
346 */
347 if (head->nframes == 1)
348 firstframe->flags &= BCM_CAN_FLAGS_MASK;
349 }
350
351 if (has_timestamp) {
352 /* restore rx timestamp */
353 skb->tstamp = op->rx_stamp;
354 }
355
356 /*
357 * Put the datagram to the queue so that bcm_recvmsg() can
358 * get it from there. We need to pass the interface index to
359 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
360 * containing the interface index.
361 */
362
363 sock_skb_cb_check_size(sizeof(struct sockaddr_can));
364 addr = (struct sockaddr_can *)skb->cb;
365 memset(addr, 0, sizeof(*addr));
366 addr->can_family = AF_CAN;
367 addr->can_ifindex = op->rx_ifindex;
368
369 err = sock_queue_rcv_skb(sk, skb);
370 if (err < 0) {
371 struct bcm_sock *bo = bcm_sk(sk);
372
373 kfree_skb(skb);
374 /* don't care about overflows in this statistic */
375 bo->dropped_usr_msgs++;
376 }
377 }
378
379 static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
380 {
381 ktime_t ival;
382
383 if (op->kt_ival1 && op->count)
384 ival = op->kt_ival1;
385 else if (op->kt_ival2)
386 ival = op->kt_ival2;
387 else
388 return false;
389
390 hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
391 return true;
392 }
393
394 static void bcm_tx_start_timer(struct bcm_op *op)
395 {
396 if (bcm_tx_set_expiry(op, &op->timer))
397 hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
398 }
399
400 /* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
401 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
402 {
403 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
404 struct bcm_msg_head msg_head;
405
406 if (op->kt_ival1 && (op->count > 0)) {
407 op->count--;
408 if (!op->count && (op->flags & TX_COUNTEVT)) {
409
410 /* create notification to user */
411 memset(&msg_head, 0, sizeof(msg_head));
412 msg_head.opcode = TX_EXPIRED;
413 msg_head.flags = op->flags;
414 msg_head.count = op->count;
415 msg_head.ival1 = op->ival1;
416 msg_head.ival2 = op->ival2;
417 msg_head.can_id = op->can_id;
418 msg_head.nframes = 0;
419
420 bcm_send_to_user(op, &msg_head, NULL, 0);
421 }
422 bcm_can_tx(op);
423
424 } else if (op->kt_ival2) {
425 bcm_can_tx(op);
426 }
427
428 return bcm_tx_set_expiry(op, &op->timer) ?
429 HRTIMER_RESTART : HRTIMER_NORESTART;
430 }
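
/*
 * Timing sketch (illustrative): with SETTIMER/STARTTIMER, count = 3,
 * ival1 = 10 ms and ival2 = 1 s, the op sends its frame(s) three times
 * spaced by ival1 and then keeps sending forever spaced by ival2.
 * If TX_COUNTEVT is set, a TX_EXPIRED notification is queued to the
 * user when count reaches zero; with ival2 = 0 the transmission simply
 * stops at that point.
 */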
431
432 /*
433 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
434 */
435 static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
436 {
437 struct bcm_msg_head head;
438
439 /* update statistics */
440 op->frames_filtered++;
441
442 /* prevent statistics overflow */
443 if (op->frames_filtered > ULONG_MAX/100)
444 op->frames_filtered = op->frames_abs = 0;
445
446 /* this element is not throttled anymore */
447 data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
448
449 memset(&head, 0, sizeof(head));
450 head.opcode = RX_CHANGED;
451 head.flags = op->flags;
452 head.count = op->count;
453 head.ival1 = op->ival1;
454 head.ival2 = op->ival2;
455 head.can_id = op->can_id;
456 head.nframes = 1;
457
458 bcm_send_to_user(op, &head, data, 1);
459 }
460
461 /*
462 * bcm_rx_update_and_send - process a detected relevant receive content change
463 * 1. update the last received data
464 * 2. send a notification to the user (if possible)
465 */
466 static void bcm_rx_update_and_send(struct bcm_op *op,
467 struct canfd_frame *lastdata,
468 const struct canfd_frame *rxdata)
469 {
470 memcpy(lastdata, rxdata, op->cfsiz);
471
472 /* mark as used and throttled by default */
473 lastdata->flags |= (RX_RECV|RX_THR);
474
475 /* throttling mode inactive ? */
476 if (!op->kt_ival2) {
477 /* send RX_CHANGED to the user immediately */
478 bcm_rx_changed(op, lastdata);
479 return;
480 }
481
482 /* with active throttling timer we are just done here */
483 if (hrtimer_active(&op->thrtimer))
484 return;
485
486 /* first reception with enabled throttling mode */
487 if (!op->kt_lastmsg)
488 goto rx_changed_settime;
489
490 /* got a second frame inside a potential throttle period? */
491 if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
492 ktime_to_us(op->kt_ival2)) {
493 /* do not send the saved data - only start throttle timer */
494 hrtimer_start(&op->thrtimer,
495 ktime_add(op->kt_lastmsg, op->kt_ival2),
496 HRTIMER_MODE_ABS_SOFT);
497 return;
498 }
499
500 /* the gap was big enough that throttling was not needed here */
501 rx_changed_settime:
502 bcm_rx_changed(op, lastdata);
503 op->kt_lastmsg = ktime_get();
504 }
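
/*
 * Throttling timeline sketch (illustrative, kt_ival2 = 100 ms):
 * a content change at t = 0 is sent as RX_CHANGED immediately and
 * kt_lastmsg is set. Changes at t = 30 ms and t = 60 ms only update
 * last_frames and (re)use the running thrtimer, which fires at
 * t = 100 ms and flushes the newest content via bcm_rx_thr_flush().
 */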
505
506 /*
507 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
508 * received data stored in op->last_frames[]
509 */
510 static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
511 const struct canfd_frame *rxdata)
512 {
513 struct canfd_frame *cf = op->frames + op->cfsiz * index;
514 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
515 int i;
516
517 /*
518 * no one uses the MSBs of flags for comparison,
519 * so we use them here to detect the first reception
520 */
521
522 if (!(lcf->flags & RX_RECV)) {
523 /* received data for the first time => send update to user */
524 bcm_rx_update_and_send(op, lcf, rxdata);
525 return;
526 }
527
528 /* do a real check in CAN frame data section */
529 for (i = 0; i < rxdata->len; i += 8) {
530 if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
531 (get_u64(cf, i) & get_u64(lcf, i))) {
532 bcm_rx_update_and_send(op, lcf, rxdata);
533 return;
534 }
535 }
536
537 if (op->flags & RX_CHECK_DLC) {
538 /* do a real check in CAN frame length */
539 if (rxdata->len != lcf->len) {
540 bcm_rx_update_and_send(op, lcf, rxdata);
541 return;
542 }
543 }
544 }
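
/*
 * Example (illustrative): if the RX_SETUP filter frame for this index
 * carries 0xFF in data[0] and 0x00 in all other bytes, only changes in
 * byte 0 of the received frame lead to an RX_CHANGED notification;
 * changes in the remaining payload bytes are ignored by the masked
 * 64-bit compare above.
 */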
545
546 /*
547 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
548 */
549 static void bcm_rx_starttimer(struct bcm_op *op)
550 {
551 if (op->flags & RX_NO_AUTOTIMER)
552 return;
553
554 if (op->kt_ival1)
555 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
556 }
557
558 /* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
559 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
560 {
561 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
562 struct bcm_msg_head msg_head;
563
564 /* if the user wants to be informed when cyclic CAN messages come back */
565 if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
566 /* clear received CAN frames to indicate 'nothing received' */
567 memset(op->last_frames, 0, op->nframes * op->cfsiz);
568 }
569
570 /* create notification to user */
571 memset(&msg_head, 0, sizeof(msg_head));
572 msg_head.opcode = RX_TIMEOUT;
573 msg_head.flags = op->flags;
574 msg_head.count = op->count;
575 msg_head.ival1 = op->ival1;
576 msg_head.ival2 = op->ival2;
577 msg_head.can_id = op->can_id;
578 msg_head.nframes = 0;
579
580 bcm_send_to_user(op, &msg_head, NULL, 0);
581
582 return HRTIMER_NORESTART;
583 }
584
585 /*
586 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
587 */
588 static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
589 {
590 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
591
592 if ((op->last_frames) && (lcf->flags & RX_THR)) {
593 bcm_rx_changed(op, lcf);
594 return 1;
595 }
596 return 0;
597 }
598
599 /*
600 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
601 */
602 static int bcm_rx_thr_flush(struct bcm_op *op)
603 {
604 int updated = 0;
605
606 if (op->nframes > 1) {
607 unsigned int i;
608
609 /* for MUX filter we start at index 1 */
610 for (i = 1; i < op->nframes; i++)
611 updated += bcm_rx_do_flush(op, i);
612
613 } else {
614 /* for RX_FILTER_ID and simple filter */
615 updated += bcm_rx_do_flush(op, 0);
616 }
617
618 return updated;
619 }
620
621 /*
622 * bcm_rx_thr_handler - the time for blocked content updates is over now:
623 * Check for throttled data and send it to the userspace
624 */
625 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
626 {
627 struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
628
629 if (bcm_rx_thr_flush(op)) {
630 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
631 return HRTIMER_RESTART;
632 } else {
633 /* rearm throttle handling */
634 op->kt_lastmsg = 0;
635 return HRTIMER_NORESTART;
636 }
637 }
638
639 /*
640 * bcm_rx_handler - handle a CAN frame reception
641 */
642 static void bcm_rx_handler(struct sk_buff *skb, void *data)
643 {
644 struct bcm_op *op = (struct bcm_op *)data;
645 const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
646 unsigned int i;
647
648 if (op->can_id != rxframe->can_id)
649 return;
650
651 /* make sure to handle the correct frame type (CAN / CAN FD) */
652 if (skb->len != op->cfsiz)
653 return;
654
655 /* disable timeout */
656 hrtimer_cancel(&op->timer);
657
658 /* save rx timestamp */
659 op->rx_stamp = skb->tstamp;
660 /* save originator for recvfrom() */
661 op->rx_ifindex = skb->dev->ifindex;
662 /* update statistics */
663 op->frames_abs++;
664
665 if (op->flags & RX_RTR_FRAME) {
666 /* send reply for RTR-request (placed in op->frames[0]) */
667 bcm_can_tx(op);
668 return;
669 }
670
671 if (op->flags & RX_FILTER_ID) {
672 /* the easiest case */
673 bcm_rx_update_and_send(op, op->last_frames, rxframe);
674 goto rx_starttimer;
675 }
676
677 if (op->nframes == 1) {
678 /* simple compare with index 0 */
679 bcm_rx_cmp_to_index(op, 0, rxframe);
680 goto rx_starttimer;
681 }
682
683 if (op->nframes > 1) {
684 /*
685 * multiplex compare
686 *
687 * find the first multiplex mask that fits.
688 * Remark: The MUX-mask is stored in index 0 - but only the
689 * first 64 bits of the frame data[] are relevant (CAN FD)
690 */
691
692 for (i = 1; i < op->nframes; i++) {
693 if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
694 (get_u64(op->frames, 0) &
695 get_u64(op->frames + op->cfsiz * i, 0))) {
696 bcm_rx_cmp_to_index(op, i, rxframe);
697 break;
698 }
699 }
700 }
701
702 rx_starttimer:
703 bcm_rx_starttimer(op);
704 }
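
/*
 * Multiplex example (illustrative): with a MUX mask of 0xFF in data[0]
 * of frames[0] and filter entries whose data[0] is 0x01 (frames[1]) and
 * 0x02 (frames[2]), a received frame starting with 0x02 is only compared
 * against index 2, i.e. each multiplexor value gets its own 'last data'
 * slot in last_frames[].
 */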
705
706 /*
707 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
708 */
709 static struct bcm_op *bcm_find_op(struct list_head *ops,
710 struct bcm_msg_head *mh, int ifindex)
711 {
712 struct bcm_op *op;
713
714 list_for_each_entry(op, ops, list) {
715 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
716 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
717 return op;
718 }
719
720 return NULL;
721 }
722
723 static void bcm_free_op_rcu(struct rcu_head *rcu_head)
724 {
725 struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu);
726
727 if ((op->frames) && (op->frames != &op->sframe))
728 kfree(op->frames);
729
730 if ((op->last_frames) && (op->last_frames != &op->last_sframe))
731 kfree(op->last_frames);
732
733 kfree(op);
734 }
735
736 static void bcm_remove_op(struct bcm_op *op)
737 {
738 hrtimer_cancel(&op->timer);
739 hrtimer_cancel(&op->thrtimer);
740
741 call_rcu(&op->rcu, bcm_free_op_rcu);
742 }
743
744 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
745 {
746 if (op->rx_reg_dev == dev) {
747 can_rx_unregister(dev_net(dev), dev, op->can_id,
748 REGMASK(op->can_id), bcm_rx_handler, op);
749
750 /* mark as removed subscription */
751 op->rx_reg_dev = NULL;
752 } else
753 printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
754 "mismatch %p %p\n", op->rx_reg_dev, dev);
755 }
756
757 /*
758 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
759 */
760 static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
761 int ifindex)
762 {
763 struct bcm_op *op, *n;
764
765 list_for_each_entry_safe(op, n, ops, list) {
766 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
767 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
768
769 /* disable automatic timer on frame reception */
770 op->flags |= RX_NO_AUTOTIMER;
771
772 /*
773 * No matter whether we are bound or not (due to netdev
774 * problems), can_rx_unregister() is always a safe
775 * thing to do here.
776 */
777 if (op->ifindex) {
778 /*
779 * Only remove subscriptions that had not
780 * been removed due to NETDEV_UNREGISTER
781 * in bcm_notifier()
782 */
783 if (op->rx_reg_dev) {
784 struct net_device *dev;
785
786 dev = dev_get_by_index(sock_net(op->sk),
787 op->ifindex);
788 if (dev) {
789 bcm_rx_unreg(dev, op);
790 dev_put(dev);
791 }
792 }
793 } else
794 can_rx_unregister(sock_net(op->sk), NULL,
795 op->can_id,
796 REGMASK(op->can_id),
797 bcm_rx_handler, op);
798
799 list_del(&op->list);
800 bcm_remove_op(op);
801 return 1; /* done */
802 }
803 }
804
805 return 0; /* not found */
806 }
807
808 /*
809 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
810 */
811 static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
812 int ifindex)
813 {
814 struct bcm_op *op, *n;
815
816 list_for_each_entry_safe(op, n, ops, list) {
817 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
818 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
819 list_del(&op->list);
820 bcm_remove_op(op);
821 return 1; /* done */
822 }
823 }
824
825 return 0; /* not found */
826 }
827
828 /*
829 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
830 */
831 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
832 int ifindex)
833 {
834 struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);
835
836 if (!op)
837 return -EINVAL;
838
839 /* put current values into msg_head */
840 msg_head->flags = op->flags;
841 msg_head->count = op->count;
842 msg_head->ival1 = op->ival1;
843 msg_head->ival2 = op->ival2;
844 msg_head->nframes = op->nframes;
845
846 bcm_send_to_user(op, msg_head, op->frames, 0);
847
848 return MHSIZ;
849 }
850
851 /*
852 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
853 */
854 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
855 int ifindex, struct sock *sk)
856 {
857 struct bcm_sock *bo = bcm_sk(sk);
858 struct bcm_op *op;
859 struct canfd_frame *cf;
860 unsigned int i;
861 int err;
862
863 /* we need a real device to send frames */
864 if (!ifindex)
865 return -ENODEV;
866
867 /* check nframes boundaries - we need at least one CAN frame */
868 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
869 return -EINVAL;
870
871 /* check timeval limitations */
872 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
873 return -EINVAL;
874
875 /* check the given can_id */
876 op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
877 if (op) {
878 /* update existing BCM operation */
879
880 /*
881 * Do we need more space for the CAN frames than currently
882 * allocated? -> This is a _really_ unusual use-case and
883 * therefore (complexity / locking) it is not supported.
884 */
885 if (msg_head->nframes > op->nframes)
886 return -E2BIG;
887
888 /* update CAN frames content */
889 for (i = 0; i < msg_head->nframes; i++) {
890
891 cf = op->frames + op->cfsiz * i;
892 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
893
894 if (op->flags & CAN_FD_FRAME) {
895 if (cf->len > 64)
896 err = -EINVAL;
897 } else {
898 if (cf->len > 8)
899 err = -EINVAL;
900 }
901
902 if (err < 0)
903 return err;
904
905 if (msg_head->flags & TX_CP_CAN_ID) {
906 /* copy can_id into frame */
907 cf->can_id = msg_head->can_id;
908 }
909 }
910 op->flags = msg_head->flags;
911
912 } else {
913 /* insert new BCM operation for the given can_id */
914
915 op = kzalloc(OPSIZ, GFP_KERNEL);
916 if (!op)
917 return -ENOMEM;
918
919 op->can_id = msg_head->can_id;
920 op->cfsiz = CFSIZ(msg_head->flags);
921 op->flags = msg_head->flags;
922
923 /* create array for CAN frames and copy the data */
924 if (msg_head->nframes > 1) {
925 op->frames = kmalloc_array(msg_head->nframes,
926 op->cfsiz,
927 GFP_KERNEL);
928 if (!op->frames) {
929 kfree(op);
930 return -ENOMEM;
931 }
932 } else
933 op->frames = &op->sframe;
934
935 for (i = 0; i < msg_head->nframes; i++) {
936
937 cf = op->frames + op->cfsiz * i;
938 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
939 if (err < 0)
940 goto free_op;
941
942 if (op->flags & CAN_FD_FRAME) {
943 if (cf->len > 64)
944 err = -EINVAL;
945 } else {
946 if (cf->len > 8)
947 err = -EINVAL;
948 }
949
950 if (err < 0)
951 goto free_op;
952
953 if (msg_head->flags & TX_CP_CAN_ID) {
954 /* copy can_id into frame */
955 cf->can_id = msg_head->can_id;
956 }
957 }
958
959 /* tx_ops never compare with previous received messages */
960 op->last_frames = NULL;
961
962 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
963 op->sk = sk;
964 op->ifindex = ifindex;
965
966 /* initialize uninitialized (kzalloc) structure */
967 hrtimer_init(&op->timer, CLOCK_MONOTONIC,
968 HRTIMER_MODE_REL_SOFT);
969 op->timer.function = bcm_tx_timeout_handler;
970
971 /* currently unused in tx_ops */
972 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
973 HRTIMER_MODE_REL_SOFT);
974
975 /* add this bcm_op to the list of the tx_ops */
976 list_add(&op->list, &bo->tx_ops);
977
978 } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head, ifindex))) */
979
980 if (op->nframes != msg_head->nframes) {
981 op->nframes = msg_head->nframes;
982 /* start multiple frame transmission with index 0 */
983 op->currframe = 0;
984 }
985
986 /* check flags */
987
988 if (op->flags & TX_RESET_MULTI_IDX) {
989 /* start multiple frame transmission with index 0 */
990 op->currframe = 0;
991 }
992
993 if (op->flags & SETTIMER) {
994 /* set timer values */
995 op->count = msg_head->count;
996 op->ival1 = msg_head->ival1;
997 op->ival2 = msg_head->ival2;
998 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
999 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1000
1001 /* disable an active timer due to zero values? */
1002 if (!op->kt_ival1 && !op->kt_ival2)
1003 hrtimer_cancel(&op->timer);
1004 }
1005
1006 if (op->flags & STARTTIMER) {
1007 hrtimer_cancel(&op->timer);
1008 /* spec: send CAN frame when starting timer */
1009 op->flags |= TX_ANNOUNCE;
1010 }
1011
1012 if (op->flags & TX_ANNOUNCE) {
1013 bcm_can_tx(op);
1014 if (op->count)
1015 op->count--;
1016 }
1017
1018 if (op->flags & STARTTIMER)
1019 bcm_tx_start_timer(op);
1020
1021 return msg_head->nframes * op->cfsiz + MHSIZ;
1022
1023 free_op:
1024 if (op->frames != &op->sframe)
1025 kfree(op->frames);
1026 kfree(op);
1027 return err;
1028 }
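
/*
 * Userspace sketch for TX_SETUP (illustrative only, not part of this
 * module; assumes 's' is a connected CAN_BCM socket): send CAN ID 0x123
 * cyclically every 100 ms until the op is deleted.
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} msg = {
 *		.msg_head = {
 *			.opcode  = TX_SETUP,
 *			.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID,
 *			.ival2   = { .tv_sec = 0, .tv_usec = 100000 },
 *			.can_id  = 0x123,
 *			.nframes = 1,
 *		},
 *		.frame = { .len = 2, .data = { 0x11, 0x22 } },
 *	};
 *
 *	write(s, &msg, sizeof(msg));
 */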
1029
1030 /*
1031 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
1032 */
1033 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1034 int ifindex, struct sock *sk)
1035 {
1036 struct bcm_sock *bo = bcm_sk(sk);
1037 struct bcm_op *op;
1038 int do_rx_register;
1039 int err = 0;
1040
1041 if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
1042 /* be robust against wrong usage ... */
1043 msg_head->flags |= RX_FILTER_ID;
1044 /* ignore trailing garbage */
1045 msg_head->nframes = 0;
1046 }
1047
1048 /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
1049 if (msg_head->nframes > MAX_NFRAMES + 1)
1050 return -EINVAL;
1051
1052 if ((msg_head->flags & RX_RTR_FRAME) &&
1053 ((msg_head->nframes != 1) ||
1054 (!(msg_head->can_id & CAN_RTR_FLAG))))
1055 return -EINVAL;
1056
1057 /* check timeval limitations */
1058 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
1059 return -EINVAL;
1060
1061 /* check the given can_id */
1062 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
1063 if (op) {
1064 /* update existing BCM operation */
1065
1066 /*
1067 * Do we need more space for the CAN frames than currently
1068 * allocated? -> This is a _really_ unusual use-case and
1069 * therefore (complexity / locking) it is not supported.
1070 */
1071 if (msg_head->nframes > op->nframes)
1072 return -E2BIG;
1073
1074 if (msg_head->nframes) {
1075 /* update CAN frames content */
1076 err = memcpy_from_msg(op->frames, msg,
1077 msg_head->nframes * op->cfsiz);
1078 if (err < 0)
1079 return err;
1080
1081 /* clear last_frames to indicate 'nothing received' */
1082 memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
1083 }
1084
1085 op->nframes = msg_head->nframes;
1086 op->flags = msg_head->flags;
1087
1088 /* Only an update -> do not call can_rx_register() */
1089 do_rx_register = 0;
1090
1091 } else {
1092 /* insert new BCM operation for the given can_id */
1093 op = kzalloc(OPSIZ, GFP_KERNEL);
1094 if (!op)
1095 return -ENOMEM;
1096
1097 op->can_id = msg_head->can_id;
1098 op->nframes = msg_head->nframes;
1099 op->cfsiz = CFSIZ(msg_head->flags);
1100 op->flags = msg_head->flags;
1101
1102 if (msg_head->nframes > 1) {
1103 /* create array for CAN frames and copy the data */
1104 op->frames = kmalloc_array(msg_head->nframes,
1105 op->cfsiz,
1106 GFP_KERNEL);
1107 if (!op->frames) {
1108 kfree(op);
1109 return -ENOMEM;
1110 }
1111
1112 /* create and init array for received CAN frames */
1113 op->last_frames = kcalloc(msg_head->nframes,
1114 op->cfsiz,
1115 GFP_KERNEL);
1116 if (!op->last_frames) {
1117 kfree(op->frames);
1118 kfree(op);
1119 return -ENOMEM;
1120 }
1121
1122 } else {
1123 op->frames = &op->sframe;
1124 op->last_frames = &op->last_sframe;
1125 }
1126
1127 if (msg_head->nframes) {
1128 err = memcpy_from_msg(op->frames, msg,
1129 msg_head->nframes * op->cfsiz);
1130 if (err < 0) {
1131 if (op->frames != &op->sframe)
1132 kfree(op->frames);
1133 if (op->last_frames != &op->last_sframe)
1134 kfree(op->last_frames);
1135 kfree(op);
1136 return err;
1137 }
1138 }
1139
1140 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1141 op->sk = sk;
1142 op->ifindex = ifindex;
1143
1144 /* ifindex for timeout events w/o previous frame reception */
1145 op->rx_ifindex = ifindex;
1146
1147 /* initialize uninitialized (kzalloc) structure */
1148 hrtimer_init(&op->timer, CLOCK_MONOTONIC,
1149 HRTIMER_MODE_REL_SOFT);
1150 op->timer.function = bcm_rx_timeout_handler;
1151
1152 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
1153 HRTIMER_MODE_REL_SOFT);
1154 op->thrtimer.function = bcm_rx_thr_handler;
1155
1156 /* add this bcm_op to the list of the rx_ops */
1157 list_add(&op->list, &bo->rx_ops);
1158
1159 /* call can_rx_register() */
1160 do_rx_register = 1;
1161
1162 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head, ifindex))) */
1163
1164 /* check flags */
1165
1166 if (op->flags & RX_RTR_FRAME) {
1167 struct canfd_frame *frame0 = op->frames;
1168
1169 /* no timers in RTR-mode */
1170 hrtimer_cancel(&op->thrtimer);
1171 hrtimer_cancel(&op->timer);
1172
1173 /*
1174 * funny feature in RX(!)_SETUP only for RTR-mode:
1175 * copy can_id into frame BUT without RTR-flag to
1176 * prevent a full-load-loopback-test ... ;-]
1177 */
1178 if ((op->flags & TX_CP_CAN_ID) ||
1179 (frame0->can_id == op->can_id))
1180 frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
1181
1182 } else {
1183 if (op->flags & SETTIMER) {
1184
1185 /* set timer value */
1186 op->ival1 = msg_head->ival1;
1187 op->ival2 = msg_head->ival2;
1188 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
1189 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1190
1191 /* disable an active timer due to zero value? */
1192 if (!op->kt_ival1)
1193 hrtimer_cancel(&op->timer);
1194
1195 /*
1196 * In any case cancel the throttle timer, flush
1197 * potentially blocked msgs and reset throttle handling
1198 */
1199 op->kt_lastmsg = 0;
1200 hrtimer_cancel(&op->thrtimer);
1201 bcm_rx_thr_flush(op);
1202 }
1203
1204 if ((op->flags & STARTTIMER) && op->kt_ival1)
1205 hrtimer_start(&op->timer, op->kt_ival1,
1206 HRTIMER_MODE_REL_SOFT);
1207 }
1208
1209 /* now we can register for can_ids, if we added a new bcm_op */
1210 if (do_rx_register) {
1211 if (ifindex) {
1212 struct net_device *dev;
1213
1214 dev = dev_get_by_index(sock_net(sk), ifindex);
1215 if (dev) {
1216 err = can_rx_register(sock_net(sk), dev,
1217 op->can_id,
1218 REGMASK(op->can_id),
1219 bcm_rx_handler, op,
1220 "bcm", sk);
1221
1222 op->rx_reg_dev = dev;
1223 dev_put(dev);
1224 }
1225
1226 } else
1227 err = can_rx_register(sock_net(sk), NULL, op->can_id,
1228 REGMASK(op->can_id),
1229 bcm_rx_handler, op, "bcm", sk);
1230 if (err) {
1231 /* this bcm rx op is broken -> remove it */
1232 list_del(&op->list);
1233 bcm_remove_op(op);
1234 return err;
1235 }
1236 }
1237
1238 return msg_head->nframes * op->cfsiz + MHSIZ;
1239 }
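
/*
 * Userspace sketch for RX_SETUP (illustrative only; assumes 's' is a
 * connected CAN_BCM socket): get an RX_CHANGED message for every frame
 * with CAN ID 0x42 and an RX_TIMEOUT message when none arrives for 1 s.
 *
 *	struct bcm_msg_head msg_head = {
 *		.opcode  = RX_SETUP,
 *		.flags   = RX_FILTER_ID | SETTIMER | STARTTIMER,
 *		.ival1   = { .tv_sec = 1, .tv_usec = 0 },
 *		.can_id  = 0x42,
 *		.nframes = 0,
 *	};
 *
 *	write(s, &msg_head, sizeof(msg_head));
 */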
1240
1241 /*
1242 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1243 */
1244 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
1245 int cfsiz)
1246 {
1247 struct sk_buff *skb;
1248 struct net_device *dev;
1249 int err;
1250
1251 /* we need a real device to send frames */
1252 if (!ifindex)
1253 return -ENODEV;
1254
1255 skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
1256 if (!skb)
1257 return -ENOMEM;
1258
1259 can_skb_reserve(skb);
1260
1261 err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
1262 if (err < 0) {
1263 kfree_skb(skb);
1264 return err;
1265 }
1266
1267 dev = dev_get_by_index(sock_net(sk), ifindex);
1268 if (!dev) {
1269 kfree_skb(skb);
1270 return -ENODEV;
1271 }
1272
1273 can_skb_prv(skb)->ifindex = dev->ifindex;
1274 can_skb_prv(skb)->skbcnt = 0;
1275 skb->dev = dev;
1276 can_skb_set_owner(skb, sk);
1277 err = can_send(skb, 1); /* send with loopback */
1278 dev_put(dev);
1279
1280 if (err)
1281 return err;
1282
1283 return cfsiz + MHSIZ;
1284 }
1285
1286 /*
1287 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
1288 */
1289 static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1290 {
1291 struct sock *sk = sock->sk;
1292 struct bcm_sock *bo = bcm_sk(sk);
1293 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1294 struct bcm_msg_head msg_head;
1295 int cfsiz;
1296 int ret; /* read bytes or error codes as return value */
1297
1298 if (!bo->bound)
1299 return -ENOTCONN;
1300
1301 /* check for valid message length from userspace */
1302 if (size < MHSIZ)
1303 return -EINVAL;
1304
1305 /* read message head information */
1306 ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
1307 if (ret < 0)
1308 return ret;
1309
1310 cfsiz = CFSIZ(msg_head.flags);
1311 if ((size - MHSIZ) % cfsiz)
1312 return -EINVAL;
1313
1314 /* check for alternative ifindex for this bcm_op */
1315
1316 if (!ifindex && msg->msg_name) {
1317 /* no bound device as default => check msg_name */
1318 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
1319
1320 if (msg->msg_namelen < BCM_MIN_NAMELEN)
1321 return -EINVAL;
1322
1323 if (addr->can_family != AF_CAN)
1324 return -EINVAL;
1325
1326 /* ifindex from sendto() */
1327 ifindex = addr->can_ifindex;
1328
1329 if (ifindex) {
1330 struct net_device *dev;
1331
1332 dev = dev_get_by_index(sock_net(sk), ifindex);
1333 if (!dev)
1334 return -ENODEV;
1335
1336 if (dev->type != ARPHRD_CAN) {
1337 dev_put(dev);
1338 return -ENODEV;
1339 }
1340
1341 dev_put(dev);
1342 }
1343 }
1344
1345 lock_sock(sk);
1346
1347 switch (msg_head.opcode) {
1348
1349 case TX_SETUP:
1350 ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1351 break;
1352
1353 case RX_SETUP:
1354 ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1355 break;
1356
1357 case TX_DELETE:
1358 if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
1359 ret = MHSIZ;
1360 else
1361 ret = -EINVAL;
1362 break;
1363
1364 case RX_DELETE:
1365 if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
1366 ret = MHSIZ;
1367 else
1368 ret = -EINVAL;
1369 break;
1370
1371 case TX_READ:
1372 /* reuse msg_head for the reply to TX_READ */
1373 msg_head.opcode = TX_STATUS;
1374 ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1375 break;
1376
1377 case RX_READ:
1378 /* reuse msg_head for the reply to RX_READ */
1379 msg_head.opcode = RX_STATUS;
1380 ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1381 break;
1382
1383 case TX_SEND:
1384 /* we need exactly one CAN frame behind the msg head */
1385 if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
1386 ret = -EINVAL;
1387 else
1388 ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
1389 break;
1390
1391 default:
1392 ret = -EINVAL;
1393 break;
1394 }
1395
1396 release_sock(sk);
1397
1398 return ret;
1399 }
1400
1401 /*
1402 * notification handler for netdevice status changes
1403 */
1404 static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
1405 struct net_device *dev)
1406 {
1407 struct sock *sk = &bo->sk;
1408 struct bcm_op *op;
1409 int notify_enodev = 0;
1410
1411 if (!net_eq(dev_net(dev), sock_net(sk)))
1412 return;
1413
1414 switch (msg) {
1415
1416 case NETDEV_UNREGISTER:
1417 lock_sock(sk);
1418
1419 /* remove device specific receive entries */
1420 list_for_each_entry(op, &bo->rx_ops, list)
1421 if (op->rx_reg_dev == dev)
1422 bcm_rx_unreg(dev, op);
1423
1424 /* remove device reference, if this is our bound device */
1425 if (bo->bound && bo->ifindex == dev->ifindex) {
1426 bo->bound = 0;
1427 bo->ifindex = 0;
1428 notify_enodev = 1;
1429 }
1430
1431 release_sock(sk);
1432
1433 if (notify_enodev) {
1434 sk->sk_err = ENODEV;
1435 if (!sock_flag(sk, SOCK_DEAD))
1436 sk_error_report(sk);
1437 }
1438 break;
1439
1440 case NETDEV_DOWN:
1441 if (bo->bound && bo->ifindex == dev->ifindex) {
1442 sk->sk_err = ENETDOWN;
1443 if (!sock_flag(sk, SOCK_DEAD))
1444 sk_error_report(sk);
1445 }
1446 }
1447 }
1448
1449 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1450 void *ptr)
1451 {
1452 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1453
1454 if (dev->type != ARPHRD_CAN)
1455 return NOTIFY_DONE;
1456 if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
1457 return NOTIFY_DONE;
1458 if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
1459 return NOTIFY_DONE;
1460
1461 spin_lock(&bcm_notifier_lock);
1462 list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
1463 spin_unlock(&bcm_notifier_lock);
1464 bcm_notify(bcm_busy_notifier, msg, dev);
1465 spin_lock(&bcm_notifier_lock);
1466 }
1467 bcm_busy_notifier = NULL;
1468 spin_unlock(&bcm_notifier_lock);
1469 return NOTIFY_DONE;
1470 }
1471
1472 /*
1473 * initial settings for all BCM sockets to be set at socket creation time
1474 */
1475 static int bcm_init(struct sock *sk)
1476 {
1477 struct bcm_sock *bo = bcm_sk(sk);
1478
1479 bo->bound = 0;
1480 bo->ifindex = 0;
1481 bo->dropped_usr_msgs = 0;
1482 bo->bcm_proc_read = NULL;
1483
1484 INIT_LIST_HEAD(&bo->tx_ops);
1485 INIT_LIST_HEAD(&bo->rx_ops);
1486
1487 /* set notifier */
1488 spin_lock(&bcm_notifier_lock);
1489 list_add_tail(&bo->notifier, &bcm_notifier_list);
1490 spin_unlock(&bcm_notifier_lock);
1491
1492 return 0;
1493 }
1494
1495 /*
1496 * standard socket functions
1497 */
1498 static int bcm_release(struct socket *sock)
1499 {
1500 struct sock *sk = sock->sk;
1501 struct net *net;
1502 struct bcm_sock *bo;
1503 struct bcm_op *op, *next;
1504
1505 if (!sk)
1506 return 0;
1507
1508 net = sock_net(sk);
1509 bo = bcm_sk(sk);
1510
1511 /* remove bcm_ops, timer, rx_unregister(), etc. */
1512
1513 spin_lock(&bcm_notifier_lock);
1514 while (bcm_busy_notifier == bo) {
1515 spin_unlock(&bcm_notifier_lock);
1516 schedule_timeout_uninterruptible(1);
1517 spin_lock(&bcm_notifier_lock);
1518 }
1519 list_del(&bo->notifier);
1520 spin_unlock(&bcm_notifier_lock);
1521
1522 lock_sock(sk);
1523
1524 #if IS_ENABLED(CONFIG_PROC_FS)
1525 /* remove procfs entry */
1526 if (net->can.bcmproc_dir && bo->bcm_proc_read)
1527 remove_proc_entry(bo->procname, net->can.bcmproc_dir);
1528 #endif /* CONFIG_PROC_FS */
1529
1530 list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1531 bcm_remove_op(op);
1532
1533 list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1534 /*
1535 * No matter whether we are bound or not (due to netdev problems),
1536 * can_rx_unregister() is always a safe thing to do here.
1537 */
1538 if (op->ifindex) {
1539 /*
1540 * Only remove subscriptions that had not
1541 * been removed due to NETDEV_UNREGISTER
1542 * in bcm_notifier()
1543 */
1544 if (op->rx_reg_dev) {
1545 struct net_device *dev;
1546
1547 dev = dev_get_by_index(net, op->ifindex);
1548 if (dev) {
1549 bcm_rx_unreg(dev, op);
1550 dev_put(dev);
1551 }
1552 }
1553 } else
1554 can_rx_unregister(net, NULL, op->can_id,
1555 REGMASK(op->can_id),
1556 bcm_rx_handler, op);
1557
1558 }
1559
1560 synchronize_rcu();
1561
1562 list_for_each_entry_safe(op, next, &bo->rx_ops, list)
1563 bcm_remove_op(op);
1564
1565 /* remove device reference */
1566 if (bo->bound) {
1567 bo->bound = 0;
1568 bo->ifindex = 0;
1569 }
1570
1571 sock_orphan(sk);
1572 sock->sk = NULL;
1573
1574 release_sock(sk);
1575 sock_put(sk);
1576
1577 return 0;
1578 }
1579
1580 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1581 int flags)
1582 {
1583 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1584 struct sock *sk = sock->sk;
1585 struct bcm_sock *bo = bcm_sk(sk);
1586 struct net *net = sock_net(sk);
1587 int ret = 0;
1588
1589 if (len < BCM_MIN_NAMELEN)
1590 return -EINVAL;
1591
1592 lock_sock(sk);
1593
1594 if (bo->bound) {
1595 ret = -EISCONN;
1596 goto fail;
1597 }
1598
1599 /* bind a device to this socket */
1600 if (addr->can_ifindex) {
1601 struct net_device *dev;
1602
1603 dev = dev_get_by_index(net, addr->can_ifindex);
1604 if (!dev) {
1605 ret = -ENODEV;
1606 goto fail;
1607 }
1608 if (dev->type != ARPHRD_CAN) {
1609 dev_put(dev);
1610 ret = -ENODEV;
1611 goto fail;
1612 }
1613
1614 bo->ifindex = dev->ifindex;
1615 dev_put(dev);
1616
1617 } else {
1618 /* no interface reference for ifindex = 0 ('any' CAN device) */
1619 bo->ifindex = 0;
1620 }
1621
1622 #if IS_ENABLED(CONFIG_PROC_FS)
1623 if (net->can.bcmproc_dir) {
1624 /* unique socket address as filename */
1625 sprintf(bo->procname, "%lu", sock_i_ino(sk));
1626 bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
1627 net->can.bcmproc_dir,
1628 bcm_proc_show, sk);
1629 if (!bo->bcm_proc_read) {
1630 ret = -ENOMEM;
1631 goto fail;
1632 }
1633 }
1634 #endif /* CONFIG_PROC_FS */
1635
1636 bo->bound = 1;
1637
1638 fail:
1639 release_sock(sk);
1640
1641 return ret;
1642 }
1643
1644 static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1645 int flags)
1646 {
1647 struct sock *sk = sock->sk;
1648 struct sk_buff *skb;
1649 int error = 0;
1650 int noblock;
1651 int err;
1652
1653 noblock = flags & MSG_DONTWAIT;
1654 flags &= ~MSG_DONTWAIT;
1655 skb = skb_recv_datagram(sk, flags, noblock, &error);
1656 if (!skb)
1657 return error;
1658
1659 if (skb->len < size)
1660 size = skb->len;
1661
1662 err = memcpy_to_msg(msg, skb->data, size);
1663 if (err < 0) {
1664 skb_free_datagram(sk, skb);
1665 return err;
1666 }
1667
1668 sock_recv_ts_and_drops(msg, sk, skb);
1669
1670 if (msg->msg_name) {
1671 __sockaddr_check_size(BCM_MIN_NAMELEN);
1672 msg->msg_namelen = BCM_MIN_NAMELEN;
1673 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1674 }
1675
1676 skb_free_datagram(sk, skb);
1677
1678 return size;
1679 }
1680
1681 static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
1682 unsigned long arg)
1683 {
1684 /* no ioctls for socket layer -> hand it down to NIC layer */
1685 return -ENOIOCTLCMD;
1686 }
1687
1688 static const struct proto_ops bcm_ops = {
1689 .family = PF_CAN,
1690 .release = bcm_release,
1691 .bind = sock_no_bind,
1692 .connect = bcm_connect,
1693 .socketpair = sock_no_socketpair,
1694 .accept = sock_no_accept,
1695 .getname = sock_no_getname,
1696 .poll = datagram_poll,
1697 .ioctl = bcm_sock_no_ioctlcmd,
1698 .gettstamp = sock_gettstamp,
1699 .listen = sock_no_listen,
1700 .shutdown = sock_no_shutdown,
1701 .sendmsg = bcm_sendmsg,
1702 .recvmsg = bcm_recvmsg,
1703 .mmap = sock_no_mmap,
1704 .sendpage = sock_no_sendpage,
1705 };
1706
1707 static struct proto bcm_proto __read_mostly = {
1708 .name = "CAN_BCM",
1709 .owner = THIS_MODULE,
1710 .obj_size = sizeof(struct bcm_sock),
1711 .init = bcm_init,
1712 };
1713
1714 static const struct can_proto bcm_can_proto = {
1715 .type = SOCK_DGRAM,
1716 .protocol = CAN_BCM,
1717 .ops = &bcm_ops,
1718 .prot = &bcm_proto,
1719 };
1720
1721 static int canbcm_pernet_init(struct net *net)
1722 {
1723 #if IS_ENABLED(CONFIG_PROC_FS)
1724 /* create /proc/net/can-bcm directory */
1725 net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
1726 #endif /* CONFIG_PROC_FS */
1727
1728 return 0;
1729 }
1730
1731 static void canbcm_pernet_exit(struct net *net)
1732 {
1733 #if IS_ENABLED(CONFIG_PROC_FS)
1734 /* remove /proc/net/can-bcm directory */
1735 if (net->can.bcmproc_dir)
1736 remove_proc_entry("can-bcm", net->proc_net);
1737 #endif /* CONFIG_PROC_FS */
1738 }
1739
1740 static struct pernet_operations canbcm_pernet_ops __read_mostly = {
1741 .init = canbcm_pernet_init,
1742 .exit = canbcm_pernet_exit,
1743 };
1744
1745 static struct notifier_block canbcm_notifier = {
1746 .notifier_call = bcm_notifier
1747 };
1748
1749 static int __init bcm_module_init(void)
1750 {
1751 int err;
1752
1753 pr_info("can: broadcast manager protocol\n");
1754
1755 err = can_proto_register(&bcm_can_proto);
1756 if (err < 0) {
1757 printk(KERN_ERR "can: registration of bcm protocol failed\n");
1758 return err;
1759 }
1760
1761 register_pernet_subsys(&canbcm_pernet_ops);
1762 register_netdevice_notifier(&canbcm_notifier);
1763 return 0;
1764 }
1765
1766 static void __exit bcm_module_exit(void)
1767 {
1768 can_proto_unregister(&bcm_can_proto);
1769 unregister_netdevice_notifier(&canbcm_notifier);
1770 unregister_pernet_subsys(&canbcm_pernet_ops);
1771 }
1772
1773 module_init(bcm_module_init);
1774 module_exit(bcm_module_exit);
1775