/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Device Layer
 *
 * Authors:     Steve Whitehouse <SteveW@ACM.org>
 *              Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *          Steve Whitehouse : Devices now see incoming frames so they
 *                             can mark who they came from.
 *          Steve Whitehouse : Fixed bug in creating neighbours. Each neighbour
 *                             can now have a device specific setup func.
 *          Steve Whitehouse : Added /proc/sys/net/decnet/conf/<dev>/
 *          Steve Whitehouse : Fixed bug which sometimes killed timer
 *          Steve Whitehouse : Multiple ifaddr support
 *          Steve Whitehouse : SIOCGIFCONF is now a compile time option
 *          Steve Whitehouse : /proc/sys/net/decnet/conf/<sys>/forwarding
 *          Steve Whitehouse : Removed timer1 - it's a user space issue now
 *         Patrick Caulfield : Fixed router hello message format
 *          Steve Whitehouse : Got rid of constant sizes for blksize for
 *                             devices. All MTU based now.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/netlink.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>

#define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn))

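/*
 * Link layer addresses used by DECnet Phase IV on broadcast media:
 * AB-00-00-04-00-00 is the "all end nodes" multicast group,
 * AB-00-00-03-00-00 is the "all routers" multicast group, and
 * AA-00-04-00 (HIORD) is the prefix from which a node's Ethernet
 * address is formed by appending its 16 bit DECnet address.
 */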
static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00};
static char dn_rt_all_rt_mcast[ETH_ALEN]  = {0xAB,0x00,0x00,0x03,0x00,0x00};
static char dn_hiord[ETH_ALEN]            = {0xAA,0x00,0x04,0x00,0x00,0x00};
static unsigned char dn_eco_version[3]    = {0x02,0x00,0x00};

extern struct neigh_table dn_neigh_table;

/*
 * decnet_address is kept in network order.
 */
__le16 decnet_address = 0;

static DEFINE_RWLOCK(dndev_lock);
static struct net_device *decnet_default_device;
static BLOCKING_NOTIFIER_HEAD(dnaddr_chain);

static struct dn_dev *dn_dev_create(struct net_device *dev, int *err);
static void dn_dev_delete(struct net_device *dev);
static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa);

static int dn_eth_up(struct net_device *);
static void dn_eth_down(struct net_device *);
static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa);
static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa);

static struct dn_dev_parms dn_dev_list[] = {
{
	.type     = ARPHRD_ETHER, /* Ethernet */
	.mode     = DN_DEV_BCAST,
	.state    = DN_DEV_S_RU,
	.t2       = 1,
	.t3       = 10,
	.name     = "ethernet",
	.ctl_name = NET_DECNET_CONF_ETHER,
	.up       = dn_eth_up,
	.down     = dn_eth_down,
	.timer3   = dn_send_brd_hello,
},
{
	.type     = ARPHRD_IPGRE, /* DECnet tunneled over GRE in IP */
	.mode     = DN_DEV_BCAST,
	.state    = DN_DEV_S_RU,
	.t2       = 1,
	.t3       = 10,
	.name     = "ipgre",
	.ctl_name = NET_DECNET_CONF_GRE,
	.timer3   = dn_send_brd_hello,
},
#if 0
{
	.type     = ARPHRD_X25, /* Bog standard X.25 */
	.mode     = DN_DEV_UCAST,
	.state    = DN_DEV_S_DS,
	.t2       = 1,
	.t3       = 120,
	.name     = "x25",
	.ctl_name = NET_DECNET_CONF_X25,
	.timer3   = dn_send_ptp_hello,
},
#endif
#if 0
{
	.type     = ARPHRD_PPP, /* DECnet over PPP */
	.mode     = DN_DEV_BCAST,
	.state    = DN_DEV_S_RU,
	.t2       = 1,
	.t3       = 10,
	.name     = "ppp",
	.ctl_name = NET_DECNET_CONF_PPP,
	.timer3   = dn_send_brd_hello,
},
#endif
{
	.type     = ARPHRD_DDCMP, /* DECnet over DDCMP */
	.mode     = DN_DEV_UCAST,
	.state    = DN_DEV_S_DS,
	.t2       = 1,
	.t3       = 120,
	.name     = "ddcmp",
	.ctl_name = NET_DECNET_CONF_DDCMP,
	.timer3   = dn_send_ptp_hello,
},
{
	.type     = ARPHRD_LOOPBACK, /* Loopback interface - always last */
	.mode     = DN_DEV_BCAST,
	.state    = DN_DEV_S_RU,
	.t2       = 1,
	.t3       = 10,
	.name     = "loopback",
	.ctl_name = NET_DECNET_CONF_LOOPBACK,
	.timer3   = dn_send_brd_hello,
}
};

#define DN_DEV_LIST_SIZE ARRAY_SIZE(dn_dev_list)

#define DN_DEV_PARMS_OFFSET(x) offsetof(struct dn_dev_parms, x)
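/*
 * The sysctl table below stores these offsets in its .data fields;
 * dn_dev_sysctl_register() later rebases each one onto the given
 * struct dn_dev_parms (the per device copy, or the per type defaults
 * from dn_dev_list), so every interface gets its own tunable values.
 */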

#ifdef CONFIG_SYSCTL

static int min_t2[] = { 1 };
static int max_t2[] = { 60 }; /* No max specified, but this seems sensible */
static int min_t3[] = { 1 };
static int max_t3[] = { 8191 }; /* Must fit in 16 bits when multiplied by BCT3MULT or T3MULT */

static int min_priority[1];
static int max_priority[] = { 127 }; /* From DECnet spec */

static int dn_forwarding_proc(ctl_table *, int, struct file *,
			void __user *, size_t *, loff_t *);
static int dn_forwarding_sysctl(ctl_table *table,
			void __user *oldval, size_t __user *oldlenp,
			void __user *newval, size_t newlen);

static struct dn_dev_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table dn_dev_vars[5];
} dn_dev_sysctl = {
	NULL,
	{
	{
		.ctl_name = NET_DECNET_CONF_DEV_FORWARDING,
		.procname = "forwarding",
		.data = (void *)DN_DEV_PARMS_OFFSET(forwarding),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = dn_forwarding_proc,
		.strategy = dn_forwarding_sysctl,
	},
	{
		.ctl_name = NET_DECNET_CONF_DEV_PRIORITY,
		.procname = "priority",
		.data = (void *)DN_DEV_PARMS_OFFSET(priority),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_priority,
		.extra2 = &max_priority
	},
	{
		.ctl_name = NET_DECNET_CONF_DEV_T2,
		.procname = "t2",
		.data = (void *)DN_DEV_PARMS_OFFSET(t2),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_t2,
		.extra2 = &max_t2
	},
	{
		.ctl_name = NET_DECNET_CONF_DEV_T3,
		.procname = "t3",
		.data = (void *)DN_DEV_PARMS_OFFSET(t3),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_t3,
		.extra2 = &max_t3
	},
	{0}
	},
};

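/*
 * Registers the table above under /proc/sys/net/decnet/conf/.  For a
 * real interface the directory is named after the device (for example
 * /proc/sys/net/decnet/conf/eth0/forwarding), while the per type
 * defaults from dn_dev_list appear under the type name (for example
 * /proc/sys/net/decnet/conf/ethernet/priority).
 */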
static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms)
{
	struct dn_dev_sysctl_table *t;
	int i;

#define DN_CTL_PATH_DEV	3

	struct ctl_path dn_ctl_path[] = {
		{ .procname = "net", .ctl_name = CTL_NET, },
		{ .procname = "decnet", .ctl_name = NET_DECNET, },
		{ .procname = "conf", .ctl_name = NET_DECNET_CONF, },
		{ /* to be set */ },
		{ },
	};

	t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL);
	if (t == NULL)
		return;

	for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) {
		long offset = (long)t->dn_dev_vars[i].data;
		t->dn_dev_vars[i].data = ((char *)parms) + offset;
	}

	if (dev) {
		dn_ctl_path[DN_CTL_PATH_DEV].procname = dev->name;
		dn_ctl_path[DN_CTL_PATH_DEV].ctl_name = dev->ifindex;
	} else {
		dn_ctl_path[DN_CTL_PATH_DEV].procname = parms->name;
		dn_ctl_path[DN_CTL_PATH_DEV].ctl_name = parms->ctl_name;
	}

	t->dn_dev_vars[0].extra1 = (void *)dev;

	t->sysctl_header = register_sysctl_paths(dn_ctl_path, t->dn_dev_vars);
	if (t->sysctl_header == NULL)
		kfree(t);
	else
		parms->sysctl = t;
}

static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
{
	if (parms->sysctl) {
		struct dn_dev_sysctl_table *t = parms->sysctl;
		parms->sysctl = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}

static int dn_forwarding_proc(ctl_table *table, int write,
			struct file *filep,
			void __user *buffer,
			size_t *lenp, loff_t *ppos)
{
#ifdef CONFIG_DECNET_ROUTER
	struct net_device *dev = table->extra1;
	struct dn_dev *dn_db;
	int err;
	int tmp, old;

	if (table->extra1 == NULL)
		return -EINVAL;

	dn_db = dev->dn_ptr;
	old = dn_db->parms.forwarding;

	err = proc_dointvec(table, write, filep, buffer, lenp, ppos);

	if ((err >= 0) && write) {
		if (dn_db->parms.forwarding < 0)
			dn_db->parms.forwarding = 0;
		if (dn_db->parms.forwarding > 2)
			dn_db->parms.forwarding = 2;
		/*
		 * What an ugly hack this is... it works, just.  It
		 * would be nice if sysctl/proc were just that little
		 * bit more flexible so I don't have to write a special
		 * routine, or suffer hacks like this - SJW
		 */
		tmp = dn_db->parms.forwarding;
		dn_db->parms.forwarding = old;
		if (dn_db->parms.down)
			dn_db->parms.down(dev);
		dn_db->parms.forwarding = tmp;
		if (dn_db->parms.up)
			dn_db->parms.up(dev);
	}

	return err;
#else
	return -EINVAL;
#endif
}

static int dn_forwarding_sysctl(ctl_table *table,
			void __user *oldval, size_t __user *oldlenp,
			void __user *newval, size_t newlen)
{
#ifdef CONFIG_DECNET_ROUTER
	struct net_device *dev = table->extra1;
	struct dn_dev *dn_db;
	int value;

	if (table->extra1 == NULL)
		return -EINVAL;

	dn_db = dev->dn_ptr;

	if (newval && newlen) {
		if (newlen != sizeof(int))
			return -EINVAL;

		if (get_user(value, (int __user *)newval))
			return -EFAULT;
		if (value < 0)
			return -EINVAL;
		if (value > 2)
			return -EINVAL;

		if (dn_db->parms.down)
			dn_db->parms.down(dev);
		dn_db->parms.forwarding = value;
		if (dn_db->parms.up)
			dn_db->parms.up(dev);
	}

	return 0;
#else
	return -EINVAL;
#endif
}

#else /* CONFIG_SYSCTL */
static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
{
}
static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms)
{
}

#endif /* CONFIG_SYSCTL */

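/*
 * Derive the DECnet block size from the device MTU: it is capped at
 * 0xffff and, for Ethernet style framing (including PPP, GRE and
 * loopback), reduced by the 2 byte length field that is prepended to
 * each DECnet packet.
 */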
static inline __u16 mtu2blksize(struct net_device *dev)
{
	u32 blksize = dev->mtu;
	if (blksize > 0xffff)
		blksize = 0xffff;

	if (dev->type == ARPHRD_ETHER ||
	    dev->type == ARPHRD_PPP ||
	    dev->type == ARPHRD_IPGRE ||
	    dev->type == ARPHRD_LOOPBACK)
		blksize -= 2;

	return (__u16)blksize;
}

static struct dn_ifaddr *dn_dev_alloc_ifa(void)
{
	struct dn_ifaddr *ifa;

	ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);

	return ifa;
}

static __inline__ void dn_dev_free_ifa(struct dn_ifaddr *ifa)
{
	kfree(ifa);
}

static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int destroy)
{
	struct dn_ifaddr *ifa1 = *ifap;
	unsigned char mac_addr[6];
	struct net_device *dev = dn_db->dev;

	ASSERT_RTNL();

	*ifap = ifa1->ifa_next;

	if (dn_db->dev->type == ARPHRD_ETHER) {
		if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) {
			dn_dn2eth(mac_addr, ifa1->ifa_local);
			dev_mc_delete(dev, mac_addr, ETH_ALEN, 0);
		}
	}

	dn_ifaddr_notify(RTM_DELADDR, ifa1);
	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1);
	if (destroy) {
		dn_dev_free_ifa(ifa1);

		if (dn_db->ifa_list == NULL)
			dn_dev_delete(dn_db->dev);
	}
}

static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
{
	struct net_device *dev = dn_db->dev;
	struct dn_ifaddr *ifa1;
	unsigned char mac_addr[6];

	ASSERT_RTNL();

	/* Check for duplicates */
	for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
		if (ifa1->ifa_local == ifa->ifa_local)
			return -EEXIST;
	}

	if (dev->type == ARPHRD_ETHER) {
		if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) {
			dn_dn2eth(mac_addr, ifa->ifa_local);
			dev_mc_add(dev, mac_addr, ETH_ALEN, 0);
		}
	}

	ifa->ifa_next = dn_db->ifa_list;
	dn_db->ifa_list = ifa;

	dn_ifaddr_notify(RTM_NEWADDR, ifa);
	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);

	return 0;
}

static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa)
{
	struct dn_dev *dn_db = dev->dn_ptr;
	int rv;

	if (dn_db == NULL) {
		int err;
		dn_db = dn_dev_create(dev, &err);
		if (dn_db == NULL)
			return err;
	}

	ifa->ifa_dev = dn_db;

	if (dev->flags & IFF_LOOPBACK)
		ifa->ifa_scope = RT_SCOPE_HOST;

	rv = dn_dev_insert_ifa(dn_db, ifa);
	if (rv)
		dn_dev_free_ifa(ifa);
	return rv;
}

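/*
 * ioctl interface used by the socket layer: SIOCGIFADDR returns the
 * first DECnet address bound to an interface, while SIOCSIFADDR
 * (which requires CAP_NET_ADMIN and an AF_DECnet address) replaces
 * it, creating the per device dn_dev on demand.
 */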
int dn_dev_ioctl(unsigned int cmd, void __user *arg)
{
	char buffer[DN_IFREQ_SIZE];
	struct ifreq *ifr = (struct ifreq *)buffer;
	struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
	struct dn_dev *dn_db;
	struct net_device *dev;
	struct dn_ifaddr *ifa = NULL, **ifap = NULL;
	int ret = 0;

	if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
		return -EFAULT;
	ifr->ifr_name[IFNAMSIZ-1] = 0;

	dev_load(&init_net, ifr->ifr_name);

	switch(cmd) {
		case SIOCGIFADDR:
			break;
		case SIOCSIFADDR:
			if (!capable(CAP_NET_ADMIN))
				return -EACCES;
			if (sdn->sdn_family != AF_DECnet)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
	}

	rtnl_lock();

	if ((dev = __dev_get_by_name(&init_net, ifr->ifr_name)) == NULL) {
		ret = -ENODEV;
		goto done;
	}

	if ((dn_db = dev->dn_ptr) != NULL) {
		for (ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next)
			if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
				break;
	}

	if (ifa == NULL && cmd != SIOCSIFADDR) {
		ret = -EADDRNOTAVAIL;
		goto done;
	}

	switch(cmd) {
		case SIOCGIFADDR:
			*((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local;
			goto rarok;

		case SIOCSIFADDR:
			if (!ifa) {
				if ((ifa = dn_dev_alloc_ifa()) == NULL) {
					ret = -ENOBUFS;
					break;
				}
				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
			} else {
				if (ifa->ifa_local == dn_saddr2dn(sdn))
					break;
				dn_dev_del_ifa(dn_db, ifap, 0);
			}

			ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn);

			ret = dn_dev_set_ifa(dev, ifa);
	}
done:
	rtnl_unlock();

	return ret;
rarok:
	if (copy_to_user(arg, ifr, DN_IFREQ_SIZE))
		ret = -EFAULT;
	goto done;
}

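/*
 * The default device supplies the fallback source of "our address"
 * for autobinding (see dn_dev_bind_default() below); it is normally
 * the first autoconfigured Ethernet interface (see dn_dev_up()), can
 * be forced with dn_dev_set_default(), and is dropped again when the
 * device goes away.
 */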
struct net_device *dn_dev_get_default(void)
{
	struct net_device *dev;
	read_lock(&dndev_lock);
	dev = decnet_default_device;
	if (dev) {
		if (dev->dn_ptr)
			dev_hold(dev);
		else
			dev = NULL;
	}
	read_unlock(&dndev_lock);
	return dev;
}

int dn_dev_set_default(struct net_device *dev, int force)
{
	struct net_device *old = NULL;
	int rv = -EBUSY;
	if (!dev->dn_ptr)
		return -ENODEV;
	write_lock(&dndev_lock);
	if (force || decnet_default_device == NULL) {
		old = decnet_default_device;
		decnet_default_device = dev;
		rv = 0;
	}
	write_unlock(&dndev_lock);
	if (old)
		dev_put(old);
	return rv;
}

static void dn_dev_check_default(struct net_device *dev)
{
	write_lock(&dndev_lock);
	if (dev == decnet_default_device) {
		decnet_default_device = NULL;
	} else {
		dev = NULL;
	}
	write_unlock(&dndev_lock);
	if (dev)
		dev_put(dev);
}

static struct dn_dev *dn_dev_by_index(int ifindex)
{
	struct net_device *dev;
	struct dn_dev *dn_dev = NULL;
	dev = dev_get_by_index(&init_net, ifindex);
	if (dev) {
		dn_dev = dev->dn_ptr;
		dev_put(dev);
	}

	return dn_dev;
}

static const struct nla_policy dn_ifa_policy[IFA_MAX+1] = {
	[IFA_ADDRESS]		= { .type = NLA_U16 },
	[IFA_LOCAL]		= { .type = NLA_U16 },
	[IFA_LABEL]		= { .type = NLA_STRING,
				    .len = IFNAMSIZ - 1 },
};

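/*
 * rtnetlink handlers for DECnet interface addresses.  RTM_NEWADDR and
 * RTM_DELADDR carry a struct ifaddrmsg plus IFA_LOCAL/IFA_ADDRESS
 * (16 bit DECnet addresses) and an optional IFA_LABEL; RTM_GETADDR is
 * serviced by the dump function further below.
 */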
static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX+1];
	struct dn_dev *dn_db;
	struct ifaddrmsg *ifm;
	struct dn_ifaddr *ifa, **ifap;
	int err = -EINVAL;

	if (net != &init_net)
		goto errout;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
	if (err < 0)
		goto errout;

	err = -ENODEV;
	ifm = nlmsg_data(nlh);
	if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL)
		goto errout;

	err = -EADDRNOTAVAIL;
	for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) {
		if (tb[IFA_LOCAL] &&
		    nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
			continue;

		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
			continue;

		dn_dev_del_ifa(dn_db, ifap, 1);
		return 0;
	}

errout:
	return err;
}

static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX+1];
	struct net_device *dev;
	struct dn_dev *dn_db;
	struct ifaddrmsg *ifm;
	struct dn_ifaddr *ifa;
	int err;

	if (net != &init_net)
		return -EINVAL;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
	if (err < 0)
		return err;

	if (tb[IFA_LOCAL] == NULL)
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL)
		return -ENODEV;

	if ((dn_db = dev->dn_ptr) == NULL) {
		int err;
		dn_db = dn_dev_create(dev, &err);
		if (!dn_db)
			return err;
	}

	if ((ifa = dn_dev_alloc_ifa()) == NULL)
		return -ENOBUFS;

	if (tb[IFA_ADDRESS] == NULL)
		tb[IFA_ADDRESS] = tb[IFA_LOCAL];

	ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]);
	ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]);
	ifa->ifa_flags = ifm->ifa_flags;
	ifa->ifa_scope = ifm->ifa_scope;
	ifa->ifa_dev = dn_db;

	if (tb[IFA_LABEL])
		nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
	else
		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);

	err = dn_dev_insert_ifa(dn_db, ifa);
	if (err)
		dn_dev_free_ifa(ifa);

	return err;
}

static inline size_t dn_ifaddr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
	       + nla_total_size(2) /* IFA_ADDRESS */
	       + nla_total_size(2); /* IFA_LOCAL */
}

static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
			     u32 pid, u32 seq, int event, unsigned int flags)
{
	struct ifaddrmsg *ifm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifa_family = AF_DECnet;
	ifm->ifa_prefixlen = 16;
	ifm->ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT;
	ifm->ifa_scope = ifa->ifa_scope;
	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;

	if (ifa->ifa_address)
		NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address);
	if (ifa->ifa_local)
		NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local);
	if (ifa->ifa_label[0])
		NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(dn_ifaddr_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in dn_ifaddr_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, &init_net, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_IFADDR, err);
}

static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, dn_idx = 0, skip_ndevs, skip_naddr;
	struct net_device *dev;
	struct dn_dev *dn_db;
	struct dn_ifaddr *ifa;

	if (net != &init_net)
		return 0;

	skip_ndevs = cb->args[0];
	skip_naddr = cb->args[1];

	idx = 0;
	for_each_netdev(&init_net, dev) {
		if (idx < skip_ndevs)
			goto cont;
		else if (idx > skip_ndevs) {
			/* Only skip over addresses for first dev dumped
			 * in this iteration (idx == skip_ndevs) */
			skip_naddr = 0;
		}

		if ((dn_db = dev->dn_ptr) == NULL)
			goto cont;

		for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
		     ifa = ifa->ifa_next, dn_idx++) {
			if (dn_idx < skip_naddr)
				continue;

			if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
					      cb->nlh->nlmsg_seq, RTM_NEWADDR,
					      NLM_F_MULTI) < 0)
				goto done;
		}
cont:
		idx++;
	}
done:
	cb->args[0] = idx;
	cb->args[1] = dn_idx;

	return skb->len;
}

static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
{
	struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
	struct dn_ifaddr *ifa;
	int rv = -ENODEV;
	if (dn_db == NULL)
		goto out;
	ifa = dn_db->ifa_list;
	if (ifa != NULL) {
		*addr = ifa->ifa_local;
		rv = 0;
	}
out:
	return rv;
}

/*
 * Find a default address to bind to.
 *
 * This is one of those areas where the initial VMS concepts don't really
 * map onto the Linux concepts, and since we introduced multiple addresses
 * per interface we have to cope with slightly odd ways of finding out what
 * "our address" really is. Mostly it's not a problem; for this we just guess
 * a sensible default. Eventually the routing code will take care of all the
 * nasties for us I hope.
 */
int dn_dev_bind_default(__le16 *addr)
{
	struct net_device *dev;
	int rv;
	dev = dn_dev_get_default();
last_chance:
	if (dev) {
		read_lock(&dev_base_lock);
		rv = dn_dev_get_first(dev, addr);
		read_unlock(&dev_base_lock);
		dev_put(dev);
		if (rv == 0 || dev == init_net.loopback_dev)
			return rv;
	}
	dev = init_net.loopback_dev;
	dev_hold(dev);
	goto last_chance;
}

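/*
 * Build and transmit an Endnode Hello (message flag 0x0D) to the
 * all routers multicast group: routing version, our node id, node
 * type, block size, designated router (if known), hello timer and
 * two bytes of 0xAA test data, all prefixed by the 16 bit packet
 * length.
 */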
static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
{
	struct endnode_hello_message *msg;
	struct sk_buff *skb = NULL;
	__le16 *pktlen;
	struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;

	if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
		return;

	skb->dev = dev;

	msg = (struct endnode_hello_message *)skb_put(skb,sizeof(*msg));

	msg->msgflg = 0x0D;
	memcpy(msg->tiver, dn_eco_version, 3);
	dn_dn2eth(msg->id, ifa->ifa_local);
	msg->iinfo = DN_RT_INFO_ENDN;
	msg->blksize = cpu_to_le16(mtu2blksize(dev));
	msg->area = 0x00;
	memset(msg->seed, 0, 8);
	memcpy(msg->neighbor, dn_hiord, ETH_ALEN);

	if (dn_db->router) {
		struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
		dn_dn2eth(msg->neighbor, dn->addr);
	}

	msg->timer = cpu_to_le16((unsigned short)dn_db->parms.t3);
	msg->mpd = 0x00;
	msg->datalen = 0x02;
	memset(msg->data, 0xAA, 2);

	pktlen = (__le16 *)skb_push(skb,2);
	*pktlen = cpu_to_le16(skb->len - 2);

	skb_reset_network_header(skb);

	dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id);
}


#define DRDELAY (5 * HZ)

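/*
 * Decide whether we should also advertise ourselves to end nodes as
 * the designated router: never during the first DRDELAY seconds after
 * the device came up, always if no other router has been heard,
 * otherwise only if we have a higher priority than the current
 * router, or the same priority and a higher node address.
 */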
static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa)
{
	/* First check time since device went up */
	if ((jiffies - dn_db->uptime) < DRDELAY)
		return 0;

	/* If there is no router, then yes... */
	if (!dn_db->router)
		return 1;

	/* otherwise only if we have a higher priority or.. */
	if (dn->priority < dn_db->parms.priority)
		return 1;

	/* if we have equal priority and a higher node number */
	if (dn->priority != dn_db->parms.priority)
		return 0;

	if (le16_to_cpu(dn->addr) < le16_to_cpu(ifa->ifa_local))
		return 1;

	return 0;
}

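/*
 * Build and transmit a Router Hello: a control packet carrying the
 * routing version, our id, level 1/2 router type, block size,
 * priority and hello timer, followed by a list of up to 32 known
 * routers (7 bytes each) filled in by dn_neigh_elist().  It always
 * goes to the all routers multicast group; if dn_am_i_a_router()
 * says we should act as designated router, a copy also goes to the
 * all end nodes group.
 */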
static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
{
	int n;
	struct dn_dev *dn_db = dev->dn_ptr;
	struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
	struct sk_buff *skb;
	size_t size;
	unsigned char *ptr;
	unsigned char *i1, *i2;
	__le16 *pktlen;
	char *src;

	if (mtu2blksize(dev) < (26 + 7))
		return;

	n = mtu2blksize(dev) - 26;
	n /= 7;

	if (n > 32)
		n = 32;

	size = 2 + 26 + 7 * n;

	if ((skb = dn_alloc_skb(NULL, size, GFP_ATOMIC)) == NULL)
		return;

	skb->dev = dev;
	ptr = skb_put(skb, size);

	*ptr++ = DN_RT_PKT_CNTL | DN_RT_PKT_ERTH;
	*ptr++ = 2; /* ECO */
	*ptr++ = 0;
	*ptr++ = 0;
	dn_dn2eth(ptr, ifa->ifa_local);
	src = ptr;
	ptr += ETH_ALEN;
	*ptr++ = dn_db->parms.forwarding == 1 ?
			DN_RT_INFO_L1RT : DN_RT_INFO_L2RT;
	*((__le16 *)ptr) = cpu_to_le16(mtu2blksize(dev));
	ptr += 2;
	*ptr++ = dn_db->parms.priority; /* Priority */
	*ptr++ = 0; /* Area: Reserved */
	*((__le16 *)ptr) = cpu_to_le16((unsigned short)dn_db->parms.t3);
	ptr += 2;
	*ptr++ = 0; /* MPD: Reserved */
	i1 = ptr++;
	memset(ptr, 0, 7); /* Name: Reserved */
	ptr += 7;
	i2 = ptr++;

	n = dn_neigh_elist(dev, ptr, n);

	*i2 = 7 * n;
	*i1 = 8 + *i2;

	skb_trim(skb, (27 + *i2));

	pktlen = (__le16 *)skb_push(skb, 2);
	*pktlen = cpu_to_le16(skb->len - 2);

	skb_reset_network_header(skb);

	if (dn_am_i_a_router(dn, dn_db, ifa)) {
		struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			dn_rt_finish_output(skb2, dn_rt_all_end_mcast, src);
		}
	}

	dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
}

static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
{
	struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;

	if (dn_db->parms.forwarding == 0)
		dn_send_endnode_hello(dev, ifa);
	else
		dn_send_router_hello(dev, ifa);
}

static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa)
{
	int tdlen = 16;
	int size = dev->hard_header_len + 2 + 4 + tdlen;
	struct sk_buff *skb = dn_alloc_skb(NULL, size, GFP_ATOMIC);
	int i;
	unsigned char *ptr;
	char src[ETH_ALEN];

	if (skb == NULL)
		return ;

	skb->dev = dev;
	skb_push(skb, dev->hard_header_len);
	ptr = skb_put(skb, 2 + 4 + tdlen);

	*ptr++ = DN_RT_PKT_HELO;
	*((__le16 *)ptr) = ifa->ifa_local;
	ptr += 2;
	*ptr++ = tdlen;

	for(i = 0; i < tdlen; i++)
		*ptr++ = 0252;

	dn_dn2eth(src, ifa->ifa_local);
	dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
}

static int dn_eth_up(struct net_device *dev)
{
	struct dn_dev *dn_db = dev->dn_ptr;

	if (dn_db->parms.forwarding == 0)
		dev_mc_add(dev, dn_rt_all_end_mcast, ETH_ALEN, 0);
	else
		dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0);

	dn_db->use_long = 1;

	return 0;
}

static void dn_eth_down(struct net_device *dev)
{
	struct dn_dev *dn_db = dev->dn_ptr;

	if (dn_db->parms.forwarding == 0)
		dev_mc_delete(dev, dn_rt_all_end_mcast, ETH_ALEN, 0);
	else
		dev_mc_delete(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0);
}

static void dn_dev_set_timer(struct net_device *dev);

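/*
 * Per device hello timer: it fires every t2 seconds and counts the t3
 * interval down; when t3 expires the timer3 hook (one of the hello
 * senders above) is called for every primary address on the interface
 * and the countdown restarts.
 */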
static void dn_dev_timer_func(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	struct dn_dev *dn_db = dev->dn_ptr;
	struct dn_ifaddr *ifa;

	if (dn_db->t3 <= dn_db->parms.t2) {
		if (dn_db->parms.timer3) {
			for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
				if (!(ifa->ifa_flags & IFA_F_SECONDARY))
					dn_db->parms.timer3(dev, ifa);
			}
		}
		dn_db->t3 = dn_db->parms.t3;
	} else {
		dn_db->t3 -= dn_db->parms.t2;
	}

	dn_dev_set_timer(dev);
}

static void dn_dev_set_timer(struct net_device *dev)
{
	struct dn_dev *dn_db = dev->dn_ptr;

	if (dn_db->parms.t2 > dn_db->parms.t3)
		dn_db->parms.t2 = dn_db->parms.t3;

	dn_db->timer.data = (unsigned long)dev;
	dn_db->timer.function = dn_dev_timer_func;
	dn_db->timer.expires = jiffies + (dn_db->parms.t2 * HZ);

	add_timer(&dn_db->timer);
}

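/*
 * Attach DECnet state to a device: find the dn_dev_parms template
 * matching the device type, copy it into a freshly allocated dn_dev,
 * allocate neighbour parameters, run the optional up() hook, register
 * the per device sysctls and start the hello timer.  Returns NULL and
 * sets *err on failure.
 */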
static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
{
	int i;
	struct dn_dev_parms *p = dn_dev_list;
	struct dn_dev *dn_db;

	for(i = 0; i < DN_DEV_LIST_SIZE; i++, p++) {
		if (p->type == dev->type)
			break;
	}

	*err = -ENODEV;
	if (i == DN_DEV_LIST_SIZE)
		return NULL;

	*err = -ENOBUFS;
	if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL)
		return NULL;

	memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
	smp_wmb();
	dev->dn_ptr = dn_db;
	dn_db->dev = dev;
	init_timer(&dn_db->timer);

	dn_db->uptime = jiffies;

	dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
	if (!dn_db->neigh_parms) {
		dev->dn_ptr = NULL;
		kfree(dn_db);
		return NULL;
	}

	if (dn_db->parms.up) {
		if (dn_db->parms.up(dev) < 0) {
			neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms);
			dev->dn_ptr = NULL;
			kfree(dn_db);
			return NULL;
		}
	}

	dn_dev_sysctl_register(dev, &dn_db->parms);

	dn_dev_set_timer(dev);

	*err = 0;
	return dn_db;
}


/*
 * This processes a device up event. We only start up
 * the loopback device & ethernet devices with correct
 * MAC addresses automatically. Others must be started
 * specifically.
 *
 * FIXME: How should we configure the loopback address? If we could dispense
 * with using decnet_address here and for autobind, it would be one less thing
 * for users to worry about setting up.
 */

void dn_dev_up(struct net_device *dev)
{
	struct dn_ifaddr *ifa;
	__le16 addr = decnet_address;
	int maybe_default = 0;
	struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;

	if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
		return;

	/*
	 * Need to ensure that loopback device has a dn_db attached to it
	 * to allow creation of neighbours against it, even though it might
	 * not have a local address of its own. Might as well do the same for
	 * all autoconfigured interfaces.
	 */
	if (dn_db == NULL) {
		int err;
		dn_db = dn_dev_create(dev, &err);
		if (dn_db == NULL)
			return;
	}

	if (dev->type == ARPHRD_ETHER) {
		if (memcmp(dev->dev_addr, dn_hiord, 4) != 0)
			return;
		addr = dn_eth2dn(dev->dev_addr);
		maybe_default = 1;
	}

	if (addr == 0)
		return;

	if ((ifa = dn_dev_alloc_ifa()) == NULL)
		return;

	ifa->ifa_local = ifa->ifa_address = addr;
	ifa->ifa_flags = 0;
	ifa->ifa_scope = RT_SCOPE_UNIVERSE;
	strcpy(ifa->ifa_label, dev->name);

	dn_dev_set_ifa(dev, ifa);

	/*
	 * Automagically set the default device to the first automatically
	 * configured ethernet card in the system.
	 */
	if (maybe_default) {
		dev_hold(dev);
		if (dn_dev_set_default(dev, 0))
			dev_put(dev);
	}
}

static void dn_dev_delete(struct net_device *dev)
{
	struct dn_dev *dn_db = dev->dn_ptr;

	if (dn_db == NULL)
		return;

	del_timer_sync(&dn_db->timer);
	dn_dev_sysctl_unregister(&dn_db->parms);
	dn_dev_check_default(dev);
	neigh_ifdown(&dn_neigh_table, dev);

	if (dn_db->parms.down)
		dn_db->parms.down(dev);

	dev->dn_ptr = NULL;

	neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms);
	neigh_ifdown(&dn_neigh_table, dev);

	if (dn_db->router)
		neigh_release(dn_db->router);
	if (dn_db->peer)
		neigh_release(dn_db->peer);

	kfree(dn_db);
}

void dn_dev_down(struct net_device *dev)
{
	struct dn_dev *dn_db = dev->dn_ptr;
	struct dn_ifaddr *ifa;

	if (dn_db == NULL)
		return;

	while((ifa = dn_db->ifa_list) != NULL) {
		dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0);
		dn_dev_free_ifa(ifa);
	}

	dn_dev_delete(dev);
}

void dn_dev_init_pkt(struct sk_buff *skb)
{
	return;
}

void dn_dev_veri_pkt(struct sk_buff *skb)
{
	return;
}

void dn_dev_hello(struct sk_buff *skb)
{
	return;
}

void dn_dev_devices_off(void)
{
	struct net_device *dev;

	rtnl_lock();
	for_each_netdev(&init_net, dev)
		dn_dev_down(dev);
	rtnl_unlock();

}

void dn_dev_devices_on(void)
{
	struct net_device *dev;

	rtnl_lock();
	for_each_netdev(&init_net, dev) {
		if (dev->flags & IFF_UP)
			dn_dev_up(dev);
	}
	rtnl_unlock();
}

int register_dnaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&dnaddr_chain, nb);
}

int unregister_dnaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&dnaddr_chain, nb);
}

#ifdef CONFIG_PROC_FS
static inline int is_dn_dev(struct net_device *dev)
{
	return dev->dn_ptr != NULL;
}

static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
{
	int i;
	struct net_device *dev;

	read_lock(&dev_base_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	i = 1;
	for_each_netdev(&init_net, dev) {
		if (!is_dn_dev(dev))
			continue;

		if (i++ == *pos)
			return dev;
	}

	return NULL;
}

static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;

	++*pos;

	dev = (struct net_device *)v;
	if (v == SEQ_START_TOKEN)
		dev = net_device_entry(&init_net.dev_base_head);

	for_each_netdev_continue(&init_net, dev) {
		if (!is_dn_dev(dev))
			continue;

		return dev;
	}

	return NULL;
}

static void dn_dev_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&dev_base_lock);
}

static char *dn_type2asc(char type)
{
	switch(type) {
		case DN_DEV_BCAST:
			return "B";
		case DN_DEV_UCAST:
			return "U";
		case DN_DEV_MPOINT:
			return "M";
	}

	return "?";
}

static int dn_dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Name Flags T1 Timer1 T3 Timer3 BlkSize Pri State DevType Router Peer\n");
	else {
		struct net_device *dev = v;
		char peer_buf[DN_ASCBUF_LEN];
		char router_buf[DN_ASCBUF_LEN];
		struct dn_dev *dn_db = dev->dn_ptr;

		seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu"
				" %04hu %03d %02x %-10s %-7s %-7s\n",
				dev->name ? dev->name : "???",
				dn_type2asc(dn_db->parms.mode),
				0, 0,
				dn_db->t3, dn_db->parms.t3,
				mtu2blksize(dev),
				dn_db->parms.priority,
				dn_db->parms.state, dn_db->parms.name,
				dn_db->router ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->router->primary_key), router_buf) : "",
				dn_db->peer ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->peer->primary_key), peer_buf) : "");
	}
	return 0;
}

static const struct seq_operations dn_dev_seq_ops = {
	.start	= dn_dev_seq_start,
	.next	= dn_dev_seq_next,
	.stop	= dn_dev_seq_stop,
	.show	= dn_dev_seq_show,
};

static int dn_dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &dn_dev_seq_ops);
}

static const struct file_operations dn_dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dn_dev_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */

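/*
 * The node address may be supplied as a module parameter, e.g.
 * "addr=1,2" when loading the decnet module for area 1, node 2;
 * dn_dev_init() below range checks it and encodes it as
 * (area << 10) | node in little endian (DECnet network) byte order.
 */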
static int addr[2];
module_param_array(addr, int, NULL, 0444);
MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node");

void __init dn_dev_init(void)
{
	if (addr[0] > 63 || addr[0] < 0) {
		printk(KERN_ERR "DECnet: Area must be between 0 and 63");
		return;
	}

	if (addr[1] > 1023 || addr[1] < 0) {
		printk(KERN_ERR "DECnet: Node must be between 0 and 1023");
		return;
	}

	decnet_address = cpu_to_le16((addr[0] << 10) | addr[1]);

	dn_dev_devices_on();

	rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL);
	rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL);
	rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr);

	proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops);

#ifdef CONFIG_SYSCTL
	{
		int i;
		for(i = 0; i < DN_DEV_LIST_SIZE; i++)
			dn_dev_sysctl_register(NULL, &dn_dev_list[i]);
	}
#endif /* CONFIG_SYSCTL */
}

void __exit dn_dev_cleanup(void)
{
#ifdef CONFIG_SYSCTL
	{
		int i;
		for(i = 0; i < DN_DEV_LIST_SIZE; i++)
			dn_dev_sysctl_unregister(&dn_dev_list[i]);
	}
#endif /* CONFIG_SYSCTL */

	proc_net_remove(&init_net, "decnet_dev");

	dn_dev_devices_off();
}
