1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (c) 2023 Huawei Device Co., Ltd.
4  *
5  * Description: Bluetooth virtual network device used in
6  * the NewIP over Bluetooth communication scenario.
7  *
8  * Author: Yang Yanjun <yangyanjun@huawei.com>
9  *
10  * Date: 2023-03-14
11  */
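/*
 * Overview (a summary drawn from the code below): the module exposes one
 * management character device plus up to BT_VIRNET_MAX_NUM pairs of a data
 * character device ("/dev/btdevN") and a virtual net_device ("btnN").
 * Frames the kernel transmits on btnN are queued on a per-device ring and
 * handed to user space through read() on the matching character device;
 * data written to that device is injected back into the network stack as
 * received frames, so a user-space process can carry NewIP traffic over a
 * Bluetooth link.
 */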
12 
13 #define pr_fmt(fmt) "newip-bt: [%s:%d] " fmt, __func__, __LINE__
14 
15 #include "btdev.h"
16 
17 #define ndev_name(vnet)  bt_virnet_get_ndev_name(vnet)  /* btn1/2/3/4/... */
18 #define cdev_name(vnet)  bt_virnet_get_cdev_name(vnet)  /* /dev/btdev1/2/3/4/... */
19 
20 /* /sys/module/btdev/parameters/btdev_debug */
21 bool g_btdev_debug;
22 module_param_named(btdev_debug, g_btdev_debug, bool, 0644);
23 
24 #define btdev_dbg(fmt, ...) \
25 do { \
26 	if (g_btdev_debug) \
27 		pr_crit(fmt, ##__VA_ARGS__); \
28 } while (0)
29 
30 #define btdev_dbg_err(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
31 
32 static struct bt_drv *g_bt_drv;
33 
34 static int bt_seq_show(struct seq_file *m, void *v)
35 {
36 	struct bt_virnet *vnet = NULL;
37 
38 	if (unlikely(!g_bt_drv)) {
39 		btdev_dbg_err("invalid bt_drv");
40 		return -EINVAL;
41 	}
42 
43 	seq_printf(m, "Total device: %d (bitmap: 0x%X) Ring size: %d\n",
44 		   bt_get_total_device(g_bt_drv), g_bt_drv->bitmap,
45 		   BT_RING_BUFFER_SIZE);
46 
47 	list_for_each_entry(vnet, &g_bt_drv->devices_table->head, virnet_entry) {
48 		seq_printf(m, "dev: %12s, interface: %7s, state: %12s, MTU: %4d\n",
49 			   cdev_name(vnet), ndev_name(vnet),
50 			   bt_virnet_get_state_rep(vnet), vnet->ndev->mtu);
51 		seq_printf(m, "ring head: %4d, ring tail: %4d, packets num: %4d\n",
52 			   vnet->tx_ring->head, vnet->tx_ring->tail,
53 			   bt_virnet_get_ring_packets(vnet));
54 	}
55 
56 	return OK;
57 }
58 
59 static int bt_proc_open(struct inode *inode, struct file *file)
60 {
61 	if (unlikely(!inode) || unlikely(!file)) {
62 		btdev_dbg_err("invalid parameter");
63 		return -EINVAL;
64 	}
65 
66 	return single_open(file, bt_seq_show, PDE_DATA(inode));
67 }
68 
69 static struct proc_ops g_bt_proc_fops = {
70 	.proc_open = bt_proc_open,
71 	.proc_read = seq_read,
72 	.proc_lseek = seq_lseek,
73 	.proc_release = single_release};
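/*
 * For reference, reading the "bt_info_proc" entry created in
 * __bt_module_dev_create() (e.g. `cat /proc/bt_info_proc`) prints one summary
 * line followed by two lines per virtual device, following the seq_printf()
 * formats in bt_seq_show(). A hypothetical single-device example (all field
 * values below are made up):
 *
 *   Total device: 1 (bitmap: 0x3) Ring size: <BT_RING_BUFFER_SIZE>
 *   dev:  /dev/btdev1, interface:    btn1, state: <state>, MTU: 1500
 *   ring head:    0, ring tail:    0, packets num:    0
 */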
74 
75 static int __bt_virnet_open(struct file *filp, struct bt_virnet *vnet)
76 {
77 	struct net_device *ndev;
78 
79 	if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
80 		/* atomic_dec_and_test() returns true only when xx_open_limit hits 0
81 		 * after the decrement, i.e. on the first open in this access mode.
82 		 */
83 		if (unlikely(!atomic_dec_and_test(&vnet->io_file->read_open_limit)))
84 			goto read_twice_already;
85 	} else if ((filp->f_flags & O_ACCMODE) == O_WRONLY) {
86 		if (unlikely(!atomic_dec_and_test(&vnet->io_file->write_open_limit)))
87 			goto write_twice_already;
88 	} else if ((filp->f_flags & O_ACCMODE) == O_RDWR) {
89 		if (unlikely(!atomic_dec_and_test(&vnet->io_file->read_open_limit)))
90 			goto read_twice_already;
91 		if (unlikely(!atomic_dec_and_test(&vnet->io_file->write_open_limit)))
92 			goto write_twice_already;
93 	}
94 
95 	/* xx_open_limit just dropped to 0: this is the first open, so bring the netdev up */
96 	rtnl_lock();
97 	ndev = vnet->ndev;
98 	if (unlikely(!(ndev->flags & IFF_UP))) {
99 		int ret = dev_change_flags(ndev, ndev->flags | IFF_UP, NULL);
100 
101 		if (unlikely(ret < 0)) {
102 			rtnl_unlock();
103 			btdev_dbg_err("%s dev change flags failed, ret=%d", cdev_name(vnet), ret);
104 			return -EBUSY;
105 		}
106 	}
107 	rtnl_unlock();
108 
109 	set_state(vnet, BT_VIRNET_STATE_CONNECTED);
110 	filp->private_data = vnet;
111 	btdev_dbg("%s has been opened", cdev_name(vnet));
112 	return OK;
113 
114 	/* The device is already open in this mode; undo the failed decrement
115 	 * so that xx_open_limit goes back to 0 (still counted as opened once).
116 	 */
117 read_twice_already:
118 	atomic_inc(&vnet->io_file->read_open_limit);
119 	btdev_dbg_err("%s has been opened for read twice already", cdev_name(vnet));
120 	return -EBUSY;
121 
122 write_twice_already:
123 	atomic_inc(&vnet->io_file->write_open_limit);
124 	btdev_dbg_err("%s has been opened for write twice already", cdev_name(vnet));
125 	return -EBUSY;
126 }
127 
128 static int bt_io_file_open(struct inode *node, struct file *filp)
129 {
130 	struct bt_virnet *vnet = NULL;
131 
132 	if (unlikely(!node) || unlikely(!filp)) {
133 		btdev_dbg_err("invalid parameter");
134 		return -EINVAL;
135 	}
136 
137 	list_for_each_entry(vnet, &g_bt_drv->devices_table->head, virnet_entry) {
138 		if (bt_virnet_get_cdev(vnet) == node->i_cdev)
139 			return __bt_virnet_open(filp, vnet);
140 	}
141 	return -EIO;
142 }
143 
144 static int bt_io_file_release(struct inode *node, struct file *filp)
145 {
146 	struct bt_virnet *vnet = NULL;
147 
148 	if (unlikely(!filp) || unlikely(!filp->private_data)) {
149 		btdev_dbg_err("invalid parameter");
150 		return -EINVAL;
151 	}
152 
153 	vnet = filp->private_data;
154 	btdev_dbg("%s has been released", cdev_name(vnet));
155 
156 	/* Set xx_open_limit to 1 when the file is closed */
157 	if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
158 		atomic_inc(&vnet->io_file->read_open_limit);
159 	} else if ((filp->f_flags & O_ACCMODE) == O_WRONLY) {
160 		atomic_inc(&vnet->io_file->write_open_limit);
161 	} else if ((filp->f_flags & O_ACCMODE) == O_RDWR) {
162 		atomic_inc(&vnet->io_file->read_open_limit);
163 		atomic_inc(&vnet->io_file->write_open_limit);
164 	}
165 
166 	set_state(vnet, BT_VIRNET_STATE_DISCONNECTED);
167 
168 	return OK;
169 }
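/*
 * Together with __bt_virnet_open() above, read_open_limit/write_open_limit
 * act as per-mode single-open tickets: they start at 1, the first open in a
 * given access mode takes the ticket (decrement to 0), a second open in the
 * same mode fails with -EBUSY, and release() returns the ticket(s).
 */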
170 
171 static ssize_t bt_io_file_read(struct file *filp,
172 			       char __user *buffer,
173 			       size_t size, loff_t *off)
174 {
175 	struct bt_virnet *vnet = NULL;
176 	ssize_t out_sz;
177 	struct sk_buff *skb = NULL;
178 
179 	if (unlikely(!filp) || unlikely(!buffer) || unlikely(!filp->private_data)) {
180 		btdev_dbg_err("invalid parameter");
181 		return -EINVAL;
182 	}
183 
184 	vnet = filp->private_data;
185 	while (unlikely(bt_ring_is_empty(vnet->tx_ring))) {
186 		if (filp->f_flags & O_NONBLOCK)
187 			return -EAGAIN;
188 
189 		if (wait_event_interruptible(vnet->rx_queue, !bt_ring_is_empty(vnet->tx_ring)))
190 			return -ERESTARTSYS;
191 	}
192 
193 	skb = bt_ring_current(vnet->tx_ring);
194 	if (unlikely(!skb)) {
195 		btdev_dbg_err("%s invalid skb", cdev_name(vnet));
196 		return -EINVAL;
197 	}
198 	out_sz = skb->len - MACADDR_LEN;
199 	if (unlikely(out_sz > size)) {
200 		/* The skb is only peeked from the ring buffer here. If the
201 		 * user-space buffer is too small to hold its payload, the skb
202 		 * must not be freed: it is still queued in the ring buffer and
203 		 * will be read again by a later call.
204 		 */
205 		btdev_dbg_err("%s usr-buf too small, skb-len=%ld, usr-buf-len=%ld",
206 			      cdev_name(vnet), (long)out_sz, (long)size);
207 		return -EINVAL;
208 	}
209 
210 	bt_ring_consume(vnet->tx_ring);
211 	if (copy_to_user(buffer, skb->data + MACADDR_LEN, out_sz)) {
212 		/* The skb has already been consumed (removed) from the ring
213 		 * buffer, so it must be freed here when copying its data to
214 		 * user space fails.
215 		 */
216 		btdev_dbg_err("%s copy to user failed", cdev_name(vnet));
217 		dev_kfree_skb(skb);
218 		return -EIO;
219 	}
220 	dev_kfree_skb(skb);
221 
222 	btdev_dbg("read %ld data from %s", (long)out_sz, cdev_name(vnet));
223 	if (unlikely(netif_queue_stopped(vnet->ndev))) {
224 		btdev_dbg("consume data: wake the queue");
225 		netif_wake_queue(vnet->ndev);
226 	}
227 
228 	return out_sz;
229 }
230 
231 static ssize_t bt_io_file_write(struct file *filp,
232 				const char __user *buffer,
233 				size_t size, loff_t *off)
234 {
235 	struct bt_virnet *vnet = NULL;
236 	struct sk_buff *skb = NULL;
237 	int ret;
238 	int len;
239 	ssize_t in_sz;
240 
241 	if (unlikely(!filp) || unlikely(!buffer) || unlikely(!filp->private_data)) {
242 		btdev_dbg_err("invalid parameter");
243 		return -EINVAL;
244 	}
245 
246 	vnet = filp->private_data;
247 	in_sz = size + MACADDR_LEN;
248 
249 	/* Ethernet head length: DMAC(6B) + SMAC(6B) + eth-type(2B) */
250 	skb = netdev_alloc_skb(bt_virnet_get_ndev(vnet), in_sz + NEWIP_TYPE_SIZE);
251 	if (unlikely(!skb))
252 		return -ENOMEM;
253 
254 	skb_reserve(skb, NEWIP_TYPE_SIZE);
255 	skb_put(skb, in_sz);
256 
257 	memset(skb->data, 0, MACADDR_LEN);
258 	if (copy_from_user(skb->data + MACADDR_LEN, buffer, size)) {
259 		btdev_dbg_err("%s copy from user failed", cdev_name(vnet));
260 		dev_kfree_skb(skb);
261 		return -EIO;
262 	}
263 
264 	len = skb->len;
265 	skb->dev = bt_virnet_get_ndev(vnet);
266 	skb->protocol = eth_type_trans(skb, bt_virnet_get_ndev(vnet));
267 	ret = netif_rx_ni(skb);
268 
269 	if (ret == NET_RX_SUCCESS) {
270 		btdev_dbg("write %lu bytes data to %s", size, cdev_name(vnet));
271 		vnet->ndev->stats.rx_packets++;
272 		vnet->ndev->stats.rx_bytes += len;
273 	} else {
274 		btdev_dbg_err("failed to write %lu bytes data to %s", size, cdev_name(vnet));
275 		vnet->ndev->stats.rx_errors++;
276 		vnet->ndev->stats.rx_dropped++;
277 	}
278 
279 	return size;
280 }
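/*
 * Note on the payload layout implemented by the two functions above: read()
 * copies skb->data + MACADDR_LEN and reports skb->len - MACADDR_LEN, while
 * write() zero-fills the first MACADDR_LEN bytes before eth_type_trans(), so
 * user space never sees (or supplies) the leading MACADDR_LEN link-layer
 * address bytes, only the payload that follows them.
 */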
281 
282 static int bt_virnet_change_mtu(struct net_device *dev, int mtu)
283 {
284 	if (unlikely(!dev) || unlikely(mtu < 0) || unlikely(mtu > BT_MAX_MTU)) {
285 		btdev_dbg_err("invalid parameter");
286 		return -EINVAL;
287 	}
288 	btdev_dbg("change %s mtu %u to %d", dev->name, dev->mtu, mtu);
289 	dev->mtu = mtu;
290 	return OK;
291 }
292 
293 static int bt_set_mtu(struct net_device *dev, int mtu)
294 {
295 	int err = OK;
296 
297 	if (unlikely(mtu < 0) || unlikely(mtu > BT_MAX_MTU)) {
298 		btdev_dbg_err("invalid parameter");
299 		return -EINVAL;
300 	}
301 
302 	rtnl_lock();
303 	err = dev_set_mtu(dev, mtu);
304 	rtnl_unlock();
305 	if (err < 0)
306 		btdev_dbg_err("failed to set %s mtu to %d, err=%d", dev->name, mtu, err);
307 	else
308 		btdev_dbg("set %s mtu to %d", dev->name, mtu);
309 
310 	return err;
311 }
312 
313 static int bt_cmd_enable_virnet(struct bt_virnet *vnet, unsigned long arg)
314 {
315 	int ret;
316 
317 	if (unlikely(vnet->state != BT_VIRNET_STATE_DISABLED)) {
318 		btdev_dbg_err("%s enable can only be set at disabled state", cdev_name(vnet));
319 		return -EINVAL; // enable failed
320 	}
321 
322 	rtnl_lock();
323 	ret = dev_change_flags(vnet->ndev, vnet->ndev->flags | IFF_UP, NULL);
324 	rtnl_unlock();
325 	if (unlikely(ret < 0)) {
326 		btdev_dbg_err("%s dev change flags failed, ret=%d", cdev_name(vnet), ret);
327 		return -EIO;
328 	}
329 
330 	btdev_dbg("%s has been enabled", cdev_name(vnet));
331 	set_state(vnet, BT_VIRNET_STATE_CONNECTED);
332 	return OK;
333 }
334 
335 static int bt_cmd_disable_virnet(struct bt_virnet *vnet, unsigned long arg)
336 {
337 	int ret;
338 
339 	if (unlikely(vnet->state != BT_VIRNET_STATE_CONNECTED)) {
340 		btdev_dbg_err("%s disable can only be set at connected state", cdev_name(vnet));
341 		return -EINVAL;
342 	}
343 
344 	rtnl_lock();
345 	ret = dev_change_flags(vnet->ndev, vnet->ndev->flags & ~IFF_UP, NULL);
346 	rtnl_unlock();
347 	if (unlikely(ret < 0)) {
348 		btdev_dbg_err("%s dev change flags failed, ret=%d", cdev_name(vnet), ret);
349 		return -EIO;
350 	}
351 
352 	btdev_dbg("%s has been disabled", cdev_name(vnet));
353 	set_state(vnet, BT_VIRNET_STATE_DISABLED);
354 	return OK;
355 }
356 
357 static int bt_cmd_change_mtu(struct bt_virnet *vnet, unsigned long arg)
358 {
359 	int mtu;
360 	int ret;
361 
362 	if (unlikely(get_user(mtu, (int __user *)arg))) {
363 		btdev_dbg_err("%s get user failed", ndev_name(vnet));
364 		return -EIO;
365 	}
366 
367 	ret = bt_set_mtu(vnet->ndev, mtu);
368 	if (unlikely(ret < 0)) {
369 		btdev_dbg_err("%s change mtu to %d failed", ndev_name(vnet), mtu);
370 		return -EIO;
371 	}
372 
373 	btdev_dbg("%s changed mtu to %d", ndev_name(vnet), mtu);
374 	return OK;
375 }
376 
377 static int bt_cmd_peek_packet(struct bt_virnet *vnet, unsigned long arg)
378 {
379 	u32 len;
380 	struct sk_buff *skb;
381 
382 	if (unlikely(bt_ring_is_empty(vnet->tx_ring))) {
383 		btdev_dbg_err("%s ring is empty", ndev_name(vnet));
384 		return -EAGAIN;
385 	}
386 
387 	/* This only reports the length of the packet at the head of the ring
388 	 * buffer; the skb stays queued there, so it must not be freed here.
389 	 */
390 	skb = bt_ring_current(vnet->tx_ring);
391 	if (unlikely(!skb)) {
392 		btdev_dbg_err("%s invalid skb", ndev_name(vnet));
393 		return -EINVAL;
394 	}
395 
396 	len = skb->len - MACADDR_LEN;
397 	if (unlikely(put_user(len, (int __user *)arg))) {
398 		btdev_dbg_err("%s put_user failed", ndev_name(vnet));
399 		return -EIO;
400 	}
401 
402 	btdev_dbg("%s get packet len is %u", ndev_name(vnet), len);
403 	return OK;
404 }
405 
406 static long bt_io_file_ioctl(struct file *filep,
407 			     unsigned int cmd,
408 			     unsigned long arg)
409 {
410 	long ret;
411 	struct bt_virnet *vnet = NULL;
412 
413 	if (unlikely(!filep) || unlikely(!filep->private_data)) {
414 		btdev_dbg_err("invalid parameter");
415 		return -EINVAL;
416 	}
417 	vnet = filep->private_data;
418 	switch (cmd) {
419 	case BT_IOC_CHANGE_MTU:
420 		ret = bt_cmd_change_mtu(vnet, arg);
421 		break;
422 	case BT_IOC_ENABLE:
423 		ret = bt_cmd_enable_virnet(vnet, arg);
424 		break;
425 	case BT_IOC_DISABLE:
426 		ret = bt_cmd_disable_virnet(vnet, arg);
427 		break;
428 	case BT_IOC_PEEK_PACKET:
429 		ret = bt_cmd_peek_packet(vnet, arg);
430 		break;
431 	default:
432 		btdev_dbg_err("not a valid cmd(%u)", cmd);
433 		return -ENOIOCTLCMD;
434 	}
435 
436 	return ret;
437 }
438 
439 static unsigned int bt_io_file_poll(struct file *filp, poll_table *wait)
440 {
441 	struct bt_virnet *vnet = NULL;
442 	unsigned int mask = 0;
443 
444 	if (unlikely(!filp) || unlikely(!wait) || unlikely(!filp->private_data)) {
445 		btdev_dbg_err("invalid parameter");
446 		return -EINVAL;
447 	}
448 	vnet = filp->private_data;
449 	poll_wait(filp, &vnet->rx_queue, wait);
450 
451 	if (!bt_ring_is_empty(vnet->tx_ring)) // readable
452 		mask |= POLLIN | POLLRDNORM;
453 
454 	if (!bt_ring_is_full(vnet->tx_ring)) // writable
455 		mask |= POLLOUT | POLLWRNORM;
456 
457 	return mask;
458 }
459 
460 static const struct file_operations g_bt_io_file_ops = {
461 	.owner = THIS_MODULE,
462 	.open = bt_io_file_open,
463 	.release = bt_io_file_release,
464 	.read = bt_io_file_read,
465 	.write = bt_io_file_write,
466 	.poll = bt_io_file_poll,
467 	.unlocked_ioctl = bt_io_file_ioctl,
468 	.compat_ioctl = bt_io_file_ioctl};
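/*
 * A rough sketch (error handling omitted; the device path is an assumption
 * based on the BT_DEV_PATH_PREFIX comment near the top of this file) of how a
 * user-space peer might drive one virtual interface through its data cdev,
 * matching the fops table above:
 *
 *   int fd = open("/dev/btdev1", O_RDWR);          // one reader/writer only
 *   ioctl(fd, BT_IOC_PEEK_PACKET, &len);           // length of next packet
 *   n = read(fd, buf, len);                        // pop one queued frame
 *   write(fd, buf, n);                             // inject a received frame
 *
 * read() blocks while the tx ring is empty (or returns -EAGAIN with
 * O_NONBLOCK), and poll() reports POLLIN/POLLOUT from the same ring state.
 */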
469 
470 static int bt_mng_file_open(struct inode *node, struct file *filp)
471 {
472 	if (unlikely(!filp)) {
473 		btdev_dbg_err("bt mng file open: invalid filp");
474 		return -EINVAL;
475 	}
476 
477 	/* atomic_dec_and_test() returns true only on the first open, when open_limit hits 0 */
478 	if (unlikely(!atomic_dec_and_test(&g_bt_drv->mng_file->open_limit))) {
479 		/* The management device is already open; undo the failed decrement
480 		 * so that open_limit goes back to 0 (still counted as opened once).
481 		 */
482 		atomic_inc(&g_bt_drv->mng_file->open_limit);
483 		btdev_dbg_err("file %s has been opened already",
484 			      g_bt_drv->mng_file->bt_cdev->dev_filename);
485 		return -EBUSY;
486 	}
487 
488 	/* open_limit becomes 0 after the file is first opened */
489 	filp->private_data = g_bt_drv;
490 
491 	btdev_dbg("%s has been opened", g_bt_drv->mng_file->bt_cdev->dev_filename);
492 	return OK;
493 }
494 
495 static int bt_mng_file_release(struct inode *node, struct file *filp)
496 {
497 	struct bt_drv *drv = NULL;
498 
499 	if (unlikely(!filp) || unlikely(!filp->private_data)) {
500 		btdev_dbg_err("invalid parameter");
501 		return -EINVAL;
502 	}
503 	drv = filp->private_data;
504 
505 	/* Set open_limit to 1 when the file is closed */
506 	atomic_inc(&drv->mng_file->open_limit);
507 
508 	btdev_dbg("%s has been released", g_bt_drv->mng_file->bt_cdev->dev_filename);
509 	return OK;
510 }
511 
512 static int bt_cmd_create_virnet(struct bt_drv *bt_mng, unsigned long arg)
513 {
514 	int id;
515 	int ret;
516 	struct bt_virnet *vnet = NULL;
517 	struct bt_uioc_args vp;
518 	unsigned long size;
519 
520 	mutex_lock(&bt_mng->bitmap_lock);
521 	id = bt_get_unused_id(bt_mng->bitmap);
522 	if ((unlikely(bt_mng->devices_table->num >= BT_VIRNET_MAX_NUM)) ||
523 	    (unlikely(id < 0))) {
524 		btdev_dbg_err("reach the limit of max virnets");
525 		goto virnet_create_failed;
526 	}
527 	vnet = bt_virnet_create(bt_mng, id);
528 	if (unlikely(!vnet)) {
529 		btdev_dbg_err("bt virnet create failed");
530 		goto virnet_create_failed;
531 	}
532 
533 	ret = bt_table_add_device(bt_mng->devices_table, vnet);
534 	if (unlikely(ret < 0)) {
535 		btdev_dbg_err("bt table add device failed: ret=%d", ret);
536 		goto add_device_failed;
537 	}
538 
539 	bt_set_bit(&bt_mng->bitmap, id);
540 	mutex_unlock(&bt_mng->bitmap_lock);
541 
542 	memcpy(vp.ifa_name, ndev_name(vnet), sizeof(vp.ifa_name));
543 	memcpy(vp.cfile_name, cdev_name(vnet), sizeof(vp.cfile_name));
544 
545 	mdelay(DELAY_100_MS);
546 
547 	size = copy_to_user((void __user *)arg, &vp, sizeof(struct bt_uioc_args));
548 	if (unlikely(size)) {
549 		btdev_dbg_err("copy_to_user failed: left size=%lu", size);
550 		goto copy_to_user_failed;
551 	}
552 
553 	btdev_dbg("%s has been created", ndev_name(vnet));
554 	return OK;
555 
556 copy_to_user_failed:
557 	mutex_lock(&bt_mng->bitmap_lock);
558 	bt_table_remove_device(bt_mng->devices_table, vnet);
559 	bt_clear_bit(&bt_mng->bitmap, id);
560 
561 add_device_failed:
562 	bt_virnet_destroy(vnet);
563 
564 virnet_create_failed:
565 	mutex_unlock(&bt_mng->bitmap_lock);
566 	return -EIO;
567 }
568 
569 static int bt_cmd_delete_virnet(struct bt_drv *bt_mng, unsigned long arg)
570 {
571 	int err;
572 	struct bt_virnet *vnet = NULL;
573 	struct bt_uioc_args vp;
574 	unsigned long size;
575 	dev_t number;
576 
577 	size = copy_from_user(&vp, (void __user *)arg,
578 			      sizeof(struct bt_uioc_args));
579 	if (unlikely(size)) {
580 		btdev_dbg_err("copy_from_user failed: left size=%lu", size);
581 		return -EIO;
582 	}
583 
584 	vnet = bt_table_find(bt_mng->devices_table, vp.ifa_name);
585 	if (unlikely(!vnet)) {
586 		btdev_dbg_err("virnet: %s cannot be found in bt table", vp.ifa_name);
587 		return -EIO; // not found
588 	}
589 
590 	btdev_dbg("%s has been deleted", ndev_name(vnet));
591 	mutex_lock(&bt_mng->bitmap_lock);
592 	err = bt_virnet_get_cdev_number(vnet, &number);
593 	if (likely(!err))
594 		bt_clear_bit(&bt_mng->bitmap, (u32)MINOR(number));
595 	bt_table_remove_device(bt_mng->devices_table, vnet);
596 	bt_virnet_destroy(vnet);
597 	mutex_unlock(&bt_mng->bitmap_lock);
598 	return OK;
599 }
600 
601 static int bt_cmd_query_all_virnets(struct bt_drv *bt_mng, unsigned long arg)
602 {
603 	if (unlikely(put_user(bt_mng->bitmap, (u32 *)arg))) {
604 		btdev_dbg_err("put_user failed");
605 		return -EIO;
606 	}
607 	return OK;
608 }
609 
610 static int bt_cmd_delete_all_virnets(struct bt_drv *bt_mng, unsigned long arg)
611 {
612 	return bt_table_delete_all(bt_mng);
613 }
614 
615 static long bt_mng_file_ioctl(struct file *filep,
616 			      unsigned int cmd,
617 			      unsigned long arg)
618 {
619 	int ret;
620 	struct bt_drv *bt_mng = NULL;
621 
622 	if (unlikely(!filep) || unlikely(!filep->private_data)) {
623 		btdev_dbg_err("invalid parameter");
624 		return -EINVAL;
625 	}
626 	bt_mng = filep->private_data;
627 
628 	switch (cmd) {
629 	case BT_IOC_CREATE:
630 		ret = bt_cmd_create_virnet(bt_mng, arg);
631 		break;
632 	case BT_IOC_DELETE:
633 		ret = bt_cmd_delete_virnet(bt_mng, arg);
634 		break;
635 	case BT_IOC_QUERY_ALL:
636 		ret = bt_cmd_query_all_virnets(bt_mng, arg);
637 		break;
638 	case BT_IOC_DELETE_ALL:
639 		ret = bt_cmd_delete_all_virnets(bt_mng, arg);
640 		break;
641 	default:
642 		btdev_dbg_err("not a valid cmd(%u)", cmd);
643 		return -ENOIOCTLCMD;
644 	}
645 	return ret;
646 }
647 
648 static const struct file_operations g_bt_mng_file_ops = {
649 	.owner = THIS_MODULE,
650 	.open = bt_mng_file_open,
651 	.release = bt_mng_file_release,
652 	.unlocked_ioctl = bt_mng_file_ioctl,
653 	.compat_ioctl = bt_mng_file_ioctl};
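/*
 * A rough sketch of the management flow built on the fops table above; the
 * management node path is an assumption (minor 0 under BT_DEV_PATH_PREFIX):
 *
 *   int mng = open("/dev/btdev0", O_RDWR);         // only one opener allowed
 *   struct bt_uioc_args args;
 *   u32 bitmap;
 *   ioctl(mng, BT_IOC_CREATE, &args);              // fills ifa_name/cfile_name
 *   ioctl(mng, BT_IOC_QUERY_ALL, &bitmap);         // u32 bitmap of used ids
 *   ioctl(mng, BT_IOC_DELETE, &args);              // delete by args.ifa_name
 *   ioctl(mng, BT_IOC_DELETE_ALL, 0);              // tear everything down
 */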
654 
655 static netdev_tx_t bt_virnet_xmit(struct sk_buff *skb,
656 				  struct net_device *dev)
657 {
658 	int ret;
659 	struct bt_virnet *vnet = NULL;
660 
661 	if (unlikely(!skb) || unlikely(!dev)) {
662 		btdev_dbg_err("invalid parameter");
663 		return -EINVAL;
664 	}
665 
666 	vnet = bt_table_find(g_bt_drv->devices_table, dev->name);
667 	if (unlikely(!vnet)) {
668 		btdev_dbg_err("bt_table_find %s failed", dev->name);
669 		return -EINVAL;
670 	}
671 
672 	ret = bt_virnet_produce_data(vnet, (void *)skb);
673 	if (unlikely(ret < 0)) {
674 		btdev_dbg("%s produce data failed: ring is full, need to stop queue",
675 			  ndev_name(vnet));
676 		netif_stop_queue(vnet->ndev);
677 		return NETDEV_TX_BUSY;
678 	}
679 
680 	vnet->ndev->stats.tx_packets++;
681 	vnet->ndev->stats.tx_bytes += skb->len;
682 
683 	btdev_dbg("%s send success, skb-len=%u", ndev_name(vnet), skb->len);
684 	return NETDEV_TX_OK;
685 }
686 
687 static const struct net_device_ops g_bt_virnet_ops = {
688 	.ndo_start_xmit = bt_virnet_xmit,
689 	.ndo_change_mtu = bt_virnet_change_mtu};
690 
691 static struct bt_table *bt_table_init(void)
692 {
693 	struct bt_table *tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
694 
695 	if (unlikely(!tbl)) {
696 		btdev_dbg_err("alloc failed");
697 		return NULL;
698 	}
699 
700 	INIT_LIST_HEAD(&tbl->head);
701 	mutex_init(&tbl->tbl_lock);
702 	tbl->num = 0;
703 	return tbl;
704 }
705 
706 static int bt_table_add_device(struct bt_table *tbl, struct bt_virnet *vn)
707 {
708 	struct bt_virnet *vnet = NULL;
709 
710 	if (unlikely(!tbl)) {
711 		btdev_dbg_err("invalid parameter");
712 		return -EINVAL;
713 	}
714 
715 	vnet = bt_table_find(tbl, ndev_name(vn));
716 	if (unlikely(vnet)) {
717 		btdev_dbg_err("found duplicated device %s", ndev_name(vn));
718 		return -ENOIOCTLCMD; // duplicated
719 	}
720 
721 	btdev_dbg("%s has been added", ndev_name(vn));
722 	mutex_lock(&tbl->tbl_lock);
723 	list_add_tail(&vn->virnet_entry, &tbl->head);
724 	if (tbl->num < UINT32_MAX)
725 		++tbl->num;
726 	mutex_unlock(&tbl->tbl_lock);
727 
728 	return OK;
729 }
730 
731 static void bt_table_remove_device(struct bt_table *tbl, struct bt_virnet *vn)
732 {
733 	if (unlikely(!tbl))
734 		return;
735 
736 	btdev_dbg("%s has been removed", ndev_name(vn));
737 	mutex_lock(&tbl->tbl_lock);
738 	list_del(&vn->virnet_entry);
739 	if (tbl->num)
740 		--tbl->num;
741 	mutex_unlock(&tbl->tbl_lock);
742 }
743 
744 static struct bt_virnet *bt_table_find(struct bt_table *tbl, const char *ifa_name)
745 {
746 	struct bt_virnet *vnet = NULL;
747 
748 	if (unlikely(!tbl) || unlikely(!ifa_name)) {
749 		btdev_dbg_err("invalid parameter");
750 		return NULL;
751 	}
752 
753 	list_for_each_entry(vnet, &tbl->head, virnet_entry) {
754 		if (!strcmp(ndev_name(vnet), ifa_name))
755 			return vnet;
756 	}
757 
758 	return NULL;
759 }
760 
761 static void __bt_table_delete_all(struct bt_drv *drv)
762 {
763 	dev_t number;
764 	struct bt_virnet *vnet = NULL;
765 	struct bt_virnet *tmp_vnet = NULL;
766 
767 	if (unlikely(!g_bt_drv->devices_table))
768 		return;
769 
770 	list_for_each_entry_safe(vnet,
771 				 tmp_vnet,
772 				 &drv->devices_table->head,
773 				 virnet_entry) {
774 		int err = bt_virnet_get_cdev_number(vnet, &number);
775 
776 		if (likely(!err))
777 			bt_clear_bit(&drv->bitmap, (u32)MINOR(number));
778 		list_del(&vnet->virnet_entry);
779 		btdev_dbg("%s has been deleted", ndev_name(vnet));
780 		bt_virnet_destroy(vnet);
781 	}
782 	drv->devices_table->num = 0;
783 }
784 
785 static int bt_table_delete_all(struct bt_drv *drv)
786 {
787 	if (unlikely(!drv->devices_table))
788 		return -EINVAL;
789 
790 	mutex_lock(&drv->bitmap_lock);
791 	mutex_lock(&drv->devices_table->tbl_lock);
792 
793 	__bt_table_delete_all(drv);
794 
795 	mutex_unlock(&drv->devices_table->tbl_lock);
796 	mutex_unlock(&drv->bitmap_lock);
797 	return OK;
798 }
799 
800 static void bt_table_destroy(struct bt_drv *drv)
801 {
802 	__bt_table_delete_all(drv);
803 	kfree(drv->devices_table);
804 	drv->devices_table = NULL;
805 }
806 
807 static struct bt_ring *__bt_ring_create(int size)
808 {
809 	struct bt_ring *ring;
810 
811 	if (unlikely(size < 0))
812 		return NULL;
813 
814 	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
815 	if (unlikely(!ring)) {
816 		btdev_dbg_err("ring alloc failed");
817 		return NULL;
818 	}
819 
820 	ring->head = 0;
821 	ring->tail = 0;
822 	ring->data = kmalloc_array(size, sizeof(void *), GFP_KERNEL);
823 	if (unlikely(!ring->data)) {
824 		btdev_dbg_err("ring data alloc failed");
825 		kfree(ring);
826 		return NULL;
827 	}
828 	ring->size = size;
829 
830 	return ring;
831 }
832 
833 static struct bt_ring *bt_ring_create(void)
834 {
835 	return __bt_ring_create(BT_RING_BUFFER_SIZE);
836 }
837 
838 static int bt_ring_is_empty(const struct bt_ring *ring)
839 {
840 	if (unlikely(!ring))
841 		return TRUE;
842 
843 	return ring->head == ring->tail;
844 }
845 
846 static int bt_ring_is_full(const struct bt_ring *ring)
847 {
848 	if (unlikely(!ring))
849 		return TRUE;
850 
851 	return (ring->head + 1) % ring->size == ring->tail;
852 }
853 
854 static void bt_ring_produce(struct bt_ring *ring, void *data)
855 {
856 	smp_mb(); // Make sure the read and write order is correct
857 	ring->data[ring->head] = data;
858 	ring->head = (ring->head + 1) % ring->size;
859 	smp_wmb(); // Make sure the write order is correct
860 }
861 
862 static void *bt_ring_current(struct bt_ring *ring)
863 {
864 	void *data = NULL;
865 
866 	if (unlikely(!ring))
867 		return data;
868 
869 	data = ring->data[ring->tail];
870 	return data;
871 }
872 
873 static void bt_ring_consume(struct bt_ring *ring)
874 {
875 	if (unlikely(!ring))
876 		return;
877 
878 	smp_rmb(); // Make sure the read order is correct
879 	ring->tail = (ring->tail + 1) % ring->size;
880 	smp_mb(); // Make sure the read and write order is correct
881 }
882 
883 static void bt_ring_destroy(struct bt_ring *ring)
884 {
885 	if (unlikely(!ring))
886 		return;
887 
888 	kfree(ring->data);
889 	kfree(ring);
890 }
891 
892 static int bt_virnet_produce_data(struct bt_virnet *dev, void *data)
893 {
894 	if (unlikely(bt_ring_is_full(dev->tx_ring))) {
895 		btdev_dbg("ring is full");
896 		return -ENFILE;
897 	}
898 
899 	/* There is a memory barrier inside the function */
900 	bt_ring_produce(dev->tx_ring, data);
901 	wake_up(&dev->rx_queue);
902 	return OK;
903 }
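/*
 * The tx_ring functions above form a single-producer/single-consumer queue:
 * bt_virnet_xmit() produces skbs from the network stack via
 * bt_virnet_produce_data(), and bt_io_file_read() (the single reader allowed
 * by read_open_limit) consumes them. "Full" is (head + 1) % size == tail, so
 * one slot is sacrificed and at most BT_RING_BUFFER_SIZE - 1 packets queue.
 */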
904 
905 /**
906  * register the char device number region
907  */
908 static int bt_cdev_region_init(int major, int count)
909 {
910 	return register_chrdev_region(MKDEV(major, 0), count, "bt");
911 }
912 
913 static struct class *bt_dev_class_create(void)
914 {
915 	struct class *cls = class_create(THIS_MODULE, "bt");
916 
917 	if (IS_ERR(cls)) {
918 		btdev_dbg_err("create struct class failed");
919 		return NULL;
920 	}
921 	return cls;
922 }
923 
924 static void bt_dev_class_destroy(struct class *cls)
925 {
926 	if (unlikely(!cls))
927 		return;
928 
929 	class_destroy(cls);
930 }
931 
932 static void bt_cdev_device_destroy(struct bt_cdev *dev)
933 {
934 	device_destroy(dev->bt_class, dev->cdev->dev);
935 }
936 
937 static int bt_cdev_device_create(struct bt_cdev *dev,
938 				 struct class *cls,
939 				 u32 id)
940 {
941 	struct device *device = NULL;
942 	dev_t devno = MKDEV(BT_DEV_MAJOR, id);
943 	int ret;
944 
945 	if (unlikely(!cls)) {
946 		btdev_dbg_err("not a valid class");
947 		return -EINVAL;
948 	}
949 
950 	dev->bt_class = cls;
951 	device = device_create(cls, NULL, devno, NULL, "%s%u", BT_DEV_NAME_PREFIX, id);
952 	if (IS_ERR(device)) {
953 		btdev_dbg_err("create device failed, id=%d", id);
954 		return -EIO;
955 	}
956 	ret = snprintf(dev->dev_filename, sizeof(dev->dev_filename),
957 		       "%s%u", BT_DEV_PATH_PREFIX, id);
958 	if (ret < 0) {
959 		btdev_dbg_err("snprintf failed, id=%d", id);
960 		bt_cdev_device_destroy(dev);
961 		return -EFAULT;
962 	}
963 
964 	btdev_dbg("%s has been created", dev->dev_filename);
965 	return OK;
966 }
967 
968 static struct bt_cdev *bt_cdev_create(const struct file_operations *ops,
969 				      u32 id)
970 {
971 	int ret;
972 	int minor = id;
973 	struct bt_cdev *dev = NULL;
974 	struct cdev *chrdev = NULL;
975 
976 	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
977 	if (unlikely(!dev)) {
978 		btdev_dbg_err("dev alloc failed, id=%d", id);
979 		goto dev_alloc_failed;
980 	}
981 
982 	chrdev = cdev_alloc();
983 	if (unlikely(!chrdev)) {
984 		btdev_dbg_err("cdev alloc failed, id=%d", id);
985 		goto cdev_alloc_failed;
986 	}
987 
988 	cdev_init(chrdev, ops);
989 	dev->cdev = chrdev;
990 
991 	ret = cdev_add(chrdev, MKDEV(BT_DEV_MAJOR, minor), 1);
992 	if (unlikely(ret < 0)) {
993 		btdev_dbg_err("cdev add failed, id=%d", id);
994 		goto cdev_add_failed;
995 	}
996 
997 	if (unlikely(bt_cdev_device_create(dev, g_bt_drv->bt_class, minor) < 0)) {
998 		btdev_dbg_err("bt cdev device create failed, id=%d", id);
999 		goto cdev_device_create_failed;
1000 	}
1001 
1002 	return dev;
1003 
1004 cdev_device_create_failed:
1005 cdev_add_failed:
1006 	cdev_del(chrdev);
1007 
1008 cdev_alloc_failed:
1009 	kfree(dev);
1010 
1011 dev_alloc_failed:
1012 	return NULL;
1013 }
1014 
1015 /**
1016  * delete one char device
1017  */
1018 static void bt_cdev_delete(struct bt_cdev *bt_cdev)
1019 {
1020 	dev_t devno;
1021 
1022 	if (likely(bt_cdev)) {
1023 		devno = bt_cdev->cdev->dev;
1024 
1025 		/* BT_DEV_PATH_PREFIX + ID --> /dev/btdev1 */
1026 		unregister_chrdev(MAJOR(devno), bt_cdev->dev_filename + strlen(BT_DEV_PATH_PREFIX));
1027 		bt_cdev_device_destroy(bt_cdev);
1028 
1029 		cdev_del(bt_cdev->cdev);
1030 	} else {
1031 		btdev_dbg_err("cdev is null");
1032 	}
1033 }
1034 
1035 /**
1036  * create and add data char device
1037  */
1038 static struct bt_io_file *bt_create_io_file(u32 id)
1039 {
1040 	struct bt_io_file *file = kmalloc(sizeof(*file), GFP_KERNEL);
1041 
1042 	if (unlikely(!file)) {
1043 		btdev_dbg_err("file alloc failed, id=%d", id);
1044 		return NULL;
1045 	}
1046 	file->bt_cdev = bt_cdev_create(&g_bt_io_file_ops, id);
1047 	if (unlikely(!file->bt_cdev)) {
1048 		btdev_dbg_err("create cdev failed, id=%d", id);
1049 		kfree(file);
1050 		return NULL;
1051 	}
1052 	atomic_set(&file->read_open_limit, 1);
1053 	atomic_set(&file->write_open_limit, 1);
1054 	return file;
1055 }
1056 
1057 static struct bt_io_file **bt_create_io_files(void)
1058 {
1059 	int i;
1060 	struct bt_io_file **all_files = kmalloc(BT_VIRNET_MAX_NUM * sizeof(struct bt_io_file *),
1061 						GFP_KERNEL);
1062 
1063 	if (unlikely(!all_files)) {
1064 		btdev_dbg_err("all_files alloc failed");
1065 		return NULL;
1066 	}
1067 	for (i = 0; i < BT_VIRNET_MAX_NUM; ++i)
1068 		all_files[i] = bt_create_io_file(i + 1);
1069 
1070 	return all_files;
1071 }
1072 
1073 static void bt_delete_io_file(struct bt_io_file *file)
1074 {
1075 	if (unlikely(!file))
1076 		return;
1077 
1078 	bt_cdev_delete(file->bt_cdev);
1079 	kfree(file);
1080 }
1081 
1082 static void bt_delete_io_files(struct bt_drv *bt_mng)
1083 {
1084 	int i;
1085 
1086 	for (i = 0; i < BT_VIRNET_MAX_NUM; ++i)
1087 		bt_delete_io_file(bt_mng->io_files[i]);
1088 
1089 	kfree(bt_mng->io_files);
1090 	bt_mng->io_files = NULL;
1091 }
1092 
1093 /**
1094  * create and add management char device
1095  */
1096 static struct bt_mng_file *bt_create_mng_file(int id)
1097 {
1098 	struct bt_mng_file *file = kmalloc(sizeof(*file), GFP_KERNEL);
1099 
1100 	if (unlikely(!file)) {
1101 		btdev_dbg_err("file alloc failed");
1102 		return NULL;
1103 	}
1104 
1105 	file->bt_cdev = bt_cdev_create(&g_bt_mng_file_ops, id);
1106 	if (unlikely(!file->bt_cdev)) {
1107 		btdev_dbg_err("create cdev failed");
1108 		kfree(file);
1109 		return NULL;
1110 	}
1111 
1112 	atomic_set(&file->open_limit, 1);
1113 
1114 	btdev_dbg("mng file has been created");
1115 	return file;
1116 }
1117 
1118 static void bt_delete_mng_file(struct bt_mng_file *file)
1119 {
1120 	if (unlikely(!file))
1121 		return;
1122 
1123 	bt_cdev_delete(file->bt_cdev);
1124 	kfree(file);
1125 }
1126 
1127 /**
1128  * unregister the region
1129  */
1130 static void bt_cdev_region_destroy(int major, int count)
1131 {
1132 	return unregister_chrdev_region(MKDEV(major, 0), count);
1133 }
1134 
1135 /**
1136  * create one net device
1137  */
1138 static struct net_device *bt_net_device_create(u32 id)
1139 {
1140 	struct net_device *ndev = NULL;
1141 	int err;
1142 	char ifa_name[IFNAMSIZ];
1143 
1144 	if (unlikely(id < 0) || unlikely(id > BT_VIRNET_MAX_NUM)) {
1145 		btdev_dbg_err("invalid id");
1146 		return NULL;
1147 	}
1148 	err = snprintf(ifa_name, sizeof(ifa_name), "%s%d", BT_VIRNET_NAME_PREFIX, id);
1149 	if (err < 0) {
1150 		btdev_dbg_err("snprintf failed, id=%d", id);
1151 		return NULL;
1152 	}
1153 	ndev = alloc_netdev(0, ifa_name, NET_NAME_UNKNOWN, ether_setup);
1154 	if (unlikely(!ndev)) {
1155 		btdev_dbg_err("%s ndev alloc failed", ifa_name);
1156 		return NULL;
1157 	}
1158 
1159 	ndev->netdev_ops = &g_bt_virnet_ops;
1160 	ndev->flags |= IFF_NOARP;
1161 	ndev->flags &= ~IFF_BROADCAST & ~IFF_MULTICAST;
1162 	ndev->min_mtu = 1;
1163 	ndev->max_mtu = ETH_MAX_MTU;
1164 
1165 	err = register_netdev(ndev);
1166 	if (unlikely(err)) {
1167 		btdev_dbg_err("%s register netdev failed", ifa_name);
1168 		free_netdev(ndev);
1169 		return NULL;
1170 	}
1171 
1172 	btdev_dbg("%s has been created", ifa_name);
1173 	return ndev;
1174 }
1175 
1176 /**
1177  * destroy one net device
1178  */
1179 static void bt_net_device_destroy(struct net_device *dev)
1180 {
1181 	btdev_dbg("%s has been destroyed", dev->name);
1182 	unregister_netdev(dev);
1183 	free_netdev(dev);
1184 }
1185 
1186 static struct bt_io_file *bt_get_io_file(struct bt_drv *drv, int id)
1187 {
1188 	if (id >= 1 && id <= BT_VIRNET_MAX_NUM)
1189 		return drv->io_files[id - 1];
1190 
1191 	return NULL;
1192 }
1193 
1194 /**
1195  * create a virtual net_device
1196  */
1197 static struct bt_virnet *bt_virnet_create(struct bt_drv *bt_mng, u32 id)
1198 {
1199 	struct bt_virnet *vnet = kmalloc(sizeof(*vnet), GFP_KERNEL);
1200 
1201 	if (unlikely(!vnet)) {
1202 		btdev_dbg_err("vnet alloc failed");
1203 		goto out_of_memory;
1204 	}
1205 
1206 	vnet->tx_ring = bt_ring_create();
1207 	if (unlikely(!vnet->tx_ring)) {
1208 		btdev_dbg_err("create ring failed");
1209 		goto bt_ring_create_failed;
1210 	}
1211 
1212 	vnet->ndev = bt_net_device_create(id);
1213 	if (unlikely(!vnet->ndev)) {
1214 		btdev_dbg_err("create net device failed");
1215 		goto net_device_create_failed;
1216 	}
1217 
1218 	vnet->io_file = bt_get_io_file(bt_mng, id);
1219 	if (unlikely(!vnet->io_file)) {
1220 		btdev_dbg_err("get io file failed");
1221 		goto get_io_file_failed;
1222 	}
1223 
1224 	init_waitqueue_head(&vnet->rx_queue);
1225 
1226 	set_state(vnet, BT_VIRNET_STATE_CREATED);
1227 	btdev_dbg("%s has been created", cdev_name(vnet));
1228 	return vnet;
1229 
1230 get_io_file_failed:
1231 	bt_net_device_destroy(vnet->ndev);
1232 
1233 net_device_create_failed:
1234 	bt_ring_destroy(vnet->tx_ring);
1235 
1236 bt_ring_create_failed:
1237 	kfree(vnet);
1238 
1239 out_of_memory:
1240 	return NULL;
1241 }
1242 
1243 static void bt_virnet_destroy(struct bt_virnet *vnet)
1244 {
1245 	btdev_dbg("%s has been destroyed", ndev_name(vnet));
1246 	bt_ring_destroy(vnet->tx_ring);
1247 	bt_net_device_destroy(vnet->ndev);
1248 
1249 	set_state(vnet, BT_VIRNET_STATE_DELETED);
1250 
1251 	kfree(vnet);
1252 }
1253 
1254 static void __exit bt_module_release(void)
1255 {
1256 	if (likely(g_bt_drv)) {
1257 		bt_table_destroy(g_bt_drv);
1258 		bt_delete_io_files(g_bt_drv);
1259 		bt_delete_mng_file(g_bt_drv->mng_file);
1260 		bt_dev_class_destroy(g_bt_drv->bt_class);
1261 
1262 		kfree(g_bt_drv);
1263 		g_bt_drv = NULL;
1264 	}
1265 
1266 	bt_cdev_region_destroy(BT_DEV_MAJOR, BT_VIRNET_MAX_NUM);
1267 	remove_proc_entry("bt_info_proc", NULL);
1268 	btdev_dbg("success");
1269 }
1270 
1271 static int __bt_module_base_init(void)
1272 {
1273 	int ret = 0;
1274 
1275 	g_bt_drv = kmalloc(sizeof(*g_bt_drv), GFP_KERNEL);
1276 	if (unlikely(!g_bt_drv)) {
1277 		btdev_dbg_err("bt_drv alloc failed");
1278 		ret = -ENOMEM;
1279 		goto btdrv_alloc_failed;
1280 	}
1281 
1282 	if (unlikely(bt_cdev_region_init(BT_DEV_MAJOR, BT_VIRNET_MAX_NUM) < 0)) {
1283 		btdev_dbg_err("bt cdev region init failed");
1284 		ret = -EFAULT;
1285 		goto cdev_region_fail;
1286 	}
1287 
1288 	g_bt_drv->devices_table = bt_table_init();
1289 	if (unlikely(!g_bt_drv->devices_table)) {
1290 		btdev_dbg_err("bt table init failed");
1291 		ret = -ENOMEM;
1292 		goto table_init_fail;
1293 	}
1294 
1295 	g_bt_drv->bt_class = bt_dev_class_create();
1296 	if (unlikely(!g_bt_drv->bt_class)) {
1297 		btdev_dbg_err("class create failed");
1298 		ret = -ENOMEM;
1299 		goto class_create_fail;
1300 	}
1301 
1302 	g_bt_drv->io_files = bt_create_io_files();
1303 	if (unlikely(!g_bt_drv->io_files)) {
1304 		btdev_dbg_err("bt create io files failed");
1305 		ret = -ENOMEM;
1306 		goto io_files_create_fail;
1307 	}
1308 
1309 	mutex_init(&g_bt_drv->bitmap_lock);
1310 	g_bt_drv->bitmap = 0;
1311 	return ret;
1312 
1313 io_files_create_fail:
1314 	bt_dev_class_destroy(g_bt_drv->bt_class);
1315 
1316 class_create_fail:
1317 	bt_table_destroy(g_bt_drv);
1318 
1319 table_init_fail:
1320 	bt_cdev_region_destroy(BT_DEV_MAJOR, BT_VIRNET_MAX_NUM);
1321 
1322 cdev_region_fail:
1323 	kfree(g_bt_drv);
1324 	g_bt_drv = NULL;
1325 
1326 btdrv_alloc_failed:
1327 	return ret;
1328 }
1329 
1330 static int __bt_module_dev_create(void)
1331 {
1332 	int mid = 0;
1333 	struct proc_dir_entry *entry = NULL;
1334 	int ret = 0;
1335 
1336 	mutex_lock(&g_bt_drv->bitmap_lock);
1337 	g_bt_drv->mng_file = bt_create_mng_file(mid);
1338 	if (unlikely(!g_bt_drv->mng_file)) {
1339 		btdev_dbg_err("bt create mng file failed");
1340 		ret = -ENOMEM;
1341 		mutex_unlock(&g_bt_drv->bitmap_lock);
1342 		goto mng_file_create_fail;
1343 	}
1344 	bt_set_bit(&g_bt_drv->bitmap, mid);
1345 	mutex_unlock(&g_bt_drv->bitmap_lock);
1346 
1347 	entry = proc_create_data("bt_info_proc", 0, NULL, &g_bt_proc_fops, NULL);
1348 	if (unlikely(!entry)) {
1349 		btdev_dbg_err("create proc data failed");
1350 		ret = -ENOMEM;
1351 		goto proc_create_fail;
1352 	}
1353 
1354 	return ret;
1355 proc_create_fail:
1356 	bt_delete_mng_file(g_bt_drv->mng_file);
1357 
1358 mng_file_create_fail:
1359 	bt_delete_io_files(g_bt_drv);
1360 	bt_dev_class_destroy(g_bt_drv->bt_class);
1361 	bt_table_destroy(g_bt_drv);
1362 	bt_cdev_region_destroy(BT_DEV_MAJOR, BT_VIRNET_MAX_NUM);
1363 	kfree(g_bt_drv);
1364 	g_bt_drv = NULL;
1365 
1366 	return ret;
1367 }
1368 
1369 /**
1370  *  module init function
1371  */
1372 static int __init bt_module_init(void)
1373 {
1374 	int ret;
1375 
1376 	ret = __bt_module_base_init();
1377 	if (ret < 0)
1378 		return ret;
1379 
1380 	return __bt_module_dev_create();
1381 }
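/*
 * Init/exit pairing, as implemented above: __bt_module_base_init() allocates
 * g_bt_drv, registers the char device region, the class and the per-slot io
 * files; __bt_module_dev_create() then creates the single management cdev
 * (minor 0) and the "bt_info_proc" procfs entry. bt_module_release() undoes
 * both stages. Virtual net devices themselves are only created later, on
 * BT_IOC_CREATE requests from user space.
 */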
1382 
1383 module_init(bt_module_init);
1384 module_exit(bt_module_release);
1385 MODULE_LICENSE("GPL");
1386