/* Copyright (c) 2012 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoechr.c
 * AoE character device driver
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include "aoe.h"

enum {
	//MINOR_STAT = 1, (moved to sysfs)
	MINOR_ERR = 2,
	MINOR_DISCOVER,
	MINOR_INTERFACES,
	MINOR_REVALIDATE,
	MINOR_FLUSH,
	MSGSZ = 2048,
	NMSG = 100,		/* message backlog to retain */
};

struct aoe_chardev {
	ulong minor;
	char name[32];
};

enum { EMFL_VALID = 1 };

struct ErrMsg {
	short flags;
	short len;
	char *msg;
};

static DEFINE_MUTEX(aoechr_mutex);

/* A ring buffer of error messages, to be read through
 * "/dev/etherd/err".  When no messages are present,
 * readers will block waiting for messages to appear.
 */
static struct ErrMsg emsgs[NMSG];
static int emsgs_head_idx, emsgs_tail_idx;
static struct completion emsgs_comp;
static spinlock_t emsgs_lock;
static int nblocked_emsgs_readers;
static struct class *aoe_class;
static struct aoe_chardev chardevs[] = {
	{ MINOR_ERR, "err" },
	{ MINOR_DISCOVER, "discover" },
	{ MINOR_INTERFACES, "interfaces" },
	{ MINOR_REVALIDATE, "revalidate" },
	{ MINOR_FLUSH, "flush" },
};

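/* Ask all AoE targets to announce themselves by sending a config query
 * to the broadcast shelf (0xffff) and slot (0xff) addresses.
 */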
static int
discover(void)
{
	aoecmd_cfg(0xffff, 0xff);
	return 0;
}

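/* Replace the list of network interfaces the driver may use with the
 * list written by the user to /dev/etherd/interfaces.
 */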
static int
interfaces(const char __user *str, size_t size)
{
	if (set_aoe_iflist(str, size)) {
		printk(KERN_ERR
			"aoe: could not set interface list: too many interfaces\n");
		return -EINVAL;
	}
	return 0;
}

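/* Re-probe one device: the user writes a target name such as "e1.0",
 * and the driver resends config and ATA identify queries for the
 * corresponding aoedev.
 */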
static int
revalidate(const char __user *str, size_t size)
{
	int major, minor, n;
	ulong flags;
	struct aoedev *d;
	struct sk_buff *skb;
	char buf[16];

	if (size >= sizeof buf)
		return -EINVAL;
	buf[sizeof buf - 1] = '\0';
	if (copy_from_user(buf, str, size))
		return -EFAULT;

	n = sscanf(buf, "e%d.%d", &major, &minor);
	if (n != 2) {
		pr_err("aoe: invalid device specification %s\n", buf);
		return -EINVAL;
	}
	d = aoedev_by_aoeaddr(major, minor, 0);
	if (!d)
		return -EINVAL;
	spin_lock_irqsave(&d->lock, flags);
	aoecmd_cleanslate(d);
	aoecmd_cfg(major, minor);
loop:
	skb = aoecmd_ata_id(d);
	spin_unlock_irqrestore(&d->lock, flags);
	/* try again if we are able to sleep a bit,
	 * otherwise give up this revalidation
	 */
	if (!skb && !msleep_interruptible(250)) {
		spin_lock_irqsave(&d->lock, flags);
		goto loop;
	}
	aoedev_put(d);
	if (skb) {
		struct sk_buff_head queue;
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
	return 0;
}

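/* Queue an error message for readers of /dev/etherd/err.  The message
 * is copied into the next free ring-buffer slot; if the ring is full,
 * the message is silently dropped.  Blocked readers are woken once the
 * slot has been published.
 */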
void
aoechr_error(char *msg)
{
	struct ErrMsg *em;
	char *mp;
	ulong flags, n;

	n = strlen(msg);

	spin_lock_irqsave(&emsgs_lock, flags);

	em = emsgs + emsgs_tail_idx;
	if ((em->flags & EMFL_VALID)) {
bail:		spin_unlock_irqrestore(&emsgs_lock, flags);
		return;
	}

	mp = kmemdup(msg, n, GFP_ATOMIC);
	if (mp == NULL) {
		printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n);
		goto bail;
	}

	em->msg = mp;
	em->flags |= EMFL_VALID;
	em->len = n;

	emsgs_tail_idx++;
	emsgs_tail_idx %= ARRAY_SIZE(emsgs);

	spin_unlock_irqrestore(&emsgs_lock, flags);

	if (nblocked_emsgs_readers)
		complete(&emsgs_comp);
}

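/* Dispatch a write to one of the control nodes according to the minor
 * number recorded at open time.  A successful command consumes the
 * whole write and returns cnt.
 */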
static ssize_t
aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp)
{
	int ret = -EINVAL;

	switch ((unsigned long) filp->private_data) {
	default:
		printk(KERN_INFO "aoe: can't write to that file.\n");
		break;
	case MINOR_DISCOVER:
		ret = discover();
		break;
	case MINOR_INTERFACES:
		ret = interfaces(buf, cnt);
		break;
	case MINOR_REVALIDATE:
		ret = revalidate(buf, cnt);
		break;
	case MINOR_FLUSH:
		ret = aoedev_flush(buf, cnt);
		break;
	}
	if (ret == 0)
		ret = cnt;
	return ret;
}

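/* Allow opens only on the minors registered in chardevs[], and stash
 * the minor number in file->private_data for later dispatch.
 */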
static int
aoechr_open(struct inode *inode, struct file *filp)
{
	int n, i;

	mutex_lock(&aoechr_mutex);
	n = iminor(inode);
	filp->private_data = (void *) (unsigned long) n;

	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
		if (chardevs[i].minor == n) {
			mutex_unlock(&aoechr_mutex);
			return 0;
		}
	mutex_unlock(&aoechr_mutex);
	return -EINVAL;
}

static int
aoechr_rel(struct inode *inode, struct file *filp)
{
	return 0;
}

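/* Read one queued error message from /dev/etherd/err.  When no message
 * is pending, a blocking reader sleeps on emsgs_comp until
 * aoechr_error() posts one; an O_NDELAY reader gets -EAGAIN instead.
 * Each message is consumed whole, so the caller's buffer must be large
 * enough to hold it.
 */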
static ssize_t
aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off)
{
	unsigned long n;
	char *mp;
	struct ErrMsg *em;
	ssize_t len;
	ulong flags;

	n = (unsigned long) filp->private_data;
	if (n != MINOR_ERR)
		return -EFAULT;

	spin_lock_irqsave(&emsgs_lock, flags);

	for (;;) {
		em = emsgs + emsgs_head_idx;
		if ((em->flags & EMFL_VALID) != 0)
			break;
		if (filp->f_flags & O_NDELAY) {
			spin_unlock_irqrestore(&emsgs_lock, flags);
			return -EAGAIN;
		}
		nblocked_emsgs_readers++;

		spin_unlock_irqrestore(&emsgs_lock, flags);

		n = wait_for_completion_interruptible(&emsgs_comp);

		spin_lock_irqsave(&emsgs_lock, flags);

		nblocked_emsgs_readers--;

		if (n) {
			spin_unlock_irqrestore(&emsgs_lock, flags);
			return -ERESTARTSYS;
		}
	}
	if (em->len > cnt) {
		spin_unlock_irqrestore(&emsgs_lock, flags);
		return -EAGAIN;
	}
	mp = em->msg;
	len = em->len;
	em->msg = NULL;
	em->flags &= ~EMFL_VALID;

	emsgs_head_idx++;
	emsgs_head_idx %= ARRAY_SIZE(emsgs);

	spin_unlock_irqrestore(&emsgs_lock, flags);

	n = copy_to_user(buf, mp, len);
	kfree(mp);
	return n == 0 ? len : -EFAULT;
}

static const struct file_operations aoe_fops = {
	.write = aoechr_write,
	.read = aoechr_read,
	.open = aoechr_open,
	.release = aoechr_rel,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

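/* Name device nodes "etherd/<name>" so they appear under /dev/etherd/. */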
static char *aoe_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
}

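/* Register the AoE character device major and create one device node
 * per entry in chardevs[] under the "aoe" class.
 */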
int __init
aoechr_init(void)
{
	int n, i;

	n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
	if (n < 0) {
		printk(KERN_ERR "aoe: can't register char device\n");
		return n;
	}
	init_completion(&emsgs_comp);
	spin_lock_init(&emsgs_lock);
	aoe_class = class_create(THIS_MODULE, "aoe");
	if (IS_ERR(aoe_class)) {
		unregister_chrdev(AOE_MAJOR, "aoechr");
		return PTR_ERR(aoe_class);
	}
	aoe_class->devnode = aoe_devnode;

	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
		device_create(aoe_class, NULL,
			      MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
			      chardevs[i].name);

	return 0;
}

void
aoechr_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
		device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
	class_destroy(aoe_class);
	unregister_chrdev(AOE_MAJOR, "aoechr");
}