// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * Changes by Acxiom Corporation to add protocol version to kernel
 * communication, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-dev-proto.h"
#include "orangefs-bufmap.h"
#include "orangefs-debugfs.h"

#include <linux/debugfs.h>
#include <linux/slab.h>

/* this file implements the /dev/pvfs2-req device node */

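/*
 * Rough sketch of the expected client-core interaction with this node
 * (illustrative only; the real client lives in the OrangeFS userspace
 * sources):
 *
 *	fd = open("/dev/pvfs2-req", O_RDWR | O_NONBLOCK);
 *	ioctl(fd, ORANGEFS_DEV_GET_MAX_UPSIZE, &upsize);
 *	read(fd, buf, upsize);		<- pull one upcall, exactly upsize bytes
 *	writev(fd, vec, n);		<- push back version/magic/tag/downcall
 *
 * The open must be non-blocking and come from init_user_ns, and only one
 * process may have the device open at a time (see orangefs_devreq_open()).
 */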
uint32_t orangefs_userspace_version;

static int open_access_count;

static DEFINE_MUTEX(devreq_mutex);

#define DUMP_DEVICE_ERROR()                                                   \
do {                                                                          \
	gossip_err("*****************************************************\n");\
	gossip_err("ORANGEFS Device Error:  You cannot open the device file ");  \
	gossip_err("\n/dev/%s more than once.  Please make sure that\nthere " \
		   "are no ", ORANGEFS_REQDEVICE_NAME);                          \
	gossip_err("instances of a program using this device\ncurrently "     \
		   "running. (You must verify this!)\n");                     \
	gossip_err("For example, you can use the lsof program as follows:\n");\
	gossip_err("'lsof | grep %s' (run this as root)\n",                   \
		   ORANGEFS_REQDEVICE_NAME);                                     \
	gossip_err("  open_access_count = %d\n", open_access_count);          \
	gossip_err("*****************************************************\n");\
} while (0)

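/*
 * Map an op tag to a bucket in the in-progress hash table.  do_div()
 * divides its first argument in place and returns the remainder, so
 * this is effectively tag % table_size on the local copy of the tag.
 */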
static int hash_func(__u64 tag, int table_size)
{
	return do_div(tag, (unsigned int)table_size);
}

static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
{
	int index = hash_func(op->tag, hash_table_size);

	list_add_tail(&op->list, &orangefs_htable_ops_in_progress[index]);
}

/*
 * find the op with this tag and remove it from the in progress
 * hash table.
 */
static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
{
	struct orangefs_kernel_op_s *op, *next;
	int index;

	index = hash_func(tag, hash_table_size);

	spin_lock(&orangefs_htable_ops_in_progress_lock);
	list_for_each_entry_safe(op,
				 next,
				 &orangefs_htable_ops_in_progress[index],
				 list) {
		if (op->tag == tag && !op_state_purged(op) &&
		    !op_state_given_up(op)) {
			list_del_init(&op->list);
			spin_unlock(&orangefs_htable_ops_in_progress_lock);
			return op;
		}
	}

	spin_unlock(&orangefs_htable_ops_in_progress_lock);
	return NULL;
}

/*
 * Mark every mounted filesystem as needing a remount.  Returns 1 if
 * no filesystems were mounted (i.e. nothing is pending a remount).
 */
static int mark_all_pending_mounts(void)
{
	int unmounted = 1;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	spin_lock(&orangefs_superblocks_lock);
	list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
		/* All of these file systems require a remount */
		orangefs_sb->mount_pending = 1;
		unmounted = 0;
	}
	spin_unlock(&orangefs_superblocks_lock);
	return unmounted;
}

/*
 * Determine if a given file system needs to be remounted or not
 *  Returns -1 on error
 *           0 if already mounted
 *           1 if needs remount
 */
static int fs_mount_pending(__s32 fsid)
{
	int mount_pending = -1;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	spin_lock(&orangefs_superblocks_lock);
	list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
		if (orangefs_sb->fs_id == fsid) {
			mount_pending = orangefs_sb->mount_pending;
			break;
		}
	}
	spin_unlock(&orangefs_superblocks_lock);
	return mount_pending;
}

static int orangefs_devreq_open(struct inode *inode, struct file *file)
{
	int ret = -EINVAL;

	/* in order to ensure that the filesystem driver sees correct UIDs */
	if (file->f_cred->user_ns != &init_user_ns) {
		gossip_err("%s: device cannot be opened outside init_user_ns\n",
			   __func__);
		goto out;
	}

	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("%s: device cannot be opened in blocking mode\n",
			   __func__);
		goto out;
	}
	ret = -EACCES;
	gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n");
	mutex_lock(&devreq_mutex);

	if (open_access_count == 0) {
		open_access_count = 1;
		ret = 0;
	} else {
		DUMP_DEVICE_ERROR();
	}
	mutex_unlock(&devreq_mutex);

out:

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: open device complete (ret = %d)\n",
		     ret);
	return ret;
}

/* Function for read() callers into the device */
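/*
 * Layout of the buffer handed to client-core by a successful read (see
 * the copy_to_user() calls below); the read always returns
 * MAX_DEV_REQ_UPSIZE:
 *
 *	__s32 proto_ver;	kernel protocol version
 *	__s32 magic;		ORANGEFS_DEVREQ_MAGIC
 *	__u64 tag;		identifies the op for the writev() reply
 *	struct orangefs_upcall_s upcall;
 */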
static ssize_t orangefs_devreq_read(struct file *file,
				 char __user *buf,
				 size_t count, loff_t *offset)
{
	struct orangefs_kernel_op_s *op, *temp;
	__s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	struct orangefs_kernel_op_s *cur_op;
	unsigned long ret;

	/* We do not support blocking IO. */
	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("%s: blocking read from client-core.\n",
			   __func__);
		return -EINVAL;
	}

	/*
	 * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then
	 * always read with that size buffer.
	 */
	if (count != MAX_DEV_REQ_UPSIZE) {
		gossip_err("orangefs: client-core tried to read wrong size\n");
		return -EINVAL;
	}

	/* Check for an empty list before locking. */
	if (list_empty(&orangefs_request_list))
		return -EAGAIN;

restart:
	cur_op = NULL;
	/* Get next op (if any) from top of list. */
	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
		__s32 fsid;
		/* This lock is held past the end of the loop when we break. */
		spin_lock(&op->lock);
		if (unlikely(op_state_purged(op) || op_state_given_up(op))) {
			spin_unlock(&op->lock);
			continue;
		}

		fsid = fsid_of_op(op);
		if (fsid != ORANGEFS_FS_ID_NULL) {
			int ret;
			/* Skip ops whose filesystem needs to be mounted. */
			ret = fs_mount_pending(fsid);
			if (ret == 1) {
				gossip_debug(GOSSIP_DEV_DEBUG,
				    "%s: mount pending, skipping op tag "
				    "%llu %s\n",
				    __func__,
				    llu(op->tag),
				    get_opname_string(op));
				spin_unlock(&op->lock);
				continue;
			/*
			 * Skip ops whose filesystem we don't know about unless
			 * it is being mounted or unmounted.  It is possible for
			 * a filesystem we don't know about to be unmounted if
			 * it fails to mount in the kernel after userspace has
			 * been sent the mount request.
			 */
			/* XXX: is there a better way to detect this? */
			} else if (ret == -1 &&
				   !(op->upcall.type ==
					ORANGEFS_VFS_OP_FS_MOUNT ||
				     op->upcall.type ==
					ORANGEFS_VFS_OP_GETATTR ||
				     op->upcall.type ==
					ORANGEFS_VFS_OP_FS_UMOUNT)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
				    "orangefs: skipping op tag %llu %s\n",
				    llu(op->tag), get_opname_string(op));
				gossip_err(
				    "orangefs: ERROR: fs_mount_pending %d\n",
				    fsid);
				spin_unlock(&op->lock);
				continue;
			}
		}
		/*
		 * Either this op does not pertain to a filesystem, is mounting
		 * a filesystem, or pertains to a mounted filesystem. Let it
		 * through.
		 */
		cur_op = op;
		break;
	}

	/*
	 * At this point we either have a valid op and can continue or have not
	 * found an op and must ask the client to try again later.
	 */
	if (!cur_op) {
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	gossip_debug(GOSSIP_DEV_DEBUG, "%s: reading op tag %llu %s\n",
		     __func__,
		     llu(cur_op->tag),
		     get_opname_string(cur_op));

	/*
	 * Such an op should never be on the list in the first place. If so, we
	 * will abort.
	 */
	if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
		gossip_err("orangefs: ERROR: Current op already queued.\n");
		list_del_init(&cur_op->list);
		spin_unlock(&cur_op->lock);
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	list_del_init(&cur_op->list);
	spin_unlock(&orangefs_request_list_lock);

	spin_unlock(&cur_op->lock);

	/* Push the upcall out. */
	ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+sizeof(__s32), &magic, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+2 * sizeof(__s32), &cur_op->tag, sizeof(__u64));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+2*sizeof(__s32)+sizeof(__u64), &cur_op->upcall,
			   sizeof(struct orangefs_upcall_s));
	if (ret != 0)
		goto error;

	spin_lock(&orangefs_htable_ops_in_progress_lock);
	spin_lock(&cur_op->lock);
	if (unlikely(op_state_given_up(cur_op))) {
		spin_unlock(&cur_op->lock);
		spin_unlock(&orangefs_htable_ops_in_progress_lock);
		complete(&cur_op->waitq);
		goto restart;
	}

	/*
	 * Set the operation to be in progress and move it between lists since
	 * it has been sent to the client.
	 */
	set_op_state_inprogress(cur_op);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s: 1 op:%s: op_state:%d: process:%s:\n",
		     __func__,
		     get_opname_string(cur_op),
		     cur_op->op_state,
		     current->comm);
	orangefs_devreq_add_op(cur_op);
	spin_unlock(&cur_op->lock);
	spin_unlock(&orangefs_htable_ops_in_progress_lock);

	/* The client only asks to read one size buffer. */
	return MAX_DEV_REQ_UPSIZE;
error:
	/*
	 * We were unable to copy the op data to the client. Put the op back in
	 * list. If client has crashed, the op will be purged later when the
	 * device is released.
	 */
	gossip_err("orangefs: Failed to copy data to user space\n");
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&cur_op->lock);
	if (likely(!op_state_given_up(cur_op))) {
		set_op_state_waiting(cur_op);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: 2 op:%s: op_state:%d: process:%s:\n",
			     __func__,
			     get_opname_string(cur_op),
			     cur_op->op_state,
			     current->comm);
		list_add(&cur_op->list, &orangefs_request_list);
		spin_unlock(&cur_op->lock);
	} else {
		spin_unlock(&cur_op->lock);
		complete(&cur_op->waitq);
	}
	spin_unlock(&orangefs_request_list_lock);
	return -EFAULT;
}

/*
 * Function for writev() callers into the device.
 *
 * Userspace should have written:
 *  - __u32 version
 *  - __u32 magic
 *  - __u64 tag
 *  - struct orangefs_downcall_s
 *  - trailer buffer (in the case of READDIR operations)
 */
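/*
 * Illustrative only (not taken from the client-core sources): a reply
 * could be assembled roughly like this, with the trailer iovec present
 * only for READDIR responses:
 *
 *	struct iovec vec[] = {
 *		{ &head, sizeof(head) },		(version, magic, tag)
 *		{ &downcall, sizeof(downcall) },
 *		{ trailer, downcall.trailer_size },
 *	};
 *	writev(dev_fd, vec, downcall.trailer_size ? 3 : 2);
 */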
static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
				      struct iov_iter *iter)
{
	ssize_t ret;
	struct orangefs_kernel_op_s *op = NULL;
	struct {
		__u32 version;
		__u32 magic;
		__u64 tag;
	} head;
	int total = ret = iov_iter_count(iter);
	int downcall_size = sizeof(struct orangefs_downcall_s);
	int head_size = sizeof(head);

	gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n",
		     __func__,
		     total,
		     ret);

	if (total < MAX_DEV_REQ_DOWNSIZE) {
		gossip_err("%s: total:%d: must be at least:%u:\n",
			   __func__,
			   total,
			   (unsigned int) MAX_DEV_REQ_DOWNSIZE);
		return -EFAULT;
	}

	if (!copy_from_iter_full(&head, head_size, iter)) {
		gossip_err("%s: failed to copy head.\n", __func__);
		return -EFAULT;
	}

	if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) {
		gossip_err("%s: userspace claims version "
			   "%d, minimum version required: %d.\n",
			   __func__,
			   head.version,
			   ORANGEFS_MINIMUM_USERSPACE_VERSION);
		return -EPROTO;
	}

	if (head.magic != ORANGEFS_DEVREQ_MAGIC) {
		gossip_err("Error: Device magic number does not match.\n");
		return -EPROTO;
	}

	if (!orangefs_userspace_version) {
		orangefs_userspace_version = head.version;
	} else if (orangefs_userspace_version != head.version) {
		gossip_err("Error: userspace version changes\n");
		return -EPROTO;
	}

	/* remove the op from the in progress hash table */
	op = orangefs_devreq_remove_op(head.tag);
	if (!op) {
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: No one's waiting for tag %llu\n",
			     __func__, llu(head.tag));
		return ret;
	}

	if (!copy_from_iter_full(&op->downcall, downcall_size, iter)) {
		gossip_err("%s: failed to copy downcall.\n", __func__);
		goto Efault;
	}

	if (op->downcall.status)
		goto wakeup;

	/*
	 * We've successfully peeled off the head and the downcall.
	 * Something has gone awry if total doesn't equal the
	 * sum of head_size, downcall_size and trailer_size.
	 */
	if ((head_size + downcall_size + op->downcall.trailer_size) != total) {
		gossip_err("%s: funky write, head_size:%d"
			   ": downcall_size:%d: trailer_size:%lld"
			   ": total size:%d:\n",
			   __func__,
			   head_size,
			   downcall_size,
			   op->downcall.trailer_size,
			   total);
		goto Efault;
	}

	/* Only READDIR operations should have trailers. */
	if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size != 0)) {
		gossip_err("%s: %x operation with trailer.",
			   __func__,
			   op->downcall.type);
		goto Efault;
	}

	/* READDIR operations should always have trailers. */
	if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size == 0)) {
		gossip_err("%s: %x operation with no trailer.",
			   __func__,
			   op->downcall.type);
		goto Efault;
	}

	if (op->downcall.type != ORANGEFS_VFS_OP_READDIR)
		goto wakeup;

	op->downcall.trailer_buf = vmalloc(op->downcall.trailer_size);
	if (!op->downcall.trailer_buf)
		goto Enomem;

	memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size);
	if (!copy_from_iter_full(op->downcall.trailer_buf,
			         op->downcall.trailer_size, iter)) {
		gossip_err("%s: failed to copy trailer.\n", __func__);
		vfree(op->downcall.trailer_buf);
		goto Efault;
	}

wakeup:
	/*
	 * Return to vfs waitqueue, and back to service_operation
	 * through wait_for_matching_downcall.
	 */
	spin_lock(&op->lock);
	if (unlikely(op_is_cancel(op))) {
		spin_unlock(&op->lock);
		put_cancel(op);
	} else if (unlikely(op_state_given_up(op))) {
		spin_unlock(&op->lock);
		complete(&op->waitq);
	} else {
		set_op_state_serviced(op);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: op:%s: op_state:%d: process:%s:\n",
			     __func__,
			     get_opname_string(op),
			     op->op_state,
			     current->comm);
		spin_unlock(&op->lock);
	}
	return ret;

Efault:
	op->downcall.status = -(ORANGEFS_ERROR_BIT | 9);
	ret = -EFAULT;
	goto wakeup;

Enomem:
	op->downcall.status = -(ORANGEFS_ERROR_BIT | 8);
	ret = -ENOMEM;
	goto wakeup;
}

/*
 * NOTE: gets called when the last reference to this device is dropped.
 * Using the open_access_count variable, we enforce a reference count
 * on this file so that it can be opened by only one process at a time.
 * The devreq_mutex is used to make sure all i/o has completed before
 * we call orangefs_bufmap_finalize, and similar such tricky
 * situations.
 */
static int orangefs_devreq_release(struct inode *inode, struct file *file)
{
	int unmounted = 0;

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s:pvfs2-client-core: exiting, closing device\n",
		     __func__);

	mutex_lock(&devreq_mutex);
	orangefs_bufmap_finalize();

	open_access_count = -1;

	unmounted = mark_all_pending_mounts();
	gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n",
		     (unmounted ? "UNMOUNTED" : "MOUNTED"));

	purge_waiting_ops();
	purge_inprogress_ops();

	orangefs_bufmap_run_down();

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: device close complete\n");
	open_access_count = 0;
	orangefs_userspace_version = 0;
	mutex_unlock(&devreq_mutex);
	return 0;
}

int is_daemon_in_service(void)
{
	int in_service;

	/*
	 * Check whether client-core is alive, based on the access count
	 * we maintain on the device.
	 */
	mutex_lock(&devreq_mutex);
	in_service = open_access_count == 1 ? 0 : -EIO;
	mutex_unlock(&devreq_mutex);
	return in_service;
}

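/*
 * Lockless variant of the check above, presumably for callers that
 * cannot (or need not) take devreq_mutex.
 */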
bool __is_daemon_in_service(void)
{
	return open_access_count == 1;
}

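/*
 * _IOC_TYPE() extracts the magic field from an ioctl number and
 * _IOC_NR() the per-driver command index, so requests that were not
 * built against the OrangeFS headers are rejected before dispatch.
 */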
static inline long check_ioctl_command(unsigned int command)
{
	/* Check for valid ioctl codes */
	if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) {
		gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
			command,
			_IOC_TYPE(command),
			ORANGEFS_DEV_MAGIC);
		return -EINVAL;
	}
	/* and valid ioctl commands */
	if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
		gossip_err("Invalid ioctl command number [%d >= %d]\n",
			   _IOC_NR(command), ORANGEFS_DEV_MAXNR);
		return -ENOIOCTLCMD;
	}
	return 0;
}

static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
{
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	static __s32 max_up_size = MAX_DEV_REQ_UPSIZE;
	static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE;
	struct ORANGEFS_dev_map_desc user_desc;
	int ret = 0;
	int upstream_kmod = 1;
	struct orangefs_sb_info_s *orangefs_sb;

	/* mtmoore: add locking here */

	switch (command) {
	case ORANGEFS_DEV_GET_MAGIC:
		return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_UPSIZE:
		return ((put_user(max_up_size,
				  (__s32 __user *) arg) == -EFAULT) ?
					-EIO :
					0);
	case ORANGEFS_DEV_GET_MAX_DOWNSIZE:
		return ((put_user(max_down_size,
				  (__s32 __user *) arg) == -EFAULT) ?
					-EIO :
					0);
	case ORANGEFS_DEV_MAP:
		ret = copy_from_user(&user_desc,
				     (struct ORANGEFS_dev_map_desc __user *)
				     arg,
				     sizeof(struct ORANGEFS_dev_map_desc));
		/* WTF -EIO and not -EFAULT? */
		return ret ? -EIO : orangefs_bufmap_initialize(&user_desc);
	case ORANGEFS_DEV_REMOUNT_ALL:
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: got ORANGEFS_DEV_REMOUNT_ALL\n",
			     __func__);

		/*
		 * remount all mounted orangefs volumes to regain the lost
		 * dynamic mount tables (if any) -- NOTE: this is done
		 * without keeping the superblock list locked due to the
		 * upcall/downcall waiting.  also, the request mutex is
		 * used to ensure that no operations will be serviced until
		 * all of the remounts are serviced (to avoid having ops
		 * fail in between remounts)
		 */
		ret = mutex_lock_interruptible(&orangefs_request_mutex);
		if (ret < 0)
			return ret;
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount in progress\n",
			     __func__);
		spin_lock(&orangefs_superblocks_lock);
		list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
			/*
			 * We have to drop the spinlock, so entries can be
			 * removed.  They can't be freed, though, so we just
			 * keep the forward pointers and zero the back ones -
			 * that way we can get to the rest of the list.
			 */
			if (!orangefs_sb->list.prev)
				continue;
			gossip_debug(GOSSIP_DEV_DEBUG,
				     "%s: Remounting SB %p\n",
				     __func__,
				     orangefs_sb);

			spin_unlock(&orangefs_superblocks_lock);
			ret = orangefs_remount(orangefs_sb);
			spin_lock(&orangefs_superblocks_lock);
			if (ret) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "SB %p remount failed\n",
					     orangefs_sb);
				break;
			}
		}
		spin_unlock(&orangefs_superblocks_lock);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount complete\n",
			     __func__);
		mutex_unlock(&orangefs_request_mutex);
		return ret;

	case ORANGEFS_DEV_UPSTREAM:
		ret = copy_to_user((void __user *)arg,
				    &upstream_kmod,
				    sizeof(upstream_kmod));

		if (ret != 0)
			return -EIO;
		else
			return ret;

	case ORANGEFS_DEV_CLIENT_MASK:
		return orangefs_debugfs_new_client_mask((void __user *)arg);
	case ORANGEFS_DEV_CLIENT_STRING:
		return orangefs_debugfs_new_client_string((void __user *)arg);
	case ORANGEFS_DEV_DEBUG:
		return orangefs_debugfs_new_debug((void __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
	return -ENOIOCTLCMD;
}

static long orangefs_devreq_ioctl(struct file *file,
			       unsigned int command, unsigned long arg)
{
	long ret;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(command);
	if (ret < 0)
		return (int)ret;

	return (int)dispatch_ioctl_command(command, arg);
}

#ifdef CONFIG_COMPAT		/* CONFIG_COMPAT is in .config */

/*  Compat structure for the ORANGEFS_DEV_MAP ioctl */
struct ORANGEFS_dev_map_desc32 {
	compat_uptr_t ptr;
	__s32 total_size;
	__s32 size;
	__s32 count;
};

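/*
 * A 32-bit client passes a descriptor whose pointer member is only 32
 * bits wide, so its layout differs from the native ORANGEFS_dev_map_desc.
 * translate_dev_map26() below repacks it into a native copy (on the
 * compat user stack) before handing it to the common ioctl path.
 */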
static unsigned long translate_dev_map26(unsigned long args, long *error)
{
	struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
	/*
	 * Depending on the architecture, allocate some space on the
	 * user-call-stack based on our expected layout.
	 */
	struct ORANGEFS_dev_map_desc __user *p =
	    compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;

	*error = 0;
	/* get the ptr from the 32 bit user-space */
	if (get_user(addr, &p32->ptr))
		goto err;
	/* try to put that into a 64-bit layout */
	if (put_user(compat_ptr(addr), &p->ptr))
		goto err;
	/* copy the remaining fields */
	if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
		goto err;
	return (unsigned long)p;
err:
	*error = -EFAULT;
	return 0;
}

/*
 * ioctl handler for 32-bit user-space apps when the kernel module
 * is compiled as 64-bit.
 */
static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
				      unsigned long args)
{
	long ret;
	unsigned long arg = args;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(cmd);
	if (ret < 0)
		return ret;
	if (cmd == ORANGEFS_DEV_MAP) {
		/*
		 * convert the arguments to what we expect internally
		 * in kernel space
		 */
		arg = translate_dev_map26(args, &ret);
		if (ret < 0) {
			gossip_err("Could not translate dev map\n");
			return ret;
		}
	}
	/* no other ioctl requires translation */
	return dispatch_ioctl_command(cmd, arg);
}

#endif /* CONFIG_COMPAT is in .config */

/* the assigned character device major number */
static int orangefs_dev_major;

/*
 * Initialize orangefs device specific state:
 * Must be called at module load time only
 */
int orangefs_dev_init(void)
{
	/* register orangefs-req device  */
	orangefs_dev_major = register_chrdev(0,
					  ORANGEFS_REQDEVICE_NAME,
					  &orangefs_devreq_file_operations);
	if (orangefs_dev_major < 0) {
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "Failed to register /dev/%s (error %d)\n",
			     ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
		return orangefs_dev_major;
	}

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device registered ***\n",
		     ORANGEFS_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
		     ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
	return 0;
}

void orangefs_dev_cleanup(void)
{
	unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device unregistered ***\n",
		     ORANGEFS_REQDEVICE_NAME);
}

static unsigned int orangefs_devreq_poll(struct file *file,
				      struct poll_table_struct *poll_table)
{
	int poll_revent_mask = 0;

	poll_wait(file, &orangefs_request_list_waitq, poll_table);

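	/*
	 * Report the device readable whenever at least one upcall is
	 * queued.  client-core must still handle -EAGAIN from read(),
	 * since every queued op may end up being skipped (e.g. while
	 * its filesystem has a remount pending).
	 */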
	if (!list_empty(&orangefs_request_list))
		poll_revent_mask |= POLL_IN;
	return poll_revent_mask;
}

const struct file_operations orangefs_devreq_file_operations = {
	.owner = THIS_MODULE,
	.read = orangefs_devreq_read,
	.write_iter = orangefs_devreq_write_iter,
	.open = orangefs_devreq_open,
	.release = orangefs_devreq_release,
	.unlocked_ioctl = orangefs_devreq_ioctl,

#ifdef CONFIG_COMPAT		/* CONFIG_COMPAT is in .config */
	.compat_ioctl = orangefs_devreq_compat_ioctl,
#endif
	.poll = orangefs_devreq_poll
};