/*
 * linux/fs/ocfs2/ioctl.c
 *
 * Copyright (C) 2006 Herbert Poetzl
 * adapted from Remy Card's ext2/ioctl.c
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/blkdev.h>
#include <linux/compat.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "alloc.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "journal.h"

#include "ocfs2_fs.h"
#include "ioctl.h"
#include "resize.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dir.h"
#include "buffer_head_io.h"
#include "suballoc.h"
#include "move_extents.h"

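/*
 * Thin wrappers around copy_from_user()/copy_to_user() for the fixed-size
 * request structures handled below; like the underlying helpers, both
 * evaluate to non-zero when the copy could not be completed.
 */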
#define o2info_from_user(a, b)	\
		copy_from_user(&(a), (b), sizeof(a))
#define o2info_to_user(a, b)	\
		copy_to_user((typeof(a) __user *)(b), &(a), sizeof(a))

/*
 * This is just a best-effort attempt to tell userspace which request
 * caused the error.
 */
static inline void o2info_set_request_error(struct ocfs2_info_request *kreq,
					    struct ocfs2_info_request __user *req)
{
	kreq->ir_flags |= OCFS2_INFO_FL_ERROR;
	(void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags));
}

static inline void o2info_set_request_filled(struct ocfs2_info_request *req)
{
	req->ir_flags |= OCFS2_INFO_FL_FILLED;
}

static inline void o2info_clear_request_filled(struct ocfs2_info_request *req)
{
	req->ir_flags &= ~OCFS2_INFO_FL_FILLED;
}

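/*
 * A request is "coherent" unless userspace set OCFS2_INFO_FL_NON_COHERENT.
 * Coherent requests take the cluster lock so the returned data is up to
 * date cluster-wide; non-coherent requests read the relevant blocks
 * straight from disk without any cluster locking.
 */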
static inline int o2info_coherent(struct ocfs2_info_request *req)
{
	return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT));
}

static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
{
	int status;

	status = ocfs2_inode_lock(inode, NULL, 0);
	if (status < 0) {
		mlog_errno(status);
		return status;
	}
	ocfs2_get_inode_flags(OCFS2_I(inode));
	*flags = OCFS2_I(inode)->ip_attr;
	ocfs2_inode_unlock(inode, 0);

	return status;
}

static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
				unsigned mask)
{
	struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	struct buffer_head *bh = NULL;
	unsigned oldflags;
	int status;

	mutex_lock(&inode->i_mutex);

	status = ocfs2_inode_lock(inode, &bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto bail_unlock;

	if (!S_ISDIR(inode->i_mode))
		flags &= ~OCFS2_DIRSYNC_FL;

	oldflags = ocfs2_inode->ip_attr;
	flags = flags & mask;
	flags |= oldflags & ~mask;

	/*
	 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
	 * a process with the relevant capability.
	 */
	status = -EPERM;
	if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) &
		(OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) {
		if (!capable(CAP_LINUX_IMMUTABLE))
			goto bail_unlock;
	}

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto bail_unlock;
	}

	ocfs2_inode->ip_attr = flags;
	ocfs2_set_inode_flags(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (status < 0)
		mlog_errno(status);

	ocfs2_commit_trans(osb, handle);

bail_unlock:
	ocfs2_inode_unlock(inode, 1);
bail:
	mutex_unlock(&inode->i_mutex);

	brelse(bh);

	return status;
}

static int ocfs2_info_handle_blocksize(struct inode *inode,
				       struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_blocksize oib;

	if (o2info_from_user(oib, req))
		return -EFAULT;

	oib.ib_blocksize = inode->i_sb->s_blocksize;

	o2info_set_request_filled(&oib.ib_req);

	if (o2info_to_user(oib, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_handle_clustersize(struct inode *inode,
					 struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_clustersize oic;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oic, req))
		return -EFAULT;

	oic.ic_clustersize = osb->s_clustersize;

	o2info_set_request_filled(&oic.ic_req);

	if (o2info_to_user(oic, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_handle_maxslots(struct inode *inode,
				      struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_maxslots oim;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oim, req))
		return -EFAULT;

	oim.im_max_slots = osb->max_slots;

	o2info_set_request_filled(&oim.im_req);

	if (o2info_to_user(oim, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_handle_label(struct inode *inode,
				   struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_label oil;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oil, req))
		return -EFAULT;

	memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);

	o2info_set_request_filled(&oil.il_req);

	if (o2info_to_user(oil, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_handle_uuid(struct inode *inode,
				  struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_uuid oiu;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oiu, req))
		return -EFAULT;

	memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);

	o2info_set_request_filled(&oiu.iu_req);

	if (o2info_to_user(oiu, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_handle_fs_features(struct inode *inode,
					 struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_fs_features oif;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oif, req))
		return -EFAULT;

	oif.if_compat_features = osb->s_feature_compat;
	oif.if_incompat_features = osb->s_feature_incompat;
	oif.if_ro_compat_features = osb->s_feature_ro_compat;

	o2info_set_request_filled(&oif.if_req);

	if (o2info_to_user(oif, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_handle_journal_size(struct inode *inode,
					  struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_journal_size oij;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (o2info_from_user(oij, req))
		return -EFAULT;

	oij.ij_journal_size = i_size_read(osb->journal->j_inode);

	o2info_set_request_filled(&oij.ij_req);

	if (o2info_to_user(oij, req))
		return -EFAULT;

	return 0;
}

static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
				       struct inode *inode_alloc, u64 blkno,
				       struct ocfs2_info_freeinode *fi,
				       u32 slot)
{
	int status = 0, unlock = 0;

	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *dinode_alloc = NULL;

	if (inode_alloc)
		mutex_lock(&inode_alloc->i_mutex);

	if (o2info_coherent(&fi->ifi_req)) {
		status = ocfs2_inode_lock(inode_alloc, &bh, 0);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		unlock = 1;
	} else {
		status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	dinode_alloc = (struct ocfs2_dinode *)bh->b_data;

	fi->ifi_stat[slot].lfi_total =
		le32_to_cpu(dinode_alloc->id1.bitmap1.i_total);
	fi->ifi_stat[slot].lfi_free =
		le32_to_cpu(dinode_alloc->id1.bitmap1.i_total) -
		le32_to_cpu(dinode_alloc->id1.bitmap1.i_used);

bail:
	if (unlock)
		ocfs2_inode_unlock(inode_alloc, 0);

	if (inode_alloc)
		mutex_unlock(&inode_alloc->i_mutex);

	brelse(bh);

	return status;
}

static int ocfs2_info_handle_freeinode(struct inode *inode,
				       struct ocfs2_info_request __user *req)
{
	u32 i;
	u64 blkno = -1;
	char namebuf[40];
	int status, type = INODE_ALLOC_SYSTEM_INODE;
	struct ocfs2_info_freeinode *oifi = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *inode_alloc = NULL;

	oifi = kzalloc(sizeof(struct ocfs2_info_freeinode), GFP_KERNEL);
	if (!oifi) {
		status = -ENOMEM;
		mlog_errno(status);
		goto out_err;
	}

	if (o2info_from_user(*oifi, req)) {
		status = -EFAULT;
		goto out_free;
	}

	oifi->ifi_slotnum = osb->max_slots;

	for (i = 0; i < oifi->ifi_slotnum; i++) {
		if (o2info_coherent(&oifi->ifi_req)) {
			inode_alloc = ocfs2_get_system_file_inode(osb, type, i);
			if (!inode_alloc) {
				mlog(ML_ERROR, "unable to get alloc inode in "
				     "slot %u\n", i);
				status = -EIO;
				goto bail;
			}
		} else {
			ocfs2_sprintf_system_inode_name(namebuf,
							sizeof(namebuf),
							type, i);
			status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
							    namebuf,
							    strlen(namebuf),
							    &blkno);
			if (status < 0) {
				status = -ENOENT;
				goto bail;
			}
		}

		status = ocfs2_info_scan_inode_alloc(osb, inode_alloc, blkno, oifi, i);

		iput(inode_alloc);
		inode_alloc = NULL;

		if (status < 0)
			goto bail;
	}

	o2info_set_request_filled(&oifi->ifi_req);

	if (o2info_to_user(*oifi, req)) {
		status = -EFAULT;
		goto out_free;
	}

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oifi->ifi_req, req);
out_free:
	kfree(oifi);
out_err:
	return status;
}

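/*
 * Free chunks are binned into power-of-two histogram buckets by their
 * size in clusters: a chunk of c clusters lands in bucket ilog2(c),
 * e.g. 1 -> bucket 0, 2-3 -> bucket 1, 4-7 -> bucket 2, and any index
 * past the end of the histogram is clamped into the last bucket.
 */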
static void o2ffg_update_histogram(struct ocfs2_info_free_chunk_list *hist,
				   unsigned int chunksize)
{
	int index;

	index = __ilog2_u32(chunksize);
	if (index >= OCFS2_INFO_MAX_HIST)
		index = OCFS2_INFO_MAX_HIST - 1;

	hist->fc_chunks[index]++;
	hist->fc_clusters[index] += chunksize;
}

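/*
 * Note that ffs_avg only accumulates a running sum of chunk sizes here;
 * it is turned into an actual average (sum / ffs_free_chunks_real) at
 * the end of ocfs2_info_freefrag_scan_bitmap().
 */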
static void o2ffg_update_stats(struct ocfs2_info_freefrag_stats *stats,
			       unsigned int chunksize)
{
	if (chunksize > stats->ffs_max)
		stats->ffs_max = chunksize;

	if (chunksize < stats->ffs_min)
		stats->ffs_min = chunksize;

	stats->ffs_avg += chunksize;
	stats->ffs_free_chunks_real++;
}

static void ocfs2_info_update_ffg(struct ocfs2_info_freefrag *ffg,
				  unsigned int chunksize)
{
	o2ffg_update_histogram(&(ffg->iff_ffs.ffs_fc_hist), chunksize);
	o2ffg_update_stats(&(ffg->iff_ffs), chunksize);
}

static int ocfs2_info_freefrag_scan_chain(struct ocfs2_super *osb,
					  struct inode *gb_inode,
					  struct ocfs2_dinode *gb_dinode,
					  struct ocfs2_chain_rec *rec,
					  struct ocfs2_info_freefrag *ffg,
					  u32 chunks_in_group)
{
	int status = 0, used;
	u64 blkno;

	struct buffer_head *bh = NULL;
	struct ocfs2_group_desc *bg = NULL;

	unsigned int max_bits, num_clusters;
	unsigned int offset = 0, cluster, chunk;
	unsigned int chunk_free, last_chunksize = 0;

	if (!le32_to_cpu(rec->c_free))
		goto bail;

	do {
		if (!bg)
			blkno = le64_to_cpu(rec->c_blkno);
		else
			blkno = le64_to_cpu(bg->bg_next_group);

		if (bh) {
			brelse(bh);
			bh = NULL;
		}

		if (o2info_coherent(&ffg->iff_req))
			status = ocfs2_read_group_descriptor(gb_inode,
							     gb_dinode,
							     blkno, &bh);
		else
			status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);

		if (status < 0) {
			mlog(ML_ERROR, "Can't read the group descriptor # "
			     "%llu from device.", (unsigned long long)blkno);
			status = -EIO;
			goto bail;
		}

		bg = (struct ocfs2_group_desc *)bh->b_data;

		if (!le16_to_cpu(bg->bg_free_bits_count))
			continue;

		max_bits = le16_to_cpu(bg->bg_bits);
		offset = 0;

		for (chunk = 0; chunk < chunks_in_group; chunk++) {
			/*
			 * The last chunk may not be a full one.
			 */
			if ((offset + ffg->iff_chunksize) > max_bits)
				num_clusters = max_bits - offset;
			else
				num_clusters = ffg->iff_chunksize;

			chunk_free = 0;
			for (cluster = 0; cluster < num_clusters; cluster++) {
				used = ocfs2_test_bit(offset,
						(unsigned long *)bg->bg_bitmap);
				/*
				 * - chunk_free counts the free clusters in
				 *   the current chunk.
				 * - last_chunksize records the size (in
				 *   clusters) of the last real free chunk
				 *   being counted.
				 */
				if (!used) {
					last_chunksize++;
					chunk_free++;
				}

				if (used && last_chunksize) {
					ocfs2_info_update_ffg(ffg,
							      last_chunksize);
					last_chunksize = 0;
				}

				offset++;
			}

			if (chunk_free == ffg->iff_chunksize)
				ffg->iff_ffs.ffs_free_chunks++;
		}

		/*
		 * The last free chunk still needs to be accounted for.
		 */
		if (last_chunksize)
			ocfs2_info_update_ffg(ffg, last_chunksize);

	} while (le64_to_cpu(bg->bg_next_group));

bail:
	brelse(bh);

	return status;
}

static int ocfs2_info_freefrag_scan_bitmap(struct ocfs2_super *osb,
					   struct inode *gb_inode, u64 blkno,
					   struct ocfs2_info_freefrag *ffg)
{
	u32 chunks_in_group;
	int status = 0, unlock = 0, i;

	struct buffer_head *bh = NULL;
	struct ocfs2_chain_list *cl = NULL;
	struct ocfs2_chain_rec *rec = NULL;
	struct ocfs2_dinode *gb_dinode = NULL;

	if (gb_inode)
		mutex_lock(&gb_inode->i_mutex);

	if (o2info_coherent(&ffg->iff_req)) {
		status = ocfs2_inode_lock(gb_inode, &bh, 0);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		unlock = 1;
	} else {
		status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	gb_dinode = (struct ocfs2_dinode *)bh->b_data;
	cl = &(gb_dinode->id2.i_chain);

	/*
	 * The chunk size (in clusters) from userspace must not exceed
	 * the number of clusters in a group.
	 */
	if (ffg->iff_chunksize > le16_to_cpu(cl->cl_cpg)) {
		status = -EINVAL;
		goto bail;
	}

	memset(&ffg->iff_ffs, 0, sizeof(struct ocfs2_info_freefrag_stats));

	ffg->iff_ffs.ffs_min = ~0U;
	ffg->iff_ffs.ffs_clusters =
		le32_to_cpu(gb_dinode->id1.bitmap1.i_total);
	ffg->iff_ffs.ffs_free_clusters = ffg->iff_ffs.ffs_clusters -
		le32_to_cpu(gb_dinode->id1.bitmap1.i_used);

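	/*
	 * Allow for one extra chunk per group so that a trailing partial
	 * chunk (when cl_cpg is not a multiple of the chunk size) is
	 * still scanned; scan_chain clamps it to the bits that remain.
	 */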
	chunks_in_group = le16_to_cpu(cl->cl_cpg) / ffg->iff_chunksize + 1;

	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
		rec = &(cl->cl_recs[i]);
		status = ocfs2_info_freefrag_scan_chain(osb, gb_inode,
							gb_dinode,
							rec, ffg,
							chunks_in_group);
		if (status)
			goto bail;
	}

	if (ffg->iff_ffs.ffs_free_chunks_real)
		ffg->iff_ffs.ffs_avg = (ffg->iff_ffs.ffs_avg /
					ffg->iff_ffs.ffs_free_chunks_real);
bail:
	if (unlock)
		ocfs2_inode_unlock(gb_inode, 0);

	if (gb_inode)
		mutex_unlock(&gb_inode->i_mutex);

	if (gb_inode)
		iput(gb_inode);

	brelse(bh);

	return status;
}

static int ocfs2_info_handle_freefrag(struct inode *inode,
				      struct ocfs2_info_request __user *req)
{
	u64 blkno = -1;
	char namebuf[40];
	int status, type = GLOBAL_BITMAP_SYSTEM_INODE;

	struct ocfs2_info_freefrag *oiff;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *gb_inode = NULL;

	oiff = kzalloc(sizeof(struct ocfs2_info_freefrag), GFP_KERNEL);
	if (!oiff) {
		status = -ENOMEM;
		mlog_errno(status);
		goto out_err;
	}

	if (o2info_from_user(*oiff, req)) {
		status = -EFAULT;
		goto out_free;
	}
	/*
	 * The chunk size from userspace must be a power of 2.
	 */
	if ((oiff->iff_chunksize & (oiff->iff_chunksize - 1)) ||
	    (!oiff->iff_chunksize)) {
		status = -EINVAL;
		goto bail;
	}

	if (o2info_coherent(&oiff->iff_req)) {
		gb_inode = ocfs2_get_system_file_inode(osb, type,
						       OCFS2_INVALID_SLOT);
		if (!gb_inode) {
			mlog(ML_ERROR, "unable to get global_bitmap inode\n");
			status = -EIO;
			goto bail;
		}
	} else {
		ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type,
						OCFS2_INVALID_SLOT);
		status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
						    namebuf,
						    strlen(namebuf),
						    &blkno);
		if (status < 0) {
			status = -ENOENT;
			goto bail;
		}
	}

	status = ocfs2_info_freefrag_scan_bitmap(osb, gb_inode, blkno, oiff);
	if (status < 0)
		goto bail;

	o2info_set_request_filled(&oiff->iff_req);

	if (o2info_to_user(*oiff, req)) {
		status = -EFAULT;
		goto out_free;
	}

	status = 0;
bail:
	if (status)
		o2info_set_request_error(&oiff->iff_req, req);
out_free:
	kfree(oiff);
out_err:
	return status;
}

static int ocfs2_info_handle_unknown(struct inode *inode,
				     struct ocfs2_info_request __user *req)
{
	struct ocfs2_info_request oir;

	if (o2info_from_user(oir, req))
		return -EFAULT;

	o2info_clear_request_filled(&oir);

	if (o2info_to_user(oir, req))
		return -EFAULT;

	return 0;
}

/*
 * Validate and dispatch OCFS2_IOC_INFO requests:
 *
 * - validate the magic number.
 * - distinguish the different request types.
 * - validate the size of each request.
 */
static int ocfs2_info_handle_request(struct inode *inode,
				     struct ocfs2_info_request __user *req)
{
	int status = -EFAULT;
	struct ocfs2_info_request oir;

	if (o2info_from_user(oir, req))
		goto bail;

	status = -EINVAL;
	if (oir.ir_magic != OCFS2_INFO_MAGIC)
		goto bail;

	switch (oir.ir_code) {
	case OCFS2_INFO_BLOCKSIZE:
		if (oir.ir_size == sizeof(struct ocfs2_info_blocksize))
			status = ocfs2_info_handle_blocksize(inode, req);
		break;
	case OCFS2_INFO_CLUSTERSIZE:
		if (oir.ir_size == sizeof(struct ocfs2_info_clustersize))
			status = ocfs2_info_handle_clustersize(inode, req);
		break;
	case OCFS2_INFO_MAXSLOTS:
		if (oir.ir_size == sizeof(struct ocfs2_info_maxslots))
			status = ocfs2_info_handle_maxslots(inode, req);
		break;
	case OCFS2_INFO_LABEL:
		if (oir.ir_size == sizeof(struct ocfs2_info_label))
			status = ocfs2_info_handle_label(inode, req);
		break;
	case OCFS2_INFO_UUID:
		if (oir.ir_size == sizeof(struct ocfs2_info_uuid))
			status = ocfs2_info_handle_uuid(inode, req);
		break;
	case OCFS2_INFO_FS_FEATURES:
		if (oir.ir_size == sizeof(struct ocfs2_info_fs_features))
			status = ocfs2_info_handle_fs_features(inode, req);
		break;
	case OCFS2_INFO_JOURNAL_SIZE:
		if (oir.ir_size == sizeof(struct ocfs2_info_journal_size))
			status = ocfs2_info_handle_journal_size(inode, req);
		break;
	case OCFS2_INFO_FREEINODE:
		if (oir.ir_size == sizeof(struct ocfs2_info_freeinode))
			status = ocfs2_info_handle_freeinode(inode, req);
		break;
	case OCFS2_INFO_FREEFRAG:
		if (oir.ir_size == sizeof(struct ocfs2_info_freefrag))
			status = ocfs2_info_handle_freefrag(inode, req);
		break;
	default:
		status = ocfs2_info_handle_unknown(inode, req);
		break;
	}

bail:
	return status;
}

static int ocfs2_get_request_ptr(struct ocfs2_info *info, int idx,
				 u64 *req_addr, int compat_flag)
{
	int status = -EFAULT;
	u64 __user *bp = NULL;

	if (compat_flag) {
#ifdef CONFIG_COMPAT
		/*
		 * bp points to the base of an array of pointers, each of
		 * which holds the user address of a separate request.
		 */
		bp = (u64 __user *)(unsigned long)compat_ptr(info->oi_requests);
#else
		BUG();
#endif
	} else
		bp = (u64 __user *)(unsigned long)(info->oi_requests);

	if (o2info_from_user(*req_addr, bp + idx))
		goto bail;

	status = 0;
bail:
	return status;
}

/*
 * OCFS2_IOC_INFO handles an array of requests passed from userspace.
 *
 * ocfs2_info_handle() receives a large info aggregation, grabs and
 * validates the request count from the header, then breaks it into
 * small pieces so that the specific handlers can process them one by
 * one.
 *
 * The idea is to keep each separate request small enough to ensure
 * good backward and forward compatibility, since a small request is
 * less likely to break if the disk layout changes.
 */
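
/*
 * A rough userspace sketch of the calling convention (illustrative only;
 * structure layouts come from ocfs2_fs.h):
 *
 *	struct ocfs2_info_blocksize oib = {
 *		.ib_req = {
 *			.ir_magic = OCFS2_INFO_MAGIC,
 *			.ir_code  = OCFS2_INFO_BLOCKSIZE,
 *			.ir_size  = sizeof(oib),
 *		},
 *	};
 *	__u64 reqs[] = { (__u64)(unsigned long)&oib };
 *	struct ocfs2_info info = {
 *		.oi_requests = (__u64)(unsigned long)reqs,
 *		.oi_count    = 1,
 *	};
 *	ioctl(fd, OCFS2_IOC_INFO, &info);
 *
 * On success oib.ib_blocksize holds the block size and the handler sets
 * OCFS2_INFO_FL_FILLED in oib.ib_req.ir_flags.
 */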
static int ocfs2_info_handle(struct inode *inode, struct ocfs2_info *info,
			     int compat_flag)
{
	int i, status = 0;
	u64 req_addr;
	struct ocfs2_info_request __user *reqp;

	if ((info->oi_count > OCFS2_INFO_MAX_REQUEST) ||
	    (!info->oi_requests)) {
		status = -EINVAL;
		goto bail;
	}

	for (i = 0; i < info->oi_count; i++) {

		status = ocfs2_get_request_ptr(info, i, &req_addr, compat_flag);
		if (status)
			break;

		reqp = (struct ocfs2_info_request __user *)(unsigned long)req_addr;
		if (!reqp) {
			status = -EINVAL;
			goto bail;
		}

		status = ocfs2_info_handle_request(inode, reqp);
		if (status)
			break;
	}

bail:
	return status;
}

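/*
 * Userspace drives the simple attribute ioctls roughly like this
 * (illustrative sketch only; the flag and ioctl definitions come from
 * ocfs2_fs.h):
 *
 *	unsigned int flags;
 *
 *	if (ioctl(fd, OCFS2_IOC_GETFLAGS, &flags) == 0) {
 *		flags |= OCFS2_APPEND_FL;
 *		ioctl(fd, OCFS2_IOC_SETFLAGS, &flags);
 *	}
 *
 * Only the bits in OCFS2_FL_MODIFIABLE can be changed (and toggling
 * IMMUTABLE/APPEND needs CAP_LINUX_IMMUTABLE); GETFLAGS reports only
 * the OCFS2_FL_VISIBLE bits.
 */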
long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int new_clusters;
	int status;
	struct ocfs2_space_resv sr;
	struct ocfs2_new_group_input input;
	struct reflink_arguments args;
	const char __user *old_path;
	const char __user *new_path;
	bool preserve;
	struct ocfs2_info info;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case OCFS2_IOC_GETFLAGS:
		status = ocfs2_get_inode_attr(inode, &flags);
		if (status < 0)
			return status;

		flags &= OCFS2_FL_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case OCFS2_IOC_SETFLAGS:
		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		status = mnt_want_write_file(filp);
		if (status)
			return status;
		status = ocfs2_set_inode_attr(inode, flags,
					      OCFS2_FL_MODIFIABLE);
		mnt_drop_write_file(filp);
		return status;
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
		if (copy_from_user(&sr, (int __user *) arg, sizeof(sr)))
			return -EFAULT;

		return ocfs2_change_file_space(filp, cmd, &sr);
	case OCFS2_IOC_GROUP_EXTEND:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (get_user(new_clusters, (int __user *)arg))
			return -EFAULT;

		status = mnt_want_write_file(filp);
		if (status)
			return status;
		status = ocfs2_group_extend(inode, new_clusters);
		mnt_drop_write_file(filp);
		return status;
	case OCFS2_IOC_GROUP_ADD:
	case OCFS2_IOC_GROUP_ADD64:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (copy_from_user(&input, (int __user *) arg, sizeof(input)))
			return -EFAULT;

		status = mnt_want_write_file(filp);
		if (status)
			return status;
		status = ocfs2_group_add(inode, &input);
		mnt_drop_write_file(filp);
		return status;
	case OCFS2_IOC_REFLINK:
		if (copy_from_user(&args, argp, sizeof(args)))
			return -EFAULT;
		old_path = (const char __user *)(unsigned long)args.old_path;
		new_path = (const char __user *)(unsigned long)args.new_path;
		preserve = (args.preserve != 0);

		return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve);
	case OCFS2_IOC_INFO:
		if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
			return -EFAULT;

		return ocfs2_info_handle(inode, &info, 0);
	case FITRIM:
	{
		struct super_block *sb = inode->i_sb;
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		struct fstrim_range range;
		int ret = 0;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;

		if (copy_from_user(&range, argp, sizeof(range)))
			return -EFAULT;

		range.minlen = max_t(u64, q->limits.discard_granularity,
				     range.minlen);
		ret = ocfs2_trim_fs(sb, &range);
		if (ret < 0)
			return ret;

		if (copy_to_user(argp, &range, sizeof(range)))
			return -EFAULT;

		return 0;
	}
	case OCFS2_IOC_MOVE_EXT:
		return ocfs2_ioctl_move_extents(filp, argp);
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	bool preserve;
	struct reflink_arguments args;
	struct inode *inode = file_inode(file);
	struct ocfs2_info info;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case OCFS2_IOC32_GETFLAGS:
		cmd = OCFS2_IOC_GETFLAGS;
		break;
	case OCFS2_IOC32_SETFLAGS:
		cmd = OCFS2_IOC_SETFLAGS;
		break;
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
	case OCFS2_IOC_GROUP_EXTEND:
	case OCFS2_IOC_GROUP_ADD:
	case OCFS2_IOC_GROUP_ADD64:
	case FITRIM:
		break;
	case OCFS2_IOC_REFLINK:
		if (copy_from_user(&args, argp, sizeof(args)))
			return -EFAULT;
		preserve = (args.preserve != 0);

		return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
					   compat_ptr(args.new_path), preserve);
	case OCFS2_IOC_INFO:
		if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
			return -EFAULT;

		return ocfs2_info_handle(inode, &info, 1);
	case OCFS2_IOC_MOVE_EXT:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return ocfs2_ioctl(file, cmd, arg);
}
#endif