1
2 /*
3 rbd.c -- Export ceph rados objects as a Linux block device
4
5
6 based on drivers/block/osdblk.c:
7
8 Copyright 2009 Red Hat, Inc.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22
23
24
25 For usage instructions, please refer to:
26
27 Documentation/ABI/testing/sysfs-bus-rbd
28
29 */
30
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/cls_lock_client.h>
35 #include <linux/ceph/striper.h>
36 #include <linux/ceph/decode.h>
37 #include <linux/fs_parser.h>
38 #include <linux/bsearch.h>
39
40 #include <linux/kernel.h>
41 #include <linux/device.h>
42 #include <linux/module.h>
43 #include <linux/blk-mq.h>
44 #include <linux/fs.h>
45 #include <linux/blkdev.h>
46 #include <linux/slab.h>
47 #include <linux/idr.h>
48 #include <linux/workqueue.h>
49
50 #include "rbd_types.h"
51
52 #define RBD_DEBUG /* Activate rbd_assert() calls */
53
54 /*
55 * Increment the given counter and return its updated value.
56 * If the counter is already 0 it will not be incremented.
57 * If the counter is already at its maximum value, -EINVAL is
58 * returned without updating it.
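* (rbd_dev_parent_get() relies on the stays-at-zero behaviour to keep
* parent_ref dead once it has dropped to zero.)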
59 */
60 static int atomic_inc_return_safe(atomic_t *v)
61 {
62 unsigned int counter;
63
64 counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
65 if (counter <= (unsigned int)INT_MAX)
66 return (int)counter;
67
68 atomic_dec(v);
69
70 return -EINVAL;
71 }
72
73 /* Decrement the counter. Return the resulting value, or -EINVAL */
74 static int atomic_dec_return_safe(atomic_t *v)
75 {
76 int counter;
77
78 counter = atomic_dec_return(v);
79 if (counter >= 0)
80 return counter;
81
82 atomic_inc(v);
83
84 return -EINVAL;
85 }
86
87 #define RBD_DRV_NAME "rbd"
88
89 #define RBD_MINORS_PER_MAJOR 256
90 #define RBD_SINGLE_MAJOR_PART_SHIFT 4
91
92 #define RBD_MAX_PARENT_CHAIN_LEN 16
93
94 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
95 #define RBD_MAX_SNAP_NAME_LEN \
96 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
97
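/*
* 510 snapshot ids at 8 bytes each come to 4080 bytes, leaving just
* enough room for the ceph_snap_context header fields in one 4 KiB page.
*/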
98 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
99
100 #define RBD_SNAP_HEAD_NAME "-"
101
102 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
103
104 /* This allows a single page to hold an image name sent by OSD */
105 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
106 #define RBD_IMAGE_ID_LEN_MAX 64
107
108 #define RBD_OBJ_PREFIX_LEN_MAX 64
109
110 #define RBD_NOTIFY_TIMEOUT 5 /* seconds */
111 #define RBD_RETRY_DELAY msecs_to_jiffies(1000)
112
113 /* Feature bits */
114
115 #define RBD_FEATURE_LAYERING (1ULL<<0)
116 #define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
117 #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
118 #define RBD_FEATURE_OBJECT_MAP (1ULL<<3)
119 #define RBD_FEATURE_FAST_DIFF (1ULL<<4)
120 #define RBD_FEATURE_DEEP_FLATTEN (1ULL<<5)
121 #define RBD_FEATURE_DATA_POOL (1ULL<<7)
122 #define RBD_FEATURE_OPERATIONS (1ULL<<8)
123
124 #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
125 RBD_FEATURE_STRIPINGV2 | \
126 RBD_FEATURE_EXCLUSIVE_LOCK | \
127 RBD_FEATURE_OBJECT_MAP | \
128 RBD_FEATURE_FAST_DIFF | \
129 RBD_FEATURE_DEEP_FLATTEN | \
130 RBD_FEATURE_DATA_POOL | \
131 RBD_FEATURE_OPERATIONS)
132
133 /* Features supported by this (client software) implementation. */
134
135 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
136
137 /*
138 * An RBD device name will be "rbd#", where the "rbd" comes from
139 * RBD_DRV_NAME above, and # is a unique integer identifier.
140 */
141 #define DEV_NAME_LEN 32
142
143 /*
144 * block device image metadata (in-memory version)
145 */
146 struct rbd_image_header {
147 /* These six fields never change for a given rbd image */
148 char *object_prefix;
149 __u8 obj_order;
150 u64 stripe_unit;
151 u64 stripe_count;
152 s64 data_pool_id;
153 u64 features; /* Might be changeable someday? */
154
155 /* The remaining fields need to be updated occasionally */
156 u64 image_size;
157 struct ceph_snap_context *snapc;
158 char *snap_names; /* format 1 only */
159 u64 *snap_sizes; /* format 1 only */
160 };
161
162 /*
163 * An rbd image specification.
164 *
165 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
166 * identify an image. Each rbd_dev structure includes a pointer to
167 * an rbd_spec structure that encapsulates this identity.
168 *
169 * Each of the id's in an rbd_spec has an associated name. For a
170 * user-mapped image, the names are supplied and the id's associated
171 * with them are looked up. For a layered image, a parent image is
172 * defined by the tuple, and the names are looked up.
173 *
174 * An rbd_dev structure contains a parent_spec pointer which is
175 * non-null if the image it represents is a child in a layered
176 * image. This pointer will refer to the rbd_spec structure used
177 * by the parent rbd_dev for its own identity (i.e., the structure
178 * is shared between the parent and child).
179 *
180 * Since these structures are populated once, during the discovery
181 * phase of image construction, they are effectively immutable so
182 * we make no effort to synchronize access to them.
183 *
184 * Note that code herein does not assume the image name is known (it
185 * could be a null pointer).
186 */
187 struct rbd_spec {
188 u64 pool_id;
189 const char *pool_name;
190 const char *pool_ns; /* NULL if default, never "" */
191
192 const char *image_id;
193 const char *image_name;
194
195 u64 snap_id;
196 const char *snap_name;
197
198 struct kref kref;
199 };
200
201 /*
202 * an instance of the client. multiple devices may share an rbd client.
203 */
204 struct rbd_client {
205 struct ceph_client *client;
206 struct kref kref;
207 struct list_head node;
208 };
209
210 struct pending_result {
211 int result; /* first nonzero result */
212 int num_pending;
213 };
214
215 struct rbd_img_request;
216
217 enum obj_request_type {
218 OBJ_REQUEST_NODATA = 1,
219 OBJ_REQUEST_BIO, /* pointer into provided bio (list) */
220 OBJ_REQUEST_BVECS, /* pointer into provided bio_vec array */
221 OBJ_REQUEST_OWN_BVECS, /* private bio_vec array, doesn't own pages */
222 };
223
224 enum obj_operation_type {
225 OBJ_OP_READ = 1,
226 OBJ_OP_WRITE,
227 OBJ_OP_DISCARD,
228 OBJ_OP_ZEROOUT,
229 };
230
231 #define RBD_OBJ_FLAG_DELETION (1U << 0)
232 #define RBD_OBJ_FLAG_COPYUP_ENABLED (1U << 1)
233 #define RBD_OBJ_FLAG_COPYUP_ZEROS (1U << 2)
234 #define RBD_OBJ_FLAG_MAY_EXIST (1U << 3)
235 #define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT (1U << 4)
236
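/*
* Reads normally complete in RBD_OBJ_READ_OBJECT.  For layered images,
* a read that finds no backing object (or returns short) may continue
* in RBD_OBJ_READ_PARENT to fetch the missing data from the parent.
*/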
237 enum rbd_obj_read_state {
238 RBD_OBJ_READ_START = 1,
239 RBD_OBJ_READ_OBJECT,
240 RBD_OBJ_READ_PARENT,
241 };
242
243 /*
244 * Writes go through the following state machine to deal with
245 * layering:
246 *
247 * . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
248 * . | .
249 * . v .
250 * . RBD_OBJ_WRITE_READ_FROM_PARENT. . . .
251 * . | . .
252 * . v v (deep-copyup .
253 * (image . RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC . not needed) .
254 * flattened) v | . .
255 * . v . .
256 * . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . . (copyup .
257 * | not needed) v
258 * v .
259 * done . . . . . . . . . . . . . . . . . .
260 * ^
261 * |
262 * RBD_OBJ_WRITE_FLAT
263 *
264 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
265 * assert_exists guard is needed or not (in some cases it's not needed
266 * even if there is a parent).
267 */
268 enum rbd_obj_write_state {
269 RBD_OBJ_WRITE_START = 1,
270 RBD_OBJ_WRITE_PRE_OBJECT_MAP,
271 RBD_OBJ_WRITE_OBJECT,
272 __RBD_OBJ_WRITE_COPYUP,
273 RBD_OBJ_WRITE_COPYUP,
274 RBD_OBJ_WRITE_POST_OBJECT_MAP,
275 };
276
277 enum rbd_obj_copyup_state {
278 RBD_OBJ_COPYUP_START = 1,
279 RBD_OBJ_COPYUP_READ_PARENT,
280 __RBD_OBJ_COPYUP_OBJECT_MAPS,
281 RBD_OBJ_COPYUP_OBJECT_MAPS,
282 __RBD_OBJ_COPYUP_WRITE_OBJECT,
283 RBD_OBJ_COPYUP_WRITE_OBJECT,
284 };
285
286 struct rbd_obj_request {
287 struct ceph_object_extent ex;
288 unsigned int flags; /* RBD_OBJ_FLAG_* */
289 union {
290 enum rbd_obj_read_state read_state; /* for reads */
291 enum rbd_obj_write_state write_state; /* for writes */
292 };
293
294 struct rbd_img_request *img_request;
295 struct ceph_file_extent *img_extents;
296 u32 num_img_extents;
297
298 union {
299 struct ceph_bio_iter bio_pos;
300 struct {
301 struct ceph_bvec_iter bvec_pos;
302 u32 bvec_count;
303 u32 bvec_idx;
304 };
305 };
306
307 enum rbd_obj_copyup_state copyup_state;
308 struct bio_vec *copyup_bvecs;
309 u32 copyup_bvec_count;
310
311 struct list_head osd_reqs; /* w/ r_private_item */
312
313 struct mutex state_mutex;
314 struct pending_result pending;
315 struct kref kref;
316 };
317
318 enum img_req_flags {
319 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
320 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
321 };
322
323 enum rbd_img_state {
324 RBD_IMG_START = 1,
325 RBD_IMG_EXCLUSIVE_LOCK,
326 __RBD_IMG_OBJECT_REQUESTS,
327 RBD_IMG_OBJECT_REQUESTS,
328 };
329
330 struct rbd_img_request {
331 struct rbd_device *rbd_dev;
332 enum obj_operation_type op_type;
333 enum obj_request_type data_type;
334 unsigned long flags;
335 enum rbd_img_state state;
336 union {
337 u64 snap_id; /* for reads */
338 struct ceph_snap_context *snapc; /* for writes */
339 };
340 struct rbd_obj_request *obj_request; /* obj req initiator */
341
342 struct list_head lock_item;
343 struct list_head object_extents; /* obj_req.ex structs */
344
345 struct mutex state_mutex;
346 struct pending_result pending;
347 struct work_struct work;
348 int work_result;
349 };
350
351 #define for_each_obj_request(ireq, oreq) \
352 list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
353 #define for_each_obj_request_safe(ireq, oreq, n) \
354 list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
355
356 enum rbd_watch_state {
357 RBD_WATCH_STATE_UNREGISTERED,
358 RBD_WATCH_STATE_REGISTERED,
359 RBD_WATCH_STATE_ERROR,
360 };
361
362 enum rbd_lock_state {
363 RBD_LOCK_STATE_UNLOCKED,
364 RBD_LOCK_STATE_LOCKED,
365 RBD_LOCK_STATE_RELEASING,
366 };
367
368 /* WatchNotify::ClientId */
369 struct rbd_client_id {
370 u64 gid;
371 u64 handle;
372 };
373
374 struct rbd_mapping {
375 u64 size;
376 };
377
378 /*
379 * a single device
380 */
381 struct rbd_device {
382 int dev_id; /* blkdev unique id */
383
384 int major; /* blkdev assigned major */
385 int minor;
386 struct gendisk *disk; /* blkdev's gendisk and rq */
387
388 u32 image_format; /* Either 1 or 2 */
389 struct rbd_client *rbd_client;
390
391 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
392
393 spinlock_t lock; /* queue, flags, open_count */
394
395 struct rbd_image_header header;
396 unsigned long flags; /* possibly lock protected */
397 struct rbd_spec *spec;
398 struct rbd_options *opts;
399 char *config_info; /* add{,_single_major} string */
400
401 struct ceph_object_id header_oid;
402 struct ceph_object_locator header_oloc;
403
404 struct ceph_file_layout layout; /* used for all rbd requests */
405
406 struct mutex watch_mutex;
407 enum rbd_watch_state watch_state;
408 struct ceph_osd_linger_request *watch_handle;
409 u64 watch_cookie;
410 struct delayed_work watch_dwork;
411
412 struct rw_semaphore lock_rwsem;
413 enum rbd_lock_state lock_state;
414 char lock_cookie[32];
415 struct rbd_client_id owner_cid;
416 struct work_struct acquired_lock_work;
417 struct work_struct released_lock_work;
418 struct delayed_work lock_dwork;
419 struct work_struct unlock_work;
420 spinlock_t lock_lists_lock;
421 struct list_head acquiring_list;
422 struct list_head running_list;
423 struct completion acquire_wait;
424 int acquire_err;
425 struct completion releasing_wait;
426
427 spinlock_t object_map_lock;
428 u8 *object_map;
429 u64 object_map_size; /* in objects */
430 u64 object_map_flags;
431
432 struct workqueue_struct *task_wq;
433
434 struct rbd_spec *parent_spec;
435 u64 parent_overlap;
436 atomic_t parent_ref;
437 struct rbd_device *parent;
438
439 /* Block layer tags. */
440 struct blk_mq_tag_set tag_set;
441
442 /* protects updating the header */
443 struct rw_semaphore header_rwsem;
444
445 struct rbd_mapping mapping;
446
447 struct list_head node;
448
449 /* sysfs related */
450 struct device dev;
451 unsigned long open_count; /* protected by lock */
452 };
453
454 /*
455 * Flag bits for rbd_dev->flags:
456 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
457 * by rbd_dev->lock
458 */
459 enum rbd_dev_flags {
460 RBD_DEV_FLAG_EXISTS, /* rbd_dev_device_setup() ran */
461 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
462 RBD_DEV_FLAG_READONLY, /* -o ro or snapshot */
463 };
464
465 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
466
467 static LIST_HEAD(rbd_dev_list); /* devices */
468 static DEFINE_SPINLOCK(rbd_dev_list_lock);
469
470 static LIST_HEAD(rbd_client_list); /* clients */
471 static DEFINE_SPINLOCK(rbd_client_list_lock);
472
473 /* Slab caches for frequently-allocated structures */
474
475 static struct kmem_cache *rbd_img_request_cache;
476 static struct kmem_cache *rbd_obj_request_cache;
477
478 static int rbd_major;
479 static DEFINE_IDA(rbd_dev_id_ida);
480
481 static struct workqueue_struct *rbd_wq;
482
483 static struct ceph_snap_context rbd_empty_snapc = {
484 .nref = REFCOUNT_INIT(1),
485 };
486
487 /*
488 * single-major requires userspace rbd utility version >= 0.75.
489 */
490 static bool single_major = true;
491 module_param(single_major, bool, 0444);
492 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
493
494 static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
495 static ssize_t remove_store(struct bus_type *bus, const char *buf,
496 size_t count);
497 static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
498 size_t count);
499 static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
500 size_t count);
501 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
502
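/*
* In single-major mode each mapping owns a 16-minor slice of the shared
* major: dev_id N starts at minor N << RBD_SINGLE_MAJOR_PART_SHIFT, so
* e.g. dev_id 2 covers minors 32..47 (whole disk plus partitions).
*/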
503 static int rbd_dev_id_to_minor(int dev_id)
504 {
505 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
506 }
507
508 static int minor_to_rbd_dev_id(int minor)
509 {
510 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
511 }
512
513 static bool rbd_is_ro(struct rbd_device *rbd_dev)
514 {
515 return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
516 }
517
518 static bool rbd_is_snap(struct rbd_device *rbd_dev)
519 {
520 return rbd_dev->spec->snap_id != CEPH_NOSNAP;
521 }
522
523 static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
524 {
525 lockdep_assert_held(&rbd_dev->lock_rwsem);
526
527 return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
528 rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
529 }
530
531 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
532 {
533 bool is_lock_owner;
534
535 down_read(&rbd_dev->lock_rwsem);
536 is_lock_owner = __rbd_is_lock_owner(rbd_dev);
537 up_read(&rbd_dev->lock_rwsem);
538 return is_lock_owner;
539 }
540
541 static ssize_t supported_features_show(struct bus_type *bus, char *buf)
542 {
543 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
544 }
545
546 static BUS_ATTR_WO(add);
547 static BUS_ATTR_WO(remove);
548 static BUS_ATTR_WO(add_single_major);
549 static BUS_ATTR_WO(remove_single_major);
550 static BUS_ATTR_RO(supported_features);
551
552 static struct attribute *rbd_bus_attrs[] = {
553 &bus_attr_add.attr,
554 &bus_attr_remove.attr,
555 &bus_attr_add_single_major.attr,
556 &bus_attr_remove_single_major.attr,
557 &bus_attr_supported_features.attr,
558 NULL,
559 };
560
561 static umode_t rbd_bus_is_visible(struct kobject *kobj,
562 struct attribute *attr, int index)
563 {
564 if (!single_major &&
565 (attr == &bus_attr_add_single_major.attr ||
566 attr == &bus_attr_remove_single_major.attr))
567 return 0;
568
569 return attr->mode;
570 }
571
572 static const struct attribute_group rbd_bus_group = {
573 .attrs = rbd_bus_attrs,
574 .is_visible = rbd_bus_is_visible,
575 };
576 __ATTRIBUTE_GROUPS(rbd_bus);
577
578 static struct bus_type rbd_bus_type = {
579 .name = "rbd",
580 .bus_groups = rbd_bus_groups,
581 };
582
583 static void rbd_root_dev_release(struct device *dev)
584 {
585 }
586
587 static struct device rbd_root_dev = {
588 .init_name = "rbd",
589 .release = rbd_root_dev_release,
590 };
591
592 static __printf(2, 3)
593 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
594 {
595 struct va_format vaf;
596 va_list args;
597
598 va_start(args, fmt);
599 vaf.fmt = fmt;
600 vaf.va = &args;
601
602 if (!rbd_dev)
603 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
604 else if (rbd_dev->disk)
605 printk(KERN_WARNING "%s: %s: %pV\n",
606 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
607 else if (rbd_dev->spec && rbd_dev->spec->image_name)
608 printk(KERN_WARNING "%s: image %s: %pV\n",
609 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
610 else if (rbd_dev->spec && rbd_dev->spec->image_id)
611 printk(KERN_WARNING "%s: id %s: %pV\n",
612 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
613 else /* punt */
614 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
615 RBD_DRV_NAME, rbd_dev, &vaf);
616 va_end(args);
617 }
618
619 #ifdef RBD_DEBUG
620 #define rbd_assert(expr) \
621 if (unlikely(!(expr))) { \
622 printk(KERN_ERR "\nAssertion failure in %s() " \
623 "at line %d:\n\n" \
624 "\trbd_assert(%s);\n\n", \
625 __func__, __LINE__, #expr); \
626 BUG(); \
627 }
628 #else /* !RBD_DEBUG */
629 # define rbd_assert(expr) ((void) 0)
630 #endif /* !RBD_DEBUG */
631
632 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
633
634 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
635 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
636 struct rbd_image_header *header);
637 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
638 u64 snap_id);
639 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
640 u8 *order, u64 *snap_size);
641 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);
642
643 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
644 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
645
646 /*
647 * Return true if nothing else is pending.
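* Only the first nonzero result is recorded; later errors do not
* overwrite pending->result.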
648 */
649 static bool pending_result_dec(struct pending_result *pending, int *result)
650 {
651 rbd_assert(pending->num_pending > 0);
652
653 if (*result && !pending->result)
654 pending->result = *result;
655 if (--pending->num_pending)
656 return false;
657
658 *result = pending->result;
659 return true;
660 }
661
662 static int rbd_open(struct block_device *bdev, fmode_t mode)
663 {
664 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
665 bool removing = false;
666
667 spin_lock_irq(&rbd_dev->lock);
668 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
669 removing = true;
670 else
671 rbd_dev->open_count++;
672 spin_unlock_irq(&rbd_dev->lock);
673 if (removing)
674 return -ENOENT;
675
676 (void) get_device(&rbd_dev->dev);
677
678 return 0;
679 }
680
681 static void rbd_release(struct gendisk *disk, fmode_t mode)
682 {
683 struct rbd_device *rbd_dev = disk->private_data;
684 unsigned long open_count_before;
685
686 spin_lock_irq(&rbd_dev->lock);
687 open_count_before = rbd_dev->open_count--;
688 spin_unlock_irq(&rbd_dev->lock);
689 rbd_assert(open_count_before > 0);
690
691 put_device(&rbd_dev->dev);
692 }
693
694 static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
695 {
696 int ro;
697
698 if (get_user(ro, (int __user *)arg))
699 return -EFAULT;
700
701 /*
702 * Neither images mapped read-only nor snapshots can be marked
703 * read-write.
704 */
705 if (!ro) {
706 if (rbd_is_ro(rbd_dev))
707 return -EROFS;
708
709 rbd_assert(!rbd_is_snap(rbd_dev));
710 }
711
712 /* Let blkdev_roset() handle it */
713 return -ENOTTY;
714 }
715
716 static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
717 unsigned int cmd, unsigned long arg)
718 {
719 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
720 int ret;
721
722 switch (cmd) {
723 case BLKROSET:
724 ret = rbd_ioctl_set_ro(rbd_dev, arg);
725 break;
726 default:
727 ret = -ENOTTY;
728 }
729
730 return ret;
731 }
732
733 #ifdef CONFIG_COMPAT
734 static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
735 unsigned int cmd, unsigned long arg)
736 {
737 return rbd_ioctl(bdev, mode, cmd, arg);
738 }
739 #endif /* CONFIG_COMPAT */
740
741 static const struct block_device_operations rbd_bd_ops = {
742 .owner = THIS_MODULE,
743 .open = rbd_open,
744 .release = rbd_release,
745 .ioctl = rbd_ioctl,
746 #ifdef CONFIG_COMPAT
747 .compat_ioctl = rbd_compat_ioctl,
748 #endif
749 };
750
751 /*
752 * Initialize an rbd client instance. Success or not, this function
753 * consumes ceph_opts. Caller holds client_mutex.
754 */
755 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
756 {
757 struct rbd_client *rbdc;
758 int ret = -ENOMEM;
759
760 dout("%s:\n", __func__);
761 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
762 if (!rbdc)
763 goto out_opt;
764
765 kref_init(&rbdc->kref);
766 INIT_LIST_HEAD(&rbdc->node);
767
768 rbdc->client = ceph_create_client(ceph_opts, rbdc);
769 if (IS_ERR(rbdc->client))
770 goto out_rbdc;
771 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
772
773 ret = ceph_open_session(rbdc->client);
774 if (ret < 0)
775 goto out_client;
776
777 spin_lock(&rbd_client_list_lock);
778 list_add_tail(&rbdc->node, &rbd_client_list);
779 spin_unlock(&rbd_client_list_lock);
780
781 dout("%s: rbdc %p\n", __func__, rbdc);
782
783 return rbdc;
784 out_client:
785 ceph_destroy_client(rbdc->client);
786 out_rbdc:
787 kfree(rbdc);
788 out_opt:
789 if (ceph_opts)
790 ceph_destroy_options(ceph_opts);
791 dout("%s: error %d\n", __func__, ret);
792
793 return ERR_PTR(ret);
794 }
795
796 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
797 {
798 kref_get(&rbdc->kref);
799
800 return rbdc;
801 }
802
803 /*
804 * Find a ceph client with specific addr and configuration. If
805 * found, bump its reference count.
806 */
807 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
808 {
809 struct rbd_client *client_node;
810 bool found = false;
811
812 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
813 return NULL;
814
815 spin_lock(&rbd_client_list_lock);
816 list_for_each_entry(client_node, &rbd_client_list, node) {
817 if (!ceph_compare_options(ceph_opts, client_node->client)) {
818 __rbd_get_client(client_node);
819
820 found = true;
821 break;
822 }
823 }
824 spin_unlock(&rbd_client_list_lock);
825
826 return found ? client_node : NULL;
827 }
828
829 /*
830 * (Per device) rbd map options
831 */
832 enum {
833 Opt_queue_depth,
834 Opt_alloc_size,
835 Opt_lock_timeout,
836 /* int args above */
837 Opt_pool_ns,
838 Opt_compression_hint,
839 /* string args above */
840 Opt_read_only,
841 Opt_read_write,
842 Opt_lock_on_read,
843 Opt_exclusive,
844 Opt_notrim,
845 };
846
847 enum {
848 Opt_compression_hint_none,
849 Opt_compression_hint_compressible,
850 Opt_compression_hint_incompressible,
851 };
852
853 static const struct constant_table rbd_param_compression_hint[] = {
854 {"none", Opt_compression_hint_none},
855 {"compressible", Opt_compression_hint_compressible},
856 {"incompressible", Opt_compression_hint_incompressible},
857 {}
858 };
859
860 static const struct fs_parameter_spec rbd_parameters[] = {
861 fsparam_u32 ("alloc_size", Opt_alloc_size),
862 fsparam_enum ("compression_hint", Opt_compression_hint,
863 rbd_param_compression_hint),
864 fsparam_flag ("exclusive", Opt_exclusive),
865 fsparam_flag ("lock_on_read", Opt_lock_on_read),
866 fsparam_u32 ("lock_timeout", Opt_lock_timeout),
867 fsparam_flag ("notrim", Opt_notrim),
868 fsparam_string ("_pool_ns", Opt_pool_ns),
869 fsparam_u32 ("queue_depth", Opt_queue_depth),
870 fsparam_flag ("read_only", Opt_read_only),
871 fsparam_flag ("read_write", Opt_read_write),
872 fsparam_flag ("ro", Opt_read_only),
873 fsparam_flag ("rw", Opt_read_write),
874 {}
875 };
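/*
* For illustration only: an options string built from the table above
* might look like "queue_depth=128,alloc_size=65536,lock_on_read,notrim".
*/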
876
877 struct rbd_options {
878 int queue_depth;
879 int alloc_size;
880 unsigned long lock_timeout;
881 bool read_only;
882 bool lock_on_read;
883 bool exclusive;
884 bool trim;
885
886 u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
887 };
888
889 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
890 #define RBD_ALLOC_SIZE_DEFAULT (64 * 1024)
891 #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
892 #define RBD_READ_ONLY_DEFAULT false
893 #define RBD_LOCK_ON_READ_DEFAULT false
894 #define RBD_EXCLUSIVE_DEFAULT false
895 #define RBD_TRIM_DEFAULT true
896
897 struct rbd_parse_opts_ctx {
898 struct rbd_spec *spec;
899 struct ceph_options *copts;
900 struct rbd_options *opts;
901 };
902
903 static char* obj_op_name(enum obj_operation_type op_type)
904 {
905 switch (op_type) {
906 case OBJ_OP_READ:
907 return "read";
908 case OBJ_OP_WRITE:
909 return "write";
910 case OBJ_OP_DISCARD:
911 return "discard";
912 case OBJ_OP_ZEROOUT:
913 return "zeroout";
914 default:
915 return "???";
916 }
917 }
918
919 /*
920 * Destroy ceph client
921 *
922 * rbd_client_list_lock is taken here; the caller must not hold it.
923 */
924 static void rbd_client_release(struct kref *kref)
925 {
926 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
927
928 dout("%s: rbdc %p\n", __func__, rbdc);
929 spin_lock(&rbd_client_list_lock);
930 list_del(&rbdc->node);
931 spin_unlock(&rbd_client_list_lock);
932
933 ceph_destroy_client(rbdc->client);
934 kfree(rbdc);
935 }
936
937 /*
938 * Drop reference to ceph client node. If it's not referenced anymore, release
939 * it.
940 */
941 static void rbd_put_client(struct rbd_client *rbdc)
942 {
943 if (rbdc)
944 kref_put(&rbdc->kref, rbd_client_release);
945 }
946
947 /*
948 * Get a ceph client with specific addr and configuration; if one does
949 * not exist, create it. Either way, ceph_opts is consumed by this
950 * function.
951 */
952 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
953 {
954 struct rbd_client *rbdc;
955 int ret;
956
957 mutex_lock(&client_mutex);
958 rbdc = rbd_client_find(ceph_opts);
959 if (rbdc) {
960 ceph_destroy_options(ceph_opts);
961
962 /*
963 * Using an existing client. Make sure ->pg_pools is up to
964 * date before we look up the pool id in do_rbd_add().
965 */
966 ret = ceph_wait_for_latest_osdmap(rbdc->client,
967 rbdc->client->options->mount_timeout);
968 if (ret) {
969 rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
970 rbd_put_client(rbdc);
971 rbdc = ERR_PTR(ret);
972 }
973 } else {
974 rbdc = rbd_client_create(ceph_opts);
975 }
976 mutex_unlock(&client_mutex);
977
978 return rbdc;
979 }
980
981 static bool rbd_image_format_valid(u32 image_format)
982 {
983 return image_format == 1 || image_format == 2;
984 }
985
986 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
987 {
988 size_t size;
989 u32 snap_count;
990
991 /* The header has to start with the magic rbd header text */
992 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
993 return false;
994
995 /* The bio layer requires at least sector-sized I/O */
996
997 if (ondisk->options.order < SECTOR_SHIFT)
998 return false;
999
1000 /* If we use u64 in a few spots we may be able to loosen this */
1001
1002 if (ondisk->options.order > 8 * sizeof (int) - 1)
1003 return false;
1004
1005 /*
1006 * The size of a snapshot header has to fit in a size_t, and
1007 * that limits the number of snapshots.
1008 */
1009 snap_count = le32_to_cpu(ondisk->snap_count);
1010 size = SIZE_MAX - sizeof (struct ceph_snap_context);
1011 if (snap_count > size / sizeof (__le64))
1012 return false;
1013
1014 /*
1015 * Not only that, but the size of the entire snapshot
1016 * header must also be representable in a size_t.
1017 */
1018 size -= snap_count * sizeof (__le64);
1019 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
1020 return false;
1021
1022 return true;
1023 }
1024
1025 /*
1026 * returns the size of an object in the image
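* (1 << obj_order; an obj_order of 22, the usual default, gives 4 MiB
* objects)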
1027 */
1028 static u32 rbd_obj_bytes(struct rbd_image_header *header)
1029 {
1030 return 1U << header->obj_order;
1031 }
1032
1033 static void rbd_init_layout(struct rbd_device *rbd_dev)
1034 {
1035 if (rbd_dev->header.stripe_unit == 0 ||
1036 rbd_dev->header.stripe_count == 0) {
1037 rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
1038 rbd_dev->header.stripe_count = 1;
1039 }
1040
1041 rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
1042 rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
1043 rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
1044 rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
1045 rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
1046 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
1047 }
1048
1049 static void rbd_image_header_cleanup(struct rbd_image_header *header)
1050 {
1051 kfree(header->object_prefix);
1052 ceph_put_snap_context(header->snapc);
1053 kfree(header->snap_sizes);
1054 kfree(header->snap_names);
1055
1056 memset(header, 0, sizeof(*header));
1057 }
1058
1059 /*
1060 * Fill an rbd image header with information from the given format 1
1061 * on-disk header.
1062 */
1063 static int rbd_header_from_disk(struct rbd_image_header *header,
1064 struct rbd_image_header_ondisk *ondisk,
1065 bool first_time)
1066 {
1067 struct ceph_snap_context *snapc;
1068 char *object_prefix = NULL;
1069 char *snap_names = NULL;
1070 u64 *snap_sizes = NULL;
1071 u32 snap_count;
1072 int ret = -ENOMEM;
1073 u32 i;
1074
1075 /* Allocate this now to avoid having to handle failure below */
1076
1077 if (first_time) {
1078 object_prefix = kstrndup(ondisk->object_prefix,
1079 sizeof(ondisk->object_prefix),
1080 GFP_KERNEL);
1081 if (!object_prefix)
1082 return -ENOMEM;
1083 }
1084
1085 /* Allocate the snapshot context and fill it in */
1086
1087 snap_count = le32_to_cpu(ondisk->snap_count);
1088 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
1089 if (!snapc)
1090 goto out_err;
1091 snapc->seq = le64_to_cpu(ondisk->snap_seq);
1092 if (snap_count) {
1093 struct rbd_image_snap_ondisk *snaps;
1094 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
1095
1096 /* We'll keep a copy of the snapshot names... */
1097
1098 if (snap_names_len > (u64)SIZE_MAX)
1099 goto out_2big;
1100 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
1101 if (!snap_names)
1102 goto out_err;
1103
1104 /* ...as well as the array of their sizes. */
1105 snap_sizes = kmalloc_array(snap_count,
1106 sizeof(*header->snap_sizes),
1107 GFP_KERNEL);
1108 if (!snap_sizes)
1109 goto out_err;
1110
1111 /*
1112 * Copy the names, and fill in each snapshot's id
1113 * and size.
1114 *
1115 * Note that rbd_dev_v1_header_info() guarantees the
1116 * ondisk buffer we're working with has
1117 * snap_names_len bytes beyond the end of the
1118 * snapshot id array, so this memcpy() is safe.
1119 */
1120 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
1121 snaps = ondisk->snaps;
1122 for (i = 0; i < snap_count; i++) {
1123 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
1124 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
1125 }
1126 }
1127
1128 /* We won't fail any more, fill in the header */
1129
1130 if (first_time) {
1131 header->object_prefix = object_prefix;
1132 header->obj_order = ondisk->options.order;
1133 }
1134
1135 /* The remaining fields always get updated (when we refresh) */
1136
1137 header->image_size = le64_to_cpu(ondisk->image_size);
1138 header->snapc = snapc;
1139 header->snap_names = snap_names;
1140 header->snap_sizes = snap_sizes;
1141
1142 return 0;
1143 out_2big:
1144 ret = -EIO;
1145 out_err:
1146 kfree(snap_sizes);
1147 kfree(snap_names);
1148 ceph_put_snap_context(snapc);
1149 kfree(object_prefix);
1150
1151 return ret;
1152 }
1153
1154 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1155 {
1156 const char *snap_name;
1157
1158 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1159
1160 /* Skip over names until we find the one we are looking for */
1161
1162 snap_name = rbd_dev->header.snap_names;
1163 while (which--)
1164 snap_name += strlen(snap_name) + 1;
1165
1166 return kstrdup(snap_name, GFP_KERNEL);
1167 }
1168
1169 /*
1170 * Snapshot id comparison function for use with qsort()/bsearch().
1171 * Note that result is for snapshots in *descending* order.
1172 */
1173 static int snapid_compare_reverse(const void *s1, const void *s2)
1174 {
1175 u64 snap_id1 = *(u64 *)s1;
1176 u64 snap_id2 = *(u64 *)s2;
1177
1178 if (snap_id1 < snap_id2)
1179 return 1;
1180 return snap_id1 == snap_id2 ? 0 : -1;
1181 }
1182
1183 /*
1184 * Search a snapshot context to see if the given snapshot id is
1185 * present.
1186 *
1187 * Returns the position of the snapshot id in the array if it's found,
1188 * or BAD_SNAP_INDEX otherwise.
1189 *
1190 * Note: The snapshot array is kept sorted (by the osd) in
1191 * reverse order, highest snapshot id first.
1192 */
1193 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1194 {
1195 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1196 u64 *found;
1197
1198 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1199 sizeof (snap_id), snapid_compare_reverse);
1200
1201 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
1202 }
1203
1204 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1205 u64 snap_id)
1206 {
1207 u32 which;
1208 const char *snap_name;
1209
1210 which = rbd_dev_snap_index(rbd_dev, snap_id);
1211 if (which == BAD_SNAP_INDEX)
1212 return ERR_PTR(-ENOENT);
1213
1214 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1215 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1216 }
1217
1218 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1219 {
1220 if (snap_id == CEPH_NOSNAP)
1221 return RBD_SNAP_HEAD_NAME;
1222
1223 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1224 if (rbd_dev->image_format == 1)
1225 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1226
1227 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1228 }
1229
1230 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1231 u64 *snap_size)
1232 {
1233 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1234 if (snap_id == CEPH_NOSNAP) {
1235 *snap_size = rbd_dev->header.image_size;
1236 } else if (rbd_dev->image_format == 1) {
1237 u32 which;
1238
1239 which = rbd_dev_snap_index(rbd_dev, snap_id);
1240 if (which == BAD_SNAP_INDEX)
1241 return -ENOENT;
1242
1243 *snap_size = rbd_dev->header.snap_sizes[which];
1244 } else {
1245 u64 size = 0;
1246 int ret;
1247
1248 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1249 if (ret)
1250 return ret;
1251
1252 *snap_size = size;
1253 }
1254 return 0;
1255 }
1256
1257 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1258 {
1259 u64 snap_id = rbd_dev->spec->snap_id;
1260 u64 size = 0;
1261 int ret;
1262
1263 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1264 if (ret)
1265 return ret;
1266
1267 rbd_dev->mapping.size = size;
1268 return 0;
1269 }
1270
1271 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1272 {
1273 rbd_dev->mapping.size = 0;
1274 }
1275
1276 static void zero_bvec(struct bio_vec *bv)
1277 {
1278 void *buf;
1279 unsigned long flags;
1280
1281 buf = bvec_kmap_irq(bv, &flags);
1282 memset(buf, 0, bv->bv_len);
1283 flush_dcache_page(bv->bv_page);
1284 bvec_kunmap_irq(buf, &flags);
1285 }
1286
1287 static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
1288 {
1289 struct ceph_bio_iter it = *bio_pos;
1290
1291 ceph_bio_iter_advance(&it, off);
1292 ceph_bio_iter_advance_step(&it, bytes, ({
1293 zero_bvec(&bv);
1294 }));
1295 }
1296
1297 static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
1298 {
1299 struct ceph_bvec_iter it = *bvec_pos;
1300
1301 ceph_bvec_iter_advance(&it, off);
1302 ceph_bvec_iter_advance_step(&it, bytes, ({
1303 zero_bvec(&bv);
1304 }));
1305 }
1306
1307 /*
1308 * Zero a range in @obj_req data buffer defined by a bio (list) or
1309 * (private) bio_vec array.
1310 *
1311 * @off is relative to the start of the data buffer.
1312 */
1313 static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
1314 u32 bytes)
1315 {
1316 dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);
1317
1318 switch (obj_req->img_request->data_type) {
1319 case OBJ_REQUEST_BIO:
1320 zero_bios(&obj_req->bio_pos, off, bytes);
1321 break;
1322 case OBJ_REQUEST_BVECS:
1323 case OBJ_REQUEST_OWN_BVECS:
1324 zero_bvecs(&obj_req->bvec_pos, off, bytes);
1325 break;
1326 default:
1327 BUG();
1328 }
1329 }
1330
1331 static void rbd_obj_request_destroy(struct kref *kref);
1332 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1333 {
1334 rbd_assert(obj_request != NULL);
1335 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1336 kref_read(&obj_request->kref));
1337 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1338 }
1339
1340 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1341 struct rbd_obj_request *obj_request)
1342 {
1343 rbd_assert(obj_request->img_request == NULL);
1344
1345 /* Image request now owns object's original reference */
1346 obj_request->img_request = img_request;
1347 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1348 }
1349
1350 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1351 struct rbd_obj_request *obj_request)
1352 {
1353 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1354 list_del(&obj_request->ex.oe_item);
1355 rbd_assert(obj_request->img_request == img_request);
1356 rbd_obj_request_put(obj_request);
1357 }
1358
1359 static void rbd_osd_submit(struct ceph_osd_request *osd_req)
1360 {
1361 struct rbd_obj_request *obj_req = osd_req->r_priv;
1362
1363 dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
1364 __func__, osd_req, obj_req, obj_req->ex.oe_objno,
1365 obj_req->ex.oe_off, obj_req->ex.oe_len);
1366 ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
1367 }
1368
1369 /*
1370 * The default/initial value for all image request flags is 0. Each
1371 * is conditionally set to 1 at image request initialization time
1372 * and currently never changes thereafter.
1373 */
1374 static void img_request_layered_set(struct rbd_img_request *img_request)
1375 {
1376 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1377 }
1378
1379 static bool img_request_layered_test(struct rbd_img_request *img_request)
1380 {
1381 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1382 }
1383
1384 static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
1385 {
1386 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1387
1388 return !obj_req->ex.oe_off &&
1389 obj_req->ex.oe_len == rbd_dev->layout.object_size;
1390 }
1391
1392 static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
1393 {
1394 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1395
1396 return obj_req->ex.oe_off + obj_req->ex.oe_len ==
1397 rbd_dev->layout.object_size;
1398 }
1399
1400 /*
1401 * Must be called after rbd_obj_calc_img_extents().
1402 */
1403 static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
1404 {
1405 rbd_assert(obj_req->img_request->snapc);
1406
1407 if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
1408 dout("%s %p objno %llu discard\n", __func__, obj_req,
1409 obj_req->ex.oe_objno);
1410 return;
1411 }
1412
1413 if (!obj_req->num_img_extents) {
1414 dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
1415 obj_req->ex.oe_objno);
1416 return;
1417 }
1418
1419 if (rbd_obj_is_entire(obj_req) &&
1420 !obj_req->img_request->snapc->num_snaps) {
1421 dout("%s %p objno %llu entire\n", __func__, obj_req,
1422 obj_req->ex.oe_objno);
1423 return;
1424 }
1425
1426 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
1427 }
1428
1429 static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
1430 {
1431 return ceph_file_extents_bytes(obj_req->img_extents,
1432 obj_req->num_img_extents);
1433 }
1434
1435 static bool rbd_img_is_write(struct rbd_img_request *img_req)
1436 {
1437 switch (img_req->op_type) {
1438 case OBJ_OP_READ:
1439 return false;
1440 case OBJ_OP_WRITE:
1441 case OBJ_OP_DISCARD:
1442 case OBJ_OP_ZEROOUT:
1443 return true;
1444 default:
1445 BUG();
1446 }
1447 }
1448
1449 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
1450 {
1451 struct rbd_obj_request *obj_req = osd_req->r_priv;
1452 int result;
1453
1454 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1455 osd_req->r_result, obj_req);
1456
1457 /*
1458 * Writes aren't allowed to return a data payload. In some
1459 * guarded write cases (e.g. stat + zero on an empty object)
1460 * a stat response makes it through, but we don't care.
1461 */
1462 if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
1463 result = 0;
1464 else
1465 result = osd_req->r_result;
1466
1467 rbd_obj_handle_request(obj_req, result);
1468 }
1469
1470 static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
1471 {
1472 struct rbd_obj_request *obj_request = osd_req->r_priv;
1473 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1474 struct ceph_options *opt = rbd_dev->rbd_client->client->options;
1475
1476 osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
1477 osd_req->r_snapid = obj_request->img_request->snap_id;
1478 }
1479
1480 static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
1481 {
1482 struct rbd_obj_request *obj_request = osd_req->r_priv;
1483
1484 osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
1485 ktime_get_real_ts64(&osd_req->r_mtime);
1486 osd_req->r_data_offset = obj_request->ex.oe_off;
1487 }
1488
1489 static struct ceph_osd_request *
1490 __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
1491 struct ceph_snap_context *snapc, int num_ops)
1492 {
1493 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1494 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1495 struct ceph_osd_request *req;
1496 const char *name_format = rbd_dev->image_format == 1 ?
1497 RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
1498 int ret;
1499
1500 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
1501 if (!req)
1502 return ERR_PTR(-ENOMEM);
1503
1504 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
1505 req->r_callback = rbd_osd_req_callback;
1506 req->r_priv = obj_req;
1507
1508 /*
1509 * Data objects may be stored in a separate pool, but they always
1510 * use the same namespace there as the header object does in its pool.
1511 */
1512 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
1513 req->r_base_oloc.pool = rbd_dev->layout.pool_id;
1514
1515 ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
1516 rbd_dev->header.object_prefix,
1517 obj_req->ex.oe_objno);
1518 if (ret)
1519 return ERR_PTR(ret);
1520
1521 return req;
1522 }
1523
1524 static struct ceph_osd_request *
1525 rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
1526 {
1527 rbd_assert(obj_req->img_request->snapc);
1528 return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
1529 num_ops);
1530 }
1531
1532 static struct rbd_obj_request *rbd_obj_request_create(void)
1533 {
1534 struct rbd_obj_request *obj_request;
1535
1536 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
1537 if (!obj_request)
1538 return NULL;
1539
1540 ceph_object_extent_init(&obj_request->ex);
1541 INIT_LIST_HEAD(&obj_request->osd_reqs);
1542 mutex_init(&obj_request->state_mutex);
1543 kref_init(&obj_request->kref);
1544
1545 dout("%s %p\n", __func__, obj_request);
1546 return obj_request;
1547 }
1548
1549 static void rbd_obj_request_destroy(struct kref *kref)
1550 {
1551 struct rbd_obj_request *obj_request;
1552 struct ceph_osd_request *osd_req;
1553 u32 i;
1554
1555 obj_request = container_of(kref, struct rbd_obj_request, kref);
1556
1557 dout("%s: obj %p\n", __func__, obj_request);
1558
1559 while (!list_empty(&obj_request->osd_reqs)) {
1560 osd_req = list_first_entry(&obj_request->osd_reqs,
1561 struct ceph_osd_request, r_private_item);
1562 list_del_init(&osd_req->r_private_item);
1563 ceph_osdc_put_request(osd_req);
1564 }
1565
1566 switch (obj_request->img_request->data_type) {
1567 case OBJ_REQUEST_NODATA:
1568 case OBJ_REQUEST_BIO:
1569 case OBJ_REQUEST_BVECS:
1570 break; /* Nothing to do */
1571 case OBJ_REQUEST_OWN_BVECS:
1572 kfree(obj_request->bvec_pos.bvecs);
1573 break;
1574 default:
1575 BUG();
1576 }
1577
1578 kfree(obj_request->img_extents);
1579 if (obj_request->copyup_bvecs) {
1580 for (i = 0; i < obj_request->copyup_bvec_count; i++) {
1581 if (obj_request->copyup_bvecs[i].bv_page)
1582 __free_page(obj_request->copyup_bvecs[i].bv_page);
1583 }
1584 kfree(obj_request->copyup_bvecs);
1585 }
1586
1587 kmem_cache_free(rbd_obj_request_cache, obj_request);
1588 }
1589
1590 /* It's OK to call this for a device with no parent */
1591
1592 static void rbd_spec_put(struct rbd_spec *spec);
1593 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1594 {
1595 rbd_dev_remove_parent(rbd_dev);
1596 rbd_spec_put(rbd_dev->parent_spec);
1597 rbd_dev->parent_spec = NULL;
1598 rbd_dev->parent_overlap = 0;
1599 }
1600
1601 /*
1602 * Parent image reference counting is used to determine when an
1603 * image's parent fields can be safely torn down--after there are no
1604 * more in-flight requests to the parent image. When the last
1605 * reference is dropped, cleaning them up is safe.
1606 */
1607 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1608 {
1609 int counter;
1610
1611 if (!rbd_dev->parent_spec)
1612 return;
1613
1614 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1615 if (counter > 0)
1616 return;
1617
1618 /* Last reference; clean up parent data structures */
1619
1620 if (!counter)
1621 rbd_dev_unparent(rbd_dev);
1622 else
1623 rbd_warn(rbd_dev, "parent reference underflow");
1624 }
1625
1626 /*
1627 * If an image has a non-zero parent overlap, get a reference to its
1628 * parent.
1629 *
1630 * Returns true if the rbd device has a parent with a non-zero
1631 * overlap and a reference for it was successfully taken, or
1632 * false otherwise.
1633 */
1634 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1635 {
1636 int counter = 0;
1637
1638 if (!rbd_dev->parent_spec)
1639 return false;
1640
1641 if (rbd_dev->parent_overlap)
1642 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1643
1644 if (counter < 0)
1645 rbd_warn(rbd_dev, "parent reference overflow");
1646
1647 return counter > 0;
1648 }
1649
1650 static void rbd_img_request_init(struct rbd_img_request *img_request,
1651 struct rbd_device *rbd_dev,
1652 enum obj_operation_type op_type)
1653 {
1654 memset(img_request, 0, sizeof(*img_request));
1655
1656 img_request->rbd_dev = rbd_dev;
1657 img_request->op_type = op_type;
1658
1659 INIT_LIST_HEAD(&img_request->lock_item);
1660 INIT_LIST_HEAD(&img_request->object_extents);
1661 mutex_init(&img_request->state_mutex);
1662 }
1663
1664 /*
1665 * Only snap_id is captured here, for reads. For writes, snapshot
1666 * context is captured in rbd_img_object_requests() after exclusive
1667 * lock is ensured to be held.
1668 */
1669 static void rbd_img_capture_header(struct rbd_img_request *img_req)
1670 {
1671 struct rbd_device *rbd_dev = img_req->rbd_dev;
1672
1673 lockdep_assert_held(&rbd_dev->header_rwsem);
1674
1675 if (!rbd_img_is_write(img_req))
1676 img_req->snap_id = rbd_dev->spec->snap_id;
1677
1678 if (rbd_dev_parent_get(rbd_dev))
1679 img_request_layered_set(img_req);
1680 }
1681
1682 static void rbd_img_request_destroy(struct rbd_img_request *img_request)
1683 {
1684 struct rbd_obj_request *obj_request;
1685 struct rbd_obj_request *next_obj_request;
1686
1687 dout("%s: img %p\n", __func__, img_request);
1688
1689 WARN_ON(!list_empty(&img_request->lock_item));
1690 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1691 rbd_img_obj_request_del(img_request, obj_request);
1692
1693 if (img_request_layered_test(img_request))
1694 rbd_dev_parent_put(img_request->rbd_dev);
1695
1696 if (rbd_img_is_write(img_request))
1697 ceph_put_snap_context(img_request->snapc);
1698
1699 if (test_bit(IMG_REQ_CHILD, &img_request->flags))
1700 kmem_cache_free(rbd_img_request_cache, img_request);
1701 }
1702
1703 #define BITS_PER_OBJ 2
1704 #define OBJS_PER_BYTE (BITS_PER_BYTE / BITS_PER_OBJ)
1705 #define OBJ_MASK ((1 << BITS_PER_OBJ) - 1)
1706
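/*
* Each object gets a 2-bit state, packed four to a byte with object 0
* in the most significant bits.  For example, objno 5 lands in byte 1
* at bit shift 4: state = (object_map[1] >> 4) & OBJ_MASK.
*/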
1707 static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
1708 u64 *index, u8 *shift)
1709 {
1710 u32 off;
1711
1712 rbd_assert(objno < rbd_dev->object_map_size);
1713 *index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
1714 *shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
1715 }
1716
1717 static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1718 {
1719 u64 index;
1720 u8 shift;
1721
1722 lockdep_assert_held(&rbd_dev->object_map_lock);
1723 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1724 return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
1725 }
1726
1727 static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
1728 {
1729 u64 index;
1730 u8 shift;
1731 u8 *p;
1732
1733 lockdep_assert_held(&rbd_dev->object_map_lock);
1734 rbd_assert(!(val & ~OBJ_MASK));
1735
1736 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1737 p = &rbd_dev->object_map[index];
1738 *p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
1739 }
1740
1741 static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1742 {
1743 u8 state;
1744
1745 spin_lock(&rbd_dev->object_map_lock);
1746 state = __rbd_object_map_get(rbd_dev, objno);
1747 spin_unlock(&rbd_dev->object_map_lock);
1748 return state;
1749 }
1750
1751 static bool use_object_map(struct rbd_device *rbd_dev)
1752 {
1753 /*
1754 * An image mapped read-only can't use the object map -- it isn't
1755 * loaded because the header lock isn't acquired. Someone else can
1756 * write to the image and update the object map behind our back.
1757 *
1758 * A snapshot can't be written to, so using the object map is always
1759 * safe.
1760 */
1761 if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
1762 return false;
1763
1764 return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
1765 !(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
1766 }
1767
1768 static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
1769 {
1770 u8 state;
1771
1772 /* fall back to default logic if object map is disabled or invalid */
1773 if (!use_object_map(rbd_dev))
1774 return true;
1775
1776 state = rbd_object_map_get(rbd_dev, objno);
1777 return state != OBJECT_NONEXISTENT;
1778 }
1779
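/*
* The object map object is named <RBD_OBJECT_MAP_PREFIX><image id> for
* the HEAD revision, with ".<snap id as 16 hex digits>" appended for a
* snapshot (see the format strings below).
*/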
1780 static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
1781 struct ceph_object_id *oid)
1782 {
1783 if (snap_id == CEPH_NOSNAP)
1784 ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
1785 rbd_dev->spec->image_id);
1786 else
1787 ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
1788 rbd_dev->spec->image_id, snap_id);
1789 }
1790
1791 static int rbd_object_map_lock(struct rbd_device *rbd_dev)
1792 {
1793 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1794 CEPH_DEFINE_OID_ONSTACK(oid);
1795 u8 lock_type;
1796 char *lock_tag;
1797 struct ceph_locker *lockers;
1798 u32 num_lockers;
1799 bool broke_lock = false;
1800 int ret;
1801
1802 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1803
1804 again:
1805 ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1806 CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
1807 if (ret != -EBUSY || broke_lock) {
1808 if (ret == -EEXIST)
1809 ret = 0; /* already locked by myself */
1810 if (ret)
1811 rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
1812 return ret;
1813 }
1814
1815 ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
1816 RBD_LOCK_NAME, &lock_type, &lock_tag,
1817 &lockers, &num_lockers);
1818 if (ret) {
1819 if (ret == -ENOENT)
1820 goto again;
1821
1822 rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
1823 return ret;
1824 }
1825
1826 kfree(lock_tag);
1827 if (num_lockers == 0)
1828 goto again;
1829
1830 rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
1831 ENTITY_NAME(lockers[0].id.name));
1832
1833 ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
1834 RBD_LOCK_NAME, lockers[0].id.cookie,
1835 &lockers[0].id.name);
1836 ceph_free_lockers(lockers, num_lockers);
1837 if (ret) {
1838 if (ret == -ENOENT)
1839 goto again;
1840
1841 rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
1842 return ret;
1843 }
1844
1845 broke_lock = true;
1846 goto again;
1847 }
1848
1849 static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
1850 {
1851 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1852 CEPH_DEFINE_OID_ONSTACK(oid);
1853 int ret;
1854
1855 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1856
1857 ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1858 "");
1859 if (ret && ret != -ENOENT)
1860 rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
1861 }
1862
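/*
 * The object_map_load reply starts with a length-prefixed BitVector
 * header.  Extract the object count (object_map_size) and advance *p
 * past the whole header, leaving the caller positioned at the start of
 * the bit data.
 */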
1863 static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
1864 {
1865 u8 struct_v;
1866 u32 struct_len;
1867 u32 header_len;
1868 void *header_end;
1869 int ret;
1870
1871 ceph_decode_32_safe(p, end, header_len, e_inval);
1872 header_end = *p + header_len;
1873
1874 ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
1875 &struct_len);
1876 if (ret)
1877 return ret;
1878
1879 ceph_decode_64_safe(p, end, *object_map_size, e_inval);
1880
1881 *p = header_end;
1882 return 0;
1883
1884 e_inval:
1885 return -EINVAL;
1886 }
1887
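/*
 * Fetch the object map for the mapped snapshot (or HEAD) with the
 * object_map_load class method.  The reply lands in a page vector
 * sized for the bit data plus one extra page for the header; it is
 * then validated against the expected object count and copied into a
 * kvmalloc'ed buffer hanging off rbd_dev.
 */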
1888 static int __rbd_object_map_load(struct rbd_device *rbd_dev)
1889 {
1890 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1891 CEPH_DEFINE_OID_ONSTACK(oid);
1892 struct page **pages;
1893 void *p, *end;
1894 size_t reply_len;
1895 u64 num_objects;
1896 u64 object_map_bytes;
1897 u64 object_map_size;
1898 int num_pages;
1899 int ret;
1900
1901 rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);
1902
1903 num_objects = ceph_get_num_objects(&rbd_dev->layout,
1904 rbd_dev->mapping.size);
1905 object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
1906 BITS_PER_BYTE);
1907 num_pages = calc_pages_for(0, object_map_bytes) + 1;
1908 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1909 if (IS_ERR(pages))
1910 return PTR_ERR(pages);
1911
1912 reply_len = num_pages * PAGE_SIZE;
1913 rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
1914 ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
1915 "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
1916 NULL, 0, pages, &reply_len);
1917 if (ret)
1918 goto out;
1919
1920 p = page_address(pages[0]);
1921 end = p + min(reply_len, (size_t)PAGE_SIZE);
1922 ret = decode_object_map_header(&p, end, &object_map_size);
1923 if (ret)
1924 goto out;
1925
1926 if (object_map_size != num_objects) {
1927 rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
1928 object_map_size, num_objects);
1929 ret = -EINVAL;
1930 goto out;
1931 }
1932
1933 if (offset_in_page(p) + object_map_bytes > reply_len) {
1934 ret = -EINVAL;
1935 goto out;
1936 }
1937
1938 rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
1939 if (!rbd_dev->object_map) {
1940 ret = -ENOMEM;
1941 goto out;
1942 }
1943
1944 rbd_dev->object_map_size = object_map_size;
1945 ceph_copy_from_page_vector(pages, rbd_dev->object_map,
1946 offset_in_page(p), object_map_bytes);
1947
1948 out:
1949 ceph_release_page_vector(pages, num_pages);
1950 return ret;
1951 }
1952
1953 static void rbd_object_map_free(struct rbd_device *rbd_dev)
1954 {
1955 kvfree(rbd_dev->object_map);
1956 rbd_dev->object_map = NULL;
1957 rbd_dev->object_map_size = 0;
1958 }
1959
1960 static int rbd_object_map_load(struct rbd_device *rbd_dev)
1961 {
1962 int ret;
1963
1964 ret = __rbd_object_map_load(rbd_dev);
1965 if (ret)
1966 return ret;
1967
1968 ret = rbd_dev_v2_get_flags(rbd_dev);
1969 if (ret) {
1970 rbd_object_map_free(rbd_dev);
1971 return ret;
1972 }
1973
1974 if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
1975 rbd_warn(rbd_dev, "object map is invalid");
1976
1977 return 0;
1978 }
1979
1980 static int rbd_object_map_open(struct rbd_device *rbd_dev)
1981 {
1982 int ret;
1983
1984 ret = rbd_object_map_lock(rbd_dev);
1985 if (ret)
1986 return ret;
1987
1988 ret = rbd_object_map_load(rbd_dev);
1989 if (ret) {
1990 rbd_object_map_unlock(rbd_dev);
1991 return ret;
1992 }
1993
1994 return 0;
1995 }
1996
1997 static void rbd_object_map_close(struct rbd_device *rbd_dev)
1998 {
1999 rbd_object_map_free(rbd_dev);
2000 rbd_object_map_unlock(rbd_dev);
2001 }
2002
2003 /*
2004 * This function needs snap_id (or more precisely just something to
2005 * distinguish between HEAD and snapshot object maps), new_state and
2006 * current_state that were passed to rbd_object_map_update().
2007 *
2008 * To avoid allocating and stashing a context we piggyback on the OSD
2009 * request. A HEAD update has two ops (assert_locked). For new_state
2010 * and current_state we decode our own object_map_update op, encoded in
2011 * rbd_cls_object_map_update().
2012 */
2013 static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
2014 struct ceph_osd_request *osd_req)
2015 {
2016 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2017 struct ceph_osd_data *osd_data;
2018 u64 objno;
2019 u8 state, new_state, current_state;
2020 bool has_current_state;
2021 void *p;
2022
2023 if (osd_req->r_result)
2024 return osd_req->r_result;
2025
2026 /*
2027 * Nothing to do for a snapshot object map.
2028 */
2029 if (osd_req->r_num_ops == 1)
2030 return 0;
2031
2032 /*
2033 * Update in-memory HEAD object map.
2034 */
2035 rbd_assert(osd_req->r_num_ops == 2);
2036 osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
2037 rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);
2038
2039 p = page_address(osd_data->pages[0]);
2040 objno = ceph_decode_64(&p);
2041 rbd_assert(objno == obj_req->ex.oe_objno);
2042 rbd_assert(ceph_decode_64(&p) == objno + 1);
2043 new_state = ceph_decode_8(&p);
2044 has_current_state = ceph_decode_8(&p);
2045 if (has_current_state)
2046 current_state = ceph_decode_8(&p);
2047
2048 spin_lock(&rbd_dev->object_map_lock);
2049 state = __rbd_object_map_get(rbd_dev, objno);
2050 if (!has_current_state || current_state == state ||
2051 (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
2052 __rbd_object_map_set(rbd_dev, objno, new_state);
2053 spin_unlock(&rbd_dev->object_map_lock);
2054
2055 return 0;
2056 }
2057
2058 static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
2059 {
2060 struct rbd_obj_request *obj_req = osd_req->r_priv;
2061 int result;
2062
2063 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
2064 osd_req->r_result, obj_req);
2065
2066 result = rbd_object_map_update_finish(obj_req, osd_req);
2067 rbd_obj_handle_request(obj_req, result);
2068 }
2069
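/*
 * Check whether updating the HEAD object map entry for @objno to
 * @new_state would change anything.  Re-setting the current state is a
 * no-op, as is marking a nonexistent object PENDING or marking an
 * object NONEXISTENT when it isn't PENDING.
 */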
2070 static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
2071 {
2072 u8 state = rbd_object_map_get(rbd_dev, objno);
2073
2074 if (state == new_state ||
2075 (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
2076 (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
2077 return false;
2078
2079 return true;
2080 }
2081
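/*
 * Encode an object_map_update call: the [objno, objno + 1) range, the
 * new state and an optional expected current state, all packed into a
 * single page.  The same page is decoded back by
 * rbd_object_map_update_finish() to update the in-memory map.
 */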
2082 static int rbd_cls_object_map_update(struct ceph_osd_request *req,
2083 int which, u64 objno, u8 new_state,
2084 const u8 *current_state)
2085 {
2086 struct page **pages;
2087 void *p, *start;
2088 int ret;
2089
2090 ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
2091 if (ret)
2092 return ret;
2093
2094 pages = ceph_alloc_page_vector(1, GFP_NOIO);
2095 if (IS_ERR(pages))
2096 return PTR_ERR(pages);
2097
2098 p = start = page_address(pages[0]);
2099 ceph_encode_64(&p, objno);
2100 ceph_encode_64(&p, objno + 1);
2101 ceph_encode_8(&p, new_state);
2102 if (current_state) {
2103 ceph_encode_8(&p, 1);
2104 ceph_encode_8(&p, *current_state);
2105 } else {
2106 ceph_encode_8(&p, 0);
2107 }
2108
2109 osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
2110 false, true);
2111 return 0;
2112 }
2113
2114 /*
2115 * Return:
2116 * 0 - object map update sent
2117 * 1 - object map update isn't needed
2118 * <0 - error
2119 */
2120 static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
2121 u8 new_state, const u8 *current_state)
2122 {
2123 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2124 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2125 struct ceph_osd_request *req;
2126 int num_ops = 1;
2127 int which = 0;
2128 int ret;
2129
2130 if (snap_id == CEPH_NOSNAP) {
2131 if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
2132 return 1;
2133
2134 num_ops++; /* assert_locked */
2135 }
2136
2137 req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
2138 if (!req)
2139 return -ENOMEM;
2140
2141 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
2142 req->r_callback = rbd_object_map_callback;
2143 req->r_priv = obj_req;
2144
2145 rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
2146 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
2147 req->r_flags = CEPH_OSD_FLAG_WRITE;
2148 ktime_get_real_ts64(&req->r_mtime);
2149
2150 if (snap_id == CEPH_NOSNAP) {
2151 /*
2152 * Protect against possible race conditions during lock
2153 * ownership transitions.
2154 */
2155 ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
2156 CEPH_CLS_LOCK_EXCLUSIVE, "", "");
2157 if (ret)
2158 return ret;
2159 }
2160
2161 ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
2162 new_state, current_state);
2163 if (ret)
2164 return ret;
2165
2166 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
2167 if (ret)
2168 return ret;
2169
2170 ceph_osdc_start_request(osdc, req, false);
2171 return 0;
2172 }
2173
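/*
 * Clip a list of image extents to the parent overlap: extents that
 * start at or beyond @overlap are dropped and the last remaining
 * extent is trimmed so that it ends at @overlap.
 */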
2174 static void prune_extents(struct ceph_file_extent *img_extents,
2175 u32 *num_img_extents, u64 overlap)
2176 {
2177 u32 cnt = *num_img_extents;
2178
2179 /* drop extents completely beyond the overlap */
2180 while (cnt && img_extents[cnt - 1].fe_off >= overlap)
2181 cnt--;
2182
2183 if (cnt) {
2184 struct ceph_file_extent *ex = &img_extents[cnt - 1];
2185
2186 /* trim final overlapping extent */
2187 if (ex->fe_off + ex->fe_len > overlap)
2188 ex->fe_len = overlap - ex->fe_off;
2189 }
2190
2191 *num_img_extents = cnt;
2192 }
2193
2194 /*
2195 * Determine the byte range(s) covered by either just the object extent
2196 * or the entire object in the parent image.
2197 */
2198 static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
2199 bool entire)
2200 {
2201 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2202 int ret;
2203
2204 if (!rbd_dev->parent_overlap)
2205 return 0;
2206
2207 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
2208 entire ? 0 : obj_req->ex.oe_off,
2209 entire ? rbd_dev->layout.object_size :
2210 obj_req->ex.oe_len,
2211 &obj_req->img_extents,
2212 &obj_req->num_img_extents);
2213 if (ret)
2214 return ret;
2215
2216 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2217 rbd_dev->parent_overlap);
2218 return 0;
2219 }
2220
2221 static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
2222 {
2223 struct rbd_obj_request *obj_req = osd_req->r_priv;
2224
2225 switch (obj_req->img_request->data_type) {
2226 case OBJ_REQUEST_BIO:
2227 osd_req_op_extent_osd_data_bio(osd_req, which,
2228 &obj_req->bio_pos,
2229 obj_req->ex.oe_len);
2230 break;
2231 case OBJ_REQUEST_BVECS:
2232 case OBJ_REQUEST_OWN_BVECS:
2233 rbd_assert(obj_req->bvec_pos.iter.bi_size ==
2234 obj_req->ex.oe_len);
2235 rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
2236 osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
2237 &obj_req->bvec_pos);
2238 break;
2239 default:
2240 BUG();
2241 }
2242 }
2243
2244 static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
2245 {
2246 struct page **pages;
2247
2248 /*
2249 * The response data for a STAT call consists of:
2250 * le64 length;
2251 * struct {
2252 * le32 tv_sec;
2253 * le32 tv_nsec;
2254 * } mtime;
2255 */
2256 pages = ceph_alloc_page_vector(1, GFP_NOIO);
2257 if (IS_ERR(pages))
2258 return PTR_ERR(pages);
2259
2260 osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
2261 osd_req_op_raw_data_in_pages(osd_req, which, pages,
2262 8 + sizeof(struct ceph_timespec),
2263 0, false, true);
2264 return 0;
2265 }
2266
2267 static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
2268 u32 bytes)
2269 {
2270 struct rbd_obj_request *obj_req = osd_req->r_priv;
2271 int ret;
2272
2273 ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
2274 if (ret)
2275 return ret;
2276
2277 osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
2278 obj_req->copyup_bvec_count, bytes);
2279 return 0;
2280 }
2281
2282 static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
2283 {
2284 obj_req->read_state = RBD_OBJ_READ_START;
2285 return 0;
2286 }
2287
2288 static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2289 int which)
2290 {
2291 struct rbd_obj_request *obj_req = osd_req->r_priv;
2292 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2293 u16 opcode;
2294
2295 if (!use_object_map(rbd_dev) ||
2296 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
2297 osd_req_op_alloc_hint_init(osd_req, which++,
2298 rbd_dev->layout.object_size,
2299 rbd_dev->layout.object_size,
2300 rbd_dev->opts->alloc_hint_flags);
2301 }
2302
2303 if (rbd_obj_is_entire(obj_req))
2304 opcode = CEPH_OSD_OP_WRITEFULL;
2305 else
2306 opcode = CEPH_OSD_OP_WRITE;
2307
2308 osd_req_op_extent_init(osd_req, which, opcode,
2309 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2310 rbd_osd_setup_data(osd_req, which);
2311 }
2312
2313 static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
2314 {
2315 int ret;
2316
2317 /* reverse map the entire object onto the parent */
2318 ret = rbd_obj_calc_img_extents(obj_req, true);
2319 if (ret)
2320 return ret;
2321
2322 obj_req->write_state = RBD_OBJ_WRITE_START;
2323 return 0;
2324 }
2325
2326 static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
2327 {
2328 return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
2329 CEPH_OSD_OP_ZERO;
2330 }
2331
2332 static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
2333 int which)
2334 {
2335 struct rbd_obj_request *obj_req = osd_req->r_priv;
2336
2337 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
2338 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2339 osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
2340 } else {
2341 osd_req_op_extent_init(osd_req, which,
2342 truncate_or_zero_opcode(obj_req),
2343 obj_req->ex.oe_off, obj_req->ex.oe_len,
2344 0, 0);
2345 }
2346 }
2347
2348 static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
2349 {
2350 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2351 u64 off, next_off;
2352 int ret;
2353
2354 /*
2355 * Align the range to alloc_size boundary and punt on discards
2356 * that are too small to free up any space.
2357 *
2358 * alloc_size == object_size && is_tail() is a special case for
2359 * filestore with filestore_punch_hole = false, needed to allow
2360 * truncate (in addition to delete).
2361 */
2362 if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
2363 !rbd_obj_is_tail(obj_req)) {
2364 off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
2365 next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
2366 rbd_dev->opts->alloc_size);
2367 if (off >= next_off)
2368 return 1;
2369
2370 dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
2371 obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
2372 off, next_off - off);
2373 obj_req->ex.oe_off = off;
2374 obj_req->ex.oe_len = next_off - off;
2375 }
2376
2377 /* reverse map the entire object onto the parent */
2378 ret = rbd_obj_calc_img_extents(obj_req, true);
2379 if (ret)
2380 return ret;
2381
2382 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2383 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
2384 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2385
2386 obj_req->write_state = RBD_OBJ_WRITE_START;
2387 return 0;
2388 }
2389
2390 static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
2391 int which)
2392 {
2393 struct rbd_obj_request *obj_req = osd_req->r_priv;
2394 u16 opcode;
2395
2396 if (rbd_obj_is_entire(obj_req)) {
2397 if (obj_req->num_img_extents) {
2398 if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2399 osd_req_op_init(osd_req, which++,
2400 CEPH_OSD_OP_CREATE, 0);
2401 opcode = CEPH_OSD_OP_TRUNCATE;
2402 } else {
2403 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2404 osd_req_op_init(osd_req, which++,
2405 CEPH_OSD_OP_DELETE, 0);
2406 opcode = 0;
2407 }
2408 } else {
2409 opcode = truncate_or_zero_opcode(obj_req);
2410 }
2411
2412 if (opcode)
2413 osd_req_op_extent_init(osd_req, which, opcode,
2414 obj_req->ex.oe_off, obj_req->ex.oe_len,
2415 0, 0);
2416 }
2417
2418 static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
2419 {
2420 int ret;
2421
2422 /* reverse map the entire object onto the parent */
2423 ret = rbd_obj_calc_img_extents(obj_req, true);
2424 if (ret)
2425 return ret;
2426
2427 if (!obj_req->num_img_extents) {
2428 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2429 if (rbd_obj_is_entire(obj_req))
2430 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2431 }
2432
2433 obj_req->write_state = RBD_OBJ_WRITE_START;
2434 return 0;
2435 }
2436
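/*
 * Number of OSD ops that rbd_osd_setup_write_ops() will add for this
 * object request, not counting the optional leading stat op used for
 * copyup detection.  Must be kept in sync with the __rbd_osd_setup_*()
 * helpers above.
 */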
2437 static int count_write_ops(struct rbd_obj_request *obj_req)
2438 {
2439 struct rbd_img_request *img_req = obj_req->img_request;
2440
2441 switch (img_req->op_type) {
2442 case OBJ_OP_WRITE:
2443 if (!use_object_map(img_req->rbd_dev) ||
2444 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2445 return 2; /* setallochint + write/writefull */
2446
2447 return 1; /* write/writefull */
2448 case OBJ_OP_DISCARD:
2449 return 1; /* delete/truncate/zero */
2450 case OBJ_OP_ZEROOUT:
2451 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2452 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2453 return 2; /* create + truncate */
2454
2455 return 1; /* delete/truncate/zero */
2456 default:
2457 BUG();
2458 }
2459 }
2460
2461 static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2462 int which)
2463 {
2464 struct rbd_obj_request *obj_req = osd_req->r_priv;
2465
2466 switch (obj_req->img_request->op_type) {
2467 case OBJ_OP_WRITE:
2468 __rbd_osd_setup_write_ops(osd_req, which);
2469 break;
2470 case OBJ_OP_DISCARD:
2471 __rbd_osd_setup_discard_ops(osd_req, which);
2472 break;
2473 case OBJ_OP_ZEROOUT:
2474 __rbd_osd_setup_zeroout_ops(osd_req, which);
2475 break;
2476 default:
2477 BUG();
2478 }
2479 }
2480
2481 /*
2482 * Prune the list of object requests (adjust offset and/or length, drop
2483 * redundant requests). Prepare object request state machines and image
2484 * request state machine for execution.
2485 */
2486 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2487 {
2488 struct rbd_obj_request *obj_req, *next_obj_req;
2489 int ret;
2490
2491 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2492 switch (img_req->op_type) {
2493 case OBJ_OP_READ:
2494 ret = rbd_obj_init_read(obj_req);
2495 break;
2496 case OBJ_OP_WRITE:
2497 ret = rbd_obj_init_write(obj_req);
2498 break;
2499 case OBJ_OP_DISCARD:
2500 ret = rbd_obj_init_discard(obj_req);
2501 break;
2502 case OBJ_OP_ZEROOUT:
2503 ret = rbd_obj_init_zeroout(obj_req);
2504 break;
2505 default:
2506 BUG();
2507 }
2508 if (ret < 0)
2509 return ret;
2510 if (ret > 0) {
2511 rbd_img_obj_request_del(img_req, obj_req);
2512 continue;
2513 }
2514 }
2515
2516 img_req->state = RBD_IMG_START;
2517 return 0;
2518 }
2519
2520 union rbd_img_fill_iter {
2521 struct ceph_bio_iter bio_iter;
2522 struct ceph_bvec_iter bvec_iter;
2523 };
2524
2525 struct rbd_img_fill_ctx {
2526 enum obj_request_type pos_type;
2527 union rbd_img_fill_iter *pos;
2528 union rbd_img_fill_iter iter;
2529 ceph_object_extent_fn_t set_pos_fn;
2530 ceph_object_extent_fn_t count_fn;
2531 ceph_object_extent_fn_t copy_fn;
2532 };
2533
2534 static struct ceph_object_extent *alloc_object_extent(void *arg)
2535 {
2536 struct rbd_img_request *img_req = arg;
2537 struct rbd_obj_request *obj_req;
2538
2539 obj_req = rbd_obj_request_create();
2540 if (!obj_req)
2541 return NULL;
2542
2543 rbd_img_obj_request_add(img_req, obj_req);
2544 return &obj_req->ex;
2545 }
2546
2547 /*
2548 * While su != os && sc == 1 is technically not fancy (it's the same
2549 * layout as su == os && sc == 1), we can't use the nocopy path for it
2550 * because ->set_pos_fn() should be called only once per object.
2551 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
2552 * treat su != os && sc == 1 as fancy.
2553 */
2554 static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
2555 {
2556 return l->stripe_unit != l->object_size;
2557 }
2558
2559 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2560 struct ceph_file_extent *img_extents,
2561 u32 num_img_extents,
2562 struct rbd_img_fill_ctx *fctx)
2563 {
2564 u32 i;
2565 int ret;
2566
2567 img_req->data_type = fctx->pos_type;
2568
2569 /*
2570 * Create object requests and set each object request's starting
2571 * position in the provided bio (list) or bio_vec array.
2572 */
2573 fctx->iter = *fctx->pos;
2574 for (i = 0; i < num_img_extents; i++) {
2575 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2576 img_extents[i].fe_off,
2577 img_extents[i].fe_len,
2578 &img_req->object_extents,
2579 alloc_object_extent, img_req,
2580 fctx->set_pos_fn, &fctx->iter);
2581 if (ret)
2582 return ret;
2583 }
2584
2585 return __rbd_img_fill_request(img_req);
2586 }
2587
2588 /*
2589 * Map a list of image extents to a list of object extents, create the
2590 * corresponding object requests (normally each to a different object,
2591 * but not always) and add them to @img_req. For each object request,
2592 * set up its data descriptor to point to the corresponding chunk(s) of
2593 * @fctx->pos data buffer.
2594 *
2595 * Because ceph_file_to_extents() will merge adjacent object extents
2596 * together, each object request's data descriptor may point to multiple
2597 * different chunks of @fctx->pos data buffer.
2598 *
2599 * @fctx->pos data buffer is assumed to be large enough.
2600 */
2601 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2602 struct ceph_file_extent *img_extents,
2603 u32 num_img_extents,
2604 struct rbd_img_fill_ctx *fctx)
2605 {
2606 struct rbd_device *rbd_dev = img_req->rbd_dev;
2607 struct rbd_obj_request *obj_req;
2608 u32 i;
2609 int ret;
2610
2611 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2612 !rbd_layout_is_fancy(&rbd_dev->layout))
2613 return rbd_img_fill_request_nocopy(img_req, img_extents,
2614 num_img_extents, fctx);
2615
2616 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2617
2618 /*
2619 * Create object requests and determine ->bvec_count for each object
2620 * request. Note that ->bvec_count sum over all object requests may
2621 * be greater than the number of bio_vecs in the provided bio (list)
2622 * or bio_vec array because when mapped, those bio_vecs can straddle
2623 * stripe unit boundaries.
2624 */
2625 fctx->iter = *fctx->pos;
2626 for (i = 0; i < num_img_extents; i++) {
2627 ret = ceph_file_to_extents(&rbd_dev->layout,
2628 img_extents[i].fe_off,
2629 img_extents[i].fe_len,
2630 &img_req->object_extents,
2631 alloc_object_extent, img_req,
2632 fctx->count_fn, &fctx->iter);
2633 if (ret)
2634 return ret;
2635 }
2636
2637 for_each_obj_request(img_req, obj_req) {
2638 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2639 sizeof(*obj_req->bvec_pos.bvecs),
2640 GFP_NOIO);
2641 if (!obj_req->bvec_pos.bvecs)
2642 return -ENOMEM;
2643 }
2644
2645 /*
2646 * Fill in each object request's private bio_vec array, splitting and
2647 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2648 */
2649 fctx->iter = *fctx->pos;
2650 for (i = 0; i < num_img_extents; i++) {
2651 ret = ceph_iterate_extents(&rbd_dev->layout,
2652 img_extents[i].fe_off,
2653 img_extents[i].fe_len,
2654 &img_req->object_extents,
2655 fctx->copy_fn, &fctx->iter);
2656 if (ret)
2657 return ret;
2658 }
2659
2660 return __rbd_img_fill_request(img_req);
2661 }
2662
2663 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2664 u64 off, u64 len)
2665 {
2666 struct ceph_file_extent ex = { off, len };
2667 union rbd_img_fill_iter dummy = {};
2668 struct rbd_img_fill_ctx fctx = {
2669 .pos_type = OBJ_REQUEST_NODATA,
2670 .pos = &dummy,
2671 };
2672
2673 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2674 }
2675
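/*
 * set_pos_fn/count_fn/copy_fn callbacks for bio-backed image requests
 * (see struct rbd_img_fill_ctx).  set_bio_pos() records where each
 * object request's data starts in the bio (list), count_bio_bvecs()
 * counts how many bio_vecs that data spans and copy_bio_bvecs() copies
 * them into the object request's own bio_vec array (fancy layouts).
 */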
2676 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2677 {
2678 struct rbd_obj_request *obj_req =
2679 container_of(ex, struct rbd_obj_request, ex);
2680 struct ceph_bio_iter *it = arg;
2681
2682 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2683 obj_req->bio_pos = *it;
2684 ceph_bio_iter_advance(it, bytes);
2685 }
2686
2687 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2688 {
2689 struct rbd_obj_request *obj_req =
2690 container_of(ex, struct rbd_obj_request, ex);
2691 struct ceph_bio_iter *it = arg;
2692
2693 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2694 ceph_bio_iter_advance_step(it, bytes, ({
2695 obj_req->bvec_count++;
2696 }));
2697
2698 }
2699
2700 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2701 {
2702 struct rbd_obj_request *obj_req =
2703 container_of(ex, struct rbd_obj_request, ex);
2704 struct ceph_bio_iter *it = arg;
2705
2706 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2707 ceph_bio_iter_advance_step(it, bytes, ({
2708 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2709 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2710 }));
2711 }
2712
2713 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2714 struct ceph_file_extent *img_extents,
2715 u32 num_img_extents,
2716 struct ceph_bio_iter *bio_pos)
2717 {
2718 struct rbd_img_fill_ctx fctx = {
2719 .pos_type = OBJ_REQUEST_BIO,
2720 .pos = (union rbd_img_fill_iter *)bio_pos,
2721 .set_pos_fn = set_bio_pos,
2722 .count_fn = count_bio_bvecs,
2723 .copy_fn = copy_bio_bvecs,
2724 };
2725
2726 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2727 &fctx);
2728 }
2729
2730 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2731 u64 off, u64 len, struct bio *bio)
2732 {
2733 struct ceph_file_extent ex = { off, len };
2734 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2735
2736 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2737 }
2738
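/*
 * The equivalent set_pos_fn/count_fn/copy_fn callbacks for bio_vec
 * array backed image requests.
 */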
2739 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2740 {
2741 struct rbd_obj_request *obj_req =
2742 container_of(ex, struct rbd_obj_request, ex);
2743 struct ceph_bvec_iter *it = arg;
2744
2745 obj_req->bvec_pos = *it;
2746 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2747 ceph_bvec_iter_advance(it, bytes);
2748 }
2749
2750 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2751 {
2752 struct rbd_obj_request *obj_req =
2753 container_of(ex, struct rbd_obj_request, ex);
2754 struct ceph_bvec_iter *it = arg;
2755
2756 ceph_bvec_iter_advance_step(it, bytes, ({
2757 obj_req->bvec_count++;
2758 }));
2759 }
2760
2761 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2762 {
2763 struct rbd_obj_request *obj_req =
2764 container_of(ex, struct rbd_obj_request, ex);
2765 struct ceph_bvec_iter *it = arg;
2766
2767 ceph_bvec_iter_advance_step(it, bytes, ({
2768 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2769 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2770 }));
2771 }
2772
2773 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2774 struct ceph_file_extent *img_extents,
2775 u32 num_img_extents,
2776 struct ceph_bvec_iter *bvec_pos)
2777 {
2778 struct rbd_img_fill_ctx fctx = {
2779 .pos_type = OBJ_REQUEST_BVECS,
2780 .pos = (union rbd_img_fill_iter *)bvec_pos,
2781 .set_pos_fn = set_bvec_pos,
2782 .count_fn = count_bvecs,
2783 .copy_fn = copy_bvecs,
2784 };
2785
2786 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2787 &fctx);
2788 }
2789
2790 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2791 struct ceph_file_extent *img_extents,
2792 u32 num_img_extents,
2793 struct bio_vec *bvecs)
2794 {
2795 struct ceph_bvec_iter it = {
2796 .bvecs = bvecs,
2797 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2798 num_img_extents) },
2799 };
2800
2801 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2802 &it);
2803 }
2804
2805 static void rbd_img_handle_request_work(struct work_struct *work)
2806 {
2807 struct rbd_img_request *img_req =
2808 container_of(work, struct rbd_img_request, work);
2809
2810 rbd_img_handle_request(img_req, img_req->work_result);
2811 }
2812
2813 static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
2814 {
2815 INIT_WORK(&img_req->work, rbd_img_handle_request_work);
2816 img_req->work_result = result;
2817 queue_work(rbd_wq, &img_req->work);
2818 }
2819
2820 static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
2821 {
2822 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2823
2824 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2825 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2826 return true;
2827 }
2828
2829 dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
2830 obj_req->ex.oe_objno);
2831 return false;
2832 }
2833
2834 static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2835 {
2836 struct ceph_osd_request *osd_req;
2837 int ret;
2838
2839 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2840 if (IS_ERR(osd_req))
2841 return PTR_ERR(osd_req);
2842
2843 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
2844 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2845 rbd_osd_setup_data(osd_req, 0);
2846 rbd_osd_format_read(osd_req);
2847
2848 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2849 if (ret)
2850 return ret;
2851
2852 rbd_osd_submit(osd_req);
2853 return 0;
2854 }
2855
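/*
 * Read the parent data described by obj_req->img_extents by issuing a
 * child image request against the parent device.  For reads the child
 * request targets the original data buffer; for writes (copyup) it
 * fills the previously allocated copyup_bvecs.  Completion comes back
 * through rbd_obj_handle_request() via the child's obj_request link.
 */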
2856 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2857 {
2858 struct rbd_img_request *img_req = obj_req->img_request;
2859 struct rbd_device *parent = img_req->rbd_dev->parent;
2860 struct rbd_img_request *child_img_req;
2861 int ret;
2862
2863 child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
2864 if (!child_img_req)
2865 return -ENOMEM;
2866
2867 rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
2868 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2869 child_img_req->obj_request = obj_req;
2870
2871 down_read(&parent->header_rwsem);
2872 rbd_img_capture_header(child_img_req);
2873 up_read(&parent->header_rwsem);
2874
2875 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
2876 obj_req);
2877
2878 if (!rbd_img_is_write(img_req)) {
2879 switch (img_req->data_type) {
2880 case OBJ_REQUEST_BIO:
2881 ret = __rbd_img_fill_from_bio(child_img_req,
2882 obj_req->img_extents,
2883 obj_req->num_img_extents,
2884 &obj_req->bio_pos);
2885 break;
2886 case OBJ_REQUEST_BVECS:
2887 case OBJ_REQUEST_OWN_BVECS:
2888 ret = __rbd_img_fill_from_bvecs(child_img_req,
2889 obj_req->img_extents,
2890 obj_req->num_img_extents,
2891 &obj_req->bvec_pos);
2892 break;
2893 default:
2894 BUG();
2895 }
2896 } else {
2897 ret = rbd_img_fill_from_bvecs(child_img_req,
2898 obj_req->img_extents,
2899 obj_req->num_img_extents,
2900 obj_req->copyup_bvecs);
2901 }
2902 if (ret) {
2903 rbd_img_request_destroy(child_img_req);
2904 return ret;
2905 }
2906
2907 /* avoid parent chain recursion */
2908 rbd_img_schedule(child_img_req, 0);
2909 return 0;
2910 }
2911
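/*
 * Object request read state machine: START -> OBJECT [-> PARENT].
 * Returns true when the object request is done, with *result holding
 * the outcome; returns false while waiting for an OSD or parent reply.
 * -ENOENT (a hole in the image) and short reads become zero-fill here.
 */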
2912 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
2913 {
2914 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2915 int ret;
2916
2917 again:
2918 switch (obj_req->read_state) {
2919 case RBD_OBJ_READ_START:
2920 rbd_assert(!*result);
2921
2922 if (!rbd_obj_may_exist(obj_req)) {
2923 *result = -ENOENT;
2924 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2925 goto again;
2926 }
2927
2928 ret = rbd_obj_read_object(obj_req);
2929 if (ret) {
2930 *result = ret;
2931 return true;
2932 }
2933 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2934 return false;
2935 case RBD_OBJ_READ_OBJECT:
2936 if (*result == -ENOENT && rbd_dev->parent_overlap) {
2937 /* reverse map this object extent onto the parent */
2938 ret = rbd_obj_calc_img_extents(obj_req, false);
2939 if (ret) {
2940 *result = ret;
2941 return true;
2942 }
2943 if (obj_req->num_img_extents) {
2944 ret = rbd_obj_read_from_parent(obj_req);
2945 if (ret) {
2946 *result = ret;
2947 return true;
2948 }
2949 obj_req->read_state = RBD_OBJ_READ_PARENT;
2950 return false;
2951 }
2952 }
2953
2954 /*
2955 * -ENOENT means a hole in the image -- zero-fill the entire
2956 * length of the request. A short read also implies zero-fill
2957 * to the end of the request.
2958 */
2959 if (*result == -ENOENT) {
2960 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
2961 *result = 0;
2962 } else if (*result >= 0) {
2963 if (*result < obj_req->ex.oe_len)
2964 rbd_obj_zero_range(obj_req, *result,
2965 obj_req->ex.oe_len - *result);
2966 else
2967 rbd_assert(*result == obj_req->ex.oe_len);
2968 *result = 0;
2969 }
2970 return true;
2971 case RBD_OBJ_READ_PARENT:
2972 /*
2973 * The parent image is read only up to the overlap -- zero-fill
2974 * from the overlap to the end of the request.
2975 */
2976 if (!*result) {
2977 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
2978
2979 if (obj_overlap < obj_req->ex.oe_len)
2980 rbd_obj_zero_range(obj_req, obj_overlap,
2981 obj_req->ex.oe_len - obj_overlap);
2982 }
2983 return true;
2984 default:
2985 BUG();
2986 }
2987 }
2988
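/*
 * Record the object map verdict in RBD_OBJ_FLAG_MAY_EXIST and treat
 * the request as a no-op if it was flagged NOOP_FOR_NONEXISTENT
 * (discards and zeroouts) and the target object doesn't exist.
 */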
2989 static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
2990 {
2991 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2992
2993 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
2994 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2995
2996 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
2997 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
2998 dout("%s %p noop for nonexistent\n", __func__, obj_req);
2999 return true;
3000 }
3001
3002 return false;
3003 }
3004
3005 /*
3006 * Return:
3007 * 0 - object map update sent
3008 * 1 - object map update isn't needed
3009 * <0 - error
3010 */
3011 static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
3012 {
3013 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3014 u8 new_state;
3015
3016 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3017 return 1;
3018
3019 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3020 new_state = OBJECT_PENDING;
3021 else
3022 new_state = OBJECT_EXISTS;
3023
3024 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
3025 }
3026
3027 static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
3028 {
3029 struct ceph_osd_request *osd_req;
3030 int num_ops = count_write_ops(obj_req);
3031 int which = 0;
3032 int ret;
3033
3034 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
3035 num_ops++; /* stat */
3036
3037 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3038 if (IS_ERR(osd_req))
3039 return PTR_ERR(osd_req);
3040
3041 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3042 ret = rbd_osd_setup_stat(osd_req, which++);
3043 if (ret)
3044 return ret;
3045 }
3046
3047 rbd_osd_setup_write_ops(osd_req, which);
3048 rbd_osd_format_write(osd_req);
3049
3050 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3051 if (ret)
3052 return ret;
3053
3054 rbd_osd_submit(osd_req);
3055 return 0;
3056 }
3057
3058 /*
3059 * copyup_bvecs pages are never highmem pages
3060 */
3061 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
3062 {
3063 struct ceph_bvec_iter it = {
3064 .bvecs = bvecs,
3065 .iter = { .bi_size = bytes },
3066 };
3067
3068 ceph_bvec_iter_advance_step(&it, bytes, ({
3069 if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
3070 bv.bv_len))
3071 return false;
3072 }));
3073 return true;
3074 }
3075
3076 #define MODS_ONLY U32_MAX
3077
3078 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
3079 u32 bytes)
3080 {
3081 struct ceph_osd_request *osd_req;
3082 int ret;
3083
3084 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3085 rbd_assert(bytes > 0 && bytes != MODS_ONLY);
3086
3087 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3088 if (IS_ERR(osd_req))
3089 return PTR_ERR(osd_req);
3090
3091 ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
3092 if (ret)
3093 return ret;
3094
3095 rbd_osd_format_write(osd_req);
3096
3097 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3098 if (ret)
3099 return ret;
3100
3101 rbd_osd_submit(osd_req);
3102 return 0;
3103 }
3104
3105 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3106 u32 bytes)
3107 {
3108 struct ceph_osd_request *osd_req;
3109 int num_ops = count_write_ops(obj_req);
3110 int which = 0;
3111 int ret;
3112
3113 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3114
3115 if (bytes != MODS_ONLY)
3116 num_ops++; /* copyup */
3117
3118 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3119 if (IS_ERR(osd_req))
3120 return PTR_ERR(osd_req);
3121
3122 if (bytes != MODS_ONLY) {
3123 ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
3124 if (ret)
3125 return ret;
3126 }
3127
3128 rbd_osd_setup_write_ops(osd_req, which);
3129 rbd_osd_format_write(osd_req);
3130
3131 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3132 if (ret)
3133 return ret;
3134
3135 rbd_osd_submit(osd_req);
3136 return 0;
3137 }
3138
3139 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3140 {
3141 u32 i;
3142
3143 rbd_assert(!obj_req->copyup_bvecs);
3144 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3145 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3146 sizeof(*obj_req->copyup_bvecs),
3147 GFP_NOIO);
3148 if (!obj_req->copyup_bvecs)
3149 return -ENOMEM;
3150
3151 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3152 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
3153
3154 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
3155 if (!obj_req->copyup_bvecs[i].bv_page)
3156 return -ENOMEM;
3157
3158 obj_req->copyup_bvecs[i].bv_offset = 0;
3159 obj_req->copyup_bvecs[i].bv_len = len;
3160 obj_overlap -= len;
3161 }
3162
3163 rbd_assert(!obj_overlap);
3164 return 0;
3165 }
3166
3167 /*
3168 * The target object doesn't exist. Read the data for the entire
3169 * target object up to the overlap point (if any) from the parent,
3170 * so we can use it for a copyup.
3171 */
3172 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
3173 {
3174 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3175 int ret;
3176
3177 rbd_assert(obj_req->num_img_extents);
3178 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3179 rbd_dev->parent_overlap);
3180 if (!obj_req->num_img_extents) {
3181 /*
3182 * The overlap has become 0 (most likely because the
3183 * image has been flattened). Re-submit the original write
3184 * request -- pass MODS_ONLY since the copyup isn't needed
3185 * anymore.
3186 */
3187 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
3188 }
3189
3190 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3191 if (ret)
3192 return ret;
3193
3194 return rbd_obj_read_from_parent(obj_req);
3195 }
3196
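/*
 * A copyup populates the object in the existing snapshots, so mark it
 * as existing in each snapshot's object map first (OBJECT_EXISTS, or
 * OBJECT_EXISTS_CLEAN for all but one snapshot when fast-diff is
 * enabled).  Skipped if the copyup data turned out to be all zeros.
 */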
3197 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
3198 {
3199 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3200 struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3201 u8 new_state;
3202 u32 i;
3203 int ret;
3204
3205 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3206
3207 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3208 return;
3209
3210 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3211 return;
3212
3213 for (i = 0; i < snapc->num_snaps; i++) {
3214 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3215 i + 1 < snapc->num_snaps)
3216 new_state = OBJECT_EXISTS_CLEAN;
3217 else
3218 new_state = OBJECT_EXISTS;
3219
3220 ret = rbd_object_map_update(obj_req, snapc->snaps[i],
3221 new_state, NULL);
3222 if (ret < 0) {
3223 obj_req->pending.result = ret;
3224 return;
3225 }
3226
3227 rbd_assert(!ret);
3228 obj_req->pending.num_pending++;
3229 }
3230 }
3231
3232 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3233 {
3234 u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3235 int ret;
3236
3237 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3238
3239 /*
3240 * Only send non-zero copyup data to save some I/O and network
3241 * bandwidth -- zero copyup data is equivalent to the object not
3242 * existing.
3243 */
3244 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3245 bytes = 0;
3246
3247 if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
3248 /*
3249 * Send a copyup request with an empty snapshot context to
3250 * deep-copyup the object through all existing snapshots.
3251 * A second request with the current snapshot context will be
3252 * sent for the actual modification.
3253 */
3254 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3255 if (ret) {
3256 obj_req->pending.result = ret;
3257 return;
3258 }
3259
3260 obj_req->pending.num_pending++;
3261 bytes = MODS_ONLY;
3262 }
3263
3264 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3265 if (ret) {
3266 obj_req->pending.result = ret;
3267 return;
3268 }
3269
3270 obj_req->pending.num_pending++;
3271 }
3272
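/*
 * Copyup state machine, entered from rbd_obj_advance_write() when the
 * initial write hits -ENOENT: read the overlapping data from the
 * parent, update the snapshot object maps and then issue the copyup
 * write(s).  The __ states wait for all outstanding replies to arrive.
 */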
3273 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3274 {
3275 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3276 int ret;
3277
3278 again:
3279 switch (obj_req->copyup_state) {
3280 case RBD_OBJ_COPYUP_START:
3281 rbd_assert(!*result);
3282
3283 ret = rbd_obj_copyup_read_parent(obj_req);
3284 if (ret) {
3285 *result = ret;
3286 return true;
3287 }
3288 if (obj_req->num_img_extents)
3289 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3290 else
3291 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3292 return false;
3293 case RBD_OBJ_COPYUP_READ_PARENT:
3294 if (*result)
3295 return true;
3296
3297 if (is_zero_bvecs(obj_req->copyup_bvecs,
3298 rbd_obj_img_extents_bytes(obj_req))) {
3299 dout("%s %p detected zeros\n", __func__, obj_req);
3300 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3301 }
3302
3303 rbd_obj_copyup_object_maps(obj_req);
3304 if (!obj_req->pending.num_pending) {
3305 *result = obj_req->pending.result;
3306 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3307 goto again;
3308 }
3309 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3310 return false;
3311 case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3312 if (!pending_result_dec(&obj_req->pending, result))
3313 return false;
3314 fallthrough;
3315 case RBD_OBJ_COPYUP_OBJECT_MAPS:
3316 if (*result) {
3317 rbd_warn(rbd_dev, "snap object map update failed: %d",
3318 *result);
3319 return true;
3320 }
3321
3322 rbd_obj_copyup_write_object(obj_req);
3323 if (!obj_req->pending.num_pending) {
3324 *result = obj_req->pending.result;
3325 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3326 goto again;
3327 }
3328 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3329 return false;
3330 case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3331 if (!pending_result_dec(&obj_req->pending, result))
3332 return false;
3333 fallthrough;
3334 case RBD_OBJ_COPYUP_WRITE_OBJECT:
3335 return true;
3336 default:
3337 BUG();
3338 }
3339 }
3340
3341 /*
3342 * Return:
3343 * 0 - object map update sent
3344 * 1 - object map update isn't needed
3345 * <0 - error
3346 */
3347 static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
3348 {
3349 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3350 u8 current_state = OBJECT_PENDING;
3351
3352 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3353 return 1;
3354
3355 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3356 return 1;
3357
3358 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
3359 &current_state);
3360 }
3361
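/*
 * Object request write state machine: START -> PRE_OBJECT_MAP ->
 * OBJECT -> COPYUP -> POST_OBJECT_MAP.  Object map updates and copyup
 * are skipped when not needed; as on the read side, returning false
 * means the request is waiting for an outstanding reply.
 */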
3362 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3363 {
3364 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3365 int ret;
3366
3367 again:
3368 switch (obj_req->write_state) {
3369 case RBD_OBJ_WRITE_START:
3370 rbd_assert(!*result);
3371
3372 rbd_obj_set_copyup_enabled(obj_req);
3373 if (rbd_obj_write_is_noop(obj_req))
3374 return true;
3375
3376 ret = rbd_obj_write_pre_object_map(obj_req);
3377 if (ret < 0) {
3378 *result = ret;
3379 return true;
3380 }
3381 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3382 if (ret > 0)
3383 goto again;
3384 return false;
3385 case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
3386 if (*result) {
3387 rbd_warn(rbd_dev, "pre object map update failed: %d",
3388 *result);
3389 return true;
3390 }
3391 ret = rbd_obj_write_object(obj_req);
3392 if (ret) {
3393 *result = ret;
3394 return true;
3395 }
3396 obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3397 return false;
3398 case RBD_OBJ_WRITE_OBJECT:
3399 if (*result == -ENOENT) {
3400 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3401 *result = 0;
3402 obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3403 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3404 goto again;
3405 }
3406 /*
3407 * On a non-existent object:
3408 * delete - -ENOENT, truncate/zero - 0
3409 */
3410 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3411 *result = 0;
3412 }
3413 if (*result)
3414 return true;
3415
3416 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3417 goto again;
3418 case __RBD_OBJ_WRITE_COPYUP:
3419 if (!rbd_obj_advance_copyup(obj_req, result))
3420 return false;
3421 fallthrough;
3422 case RBD_OBJ_WRITE_COPYUP:
3423 if (*result) {
3424 rbd_warn(rbd_dev, "copyup failed: %d", *result);
3425 return true;
3426 }
3427 ret = rbd_obj_write_post_object_map(obj_req);
3428 if (ret < 0) {
3429 *result = ret;
3430 return true;
3431 }
3432 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3433 if (ret > 0)
3434 goto again;
3435 return false;
3436 case RBD_OBJ_WRITE_POST_OBJECT_MAP:
3437 if (*result)
3438 rbd_warn(rbd_dev, "post object map update failed: %d",
3439 *result);
3440 return true;
3441 default:
3442 BUG();
3443 }
3444 }
3445
3446 /*
3447 * Return true if @obj_req is completed.
3448 */
3449 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3450 int *result)
3451 {
3452 struct rbd_img_request *img_req = obj_req->img_request;
3453 struct rbd_device *rbd_dev = img_req->rbd_dev;
3454 bool done;
3455
3456 mutex_lock(&obj_req->state_mutex);
3457 if (!rbd_img_is_write(img_req))
3458 done = rbd_obj_advance_read(obj_req, result);
3459 else
3460 done = rbd_obj_advance_write(obj_req, result);
3461 mutex_unlock(&obj_req->state_mutex);
3462
3463 if (done && *result) {
3464 rbd_assert(*result < 0);
3465 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3466 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3467 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
3468 }
3469 return done;
3470 }
3471
3472 /*
3473 * This is open-coded in rbd_img_handle_request() to avoid parent chain
3474 * recursion.
3475 */
3476 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
3477 {
3478 if (__rbd_obj_handle_request(obj_req, &result))
3479 rbd_img_handle_request(obj_req->img_request, result);
3480 }
3481
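/*
 * Whether this image request must run under the exclusive lock:
 * writes to a writable mapping of an image with the exclusive-lock
 * feature, plus reads when lock_on_read is set or the object map is
 * in use.
 */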
3482 static bool need_exclusive_lock(struct rbd_img_request *img_req)
3483 {
3484 struct rbd_device *rbd_dev = img_req->rbd_dev;
3485
3486 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3487 return false;
3488
3489 if (rbd_is_ro(rbd_dev))
3490 return false;
3491
3492 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
3493 if (rbd_dev->opts->lock_on_read ||
3494 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3495 return true;
3496
3497 return rbd_img_is_write(img_req);
3498 }
3499
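/*
 * Account @img_req against the exclusive lock: put it on running_list
 * if the lock is held, otherwise on acquiring_list.  Returns whether
 * the lock is currently held.
 */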
3500 static bool rbd_lock_add_request(struct rbd_img_request *img_req)
3501 {
3502 struct rbd_device *rbd_dev = img_req->rbd_dev;
3503 bool locked;
3504
3505 lockdep_assert_held(&rbd_dev->lock_rwsem);
3506 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
3507 spin_lock(&rbd_dev->lock_lists_lock);
3508 rbd_assert(list_empty(&img_req->lock_item));
3509 if (!locked)
3510 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3511 else
3512 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3513 spin_unlock(&rbd_dev->lock_lists_lock);
3514 return locked;
3515 }
3516
3517 static void rbd_lock_del_request(struct rbd_img_request *img_req)
3518 {
3519 struct rbd_device *rbd_dev = img_req->rbd_dev;
3520 bool need_wakeup = false;
3521
3522 lockdep_assert_held(&rbd_dev->lock_rwsem);
3523 spin_lock(&rbd_dev->lock_lists_lock);
3524 if (!list_empty(&img_req->lock_item)) {
3525 list_del_init(&img_req->lock_item);
3526 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3527 list_empty(&rbd_dev->running_list));
3528 }
3529 spin_unlock(&rbd_dev->lock_lists_lock);
3530 if (need_wakeup)
3531 complete(&rbd_dev->releasing_wait);
3532 }
3533
3534 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3535 {
3536 struct rbd_device *rbd_dev = img_req->rbd_dev;
3537
3538 if (!need_exclusive_lock(img_req))
3539 return 1;
3540
3541 if (rbd_lock_add_request(img_req))
3542 return 1;
3543
3544 if (rbd_dev->opts->exclusive) {
3545 WARN_ON(1); /* lock got released? */
3546 return -EROFS;
3547 }
3548
3549 /*
3550 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3551 * and cancel_delayed_work() in wake_lock_waiters().
3552 */
3553 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3554 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3555 return 0;
3556 }
3557
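/*
 * Start the object request state machines for all object requests in
 * @img_req.  For writes, the current snapshot context is captured
 * first so that every object request is sent with the same snapc.
 */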
3558 static void rbd_img_object_requests(struct rbd_img_request *img_req)
3559 {
3560 struct rbd_device *rbd_dev = img_req->rbd_dev;
3561 struct rbd_obj_request *obj_req;
3562
3563 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3564 rbd_assert(!need_exclusive_lock(img_req) ||
3565 __rbd_is_lock_owner(rbd_dev));
3566
3567 if (rbd_img_is_write(img_req)) {
3568 rbd_assert(!img_req->snapc);
3569 down_read(&rbd_dev->header_rwsem);
3570 img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
3571 up_read(&rbd_dev->header_rwsem);
3572 }
3573
3574 for_each_obj_request(img_req, obj_req) {
3575 int result = 0;
3576
3577 if (__rbd_obj_handle_request(obj_req, &result)) {
3578 if (result) {
3579 img_req->pending.result = result;
3580 return;
3581 }
3582 } else {
3583 img_req->pending.num_pending++;
3584 }
3585 }
3586 }
3587
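/*
 * Image request state machine: START -> EXCLUSIVE_LOCK ->
 * OBJECT_REQUESTS.  If the exclusive lock is needed but not yet held,
 * the request sits on acquiring_list and the state machine stops here
 * until it is kicked again once the lock has been acquired.
 */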
3588 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3589 {
3590 int ret;
3591
3592 again:
3593 switch (img_req->state) {
3594 case RBD_IMG_START:
3595 rbd_assert(!*result);
3596
3597 ret = rbd_img_exclusive_lock(img_req);
3598 if (ret < 0) {
3599 *result = ret;
3600 return true;
3601 }
3602 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3603 if (ret > 0)
3604 goto again;
3605 return false;
3606 case RBD_IMG_EXCLUSIVE_LOCK:
3607 if (*result)
3608 return true;
3609
3610 rbd_img_object_requests(img_req);
3611 if (!img_req->pending.num_pending) {
3612 *result = img_req->pending.result;
3613 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3614 goto again;
3615 }
3616 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3617 return false;
3618 case __RBD_IMG_OBJECT_REQUESTS:
3619 if (!pending_result_dec(&img_req->pending, result))
3620 return false;
3621 fallthrough;
3622 case RBD_IMG_OBJECT_REQUESTS:
3623 return true;
3624 default:
3625 BUG();
3626 }
3627 }
3628
3629 /*
3630 * Return true if @img_req is completed.
3631 */
3632 static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3633 int *result)
3634 {
3635 struct rbd_device *rbd_dev = img_req->rbd_dev;
3636 bool done;
3637
3638 if (need_exclusive_lock(img_req)) {
3639 down_read(&rbd_dev->lock_rwsem);
3640 mutex_lock(&img_req->state_mutex);
3641 done = rbd_img_advance(img_req, result);
3642 if (done)
3643 rbd_lock_del_request(img_req);
3644 mutex_unlock(&img_req->state_mutex);
3645 up_read(&rbd_dev->lock_rwsem);
3646 } else {
3647 mutex_lock(&img_req->state_mutex);
3648 done = rbd_img_advance(img_req, result);
3649 mutex_unlock(&img_req->state_mutex);
3650 }
3651
3652 if (done && *result) {
3653 rbd_assert(*result < 0);
3654 rbd_warn(rbd_dev, "%s%s result %d",
3655 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3656 obj_op_name(img_req->op_type), *result);
3657 }
3658 return done;
3659 }
3660
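/*
 * Drive the image request to completion.  A completed child request is
 * fed back into its parent object request; a completed top-level
 * request ends the corresponding block layer request.
 */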
3661 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3662 {
3663 again:
3664 if (!__rbd_img_handle_request(img_req, &result))
3665 return;
3666
3667 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3668 struct rbd_obj_request *obj_req = img_req->obj_request;
3669
3670 rbd_img_request_destroy(img_req);
3671 if (__rbd_obj_handle_request(obj_req, &result)) {
3672 img_req = obj_req->img_request;
3673 goto again;
3674 }
3675 } else {
3676 struct request *rq = blk_mq_rq_from_pdu(img_req);
3677
3678 rbd_img_request_destroy(img_req);
3679 blk_mq_end_request(rq, errno_to_blk_status(result));
3680 }
3681 }
3682
3683 static const struct rbd_client_id rbd_empty_cid;
3684
3685 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3686 const struct rbd_client_id *rhs)
3687 {
3688 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3689 }
3690
3691 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3692 {
3693 struct rbd_client_id cid;
3694
3695 mutex_lock(&rbd_dev->watch_mutex);
3696 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3697 cid.handle = rbd_dev->watch_cookie;
3698 mutex_unlock(&rbd_dev->watch_mutex);
3699 return cid;
3700 }
3701
3702 /*
3703 * lock_rwsem must be held for write
3704 */
3705 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3706 const struct rbd_client_id *cid)
3707 {
3708 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3709 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3710 cid->gid, cid->handle);
3711 rbd_dev->owner_cid = *cid; /* struct */
3712 }
3713
3714 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3715 {
3716 mutex_lock(&rbd_dev->watch_mutex);
3717 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3718 mutex_unlock(&rbd_dev->watch_mutex);
3719 }
3720
3721 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3722 {
3723 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3724
3725 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3726 strcpy(rbd_dev->lock_cookie, cookie);
3727 rbd_set_owner_cid(rbd_dev, &cid);
3728 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3729 }
3730
3731 /*
3732 * lock_rwsem must be held for write
3733 */
3734 static int rbd_lock(struct rbd_device *rbd_dev)
3735 {
3736 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3737 char cookie[32];
3738 int ret;
3739
3740 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3741 rbd_dev->lock_cookie[0] != '\0');
3742
3743 format_lock_cookie(rbd_dev, cookie);
3744 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3745 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3746 RBD_LOCK_TAG, "", 0);
3747 if (ret && ret != -EEXIST)
3748 return ret;
3749
3750 __rbd_lock(rbd_dev, cookie);
3751 return 0;
3752 }
3753
3754 /*
3755 * lock_rwsem must be held for write
3756 */
3757 static void rbd_unlock(struct rbd_device *rbd_dev)
3758 {
3759 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3760 int ret;
3761
3762 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3763 rbd_dev->lock_cookie[0] == '\0');
3764
3765 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3766 RBD_LOCK_NAME, rbd_dev->lock_cookie);
3767 if (ret && ret != -ENOENT)
3768 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
3769
3770 /* treat errors as if the image is unlocked */
3771 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3772 rbd_dev->lock_cookie[0] = '\0';
3773 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3774 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3775 }
3776
3777 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3778 enum rbd_notify_op notify_op,
3779 struct page ***preply_pages,
3780 size_t *preply_len)
3781 {
3782 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3783 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3784 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3785 int buf_size = sizeof(buf);
3786 void *p = buf;
3787
3788 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3789
3790 /* encode *LockPayload NotifyMessage (op + ClientId) */
3791 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3792 ceph_encode_32(&p, notify_op);
3793 ceph_encode_64(&p, cid.gid);
3794 ceph_encode_64(&p, cid.handle);
3795
3796 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3797 &rbd_dev->header_oloc, buf, buf_size,
3798 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
3799 }
3800
3801 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3802 enum rbd_notify_op notify_op)
3803 {
3804 __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
3805 }
3806
3807 static void rbd_notify_acquired_lock(struct work_struct *work)
3808 {
3809 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3810 acquired_lock_work);
3811
3812 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3813 }
3814
3815 static void rbd_notify_released_lock(struct work_struct *work)
3816 {
3817 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3818 released_lock_work);
3819
3820 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3821 }
3822
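/*
 * Ask the current lock owner to release the lock by sending a
 * REQUEST_LOCK notification and decoding its ResponseMessage.  Returns
 * the owner's response (0 means it will release the lock), -ETIMEDOUT
 * if no owner responded, or another negative error code.
 */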
3823 static int rbd_request_lock(struct rbd_device *rbd_dev)
3824 {
3825 struct page **reply_pages;
3826 size_t reply_len;
3827 bool lock_owner_responded = false;
3828 int ret;
3829
3830 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3831
3832 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3833 &reply_pages, &reply_len);
3834 if (ret && ret != -ETIMEDOUT) {
3835 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3836 goto out;
3837 }
3838
3839 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3840 void *p = page_address(reply_pages[0]);
3841 void *const end = p + reply_len;
3842 u32 n;
3843
3844 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3845 while (n--) {
3846 u8 struct_v;
3847 u32 len;
3848
3849 ceph_decode_need(&p, end, 8 + 8, e_inval);
3850 p += 8 + 8; /* skip gid and cookie */
3851
3852 ceph_decode_32_safe(&p, end, len, e_inval);
3853 if (!len)
3854 continue;
3855
3856 if (lock_owner_responded) {
3857 rbd_warn(rbd_dev,
3858 "duplicate lock owners detected");
3859 ret = -EIO;
3860 goto out;
3861 }
3862
3863 lock_owner_responded = true;
3864 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3865 &struct_v, &len);
3866 if (ret) {
3867 rbd_warn(rbd_dev,
3868 "failed to decode ResponseMessage: %d",
3869 ret);
3870 goto e_inval;
3871 }
3872
3873 ret = ceph_decode_32(&p);
3874 }
3875 }
3876
3877 if (!lock_owner_responded) {
3878 rbd_warn(rbd_dev, "no lock owners detected");
3879 ret = -ETIMEDOUT;
3880 }
3881
3882 out:
3883 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3884 return ret;
3885
3886 e_inval:
3887 ret = -EINVAL;
3888 goto out;
3889 }
3890
3891 /*
3892 * Wake up whoever is waiting for the lock: either image request
3893 * state machine(s) or rbd_add_acquire_lock() (i.e. "rbd map").
3894 */
3895 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3896 {
3897 struct rbd_img_request *img_req;
3898
3899 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3900 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3901
3902 cancel_delayed_work(&rbd_dev->lock_dwork);
3903 if (!completion_done(&rbd_dev->acquire_wait)) {
3904 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3905 list_empty(&rbd_dev->running_list));
3906 rbd_dev->acquire_err = result;
3907 complete_all(&rbd_dev->acquire_wait);
3908 return;
3909 }
3910
3911 while (!list_empty(&rbd_dev->acquiring_list)) {
3912 img_req = list_first_entry(&rbd_dev->acquiring_list,
3913 struct rbd_img_request, lock_item);
3914 mutex_lock(&img_req->state_mutex);
3915 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3916 if (!result)
3917 list_move_tail(&img_req->lock_item,
3918 &rbd_dev->running_list);
3919 else
3920 list_del_init(&img_req->lock_item);
3921 rbd_img_schedule(img_req, result);
3922 mutex_unlock(&img_req->state_mutex);
3923 }
3924 }
3925
3926 static bool locker_equal(const struct ceph_locker *lhs,
3927 const struct ceph_locker *rhs)
3928 {
3929 return lhs->id.name.type == rhs->id.name.type &&
3930 lhs->id.name.num == rhs->id.name.num &&
3931 !strcmp(lhs->id.cookie, rhs->id.cookie) &&
3932 ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
3933 }
3934
3935 static void free_locker(struct ceph_locker *locker)
3936 {
3937 if (locker)
3938 ceph_free_lockers(locker, 1);
3939 }
3940
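/*
 * Look up the current holder of the exclusive lock on the image header.
 * Returns the locker, NULL if the header is not locked, or an ERR_PTR
 * (-EBUSY if the header is locked by an external mechanism or with a
 * shared lock).
 */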
3941 static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
3942 {
3943 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3944 struct ceph_locker *lockers;
3945 u32 num_lockers;
3946 u8 lock_type;
3947 char *lock_tag;
3948 int ret;
3949
3950 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3951
3952 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3953 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3954 &lock_type, &lock_tag, &lockers, &num_lockers);
3955 if (ret) {
3956 rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
3957 return ERR_PTR(ret);
3958 }
3959
3960 if (num_lockers == 0) {
3961 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3962 lockers = NULL;
3963 goto out;
3964 }
3965
3966 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3967 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3968 lock_tag);
3969 goto err_busy;
3970 }
3971
3972 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3973 rbd_warn(rbd_dev, "shared lock type detected");
3974 goto err_busy;
3975 }
3976
3977 WARN_ON(num_lockers != 1);
3978 if (strncmp(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3979 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3980 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3981 lockers[0].id.cookie);
3982 goto err_busy;
3983 }
3984
3985 out:
3986 kfree(lock_tag);
3987 return lockers;
3988
3989 err_busy:
3990 kfree(lock_tag);
3991 ceph_free_lockers(lockers, num_lockers);
3992 return ERR_PTR(-EBUSY);
3993 }
3994
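/*
 * Check whether the lock owner still has a watch established on the
 * header object.  Returns 1 if a matching watcher is found (recording
 * its client id as the owner), 0 if not, or a negative error code.
 */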
3995 static int find_watcher(struct rbd_device *rbd_dev,
3996 const struct ceph_locker *locker)
3997 {
3998 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3999 struct ceph_watch_item *watchers;
4000 u32 num_watchers;
4001 u64 cookie;
4002 int i;
4003 int ret;
4004
4005 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
4006 &rbd_dev->header_oloc, &watchers,
4007 &num_watchers);
4008 if (ret) {
4009 rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
4010 return ret;
4011 }
4012
4013 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
4014 for (i = 0; i < num_watchers; i++) {
4015 /*
4016 * Ignore addr->type while comparing. This mimics
4017 * entity_addr_t::get_legacy_str() + strcmp().
4018 */
4019 if (ceph_addr_equal_no_type(&watchers[i].addr,
4020 &locker->info.addr) &&
4021 watchers[i].cookie == cookie) {
4022 struct rbd_client_id cid = {
4023 .gid = le64_to_cpu(watchers[i].name.num),
4024 .handle = cookie,
4025 };
4026
4027 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
4028 rbd_dev, cid.gid, cid.handle);
4029 rbd_set_owner_cid(rbd_dev, &cid);
4030 ret = 1;
4031 goto out;
4032 }
4033 }
4034
4035 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
4036 ret = 0;
4037 out:
4038 kfree(watchers);
4039 return ret;
4040 }
4041
4042 /*
4043 * lock_rwsem must be held for write
4044 */
4045 static int rbd_try_lock(struct rbd_device *rbd_dev)
4046 {
4047 struct ceph_client *client = rbd_dev->rbd_client->client;
4048 struct ceph_locker *locker, *refreshed_locker;
4049 int ret;
4050
4051 for (;;) {
4052 locker = refreshed_locker = NULL;
4053
4054 ret = rbd_lock(rbd_dev);
4055 if (!ret)
4056 goto out;
4057 if (ret != -EBUSY) {
4058 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
4059 goto out;
4060 }
4061
4062 /* determine if the current lock holder is still alive */
4063 locker = get_lock_owner_info(rbd_dev);
4064 if (IS_ERR(locker)) {
4065 ret = PTR_ERR(locker);
4066 locker = NULL;
4067 goto out;
4068 }
4069 if (!locker)
4070 goto again;
4071
4072 ret = find_watcher(rbd_dev, locker);
4073 if (ret)
4074 goto out; /* request lock or error */
4075
4076 refreshed_locker = get_lock_owner_info(rbd_dev);
4077 if (IS_ERR(refreshed_locker)) {
4078 ret = PTR_ERR(refreshed_locker);
4079 refreshed_locker = NULL;
4080 goto out;
4081 }
4082 if (!refreshed_locker ||
4083 !locker_equal(locker, refreshed_locker))
4084 goto again;
4085
4086 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
4087 ENTITY_NAME(locker->id.name));
4088
4089 ret = ceph_monc_blocklist_add(&client->monc,
4090 &locker->info.addr);
4091 if (ret) {
4092 rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
4093 ENTITY_NAME(locker->id.name), ret);
4094 goto out;
4095 }
4096
4097 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
4098 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4099 locker->id.cookie, &locker->id.name);
4100 if (ret && ret != -ENOENT) {
4101 rbd_warn(rbd_dev, "failed to break header lock: %d",
4102 ret);
4103 goto out;
4104 }
4105
4106 again:
4107 free_locker(refreshed_locker);
4108 free_locker(locker);
4109 }
4110
4111 out:
4112 free_locker(refreshed_locker);
4113 free_locker(locker);
4114 return ret;
4115 }
4116
4117 static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
4118 {
4119 int ret;
4120
4121 ret = rbd_dev_refresh(rbd_dev);
4122 if (ret)
4123 return ret;
4124
4125 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
4126 ret = rbd_object_map_open(rbd_dev);
4127 if (ret)
4128 return ret;
4129 }
4130
4131 return 0;
4132 }
4133
4134 /*
4135 * Return:
4136 * 0 - lock acquired
4137 * 1 - caller should call rbd_request_lock()
4138 * <0 - error
4139 */
4140 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
4141 {
4142 int ret;
4143
4144 down_read(&rbd_dev->lock_rwsem);
4145 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4146 rbd_dev->lock_state);
4147 if (__rbd_is_lock_owner(rbd_dev)) {
4148 up_read(&rbd_dev->lock_rwsem);
4149 return 0;
4150 }
4151
4152 up_read(&rbd_dev->lock_rwsem);
4153 down_write(&rbd_dev->lock_rwsem);
4154 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4155 rbd_dev->lock_state);
4156 if (__rbd_is_lock_owner(rbd_dev)) {
4157 up_write(&rbd_dev->lock_rwsem);
4158 return 0;
4159 }
4160
4161 ret = rbd_try_lock(rbd_dev);
4162 if (ret < 0) {
4163 rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
4164 goto out;
4165 }
4166 if (ret > 0) {
4167 up_write(&rbd_dev->lock_rwsem);
4168 return ret;
4169 }
4170
4171 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4172 rbd_assert(list_empty(&rbd_dev->running_list));
4173
4174 ret = rbd_post_acquire_action(rbd_dev);
4175 if (ret) {
4176 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4177 /*
4178 * Can't stay in RBD_LOCK_STATE_LOCKED because
4179 * rbd_lock_add_request() would let the request through,
4180 * assuming that e.g. object map is locked and loaded.
4181 */
4182 rbd_unlock(rbd_dev);
4183 }
4184
4185 out:
4186 wake_lock_waiters(rbd_dev, ret);
4187 up_write(&rbd_dev->lock_rwsem);
4188 return ret;
4189 }
4190
4191 static void rbd_acquire_lock(struct work_struct *work)
4192 {
4193 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4194 struct rbd_device, lock_dwork);
4195 int ret;
4196
4197 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4198 again:
4199 ret = rbd_try_acquire_lock(rbd_dev);
4200 if (ret <= 0) {
4201 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4202 return;
4203 }
4204
4205 ret = rbd_request_lock(rbd_dev);
4206 if (ret == -ETIMEDOUT) {
4207 goto again; /* treat this as a dead client */
4208 } else if (ret == -EROFS) {
4209 rbd_warn(rbd_dev, "peer will not release lock");
4210 down_write(&rbd_dev->lock_rwsem);
4211 wake_lock_waiters(rbd_dev, ret);
4212 up_write(&rbd_dev->lock_rwsem);
4213 } else if (ret < 0) {
4214 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4215 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4216 RBD_RETRY_DELAY);
4217 } else {
4218 /*
4219 * lock owner acked, but resend if we don't see them
4220 * release the lock
4221 */
4222 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
4223 rbd_dev);
4224 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4225 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4226 }
4227 }
4228
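/*
 * Switch to RBD_LOCK_STATE_RELEASING and wait for in-flight image
 * requests on the running list to drain.  Return true if the lock can
 * now be released, false if the lock state changed in the meantime (or
 * the lock was not held to begin with).
 */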
4229 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4230 {
4231 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4232 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4233
4234 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4235 return false;
4236
4237 /*
4238 * Ensure that all in-flight IO is flushed.
4239 */
4240 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4241 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4242 if (list_empty(&rbd_dev->running_list))
4243 return true;
4244
4245 up_write(&rbd_dev->lock_rwsem);
4246 wait_for_completion(&rbd_dev->releasing_wait);
4247
4248 down_write(&rbd_dev->lock_rwsem);
4249 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4250 return false;
4251
4252 rbd_assert(list_empty(&rbd_dev->running_list));
4253 return true;
4254 }
4255
4256 static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4257 {
4258 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4259 rbd_object_map_close(rbd_dev);
4260 }
4261
4262 static void __rbd_release_lock(struct rbd_device *rbd_dev)
4263 {
4264 rbd_assert(list_empty(&rbd_dev->running_list));
4265
4266 rbd_pre_release_action(rbd_dev);
4267 rbd_unlock(rbd_dev);
4268 }
4269
4270 /*
4271 * lock_rwsem must be held for write
4272 */
4273 static void rbd_release_lock(struct rbd_device *rbd_dev)
4274 {
4275 if (!rbd_quiesce_lock(rbd_dev))
4276 return;
4277
4278 __rbd_release_lock(rbd_dev);
4279
4280 /*
4281 * Give others a chance to grab the lock - we would re-acquire
4282 * almost immediately if we got new IO while draining the running
4283 * list otherwise. We need to ack our own notifications, so this
4284 * lock_dwork will be requeued from rbd_handle_released_lock() by
4285 * way of maybe_kick_acquire().
4286 */
4287 cancel_delayed_work(&rbd_dev->lock_dwork);
4288 }
4289
4290 static void rbd_release_lock_work(struct work_struct *work)
4291 {
4292 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4293 unlock_work);
4294
4295 down_write(&rbd_dev->lock_rwsem);
4296 rbd_release_lock(rbd_dev);
4297 up_write(&rbd_dev->lock_rwsem);
4298 }
4299
4300 static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4301 {
4302 bool have_requests;
4303
4304 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4305 if (__rbd_is_lock_owner(rbd_dev))
4306 return;
4307
4308 spin_lock(&rbd_dev->lock_lists_lock);
4309 have_requests = !list_empty(&rbd_dev->acquiring_list);
4310 spin_unlock(&rbd_dev->lock_lists_lock);
4311 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4312 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4313 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4314 }
4315 }
4316
4317 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4318 void **p)
4319 {
4320 struct rbd_client_id cid = { 0 };
4321
4322 if (struct_v >= 2) {
4323 cid.gid = ceph_decode_64(p);
4324 cid.handle = ceph_decode_64(p);
4325 }
4326
4327 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4328 cid.handle);
4329 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4330 down_write(&rbd_dev->lock_rwsem);
4331 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4332 dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
4333 __func__, rbd_dev, cid.gid, cid.handle);
4334 } else {
4335 rbd_set_owner_cid(rbd_dev, &cid);
4336 }
4337 downgrade_write(&rbd_dev->lock_rwsem);
4338 } else {
4339 down_read(&rbd_dev->lock_rwsem);
4340 }
4341
4342 maybe_kick_acquire(rbd_dev);
4343 up_read(&rbd_dev->lock_rwsem);
4344 }
4345
4346 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4347 void **p)
4348 {
4349 struct rbd_client_id cid = { 0 };
4350
4351 if (struct_v >= 2) {
4352 cid.gid = ceph_decode_64(p);
4353 cid.handle = ceph_decode_64(p);
4354 }
4355
4356 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4357 cid.handle);
4358 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4359 down_write(&rbd_dev->lock_rwsem);
4360 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4361 dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
4362 __func__, rbd_dev, cid.gid, cid.handle,
4363 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4364 } else {
4365 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4366 }
4367 downgrade_write(&rbd_dev->lock_rwsem);
4368 } else {
4369 down_read(&rbd_dev->lock_rwsem);
4370 }
4371
4372 maybe_kick_acquire(rbd_dev);
4373 up_read(&rbd_dev->lock_rwsem);
4374 }
4375
4376 /*
4377 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4378 * ResponseMessage is needed.
4379 */
4380 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4381 void **p)
4382 {
4383 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4384 struct rbd_client_id cid = { 0 };
4385 int result = 1;
4386
4387 if (struct_v >= 2) {
4388 cid.gid = ceph_decode_64(p);
4389 cid.handle = ceph_decode_64(p);
4390 }
4391
4392 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4393 cid.handle);
4394 if (rbd_cid_equal(&cid, &my_cid))
4395 return result;
4396
4397 down_read(&rbd_dev->lock_rwsem);
4398 if (__rbd_is_lock_owner(rbd_dev)) {
4399 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4400 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4401 goto out_unlock;
4402
4403 /*
4404 * encode ResponseMessage(0) so the peer can detect
4405 * a missing owner
4406 */
4407 result = 0;
4408
4409 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4410 if (!rbd_dev->opts->exclusive) {
4411 dout("%s rbd_dev %p queueing unlock_work\n",
4412 __func__, rbd_dev);
4413 queue_work(rbd_dev->task_wq,
4414 &rbd_dev->unlock_work);
4415 } else {
4416 /* refuse to release the lock */
4417 result = -EROFS;
4418 }
4419 }
4420 }
4421
4422 out_unlock:
4423 up_read(&rbd_dev->lock_rwsem);
4424 return result;
4425 }
4426
4427 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4428 u64 notify_id, u64 cookie, s32 *result)
4429 {
4430 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4431 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4432 int buf_size = sizeof(buf);
4433 int ret;
4434
4435 if (result) {
4436 void *p = buf;
4437
4438 /* encode ResponseMessage */
4439 ceph_start_encoding(&p, 1, 1,
4440 buf_size - CEPH_ENCODING_START_BLK_LEN);
4441 ceph_encode_32(&p, *result);
4442 } else {
4443 buf_size = 0;
4444 }
4445
4446 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4447 &rbd_dev->header_oloc, notify_id, cookie,
4448 buf, buf_size);
4449 if (ret)
4450 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4451 }
4452
4453 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4454 u64 cookie)
4455 {
4456 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4457 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4458 }
4459
4460 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4461 u64 notify_id, u64 cookie, s32 result)
4462 {
4463 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4464 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4465 }
4466
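/*
 * Watch callback: decode the NotifyMessage and dispatch on the notify
 * op.  Every notification is acknowledged; REQUEST_LOCK and unknown ops
 * may carry a ResponseMessage payload in the acknowledgement.
 */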
4467 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
4468 u64 notifier_id, void *data, size_t data_len)
4469 {
4470 struct rbd_device *rbd_dev = arg;
4471 void *p = data;
4472 void *const end = p + data_len;
4473 u8 struct_v = 0;
4474 u32 len;
4475 u32 notify_op;
4476 int ret;
4477
4478 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4479 __func__, rbd_dev, cookie, notify_id, data_len);
4480 if (data_len) {
4481 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4482 &struct_v, &len);
4483 if (ret) {
4484 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4485 ret);
4486 return;
4487 }
4488
4489 notify_op = ceph_decode_32(&p);
4490 } else {
4491 /* legacy notification for header updates */
4492 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4493 len = 0;
4494 }
4495
4496 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4497 switch (notify_op) {
4498 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4499 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4500 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4501 break;
4502 case RBD_NOTIFY_OP_RELEASED_LOCK:
4503 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4504 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4505 break;
4506 case RBD_NOTIFY_OP_REQUEST_LOCK:
4507 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4508 if (ret <= 0)
4509 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4510 cookie, ret);
4511 else
4512 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4513 break;
4514 case RBD_NOTIFY_OP_HEADER_UPDATE:
4515 ret = rbd_dev_refresh(rbd_dev);
4516 if (ret)
4517 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4518
4519 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4520 break;
4521 default:
4522 if (rbd_is_lock_owner(rbd_dev))
4523 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4524 cookie, -EOPNOTSUPP);
4525 else
4526 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4527 break;
4528 }
4529 }
4530
4531 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4532
4533 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
4534 {
4535 struct rbd_device *rbd_dev = arg;
4536
4537 rbd_warn(rbd_dev, "encountered watch error: %d", err);
4538
4539 down_write(&rbd_dev->lock_rwsem);
4540 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4541 up_write(&rbd_dev->lock_rwsem);
4542
4543 mutex_lock(&rbd_dev->watch_mutex);
4544 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4545 __rbd_unregister_watch(rbd_dev);
4546 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4547
4548 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4549 }
4550 mutex_unlock(&rbd_dev->watch_mutex);
4551 }
4552
4553 /*
4554 * watch_mutex must be locked
4555 */
4556 static int __rbd_register_watch(struct rbd_device *rbd_dev)
4557 {
4558 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4559 struct ceph_osd_linger_request *handle;
4560
4561 rbd_assert(!rbd_dev->watch_handle);
4562 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4563
4564 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4565 &rbd_dev->header_oloc, rbd_watch_cb,
4566 rbd_watch_errcb, rbd_dev);
4567 if (IS_ERR(handle))
4568 return PTR_ERR(handle);
4569
4570 rbd_dev->watch_handle = handle;
4571 return 0;
4572 }
4573
4574 /*
4575 * watch_mutex must be locked
4576 */
4577 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4578 {
4579 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4580 int ret;
4581
4582 rbd_assert(rbd_dev->watch_handle);
4583 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4584
4585 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4586 if (ret)
4587 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4588
4589 rbd_dev->watch_handle = NULL;
4590 }
4591
4592 static int rbd_register_watch(struct rbd_device *rbd_dev)
4593 {
4594 int ret;
4595
4596 mutex_lock(&rbd_dev->watch_mutex);
4597 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4598 ret = __rbd_register_watch(rbd_dev);
4599 if (ret)
4600 goto out;
4601
4602 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4603 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4604
4605 out:
4606 mutex_unlock(&rbd_dev->watch_mutex);
4607 return ret;
4608 }
4609
4610 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4611 {
4612 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4613
4614 cancel_work_sync(&rbd_dev->acquired_lock_work);
4615 cancel_work_sync(&rbd_dev->released_lock_work);
4616 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4617 cancel_work_sync(&rbd_dev->unlock_work);
4618 }
4619
4620 /*
4621 * header_rwsem must not be held to avoid a deadlock with
4622 * rbd_dev_refresh() when flushing notifies.
4623 */
4624 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4625 {
4626 cancel_tasks_sync(rbd_dev);
4627
4628 mutex_lock(&rbd_dev->watch_mutex);
4629 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4630 __rbd_unregister_watch(rbd_dev);
4631 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4632 mutex_unlock(&rbd_dev->watch_mutex);
4633
4634 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4635 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4636 }
4637
4638 /*
4639 * lock_rwsem must be held for write
4640 */
4641 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4642 {
4643 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4644 char cookie[32];
4645 int ret;
4646
4647 if (!rbd_quiesce_lock(rbd_dev))
4648 return;
4649
4650 format_lock_cookie(rbd_dev, cookie);
4651 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4652 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4653 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4654 RBD_LOCK_TAG, cookie);
4655 if (ret) {
4656 if (ret != -EOPNOTSUPP)
4657 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4658 ret);
4659
4660 /*
4661 * Lock cookie cannot be updated on older OSDs, so do
4662 * a manual release and queue an acquire.
4663 */
4664 __rbd_release_lock(rbd_dev);
4665 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4666 } else {
4667 __rbd_lock(rbd_dev, cookie);
4668 wake_lock_waiters(rbd_dev, 0);
4669 }
4670 }
4671
4672 static void rbd_reregister_watch(struct work_struct *work)
4673 {
4674 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4675 struct rbd_device, watch_dwork);
4676 int ret;
4677
4678 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4679
4680 mutex_lock(&rbd_dev->watch_mutex);
4681 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4682 mutex_unlock(&rbd_dev->watch_mutex);
4683 return;
4684 }
4685
4686 ret = __rbd_register_watch(rbd_dev);
4687 if (ret) {
4688 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4689 if (ret != -EBLOCKLISTED && ret != -ENOENT) {
4690 queue_delayed_work(rbd_dev->task_wq,
4691 &rbd_dev->watch_dwork,
4692 RBD_RETRY_DELAY);
4693 mutex_unlock(&rbd_dev->watch_mutex);
4694 return;
4695 }
4696
4697 mutex_unlock(&rbd_dev->watch_mutex);
4698 down_write(&rbd_dev->lock_rwsem);
4699 wake_lock_waiters(rbd_dev, ret);
4700 up_write(&rbd_dev->lock_rwsem);
4701 return;
4702 }
4703
4704 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4705 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4706 mutex_unlock(&rbd_dev->watch_mutex);
4707
4708 down_write(&rbd_dev->lock_rwsem);
4709 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4710 rbd_reacquire_lock(rbd_dev);
4711 up_write(&rbd_dev->lock_rwsem);
4712
4713 ret = rbd_dev_refresh(rbd_dev);
4714 if (ret)
4715 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4716 }
4717
4718 /*
4719 * Synchronous osd object method call. Returns the number of bytes
4720 * returned in the inbound buffer, or a negative error code.
4721 */
4722 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4723 struct ceph_object_id *oid,
4724 struct ceph_object_locator *oloc,
4725 const char *method_name,
4726 const void *outbound,
4727 size_t outbound_size,
4728 void *inbound,
4729 size_t inbound_size)
4730 {
4731 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4732 struct page *req_page = NULL;
4733 struct page *reply_page;
4734 int ret;
4735
4736 /*
4737 * Method calls are ultimately read operations. The result
4738 * should be placed into the inbound buffer provided. They
4739 * also supply outbound data--parameters for the object
4740 * method. Currently if this is present it will be a
4741 * snapshot id.
4742 */
4743 if (outbound) {
4744 if (outbound_size > PAGE_SIZE)
4745 return -E2BIG;
4746
4747 req_page = alloc_page(GFP_KERNEL);
4748 if (!req_page)
4749 return -ENOMEM;
4750
4751 memcpy(page_address(req_page), outbound, outbound_size);
4752 }
4753
4754 reply_page = alloc_page(GFP_KERNEL);
4755 if (!reply_page) {
4756 if (req_page)
4757 __free_page(req_page);
4758 return -ENOMEM;
4759 }
4760
4761 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4762 CEPH_OSD_FLAG_READ, req_page, outbound_size,
4763 &reply_page, &inbound_size);
4764 if (!ret) {
4765 memcpy(inbound, page_address(reply_page), inbound_size);
4766 ret = inbound_size;
4767 }
4768
4769 if (req_page)
4770 __free_page(req_page);
4771 __free_page(reply_page);
4772 return ret;
4773 }
4774
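/*
 * Worker for a queued block layer request: validate it against the
 * current mapping size, set up the object requests and start the image
 * request state machine.
 */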
4775 static void rbd_queue_workfn(struct work_struct *work)
4776 {
4777 struct rbd_img_request *img_request =
4778 container_of(work, struct rbd_img_request, work);
4779 struct rbd_device *rbd_dev = img_request->rbd_dev;
4780 enum obj_operation_type op_type = img_request->op_type;
4781 struct request *rq = blk_mq_rq_from_pdu(img_request);
4782 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4783 u64 length = blk_rq_bytes(rq);
4784 u64 mapping_size;
4785 int result;
4786
4787 /* Ignore/skip any zero-length requests */
4788 if (!length) {
4789 dout("%s: zero-length request\n", __func__);
4790 result = 0;
4791 goto err_img_request;
4792 }
4793
4794 blk_mq_start_request(rq);
4795
4796 down_read(&rbd_dev->header_rwsem);
4797 mapping_size = rbd_dev->mapping.size;
4798 rbd_img_capture_header(img_request);
4799 up_read(&rbd_dev->header_rwsem);
4800
4801 if (offset + length > mapping_size) {
4802 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4803 length, mapping_size);
4804 result = -EIO;
4805 goto err_img_request;
4806 }
4807
4808 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4809 img_request, obj_op_name(op_type), offset, length);
4810
4811 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
4812 result = rbd_img_fill_nodata(img_request, offset, length);
4813 else
4814 result = rbd_img_fill_from_bio(img_request, offset, length,
4815 rq->bio);
4816 if (result)
4817 goto err_img_request;
4818
4819 rbd_img_handle_request(img_request, 0);
4820 return;
4821
4822 err_img_request:
4823 rbd_img_request_destroy(img_request);
4824 if (result)
4825 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4826 obj_op_name(op_type), length, offset, result);
4827 blk_mq_end_request(rq, errno_to_blk_status(result));
4828 }
4829
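/*
 * blk-mq ->queue_rq handler: map the block layer operation to an rbd
 * object operation type, reject writes to read-only mappings and hand
 * the request off to rbd_wq.
 */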
4830 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4831 const struct blk_mq_queue_data *bd)
4832 {
4833 struct rbd_device *rbd_dev = hctx->queue->queuedata;
4834 struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
4835 enum obj_operation_type op_type;
4836
4837 switch (req_op(bd->rq)) {
4838 case REQ_OP_DISCARD:
4839 op_type = OBJ_OP_DISCARD;
4840 break;
4841 case REQ_OP_WRITE_ZEROES:
4842 op_type = OBJ_OP_ZEROOUT;
4843 break;
4844 case REQ_OP_WRITE:
4845 op_type = OBJ_OP_WRITE;
4846 break;
4847 case REQ_OP_READ:
4848 op_type = OBJ_OP_READ;
4849 break;
4850 default:
4851 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
4852 return BLK_STS_IOERR;
4853 }
4854
4855 rbd_img_request_init(img_req, rbd_dev, op_type);
4856
4857 if (rbd_img_is_write(img_req)) {
4858 if (rbd_is_ro(rbd_dev)) {
4859 rbd_warn(rbd_dev, "%s on read-only mapping",
4860 obj_op_name(img_req->op_type));
4861 return BLK_STS_IOERR;
4862 }
4863 rbd_assert(!rbd_is_snap(rbd_dev));
4864 }
4865
4866 INIT_WORK(&img_req->work, rbd_queue_workfn);
4867 queue_work(rbd_wq, &img_req->work);
4868 return BLK_STS_OK;
4869 }
4870
4871 static void rbd_free_disk(struct rbd_device *rbd_dev)
4872 {
4873 blk_cleanup_queue(rbd_dev->disk->queue);
4874 blk_mq_free_tag_set(&rbd_dev->tag_set);
4875 put_disk(rbd_dev->disk);
4876 rbd_dev->disk = NULL;
4877 }
4878
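/*
 * Synchronously read up to @buf_len bytes from the given object into
 * @buf.  Returns the number of bytes read or a negative error code.
 */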
4879 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4880 struct ceph_object_id *oid,
4881 struct ceph_object_locator *oloc,
4882 void *buf, int buf_len)
4883
4884 {
4885 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4886 struct ceph_osd_request *req;
4887 struct page **pages;
4888 int num_pages = calc_pages_for(0, buf_len);
4889 int ret;
4890
4891 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4892 if (!req)
4893 return -ENOMEM;
4894
4895 ceph_oid_copy(&req->r_base_oid, oid);
4896 ceph_oloc_copy(&req->r_base_oloc, oloc);
4897 req->r_flags = CEPH_OSD_FLAG_READ;
4898
4899 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4900 if (IS_ERR(pages)) {
4901 ret = PTR_ERR(pages);
4902 goto out_req;
4903 }
4904
4905 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4906 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4907 true);
4908
4909 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4910 if (ret)
4911 goto out_req;
4912
4913 ceph_osdc_start_request(osdc, req, false);
4914 ret = ceph_osdc_wait_request(osdc, req);
4915 if (ret >= 0)
4916 ceph_copy_from_page_vector(pages, buf, 0, ret);
4917
4918 out_req:
4919 ceph_osdc_put_request(req);
4920 return ret;
4921 }
4922
4923 /*
4924 * Read the complete header for the given rbd device. On successful
4925 * return, the rbd_dev->header field will contain up-to-date
4926 * information about the image.
4927 */
4928 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
4929 struct rbd_image_header *header,
4930 bool first_time)
4931 {
4932 struct rbd_image_header_ondisk *ondisk = NULL;
4933 u32 snap_count = 0;
4934 u64 names_size = 0;
4935 u32 want_count;
4936 int ret;
4937
4938 /*
4939 * The complete header will include an array of its 64-bit
4940 * snapshot ids, followed by the names of those snapshots as
4941 * a contiguous block of NUL-terminated strings. Note that
4942 * the number of snapshots could change by the time we read
4943 * it in, in which case we re-read it.
4944 */
4945 do {
4946 size_t size;
4947
4948 kfree(ondisk);
4949
4950 size = sizeof (*ondisk);
4951 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4952 size += names_size;
4953 ondisk = kmalloc(size, GFP_KERNEL);
4954 if (!ondisk)
4955 return -ENOMEM;
4956
4957 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4958 &rbd_dev->header_oloc, ondisk, size);
4959 if (ret < 0)
4960 goto out;
4961 if ((size_t)ret < size) {
4962 ret = -ENXIO;
4963 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4964 size, ret);
4965 goto out;
4966 }
4967 if (!rbd_dev_ondisk_valid(ondisk)) {
4968 ret = -ENXIO;
4969 rbd_warn(rbd_dev, "invalid header");
4970 goto out;
4971 }
4972
4973 names_size = le64_to_cpu(ondisk->snap_names_len);
4974 want_count = snap_count;
4975 snap_count = le32_to_cpu(ondisk->snap_count);
4976 } while (snap_count != want_count);
4977
4978 ret = rbd_header_from_disk(header, ondisk, first_time);
4979 out:
4980 kfree(ondisk);
4981
4982 return ret;
4983 }
4984
4985 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4986 {
4987 sector_t size;
4988
4989 /*
4990 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4991 * try to update its size. If REMOVING is set, updating size
4992 * is just useless work since the device can't be opened.
4993 */
4994 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4995 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4996 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4997 dout("setting size to %llu sectors", (unsigned long long)size);
4998 set_capacity(rbd_dev->disk, size);
4999 revalidate_disk_size(rbd_dev->disk, true);
5000 }
5001 }
5002
5003 static const struct blk_mq_ops rbd_mq_ops = {
5004 .queue_rq = rbd_queue_rq,
5005 };
5006
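/*
 * Allocate the gendisk and set up the blk-mq request queue for the
 * mapping, deriving queue limits from the image layout and the mapping
 * options.
 */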
5007 static int rbd_init_disk(struct rbd_device *rbd_dev)
5008 {
5009 struct gendisk *disk;
5010 struct request_queue *q;
5011 unsigned int objset_bytes =
5012 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
5013 int err;
5014
5015 /* create gendisk info */
5016 disk = alloc_disk(single_major ?
5017 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
5018 RBD_MINORS_PER_MAJOR);
5019 if (!disk)
5020 return -ENOMEM;
5021
5022 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
5023 rbd_dev->dev_id);
5024 disk->major = rbd_dev->major;
5025 disk->first_minor = rbd_dev->minor;
5026 if (single_major)
5027 disk->flags |= GENHD_FL_EXT_DEVT;
5028 disk->fops = &rbd_bd_ops;
5029 disk->private_data = rbd_dev;
5030
5031 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
5032 rbd_dev->tag_set.ops = &rbd_mq_ops;
5033 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
5034 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
5035 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
5036 rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
5037 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
5038
5039 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
5040 if (err)
5041 goto out_disk;
5042
5043 q = blk_mq_init_queue(&rbd_dev->tag_set);
5044 if (IS_ERR(q)) {
5045 err = PTR_ERR(q);
5046 goto out_tag_set;
5047 }
5048
5049 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
5050 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
5051
5052 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
5053 q->limits.max_sectors = queue_max_hw_sectors(q);
5054 blk_queue_max_segments(q, USHRT_MAX);
5055 blk_queue_max_segment_size(q, UINT_MAX);
5056 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
5057 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
5058
5059 if (rbd_dev->opts->trim) {
5060 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
5061 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
5062 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
5063 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
5064 }
5065
5066 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
5067 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
5068
5069 /*
5070 * disk_release() expects a queue ref from add_disk() and will
5071 * put it. Hold an extra ref until add_disk() is called.
5072 */
5073 WARN_ON(!blk_get_queue(q));
5074 disk->queue = q;
5075 q->queuedata = rbd_dev;
5076
5077 rbd_dev->disk = disk;
5078
5079 return 0;
5080 out_tag_set:
5081 blk_mq_free_tag_set(&rbd_dev->tag_set);
5082 out_disk:
5083 put_disk(disk);
5084 return err;
5085 }
5086
5087 /*
5088 sysfs
5089 */
5090
5091 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
5092 {
5093 return container_of(dev, struct rbd_device, dev);
5094 }
5095
5096 static ssize_t rbd_size_show(struct device *dev,
5097 struct device_attribute *attr, char *buf)
5098 {
5099 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5100
5101 return sprintf(buf, "%llu\n",
5102 (unsigned long long)rbd_dev->mapping.size);
5103 }
5104
5105 static ssize_t rbd_features_show(struct device *dev,
5106 struct device_attribute *attr, char *buf)
5107 {
5108 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5109
5110 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
5111 }
5112
5113 static ssize_t rbd_major_show(struct device *dev,
5114 struct device_attribute *attr, char *buf)
5115 {
5116 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5117
5118 if (rbd_dev->major)
5119 return sprintf(buf, "%d\n", rbd_dev->major);
5120
5121 return sprintf(buf, "(none)\n");
5122 }
5123
5124 static ssize_t rbd_minor_show(struct device *dev,
5125 struct device_attribute *attr, char *buf)
5126 {
5127 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5128
5129 return sprintf(buf, "%d\n", rbd_dev->minor);
5130 }
5131
5132 static ssize_t rbd_client_addr_show(struct device *dev,
5133 struct device_attribute *attr, char *buf)
5134 {
5135 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5136 struct ceph_entity_addr *client_addr =
5137 ceph_client_addr(rbd_dev->rbd_client->client);
5138
5139 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5140 le32_to_cpu(client_addr->nonce));
5141 }
5142
5143 static ssize_t rbd_client_id_show(struct device *dev,
5144 struct device_attribute *attr, char *buf)
5145 {
5146 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5147
5148 return sprintf(buf, "client%lld\n",
5149 ceph_client_gid(rbd_dev->rbd_client->client));
5150 }
5151
5152 static ssize_t rbd_cluster_fsid_show(struct device *dev,
5153 struct device_attribute *attr, char *buf)
5154 {
5155 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5156
5157 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5158 }
5159
5160 static ssize_t rbd_config_info_show(struct device *dev,
5161 struct device_attribute *attr, char *buf)
5162 {
5163 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5164
5165 if (!capable(CAP_SYS_ADMIN))
5166 return -EPERM;
5167
5168 return sprintf(buf, "%s\n", rbd_dev->config_info);
5169 }
5170
5171 static ssize_t rbd_pool_show(struct device *dev,
5172 struct device_attribute *attr, char *buf)
5173 {
5174 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5175
5176 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5177 }
5178
5179 static ssize_t rbd_pool_id_show(struct device *dev,
5180 struct device_attribute *attr, char *buf)
5181 {
5182 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5183
5184 return sprintf(buf, "%llu\n",
5185 (unsigned long long) rbd_dev->spec->pool_id);
5186 }
5187
5188 static ssize_t rbd_pool_ns_show(struct device *dev,
5189 struct device_attribute *attr, char *buf)
5190 {
5191 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5192
5193 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5194 }
5195
5196 static ssize_t rbd_name_show(struct device *dev,
5197 struct device_attribute *attr, char *buf)
5198 {
5199 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5200
5201 if (rbd_dev->spec->image_name)
5202 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5203
5204 return sprintf(buf, "(unknown)\n");
5205 }
5206
5207 static ssize_t rbd_image_id_show(struct device *dev,
5208 struct device_attribute *attr, char *buf)
5209 {
5210 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5211
5212 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5213 }
5214
5215 /*
5216 * Shows the name of the currently-mapped snapshot (or
5217 * RBD_SNAP_HEAD_NAME for the base image).
5218 */
5219 static ssize_t rbd_snap_show(struct device *dev,
5220 struct device_attribute *attr,
5221 char *buf)
5222 {
5223 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5224
5225 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5226 }
5227
5228 static ssize_t rbd_snap_id_show(struct device *dev,
5229 struct device_attribute *attr, char *buf)
5230 {
5231 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5232
5233 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5234 }
5235
5236 /*
5237 * For a v2 image, shows the chain of parent images, separated by empty
5238 * lines. For v1 images or if there is no parent, shows "(no parent
5239 * image)".
5240 */
5241 static ssize_t rbd_parent_show(struct device *dev,
5242 struct device_attribute *attr,
5243 char *buf)
5244 {
5245 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5246 ssize_t count = 0;
5247
5248 if (!rbd_dev->parent)
5249 return sprintf(buf, "(no parent image)\n");
5250
5251 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5252 struct rbd_spec *spec = rbd_dev->parent_spec;
5253
5254 count += sprintf(&buf[count], "%s"
5255 "pool_id %llu\npool_name %s\n"
5256 "pool_ns %s\n"
5257 "image_id %s\nimage_name %s\n"
5258 "snap_id %llu\nsnap_name %s\n"
5259 "overlap %llu\n",
5260 !count ? "" : "\n", /* first? */
5261 spec->pool_id, spec->pool_name,
5262 spec->pool_ns ?: "",
5263 spec->image_id, spec->image_name ?: "(unknown)",
5264 spec->snap_id, spec->snap_name,
5265 rbd_dev->parent_overlap);
5266 }
5267
5268 return count;
5269 }
5270
5271 static ssize_t rbd_image_refresh(struct device *dev,
5272 struct device_attribute *attr,
5273 const char *buf,
5274 size_t size)
5275 {
5276 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5277 int ret;
5278
5279 if (!capable(CAP_SYS_ADMIN))
5280 return -EPERM;
5281
5282 ret = rbd_dev_refresh(rbd_dev);
5283 if (ret)
5284 return ret;
5285
5286 return size;
5287 }
5288
5289 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5290 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5291 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5292 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5293 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5294 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5295 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5296 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5297 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5298 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
5299 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5300 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5301 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5302 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5303 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5304 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5305 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
5306
5307 static struct attribute *rbd_attrs[] = {
5308 &dev_attr_size.attr,
5309 &dev_attr_features.attr,
5310 &dev_attr_major.attr,
5311 &dev_attr_minor.attr,
5312 &dev_attr_client_addr.attr,
5313 &dev_attr_client_id.attr,
5314 &dev_attr_cluster_fsid.attr,
5315 &dev_attr_config_info.attr,
5316 &dev_attr_pool.attr,
5317 &dev_attr_pool_id.attr,
5318 &dev_attr_pool_ns.attr,
5319 &dev_attr_name.attr,
5320 &dev_attr_image_id.attr,
5321 &dev_attr_current_snap.attr,
5322 &dev_attr_snap_id.attr,
5323 &dev_attr_parent.attr,
5324 &dev_attr_refresh.attr,
5325 NULL
5326 };
5327
5328 static struct attribute_group rbd_attr_group = {
5329 .attrs = rbd_attrs,
5330 };
5331
5332 static const struct attribute_group *rbd_attr_groups[] = {
5333 &rbd_attr_group,
5334 NULL
5335 };
5336
5337 static void rbd_dev_release(struct device *dev);
5338
5339 static const struct device_type rbd_device_type = {
5340 .name = "rbd",
5341 .groups = rbd_attr_groups,
5342 .release = rbd_dev_release,
5343 };
5344
5345 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5346 {
5347 kref_get(&spec->kref);
5348
5349 return spec;
5350 }
5351
5352 static void rbd_spec_free(struct kref *kref);
5353 static void rbd_spec_put(struct rbd_spec *spec)
5354 {
5355 if (spec)
5356 kref_put(&spec->kref, rbd_spec_free);
5357 }
5358
5359 static struct rbd_spec *rbd_spec_alloc(void)
5360 {
5361 struct rbd_spec *spec;
5362
5363 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5364 if (!spec)
5365 return NULL;
5366
5367 spec->pool_id = CEPH_NOPOOL;
5368 spec->snap_id = CEPH_NOSNAP;
5369 kref_init(&spec->kref);
5370
5371 return spec;
5372 }
5373
5374 static void rbd_spec_free(struct kref *kref)
5375 {
5376 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5377
5378 kfree(spec->pool_name);
5379 kfree(spec->pool_ns);
5380 kfree(spec->image_id);
5381 kfree(spec->image_name);
5382 kfree(spec->snap_name);
5383 kfree(spec);
5384 }
5385
5386 static void rbd_dev_free(struct rbd_device *rbd_dev)
5387 {
5388 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5389 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5390
5391 ceph_oid_destroy(&rbd_dev->header_oid);
5392 ceph_oloc_destroy(&rbd_dev->header_oloc);
5393 kfree(rbd_dev->config_info);
5394
5395 rbd_put_client(rbd_dev->rbd_client);
5396 rbd_spec_put(rbd_dev->spec);
5397 kfree(rbd_dev->opts);
5398 kfree(rbd_dev);
5399 }
5400
5401 static void rbd_dev_release(struct device *dev)
5402 {
5403 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5404 bool need_put = !!rbd_dev->opts;
5405
5406 if (need_put) {
5407 destroy_workqueue(rbd_dev->task_wq);
5408 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5409 }
5410
5411 rbd_dev_free(rbd_dev);
5412
5413 /*
5414 * This is racy, but way better than dropping the module reference outside of
5415 * the release callback. The race window is pretty small, so
5416 * doing something similar to dm (dm-builtin.c) is overkill.
5417 */
5418 if (need_put)
5419 module_put(THIS_MODULE);
5420 }
5421
5422 static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
5423 {
5424 struct rbd_device *rbd_dev;
5425
5426 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5427 if (!rbd_dev)
5428 return NULL;
5429
5430 spin_lock_init(&rbd_dev->lock);
5431 INIT_LIST_HEAD(&rbd_dev->node);
5432 init_rwsem(&rbd_dev->header_rwsem);
5433
5434 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5435 ceph_oid_init(&rbd_dev->header_oid);
5436 rbd_dev->header_oloc.pool = spec->pool_id;
5437 if (spec->pool_ns) {
5438 WARN_ON(!*spec->pool_ns);
5439 rbd_dev->header_oloc.pool_ns =
5440 ceph_find_or_create_string(spec->pool_ns,
5441 strlen(spec->pool_ns));
5442 }
5443
5444 mutex_init(&rbd_dev->watch_mutex);
5445 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5446 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5447
5448 init_rwsem(&rbd_dev->lock_rwsem);
5449 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5450 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5451 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5452 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5453 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5454 spin_lock_init(&rbd_dev->lock_lists_lock);
5455 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5456 INIT_LIST_HEAD(&rbd_dev->running_list);
5457 init_completion(&rbd_dev->acquire_wait);
5458 init_completion(&rbd_dev->releasing_wait);
5459
5460 spin_lock_init(&rbd_dev->object_map_lock);
5461
5462 rbd_dev->dev.bus = &rbd_bus_type;
5463 rbd_dev->dev.type = &rbd_device_type;
5464 rbd_dev->dev.parent = &rbd_root_dev;
5465 device_initialize(&rbd_dev->dev);
5466
5467 return rbd_dev;
5468 }
5469
5470 /*
5471 * Create a mapping rbd_dev.
5472 */
5473 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5474 struct rbd_spec *spec,
5475 struct rbd_options *opts)
5476 {
5477 struct rbd_device *rbd_dev;
5478
5479 rbd_dev = __rbd_dev_create(spec);
5480 if (!rbd_dev)
5481 return NULL;
5482
5483 /* get an id and fill in device name */
5484 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5485 minor_to_rbd_dev_id(1 << MINORBITS),
5486 GFP_KERNEL);
5487 if (rbd_dev->dev_id < 0)
5488 goto fail_rbd_dev;
5489
5490 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5491 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5492 rbd_dev->name);
5493 if (!rbd_dev->task_wq)
5494 goto fail_dev_id;
5495
5496 /* we have a ref from do_rbd_add() */
5497 __module_get(THIS_MODULE);
5498
5499 rbd_dev->rbd_client = rbdc;
5500 rbd_dev->spec = spec;
5501 rbd_dev->opts = opts;
5502
5503 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5504 return rbd_dev;
5505
5506 fail_dev_id:
5507 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5508 fail_rbd_dev:
5509 rbd_dev_free(rbd_dev);
5510 return NULL;
5511 }
5512
5513 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5514 {
5515 if (rbd_dev)
5516 put_device(&rbd_dev->dev);
5517 }
5518
5519 /*
5520 * Get the size and object order for an image snapshot, or if
5521 * snap_id is CEPH_NOSNAP, get this information for the base
5522 * image.
5523 */
5524 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5525 u8 *order, u64 *snap_size)
5526 {
5527 __le64 snapid = cpu_to_le64(snap_id);
5528 int ret;
5529 struct {
5530 u8 order;
5531 __le64 size;
5532 } __attribute__ ((packed)) size_buf = { 0 };
5533
5534 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5535 &rbd_dev->header_oloc, "get_size",
5536 &snapid, sizeof(snapid),
5537 &size_buf, sizeof(size_buf));
5538 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5539 if (ret < 0)
5540 return ret;
5541 if (ret < sizeof (size_buf))
5542 return -ERANGE;
5543
5544 if (order) {
5545 *order = size_buf.order;
5546 dout(" order %u", (unsigned int)*order);
5547 }
5548 *snap_size = le64_to_cpu(size_buf.size);
5549
5550 dout(" snap_id 0x%016llx snap_size = %llu\n",
5551 (unsigned long long)snap_id,
5552 (unsigned long long)*snap_size);
5553
5554 return 0;
5555 }
5556
5557 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
5558 char **pobject_prefix)
5559 {
5560 size_t size;
5561 void *reply_buf;
5562 char *object_prefix;
5563 int ret;
5564 void *p;
5565
5566 /* Response will be an encoded string, which includes a length */
5567 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5568 reply_buf = kzalloc(size, GFP_KERNEL);
5569 if (!reply_buf)
5570 return -ENOMEM;
5571
5572 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5573 &rbd_dev->header_oloc, "get_object_prefix",
5574 NULL, 0, reply_buf, size);
5575 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5576 if (ret < 0)
5577 goto out;
5578
5579 p = reply_buf;
5580 object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
5581 GFP_NOIO);
5582 if (IS_ERR(object_prefix)) {
5583 ret = PTR_ERR(object_prefix);
5584 goto out;
5585 }
5586 ret = 0;
5587
5588 *pobject_prefix = object_prefix;
5589 dout(" object_prefix = %s\n", object_prefix);
5590 out:
5591 kfree(reply_buf);
5592
5593 return ret;
5594 }
5595
5596 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5597 bool read_only, u64 *snap_features)
5598 {
5599 struct {
5600 __le64 snap_id;
5601 u8 read_only;
5602 } features_in;
5603 struct {
5604 __le64 features;
5605 __le64 incompat;
5606 } __attribute__ ((packed)) features_buf = { 0 };
5607 u64 unsup;
5608 int ret;
5609
5610 features_in.snap_id = cpu_to_le64(snap_id);
5611 features_in.read_only = read_only;
5612
5613 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5614 &rbd_dev->header_oloc, "get_features",
5615 &features_in, sizeof(features_in),
5616 &features_buf, sizeof(features_buf));
5617 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5618 if (ret < 0)
5619 return ret;
5620 if (ret < sizeof (features_buf))
5621 return -ERANGE;
5622
5623 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5624 if (unsup) {
5625 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5626 unsup);
5627 return -ENXIO;
5628 }
5629
5630 *snap_features = le64_to_cpu(features_buf.features);
5631
5632 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5633 (unsigned long long)snap_id,
5634 (unsigned long long)*snap_features,
5635 (unsigned long long)le64_to_cpu(features_buf.incompat));
5636
5637 return 0;
5638 }
5639
5640 /*
5641 * These are generic image flags, but since they are used only for
5642 * the object map, store them in rbd_dev->object_map_flags.
5643 *
5644 * For the same reason, this function is called only on object map
5645 * (re)load and not on header refresh.
5646 */
5647 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5648 {
5649 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5650 __le64 flags;
5651 int ret;
5652
5653 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5654 &rbd_dev->header_oloc, "get_flags",
5655 &snapid, sizeof(snapid),
5656 &flags, sizeof(flags));
5657 if (ret < 0)
5658 return ret;
5659 if (ret < sizeof(flags))
5660 return -EBADMSG;
5661
5662 rbd_dev->object_map_flags = le64_to_cpu(flags);
5663 return 0;
5664 }
5665
5666 struct parent_image_info {
5667 u64 pool_id;
5668 const char *pool_ns;
5669 const char *image_id;
5670 u64 snap_id;
5671
5672 bool has_overlap;
5673 u64 overlap;
5674 };
5675
5676 static void rbd_parent_info_cleanup(struct parent_image_info *pii)
5677 {
5678 kfree(pii->pool_ns);
5679 kfree(pii->image_id);
5680
5681 memset(pii, 0, sizeof(*pii));
5682 }
5683
5684 /*
5685 * The caller is responsible for @pii.
5686 */
5687 static int decode_parent_image_spec(void **p, void *end,
5688 struct parent_image_info *pii)
5689 {
5690 u8 struct_v;
5691 u32 struct_len;
5692 int ret;
5693
5694 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5695 &struct_v, &struct_len);
5696 if (ret)
5697 return ret;
5698
5699 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5700 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5701 if (IS_ERR(pii->pool_ns)) {
5702 ret = PTR_ERR(pii->pool_ns);
5703 pii->pool_ns = NULL;
5704 return ret;
5705 }
5706 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5707 if (IS_ERR(pii->image_id)) {
5708 ret = PTR_ERR(pii->image_id);
5709 pii->image_id = NULL;
5710 return ret;
5711 }
5712 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5713 return 0;
5714
5715 e_inval:
5716 return -EINVAL;
5717 }
5718
5719 static int __get_parent_info(struct rbd_device *rbd_dev,
5720 struct page *req_page,
5721 struct page *reply_page,
5722 struct parent_image_info *pii)
5723 {
5724 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5725 size_t reply_len = PAGE_SIZE;
5726 void *p, *end;
5727 int ret;
5728
5729 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5730 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
5731 req_page, sizeof(u64), &reply_page, &reply_len);
5732 if (ret)
5733 return ret == -EOPNOTSUPP ? 1 : ret;
5734
5735 p = page_address(reply_page);
5736 end = p + reply_len;
5737 ret = decode_parent_image_spec(&p, end, pii);
5738 if (ret)
5739 return ret;
5740
5741 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5742 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
5743 req_page, sizeof(u64), &reply_page, &reply_len);
5744 if (ret)
5745 return ret;
5746
5747 p = page_address(reply_page);
5748 end = p + reply_len;
5749 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5750 if (pii->has_overlap)
5751 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5752
5753 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5754 __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
5755 pii->has_overlap, pii->overlap);
5756 return 0;
5757
5758 e_inval:
5759 return -EINVAL;
5760 }
5761
5762 /*
5763 * The caller is responsible for @pii.
5764 */
5765 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5766 struct page *req_page,
5767 struct page *reply_page,
5768 struct parent_image_info *pii)
5769 {
5770 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5771 size_t reply_len = PAGE_SIZE;
5772 void *p, *end;
5773 int ret;
5774
5775 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5776 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
5777 req_page, sizeof(u64), &reply_page, &reply_len);
5778 if (ret)
5779 return ret;
5780
5781 p = page_address(reply_page);
5782 end = p + reply_len;
5783 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5784 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5785 if (IS_ERR(pii->image_id)) {
5786 ret = PTR_ERR(pii->image_id);
5787 pii->image_id = NULL;
5788 return ret;
5789 }
5790 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
5791 pii->has_overlap = true;
5792 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5793
5794 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5795 __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
5796 pii->has_overlap, pii->overlap);
5797 return 0;
5798
5799 e_inval:
5800 return -EINVAL;
5801 }
5802
5803 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
5804 struct parent_image_info *pii)
5805 {
5806 struct page *req_page, *reply_page;
5807 void *p;
5808 int ret;
5809
5810 req_page = alloc_page(GFP_KERNEL);
5811 if (!req_page)
5812 return -ENOMEM;
5813
5814 reply_page = alloc_page(GFP_KERNEL);
5815 if (!reply_page) {
5816 __free_page(req_page);
5817 return -ENOMEM;
5818 }
5819
5820 p = page_address(req_page);
5821 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5822 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5823 if (ret > 0)
5824 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5825 pii);
5826
5827 __free_page(req_page);
5828 __free_page(reply_page);
5829 return ret;
5830 }
5831
5832 static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
5833 {
5834 struct rbd_spec *parent_spec;
5835 struct parent_image_info pii = { 0 };
5836 int ret;
5837
5838 parent_spec = rbd_spec_alloc();
5839 if (!parent_spec)
5840 return -ENOMEM;
5841
5842 ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
5843 if (ret)
5844 goto out_err;
5845
5846 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
5847 goto out; /* No parent? No problem. */
5848
5849 /* The ceph file layout needs the pool id to fit in 32 bits */
5850
5851 ret = -EIO;
5852 if (pii.pool_id > (u64)U32_MAX) {
5853 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5854 (unsigned long long)pii.pool_id, U32_MAX);
5855 goto out_err;
5856 }
5857
5858 /*
5859 * The parent won't change except when the clone is flattened,
5860 * so we only need to record the parent image spec once.
5861 */
5862 parent_spec->pool_id = pii.pool_id;
5863 if (pii.pool_ns && *pii.pool_ns) {
5864 parent_spec->pool_ns = pii.pool_ns;
5865 pii.pool_ns = NULL;
5866 }
5867 parent_spec->image_id = pii.image_id;
5868 pii.image_id = NULL;
5869 parent_spec->snap_id = pii.snap_id;
5870
5871 rbd_assert(!rbd_dev->parent_spec);
5872 rbd_dev->parent_spec = parent_spec;
5873 parent_spec = NULL; /* rbd_dev now owns this */
5874
5875 /*
5876 * Record the parent overlap. If it's zero, issue a warning as
5877 * we will proceed as if there is no parent.
5878 */
5879 if (!pii.overlap)
5880 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5881 rbd_dev->parent_overlap = pii.overlap;
5882
5883 out:
5884 ret = 0;
5885 out_err:
5886 rbd_parent_info_cleanup(&pii);
5887 rbd_spec_put(parent_spec);
5888 return ret;
5889 }
5890
5891 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
5892 u64 *stripe_unit, u64 *stripe_count)
5893 {
5894 struct {
5895 __le64 stripe_unit;
5896 __le64 stripe_count;
5897 } __attribute__ ((packed)) striping_info_buf = { 0 };
5898 size_t size = sizeof (striping_info_buf);
5899 int ret;
5900
5901 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5902 &rbd_dev->header_oloc, "get_stripe_unit_count",
5903 NULL, 0, &striping_info_buf, size);
5904 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5905 if (ret < 0)
5906 return ret;
5907 if (ret < size)
5908 return -ERANGE;
5909
5910 *stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
5911 *stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
5912 dout(" stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
5913 *stripe_count);
5914
5915 return 0;
5916 }
5917
5918 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
5919 {
5920 __le64 data_pool_buf;
5921 int ret;
5922
5923 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5924 &rbd_dev->header_oloc, "get_data_pool",
5925 NULL, 0, &data_pool_buf,
5926 sizeof(data_pool_buf));
5927 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5928 if (ret < 0)
5929 return ret;
5930 if (ret < sizeof(data_pool_buf))
5931 return -EBADMSG;
5932
5933 *data_pool_id = le64_to_cpu(data_pool_buf);
5934 dout(" data_pool_id = %lld\n", *data_pool_id);
5935 WARN_ON(*data_pool_id == CEPH_NOPOOL);
5936
5937 return 0;
5938 }
5939
5940 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5941 {
5942 CEPH_DEFINE_OID_ONSTACK(oid);
5943 size_t image_id_size;
5944 char *image_id;
5945 void *p;
5946 void *end;
5947 size_t size;
5948 void *reply_buf = NULL;
5949 size_t len = 0;
5950 char *image_name = NULL;
5951 int ret;
5952
5953 rbd_assert(!rbd_dev->spec->image_name);
5954
5955 len = strlen(rbd_dev->spec->image_id);
5956 image_id_size = sizeof (__le32) + len;
5957 image_id = kmalloc(image_id_size, GFP_KERNEL);
5958 if (!image_id)
5959 return NULL;
5960
5961 p = image_id;
5962 end = image_id + image_id_size;
5963 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5964
5965 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5966 reply_buf = kmalloc(size, GFP_KERNEL);
5967 if (!reply_buf)
5968 goto out;
5969
5970 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5971 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5972 "dir_get_name", image_id, image_id_size,
5973 reply_buf, size);
5974 if (ret < 0)
5975 goto out;
5976 p = reply_buf;
5977 end = reply_buf + ret;
5978
5979 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5980 if (IS_ERR(image_name))
5981 image_name = NULL;
5982 else
5983 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5984 out:
5985 kfree(reply_buf);
5986 kfree(image_id);
5987
5988 return image_name;
5989 }
5990
5991 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5992 {
5993 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5994 const char *snap_name;
5995 u32 which = 0;
5996
5997 /* Skip over names until we find the one we are looking for */
5998
5999 snap_name = rbd_dev->header.snap_names;
6000 while (which < snapc->num_snaps) {
6001 if (!strcmp(name, snap_name))
6002 return snapc->snaps[which];
6003 snap_name += strlen(snap_name) + 1;
6004 which++;
6005 }
6006 return CEPH_NOSNAP;
6007 }
6008
6009 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6010 {
6011 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
6012 u32 which;
6013 bool found = false;
6014 u64 snap_id;
6015
6016 for (which = 0; !found && which < snapc->num_snaps; which++) {
6017 const char *snap_name;
6018
6019 snap_id = snapc->snaps[which];
6020 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
6021 if (IS_ERR(snap_name)) {
6022 /* ignore no-longer existing snapshots */
6023 if (PTR_ERR(snap_name) == -ENOENT)
6024 continue;
6025 else
6026 break;
6027 }
6028 found = !strcmp(name, snap_name);
6029 kfree(snap_name);
6030 }
6031 return found ? snap_id : CEPH_NOSNAP;
6032 }
6033
6034 /*
6035 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
6036 * no snapshot by that name is found, or if an error occurs.
6037 */
6038 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6039 {
6040 if (rbd_dev->image_format == 1)
6041 return rbd_v1_snap_id_by_name(rbd_dev, name);
6042
6043 return rbd_v2_snap_id_by_name(rbd_dev, name);
6044 }
6045
6046 /*
6047 * An image being mapped will have everything but the snap id.
6048 */
6049 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
6050 {
6051 struct rbd_spec *spec = rbd_dev->spec;
6052
6053 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
6054 rbd_assert(spec->image_id && spec->image_name);
6055 rbd_assert(spec->snap_name);
6056
6057 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
6058 u64 snap_id;
6059
6060 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
6061 if (snap_id == CEPH_NOSNAP)
6062 return -ENOENT;
6063
6064 spec->snap_id = snap_id;
6065 } else {
6066 spec->snap_id = CEPH_NOSNAP;
6067 }
6068
6069 return 0;
6070 }
6071
6072 /*
6073 * A parent image will have all ids but none of the names.
6074 *
6075 * All names in an rbd spec are dynamically allocated. It's OK if we
6076 * can't figure out the name for an image id.
6077 */
6078 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
6079 {
6080 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6081 struct rbd_spec *spec = rbd_dev->spec;
6082 const char *pool_name;
6083 const char *image_name;
6084 const char *snap_name;
6085 int ret;
6086
6087 rbd_assert(spec->pool_id != CEPH_NOPOOL);
6088 rbd_assert(spec->image_id);
6089 rbd_assert(spec->snap_id != CEPH_NOSNAP);
6090
6091 /* Get the pool name; we have to make our own copy of this */
6092
6093 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
6094 if (!pool_name) {
6095 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6096 return -EIO;
6097 }
6098 pool_name = kstrdup(pool_name, GFP_KERNEL);
6099 if (!pool_name)
6100 return -ENOMEM;
6101
6102 /* Fetch the image name; tolerate failure here */
6103
6104 image_name = rbd_dev_image_name(rbd_dev);
6105 if (!image_name)
6106 rbd_warn(rbd_dev, "unable to get image name");
6107
6108 /* Fetch the snapshot name */
6109
6110 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6111 if (IS_ERR(snap_name)) {
6112 ret = PTR_ERR(snap_name);
6113 goto out_err;
6114 }
6115
6116 spec->pool_name = pool_name;
6117 spec->image_name = image_name;
6118 spec->snap_name = snap_name;
6119
6120 return 0;
6121
6122 out_err:
6123 kfree(image_name);
6124 kfree(pool_name);
6125 return ret;
6126 }
6127
6128 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
6129 struct ceph_snap_context **psnapc)
6130 {
6131 size_t size;
6132 int ret;
6133 void *reply_buf;
6134 void *p;
6135 void *end;
6136 u64 seq;
6137 u32 snap_count;
6138 struct ceph_snap_context *snapc;
6139 u32 i;
6140
6141 /*
6142 * We'll need room for the seq value (maximum snapshot id),
6143 * snapshot count, and array of that many snapshot ids.
6144 * For now we have a fixed upper limit on the number we're
6145 * prepared to receive.
6146 */
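/*
 * Illustrative arithmetic only (not from the original source): with this
 * driver's RBD_MAX_SNAP_COUNT of 510 snapshot ids, the allocation below
 * is 8 + 4 + 510 * 8 = 4092 bytes, so the reply buffer still fits in a
 * single 4 KiB page.
 */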
6147 size = sizeof (__le64) + sizeof (__le32) +
6148 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6149 reply_buf = kzalloc(size, GFP_KERNEL);
6150 if (!reply_buf)
6151 return -ENOMEM;
6152
6153 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6154 &rbd_dev->header_oloc, "get_snapcontext",
6155 NULL, 0, reply_buf, size);
6156 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6157 if (ret < 0)
6158 goto out;
6159
6160 p = reply_buf;
6161 end = reply_buf + ret;
6162 ret = -ERANGE;
6163 ceph_decode_64_safe(&p, end, seq, out);
6164 ceph_decode_32_safe(&p, end, snap_count, out);
6165
6166 /*
6167 * Make sure the reported number of snapshot ids wouldn't go
6168 * beyond the end of our buffer. But before checking that,
6169 * make sure the computed size of the snapshot context we
6170 * allocate is representable in a size_t.
6171 */
6172 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6173 / sizeof (u64)) {
6174 ret = -EINVAL;
6175 goto out;
6176 }
6177 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6178 goto out;
6179 ret = 0;
6180
6181 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
6182 if (!snapc) {
6183 ret = -ENOMEM;
6184 goto out;
6185 }
6186 snapc->seq = seq;
6187 for (i = 0; i < snap_count; i++)
6188 snapc->snaps[i] = ceph_decode_64(&p);
6189
6190 *psnapc = snapc;
6191 dout(" snap context seq = %llu, snap_count = %u\n",
6192 (unsigned long long)seq, (unsigned int)snap_count);
6193 out:
6194 kfree(reply_buf);
6195
6196 return ret;
6197 }
6198
6199 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6200 u64 snap_id)
6201 {
6202 size_t size;
6203 void *reply_buf;
6204 __le64 snapid;
6205 int ret;
6206 void *p;
6207 void *end;
6208 char *snap_name;
6209
6210 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6211 reply_buf = kmalloc(size, GFP_KERNEL);
6212 if (!reply_buf)
6213 return ERR_PTR(-ENOMEM);
6214
6215 snapid = cpu_to_le64(snap_id);
6216 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6217 &rbd_dev->header_oloc, "get_snapshot_name",
6218 &snapid, sizeof(snapid), reply_buf, size);
6219 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6220 if (ret < 0) {
6221 snap_name = ERR_PTR(ret);
6222 goto out;
6223 }
6224
6225 p = reply_buf;
6226 end = reply_buf + ret;
6227 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
6228 if (IS_ERR(snap_name))
6229 goto out;
6230
6231 dout(" snap_id 0x%016llx snap_name = %s\n",
6232 (unsigned long long)snap_id, snap_name);
6233 out:
6234 kfree(reply_buf);
6235
6236 return snap_name;
6237 }
6238
6239 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
6240 struct rbd_image_header *header,
6241 bool first_time)
6242 {
6243 int ret;
6244
6245 ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
6246 first_time ? &header->obj_order : NULL,
6247 &header->image_size);
6248 if (ret)
6249 return ret;
6250
6251 if (first_time) {
6252 ret = rbd_dev_v2_header_onetime(rbd_dev, header);
6253 if (ret)
6254 return ret;
6255 }
6256
6257 ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
6258 if (ret)
6259 return ret;
6260
6261 return 0;
6262 }
6263
6264 static int rbd_dev_header_info(struct rbd_device *rbd_dev,
6265 struct rbd_image_header *header,
6266 bool first_time)
6267 {
6268 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6269 rbd_assert(!header->object_prefix && !header->snapc);
6270
6271 if (rbd_dev->image_format == 1)
6272 return rbd_dev_v1_header_info(rbd_dev, header, first_time);
6273
6274 return rbd_dev_v2_header_info(rbd_dev, header, first_time);
6275 }
6276
6277 /*
6278 * Skips over white space at *buf, and updates *buf to point to the
6279 * first found non-space character (if any). Returns the length of
6280 * the token (string of non-white space characters) found. Note
6281 * that *buf must be terminated with '\0'.
6282 */
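/*
 * Worked example (hypothetical input, not from the original source):
 * with *buf pointing at "  pool image", next_token() advances *buf to
 * "pool image" and returns 4, the length of the token "pool".
 */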
6283 static inline size_t next_token(const char **buf)
6284 {
6285 /*
6286 * These are the characters that produce nonzero for
6287 * isspace() in the "C" and "POSIX" locales.
6288 */
6289 const char *spaces = " \f\n\r\t\v";
6290
6291 *buf += strspn(*buf, spaces); /* Find start of token */
6292
6293 return strcspn(*buf, spaces); /* Return token length */
6294 }
6295
6296 /*
6297 * Finds the next token in *buf, dynamically allocates a buffer big
6298 * enough to hold a copy of it, and copies the token into the new
6299 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6300 * that a duplicate buffer is created even for a zero-length token.
6301 *
6302 * Returns a pointer to the newly-allocated duplicate, or a null
6303 * pointer if memory for the duplicate was not available. If
6304 * the lenp argument is a non-null pointer, the length of the token
6305 * (not including the '\0') is returned in *lenp.
6306 *
6307 * If successful, the *buf pointer will be updated to point beyond
6308 * the end of the found token.
6309 *
6310 * Note: uses GFP_KERNEL for allocation.
6311 */
6312 static inline char *dup_token(const char **buf, size_t *lenp)
6313 {
6314 char *dup;
6315 size_t len;
6316
6317 len = next_token(buf);
6318 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
6319 if (!dup)
6320 return NULL;
6321 *(dup + len) = '\0';
6322 *buf += len;
6323
6324 if (lenp)
6325 *lenp = len;
6326
6327 return dup;
6328 }
6329
6330 static int rbd_parse_param(struct fs_parameter *param,
6331 struct rbd_parse_opts_ctx *pctx)
6332 {
6333 struct rbd_options *opt = pctx->opts;
6334 struct fs_parse_result result;
6335 struct p_log log = {.prefix = "rbd"};
6336 int token, ret;
6337
6338 ret = ceph_parse_param(param, pctx->copts, NULL);
6339 if (ret != -ENOPARAM)
6340 return ret;
6341
6342 token = __fs_parse(&log, rbd_parameters, param, &result);
6343 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
6344 if (token < 0) {
6345 if (token == -ENOPARAM)
6346 return inval_plog(&log, "Unknown parameter '%s'",
6347 param->key);
6348 return token;
6349 }
6350
6351 switch (token) {
6352 case Opt_queue_depth:
6353 if (result.uint_32 < 1)
6354 goto out_of_range;
6355 opt->queue_depth = result.uint_32;
6356 break;
6357 case Opt_alloc_size:
6358 if (result.uint_32 < SECTOR_SIZE)
6359 goto out_of_range;
6360 if (!is_power_of_2(result.uint_32))
6361 return inval_plog(&log, "alloc_size must be a power of 2");
6362 opt->alloc_size = result.uint_32;
6363 break;
6364 case Opt_lock_timeout:
6365 /* 0 is "wait forever" (i.e. infinite timeout) */
6366 if (result.uint_32 > INT_MAX / 1000)
6367 goto out_of_range;
6368 opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
6369 break;
6370 case Opt_pool_ns:
6371 kfree(pctx->spec->pool_ns);
6372 pctx->spec->pool_ns = param->string;
6373 param->string = NULL;
6374 break;
6375 case Opt_compression_hint:
6376 switch (result.uint_32) {
6377 case Opt_compression_hint_none:
6378 opt->alloc_hint_flags &=
6379 ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
6380 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
6381 break;
6382 case Opt_compression_hint_compressible:
6383 opt->alloc_hint_flags |=
6384 CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6385 opt->alloc_hint_flags &=
6386 ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6387 break;
6388 case Opt_compression_hint_incompressible:
6389 opt->alloc_hint_flags |=
6390 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6391 opt->alloc_hint_flags &=
6392 ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6393 break;
6394 default:
6395 BUG();
6396 }
6397 break;
6398 case Opt_read_only:
6399 opt->read_only = true;
6400 break;
6401 case Opt_read_write:
6402 opt->read_only = false;
6403 break;
6404 case Opt_lock_on_read:
6405 opt->lock_on_read = true;
6406 break;
6407 case Opt_exclusive:
6408 opt->exclusive = true;
6409 break;
6410 case Opt_notrim:
6411 opt->trim = false;
6412 break;
6413 default:
6414 BUG();
6415 }
6416
6417 return 0;
6418
6419 out_of_range:
6420 return inval_plog(&log, "%s out of range", param->key);
6421 }
6422
6423 /*
6424 * This duplicates most of generic_parse_monolithic(), untying it from
6425 * fs_context and skipping standard superblock and security options.
6426 */
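/*
 * Sketch of the flow with a hypothetical option string: given
 * "queue_depth=128,read_only", strsep() yields "queue_depth=128" and
 * "read_only"; each piece is split at the first '=' into a key and an
 * optional value and handed to rbd_parse_param() as a struct
 * fs_parameter.
 */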
6427 static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
6428 {
6429 char *key;
6430 int ret = 0;
6431
6432 dout("%s '%s'\n", __func__, options);
6433 while ((key = strsep(&options, ",")) != NULL) {
6434 if (*key) {
6435 struct fs_parameter param = {
6436 .key = key,
6437 .type = fs_value_is_flag,
6438 };
6439 char *value = strchr(key, '=');
6440 size_t v_len = 0;
6441
6442 if (value) {
6443 if (value == key)
6444 continue;
6445 *value++ = 0;
6446 v_len = strlen(value);
6447 param.string = kmemdup_nul(value, v_len,
6448 GFP_KERNEL);
6449 if (!param.string)
6450 return -ENOMEM;
6451 param.type = fs_value_is_string;
6452 }
6453 param.size = v_len;
6454
6455 ret = rbd_parse_param(&param, pctx);
6456 kfree(param.string);
6457 if (ret)
6458 break;
6459 }
6460 }
6461
6462 return ret;
6463 }
6464
6465 /*
6466 * Parse the options provided for an "rbd add" (i.e., rbd image
6467 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
6468 * and the data written is passed here via a NUL-terminated buffer.
6469 * Returns 0 if successful or an error code otherwise.
6470 *
6471 * The information extracted from these options is recorded in
6472 * the other parameters which return dynamically-allocated
6473 * structures:
6474 * ceph_opts
6475 * The address of a pointer that will refer to a ceph options
6476 * structure. Caller must release the returned pointer using
6477 * ceph_destroy_options() when it is no longer needed.
6478 * rbd_opts
6479 * Address of an rbd options pointer. Fully initialized by
6480 * this function; caller must release with kfree().
6481 * spec
6482 * Address of an rbd image specification pointer. Fully
6483 * initialized by this function based on parsed options.
6484 * Caller must release with rbd_spec_put().
6485 *
6486 * The options passed take this form:
6487 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
6488 * where:
6489 * <mon_addrs>
6490 * A comma-separated list of one or more monitor addresses.
6491 * A monitor address is an ip address, optionally followed
6492 * by a port number (separated by a colon).
6493 * I.e.: ip1[:port1][,ip2[:port2]...]
6494 * <options>
6495 * A comma-separated list of ceph and/or rbd options.
6496 * <pool_name>
6497 * The name of the rados pool containing the rbd image.
6498 * <image_name>
6499 * The name of the image in that pool to map.
6500 * <snap_id>
6501 * An optional snapshot id. If provided, the mapping will
6502 * present data from the image at the time that snapshot was
6503 * created. The image head is used if no snapshot id is
6504 * provided. Snapshot mappings are always read-only.
6505 */
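/*
 * Illustrative example only (hypothetical monitor address, credentials,
 * pool and image names):
 *
 *   echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage -" > /sys/bus/rbd/add
 *
 * maps the head of image "myimage" in pool "rbd" with default options;
 * "-" stands for "no snapshot".
 */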
6506 static int rbd_add_parse_args(const char *buf,
6507 struct ceph_options **ceph_opts,
6508 struct rbd_options **opts,
6509 struct rbd_spec **rbd_spec)
6510 {
6511 size_t len;
6512 char *options;
6513 const char *mon_addrs;
6514 char *snap_name;
6515 size_t mon_addrs_size;
6516 struct rbd_parse_opts_ctx pctx = { 0 };
6517 int ret;
6518
6519 /* The first four tokens are required */
6520
6521 len = next_token(&buf);
6522 if (!len) {
6523 rbd_warn(NULL, "no monitor address(es) provided");
6524 return -EINVAL;
6525 }
6526 mon_addrs = buf;
6527 mon_addrs_size = len;
6528 buf += len;
6529
6530 ret = -EINVAL;
6531 options = dup_token(&buf, NULL);
6532 if (!options)
6533 return -ENOMEM;
6534 if (!*options) {
6535 rbd_warn(NULL, "no options provided");
6536 goto out_err;
6537 }
6538
6539 pctx.spec = rbd_spec_alloc();
6540 if (!pctx.spec)
6541 goto out_mem;
6542
6543 pctx.spec->pool_name = dup_token(&buf, NULL);
6544 if (!pctx.spec->pool_name)
6545 goto out_mem;
6546 if (!*pctx.spec->pool_name) {
6547 rbd_warn(NULL, "no pool name provided");
6548 goto out_err;
6549 }
6550
6551 pctx.spec->image_name = dup_token(&buf, NULL);
6552 if (!pctx.spec->image_name)
6553 goto out_mem;
6554 if (!*pctx.spec->image_name) {
6555 rbd_warn(NULL, "no image name provided");
6556 goto out_err;
6557 }
6558
6559 /*
6560 * Snapshot name is optional; default is to use "-"
6561 * (indicating the head/no snapshot).
6562 */
6563 len = next_token(&buf);
6564 if (!len) {
6565 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6566 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
6567 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
6568 ret = -ENAMETOOLONG;
6569 goto out_err;
6570 }
6571 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6572 if (!snap_name)
6573 goto out_mem;
6574 *(snap_name + len) = '\0';
6575 pctx.spec->snap_name = snap_name;
6576
6577 pctx.copts = ceph_alloc_options();
6578 if (!pctx.copts)
6579 goto out_mem;
6580
6581 /* Initialize all rbd options to the defaults */
6582
6583 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6584 if (!pctx.opts)
6585 goto out_mem;
6586
6587 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6588 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
6589 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
6590 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6591 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6592 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6593 pctx.opts->trim = RBD_TRIM_DEFAULT;
6594
6595 ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL);
6596 if (ret)
6597 goto out_err;
6598
6599 ret = rbd_parse_options(options, &pctx);
6600 if (ret)
6601 goto out_err;
6602
6603 *ceph_opts = pctx.copts;
6604 *opts = pctx.opts;
6605 *rbd_spec = pctx.spec;
6606 kfree(options);
6607 return 0;
6608
6609 out_mem:
6610 ret = -ENOMEM;
6611 out_err:
6612 kfree(pctx.opts);
6613 ceph_destroy_options(pctx.copts);
6614 rbd_spec_put(pctx.spec);
6615 kfree(options);
6616 return ret;
6617 }
6618
6619 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6620 {
6621 down_write(&rbd_dev->lock_rwsem);
6622 if (__rbd_is_lock_owner(rbd_dev))
6623 __rbd_release_lock(rbd_dev);
6624 up_write(&rbd_dev->lock_rwsem);
6625 }
6626
6627 /*
6628 * If the wait is interrupted, an error is returned even if the lock
6629 * was successfully acquired. rbd_dev_image_unlock() will release it
6630 * if needed.
6631 */
6632 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6633 {
6634 long ret;
6635
6636 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6637 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6638 return 0;
6639
6640 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6641 return -EINVAL;
6642 }
6643
6644 if (rbd_is_ro(rbd_dev))
6645 return 0;
6646
6647 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6648 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6649 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6650 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6651 if (ret > 0) {
6652 ret = rbd_dev->acquire_err;
6653 } else {
6654 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6655 if (!ret)
6656 ret = -ETIMEDOUT;
6657
6658 rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
6659 }
6660 if (ret)
6661 return ret;
6662
6663 /*
6664 * The lock may have been released by now, unless automatic lock
6665 * transitions are disabled.
6666 */
6667 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
6668 return 0;
6669 }
6670
6671 /*
6672 * An rbd format 2 image has a unique identifier, distinct from the
6673 * name given to it by the user. Internally, that identifier is
6674 * what's used to specify the names of objects related to the image.
6675 *
6676 * A special "rbd id" object is used to map an rbd image name to its
6677 * id. If that object doesn't exist, then there is no v2 rbd image
6678 * with the supplied name.
6679 *
6680 * This function will fill in the given rbd_dev's image_id field if
6681 * it can be determined, and in that case will return 0. If any
6682 * errors occur, a negative errno will be returned and the rbd_dev's
6683 * image_id field will be unchanged (and should be NULL).
6684 */
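/*
 * For instance (image name chosen for illustration): probing a format 2
 * image called "foo" looks up the object named RBD_ID_PREFIX "foo" and
 * invokes its "get_id" method; if that object does not exist, the image
 * is treated as format 1 below.
 */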
6685 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6686 {
6687 int ret;
6688 size_t size;
6689 CEPH_DEFINE_OID_ONSTACK(oid);
6690 void *response;
6691 char *image_id;
6692
6693 /*
6694 * When probing a parent image, the image id is already
6695 * known (and the image name likely is not). There's no
6696 * need to fetch the image id again in this case. We
6697 * do still need to set the image format though.
6698 */
6699 if (rbd_dev->spec->image_id) {
6700 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6701
6702 return 0;
6703 }
6704
6705 /*
6706 * First, see if the format 2 image id file exists, and if
6707 * so, get the image's persistent id from it.
6708 */
6709 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6710 rbd_dev->spec->image_name);
6711 if (ret)
6712 return ret;
6713
6714 dout("rbd id object name is %s\n", oid.name);
6715
6716 /* Response will be an encoded string, which includes a length */
6717 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6718 response = kzalloc(size, GFP_NOIO);
6719 if (!response) {
6720 ret = -ENOMEM;
6721 goto out;
6722 }
6723
6724 /* If it doesn't exist we'll assume it's a format 1 image */
6725
6726 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6727 "get_id", NULL, 0,
6728 response, size);
6729 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6730 if (ret == -ENOENT) {
6731 image_id = kstrdup("", GFP_KERNEL);
6732 ret = image_id ? 0 : -ENOMEM;
6733 if (!ret)
6734 rbd_dev->image_format = 1;
6735 } else if (ret >= 0) {
6736 void *p = response;
6737
6738 image_id = ceph_extract_encoded_string(&p, p + ret,
6739 NULL, GFP_NOIO);
6740 ret = PTR_ERR_OR_ZERO(image_id);
6741 if (!ret)
6742 rbd_dev->image_format = 2;
6743 }
6744
6745 if (!ret) {
6746 rbd_dev->spec->image_id = image_id;
6747 dout("image_id is %s\n", image_id);
6748 }
6749 out:
6750 kfree(response);
6751 ceph_oid_destroy(&oid);
6752 return ret;
6753 }
6754
6755 /*
6756 * Undo whatever state changes are made by a v1 or v2 header info
6757 * call.
6758 */
6759 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6760 {
6761 rbd_dev_parent_put(rbd_dev);
6762 rbd_object_map_free(rbd_dev);
6763 rbd_dev_mapping_clear(rbd_dev);
6764
6765 /* Free dynamic fields from the header, then zero it out */
6766
6767 rbd_image_header_cleanup(&rbd_dev->header);
6768 }
6769
6770 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
6771 struct rbd_image_header *header)
6772 {
6773 int ret;
6774
6775 ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
6776 if (ret)
6777 return ret;
6778
6779 /*
6780 * Get and check the features for the image. Currently the
6781 * features are assumed to never change.
6782 */
6783 ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
6784 rbd_is_ro(rbd_dev), &header->features);
6785 if (ret)
6786 return ret;
6787
6788 /* If the image supports fancy striping, get its parameters */
6789
6790 if (header->features & RBD_FEATURE_STRIPINGV2) {
6791 ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
6792 &header->stripe_count);
6793 if (ret)
6794 return ret;
6795 }
6796
6797 if (header->features & RBD_FEATURE_DATA_POOL) {
6798 ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
6799 if (ret)
6800 return ret;
6801 }
6802
6803 return 0;
6804 }
6805
6806 /*
6807 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6808 * rbd_dev_image_probe() recursion depth, which means it's also the
6809 * length of the already discovered part of the parent chain.
6810 */
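/*
 * For example, mapping a clone whose parent is itself a clone probes
 * with depth 0, then 1, then 2; the probe fails with -EINVAL once the
 * chain would exceed RBD_MAX_PARENT_CHAIN_LEN.
 */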
6811 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
6812 {
6813 struct rbd_device *parent = NULL;
6814 int ret;
6815
6816 if (!rbd_dev->parent_spec)
6817 return 0;
6818
6819 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
6820 pr_info("parent chain is too long (%d)\n", depth);
6821 ret = -EINVAL;
6822 goto out_err;
6823 }
6824
6825 parent = __rbd_dev_create(rbd_dev->parent_spec);
6826 if (!parent) {
6827 ret = -ENOMEM;
6828 goto out_err;
6829 }
6830
6831 /*
6832 * Images related by parent/child relationships always share
6833 * rbd_client and spec/parent_spec, so bump their refcounts.
6834 */
6835 parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
6836 parent->spec = rbd_spec_get(rbd_dev->parent_spec);
6837
6838 __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
6839
6840 ret = rbd_dev_image_probe(parent, depth);
6841 if (ret < 0)
6842 goto out_err;
6843
6844 rbd_dev->parent = parent;
6845 atomic_set(&rbd_dev->parent_ref, 1);
6846 return 0;
6847
6848 out_err:
6849 rbd_dev_unparent(rbd_dev);
6850 rbd_dev_destroy(parent);
6851 return ret;
6852 }
6853
6854 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6855 {
6856 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6857 rbd_free_disk(rbd_dev);
6858 if (!single_major)
6859 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6860 }
6861
6862 /*
6863 * rbd_dev->header_rwsem must be locked for write and will be unlocked
6864 * upon return.
6865 */
6866 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6867 {
6868 int ret;
6869
6870 /* Record our major and minor device numbers. */
6871
6872 if (!single_major) {
6873 ret = register_blkdev(0, rbd_dev->name);
6874 if (ret < 0)
6875 goto err_out_unlock;
6876
6877 rbd_dev->major = ret;
6878 rbd_dev->minor = 0;
6879 } else {
6880 rbd_dev->major = rbd_major;
6881 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6882 }
6883
6884 /* Set up the blkdev mapping. */
6885
6886 ret = rbd_init_disk(rbd_dev);
6887 if (ret)
6888 goto err_out_blkdev;
6889
6890 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6891 set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
6892
6893 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6894 if (ret)
6895 goto err_out_disk;
6896
6897 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6898 up_write(&rbd_dev->header_rwsem);
6899 return 0;
6900
6901 err_out_disk:
6902 rbd_free_disk(rbd_dev);
6903 err_out_blkdev:
6904 if (!single_major)
6905 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6906 err_out_unlock:
6907 up_write(&rbd_dev->header_rwsem);
6908 return ret;
6909 }
6910
6911 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6912 {
6913 struct rbd_spec *spec = rbd_dev->spec;
6914 int ret;
6915
6916 /* Record the header object name for this rbd image. */
6917
6918 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6919 if (rbd_dev->image_format == 1)
6920 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6921 spec->image_name, RBD_SUFFIX);
6922 else
6923 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6924 RBD_HEADER_PREFIX, spec->image_id);
6925
6926 return ret;
6927 }
6928
6929 static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
6930 {
6931 if (!is_snap) {
6932 pr_info("image %s/%s%s%s does not exist\n",
6933 rbd_dev->spec->pool_name,
6934 rbd_dev->spec->pool_ns ?: "",
6935 rbd_dev->spec->pool_ns ? "/" : "",
6936 rbd_dev->spec->image_name);
6937 } else {
6938 pr_info("snap %s/%s%s%s@%s does not exist\n",
6939 rbd_dev->spec->pool_name,
6940 rbd_dev->spec->pool_ns ?: "",
6941 rbd_dev->spec->pool_ns ? "/" : "",
6942 rbd_dev->spec->image_name,
6943 rbd_dev->spec->snap_name);
6944 }
6945 }
6946
6947 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6948 {
6949 if (!rbd_is_ro(rbd_dev))
6950 rbd_unregister_watch(rbd_dev);
6951
6952 rbd_dev_unprobe(rbd_dev);
6953 rbd_dev->image_format = 0;
6954 kfree(rbd_dev->spec->image_id);
6955 rbd_dev->spec->image_id = NULL;
6956 }
6957
6958 /*
6959 * Probe for the existence of the header object for the given rbd
6960 * device. If this image is the one being mapped (i.e., not a
6961 * parent), initiate a watch on its header object before using that
6962 * object to get detailed information about the rbd image.
6963 *
6964 * On success, returns with header_rwsem held for write if called
6965 * with @depth == 0.
6966 */
6967 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6968 {
6969 bool need_watch = !rbd_is_ro(rbd_dev);
6970 int ret;
6971
6972 /*
6973 * Get the id from the image id object. Unless there's an
6974 * error, rbd_dev->spec->image_id will be filled in with
6975 * a dynamically-allocated string, and rbd_dev->image_format
6976 * will be set to either 1 or 2.
6977 */
6978 ret = rbd_dev_image_id(rbd_dev);
6979 if (ret)
6980 return ret;
6981
6982 ret = rbd_dev_header_name(rbd_dev);
6983 if (ret)
6984 goto err_out_format;
6985
6986 if (need_watch) {
6987 ret = rbd_register_watch(rbd_dev);
6988 if (ret) {
6989 if (ret == -ENOENT)
6990 rbd_print_dne(rbd_dev, false);
6991 goto err_out_format;
6992 }
6993 }
6994
6995 if (!depth)
6996 down_write(&rbd_dev->header_rwsem);
6997
6998 ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
6999 if (ret) {
7000 if (ret == -ENOENT && !need_watch)
7001 rbd_print_dne(rbd_dev, false);
7002 goto err_out_probe;
7003 }
7004
7005 rbd_init_layout(rbd_dev);
7006
7007 /*
7008 * If this image is the one being mapped, we have pool name and
7009 * id, image name and id, and snap name - need to fill snap id.
7010 * Otherwise this is a parent image, identified by pool, image
7011 * and snap ids - need to fill in names for those ids.
7012 */
7013 if (!depth)
7014 ret = rbd_spec_fill_snap_id(rbd_dev);
7015 else
7016 ret = rbd_spec_fill_names(rbd_dev);
7017 if (ret) {
7018 if (ret == -ENOENT)
7019 rbd_print_dne(rbd_dev, true);
7020 goto err_out_probe;
7021 }
7022
7023 ret = rbd_dev_mapping_set(rbd_dev);
7024 if (ret)
7025 goto err_out_probe;
7026
7027 if (rbd_is_snap(rbd_dev) &&
7028 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
7029 ret = rbd_object_map_load(rbd_dev);
7030 if (ret)
7031 goto err_out_probe;
7032 }
7033
7034 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
7035 ret = rbd_dev_setup_parent(rbd_dev);
7036 if (ret)
7037 goto err_out_probe;
7038 }
7039
7040 ret = rbd_dev_probe_parent(rbd_dev, depth);
7041 if (ret)
7042 goto err_out_probe;
7043
7044 dout("discovered format %u image, header name is %s\n",
7045 rbd_dev->image_format, rbd_dev->header_oid.name);
7046 return 0;
7047
7048 err_out_probe:
7049 if (!depth)
7050 up_write(&rbd_dev->header_rwsem);
7051 if (need_watch)
7052 rbd_unregister_watch(rbd_dev);
7053 rbd_dev_unprobe(rbd_dev);
7054 err_out_format:
7055 rbd_dev->image_format = 0;
7056 kfree(rbd_dev->spec->image_id);
7057 rbd_dev->spec->image_id = NULL;
7058 return ret;
7059 }
7060
7061 static void rbd_dev_update_header(struct rbd_device *rbd_dev,
7062 struct rbd_image_header *header)
7063 {
7064 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
7065 rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
7066
7067 if (rbd_dev->header.image_size != header->image_size) {
7068 rbd_dev->header.image_size = header->image_size;
7069
7070 if (!rbd_is_snap(rbd_dev)) {
7071 rbd_dev->mapping.size = header->image_size;
7072 rbd_dev_update_size(rbd_dev);
7073 }
7074 }
7075
7076 ceph_put_snap_context(rbd_dev->header.snapc);
7077 rbd_dev->header.snapc = header->snapc;
7078 header->snapc = NULL;
7079
7080 if (rbd_dev->image_format == 1) {
7081 kfree(rbd_dev->header.snap_names);
7082 rbd_dev->header.snap_names = header->snap_names;
7083 header->snap_names = NULL;
7084
7085 kfree(rbd_dev->header.snap_sizes);
7086 rbd_dev->header.snap_sizes = header->snap_sizes;
7087 header->snap_sizes = NULL;
7088 }
7089 }
7090
7091 static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
7092 struct parent_image_info *pii)
7093 {
7094 if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
7095 /*
7096 * Either the parent never existed, or we have a
7097 * record of it but the image got flattened so it no
7098 * longer has a parent. When the parent of a
7099 * layered image disappears we immediately set the
7100 * overlap to 0. The effect of this is that all new
7101 * requests will be treated as if the image had no
7102 * parent.
7103 *
7104 * If !pii.has_overlap, the parent image spec is not
7105 * applicable. It's there to avoid duplication in each
7106 * snapshot record.
7107 */
7108 if (rbd_dev->parent_overlap) {
7109 rbd_dev->parent_overlap = 0;
7110 rbd_dev_parent_put(rbd_dev);
7111 pr_info("%s: clone has been flattened\n",
7112 rbd_dev->disk->disk_name);
7113 }
7114 } else {
7115 rbd_assert(rbd_dev->parent_spec);
7116
7117 /*
7118 * Update the parent overlap. If it became zero, issue
7119 * a warning as we will proceed as if there is no parent.
7120 */
7121 if (!pii->overlap && rbd_dev->parent_overlap)
7122 rbd_warn(rbd_dev,
7123 "clone has become standalone (overlap 0)");
7124 rbd_dev->parent_overlap = pii->overlap;
7125 }
7126 }
7127
7128 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
7129 {
7130 struct rbd_image_header header = { 0 };
7131 struct parent_image_info pii = { 0 };
7132 int ret;
7133
7134 dout("%s rbd_dev %p\n", __func__, rbd_dev);
7135
7136 ret = rbd_dev_header_info(rbd_dev, &header, false);
7137 if (ret)
7138 goto out;
7139
7140 /*
7141 * If there is a parent, see if it has disappeared due to the
7142 * mapped image getting flattened.
7143 */
7144 if (rbd_dev->parent) {
7145 ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
7146 if (ret)
7147 goto out;
7148 }
7149
7150 down_write(&rbd_dev->header_rwsem);
7151 rbd_dev_update_header(rbd_dev, &header);
7152 if (rbd_dev->parent)
7153 rbd_dev_update_parent(rbd_dev, &pii);
7154 up_write(&rbd_dev->header_rwsem);
7155
7156 out:
7157 rbd_parent_info_cleanup(&pii);
7158 rbd_image_header_cleanup(&header);
7159 return ret;
7160 }
7161
7162 static ssize_t do_rbd_add(struct bus_type *bus,
7163 const char *buf,
7164 size_t count)
7165 {
7166 struct rbd_device *rbd_dev = NULL;
7167 struct ceph_options *ceph_opts = NULL;
7168 struct rbd_options *rbd_opts = NULL;
7169 struct rbd_spec *spec = NULL;
7170 struct rbd_client *rbdc;
7171 int rc;
7172
7173 if (!capable(CAP_SYS_ADMIN))
7174 return -EPERM;
7175
7176 if (!try_module_get(THIS_MODULE))
7177 return -ENODEV;
7178
7179 /* parse add command */
7180 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
7181 if (rc < 0)
7182 goto out;
7183
7184 rbdc = rbd_get_client(ceph_opts);
7185 if (IS_ERR(rbdc)) {
7186 rc = PTR_ERR(rbdc);
7187 goto err_out_args;
7188 }
7189
7190 /* pick the pool */
7191 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
7192 if (rc < 0) {
7193 if (rc == -ENOENT)
7194 pr_info("pool %s does not exist\n", spec->pool_name);
7195 goto err_out_client;
7196 }
7197 spec->pool_id = (u64)rc;
7198
7199 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
7200 if (!rbd_dev) {
7201 rc = -ENOMEM;
7202 goto err_out_client;
7203 }
7204 rbdc = NULL; /* rbd_dev now owns this */
7205 spec = NULL; /* rbd_dev now owns this */
7206 rbd_opts = NULL; /* rbd_dev now owns this */
7207
7208 /* if we are mapping a snapshot it will be a read-only mapping */
7209 if (rbd_dev->opts->read_only ||
7210 strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
7211 __set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
7212
7213 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7214 if (!rbd_dev->config_info) {
7215 rc = -ENOMEM;
7216 goto err_out_rbd_dev;
7217 }
7218
7219 rc = rbd_dev_image_probe(rbd_dev, 0);
7220 if (rc < 0)
7221 goto err_out_rbd_dev;
7222
7223 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7224 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7225 rbd_dev->layout.object_size);
7226 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7227 }
7228
7229 rc = rbd_dev_device_setup(rbd_dev);
7230 if (rc)
7231 goto err_out_image_probe;
7232
7233 rc = rbd_add_acquire_lock(rbd_dev);
7234 if (rc)
7235 goto err_out_image_lock;
7236
7237 /* Everything's ready. Announce the disk to the world. */
7238
7239 rc = device_add(&rbd_dev->dev);
7240 if (rc)
7241 goto err_out_image_lock;
7242
7243 device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
7244 /* see rbd_init_disk() */
7245 blk_put_queue(rbd_dev->disk->queue);
7246
7247 spin_lock(&rbd_dev_list_lock);
7248 list_add_tail(&rbd_dev->node, &rbd_dev_list);
7249 spin_unlock(&rbd_dev_list_lock);
7250
7251 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7252 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7253 rbd_dev->header.features);
7254 rc = count;
7255 out:
7256 module_put(THIS_MODULE);
7257 return rc;
7258
7259 err_out_image_lock:
7260 rbd_dev_image_unlock(rbd_dev);
7261 rbd_dev_device_release(rbd_dev);
7262 err_out_image_probe:
7263 rbd_dev_image_release(rbd_dev);
7264 err_out_rbd_dev:
7265 rbd_dev_destroy(rbd_dev);
7266 err_out_client:
7267 rbd_put_client(rbdc);
7268 err_out_args:
7269 rbd_spec_put(spec);
7270 kfree(rbd_opts);
7271 goto out;
7272 }
7273
7274 static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
7275 {
7276 if (single_major)
7277 return -EINVAL;
7278
7279 return do_rbd_add(bus, buf, count);
7280 }
7281
7282 static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
7283 size_t count)
7284 {
7285 return do_rbd_add(bus, buf, count);
7286 }
7287
7288 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
7289 {
7290 while (rbd_dev->parent) {
7291 struct rbd_device *first = rbd_dev;
7292 struct rbd_device *second = first->parent;
7293 struct rbd_device *third;
7294
7295 /*
7296 * Follow to the parent with no grandparent and
7297 * remove it.
7298 */
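		/*
		 * E.g. with a chain rbd_dev -> P1 -> P2 (P2 having no
		 * parent), the inner loop leaves first == P1 and
		 * second == P2; P2 is released and destroyed first,
		 * and the next pass of the outer loop tears down P1
		 * the same way.
		 */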
7299 while (second && (third = second->parent)) {
7300 first = second;
7301 second = third;
7302 }
7303 rbd_assert(second);
7304 rbd_dev_image_release(second);
7305 rbd_dev_destroy(second);
7306 first->parent = NULL;
7307 first->parent_overlap = 0;
7308
7309 rbd_assert(first->parent_spec);
7310 rbd_spec_put(first->parent_spec);
7311 first->parent_spec = NULL;
7312 }
7313 }
7314
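/*
 * Handle a write to the "remove" bus attribute.  The buffer holds a
 * device id and an optional "force" keyword, e.g.
 *
 *   echo 0 > /sys/bus/rbd/remove
 *   echo "0 force" > /sys/bus/rbd/remove
 *
 * Without "force" removal is refused with -EBUSY while the device is
 * open; with it, the request queue is frozen and marked dying so that
 * outstanding I/O fails and teardown can proceed.
 */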
7315 static ssize_t do_rbd_remove(struct bus_type *bus,
7316 const char *buf,
7317 size_t count)
7318 {
7319 struct rbd_device *rbd_dev = NULL;
7320 struct list_head *tmp;
7321 int dev_id;
7322 char opt_buf[6];
7323 bool force = false;
7324 int ret;
7325
7326 if (!capable(CAP_SYS_ADMIN))
7327 return -EPERM;
7328
7329 dev_id = -1;
7330 opt_buf[0] = '\0';
7331 sscanf(buf, "%d %5s", &dev_id, opt_buf);
7332 if (dev_id < 0) {
7333 pr_err("dev_id out of range\n");
7334 return -EINVAL;
7335 }
7336 if (opt_buf[0] != '\0') {
7337 if (!strcmp(opt_buf, "force")) {
7338 force = true;
7339 } else {
7340 pr_err("bad remove option at '%s'\n", opt_buf);
7341 return -EINVAL;
7342 }
7343 }
7344
7345 ret = -ENOENT;
7346 spin_lock(&rbd_dev_list_lock);
7347 list_for_each(tmp, &rbd_dev_list) {
7348 rbd_dev = list_entry(tmp, struct rbd_device, node);
7349 if (rbd_dev->dev_id == dev_id) {
7350 ret = 0;
7351 break;
7352 }
7353 }
7354 if (!ret) {
7355 spin_lock_irq(&rbd_dev->lock);
7356 if (rbd_dev->open_count && !force)
7357 ret = -EBUSY;
7358 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
7359 &rbd_dev->flags))
7360 ret = -EINPROGRESS;
7361 spin_unlock_irq(&rbd_dev->lock);
7362 }
7363 spin_unlock(&rbd_dev_list_lock);
7364 if (ret)
7365 return ret;
7366
7367 if (force) {
7368 /*
7369 * Prevent new IO from being queued and wait for existing
7370 * IO to complete/fail.
7371 */
7372 blk_mq_freeze_queue(rbd_dev->disk->queue);
7373 blk_set_queue_dying(rbd_dev->disk->queue);
7374 }
7375
7376 del_gendisk(rbd_dev->disk);
7377 spin_lock(&rbd_dev_list_lock);
7378 list_del_init(&rbd_dev->node);
7379 spin_unlock(&rbd_dev_list_lock);
7380 device_del(&rbd_dev->dev);
7381
7382 rbd_dev_image_unlock(rbd_dev);
7383 rbd_dev_device_release(rbd_dev);
7384 rbd_dev_image_release(rbd_dev);
7385 rbd_dev_destroy(rbd_dev);
7386 return count;
7387 }
7388
7389 static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
7390 {
7391 if (single_major)
7392 return -EINVAL;
7393
7394 return do_rbd_remove(bus, buf, count);
7395 }
7396
7397 static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
7398 size_t count)
7399 {
7400 return do_rbd_remove(bus, buf, count);
7401 }
7402
7403 /*
7404 * create control files in sysfs
7405 * /sys/bus/rbd/...
7406 */
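/*
 * Registering rbd_bus_type is what creates the add/remove (and
 * add_single_major/remove_single_major) control files under
 * /sys/bus/rbd, backed by the *_store() handlers above.
 */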
7407 static int __init rbd_sysfs_init(void)
7408 {
7409 int ret;
7410
7411 ret = device_register(&rbd_root_dev);
7412 if (ret < 0)
7413 return ret;
7414
7415 ret = bus_register(&rbd_bus_type);
7416 if (ret < 0)
7417 device_unregister(&rbd_root_dev);
7418
7419 return ret;
7420 }
7421
7422 static void __exit rbd_sysfs_cleanup(void)
7423 {
7424 bus_unregister(&rbd_bus_type);
7425 device_unregister(&rbd_root_dev);
7426 }
7427
7428 static int __init rbd_slab_init(void)
7429 {
7430 rbd_assert(!rbd_img_request_cache);
7431 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
7432 if (!rbd_img_request_cache)
7433 return -ENOMEM;
7434
7435 rbd_assert(!rbd_obj_request_cache);
7436 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
7437 if (!rbd_obj_request_cache)
7438 goto out_err;
7439
7440 return 0;
7441
7442 out_err:
7443 kmem_cache_destroy(rbd_img_request_cache);
7444 rbd_img_request_cache = NULL;
7445 return -ENOMEM;
7446 }
7447
7448 static void rbd_slab_exit(void)
7449 {
7450 rbd_assert(rbd_obj_request_cache);
7451 kmem_cache_destroy(rbd_obj_request_cache);
7452 rbd_obj_request_cache = NULL;
7453
7454 rbd_assert(rbd_img_request_cache);
7455 kmem_cache_destroy(rbd_img_request_cache);
7456 rbd_img_request_cache = NULL;
7457 }
7458
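/*
 * Module init: set up the request slab caches, the I/O workqueue and,
 * when single_major is set, one block major shared by all rbd devices,
 * then register the sysfs interface.  Each step is unwound in reverse
 * order on failure, mirroring rbd_exit().
 */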
7459 static int __init rbd_init(void)
7460 {
7461 int rc;
7462
7463 if (!libceph_compatible(NULL)) {
7464 rbd_warn(NULL, "libceph incompatibility (quitting)");
7465 return -EINVAL;
7466 }
7467
7468 rc = rbd_slab_init();
7469 if (rc)
7470 return rc;
7471
7472 /*
7473 * The number of active work items is limited by the number of
7474 * rbd devices * queue depth, so leave @max_active at default.
7475 */
7476 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
7477 if (!rbd_wq) {
7478 rc = -ENOMEM;
7479 goto err_out_slab;
7480 }
7481
7482 if (single_major) {
7483 rbd_major = register_blkdev(0, RBD_DRV_NAME);
7484 if (rbd_major < 0) {
7485 rc = rbd_major;
7486 goto err_out_wq;
7487 }
7488 }
7489
7490 rc = rbd_sysfs_init();
7491 if (rc)
7492 goto err_out_blkdev;
7493
7494 if (single_major)
7495 pr_info("loaded (major %d)\n", rbd_major);
7496 else
7497 pr_info("loaded\n");
7498
7499 return 0;
7500
7501 err_out_blkdev:
7502 if (single_major)
7503 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7504 err_out_wq:
7505 destroy_workqueue(rbd_wq);
7506 err_out_slab:
7507 rbd_slab_exit();
7508 return rc;
7509 }
7510
7511 static void __exit rbd_exit(void)
7512 {
7513 ida_destroy(&rbd_dev_id_ida);
7514 rbd_sysfs_cleanup();
7515 if (single_major)
7516 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7517 destroy_workqueue(rbd_wq);
7518 rbd_slab_exit();
7519 }
7520
7521 module_init(rbd_init);
7522 module_exit(rbd_exit);
7523
7524 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
7525 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
7526 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
7527 /* following authorship retained from original osdblk.c */
7528 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
7529
7530 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
7531 MODULE_LICENSE("GPL");
7532