1
2 /*
3 rbd.c -- Export ceph rados objects as a Linux block device
4
5
6 based on drivers/block/osdblk.c:
7
8 Copyright 2009 Red Hat, Inc.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22
23
24
25 For usage instructions, please refer to:
26
27 Documentation/ABI/testing/sysfs-bus-rbd
28
29 */
30
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/cls_lock_client.h>
35 #include <linux/ceph/striper.h>
36 #include <linux/ceph/decode.h>
37 #include <linux/fs_parser.h>
38 #include <linux/bsearch.h>
39
40 #include <linux/kernel.h>
41 #include <linux/device.h>
42 #include <linux/module.h>
43 #include <linux/blk-mq.h>
44 #include <linux/fs.h>
45 #include <linux/blkdev.h>
46 #include <linux/slab.h>
47 #include <linux/idr.h>
48 #include <linux/workqueue.h>
49
50 #include "rbd_types.h"
51
52 #define RBD_DEBUG /* Activate rbd_assert() calls */
53
54 /*
55 * Increment the given counter and return its updated value.
56 * If the counter is already 0 it will not be incremented.
57 * If the counter is already at its maximum value returns
58 * -EINVAL without updating it.
59 */
atomic_inc_return_safe(atomic_t * v)60 static int atomic_inc_return_safe(atomic_t *v)
61 {
62 unsigned int counter;
63
64 counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
65 if (counter <= (unsigned int)INT_MAX)
66 return (int)counter;
67
68 atomic_dec(v);
69
70 return -EINVAL;
71 }
72
73 /* Decrement the counter. Return the resulting value, or -EINVAL */
atomic_dec_return_safe(atomic_t * v)74 static int atomic_dec_return_safe(atomic_t *v)
75 {
76 int counter;
77
78 counter = atomic_dec_return(v);
79 if (counter >= 0)
80 return counter;
81
82 atomic_inc(v);
83
84 return -EINVAL;
85 }
86
87 #define RBD_DRV_NAME "rbd"
88
89 #define RBD_MINORS_PER_MAJOR 256
90 #define RBD_SINGLE_MAJOR_PART_SHIFT 4
91
92 #define RBD_MAX_PARENT_CHAIN_LEN 16
93
94 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
95 #define RBD_MAX_SNAP_NAME_LEN \
96 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
97
98 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
99
100 #define RBD_SNAP_HEAD_NAME "-"
101
102 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
103
104 /* This allows a single page to hold an image name sent by OSD */
105 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
106 #define RBD_IMAGE_ID_LEN_MAX 64
107
108 #define RBD_OBJ_PREFIX_LEN_MAX 64
109
110 #define RBD_NOTIFY_TIMEOUT 5 /* seconds */
111 #define RBD_RETRY_DELAY msecs_to_jiffies(1000)
112
113 /* Feature bits */
114
115 #define RBD_FEATURE_LAYERING (1ULL<<0)
116 #define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
117 #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
118 #define RBD_FEATURE_OBJECT_MAP (1ULL<<3)
119 #define RBD_FEATURE_FAST_DIFF (1ULL<<4)
120 #define RBD_FEATURE_DEEP_FLATTEN (1ULL<<5)
121 #define RBD_FEATURE_DATA_POOL (1ULL<<7)
122 #define RBD_FEATURE_OPERATIONS (1ULL<<8)
123
124 #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
125 RBD_FEATURE_STRIPINGV2 | \
126 RBD_FEATURE_EXCLUSIVE_LOCK | \
127 RBD_FEATURE_OBJECT_MAP | \
128 RBD_FEATURE_FAST_DIFF | \
129 RBD_FEATURE_DEEP_FLATTEN | \
130 RBD_FEATURE_DATA_POOL | \
131 RBD_FEATURE_OPERATIONS)
132
133 /* Features supported by this (client software) implementation. */
134
135 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
136
137 /*
138 * An RBD device name will be "rbd#", where the "rbd" comes from
139 * RBD_DRV_NAME above, and # is a unique integer identifier.
140 */
141 #define DEV_NAME_LEN 32
142
143 /*
144 * block device image metadata (in-memory version)
145 */
146 struct rbd_image_header {
147 /* These six fields never change for a given rbd image */
148 char *object_prefix;
149 __u8 obj_order;
150 u64 stripe_unit;
151 u64 stripe_count;
152 s64 data_pool_id;
153 u64 features; /* Might be changeable someday? */
154
155 /* The remaining fields need to be updated occasionally */
156 u64 image_size;
157 struct ceph_snap_context *snapc;
158 char *snap_names; /* format 1 only */
159 u64 *snap_sizes; /* format 1 only */
160 };
161
162 /*
163 * An rbd image specification.
164 *
165 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
166 * identify an image. Each rbd_dev structure includes a pointer to
167 * an rbd_spec structure that encapsulates this identity.
168 *
169 * Each of the id's in an rbd_spec has an associated name. For a
170 * user-mapped image, the names are supplied and the id's associated
171 * with them are looked up. For a layered image, a parent image is
172 * defined by the tuple, and the names are looked up.
173 *
174 * An rbd_dev structure contains a parent_spec pointer which is
175 * non-null if the image it represents is a child in a layered
176 * image. This pointer will refer to the rbd_spec structure used
177 * by the parent rbd_dev for its own identity (i.e., the structure
178 * is shared between the parent and child).
179 *
180 * Since these structures are populated once, during the discovery
181 * phase of image construction, they are effectively immutable so
182 * we make no effort to synchronize access to them.
183 *
184 * Note that code herein does not assume the image name is known (it
185 * could be a null pointer).
186 */
187 struct rbd_spec {
188 u64 pool_id;
189 const char *pool_name;
190 const char *pool_ns; /* NULL if default, never "" */
191
192 const char *image_id;
193 const char *image_name;
194
195 u64 snap_id;
196 const char *snap_name;
197
198 struct kref kref;
199 };
200
201 /*
202 * an instance of the client. multiple devices may share an rbd client.
203 */
204 struct rbd_client {
205 struct ceph_client *client;
206 struct kref kref;
207 struct list_head node;
208 };
209
210 struct pending_result {
211 int result; /* first nonzero result */
212 int num_pending;
213 };
214
215 struct rbd_img_request;
216
217 enum obj_request_type {
218 OBJ_REQUEST_NODATA = 1,
219 OBJ_REQUEST_BIO, /* pointer into provided bio (list) */
220 OBJ_REQUEST_BVECS, /* pointer into provided bio_vec array */
221 OBJ_REQUEST_OWN_BVECS, /* private bio_vec array, doesn't own pages */
222 };
223
224 enum obj_operation_type {
225 OBJ_OP_READ = 1,
226 OBJ_OP_WRITE,
227 OBJ_OP_DISCARD,
228 OBJ_OP_ZEROOUT,
229 };
230
231 #define RBD_OBJ_FLAG_DELETION (1U << 0)
232 #define RBD_OBJ_FLAG_COPYUP_ENABLED (1U << 1)
233 #define RBD_OBJ_FLAG_COPYUP_ZEROS (1U << 2)
234 #define RBD_OBJ_FLAG_MAY_EXIST (1U << 3)
235 #define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT (1U << 4)
236
237 enum rbd_obj_read_state {
238 RBD_OBJ_READ_START = 1,
239 RBD_OBJ_READ_OBJECT,
240 RBD_OBJ_READ_PARENT,
241 };
242
243 /*
244 * Writes go through the following state machine to deal with
245 * layering:
246 *
247 * . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
248 * . | .
249 * . v .
250 * . RBD_OBJ_WRITE_READ_FROM_PARENT. . . .
251 * . | . .
252 * . v v (deep-copyup .
253 * (image . RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC . not needed) .
254 * flattened) v | . .
255 * . v . .
256 * . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . . (copyup .
257 * | not needed) v
258 * v .
259 * done . . . . . . . . . . . . . . . . . .
260 * ^
261 * |
262 * RBD_OBJ_WRITE_FLAT
263 *
264 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
265 * assert_exists guard is needed or not (in some cases it's not needed
266 * even if there is a parent).
267 */
268 enum rbd_obj_write_state {
269 RBD_OBJ_WRITE_START = 1,
270 RBD_OBJ_WRITE_PRE_OBJECT_MAP,
271 RBD_OBJ_WRITE_OBJECT,
272 __RBD_OBJ_WRITE_COPYUP,
273 RBD_OBJ_WRITE_COPYUP,
274 RBD_OBJ_WRITE_POST_OBJECT_MAP,
275 };
276
277 enum rbd_obj_copyup_state {
278 RBD_OBJ_COPYUP_START = 1,
279 RBD_OBJ_COPYUP_READ_PARENT,
280 __RBD_OBJ_COPYUP_OBJECT_MAPS,
281 RBD_OBJ_COPYUP_OBJECT_MAPS,
282 __RBD_OBJ_COPYUP_WRITE_OBJECT,
283 RBD_OBJ_COPYUP_WRITE_OBJECT,
284 };
285
286 struct rbd_obj_request {
287 struct ceph_object_extent ex;
288 unsigned int flags; /* RBD_OBJ_FLAG_* */
289 union {
290 enum rbd_obj_read_state read_state; /* for reads */
291 enum rbd_obj_write_state write_state; /* for writes */
292 };
293
294 struct rbd_img_request *img_request;
295 struct ceph_file_extent *img_extents;
296 u32 num_img_extents;
297
298 union {
299 struct ceph_bio_iter bio_pos;
300 struct {
301 struct ceph_bvec_iter bvec_pos;
302 u32 bvec_count;
303 u32 bvec_idx;
304 };
305 };
306
307 enum rbd_obj_copyup_state copyup_state;
308 struct bio_vec *copyup_bvecs;
309 u32 copyup_bvec_count;
310
311 struct list_head osd_reqs; /* w/ r_private_item */
312
313 struct mutex state_mutex;
314 struct pending_result pending;
315 struct kref kref;
316 };
317
318 enum img_req_flags {
319 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
320 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
321 };
322
323 enum rbd_img_state {
324 RBD_IMG_START = 1,
325 RBD_IMG_EXCLUSIVE_LOCK,
326 __RBD_IMG_OBJECT_REQUESTS,
327 RBD_IMG_OBJECT_REQUESTS,
328 };
329
330 struct rbd_img_request {
331 struct rbd_device *rbd_dev;
332 enum obj_operation_type op_type;
333 enum obj_request_type data_type;
334 unsigned long flags;
335 enum rbd_img_state state;
336 union {
337 u64 snap_id; /* for reads */
338 struct ceph_snap_context *snapc; /* for writes */
339 };
340 struct rbd_obj_request *obj_request; /* obj req initiator */
341
342 struct list_head lock_item;
343 struct list_head object_extents; /* obj_req.ex structs */
344
345 struct mutex state_mutex;
346 struct pending_result pending;
347 struct work_struct work;
348 int work_result;
349 };
350
351 #define for_each_obj_request(ireq, oreq) \
352 list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
353 #define for_each_obj_request_safe(ireq, oreq, n) \
354 list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
355
356 enum rbd_watch_state {
357 RBD_WATCH_STATE_UNREGISTERED,
358 RBD_WATCH_STATE_REGISTERED,
359 RBD_WATCH_STATE_ERROR,
360 };
361
362 enum rbd_lock_state {
363 RBD_LOCK_STATE_UNLOCKED,
364 RBD_LOCK_STATE_LOCKED,
365 RBD_LOCK_STATE_RELEASING,
366 };
367
368 /* WatchNotify::ClientId */
369 struct rbd_client_id {
370 u64 gid;
371 u64 handle;
372 };
373
374 struct rbd_mapping {
375 u64 size;
376 };
377
378 /*
379 * a single device
380 */
381 struct rbd_device {
382 int dev_id; /* blkdev unique id */
383
384 int major; /* blkdev assigned major */
385 int minor;
386 struct gendisk *disk; /* blkdev's gendisk and rq */
387
388 u32 image_format; /* Either 1 or 2 */
389 struct rbd_client *rbd_client;
390
391 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
392
393 spinlock_t lock; /* queue, flags, open_count */
394
395 struct rbd_image_header header;
396 unsigned long flags; /* possibly lock protected */
397 struct rbd_spec *spec;
398 struct rbd_options *opts;
399 char *config_info; /* add{,_single_major} string */
400
401 struct ceph_object_id header_oid;
402 struct ceph_object_locator header_oloc;
403
404 struct ceph_file_layout layout; /* used for all rbd requests */
405
406 struct mutex watch_mutex;
407 enum rbd_watch_state watch_state;
408 struct ceph_osd_linger_request *watch_handle;
409 u64 watch_cookie;
410 struct delayed_work watch_dwork;
411
412 struct rw_semaphore lock_rwsem;
413 enum rbd_lock_state lock_state;
414 char lock_cookie[32];
415 struct rbd_client_id owner_cid;
416 struct work_struct acquired_lock_work;
417 struct work_struct released_lock_work;
418 struct delayed_work lock_dwork;
419 struct work_struct unlock_work;
420 spinlock_t lock_lists_lock;
421 struct list_head acquiring_list;
422 struct list_head running_list;
423 struct completion acquire_wait;
424 int acquire_err;
425 struct completion releasing_wait;
426
427 spinlock_t object_map_lock;
428 u8 *object_map;
429 u64 object_map_size; /* in objects */
430 u64 object_map_flags;
431
432 struct workqueue_struct *task_wq;
433
434 struct rbd_spec *parent_spec;
435 u64 parent_overlap;
436 atomic_t parent_ref;
437 struct rbd_device *parent;
438
439 /* Block layer tags. */
440 struct blk_mq_tag_set tag_set;
441
442 /* protects updating the header */
443 struct rw_semaphore header_rwsem;
444
445 struct rbd_mapping mapping;
446
447 struct list_head node;
448
449 /* sysfs related */
450 struct device dev;
451 unsigned long open_count; /* protected by lock */
452 };
453
454 /*
455 * Flag bits for rbd_dev->flags:
456 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
457 * by rbd_dev->lock
458 */
459 enum rbd_dev_flags {
460 RBD_DEV_FLAG_EXISTS, /* rbd_dev_device_setup() ran */
461 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
462 RBD_DEV_FLAG_READONLY, /* -o ro or snapshot */
463 };
464
465 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
466
467 static LIST_HEAD(rbd_dev_list); /* devices */
468 static DEFINE_SPINLOCK(rbd_dev_list_lock);
469
470 static LIST_HEAD(rbd_client_list); /* clients */
471 static DEFINE_SPINLOCK(rbd_client_list_lock);
472
473 /* Slab caches for frequently-allocated structures */
474
475 static struct kmem_cache *rbd_img_request_cache;
476 static struct kmem_cache *rbd_obj_request_cache;
477
478 static int rbd_major;
479 static DEFINE_IDA(rbd_dev_id_ida);
480
481 static struct workqueue_struct *rbd_wq;
482
483 static struct ceph_snap_context rbd_empty_snapc = {
484 .nref = REFCOUNT_INIT(1),
485 };
486
487 /*
488 * single-major requires >= 0.75 version of userspace rbd utility.
489 */
490 static bool single_major = true;
491 module_param(single_major, bool, 0444);
492 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
493
494 static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
495 static ssize_t remove_store(struct bus_type *bus, const char *buf,
496 size_t count);
497 static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
498 size_t count);
499 static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
500 size_t count);
501 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
502
rbd_dev_id_to_minor(int dev_id)503 static int rbd_dev_id_to_minor(int dev_id)
504 {
505 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
506 }
507
minor_to_rbd_dev_id(int minor)508 static int minor_to_rbd_dev_id(int minor)
509 {
510 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
511 }
512
rbd_is_ro(struct rbd_device * rbd_dev)513 static bool rbd_is_ro(struct rbd_device *rbd_dev)
514 {
515 return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
516 }
517
rbd_is_snap(struct rbd_device * rbd_dev)518 static bool rbd_is_snap(struct rbd_device *rbd_dev)
519 {
520 return rbd_dev->spec->snap_id != CEPH_NOSNAP;
521 }
522
__rbd_is_lock_owner(struct rbd_device * rbd_dev)523 static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
524 {
525 lockdep_assert_held(&rbd_dev->lock_rwsem);
526
527 return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
528 rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
529 }
530
rbd_is_lock_owner(struct rbd_device * rbd_dev)531 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
532 {
533 bool is_lock_owner;
534
535 down_read(&rbd_dev->lock_rwsem);
536 is_lock_owner = __rbd_is_lock_owner(rbd_dev);
537 up_read(&rbd_dev->lock_rwsem);
538 return is_lock_owner;
539 }
540
supported_features_show(struct bus_type * bus,char * buf)541 static ssize_t supported_features_show(struct bus_type *bus, char *buf)
542 {
543 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
544 }
545
546 static BUS_ATTR_WO(add);
547 static BUS_ATTR_WO(remove);
548 static BUS_ATTR_WO(add_single_major);
549 static BUS_ATTR_WO(remove_single_major);
550 static BUS_ATTR_RO(supported_features);
551
552 static struct attribute *rbd_bus_attrs[] = {
553 &bus_attr_add.attr,
554 &bus_attr_remove.attr,
555 &bus_attr_add_single_major.attr,
556 &bus_attr_remove_single_major.attr,
557 &bus_attr_supported_features.attr,
558 NULL,
559 };
560
rbd_bus_is_visible(struct kobject * kobj,struct attribute * attr,int index)561 static umode_t rbd_bus_is_visible(struct kobject *kobj,
562 struct attribute *attr, int index)
563 {
564 if (!single_major &&
565 (attr == &bus_attr_add_single_major.attr ||
566 attr == &bus_attr_remove_single_major.attr))
567 return 0;
568
569 return attr->mode;
570 }
571
572 static const struct attribute_group rbd_bus_group = {
573 .attrs = rbd_bus_attrs,
574 .is_visible = rbd_bus_is_visible,
575 };
576 __ATTRIBUTE_GROUPS(rbd_bus);
577
578 static struct bus_type rbd_bus_type = {
579 .name = "rbd",
580 .bus_groups = rbd_bus_groups,
581 };
582
rbd_root_dev_release(struct device * dev)583 static void rbd_root_dev_release(struct device *dev)
584 {
585 }
586
587 static struct device rbd_root_dev = {
588 .init_name = "rbd",
589 .release = rbd_root_dev_release,
590 };
591
592 static __printf(2, 3)
rbd_warn(struct rbd_device * rbd_dev,const char * fmt,...)593 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
594 {
595 struct va_format vaf;
596 va_list args;
597
598 va_start(args, fmt);
599 vaf.fmt = fmt;
600 vaf.va = &args;
601
602 if (!rbd_dev)
603 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
604 else if (rbd_dev->disk)
605 printk(KERN_WARNING "%s: %s: %pV\n",
606 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
607 else if (rbd_dev->spec && rbd_dev->spec->image_name)
608 printk(KERN_WARNING "%s: image %s: %pV\n",
609 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
610 else if (rbd_dev->spec && rbd_dev->spec->image_id)
611 printk(KERN_WARNING "%s: id %s: %pV\n",
612 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
613 else /* punt */
614 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
615 RBD_DRV_NAME, rbd_dev, &vaf);
616 va_end(args);
617 }
618
619 #ifdef RBD_DEBUG
620 #define rbd_assert(expr) \
621 if (unlikely(!(expr))) { \
622 printk(KERN_ERR "\nAssertion failure in %s() " \
623 "at line %d:\n\n" \
624 "\trbd_assert(%s);\n\n", \
625 __func__, __LINE__, #expr); \
626 BUG(); \
627 }
628 #else /* !RBD_DEBUG */
629 # define rbd_assert(expr) ((void) 0)
630 #endif /* !RBD_DEBUG */
631
632 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
633
634 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
635 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
636 struct rbd_image_header *header);
637 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
638 u64 snap_id);
639 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
640 u8 *order, u64 *snap_size);
641 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);
642
643 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
644 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
645
646 /*
647 * Return true if nothing else is pending.
648 */
pending_result_dec(struct pending_result * pending,int * result)649 static bool pending_result_dec(struct pending_result *pending, int *result)
650 {
651 rbd_assert(pending->num_pending > 0);
652
653 if (*result && !pending->result)
654 pending->result = *result;
655 if (--pending->num_pending)
656 return false;
657
658 *result = pending->result;
659 return true;
660 }
661
rbd_open(struct block_device * bdev,fmode_t mode)662 static int rbd_open(struct block_device *bdev, fmode_t mode)
663 {
664 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
665 bool removing = false;
666
667 spin_lock_irq(&rbd_dev->lock);
668 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
669 removing = true;
670 else
671 rbd_dev->open_count++;
672 spin_unlock_irq(&rbd_dev->lock);
673 if (removing)
674 return -ENOENT;
675
676 (void) get_device(&rbd_dev->dev);
677
678 return 0;
679 }
680
rbd_release(struct gendisk * disk,fmode_t mode)681 static void rbd_release(struct gendisk *disk, fmode_t mode)
682 {
683 struct rbd_device *rbd_dev = disk->private_data;
684 unsigned long open_count_before;
685
686 spin_lock_irq(&rbd_dev->lock);
687 open_count_before = rbd_dev->open_count--;
688 spin_unlock_irq(&rbd_dev->lock);
689 rbd_assert(open_count_before > 0);
690
691 put_device(&rbd_dev->dev);
692 }
693
rbd_ioctl_set_ro(struct rbd_device * rbd_dev,unsigned long arg)694 static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
695 {
696 int ro;
697
698 if (get_user(ro, (int __user *)arg))
699 return -EFAULT;
700
701 /*
702 * Both images mapped read-only and snapshots can't be marked
703 * read-write.
704 */
705 if (!ro) {
706 if (rbd_is_ro(rbd_dev))
707 return -EROFS;
708
709 rbd_assert(!rbd_is_snap(rbd_dev));
710 }
711
712 /* Let blkdev_roset() handle it */
713 return -ENOTTY;
714 }
715
rbd_ioctl(struct block_device * bdev,fmode_t mode,unsigned int cmd,unsigned long arg)716 static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
717 unsigned int cmd, unsigned long arg)
718 {
719 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
720 int ret;
721
722 switch (cmd) {
723 case BLKROSET:
724 ret = rbd_ioctl_set_ro(rbd_dev, arg);
725 break;
726 default:
727 ret = -ENOTTY;
728 }
729
730 return ret;
731 }
732
733 #ifdef CONFIG_COMPAT
rbd_compat_ioctl(struct block_device * bdev,fmode_t mode,unsigned int cmd,unsigned long arg)734 static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
735 unsigned int cmd, unsigned long arg)
736 {
737 return rbd_ioctl(bdev, mode, cmd, arg);
738 }
739 #endif /* CONFIG_COMPAT */
740
741 static const struct block_device_operations rbd_bd_ops = {
742 .owner = THIS_MODULE,
743 .open = rbd_open,
744 .release = rbd_release,
745 .ioctl = rbd_ioctl,
746 #ifdef CONFIG_COMPAT
747 .compat_ioctl = rbd_compat_ioctl,
748 #endif
749 };
750
751 /*
752 * Initialize an rbd client instance. Success or not, this function
753 * consumes ceph_opts. Caller holds client_mutex.
754 */
rbd_client_create(struct ceph_options * ceph_opts)755 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
756 {
757 struct rbd_client *rbdc;
758 int ret = -ENOMEM;
759
760 dout("%s:\n", __func__);
761 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
762 if (!rbdc)
763 goto out_opt;
764
765 kref_init(&rbdc->kref);
766 INIT_LIST_HEAD(&rbdc->node);
767
768 rbdc->client = ceph_create_client(ceph_opts, rbdc);
769 if (IS_ERR(rbdc->client))
770 goto out_rbdc;
771 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
772
773 ret = ceph_open_session(rbdc->client);
774 if (ret < 0)
775 goto out_client;
776
777 spin_lock(&rbd_client_list_lock);
778 list_add_tail(&rbdc->node, &rbd_client_list);
779 spin_unlock(&rbd_client_list_lock);
780
781 dout("%s: rbdc %p\n", __func__, rbdc);
782
783 return rbdc;
784 out_client:
785 ceph_destroy_client(rbdc->client);
786 out_rbdc:
787 kfree(rbdc);
788 out_opt:
789 if (ceph_opts)
790 ceph_destroy_options(ceph_opts);
791 dout("%s: error %d\n", __func__, ret);
792
793 return ERR_PTR(ret);
794 }
795
__rbd_get_client(struct rbd_client * rbdc)796 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
797 {
798 kref_get(&rbdc->kref);
799
800 return rbdc;
801 }
802
803 /*
804 * Find a ceph client with specific addr and configuration. If
805 * found, bump its reference count.
806 */
rbd_client_find(struct ceph_options * ceph_opts)807 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
808 {
809 struct rbd_client *client_node;
810 bool found = false;
811
812 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
813 return NULL;
814
815 spin_lock(&rbd_client_list_lock);
816 list_for_each_entry(client_node, &rbd_client_list, node) {
817 if (!ceph_compare_options(ceph_opts, client_node->client)) {
818 __rbd_get_client(client_node);
819
820 found = true;
821 break;
822 }
823 }
824 spin_unlock(&rbd_client_list_lock);
825
826 return found ? client_node : NULL;
827 }
828
829 /*
830 * (Per device) rbd map options
831 */
832 enum {
833 Opt_queue_depth,
834 Opt_alloc_size,
835 Opt_lock_timeout,
836 /* int args above */
837 Opt_pool_ns,
838 Opt_compression_hint,
839 /* string args above */
840 Opt_read_only,
841 Opt_read_write,
842 Opt_lock_on_read,
843 Opt_exclusive,
844 Opt_notrim,
845 };
846
847 enum {
848 Opt_compression_hint_none,
849 Opt_compression_hint_compressible,
850 Opt_compression_hint_incompressible,
851 };
852
853 static const struct constant_table rbd_param_compression_hint[] = {
854 {"none", Opt_compression_hint_none},
855 {"compressible", Opt_compression_hint_compressible},
856 {"incompressible", Opt_compression_hint_incompressible},
857 {}
858 };
859
860 static const struct fs_parameter_spec rbd_parameters[] = {
861 fsparam_u32 ("alloc_size", Opt_alloc_size),
862 fsparam_enum ("compression_hint", Opt_compression_hint,
863 rbd_param_compression_hint),
864 fsparam_flag ("exclusive", Opt_exclusive),
865 fsparam_flag ("lock_on_read", Opt_lock_on_read),
866 fsparam_u32 ("lock_timeout", Opt_lock_timeout),
867 fsparam_flag ("notrim", Opt_notrim),
868 fsparam_string ("_pool_ns", Opt_pool_ns),
869 fsparam_u32 ("queue_depth", Opt_queue_depth),
870 fsparam_flag ("read_only", Opt_read_only),
871 fsparam_flag ("read_write", Opt_read_write),
872 fsparam_flag ("ro", Opt_read_only),
873 fsparam_flag ("rw", Opt_read_write),
874 {}
875 };
876
877 struct rbd_options {
878 int queue_depth;
879 int alloc_size;
880 unsigned long lock_timeout;
881 bool read_only;
882 bool lock_on_read;
883 bool exclusive;
884 bool trim;
885
886 u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
887 };
888
889 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
890 #define RBD_ALLOC_SIZE_DEFAULT (64 * 1024)
891 #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
892 #define RBD_READ_ONLY_DEFAULT false
893 #define RBD_LOCK_ON_READ_DEFAULT false
894 #define RBD_EXCLUSIVE_DEFAULT false
895 #define RBD_TRIM_DEFAULT true
896
897 struct rbd_parse_opts_ctx {
898 struct rbd_spec *spec;
899 struct ceph_options *copts;
900 struct rbd_options *opts;
901 };
902
obj_op_name(enum obj_operation_type op_type)903 static char* obj_op_name(enum obj_operation_type op_type)
904 {
905 switch (op_type) {
906 case OBJ_OP_READ:
907 return "read";
908 case OBJ_OP_WRITE:
909 return "write";
910 case OBJ_OP_DISCARD:
911 return "discard";
912 case OBJ_OP_ZEROOUT:
913 return "zeroout";
914 default:
915 return "???";
916 }
917 }
918
919 /*
920 * Destroy ceph client
921 *
922 * Caller must hold rbd_client_list_lock.
923 */
rbd_client_release(struct kref * kref)924 static void rbd_client_release(struct kref *kref)
925 {
926 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
927
928 dout("%s: rbdc %p\n", __func__, rbdc);
929 spin_lock(&rbd_client_list_lock);
930 list_del(&rbdc->node);
931 spin_unlock(&rbd_client_list_lock);
932
933 ceph_destroy_client(rbdc->client);
934 kfree(rbdc);
935 }
936
937 /*
938 * Drop reference to ceph client node. If it's not referenced anymore, release
939 * it.
940 */
rbd_put_client(struct rbd_client * rbdc)941 static void rbd_put_client(struct rbd_client *rbdc)
942 {
943 if (rbdc)
944 kref_put(&rbdc->kref, rbd_client_release);
945 }
946
947 /*
948 * Get a ceph client with specific addr and configuration, if one does
949 * not exist create it. Either way, ceph_opts is consumed by this
950 * function.
951 */
rbd_get_client(struct ceph_options * ceph_opts)952 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
953 {
954 struct rbd_client *rbdc;
955 int ret;
956
957 mutex_lock(&client_mutex);
958 rbdc = rbd_client_find(ceph_opts);
959 if (rbdc) {
960 ceph_destroy_options(ceph_opts);
961
962 /*
963 * Using an existing client. Make sure ->pg_pools is up to
964 * date before we look up the pool id in do_rbd_add().
965 */
966 ret = ceph_wait_for_latest_osdmap(rbdc->client,
967 rbdc->client->options->mount_timeout);
968 if (ret) {
969 rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
970 rbd_put_client(rbdc);
971 rbdc = ERR_PTR(ret);
972 }
973 } else {
974 rbdc = rbd_client_create(ceph_opts);
975 }
976 mutex_unlock(&client_mutex);
977
978 return rbdc;
979 }
980
rbd_image_format_valid(u32 image_format)981 static bool rbd_image_format_valid(u32 image_format)
982 {
983 return image_format == 1 || image_format == 2;
984 }
985
rbd_dev_ondisk_valid(struct rbd_image_header_ondisk * ondisk)986 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
987 {
988 size_t size;
989 u32 snap_count;
990
991 /* The header has to start with the magic rbd header text */
992 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
993 return false;
994
995 /* The bio layer requires at least sector-sized I/O */
996
997 if (ondisk->options.order < SECTOR_SHIFT)
998 return false;
999
1000 /* If we use u64 in a few spots we may be able to loosen this */
1001
1002 if (ondisk->options.order > 8 * sizeof (int) - 1)
1003 return false;
1004
1005 /*
1006 * The size of a snapshot header has to fit in a size_t, and
1007 * that limits the number of snapshots.
1008 */
1009 snap_count = le32_to_cpu(ondisk->snap_count);
1010 size = SIZE_MAX - sizeof (struct ceph_snap_context);
1011 if (snap_count > size / sizeof (__le64))
1012 return false;
1013
1014 /*
1015 * Not only that, but the size of the entire the snapshot
1016 * header must also be representable in a size_t.
1017 */
1018 size -= snap_count * sizeof (__le64);
1019 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
1020 return false;
1021
1022 return true;
1023 }
1024
1025 /*
1026 * returns the size of an object in the image
1027 */
rbd_obj_bytes(struct rbd_image_header * header)1028 static u32 rbd_obj_bytes(struct rbd_image_header *header)
1029 {
1030 return 1U << header->obj_order;
1031 }
1032
rbd_init_layout(struct rbd_device * rbd_dev)1033 static void rbd_init_layout(struct rbd_device *rbd_dev)
1034 {
1035 if (rbd_dev->header.stripe_unit == 0 ||
1036 rbd_dev->header.stripe_count == 0) {
1037 rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
1038 rbd_dev->header.stripe_count = 1;
1039 }
1040
1041 rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
1042 rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
1043 rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
1044 rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
1045 rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
1046 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
1047 }
1048
rbd_image_header_cleanup(struct rbd_image_header * header)1049 static void rbd_image_header_cleanup(struct rbd_image_header *header)
1050 {
1051 kfree(header->object_prefix);
1052 ceph_put_snap_context(header->snapc);
1053 kfree(header->snap_sizes);
1054 kfree(header->snap_names);
1055
1056 memset(header, 0, sizeof(*header));
1057 }
1058
1059 /*
1060 * Fill an rbd image header with information from the given format 1
1061 * on-disk header.
1062 */
rbd_header_from_disk(struct rbd_image_header * header,struct rbd_image_header_ondisk * ondisk,bool first_time)1063 static int rbd_header_from_disk(struct rbd_image_header *header,
1064 struct rbd_image_header_ondisk *ondisk,
1065 bool first_time)
1066 {
1067 struct ceph_snap_context *snapc;
1068 char *object_prefix = NULL;
1069 char *snap_names = NULL;
1070 u64 *snap_sizes = NULL;
1071 u32 snap_count;
1072 int ret = -ENOMEM;
1073 u32 i;
1074
1075 /* Allocate this now to avoid having to handle failure below */
1076
1077 if (first_time) {
1078 object_prefix = kstrndup(ondisk->object_prefix,
1079 sizeof(ondisk->object_prefix),
1080 GFP_KERNEL);
1081 if (!object_prefix)
1082 return -ENOMEM;
1083 }
1084
1085 /* Allocate the snapshot context and fill it in */
1086
1087 snap_count = le32_to_cpu(ondisk->snap_count);
1088 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
1089 if (!snapc)
1090 goto out_err;
1091 snapc->seq = le64_to_cpu(ondisk->snap_seq);
1092 if (snap_count) {
1093 struct rbd_image_snap_ondisk *snaps;
1094 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
1095
1096 /* We'll keep a copy of the snapshot names... */
1097
1098 if (snap_names_len > (u64)SIZE_MAX)
1099 goto out_2big;
1100 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
1101 if (!snap_names)
1102 goto out_err;
1103
1104 /* ...as well as the array of their sizes. */
1105 snap_sizes = kmalloc_array(snap_count,
1106 sizeof(*header->snap_sizes),
1107 GFP_KERNEL);
1108 if (!snap_sizes)
1109 goto out_err;
1110
1111 /*
1112 * Copy the names, and fill in each snapshot's id
1113 * and size.
1114 *
1115 * Note that rbd_dev_v1_header_info() guarantees the
1116 * ondisk buffer we're working with has
1117 * snap_names_len bytes beyond the end of the
1118 * snapshot id array, this memcpy() is safe.
1119 */
1120 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
1121 snaps = ondisk->snaps;
1122 for (i = 0; i < snap_count; i++) {
1123 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
1124 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
1125 }
1126 }
1127
1128 /* We won't fail any more, fill in the header */
1129
1130 if (first_time) {
1131 header->object_prefix = object_prefix;
1132 header->obj_order = ondisk->options.order;
1133 }
1134
1135 /* The remaining fields always get updated (when we refresh) */
1136
1137 header->image_size = le64_to_cpu(ondisk->image_size);
1138 header->snapc = snapc;
1139 header->snap_names = snap_names;
1140 header->snap_sizes = snap_sizes;
1141
1142 return 0;
1143 out_2big:
1144 ret = -EIO;
1145 out_err:
1146 kfree(snap_sizes);
1147 kfree(snap_names);
1148 ceph_put_snap_context(snapc);
1149 kfree(object_prefix);
1150
1151 return ret;
1152 }
1153
_rbd_dev_v1_snap_name(struct rbd_device * rbd_dev,u32 which)1154 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1155 {
1156 const char *snap_name;
1157
1158 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1159
1160 /* Skip over names until we find the one we are looking for */
1161
1162 snap_name = rbd_dev->header.snap_names;
1163 while (which--)
1164 snap_name += strlen(snap_name) + 1;
1165
1166 return kstrdup(snap_name, GFP_KERNEL);
1167 }
1168
1169 /*
1170 * Snapshot id comparison function for use with qsort()/bsearch().
1171 * Note that result is for snapshots in *descending* order.
1172 */
snapid_compare_reverse(const void * s1,const void * s2)1173 static int snapid_compare_reverse(const void *s1, const void *s2)
1174 {
1175 u64 snap_id1 = *(u64 *)s1;
1176 u64 snap_id2 = *(u64 *)s2;
1177
1178 if (snap_id1 < snap_id2)
1179 return 1;
1180 return snap_id1 == snap_id2 ? 0 : -1;
1181 }
1182
1183 /*
1184 * Search a snapshot context to see if the given snapshot id is
1185 * present.
1186 *
1187 * Returns the position of the snapshot id in the array if it's found,
1188 * or BAD_SNAP_INDEX otherwise.
1189 *
1190 * Note: The snapshot array is in kept sorted (by the osd) in
1191 * reverse order, highest snapshot id first.
1192 */
rbd_dev_snap_index(struct rbd_device * rbd_dev,u64 snap_id)1193 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1194 {
1195 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1196 u64 *found;
1197
1198 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1199 sizeof (snap_id), snapid_compare_reverse);
1200
1201 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
1202 }
1203
rbd_dev_v1_snap_name(struct rbd_device * rbd_dev,u64 snap_id)1204 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1205 u64 snap_id)
1206 {
1207 u32 which;
1208 const char *snap_name;
1209
1210 which = rbd_dev_snap_index(rbd_dev, snap_id);
1211 if (which == BAD_SNAP_INDEX)
1212 return ERR_PTR(-ENOENT);
1213
1214 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1215 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1216 }
1217
rbd_snap_name(struct rbd_device * rbd_dev,u64 snap_id)1218 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1219 {
1220 if (snap_id == CEPH_NOSNAP)
1221 return RBD_SNAP_HEAD_NAME;
1222
1223 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1224 if (rbd_dev->image_format == 1)
1225 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1226
1227 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1228 }
1229
rbd_snap_size(struct rbd_device * rbd_dev,u64 snap_id,u64 * snap_size)1230 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1231 u64 *snap_size)
1232 {
1233 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1234 if (snap_id == CEPH_NOSNAP) {
1235 *snap_size = rbd_dev->header.image_size;
1236 } else if (rbd_dev->image_format == 1) {
1237 u32 which;
1238
1239 which = rbd_dev_snap_index(rbd_dev, snap_id);
1240 if (which == BAD_SNAP_INDEX)
1241 return -ENOENT;
1242
1243 *snap_size = rbd_dev->header.snap_sizes[which];
1244 } else {
1245 u64 size = 0;
1246 int ret;
1247
1248 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1249 if (ret)
1250 return ret;
1251
1252 *snap_size = size;
1253 }
1254 return 0;
1255 }
1256
rbd_dev_mapping_set(struct rbd_device * rbd_dev)1257 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1258 {
1259 u64 snap_id = rbd_dev->spec->snap_id;
1260 u64 size = 0;
1261 int ret;
1262
1263 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1264 if (ret)
1265 return ret;
1266
1267 rbd_dev->mapping.size = size;
1268 return 0;
1269 }
1270
rbd_dev_mapping_clear(struct rbd_device * rbd_dev)1271 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1272 {
1273 rbd_dev->mapping.size = 0;
1274 }
1275
zero_bvec(struct bio_vec * bv)1276 static void zero_bvec(struct bio_vec *bv)
1277 {
1278 void *buf;
1279 unsigned long flags;
1280
1281 buf = bvec_kmap_irq(bv, &flags);
1282 memset(buf, 0, bv->bv_len);
1283 flush_dcache_page(bv->bv_page);
1284 bvec_kunmap_irq(buf, &flags);
1285 }
1286
zero_bios(struct ceph_bio_iter * bio_pos,u32 off,u32 bytes)1287 static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
1288 {
1289 struct ceph_bio_iter it = *bio_pos;
1290
1291 ceph_bio_iter_advance(&it, off);
1292 ceph_bio_iter_advance_step(&it, bytes, ({
1293 zero_bvec(&bv);
1294 }));
1295 }
1296
zero_bvecs(struct ceph_bvec_iter * bvec_pos,u32 off,u32 bytes)1297 static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
1298 {
1299 struct ceph_bvec_iter it = *bvec_pos;
1300
1301 ceph_bvec_iter_advance(&it, off);
1302 ceph_bvec_iter_advance_step(&it, bytes, ({
1303 zero_bvec(&bv);
1304 }));
1305 }
1306
1307 /*
1308 * Zero a range in @obj_req data buffer defined by a bio (list) or
1309 * (private) bio_vec array.
1310 *
1311 * @off is relative to the start of the data buffer.
1312 */
rbd_obj_zero_range(struct rbd_obj_request * obj_req,u32 off,u32 bytes)1313 static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
1314 u32 bytes)
1315 {
1316 dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);
1317
1318 switch (obj_req->img_request->data_type) {
1319 case OBJ_REQUEST_BIO:
1320 zero_bios(&obj_req->bio_pos, off, bytes);
1321 break;
1322 case OBJ_REQUEST_BVECS:
1323 case OBJ_REQUEST_OWN_BVECS:
1324 zero_bvecs(&obj_req->bvec_pos, off, bytes);
1325 break;
1326 default:
1327 BUG();
1328 }
1329 }
1330
1331 static void rbd_obj_request_destroy(struct kref *kref);
rbd_obj_request_put(struct rbd_obj_request * obj_request)1332 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1333 {
1334 rbd_assert(obj_request != NULL);
1335 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1336 kref_read(&obj_request->kref));
1337 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1338 }
1339
rbd_img_obj_request_add(struct rbd_img_request * img_request,struct rbd_obj_request * obj_request)1340 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1341 struct rbd_obj_request *obj_request)
1342 {
1343 rbd_assert(obj_request->img_request == NULL);
1344
1345 /* Image request now owns object's original reference */
1346 obj_request->img_request = img_request;
1347 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1348 }
1349
rbd_img_obj_request_del(struct rbd_img_request * img_request,struct rbd_obj_request * obj_request)1350 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1351 struct rbd_obj_request *obj_request)
1352 {
1353 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1354 list_del(&obj_request->ex.oe_item);
1355 rbd_assert(obj_request->img_request == img_request);
1356 rbd_obj_request_put(obj_request);
1357 }
1358
rbd_osd_submit(struct ceph_osd_request * osd_req)1359 static void rbd_osd_submit(struct ceph_osd_request *osd_req)
1360 {
1361 struct rbd_obj_request *obj_req = osd_req->r_priv;
1362
1363 dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
1364 __func__, osd_req, obj_req, obj_req->ex.oe_objno,
1365 obj_req->ex.oe_off, obj_req->ex.oe_len);
1366 ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
1367 }
1368
1369 /*
1370 * The default/initial value for all image request flags is 0. Each
1371 * is conditionally set to 1 at image request initialization time
1372 * and currently never change thereafter.
1373 */
img_request_layered_set(struct rbd_img_request * img_request)1374 static void img_request_layered_set(struct rbd_img_request *img_request)
1375 {
1376 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1377 }
1378
img_request_layered_test(struct rbd_img_request * img_request)1379 static bool img_request_layered_test(struct rbd_img_request *img_request)
1380 {
1381 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1382 }
1383
rbd_obj_is_entire(struct rbd_obj_request * obj_req)1384 static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
1385 {
1386 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1387
1388 return !obj_req->ex.oe_off &&
1389 obj_req->ex.oe_len == rbd_dev->layout.object_size;
1390 }
1391
rbd_obj_is_tail(struct rbd_obj_request * obj_req)1392 static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
1393 {
1394 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1395
1396 return obj_req->ex.oe_off + obj_req->ex.oe_len ==
1397 rbd_dev->layout.object_size;
1398 }
1399
1400 /*
1401 * Must be called after rbd_obj_calc_img_extents().
1402 */
rbd_obj_set_copyup_enabled(struct rbd_obj_request * obj_req)1403 static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
1404 {
1405 rbd_assert(obj_req->img_request->snapc);
1406
1407 if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
1408 dout("%s %p objno %llu discard\n", __func__, obj_req,
1409 obj_req->ex.oe_objno);
1410 return;
1411 }
1412
1413 if (!obj_req->num_img_extents) {
1414 dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
1415 obj_req->ex.oe_objno);
1416 return;
1417 }
1418
1419 if (rbd_obj_is_entire(obj_req) &&
1420 !obj_req->img_request->snapc->num_snaps) {
1421 dout("%s %p objno %llu entire\n", __func__, obj_req,
1422 obj_req->ex.oe_objno);
1423 return;
1424 }
1425
1426 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
1427 }
1428
rbd_obj_img_extents_bytes(struct rbd_obj_request * obj_req)1429 static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
1430 {
1431 return ceph_file_extents_bytes(obj_req->img_extents,
1432 obj_req->num_img_extents);
1433 }
1434
rbd_img_is_write(struct rbd_img_request * img_req)1435 static bool rbd_img_is_write(struct rbd_img_request *img_req)
1436 {
1437 switch (img_req->op_type) {
1438 case OBJ_OP_READ:
1439 return false;
1440 case OBJ_OP_WRITE:
1441 case OBJ_OP_DISCARD:
1442 case OBJ_OP_ZEROOUT:
1443 return true;
1444 default:
1445 BUG();
1446 }
1447 }
1448
rbd_osd_req_callback(struct ceph_osd_request * osd_req)1449 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
1450 {
1451 struct rbd_obj_request *obj_req = osd_req->r_priv;
1452 int result;
1453
1454 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1455 osd_req->r_result, obj_req);
1456
1457 /*
1458 * Writes aren't allowed to return a data payload. In some
1459 * guarded write cases (e.g. stat + zero on an empty object)
1460 * a stat response makes it through, but we don't care.
1461 */
1462 if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
1463 result = 0;
1464 else
1465 result = osd_req->r_result;
1466
1467 rbd_obj_handle_request(obj_req, result);
1468 }
1469
rbd_osd_format_read(struct ceph_osd_request * osd_req)1470 static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
1471 {
1472 struct rbd_obj_request *obj_request = osd_req->r_priv;
1473 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1474 struct ceph_options *opt = rbd_dev->rbd_client->client->options;
1475
1476 osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
1477 osd_req->r_snapid = obj_request->img_request->snap_id;
1478 }
1479
rbd_osd_format_write(struct ceph_osd_request * osd_req)1480 static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
1481 {
1482 struct rbd_obj_request *obj_request = osd_req->r_priv;
1483
1484 osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
1485 ktime_get_real_ts64(&osd_req->r_mtime);
1486 osd_req->r_data_offset = obj_request->ex.oe_off;
1487 }
1488
1489 static struct ceph_osd_request *
__rbd_obj_add_osd_request(struct rbd_obj_request * obj_req,struct ceph_snap_context * snapc,int num_ops)1490 __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
1491 struct ceph_snap_context *snapc, int num_ops)
1492 {
1493 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1494 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1495 struct ceph_osd_request *req;
1496 const char *name_format = rbd_dev->image_format == 1 ?
1497 RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
1498 int ret;
1499
1500 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
1501 if (!req)
1502 return ERR_PTR(-ENOMEM);
1503
1504 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
1505 req->r_callback = rbd_osd_req_callback;
1506 req->r_priv = obj_req;
1507
1508 /*
1509 * Data objects may be stored in a separate pool, but always in
1510 * the same namespace in that pool as the header in its pool.
1511 */
1512 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
1513 req->r_base_oloc.pool = rbd_dev->layout.pool_id;
1514
1515 ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
1516 rbd_dev->header.object_prefix,
1517 obj_req->ex.oe_objno);
1518 if (ret)
1519 return ERR_PTR(ret);
1520
1521 return req;
1522 }
1523
1524 static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request * obj_req,int num_ops)1525 rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
1526 {
1527 rbd_assert(obj_req->img_request->snapc);
1528 return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
1529 num_ops);
1530 }
1531
rbd_obj_request_create(void)1532 static struct rbd_obj_request *rbd_obj_request_create(void)
1533 {
1534 struct rbd_obj_request *obj_request;
1535
1536 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
1537 if (!obj_request)
1538 return NULL;
1539
1540 ceph_object_extent_init(&obj_request->ex);
1541 INIT_LIST_HEAD(&obj_request->osd_reqs);
1542 mutex_init(&obj_request->state_mutex);
1543 kref_init(&obj_request->kref);
1544
1545 dout("%s %p\n", __func__, obj_request);
1546 return obj_request;
1547 }
1548
rbd_obj_request_destroy(struct kref * kref)1549 static void rbd_obj_request_destroy(struct kref *kref)
1550 {
1551 struct rbd_obj_request *obj_request;
1552 struct ceph_osd_request *osd_req;
1553 u32 i;
1554
1555 obj_request = container_of(kref, struct rbd_obj_request, kref);
1556
1557 dout("%s: obj %p\n", __func__, obj_request);
1558
1559 while (!list_empty(&obj_request->osd_reqs)) {
1560 osd_req = list_first_entry(&obj_request->osd_reqs,
1561 struct ceph_osd_request, r_private_item);
1562 list_del_init(&osd_req->r_private_item);
1563 ceph_osdc_put_request(osd_req);
1564 }
1565
1566 switch (obj_request->img_request->data_type) {
1567 case OBJ_REQUEST_NODATA:
1568 case OBJ_REQUEST_BIO:
1569 case OBJ_REQUEST_BVECS:
1570 break; /* Nothing to do */
1571 case OBJ_REQUEST_OWN_BVECS:
1572 kfree(obj_request->bvec_pos.bvecs);
1573 break;
1574 default:
1575 BUG();
1576 }
1577
1578 kfree(obj_request->img_extents);
1579 if (obj_request->copyup_bvecs) {
1580 for (i = 0; i < obj_request->copyup_bvec_count; i++) {
1581 if (obj_request->copyup_bvecs[i].bv_page)
1582 __free_page(obj_request->copyup_bvecs[i].bv_page);
1583 }
1584 kfree(obj_request->copyup_bvecs);
1585 }
1586
1587 kmem_cache_free(rbd_obj_request_cache, obj_request);
1588 }
1589
1590 /* It's OK to call this for a device with no parent */
1591
1592 static void rbd_spec_put(struct rbd_spec *spec);
rbd_dev_unparent(struct rbd_device * rbd_dev)1593 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1594 {
1595 rbd_dev_remove_parent(rbd_dev);
1596 rbd_spec_put(rbd_dev->parent_spec);
1597 rbd_dev->parent_spec = NULL;
1598 rbd_dev->parent_overlap = 0;
1599 }
1600
1601 /*
1602 * Parent image reference counting is used to determine when an
1603 * image's parent fields can be safely torn down--after there are no
1604 * more in-flight requests to the parent image. When the last
1605 * reference is dropped, cleaning them up is safe.
1606 */
rbd_dev_parent_put(struct rbd_device * rbd_dev)1607 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1608 {
1609 int counter;
1610
1611 if (!rbd_dev->parent_spec)
1612 return;
1613
1614 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1615 if (counter > 0)
1616 return;
1617
1618 /* Last reference; clean up parent data structures */
1619
1620 if (!counter)
1621 rbd_dev_unparent(rbd_dev);
1622 else
1623 rbd_warn(rbd_dev, "parent reference underflow");
1624 }
1625
1626 /*
1627 * If an image has a non-zero parent overlap, get a reference to its
1628 * parent.
1629 *
1630 * Returns true if the rbd device has a parent with a non-zero
1631 * overlap and a reference for it was successfully taken, or
1632 * false otherwise.
1633 */
rbd_dev_parent_get(struct rbd_device * rbd_dev)1634 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1635 {
1636 int counter = 0;
1637
1638 if (!rbd_dev->parent_spec)
1639 return false;
1640
1641 if (rbd_dev->parent_overlap)
1642 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1643
1644 if (counter < 0)
1645 rbd_warn(rbd_dev, "parent reference overflow");
1646
1647 return counter > 0;
1648 }
1649
rbd_img_request_init(struct rbd_img_request * img_request,struct rbd_device * rbd_dev,enum obj_operation_type op_type)1650 static void rbd_img_request_init(struct rbd_img_request *img_request,
1651 struct rbd_device *rbd_dev,
1652 enum obj_operation_type op_type)
1653 {
1654 memset(img_request, 0, sizeof(*img_request));
1655
1656 img_request->rbd_dev = rbd_dev;
1657 img_request->op_type = op_type;
1658
1659 INIT_LIST_HEAD(&img_request->lock_item);
1660 INIT_LIST_HEAD(&img_request->object_extents);
1661 mutex_init(&img_request->state_mutex);
1662 }
1663
1664 /*
1665 * Only snap_id is captured here, for reads. For writes, snapshot
1666 * context is captured in rbd_img_object_requests() after exclusive
1667 * lock is ensured to be held.
1668 */
rbd_img_capture_header(struct rbd_img_request * img_req)1669 static void rbd_img_capture_header(struct rbd_img_request *img_req)
1670 {
1671 struct rbd_device *rbd_dev = img_req->rbd_dev;
1672
1673 lockdep_assert_held(&rbd_dev->header_rwsem);
1674
1675 if (!rbd_img_is_write(img_req))
1676 img_req->snap_id = rbd_dev->spec->snap_id;
1677
1678 if (rbd_dev_parent_get(rbd_dev))
1679 img_request_layered_set(img_req);
1680 }
1681
rbd_img_request_destroy(struct rbd_img_request * img_request)1682 static void rbd_img_request_destroy(struct rbd_img_request *img_request)
1683 {
1684 struct rbd_obj_request *obj_request;
1685 struct rbd_obj_request *next_obj_request;
1686
1687 dout("%s: img %p\n", __func__, img_request);
1688
1689 WARN_ON(!list_empty(&img_request->lock_item));
1690 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1691 rbd_img_obj_request_del(img_request, obj_request);
1692
1693 if (img_request_layered_test(img_request))
1694 rbd_dev_parent_put(img_request->rbd_dev);
1695
1696 if (rbd_img_is_write(img_request))
1697 ceph_put_snap_context(img_request->snapc);
1698
1699 if (test_bit(IMG_REQ_CHILD, &img_request->flags))
1700 kmem_cache_free(rbd_img_request_cache, img_request);
1701 }
1702
1703 #define BITS_PER_OBJ 2
1704 #define OBJS_PER_BYTE (BITS_PER_BYTE / BITS_PER_OBJ)
1705 #define OBJ_MASK ((1 << BITS_PER_OBJ) - 1)
1706
__rbd_object_map_index(struct rbd_device * rbd_dev,u64 objno,u64 * index,u8 * shift)1707 static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
1708 u64 *index, u8 *shift)
1709 {
1710 u32 off;
1711
1712 rbd_assert(objno < rbd_dev->object_map_size);
1713 *index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
1714 *shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
1715 }
1716
__rbd_object_map_get(struct rbd_device * rbd_dev,u64 objno)1717 static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1718 {
1719 u64 index;
1720 u8 shift;
1721
1722 lockdep_assert_held(&rbd_dev->object_map_lock);
1723 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1724 return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
1725 }
1726
__rbd_object_map_set(struct rbd_device * rbd_dev,u64 objno,u8 val)1727 static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
1728 {
1729 u64 index;
1730 u8 shift;
1731 u8 *p;
1732
1733 lockdep_assert_held(&rbd_dev->object_map_lock);
1734 rbd_assert(!(val & ~OBJ_MASK));
1735
1736 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1737 p = &rbd_dev->object_map[index];
1738 *p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
1739 }
1740
rbd_object_map_get(struct rbd_device * rbd_dev,u64 objno)1741 static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1742 {
1743 u8 state;
1744
1745 spin_lock(&rbd_dev->object_map_lock);
1746 state = __rbd_object_map_get(rbd_dev, objno);
1747 spin_unlock(&rbd_dev->object_map_lock);
1748 return state;
1749 }
1750
use_object_map(struct rbd_device * rbd_dev)1751 static bool use_object_map(struct rbd_device *rbd_dev)
1752 {
1753 /*
1754 * An image mapped read-only can't use the object map -- it isn't
1755 * loaded because the header lock isn't acquired. Someone else can
1756 * write to the image and update the object map behind our back.
1757 *
1758 * A snapshot can't be written to, so using the object map is always
1759 * safe.
1760 */
1761 if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
1762 return false;
1763
1764 return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
1765 !(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
1766 }
1767
rbd_object_map_may_exist(struct rbd_device * rbd_dev,u64 objno)1768 static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
1769 {
1770 u8 state;
1771
1772 /* fall back to default logic if object map is disabled or invalid */
1773 if (!use_object_map(rbd_dev))
1774 return true;
1775
1776 state = rbd_object_map_get(rbd_dev, objno);
1777 return state != OBJECT_NONEXISTENT;
1778 }
1779
rbd_object_map_name(struct rbd_device * rbd_dev,u64 snap_id,struct ceph_object_id * oid)1780 static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
1781 struct ceph_object_id *oid)
1782 {
1783 if (snap_id == CEPH_NOSNAP)
1784 ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
1785 rbd_dev->spec->image_id);
1786 else
1787 ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
1788 rbd_dev->spec->image_id, snap_id);
1789 }
1790
rbd_object_map_lock(struct rbd_device * rbd_dev)1791 static int rbd_object_map_lock(struct rbd_device *rbd_dev)
1792 {
1793 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1794 CEPH_DEFINE_OID_ONSTACK(oid);
1795 u8 lock_type;
1796 char *lock_tag;
1797 struct ceph_locker *lockers;
1798 u32 num_lockers;
1799 bool broke_lock = false;
1800 int ret;
1801
1802 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1803
1804 again:
1805 ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1806 CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
1807 if (ret != -EBUSY || broke_lock) {
1808 if (ret == -EEXIST)
1809 ret = 0; /* already locked by myself */
1810 if (ret)
1811 rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
1812 return ret;
1813 }
1814
1815 ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
1816 RBD_LOCK_NAME, &lock_type, &lock_tag,
1817 &lockers, &num_lockers);
1818 if (ret) {
1819 if (ret == -ENOENT)
1820 goto again;
1821
1822 rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
1823 return ret;
1824 }
1825
1826 kfree(lock_tag);
1827 if (num_lockers == 0)
1828 goto again;
1829
1830 rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
1831 ENTITY_NAME(lockers[0].id.name));
1832
1833 ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
1834 RBD_LOCK_NAME, lockers[0].id.cookie,
1835 &lockers[0].id.name);
1836 ceph_free_lockers(lockers, num_lockers);
1837 if (ret) {
1838 if (ret == -ENOENT)
1839 goto again;
1840
1841 rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
1842 return ret;
1843 }
1844
1845 broke_lock = true;
1846 goto again;
1847 }
1848
1849 static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
1850 {
1851 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1852 CEPH_DEFINE_OID_ONSTACK(oid);
1853 int ret;
1854
1855 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1856
1857 ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1858 "");
1859 if (ret && ret != -ENOENT)
1860 rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
1861 }
1862
1863 static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
1864 {
1865 u8 struct_v;
1866 u32 struct_len;
1867 u32 header_len;
1868 void *header_end;
1869 int ret;
1870
1871 ceph_decode_32_safe(p, end, header_len, e_inval);
1872 header_end = *p + header_len;
1873
1874 ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
1875 &struct_len);
1876 if (ret)
1877 return ret;
1878
1879 ceph_decode_64_safe(p, end, *object_map_size, e_inval);
1880
1881 *p = header_end;
1882 return 0;
1883
1884 e_inval:
1885 return -EINVAL;
1886 }
1887
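/*
 * Fetch the object map for the mapped snapshot (or HEAD) via the "rbd"
 * class object_map_load method, verify that its size matches the number
 * of objects in the image and copy it into rbd_dev->object_map.
 */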
1888 static int __rbd_object_map_load(struct rbd_device *rbd_dev)
1889 {
1890 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1891 CEPH_DEFINE_OID_ONSTACK(oid);
1892 struct page **pages;
1893 void *p, *end;
1894 size_t reply_len;
1895 u64 num_objects;
1896 u64 object_map_bytes;
1897 u64 object_map_size;
1898 int num_pages;
1899 int ret;
1900
1901 rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);
1902
1903 num_objects = ceph_get_num_objects(&rbd_dev->layout,
1904 rbd_dev->mapping.size);
1905 object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
1906 BITS_PER_BYTE);
1907 num_pages = calc_pages_for(0, object_map_bytes) + 1;
1908 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1909 if (IS_ERR(pages))
1910 return PTR_ERR(pages);
1911
1912 reply_len = num_pages * PAGE_SIZE;
1913 rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
1914 ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
1915 "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
1916 NULL, 0, pages, &reply_len);
1917 if (ret)
1918 goto out;
1919
1920 p = page_address(pages[0]);
1921 end = p + min(reply_len, (size_t)PAGE_SIZE);
1922 ret = decode_object_map_header(&p, end, &object_map_size);
1923 if (ret)
1924 goto out;
1925
1926 if (object_map_size != num_objects) {
1927 rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
1928 object_map_size, num_objects);
1929 ret = -EINVAL;
1930 goto out;
1931 }
1932
1933 if (offset_in_page(p) + object_map_bytes > reply_len) {
1934 ret = -EINVAL;
1935 goto out;
1936 }
1937
1938 rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
1939 if (!rbd_dev->object_map) {
1940 ret = -ENOMEM;
1941 goto out;
1942 }
1943
1944 rbd_dev->object_map_size = object_map_size;
1945 ceph_copy_from_page_vector(pages, rbd_dev->object_map,
1946 offset_in_page(p), object_map_bytes);
1947
1948 out:
1949 ceph_release_page_vector(pages, num_pages);
1950 return ret;
1951 }
1952
1953 static void rbd_object_map_free(struct rbd_device *rbd_dev)
1954 {
1955 kvfree(rbd_dev->object_map);
1956 rbd_dev->object_map = NULL;
1957 rbd_dev->object_map_size = 0;
1958 }
1959
1960 static int rbd_object_map_load(struct rbd_device *rbd_dev)
1961 {
1962 int ret;
1963
1964 ret = __rbd_object_map_load(rbd_dev);
1965 if (ret)
1966 return ret;
1967
1968 ret = rbd_dev_v2_get_flags(rbd_dev);
1969 if (ret) {
1970 rbd_object_map_free(rbd_dev);
1971 return ret;
1972 }
1973
1974 if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
1975 rbd_warn(rbd_dev, "object map is invalid");
1976
1977 return 0;
1978 }
1979
1980 static int rbd_object_map_open(struct rbd_device *rbd_dev)
1981 {
1982 int ret;
1983
1984 ret = rbd_object_map_lock(rbd_dev);
1985 if (ret)
1986 return ret;
1987
1988 ret = rbd_object_map_load(rbd_dev);
1989 if (ret) {
1990 rbd_object_map_unlock(rbd_dev);
1991 return ret;
1992 }
1993
1994 return 0;
1995 }
1996
1997 static void rbd_object_map_close(struct rbd_device *rbd_dev)
1998 {
1999 rbd_object_map_free(rbd_dev);
2000 rbd_object_map_unlock(rbd_dev);
2001 }
2002
2003 /*
2004 * This function needs snap_id (or more precisely just something to
2005 * distinguish between HEAD and snapshot object maps), new_state and
2006 * current_state that were passed to rbd_object_map_update().
2007 *
2008 * To avoid allocating and stashing a context we piggyback on the OSD
2009 * request. A HEAD update has two ops (assert_locked). For new_state
2010 * and current_state we decode our own object_map_update op, encoded in
2011 * rbd_cls_object_map_update().
2012 */
2013 static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
2014 struct ceph_osd_request *osd_req)
2015 {
2016 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2017 struct ceph_osd_data *osd_data;
2018 u64 objno;
2019 u8 state, new_state, current_state;
2020 bool has_current_state;
2021 void *p;
2022
2023 if (osd_req->r_result)
2024 return osd_req->r_result;
2025
2026 /*
2027 * Nothing to do for a snapshot object map.
2028 */
2029 if (osd_req->r_num_ops == 1)
2030 return 0;
2031
2032 /*
2033 * Update in-memory HEAD object map.
2034 */
2035 rbd_assert(osd_req->r_num_ops == 2);
2036 osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
2037 rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);
2038
2039 p = page_address(osd_data->pages[0]);
2040 objno = ceph_decode_64(&p);
2041 rbd_assert(objno == obj_req->ex.oe_objno);
2042 rbd_assert(ceph_decode_64(&p) == objno + 1);
2043 new_state = ceph_decode_8(&p);
2044 has_current_state = ceph_decode_8(&p);
2045 if (has_current_state)
2046 current_state = ceph_decode_8(&p);
2047
2048 spin_lock(&rbd_dev->object_map_lock);
2049 state = __rbd_object_map_get(rbd_dev, objno);
2050 if (!has_current_state || current_state == state ||
2051 (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
2052 __rbd_object_map_set(rbd_dev, objno, new_state);
2053 spin_unlock(&rbd_dev->object_map_lock);
2054
2055 return 0;
2056 }
2057
2058 static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
2059 {
2060 struct rbd_obj_request *obj_req = osd_req->r_priv;
2061 int result;
2062
2063 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
2064 osd_req->r_result, obj_req);
2065
2066 result = rbd_object_map_update_finish(obj_req, osd_req);
2067 rbd_obj_handle_request(obj_req, result);
2068 }
2069
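/*
 * Would setting @objno to @new_state actually change the HEAD object
 * map?  Marking a nonexistent object OBJECT_PENDING is a no-op, and a
 * transition to OBJECT_NONEXISTENT is only made from OBJECT_PENDING.
 */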
2070 static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
2071 {
2072 u8 state = rbd_object_map_get(rbd_dev, objno);
2073
2074 if (state == new_state ||
2075 (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
2076 (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
2077 return false;
2078
2079 return true;
2080 }
2081
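/*
 * Encode an object_map_update call for the single-object range
 * [objno, objno + 1): new_state plus an optional current_state.  The
 * same encoding is decoded back in rbd_object_map_update_finish().
 */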
2082 static int rbd_cls_object_map_update(struct ceph_osd_request *req,
2083 int which, u64 objno, u8 new_state,
2084 const u8 *current_state)
2085 {
2086 struct page **pages;
2087 void *p, *start;
2088 int ret;
2089
2090 ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
2091 if (ret)
2092 return ret;
2093
2094 pages = ceph_alloc_page_vector(1, GFP_NOIO);
2095 if (IS_ERR(pages))
2096 return PTR_ERR(pages);
2097
2098 p = start = page_address(pages[0]);
2099 ceph_encode_64(&p, objno);
2100 ceph_encode_64(&p, objno + 1);
2101 ceph_encode_8(&p, new_state);
2102 if (current_state) {
2103 ceph_encode_8(&p, 1);
2104 ceph_encode_8(&p, *current_state);
2105 } else {
2106 ceph_encode_8(&p, 0);
2107 }
2108
2109 osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
2110 false, true);
2111 return 0;
2112 }
2113
2114 /*
2115 * Return:
2116 * 0 - object map update sent
2117 * 1 - object map update isn't needed
2118 * <0 - error
2119 */
2120 static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
2121 u8 new_state, const u8 *current_state)
2122 {
2123 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2124 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2125 struct ceph_osd_request *req;
2126 int num_ops = 1;
2127 int which = 0;
2128 int ret;
2129
2130 if (snap_id == CEPH_NOSNAP) {
2131 if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
2132 return 1;
2133
2134 num_ops++; /* assert_locked */
2135 }
2136
2137 req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
2138 if (!req)
2139 return -ENOMEM;
2140
2141 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
2142 req->r_callback = rbd_object_map_callback;
2143 req->r_priv = obj_req;
2144
2145 rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
2146 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
2147 req->r_flags = CEPH_OSD_FLAG_WRITE;
2148 ktime_get_real_ts64(&req->r_mtime);
2149
2150 if (snap_id == CEPH_NOSNAP) {
2151 /*
2152 * Protect against possible race conditions during lock
2153 * ownership transitions.
2154 */
2155 ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
2156 CEPH_CLS_LOCK_EXCLUSIVE, "", "");
2157 if (ret)
2158 return ret;
2159 }
2160
2161 ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
2162 new_state, current_state);
2163 if (ret)
2164 return ret;
2165
2166 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
2167 if (ret)
2168 return ret;
2169
2170 ceph_osdc_start_request(osdc, req, false);
2171 return 0;
2172 }
2173
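/*
 * Clip the image extents to the parent overlap: drop extents that lie
 * entirely beyond it and trim a final extent that straddles it.
 */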
2174 static void prune_extents(struct ceph_file_extent *img_extents,
2175 u32 *num_img_extents, u64 overlap)
2176 {
2177 u32 cnt = *num_img_extents;
2178
2179 /* drop extents completely beyond the overlap */
2180 while (cnt && img_extents[cnt - 1].fe_off >= overlap)
2181 cnt--;
2182
2183 if (cnt) {
2184 struct ceph_file_extent *ex = &img_extents[cnt - 1];
2185
2186 /* trim final overlapping extent */
2187 if (ex->fe_off + ex->fe_len > overlap)
2188 ex->fe_len = overlap - ex->fe_off;
2189 }
2190
2191 *num_img_extents = cnt;
2192 }
2193
2194 /*
2195 * Determine the byte range(s) covered by either just the object extent
2196 * or the entire object in the parent image.
2197 */
2198 static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
2199 bool entire)
2200 {
2201 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2202 int ret;
2203
2204 if (!rbd_dev->parent_overlap)
2205 return 0;
2206
2207 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
2208 entire ? 0 : obj_req->ex.oe_off,
2209 entire ? rbd_dev->layout.object_size :
2210 obj_req->ex.oe_len,
2211 &obj_req->img_extents,
2212 &obj_req->num_img_extents);
2213 if (ret)
2214 return ret;
2215
2216 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2217 rbd_dev->parent_overlap);
2218 return 0;
2219 }
2220
2221 static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
2222 {
2223 struct rbd_obj_request *obj_req = osd_req->r_priv;
2224
2225 switch (obj_req->img_request->data_type) {
2226 case OBJ_REQUEST_BIO:
2227 osd_req_op_extent_osd_data_bio(osd_req, which,
2228 &obj_req->bio_pos,
2229 obj_req->ex.oe_len);
2230 break;
2231 case OBJ_REQUEST_BVECS:
2232 case OBJ_REQUEST_OWN_BVECS:
2233 rbd_assert(obj_req->bvec_pos.iter.bi_size ==
2234 obj_req->ex.oe_len);
2235 rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
2236 osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
2237 &obj_req->bvec_pos);
2238 break;
2239 default:
2240 BUG();
2241 }
2242 }
2243
2244 static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
2245 {
2246 struct page **pages;
2247
2248 /*
2249 * The response data for a STAT call consists of:
2250 * le64 length;
2251 * struct {
2252 * le32 tv_sec;
2253 * le32 tv_nsec;
2254 * } mtime;
2255 */
2256 pages = ceph_alloc_page_vector(1, GFP_NOIO);
2257 if (IS_ERR(pages))
2258 return PTR_ERR(pages);
2259
2260 osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
2261 osd_req_op_raw_data_in_pages(osd_req, which, pages,
2262 8 + sizeof(struct ceph_timespec),
2263 0, false, true);
2264 return 0;
2265 }
2266
2267 static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
2268 u32 bytes)
2269 {
2270 struct rbd_obj_request *obj_req = osd_req->r_priv;
2271 int ret;
2272
2273 ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
2274 if (ret)
2275 return ret;
2276
2277 osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
2278 obj_req->copyup_bvec_count, bytes);
2279 return 0;
2280 }
2281
2282 static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
2283 {
2284 obj_req->read_state = RBD_OBJ_READ_START;
2285 return 0;
2286 }
2287
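/*
 * A plain write is an optional allocation hint (omitted when the object
 * map is in use and says the object may already exist) followed by
 * WRITEFULL for a full-object write or WRITE otherwise.
 */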
2288 static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2289 int which)
2290 {
2291 struct rbd_obj_request *obj_req = osd_req->r_priv;
2292 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2293 u16 opcode;
2294
2295 if (!use_object_map(rbd_dev) ||
2296 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
2297 osd_req_op_alloc_hint_init(osd_req, which++,
2298 rbd_dev->layout.object_size,
2299 rbd_dev->layout.object_size,
2300 rbd_dev->opts->alloc_hint_flags);
2301 }
2302
2303 if (rbd_obj_is_entire(obj_req))
2304 opcode = CEPH_OSD_OP_WRITEFULL;
2305 else
2306 opcode = CEPH_OSD_OP_WRITE;
2307
2308 osd_req_op_extent_init(osd_req, which, opcode,
2309 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2310 rbd_osd_setup_data(osd_req, which);
2311 }
2312
2313 static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
2314 {
2315 int ret;
2316
2317 /* reverse map the entire object onto the parent */
2318 ret = rbd_obj_calc_img_extents(obj_req, true);
2319 if (ret)
2320 return ret;
2321
2322 obj_req->write_state = RBD_OBJ_WRITE_START;
2323 return 0;
2324 }
2325
2326 static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
2327 {
2328 return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
2329 CEPH_OSD_OP_ZERO;
2330 }
2331
2332 static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
2333 int which)
2334 {
2335 struct rbd_obj_request *obj_req = osd_req->r_priv;
2336
2337 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
2338 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2339 osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
2340 } else {
2341 osd_req_op_extent_init(osd_req, which,
2342 truncate_or_zero_opcode(obj_req),
2343 obj_req->ex.oe_off, obj_req->ex.oe_len,
2344 0, 0);
2345 }
2346 }
2347
2348 static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
2349 {
2350 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2351 u64 off, next_off;
2352 int ret;
2353
2354 /*
2355 * Align the range to alloc_size boundary and punt on discards
2356 * that are too small to free up any space.
2357 *
2358 * alloc_size == object_size && is_tail() is a special case for
2359 * filestore with filestore_punch_hole = false, needed to allow
2360 * truncate (in addition to delete).
2361 */
2362 if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
2363 !rbd_obj_is_tail(obj_req)) {
2364 off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
2365 next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
2366 rbd_dev->opts->alloc_size);
2367 if (off >= next_off)
2368 return 1;
2369
2370 dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
2371 obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
2372 off, next_off - off);
2373 obj_req->ex.oe_off = off;
2374 obj_req->ex.oe_len = next_off - off;
2375 }
2376
2377 /* reverse map the entire object onto the parent */
2378 ret = rbd_obj_calc_img_extents(obj_req, true);
2379 if (ret)
2380 return ret;
2381
2382 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2383 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
2384 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2385
2386 obj_req->write_state = RBD_OBJ_WRITE_START;
2387 return 0;
2388 }
2389
2390 static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
2391 int which)
2392 {
2393 struct rbd_obj_request *obj_req = osd_req->r_priv;
2394 u16 opcode;
2395
2396 if (rbd_obj_is_entire(obj_req)) {
2397 if (obj_req->num_img_extents) {
2398 if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2399 osd_req_op_init(osd_req, which++,
2400 CEPH_OSD_OP_CREATE, 0);
2401 opcode = CEPH_OSD_OP_TRUNCATE;
2402 } else {
2403 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2404 osd_req_op_init(osd_req, which++,
2405 CEPH_OSD_OP_DELETE, 0);
2406 opcode = 0;
2407 }
2408 } else {
2409 opcode = truncate_or_zero_opcode(obj_req);
2410 }
2411
2412 if (opcode)
2413 osd_req_op_extent_init(osd_req, which, opcode,
2414 obj_req->ex.oe_off, obj_req->ex.oe_len,
2415 0, 0);
2416 }
2417
2418 static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
2419 {
2420 int ret;
2421
2422 /* reverse map the entire object onto the parent */
2423 ret = rbd_obj_calc_img_extents(obj_req, true);
2424 if (ret)
2425 return ret;
2426
2427 if (!obj_req->num_img_extents) {
2428 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2429 if (rbd_obj_is_entire(obj_req))
2430 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2431 }
2432
2433 obj_req->write_state = RBD_OBJ_WRITE_START;
2434 return 0;
2435 }
2436
2437 static int count_write_ops(struct rbd_obj_request *obj_req)
2438 {
2439 struct rbd_img_request *img_req = obj_req->img_request;
2440
2441 switch (img_req->op_type) {
2442 case OBJ_OP_WRITE:
2443 if (!use_object_map(img_req->rbd_dev) ||
2444 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2445 return 2; /* setallochint + write/writefull */
2446
2447 return 1; /* write/writefull */
2448 case OBJ_OP_DISCARD:
2449 return 1; /* delete/truncate/zero */
2450 case OBJ_OP_ZEROOUT:
2451 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2452 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2453 return 2; /* create + truncate */
2454
2455 return 1; /* delete/truncate/zero */
2456 default:
2457 BUG();
2458 }
2459 }
2460
2461 static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2462 int which)
2463 {
2464 struct rbd_obj_request *obj_req = osd_req->r_priv;
2465
2466 switch (obj_req->img_request->op_type) {
2467 case OBJ_OP_WRITE:
2468 __rbd_osd_setup_write_ops(osd_req, which);
2469 break;
2470 case OBJ_OP_DISCARD:
2471 __rbd_osd_setup_discard_ops(osd_req, which);
2472 break;
2473 case OBJ_OP_ZEROOUT:
2474 __rbd_osd_setup_zeroout_ops(osd_req, which);
2475 break;
2476 default:
2477 BUG();
2478 }
2479 }
2480
2481 /*
2482 * Prune the list of object requests (adjust offset and/or length, drop
2483 * redundant requests). Prepare object request state machines and image
2484 * request state machine for execution.
2485 */
2486 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2487 {
2488 struct rbd_obj_request *obj_req, *next_obj_req;
2489 int ret;
2490
2491 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2492 switch (img_req->op_type) {
2493 case OBJ_OP_READ:
2494 ret = rbd_obj_init_read(obj_req);
2495 break;
2496 case OBJ_OP_WRITE:
2497 ret = rbd_obj_init_write(obj_req);
2498 break;
2499 case OBJ_OP_DISCARD:
2500 ret = rbd_obj_init_discard(obj_req);
2501 break;
2502 case OBJ_OP_ZEROOUT:
2503 ret = rbd_obj_init_zeroout(obj_req);
2504 break;
2505 default:
2506 BUG();
2507 }
2508 if (ret < 0)
2509 return ret;
2510 if (ret > 0) {
2511 rbd_img_obj_request_del(img_req, obj_req);
2512 continue;
2513 }
2514 }
2515
2516 img_req->state = RBD_IMG_START;
2517 return 0;
2518 }
2519
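/*
 * Context for distributing an image request's data buffer across its
 * object requests: @pos is the caller's position in the bio (list) or
 * bio_vec array, @iter is a scratch copy advanced by the callbacks.
 */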
2520 union rbd_img_fill_iter {
2521 struct ceph_bio_iter bio_iter;
2522 struct ceph_bvec_iter bvec_iter;
2523 };
2524
2525 struct rbd_img_fill_ctx {
2526 enum obj_request_type pos_type;
2527 union rbd_img_fill_iter *pos;
2528 union rbd_img_fill_iter iter;
2529 ceph_object_extent_fn_t set_pos_fn;
2530 ceph_object_extent_fn_t count_fn;
2531 ceph_object_extent_fn_t copy_fn;
2532 };
2533
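/*
 * ceph_file_to_extents() callback: allocate an object request for a new
 * object extent and add it to the image request.
 */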
2534 static struct ceph_object_extent *alloc_object_extent(void *arg)
2535 {
2536 struct rbd_img_request *img_req = arg;
2537 struct rbd_obj_request *obj_req;
2538
2539 obj_req = rbd_obj_request_create();
2540 if (!obj_req)
2541 return NULL;
2542
2543 rbd_img_obj_request_add(img_req, obj_req);
2544 return &obj_req->ex;
2545 }
2546
2547 /*
2548 * While su != os && sc == 1 is technically not fancy (it's the same
2549 * layout as su == os && sc == 1), we can't use the nocopy path for it
2550 * because ->set_pos_fn() should be called only once per object.
2551 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
2552 * treat su != os && sc == 1 as fancy.
2553 */
2554 static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
2555 {
2556 return l->stripe_unit != l->object_size;
2557 }
2558
2559 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2560 struct ceph_file_extent *img_extents,
2561 u32 num_img_extents,
2562 struct rbd_img_fill_ctx *fctx)
2563 {
2564 u32 i;
2565 int ret;
2566
2567 img_req->data_type = fctx->pos_type;
2568
2569 /*
2570 * Create object requests and set each object request's starting
2571 * position in the provided bio (list) or bio_vec array.
2572 */
2573 fctx->iter = *fctx->pos;
2574 for (i = 0; i < num_img_extents; i++) {
2575 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2576 img_extents[i].fe_off,
2577 img_extents[i].fe_len,
2578 &img_req->object_extents,
2579 alloc_object_extent, img_req,
2580 fctx->set_pos_fn, &fctx->iter);
2581 if (ret)
2582 return ret;
2583 }
2584
2585 return __rbd_img_fill_request(img_req);
2586 }
2587
2588 /*
2589 * Map a list of image extents to a list of object extents, create the
2590 * corresponding object requests (normally each to a different object,
2591 * but not always) and add them to @img_req. For each object request,
2592 * set up its data descriptor to point to the corresponding chunk(s) of
2593 * @fctx->pos data buffer.
2594 *
2595 * Because ceph_file_to_extents() will merge adjacent object extents
2596 * together, each object request's data descriptor may point to multiple
2597 * different chunks of @fctx->pos data buffer.
2598 *
2599 * @fctx->pos data buffer is assumed to be large enough.
2600 */
2601 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2602 struct ceph_file_extent *img_extents,
2603 u32 num_img_extents,
2604 struct rbd_img_fill_ctx *fctx)
2605 {
2606 struct rbd_device *rbd_dev = img_req->rbd_dev;
2607 struct rbd_obj_request *obj_req;
2608 u32 i;
2609 int ret;
2610
2611 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2612 !rbd_layout_is_fancy(&rbd_dev->layout))
2613 return rbd_img_fill_request_nocopy(img_req, img_extents,
2614 num_img_extents, fctx);
2615
2616 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2617
2618 /*
2619 * Create object requests and determine ->bvec_count for each object
2620 * request. Note that ->bvec_count sum over all object requests may
2621 * be greater than the number of bio_vecs in the provided bio (list)
2622 * or bio_vec array because when mapped, those bio_vecs can straddle
2623 * stripe unit boundaries.
2624 */
2625 fctx->iter = *fctx->pos;
2626 for (i = 0; i < num_img_extents; i++) {
2627 ret = ceph_file_to_extents(&rbd_dev->layout,
2628 img_extents[i].fe_off,
2629 img_extents[i].fe_len,
2630 &img_req->object_extents,
2631 alloc_object_extent, img_req,
2632 fctx->count_fn, &fctx->iter);
2633 if (ret)
2634 return ret;
2635 }
2636
2637 for_each_obj_request(img_req, obj_req) {
2638 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2639 sizeof(*obj_req->bvec_pos.bvecs),
2640 GFP_NOIO);
2641 if (!obj_req->bvec_pos.bvecs)
2642 return -ENOMEM;
2643 }
2644
2645 /*
2646 * Fill in each object request's private bio_vec array, splitting and
2647 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2648 */
2649 fctx->iter = *fctx->pos;
2650 for (i = 0; i < num_img_extents; i++) {
2651 ret = ceph_iterate_extents(&rbd_dev->layout,
2652 img_extents[i].fe_off,
2653 img_extents[i].fe_len,
2654 &img_req->object_extents,
2655 fctx->copy_fn, &fctx->iter);
2656 if (ret)
2657 return ret;
2658 }
2659
2660 return __rbd_img_fill_request(img_req);
2661 }
2662
2663 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2664 u64 off, u64 len)
2665 {
2666 struct ceph_file_extent ex = { off, len };
2667 union rbd_img_fill_iter dummy = {};
2668 struct rbd_img_fill_ctx fctx = {
2669 .pos_type = OBJ_REQUEST_NODATA,
2670 .pos = &dummy,
2671 };
2672
2673 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2674 }
2675
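/*
 * Callbacks for the bio data path: record an object request's starting
 * position in the bio chain (nocopy case), count the bio_vecs that map
 * to it, or copy them into its private bio_vec array (fancy layouts).
 */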
2676 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2677 {
2678 struct rbd_obj_request *obj_req =
2679 container_of(ex, struct rbd_obj_request, ex);
2680 struct ceph_bio_iter *it = arg;
2681
2682 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2683 obj_req->bio_pos = *it;
2684 ceph_bio_iter_advance(it, bytes);
2685 }
2686
2687 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2688 {
2689 struct rbd_obj_request *obj_req =
2690 container_of(ex, struct rbd_obj_request, ex);
2691 struct ceph_bio_iter *it = arg;
2692
2693 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2694 ceph_bio_iter_advance_step(it, bytes, ({
2695 obj_req->bvec_count++;
2696 }));
2697
2698 }
2699
2700 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2701 {
2702 struct rbd_obj_request *obj_req =
2703 container_of(ex, struct rbd_obj_request, ex);
2704 struct ceph_bio_iter *it = arg;
2705
2706 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2707 ceph_bio_iter_advance_step(it, bytes, ({
2708 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2709 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2710 }));
2711 }
2712
2713 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2714 struct ceph_file_extent *img_extents,
2715 u32 num_img_extents,
2716 struct ceph_bio_iter *bio_pos)
2717 {
2718 struct rbd_img_fill_ctx fctx = {
2719 .pos_type = OBJ_REQUEST_BIO,
2720 .pos = (union rbd_img_fill_iter *)bio_pos,
2721 .set_pos_fn = set_bio_pos,
2722 .count_fn = count_bio_bvecs,
2723 .copy_fn = copy_bio_bvecs,
2724 };
2725
2726 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2727 &fctx);
2728 }
2729
2730 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2731 u64 off, u64 len, struct bio *bio)
2732 {
2733 struct ceph_file_extent ex = { off, len };
2734 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2735
2736 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2737 }
2738
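/*
 * Callbacks for the bio_vec array data path, mirroring the bio
 * callbacks above.
 */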
2739 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2740 {
2741 struct rbd_obj_request *obj_req =
2742 container_of(ex, struct rbd_obj_request, ex);
2743 struct ceph_bvec_iter *it = arg;
2744
2745 obj_req->bvec_pos = *it;
2746 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2747 ceph_bvec_iter_advance(it, bytes);
2748 }
2749
2750 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2751 {
2752 struct rbd_obj_request *obj_req =
2753 container_of(ex, struct rbd_obj_request, ex);
2754 struct ceph_bvec_iter *it = arg;
2755
2756 ceph_bvec_iter_advance_step(it, bytes, ({
2757 obj_req->bvec_count++;
2758 }));
2759 }
2760
2761 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2762 {
2763 struct rbd_obj_request *obj_req =
2764 container_of(ex, struct rbd_obj_request, ex);
2765 struct ceph_bvec_iter *it = arg;
2766
2767 ceph_bvec_iter_advance_step(it, bytes, ({
2768 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2769 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2770 }));
2771 }
2772
2773 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2774 struct ceph_file_extent *img_extents,
2775 u32 num_img_extents,
2776 struct ceph_bvec_iter *bvec_pos)
2777 {
2778 struct rbd_img_fill_ctx fctx = {
2779 .pos_type = OBJ_REQUEST_BVECS,
2780 .pos = (union rbd_img_fill_iter *)bvec_pos,
2781 .set_pos_fn = set_bvec_pos,
2782 .count_fn = count_bvecs,
2783 .copy_fn = copy_bvecs,
2784 };
2785
2786 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2787 &fctx);
2788 }
2789
2790 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2791 struct ceph_file_extent *img_extents,
2792 u32 num_img_extents,
2793 struct bio_vec *bvecs)
2794 {
2795 struct ceph_bvec_iter it = {
2796 .bvecs = bvecs,
2797 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2798 num_img_extents) },
2799 };
2800
2801 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2802 &it);
2803 }
2804
2805 static void rbd_img_handle_request_work(struct work_struct *work)
2806 {
2807 struct rbd_img_request *img_req =
2808 container_of(work, struct rbd_img_request, work);
2809
2810 rbd_img_handle_request(img_req, img_req->work_result);
2811 }
2812
2813 static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
2814 {
2815 INIT_WORK(&img_req->work, rbd_img_handle_request_work);
2816 img_req->work_result = result;
2817 queue_work(rbd_wq, &img_req->work);
2818 }
2819
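/*
 * Consult the object map (when in use) and set RBD_OBJ_FLAG_MAY_EXIST
 * if the target object may exist.  Reads of objects known to be absent
 * are satisfied from the parent image or zero-filled without sending
 * anything to the OSD.
 */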
2820 static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
2821 {
2822 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2823
2824 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2825 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2826 return true;
2827 }
2828
2829 dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
2830 obj_req->ex.oe_objno);
2831 return false;
2832 }
2833
2834 static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2835 {
2836 struct ceph_osd_request *osd_req;
2837 int ret;
2838
2839 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2840 if (IS_ERR(osd_req))
2841 return PTR_ERR(osd_req);
2842
2843 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
2844 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2845 rbd_osd_setup_data(osd_req, 0);
2846 rbd_osd_format_read(osd_req);
2847
2848 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2849 if (ret)
2850 return ret;
2851
2852 rbd_osd_submit(osd_req);
2853 return 0;
2854 }
2855
2856 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2857 {
2858 struct rbd_img_request *img_req = obj_req->img_request;
2859 struct rbd_device *parent = img_req->rbd_dev->parent;
2860 struct rbd_img_request *child_img_req;
2861 int ret;
2862
2863 child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
2864 if (!child_img_req)
2865 return -ENOMEM;
2866
2867 rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
2868 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2869 child_img_req->obj_request = obj_req;
2870
2871 down_read(&parent->header_rwsem);
2872 rbd_img_capture_header(child_img_req);
2873 up_read(&parent->header_rwsem);
2874
2875 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
2876 obj_req);
2877
2878 if (!rbd_img_is_write(img_req)) {
2879 switch (img_req->data_type) {
2880 case OBJ_REQUEST_BIO:
2881 ret = __rbd_img_fill_from_bio(child_img_req,
2882 obj_req->img_extents,
2883 obj_req->num_img_extents,
2884 &obj_req->bio_pos);
2885 break;
2886 case OBJ_REQUEST_BVECS:
2887 case OBJ_REQUEST_OWN_BVECS:
2888 ret = __rbd_img_fill_from_bvecs(child_img_req,
2889 obj_req->img_extents,
2890 obj_req->num_img_extents,
2891 &obj_req->bvec_pos);
2892 break;
2893 default:
2894 BUG();
2895 }
2896 } else {
2897 ret = rbd_img_fill_from_bvecs(child_img_req,
2898 obj_req->img_extents,
2899 obj_req->num_img_extents,
2900 obj_req->copyup_bvecs);
2901 }
2902 if (ret) {
2903 rbd_img_request_destroy(child_img_req);
2904 return ret;
2905 }
2906
2907 /* avoid parent chain recursion */
2908 rbd_img_schedule(child_img_req, 0);
2909 return 0;
2910 }
2911
2912 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
2913 {
2914 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2915 int ret;
2916
2917 again:
2918 switch (obj_req->read_state) {
2919 case RBD_OBJ_READ_START:
2920 rbd_assert(!*result);
2921
2922 if (!rbd_obj_may_exist(obj_req)) {
2923 *result = -ENOENT;
2924 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2925 goto again;
2926 }
2927
2928 ret = rbd_obj_read_object(obj_req);
2929 if (ret) {
2930 *result = ret;
2931 return true;
2932 }
2933 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2934 return false;
2935 case RBD_OBJ_READ_OBJECT:
2936 if (*result == -ENOENT && rbd_dev->parent_overlap) {
2937 /* reverse map this object extent onto the parent */
2938 ret = rbd_obj_calc_img_extents(obj_req, false);
2939 if (ret) {
2940 *result = ret;
2941 return true;
2942 }
2943 if (obj_req->num_img_extents) {
2944 ret = rbd_obj_read_from_parent(obj_req);
2945 if (ret) {
2946 *result = ret;
2947 return true;
2948 }
2949 obj_req->read_state = RBD_OBJ_READ_PARENT;
2950 return false;
2951 }
2952 }
2953
2954 /*
2955 * -ENOENT means a hole in the image -- zero-fill the entire
2956 * length of the request. A short read also implies zero-fill
2957 * to the end of the request.
2958 */
2959 if (*result == -ENOENT) {
2960 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
2961 *result = 0;
2962 } else if (*result >= 0) {
2963 if (*result < obj_req->ex.oe_len)
2964 rbd_obj_zero_range(obj_req, *result,
2965 obj_req->ex.oe_len - *result);
2966 else
2967 rbd_assert(*result == obj_req->ex.oe_len);
2968 *result = 0;
2969 }
2970 return true;
2971 case RBD_OBJ_READ_PARENT:
2972 /*
2973 * The parent image is read only up to the overlap -- zero-fill
2974 * from the overlap to the end of the request.
2975 */
2976 if (!*result) {
2977 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
2978
2979 if (obj_overlap < obj_req->ex.oe_len)
2980 rbd_obj_zero_range(obj_req, obj_overlap,
2981 obj_req->ex.oe_len - obj_overlap);
2982 }
2983 return true;
2984 default:
2985 BUG();
2986 }
2987 }
2988
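/*
 * A write-like request against an object that the object map says does
 * not exist completes immediately if it is flagged
 * RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT (discards, and zeroouts with no
 * parent data to preserve).
 */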
2989 static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
2990 {
2991 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2992
2993 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
2994 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2995
2996 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
2997 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
2998 dout("%s %p noop for nonexistent\n", __func__, obj_req);
2999 return true;
3000 }
3001
3002 return false;
3003 }
3004
3005 /*
3006 * Return:
3007 * 0 - object map update sent
3008 * 1 - object map update isn't needed
3009 * <0 - error
3010 */
3011 static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
3012 {
3013 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3014 u8 new_state;
3015
3016 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3017 return 1;
3018
3019 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3020 new_state = OBJECT_PENDING;
3021 else
3022 new_state = OBJECT_EXISTS;
3023
3024 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
3025 }
3026
3027 static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
3028 {
3029 struct ceph_osd_request *osd_req;
3030 int num_ops = count_write_ops(obj_req);
3031 int which = 0;
3032 int ret;
3033
3034 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
3035 num_ops++; /* stat */
3036
3037 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3038 if (IS_ERR(osd_req))
3039 return PTR_ERR(osd_req);
3040
3041 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3042 ret = rbd_osd_setup_stat(osd_req, which++);
3043 if (ret)
3044 return ret;
3045 }
3046
3047 rbd_osd_setup_write_ops(osd_req, which);
3048 rbd_osd_format_write(osd_req);
3049
3050 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3051 if (ret)
3052 return ret;
3053
3054 rbd_osd_submit(osd_req);
3055 return 0;
3056 }
3057
3058 /*
3059 * copyup_bvecs pages are never highmem pages
3060 */
3061 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
3062 {
3063 struct ceph_bvec_iter it = {
3064 .bvecs = bvecs,
3065 .iter = { .bi_size = bytes },
3066 };
3067
3068 ceph_bvec_iter_advance_step(&it, bytes, ({
3069 if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
3070 bv.bv_len))
3071 return false;
3072 }));
3073 return true;
3074 }
3075
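/*
 * Passed as @bytes to the copyup helpers to request that only the
 * original modification ops be sent, with no copyup data.
 */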
3076 #define MODS_ONLY U32_MAX
3077
3078 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
3079 u32 bytes)
3080 {
3081 struct ceph_osd_request *osd_req;
3082 int ret;
3083
3084 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3085 rbd_assert(bytes > 0 && bytes != MODS_ONLY);
3086
3087 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3088 if (IS_ERR(osd_req))
3089 return PTR_ERR(osd_req);
3090
3091 ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
3092 if (ret)
3093 return ret;
3094
3095 rbd_osd_format_write(osd_req);
3096
3097 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3098 if (ret)
3099 return ret;
3100
3101 rbd_osd_submit(osd_req);
3102 return 0;
3103 }
3104
3105 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3106 u32 bytes)
3107 {
3108 struct ceph_osd_request *osd_req;
3109 int num_ops = count_write_ops(obj_req);
3110 int which = 0;
3111 int ret;
3112
3113 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3114
3115 if (bytes != MODS_ONLY)
3116 num_ops++; /* copyup */
3117
3118 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3119 if (IS_ERR(osd_req))
3120 return PTR_ERR(osd_req);
3121
3122 if (bytes != MODS_ONLY) {
3123 ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
3124 if (ret)
3125 return ret;
3126 }
3127
3128 rbd_osd_setup_write_ops(osd_req, which);
3129 rbd_osd_format_write(osd_req);
3130
3131 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3132 if (ret)
3133 return ret;
3134
3135 rbd_osd_submit(osd_req);
3136 return 0;
3137 }
3138
3139 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3140 {
3141 u32 i;
3142
3143 rbd_assert(!obj_req->copyup_bvecs);
3144 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3145 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3146 sizeof(*obj_req->copyup_bvecs),
3147 GFP_NOIO);
3148 if (!obj_req->copyup_bvecs)
3149 return -ENOMEM;
3150
3151 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3152 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
3153
3154 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
3155 if (!obj_req->copyup_bvecs[i].bv_page)
3156 return -ENOMEM;
3157
3158 obj_req->copyup_bvecs[i].bv_offset = 0;
3159 obj_req->copyup_bvecs[i].bv_len = len;
3160 obj_overlap -= len;
3161 }
3162
3163 rbd_assert(!obj_overlap);
3164 return 0;
3165 }
3166
3167 /*
3168 * The target object doesn't exist. Read the data for the entire
3169 * target object up to the overlap point (if any) from the parent,
3170 * so we can use it for a copyup.
3171 */
3172 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
3173 {
3174 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3175 int ret;
3176
3177 rbd_assert(obj_req->num_img_extents);
3178 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3179 rbd_dev->parent_overlap);
3180 if (!obj_req->num_img_extents) {
3181 /*
3182 * The overlap has become 0 (most likely because the
3183 * image has been flattened). Re-submit the original write
3184 * request -- pass MODS_ONLY since the copyup isn't needed
3185 * anymore.
3186 */
3187 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
3188 }
3189
3190 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3191 if (ret)
3192 return ret;
3193
3194 return rbd_obj_read_from_parent(obj_req);
3195 }
3196
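/*
 * Before the copyup is written, mark the object as existing
 * (OBJECT_EXISTS, or OBJECT_EXISTS_CLEAN when fast-diff is enabled)
 * in the object map of every snapshot in the snapshot context.
 */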
3197 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
3198 {
3199 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3200 struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3201 u8 new_state;
3202 u32 i;
3203 int ret;
3204
3205 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3206
3207 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3208 return;
3209
3210 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3211 return;
3212
3213 for (i = 0; i < snapc->num_snaps; i++) {
3214 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3215 i + 1 < snapc->num_snaps)
3216 new_state = OBJECT_EXISTS_CLEAN;
3217 else
3218 new_state = OBJECT_EXISTS;
3219
3220 ret = rbd_object_map_update(obj_req, snapc->snaps[i],
3221 new_state, NULL);
3222 if (ret < 0) {
3223 obj_req->pending.result = ret;
3224 return;
3225 }
3226
3227 rbd_assert(!ret);
3228 obj_req->pending.num_pending++;
3229 }
3230 }
3231
3232 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3233 {
3234 u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3235 int ret;
3236
3237 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3238
3239 /*
3240 * Only send non-zero copyup data to save some I/O and network
3241 * bandwidth -- zero copyup data is equivalent to the object not
3242 * existing.
3243 */
3244 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3245 bytes = 0;
3246
3247 if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
3248 /*
3249 * Send a copyup request with an empty snapshot context to
3250 * deep-copyup the object through all existing snapshots.
3251 * A second request with the current snapshot context will be
3252 * sent for the actual modification.
3253 */
3254 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3255 if (ret) {
3256 obj_req->pending.result = ret;
3257 return;
3258 }
3259
3260 obj_req->pending.num_pending++;
3261 bytes = MODS_ONLY;
3262 }
3263
3264 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3265 if (ret) {
3266 obj_req->pending.result = ret;
3267 return;
3268 }
3269
3270 obj_req->pending.num_pending++;
3271 }
3272
3273 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3274 {
3275 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3276 int ret;
3277
3278 again:
3279 switch (obj_req->copyup_state) {
3280 case RBD_OBJ_COPYUP_START:
3281 rbd_assert(!*result);
3282
3283 ret = rbd_obj_copyup_read_parent(obj_req);
3284 if (ret) {
3285 *result = ret;
3286 return true;
3287 }
3288 if (obj_req->num_img_extents)
3289 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3290 else
3291 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3292 return false;
3293 case RBD_OBJ_COPYUP_READ_PARENT:
3294 if (*result)
3295 return true;
3296
3297 if (is_zero_bvecs(obj_req->copyup_bvecs,
3298 rbd_obj_img_extents_bytes(obj_req))) {
3299 dout("%s %p detected zeros\n", __func__, obj_req);
3300 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3301 }
3302
3303 rbd_obj_copyup_object_maps(obj_req);
3304 if (!obj_req->pending.num_pending) {
3305 *result = obj_req->pending.result;
3306 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3307 goto again;
3308 }
3309 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3310 return false;
3311 case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3312 if (!pending_result_dec(&obj_req->pending, result))
3313 return false;
3314 fallthrough;
3315 case RBD_OBJ_COPYUP_OBJECT_MAPS:
3316 if (*result) {
3317 rbd_warn(rbd_dev, "snap object map update failed: %d",
3318 *result);
3319 return true;
3320 }
3321
3322 rbd_obj_copyup_write_object(obj_req);
3323 if (!obj_req->pending.num_pending) {
3324 *result = obj_req->pending.result;
3325 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3326 goto again;
3327 }
3328 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3329 return false;
3330 case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3331 if (!pending_result_dec(&obj_req->pending, result))
3332 return false;
3333 fallthrough;
3334 case RBD_OBJ_COPYUP_WRITE_OBJECT:
3335 return true;
3336 default:
3337 BUG();
3338 }
3339 }
3340
3341 /*
3342 * Return:
3343 * 0 - object map update sent
3344 * 1 - object map update isn't needed
3345 * <0 - error
3346 */
3347 static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
3348 {
3349 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3350 u8 current_state = OBJECT_PENDING;
3351
3352 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3353 return 1;
3354
3355 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3356 return 1;
3357
3358 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
3359 				      &current_state);
3360 }
3361
3362 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3363 {
3364 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3365 int ret;
3366
3367 again:
3368 switch (obj_req->write_state) {
3369 case RBD_OBJ_WRITE_START:
3370 rbd_assert(!*result);
3371
3372 rbd_obj_set_copyup_enabled(obj_req);
3373 if (rbd_obj_write_is_noop(obj_req))
3374 return true;
3375
3376 ret = rbd_obj_write_pre_object_map(obj_req);
3377 if (ret < 0) {
3378 *result = ret;
3379 return true;
3380 }
3381 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3382 if (ret > 0)
3383 goto again;
3384 return false;
3385 case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
3386 if (*result) {
3387 rbd_warn(rbd_dev, "pre object map update failed: %d",
3388 *result);
3389 return true;
3390 }
3391 ret = rbd_obj_write_object(obj_req);
3392 if (ret) {
3393 *result = ret;
3394 return true;
3395 }
3396 obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3397 return false;
3398 case RBD_OBJ_WRITE_OBJECT:
3399 if (*result == -ENOENT) {
3400 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3401 *result = 0;
3402 obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3403 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3404 goto again;
3405 }
3406 /*
3407 * On a non-existent object:
3408 * delete - -ENOENT, truncate/zero - 0
3409 */
3410 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3411 *result = 0;
3412 }
3413 if (*result)
3414 return true;
3415
3416 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3417 goto again;
3418 case __RBD_OBJ_WRITE_COPYUP:
3419 if (!rbd_obj_advance_copyup(obj_req, result))
3420 return false;
3421 fallthrough;
3422 case RBD_OBJ_WRITE_COPYUP:
3423 if (*result) {
3424 rbd_warn(rbd_dev, "copyup failed: %d", *result);
3425 return true;
3426 }
3427 ret = rbd_obj_write_post_object_map(obj_req);
3428 if (ret < 0) {
3429 *result = ret;
3430 return true;
3431 }
3432 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3433 if (ret > 0)
3434 goto again;
3435 return false;
3436 case RBD_OBJ_WRITE_POST_OBJECT_MAP:
3437 if (*result)
3438 rbd_warn(rbd_dev, "post object map update failed: %d",
3439 *result);
3440 return true;
3441 default:
3442 BUG();
3443 }
3444 }
3445
3446 /*
3447 * Return true if @obj_req is completed.
3448 */
3449 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3450 int *result)
3451 {
3452 struct rbd_img_request *img_req = obj_req->img_request;
3453 struct rbd_device *rbd_dev = img_req->rbd_dev;
3454 bool done;
3455
3456 mutex_lock(&obj_req->state_mutex);
3457 if (!rbd_img_is_write(img_req))
3458 done = rbd_obj_advance_read(obj_req, result);
3459 else
3460 done = rbd_obj_advance_write(obj_req, result);
3461 mutex_unlock(&obj_req->state_mutex);
3462
3463 if (done && *result) {
3464 rbd_assert(*result < 0);
3465 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3466 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3467 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
3468 }
3469 return done;
3470 }
3471
3472 /*
3473 * This is open-coded in rbd_img_handle_request() to avoid parent chain
3474 * recursion.
3475 */
3476 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
3477 {
3478 if (__rbd_obj_handle_request(obj_req, &result))
3479 rbd_img_handle_request(obj_req->img_request, result);
3480 }
3481
3482 static bool need_exclusive_lock(struct rbd_img_request *img_req)
3483 {
3484 struct rbd_device *rbd_dev = img_req->rbd_dev;
3485
3486 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3487 return false;
3488
3489 if (rbd_is_ro(rbd_dev))
3490 return false;
3491
3492 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
3493 if (rbd_dev->opts->lock_on_read ||
3494 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3495 return true;
3496
3497 return rbd_img_is_write(img_req);
3498 }
3499
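/*
 * Put @img_req on the exclusive lock's running list if the lock is
 * currently held, or on the acquiring list otherwise.  Returns true if
 * the request may proceed immediately, false if it must wait for the
 * lock.
 */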
3500 static bool rbd_lock_add_request(struct rbd_img_request *img_req)
3501 {
3502 struct rbd_device *rbd_dev = img_req->rbd_dev;
3503 bool locked;
3504
3505 lockdep_assert_held(&rbd_dev->lock_rwsem);
3506 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
3507 spin_lock(&rbd_dev->lock_lists_lock);
3508 rbd_assert(list_empty(&img_req->lock_item));
3509 if (!locked)
3510 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3511 else
3512 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3513 spin_unlock(&rbd_dev->lock_lists_lock);
3514 return locked;
3515 }
3516
3517 static void rbd_lock_del_request(struct rbd_img_request *img_req)
3518 {
3519 struct rbd_device *rbd_dev = img_req->rbd_dev;
3520 bool need_wakeup;
3521
3522 lockdep_assert_held(&rbd_dev->lock_rwsem);
3523 spin_lock(&rbd_dev->lock_lists_lock);
3524 rbd_assert(!list_empty(&img_req->lock_item));
3525 list_del_init(&img_req->lock_item);
3526 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3527 list_empty(&rbd_dev->running_list));
3528 spin_unlock(&rbd_dev->lock_lists_lock);
3529 if (need_wakeup)
3530 complete(&rbd_dev->releasing_wait);
3531 }
3532
3533 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3534 {
3535 struct rbd_device *rbd_dev = img_req->rbd_dev;
3536
3537 if (!need_exclusive_lock(img_req))
3538 return 1;
3539
3540 if (rbd_lock_add_request(img_req))
3541 return 1;
3542
3543 if (rbd_dev->opts->exclusive) {
3544 WARN_ON(1); /* lock got released? */
3545 return -EROFS;
3546 }
3547
3548 /*
3549 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3550 * and cancel_delayed_work() in wake_lock_waiters().
3551 */
3552 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3553 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3554 return 0;
3555 }
3556
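/*
 * Start the state machine of every object request in @img_req.  For a
 * write, the snapshot context is captured here under header_rwsem.
 * An immediate error stops the submission; requests still in flight
 * are counted in img_req->pending.
 */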
3557 static void rbd_img_object_requests(struct rbd_img_request *img_req)
3558 {
3559 struct rbd_device *rbd_dev = img_req->rbd_dev;
3560 struct rbd_obj_request *obj_req;
3561
3562 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3563 rbd_assert(!need_exclusive_lock(img_req) ||
3564 __rbd_is_lock_owner(rbd_dev));
3565
3566 if (rbd_img_is_write(img_req)) {
3567 rbd_assert(!img_req->snapc);
3568 down_read(&rbd_dev->header_rwsem);
3569 img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
3570 up_read(&rbd_dev->header_rwsem);
3571 }
3572
3573 for_each_obj_request(img_req, obj_req) {
3574 int result = 0;
3575
3576 if (__rbd_obj_handle_request(obj_req, &result)) {
3577 if (result) {
3578 img_req->pending.result = result;
3579 return;
3580 }
3581 } else {
3582 img_req->pending.num_pending++;
3583 }
3584 }
3585 }
3586
3587 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3588 {
3589 int ret;
3590
3591 again:
3592 switch (img_req->state) {
3593 case RBD_IMG_START:
3594 rbd_assert(!*result);
3595
3596 ret = rbd_img_exclusive_lock(img_req);
3597 if (ret < 0) {
3598 *result = ret;
3599 return true;
3600 }
3601 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3602 if (ret > 0)
3603 goto again;
3604 return false;
3605 case RBD_IMG_EXCLUSIVE_LOCK:
3606 if (*result)
3607 return true;
3608
3609 rbd_img_object_requests(img_req);
3610 if (!img_req->pending.num_pending) {
3611 *result = img_req->pending.result;
3612 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3613 goto again;
3614 }
3615 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3616 return false;
3617 case __RBD_IMG_OBJECT_REQUESTS:
3618 if (!pending_result_dec(&img_req->pending, result))
3619 return false;
3620 fallthrough;
3621 case RBD_IMG_OBJECT_REQUESTS:
3622 return true;
3623 default:
3624 BUG();
3625 }
3626 }
3627
3628 /*
3629 * Return true if @img_req is completed.
3630 */
3631 static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3632 int *result)
3633 {
3634 struct rbd_device *rbd_dev = img_req->rbd_dev;
3635 bool done;
3636
3637 if (need_exclusive_lock(img_req)) {
3638 down_read(&rbd_dev->lock_rwsem);
3639 mutex_lock(&img_req->state_mutex);
3640 done = rbd_img_advance(img_req, result);
3641 if (done)
3642 rbd_lock_del_request(img_req);
3643 mutex_unlock(&img_req->state_mutex);
3644 up_read(&rbd_dev->lock_rwsem);
3645 } else {
3646 mutex_lock(&img_req->state_mutex);
3647 done = rbd_img_advance(img_req, result);
3648 mutex_unlock(&img_req->state_mutex);
3649 }
3650
3651 if (done && *result) {
3652 rbd_assert(*result < 0);
3653 rbd_warn(rbd_dev, "%s%s result %d",
3654 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3655 obj_op_name(img_req->op_type), *result);
3656 }
3657 return done;
3658 }
3659
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3661 {
3662 again:
3663 if (!__rbd_img_handle_request(img_req, &result))
3664 return;
3665
3666 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3667 struct rbd_obj_request *obj_req = img_req->obj_request;
3668
3669 rbd_img_request_destroy(img_req);
3670 if (__rbd_obj_handle_request(obj_req, &result)) {
3671 img_req = obj_req->img_request;
3672 goto again;
3673 }
3674 } else {
3675 struct request *rq = blk_mq_rq_from_pdu(img_req);
3676
3677 rbd_img_request_destroy(img_req);
3678 blk_mq_end_request(rq, errno_to_blk_status(result));
3679 }
3680 }
3681
3682 static const struct rbd_client_id rbd_empty_cid;
3683
static bool rbd_cid_equal(const struct rbd_client_id *lhs,
			  const struct rbd_client_id *rhs)
3686 {
3687 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3688 }
3689
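/*
 * Our client id is the ceph global id paired with the current watch
 * cookie, sampled under watch_mutex.
 */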
static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3691 {
3692 struct rbd_client_id cid;
3693
3694 mutex_lock(&rbd_dev->watch_mutex);
3695 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3696 cid.handle = rbd_dev->watch_cookie;
3697 mutex_unlock(&rbd_dev->watch_mutex);
3698 return cid;
3699 }
3700
3701 /*
3702 * lock_rwsem must be held for write
3703 */
static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
			      const struct rbd_client_id *cid)
3706 {
3707 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3708 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3709 cid->gid, cid->handle);
3710 rbd_dev->owner_cid = *cid; /* struct */
3711 }
3712
static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3714 {
3715 mutex_lock(&rbd_dev->watch_mutex);
3716 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3717 mutex_unlock(&rbd_dev->watch_mutex);
3718 }
3719
static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3721 {
3722 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3723
3724 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3725 strcpy(rbd_dev->lock_cookie, cookie);
3726 rbd_set_owner_cid(rbd_dev, &cid);
3727 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3728 }
3729
3730 /*
3731 * lock_rwsem must be held for write
3732 */
static int rbd_lock(struct rbd_device *rbd_dev)
3734 {
3735 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3736 char cookie[32];
3737 int ret;
3738
3739 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3740 rbd_dev->lock_cookie[0] != '\0');
3741
3742 format_lock_cookie(rbd_dev, cookie);
3743 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3744 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3745 RBD_LOCK_TAG, "", 0);
3746 if (ret && ret != -EEXIST)
3747 return ret;
3748
3749 __rbd_lock(rbd_dev, cookie);
3750 return 0;
3751 }
3752
3753 /*
3754 * lock_rwsem must be held for write
3755 */
static void rbd_unlock(struct rbd_device *rbd_dev)
3757 {
3758 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3759 int ret;
3760
3761 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3762 rbd_dev->lock_cookie[0] == '\0');
3763
3764 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3765 RBD_LOCK_NAME, rbd_dev->lock_cookie);
3766 if (ret && ret != -ENOENT)
3767 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
3768
3769 /* treat errors as the image is unlocked */
3770 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3771 rbd_dev->lock_cookie[0] = '\0';
3772 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3773 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3774 }
3775
static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
				enum rbd_notify_op notify_op,
				struct page ***preply_pages,
				size_t *preply_len)
3780 {
3781 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3782 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3783 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3784 int buf_size = sizeof(buf);
3785 void *p = buf;
3786
3787 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3788
3789 /* encode *LockPayload NotifyMessage (op + ClientId) */
3790 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3791 ceph_encode_32(&p, notify_op);
3792 ceph_encode_64(&p, cid.gid);
3793 ceph_encode_64(&p, cid.handle);
3794
3795 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3796 &rbd_dev->header_oloc, buf, buf_size,
3797 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
3798 }
3799
static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
			       enum rbd_notify_op notify_op)
3802 {
3803 __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
3804 }
3805
static void rbd_notify_acquired_lock(struct work_struct *work)
3807 {
3808 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3809 acquired_lock_work);
3810
3811 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3812 }
3813
static void rbd_notify_released_lock(struct work_struct *work)
3815 {
3816 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3817 released_lock_work);
3818
3819 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3820 }
3821
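/*
 * Ask the current lock owner to release the lock.  Returns the owner's
 * ResponseMessage result (0 if it agreed to release, -EROFS if it
 * refused), -ETIMEDOUT if no owner responded, or another negative
 * error code.
 */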
static int rbd_request_lock(struct rbd_device *rbd_dev)
3823 {
3824 struct page **reply_pages;
3825 size_t reply_len;
3826 bool lock_owner_responded = false;
3827 int ret;
3828
3829 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3830
3831 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3832 &reply_pages, &reply_len);
3833 if (ret && ret != -ETIMEDOUT) {
3834 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3835 goto out;
3836 }
3837
3838 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3839 void *p = page_address(reply_pages[0]);
3840 void *const end = p + reply_len;
3841 u32 n;
3842
3843 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3844 while (n--) {
3845 u8 struct_v;
3846 u32 len;
3847
3848 ceph_decode_need(&p, end, 8 + 8, e_inval);
3849 p += 8 + 8; /* skip gid and cookie */
3850
3851 ceph_decode_32_safe(&p, end, len, e_inval);
3852 if (!len)
3853 continue;
3854
3855 if (lock_owner_responded) {
3856 rbd_warn(rbd_dev,
3857 "duplicate lock owners detected");
3858 ret = -EIO;
3859 goto out;
3860 }
3861
3862 lock_owner_responded = true;
3863 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3864 &struct_v, &len);
3865 if (ret) {
3866 rbd_warn(rbd_dev,
3867 "failed to decode ResponseMessage: %d",
3868 ret);
3869 goto e_inval;
3870 }
3871
3872 ret = ceph_decode_32(&p);
3873 }
3874 }
3875
3876 if (!lock_owner_responded) {
3877 rbd_warn(rbd_dev, "no lock owners detected");
3878 ret = -ETIMEDOUT;
3879 }
3880
3881 out:
3882 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3883 return ret;
3884
3885 e_inval:
3886 ret = -EINVAL;
3887 goto out;
3888 }
3889
/*
 * Wake up everyone waiting for the lock: either the image request
 * state machine(s) on acquiring_list or rbd_add_acquire_lock()
 * (i.e. "rbd map") blocked on acquire_wait.
 */
static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3895 {
3896 struct rbd_img_request *img_req;
3897
3898 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3899 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3900
3901 cancel_delayed_work(&rbd_dev->lock_dwork);
3902 if (!completion_done(&rbd_dev->acquire_wait)) {
3903 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3904 list_empty(&rbd_dev->running_list));
3905 rbd_dev->acquire_err = result;
3906 complete_all(&rbd_dev->acquire_wait);
3907 return;
3908 }
3909
3910 list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
3911 mutex_lock(&img_req->state_mutex);
3912 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3913 rbd_img_schedule(img_req, result);
3914 mutex_unlock(&img_req->state_mutex);
3915 }
3916
3917 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
3918 }
3919
static bool locker_equal(const struct ceph_locker *lhs,
			 const struct ceph_locker *rhs)
3922 {
3923 return lhs->id.name.type == rhs->id.name.type &&
3924 lhs->id.name.num == rhs->id.name.num &&
3925 !strcmp(lhs->id.cookie, rhs->id.cookie) &&
3926 ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
3927 }
3928
static void free_locker(struct ceph_locker *locker)
3930 {
3931 if (locker)
3932 ceph_free_lockers(locker, 1);
3933 }
3934
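/*
 * Look up the current holder of the exclusive lock on the header
 * object.  Returns the locker (free with free_locker()), NULL if the
 * header is not locked, or an ERR_PTR (-EBUSY if it is locked by an
 * external mechanism).
 */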
static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
3936 {
3937 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3938 struct ceph_locker *lockers;
3939 u32 num_lockers;
3940 u8 lock_type;
3941 char *lock_tag;
3942 int ret;
3943
3944 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3945
3946 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3947 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3948 &lock_type, &lock_tag, &lockers, &num_lockers);
3949 if (ret) {
3950 rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
3951 return ERR_PTR(ret);
3952 }
3953
3954 if (num_lockers == 0) {
3955 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3956 lockers = NULL;
3957 goto out;
3958 }
3959
3960 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3961 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3962 lock_tag);
3963 goto err_busy;
3964 }
3965
3966 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3967 rbd_warn(rbd_dev, "shared lock type detected");
3968 goto err_busy;
3969 }
3970
3971 WARN_ON(num_lockers != 1);
3972 if (strncmp(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3973 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3974 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3975 lockers[0].id.cookie);
3976 goto err_busy;
3977 }
3978
3979 out:
3980 kfree(lock_tag);
3981 return lockers;
3982
3983 err_busy:
3984 kfree(lock_tag);
3985 ceph_free_lockers(lockers, num_lockers);
3986 return ERR_PTR(-EBUSY);
3987 }
3988
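/*
 * Check whether the locker still has a watch established on the
 * header object.  Returns 1 (and records the owner cid) if a matching
 * watcher is found, 0 if not, or a negative error code.
 */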
static int find_watcher(struct rbd_device *rbd_dev,
			const struct ceph_locker *locker)
3991 {
3992 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3993 struct ceph_watch_item *watchers;
3994 u32 num_watchers;
3995 u64 cookie;
3996 int i;
3997 int ret;
3998
3999 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
4000 &rbd_dev->header_oloc, &watchers,
4001 &num_watchers);
4002 if (ret) {
4003 rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
4004 return ret;
4005 }
4006
4007 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
4008 for (i = 0; i < num_watchers; i++) {
4009 /*
4010 * Ignore addr->type while comparing. This mimics
4011 * entity_addr_t::get_legacy_str() + strcmp().
4012 */
4013 if (ceph_addr_equal_no_type(&watchers[i].addr,
4014 &locker->info.addr) &&
4015 watchers[i].cookie == cookie) {
4016 struct rbd_client_id cid = {
4017 .gid = le64_to_cpu(watchers[i].name.num),
4018 .handle = cookie,
4019 };
4020
4021 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
4022 rbd_dev, cid.gid, cid.handle);
4023 rbd_set_owner_cid(rbd_dev, &cid);
4024 ret = 1;
4025 goto out;
4026 }
4027 }
4028
4029 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
4030 ret = 0;
4031 out:
4032 kfree(watchers);
4033 return ret;
4034 }
4035
4036 /*
4037 * lock_rwsem must be held for write
4038 */
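/*
 * Returns 0 if the lock was acquired (possibly after blocklisting a
 * dead owner and breaking its lock), a positive value if a live owner
 * was found and the caller should send a request-lock notification,
 * or a negative error code.
 */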
static int rbd_try_lock(struct rbd_device *rbd_dev)
4040 {
4041 struct ceph_client *client = rbd_dev->rbd_client->client;
4042 struct ceph_locker *locker, *refreshed_locker;
4043 int ret;
4044
4045 for (;;) {
4046 locker = refreshed_locker = NULL;
4047
4048 ret = rbd_lock(rbd_dev);
4049 if (!ret)
4050 goto out;
4051 if (ret != -EBUSY) {
4052 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
4053 goto out;
4054 }
4055
4056 /* determine if the current lock holder is still alive */
4057 locker = get_lock_owner_info(rbd_dev);
4058 if (IS_ERR(locker)) {
4059 ret = PTR_ERR(locker);
4060 locker = NULL;
4061 goto out;
4062 }
4063 if (!locker)
4064 goto again;
4065
4066 ret = find_watcher(rbd_dev, locker);
4067 if (ret)
4068 goto out; /* request lock or error */
4069
4070 refreshed_locker = get_lock_owner_info(rbd_dev);
4071 if (IS_ERR(refreshed_locker)) {
4072 ret = PTR_ERR(refreshed_locker);
4073 refreshed_locker = NULL;
4074 goto out;
4075 }
4076 if (!refreshed_locker ||
4077 !locker_equal(locker, refreshed_locker))
4078 goto again;
4079
4080 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
4081 ENTITY_NAME(locker->id.name));
4082
4083 ret = ceph_monc_blocklist_add(&client->monc,
4084 &locker->info.addr);
4085 if (ret) {
4086 rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
4087 ENTITY_NAME(locker->id.name), ret);
4088 goto out;
4089 }
4090
4091 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
4092 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4093 locker->id.cookie, &locker->id.name);
4094 if (ret && ret != -ENOENT) {
4095 rbd_warn(rbd_dev, "failed to break header lock: %d",
4096 ret);
4097 goto out;
4098 }
4099
4100 again:
4101 free_locker(refreshed_locker);
4102 free_locker(locker);
4103 }
4104
4105 out:
4106 free_locker(refreshed_locker);
4107 free_locker(locker);
4108 return ret;
4109 }
4110
static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
4112 {
4113 int ret;
4114
4115 ret = rbd_dev_refresh(rbd_dev);
4116 if (ret)
4117 return ret;
4118
4119 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
4120 ret = rbd_object_map_open(rbd_dev);
4121 if (ret)
4122 return ret;
4123 }
4124
4125 return 0;
4126 }
4127
4128 /*
4129 * Return:
4130 * 0 - lock acquired
4131 * 1 - caller should call rbd_request_lock()
4132 * <0 - error
4133 */
static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
4135 {
4136 int ret;
4137
4138 down_read(&rbd_dev->lock_rwsem);
4139 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4140 rbd_dev->lock_state);
4141 if (__rbd_is_lock_owner(rbd_dev)) {
4142 up_read(&rbd_dev->lock_rwsem);
4143 return 0;
4144 }
4145
4146 up_read(&rbd_dev->lock_rwsem);
4147 down_write(&rbd_dev->lock_rwsem);
4148 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4149 rbd_dev->lock_state);
4150 if (__rbd_is_lock_owner(rbd_dev)) {
4151 up_write(&rbd_dev->lock_rwsem);
4152 return 0;
4153 }
4154
4155 ret = rbd_try_lock(rbd_dev);
4156 if (ret < 0) {
4157 rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
4158 goto out;
4159 }
4160 if (ret > 0) {
4161 up_write(&rbd_dev->lock_rwsem);
4162 return ret;
4163 }
4164
4165 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4166 rbd_assert(list_empty(&rbd_dev->running_list));
4167
4168 ret = rbd_post_acquire_action(rbd_dev);
4169 if (ret) {
4170 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4171 /*
4172 * Can't stay in RBD_LOCK_STATE_LOCKED because
4173 * rbd_lock_add_request() would let the request through,
4174 * assuming that e.g. object map is locked and loaded.
4175 */
4176 rbd_unlock(rbd_dev);
4177 }
4178
4179 out:
4180 wake_lock_waiters(rbd_dev, ret);
4181 up_write(&rbd_dev->lock_rwsem);
4182 return ret;
4183 }
4184
static void rbd_acquire_lock(struct work_struct *work)
4186 {
4187 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4188 struct rbd_device, lock_dwork);
4189 int ret;
4190
4191 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4192 again:
4193 ret = rbd_try_acquire_lock(rbd_dev);
4194 if (ret <= 0) {
4195 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4196 return;
4197 }
4198
4199 ret = rbd_request_lock(rbd_dev);
4200 if (ret == -ETIMEDOUT) {
4201 goto again; /* treat this as a dead client */
4202 } else if (ret == -EROFS) {
4203 rbd_warn(rbd_dev, "peer will not release lock");
4204 down_write(&rbd_dev->lock_rwsem);
4205 wake_lock_waiters(rbd_dev, ret);
4206 up_write(&rbd_dev->lock_rwsem);
4207 } else if (ret < 0) {
4208 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4209 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4210 RBD_RETRY_DELAY);
4211 } else {
4212 /*
4213 * lock owner acked, but resend if we don't see them
4214 * release the lock
4215 */
4216 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
4217 rbd_dev);
4218 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4219 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4220 }
4221 }
4222
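/*
 * Move to RBD_LOCK_STATE_RELEASING and wait for in-flight image
 * requests on running_list to drain.  Returns true if the lock has
 * been quiesced and can be released or have its cookie updated.
 * lock_rwsem may be temporarily dropped while waiting.
 */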
static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4224 {
4225 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4226 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4227
4228 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4229 return false;
4230
4231 /*
4232 * Ensure that all in-flight IO is flushed.
4233 */
4234 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4235 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4236 if (list_empty(&rbd_dev->running_list))
4237 return true;
4238
4239 up_write(&rbd_dev->lock_rwsem);
4240 wait_for_completion(&rbd_dev->releasing_wait);
4241
4242 down_write(&rbd_dev->lock_rwsem);
4243 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4244 return false;
4245
4246 rbd_assert(list_empty(&rbd_dev->running_list));
4247 return true;
4248 }
4249
static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4251 {
4252 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4253 rbd_object_map_close(rbd_dev);
4254 }
4255
static void __rbd_release_lock(struct rbd_device *rbd_dev)
4257 {
4258 rbd_assert(list_empty(&rbd_dev->running_list));
4259
4260 rbd_pre_release_action(rbd_dev);
4261 rbd_unlock(rbd_dev);
4262 }
4263
4264 /*
4265 * lock_rwsem must be held for write
4266 */
static void rbd_release_lock(struct rbd_device *rbd_dev)
4268 {
4269 if (!rbd_quiesce_lock(rbd_dev))
4270 return;
4271
4272 __rbd_release_lock(rbd_dev);
4273
4274 /*
4275 * Give others a chance to grab the lock - we would re-acquire
4276 * almost immediately if we got new IO while draining the running
4277 * list otherwise. We need to ack our own notifications, so this
4278 * lock_dwork will be requeued from rbd_handle_released_lock() by
4279 * way of maybe_kick_acquire().
4280 */
4281 cancel_delayed_work(&rbd_dev->lock_dwork);
4282 }
4283
static void rbd_release_lock_work(struct work_struct *work)
4285 {
4286 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4287 unlock_work);
4288
4289 down_write(&rbd_dev->lock_rwsem);
4290 rbd_release_lock(rbd_dev);
4291 up_write(&rbd_dev->lock_rwsem);
4292 }
4293
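/*
 * Re-kick lock acquisition if we are not the owner and there are
 * image requests waiting for the lock (or a retry is already
 * scheduled).
 */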
static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4295 {
4296 bool have_requests;
4297
4298 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4299 if (__rbd_is_lock_owner(rbd_dev))
4300 return;
4301
4302 spin_lock(&rbd_dev->lock_lists_lock);
4303 have_requests = !list_empty(&rbd_dev->acquiring_list);
4304 spin_unlock(&rbd_dev->lock_lists_lock);
4305 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4306 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4307 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4308 }
4309 }
4310
static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
4313 {
4314 struct rbd_client_id cid = { 0 };
4315
4316 if (struct_v >= 2) {
4317 cid.gid = ceph_decode_64(p);
4318 cid.handle = ceph_decode_64(p);
4319 }
4320
4321 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4322 cid.handle);
4323 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4324 down_write(&rbd_dev->lock_rwsem);
4325 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4326 dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
4327 __func__, rbd_dev, cid.gid, cid.handle);
4328 } else {
4329 rbd_set_owner_cid(rbd_dev, &cid);
4330 }
4331 downgrade_write(&rbd_dev->lock_rwsem);
4332 } else {
4333 down_read(&rbd_dev->lock_rwsem);
4334 }
4335
4336 maybe_kick_acquire(rbd_dev);
4337 up_read(&rbd_dev->lock_rwsem);
4338 }
4339
static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
4342 {
4343 struct rbd_client_id cid = { 0 };
4344
4345 if (struct_v >= 2) {
4346 cid.gid = ceph_decode_64(p);
4347 cid.handle = ceph_decode_64(p);
4348 }
4349
4350 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4351 cid.handle);
4352 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4353 down_write(&rbd_dev->lock_rwsem);
4354 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4355 dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
4356 __func__, rbd_dev, cid.gid, cid.handle,
4357 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4358 } else {
4359 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4360 }
4361 downgrade_write(&rbd_dev->lock_rwsem);
4362 } else {
4363 down_read(&rbd_dev->lock_rwsem);
4364 }
4365
4366 maybe_kick_acquire(rbd_dev);
4367 up_read(&rbd_dev->lock_rwsem);
4368 }
4369
4370 /*
4371 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4372 * ResponseMessage is needed.
4373 */
static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
				   void **p)
4376 {
4377 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4378 struct rbd_client_id cid = { 0 };
4379 int result = 1;
4380
4381 if (struct_v >= 2) {
4382 cid.gid = ceph_decode_64(p);
4383 cid.handle = ceph_decode_64(p);
4384 }
4385
4386 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4387 cid.handle);
4388 if (rbd_cid_equal(&cid, &my_cid))
4389 return result;
4390
4391 down_read(&rbd_dev->lock_rwsem);
4392 if (__rbd_is_lock_owner(rbd_dev)) {
4393 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4394 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4395 goto out_unlock;
4396
4397 /*
4398 * encode ResponseMessage(0) so the peer can detect
4399 * a missing owner
4400 */
4401 result = 0;
4402
4403 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4404 if (!rbd_dev->opts->exclusive) {
4405 dout("%s rbd_dev %p queueing unlock_work\n",
4406 __func__, rbd_dev);
4407 queue_work(rbd_dev->task_wq,
4408 &rbd_dev->unlock_work);
4409 } else {
4410 /* refuse to release the lock */
4411 result = -EROFS;
4412 }
4413 }
4414 }
4415
4416 out_unlock:
4417 up_read(&rbd_dev->lock_rwsem);
4418 return result;
4419 }
4420
static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
				     u64 notify_id, u64 cookie, s32 *result)
4423 {
4424 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4425 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4426 int buf_size = sizeof(buf);
4427 int ret;
4428
4429 if (result) {
4430 void *p = buf;
4431
4432 /* encode ResponseMessage */
4433 ceph_start_encoding(&p, 1, 1,
4434 buf_size - CEPH_ENCODING_START_BLK_LEN);
4435 ceph_encode_32(&p, *result);
4436 } else {
4437 buf_size = 0;
4438 }
4439
4440 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4441 &rbd_dev->header_oloc, notify_id, cookie,
4442 buf, buf_size);
4443 if (ret)
4444 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4445 }
4446
static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
				   u64 cookie)
4449 {
4450 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4451 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4452 }
4453
static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
					  u64 notify_id, u64 cookie, s32 result)
4456 {
4457 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4458 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4459 }
4460
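/*
 * Watch/notify callback for the header object: decode the
 * NotifyMessage (an empty payload is a legacy header update) and
 * dispatch on the notify op.
 */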
static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
			 u64 notifier_id, void *data, size_t data_len)
4463 {
4464 struct rbd_device *rbd_dev = arg;
4465 void *p = data;
4466 void *const end = p + data_len;
4467 u8 struct_v = 0;
4468 u32 len;
4469 u32 notify_op;
4470 int ret;
4471
4472 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4473 __func__, rbd_dev, cookie, notify_id, data_len);
4474 if (data_len) {
4475 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4476 &struct_v, &len);
4477 if (ret) {
4478 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4479 ret);
4480 return;
4481 }
4482
4483 notify_op = ceph_decode_32(&p);
4484 } else {
4485 /* legacy notification for header updates */
4486 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4487 len = 0;
4488 }
4489
4490 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4491 switch (notify_op) {
4492 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4493 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4494 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4495 break;
4496 case RBD_NOTIFY_OP_RELEASED_LOCK:
4497 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4498 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4499 break;
4500 case RBD_NOTIFY_OP_REQUEST_LOCK:
4501 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4502 if (ret <= 0)
4503 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4504 cookie, ret);
4505 else
4506 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4507 break;
4508 case RBD_NOTIFY_OP_HEADER_UPDATE:
4509 ret = rbd_dev_refresh(rbd_dev);
4510 if (ret)
4511 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4512
4513 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4514 break;
4515 default:
4516 if (rbd_is_lock_owner(rbd_dev))
4517 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4518 cookie, -EOPNOTSUPP);
4519 else
4520 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4521 break;
4522 }
4523 }
4524
4525 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4526
static void rbd_watch_errcb(void *arg, u64 cookie, int err)
4528 {
4529 struct rbd_device *rbd_dev = arg;
4530
4531 rbd_warn(rbd_dev, "encountered watch error: %d", err);
4532
4533 down_write(&rbd_dev->lock_rwsem);
4534 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4535 up_write(&rbd_dev->lock_rwsem);
4536
4537 mutex_lock(&rbd_dev->watch_mutex);
4538 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4539 __rbd_unregister_watch(rbd_dev);
4540 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4541
4542 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4543 }
4544 mutex_unlock(&rbd_dev->watch_mutex);
4545 }
4546
4547 /*
4548 * watch_mutex must be locked
4549 */
static int __rbd_register_watch(struct rbd_device *rbd_dev)
4551 {
4552 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4553 struct ceph_osd_linger_request *handle;
4554
4555 rbd_assert(!rbd_dev->watch_handle);
4556 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4557
4558 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4559 &rbd_dev->header_oloc, rbd_watch_cb,
4560 rbd_watch_errcb, rbd_dev);
4561 if (IS_ERR(handle))
4562 return PTR_ERR(handle);
4563
4564 rbd_dev->watch_handle = handle;
4565 return 0;
4566 }
4567
4568 /*
4569 * watch_mutex must be locked
4570 */
static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4572 {
4573 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4574 int ret;
4575
4576 rbd_assert(rbd_dev->watch_handle);
4577 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4578
4579 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4580 if (ret)
4581 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4582
4583 rbd_dev->watch_handle = NULL;
4584 }
4585
static int rbd_register_watch(struct rbd_device *rbd_dev)
4587 {
4588 int ret;
4589
4590 mutex_lock(&rbd_dev->watch_mutex);
4591 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4592 ret = __rbd_register_watch(rbd_dev);
4593 if (ret)
4594 goto out;
4595
4596 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4597 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4598
4599 out:
4600 mutex_unlock(&rbd_dev->watch_mutex);
4601 return ret;
4602 }
4603
static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4605 {
4606 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4607
4608 cancel_work_sync(&rbd_dev->acquired_lock_work);
4609 cancel_work_sync(&rbd_dev->released_lock_work);
4610 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4611 cancel_work_sync(&rbd_dev->unlock_work);
4612 }
4613
4614 /*
4615 * header_rwsem must not be held to avoid a deadlock with
4616 * rbd_dev_refresh() when flushing notifies.
4617 */
static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4619 {
4620 cancel_tasks_sync(rbd_dev);
4621
4622 mutex_lock(&rbd_dev->watch_mutex);
4623 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4624 __rbd_unregister_watch(rbd_dev);
4625 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4626 mutex_unlock(&rbd_dev->watch_mutex);
4627
4628 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4629 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4630 }
4631
4632 /*
4633 * lock_rwsem must be held for write
4634 */
static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4636 {
4637 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4638 char cookie[32];
4639 int ret;
4640
4641 if (!rbd_quiesce_lock(rbd_dev))
4642 return;
4643
4644 format_lock_cookie(rbd_dev, cookie);
4645 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4646 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4647 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4648 RBD_LOCK_TAG, cookie);
4649 if (ret) {
4650 if (ret != -EOPNOTSUPP)
4651 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4652 ret);
4653
4654 /*
4655 * Lock cookie cannot be updated on older OSDs, so do
4656 * a manual release and queue an acquire.
4657 */
4658 __rbd_release_lock(rbd_dev);
4659 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4660 } else {
4661 __rbd_lock(rbd_dev, cookie);
4662 wake_lock_waiters(rbd_dev, 0);
4663 }
4664 }
4665
static void rbd_reregister_watch(struct work_struct *work)
4667 {
4668 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4669 struct rbd_device, watch_dwork);
4670 int ret;
4671
4672 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4673
4674 mutex_lock(&rbd_dev->watch_mutex);
4675 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4676 mutex_unlock(&rbd_dev->watch_mutex);
4677 return;
4678 }
4679
4680 ret = __rbd_register_watch(rbd_dev);
4681 if (ret) {
4682 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4683 if (ret != -EBLOCKLISTED && ret != -ENOENT) {
4684 queue_delayed_work(rbd_dev->task_wq,
4685 &rbd_dev->watch_dwork,
4686 RBD_RETRY_DELAY);
4687 mutex_unlock(&rbd_dev->watch_mutex);
4688 return;
4689 }
4690
4691 mutex_unlock(&rbd_dev->watch_mutex);
4692 down_write(&rbd_dev->lock_rwsem);
4693 wake_lock_waiters(rbd_dev, ret);
4694 up_write(&rbd_dev->lock_rwsem);
4695 return;
4696 }
4697
4698 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4699 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4700 mutex_unlock(&rbd_dev->watch_mutex);
4701
4702 down_write(&rbd_dev->lock_rwsem);
4703 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4704 rbd_reacquire_lock(rbd_dev);
4705 up_write(&rbd_dev->lock_rwsem);
4706
4707 ret = rbd_dev_refresh(rbd_dev);
4708 if (ret)
4709 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4710 }
4711
4712 /*
4713 * Synchronous osd object method call. Returns the number of bytes
4714 * returned in the outbound buffer, or a negative error code.
4715 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			       struct ceph_object_id *oid,
			       struct ceph_object_locator *oloc,
			       const char *method_name,
			       const void *outbound,
			       size_t outbound_size,
			       void *inbound,
			       size_t inbound_size)
4724 {
4725 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4726 struct page *req_page = NULL;
4727 struct page *reply_page;
4728 int ret;
4729
	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
4737 if (outbound) {
4738 if (outbound_size > PAGE_SIZE)
4739 return -E2BIG;
4740
4741 req_page = alloc_page(GFP_KERNEL);
4742 if (!req_page)
4743 return -ENOMEM;
4744
4745 memcpy(page_address(req_page), outbound, outbound_size);
4746 }
4747
4748 reply_page = alloc_page(GFP_KERNEL);
4749 if (!reply_page) {
4750 if (req_page)
4751 __free_page(req_page);
4752 return -ENOMEM;
4753 }
4754
4755 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4756 CEPH_OSD_FLAG_READ, req_page, outbound_size,
4757 &reply_page, &inbound_size);
4758 if (!ret) {
4759 memcpy(inbound, page_address(reply_page), inbound_size);
4760 ret = inbound_size;
4761 }
4762
4763 if (req_page)
4764 __free_page(req_page);
4765 __free_page(reply_page);
4766 return ret;
4767 }
4768
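/*
 * Worker for a block layer request: validate it against the current
 * mapping size, fill the image request from the bio chain (or as a
 * no-data request for discard/zeroout) and kick the state machine.
 */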
static void rbd_queue_workfn(struct work_struct *work)
4770 {
4771 struct rbd_img_request *img_request =
4772 container_of(work, struct rbd_img_request, work);
4773 struct rbd_device *rbd_dev = img_request->rbd_dev;
4774 enum obj_operation_type op_type = img_request->op_type;
4775 struct request *rq = blk_mq_rq_from_pdu(img_request);
4776 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4777 u64 length = blk_rq_bytes(rq);
4778 u64 mapping_size;
4779 int result;
4780
4781 /* Ignore/skip any zero-length requests */
4782 if (!length) {
4783 dout("%s: zero-length request\n", __func__);
4784 result = 0;
4785 goto err_img_request;
4786 }
4787
4788 blk_mq_start_request(rq);
4789
4790 down_read(&rbd_dev->header_rwsem);
4791 mapping_size = rbd_dev->mapping.size;
4792 rbd_img_capture_header(img_request);
4793 up_read(&rbd_dev->header_rwsem);
4794
4795 if (offset + length > mapping_size) {
4796 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4797 length, mapping_size);
4798 result = -EIO;
4799 goto err_img_request;
4800 }
4801
4802 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4803 img_request, obj_op_name(op_type), offset, length);
4804
4805 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
4806 result = rbd_img_fill_nodata(img_request, offset, length);
4807 else
4808 result = rbd_img_fill_from_bio(img_request, offset, length,
4809 rq->bio);
4810 if (result)
4811 goto err_img_request;
4812
4813 rbd_img_handle_request(img_request, 0);
4814 return;
4815
4816 err_img_request:
4817 rbd_img_request_destroy(img_request);
4818 if (result)
4819 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4820 obj_op_name(op_type), length, offset, result);
4821 blk_mq_end_request(rq, errno_to_blk_status(result));
4822 }
4823
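/*
 * blk-mq ->queue_rq() handler: map the block operation to an rbd
 * object operation type, reject writes on read-only mappings and
 * punt the request to rbd_wq for processing in process context.
 */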
static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
4826 {
4827 struct rbd_device *rbd_dev = hctx->queue->queuedata;
4828 struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
4829 enum obj_operation_type op_type;
4830
4831 switch (req_op(bd->rq)) {
4832 case REQ_OP_DISCARD:
4833 op_type = OBJ_OP_DISCARD;
4834 break;
4835 case REQ_OP_WRITE_ZEROES:
4836 op_type = OBJ_OP_ZEROOUT;
4837 break;
4838 case REQ_OP_WRITE:
4839 op_type = OBJ_OP_WRITE;
4840 break;
4841 case REQ_OP_READ:
4842 op_type = OBJ_OP_READ;
4843 break;
4844 default:
4845 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
4846 return BLK_STS_IOERR;
4847 }
4848
4849 rbd_img_request_init(img_req, rbd_dev, op_type);
4850
4851 if (rbd_img_is_write(img_req)) {
4852 if (rbd_is_ro(rbd_dev)) {
4853 rbd_warn(rbd_dev, "%s on read-only mapping",
4854 obj_op_name(img_req->op_type));
4855 return BLK_STS_IOERR;
4856 }
4857 rbd_assert(!rbd_is_snap(rbd_dev));
4858 }
4859
4860 INIT_WORK(&img_req->work, rbd_queue_workfn);
4861 queue_work(rbd_wq, &img_req->work);
4862 return BLK_STS_OK;
4863 }
4864
static void rbd_free_disk(struct rbd_device *rbd_dev)
4866 {
4867 blk_cleanup_queue(rbd_dev->disk->queue);
4868 blk_mq_free_tag_set(&rbd_dev->tag_set);
4869 put_disk(rbd_dev->disk);
4870 rbd_dev->disk = NULL;
4871 }
4872
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
			     struct ceph_object_id *oid,
			     struct ceph_object_locator *oloc,
			     void *buf, int buf_len)
4877
4878 {
4879 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4880 struct ceph_osd_request *req;
4881 struct page **pages;
4882 int num_pages = calc_pages_for(0, buf_len);
4883 int ret;
4884
4885 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4886 if (!req)
4887 return -ENOMEM;
4888
4889 ceph_oid_copy(&req->r_base_oid, oid);
4890 ceph_oloc_copy(&req->r_base_oloc, oloc);
4891 req->r_flags = CEPH_OSD_FLAG_READ;
4892
4893 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4894 if (IS_ERR(pages)) {
4895 ret = PTR_ERR(pages);
4896 goto out_req;
4897 }
4898
4899 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4900 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4901 true);
4902
4903 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4904 if (ret)
4905 goto out_req;
4906
4907 ceph_osdc_start_request(osdc, req, false);
4908 ret = ceph_osdc_wait_request(osdc, req);
4909 if (ret >= 0)
4910 ceph_copy_from_page_vector(pages, buf, 0, ret);
4911
4912 out_req:
4913 ceph_osdc_put_request(req);
4914 return ret;
4915 }
4916
4917 /*
4918 * Read the complete header for the given rbd device. On successful
4919 * return, the rbd_dev->header field will contain up-to-date
4920 * information about the image.
4921 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
				  struct rbd_image_header *header,
				  bool first_time)
4925 {
4926 struct rbd_image_header_ondisk *ondisk = NULL;
4927 u32 snap_count = 0;
4928 u64 names_size = 0;
4929 u32 want_count;
4930 int ret;
4931
4932 /*
4933 * The complete header will include an array of its 64-bit
4934 * snapshot ids, followed by the names of those snapshots as
4935 * a contiguous block of NUL-terminated strings. Note that
4936 * the number of snapshots could change by the time we read
4937 * it in, in which case we re-read it.
4938 */
4939 do {
4940 size_t size;
4941
4942 kfree(ondisk);
4943
4944 size = sizeof (*ondisk);
4945 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4946 size += names_size;
4947 ondisk = kmalloc(size, GFP_KERNEL);
4948 if (!ondisk)
4949 return -ENOMEM;
4950
4951 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4952 &rbd_dev->header_oloc, ondisk, size);
4953 if (ret < 0)
4954 goto out;
4955 if ((size_t)ret < size) {
4956 ret = -ENXIO;
4957 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4958 size, ret);
4959 goto out;
4960 }
4961 if (!rbd_dev_ondisk_valid(ondisk)) {
4962 ret = -ENXIO;
4963 rbd_warn(rbd_dev, "invalid header");
4964 goto out;
4965 }
4966
4967 names_size = le64_to_cpu(ondisk->snap_names_len);
4968 want_count = snap_count;
4969 snap_count = le32_to_cpu(ondisk->snap_count);
4970 } while (snap_count != want_count);
4971
4972 ret = rbd_header_from_disk(header, ondisk, first_time);
4973 out:
4974 kfree(ondisk);
4975
4976 return ret;
4977 }
4978
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4980 {
4981 sector_t size;
4982
4983 /*
4984 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4985 * try to update its size. If REMOVING is set, updating size
4986 * is just useless work since the device can't be opened.
4987 */
4988 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4989 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4990 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4991 dout("setting size to %llu sectors", (unsigned long long)size);
4992 set_capacity(rbd_dev->disk, size);
4993 revalidate_disk_size(rbd_dev->disk, true);
4994 }
4995 }
4996
4997 static const struct blk_mq_ops rbd_mq_ops = {
4998 .queue_rq = rbd_queue_rq,
4999 };
5000
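/*
 * Allocate the gendisk, blk-mq tag set and request queue for this
 * mapping, and apply queue limits derived from the object set size
 * and the alloc_size/trim options.
 */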
static int rbd_init_disk(struct rbd_device *rbd_dev)
5002 {
5003 struct gendisk *disk;
5004 struct request_queue *q;
5005 unsigned int objset_bytes =
5006 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
5007 int err;
5008
5009 /* create gendisk info */
5010 disk = alloc_disk(single_major ?
5011 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
5012 RBD_MINORS_PER_MAJOR);
5013 if (!disk)
5014 return -ENOMEM;
5015
5016 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
5017 rbd_dev->dev_id);
5018 disk->major = rbd_dev->major;
5019 disk->first_minor = rbd_dev->minor;
5020 if (single_major)
5021 disk->flags |= GENHD_FL_EXT_DEVT;
5022 disk->fops = &rbd_bd_ops;
5023 disk->private_data = rbd_dev;
5024
5025 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
5026 rbd_dev->tag_set.ops = &rbd_mq_ops;
5027 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
5028 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
5029 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
5030 rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
5031 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
5032
5033 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
5034 if (err)
5035 goto out_disk;
5036
5037 q = blk_mq_init_queue(&rbd_dev->tag_set);
5038 if (IS_ERR(q)) {
5039 err = PTR_ERR(q);
5040 goto out_tag_set;
5041 }
5042
5043 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
5044 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
5045
5046 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
5047 q->limits.max_sectors = queue_max_hw_sectors(q);
5048 blk_queue_max_segments(q, USHRT_MAX);
5049 blk_queue_max_segment_size(q, UINT_MAX);
5050 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
5051 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
5052
5053 if (rbd_dev->opts->trim) {
5054 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
5055 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
5056 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
5057 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
5058 }
5059
5060 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
5061 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
5062
5063 /*
5064 * disk_release() expects a queue ref from add_disk() and will
5065 * put it. Hold an extra ref until add_disk() is called.
5066 */
5067 WARN_ON(!blk_get_queue(q));
5068 disk->queue = q;
5069 q->queuedata = rbd_dev;
5070
5071 rbd_dev->disk = disk;
5072
5073 return 0;
5074 out_tag_set:
5075 blk_mq_free_tag_set(&rbd_dev->tag_set);
5076 out_disk:
5077 put_disk(disk);
5078 return err;
5079 }
5080
5081 /*
5082 sysfs
5083 */
5084
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
5086 {
5087 return container_of(dev, struct rbd_device, dev);
5088 }
5089
static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
5092 {
5093 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5094
5095 return sprintf(buf, "%llu\n",
5096 (unsigned long long)rbd_dev->mapping.size);
5097 }
5098
static ssize_t rbd_features_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
5101 {
5102 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5103
5104 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
5105 }
5106
static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
5109 {
5110 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5111
5112 if (rbd_dev->major)
5113 return sprintf(buf, "%d\n", rbd_dev->major);
5114
5115 return sprintf(buf, "(none)\n");
5116 }
5117
static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
5120 {
5121 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5122
5123 return sprintf(buf, "%d\n", rbd_dev->minor);
5124 }
5125
static ssize_t rbd_client_addr_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
5128 {
5129 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5130 struct ceph_entity_addr *client_addr =
5131 ceph_client_addr(rbd_dev->rbd_client->client);
5132
5133 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5134 le32_to_cpu(client_addr->nonce));
5135 }
5136
static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
5139 {
5140 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5141
5142 return sprintf(buf, "client%lld\n",
5143 ceph_client_gid(rbd_dev->rbd_client->client));
5144 }
5145
static ssize_t rbd_cluster_fsid_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
5148 {
5149 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5150
5151 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5152 }
5153
static ssize_t rbd_config_info_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
5156 {
5157 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5158
5159 if (!capable(CAP_SYS_ADMIN))
5160 return -EPERM;
5161
5162 return sprintf(buf, "%s\n", rbd_dev->config_info);
5163 }
5164
static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
5167 {
5168 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5169
5170 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5171 }
5172
static ssize_t rbd_pool_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
5175 {
5176 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5177
5178 return sprintf(buf, "%llu\n",
5179 (unsigned long long) rbd_dev->spec->pool_id);
5180 }
5181
static ssize_t rbd_pool_ns_show(struct device *dev,
				struct device_attribute *attr, char *buf)
5184 {
5185 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5186
5187 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5188 }
5189
static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
5192 {
5193 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5194
5195 if (rbd_dev->spec->image_name)
5196 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5197
5198 return sprintf(buf, "(unknown)\n");
5199 }
5200
static ssize_t rbd_image_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
5203 {
5204 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5205
5206 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5207 }
5208
5209 /*
5210 * Shows the name of the currently-mapped snapshot (or
5211 * RBD_SNAP_HEAD_NAME for the base image).
5212 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
5216 {
5217 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5218
5219 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5220 }
5221
static ssize_t rbd_snap_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
5224 {
5225 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5226
5227 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5228 }
5229
5230 /*
5231 * For a v2 image, shows the chain of parent images, separated by empty
5232 * lines. For v1 images or if there is no parent, shows "(no parent
5233 * image)".
5234 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
5238 {
5239 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5240 ssize_t count = 0;
5241
5242 if (!rbd_dev->parent)
5243 return sprintf(buf, "(no parent image)\n");
5244
5245 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5246 struct rbd_spec *spec = rbd_dev->parent_spec;
5247
5248 count += sprintf(&buf[count], "%s"
5249 "pool_id %llu\npool_name %s\n"
5250 "pool_ns %s\n"
5251 "image_id %s\nimage_name %s\n"
5252 "snap_id %llu\nsnap_name %s\n"
5253 "overlap %llu\n",
5254 !count ? "" : "\n", /* first? */
5255 spec->pool_id, spec->pool_name,
5256 spec->pool_ns ?: "",
5257 spec->image_id, spec->image_name ?: "(unknown)",
5258 spec->snap_id, spec->snap_name,
5259 rbd_dev->parent_overlap);
5260 }
5261
5262 return count;
5263 }
5264
static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
5269 {
5270 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5271 int ret;
5272
5273 if (!capable(CAP_SYS_ADMIN))
5274 return -EPERM;
5275
5276 ret = rbd_dev_refresh(rbd_dev);
5277 if (ret)
5278 return ret;
5279
5280 return size;
5281 }
5282
5283 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5284 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5285 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5286 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5287 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5288 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5289 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5290 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5291 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5292 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
5293 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5294 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5295 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5296 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5297 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5298 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5299 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
5300
5301 static struct attribute *rbd_attrs[] = {
5302 &dev_attr_size.attr,
5303 &dev_attr_features.attr,
5304 &dev_attr_major.attr,
5305 &dev_attr_minor.attr,
5306 &dev_attr_client_addr.attr,
5307 &dev_attr_client_id.attr,
5308 &dev_attr_cluster_fsid.attr,
5309 &dev_attr_config_info.attr,
5310 &dev_attr_pool.attr,
5311 &dev_attr_pool_id.attr,
5312 &dev_attr_pool_ns.attr,
5313 &dev_attr_name.attr,
5314 &dev_attr_image_id.attr,
5315 &dev_attr_current_snap.attr,
5316 &dev_attr_snap_id.attr,
5317 &dev_attr_parent.attr,
5318 &dev_attr_refresh.attr,
5319 NULL
5320 };
5321
5322 static struct attribute_group rbd_attr_group = {
5323 .attrs = rbd_attrs,
5324 };
5325
5326 static const struct attribute_group *rbd_attr_groups[] = {
5327 &rbd_attr_group,
5328 NULL
5329 };
5330
5331 static void rbd_dev_release(struct device *dev);
5332
5333 static const struct device_type rbd_device_type = {
5334 .name = "rbd",
5335 .groups = rbd_attr_groups,
5336 .release = rbd_dev_release,
5337 };
5338
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5340 {
5341 kref_get(&spec->kref);
5342
5343 return spec;
5344 }
5345
5346 static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
5348 {
5349 if (spec)
5350 kref_put(&spec->kref, rbd_spec_free);
5351 }
5352
static struct rbd_spec *rbd_spec_alloc(void)
5354 {
5355 struct rbd_spec *spec;
5356
5357 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5358 if (!spec)
5359 return NULL;
5360
5361 spec->pool_id = CEPH_NOPOOL;
5362 spec->snap_id = CEPH_NOSNAP;
5363 kref_init(&spec->kref);
5364
5365 return spec;
5366 }
5367
static void rbd_spec_free(struct kref *kref)
5369 {
5370 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5371
5372 kfree(spec->pool_name);
5373 kfree(spec->pool_ns);
5374 kfree(spec->image_id);
5375 kfree(spec->image_name);
5376 kfree(spec->snap_name);
5377 kfree(spec);
5378 }
5379
static void rbd_dev_free(struct rbd_device *rbd_dev)
5381 {
5382 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5383 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5384
5385 ceph_oid_destroy(&rbd_dev->header_oid);
5386 ceph_oloc_destroy(&rbd_dev->header_oloc);
5387 kfree(rbd_dev->config_info);
5388
5389 rbd_put_client(rbd_dev->rbd_client);
5390 rbd_spec_put(rbd_dev->spec);
5391 kfree(rbd_dev->opts);
5392 kfree(rbd_dev);
5393 }
5394
static void rbd_dev_release(struct device *dev)
5396 {
5397 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5398 bool need_put = !!rbd_dev->opts;
5399
5400 if (need_put) {
5401 destroy_workqueue(rbd_dev->task_wq);
5402 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5403 }
5404
5405 rbd_dev_free(rbd_dev);
5406
5407 /*
5408 * This is racy, but way better than putting the module_put() outside of
5409 * the release callback. The race window is pretty small, so
5410 * doing something similar to dm (dm-builtin.c) is overkill.
5411 */
5412 if (need_put)
5413 module_put(THIS_MODULE);
5414 }
5415
5416 static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
5417 {
5418 struct rbd_device *rbd_dev;
5419
5420 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5421 if (!rbd_dev)
5422 return NULL;
5423
5424 spin_lock_init(&rbd_dev->lock);
5425 INIT_LIST_HEAD(&rbd_dev->node);
5426 init_rwsem(&rbd_dev->header_rwsem);
5427
5428 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5429 ceph_oid_init(&rbd_dev->header_oid);
5430 rbd_dev->header_oloc.pool = spec->pool_id;
5431 if (spec->pool_ns) {
5432 WARN_ON(!*spec->pool_ns);
5433 rbd_dev->header_oloc.pool_ns =
5434 ceph_find_or_create_string(spec->pool_ns,
5435 strlen(spec->pool_ns));
5436 }
5437
5438 mutex_init(&rbd_dev->watch_mutex);
5439 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5440 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5441
5442 init_rwsem(&rbd_dev->lock_rwsem);
5443 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5444 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5445 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5446 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5447 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5448 spin_lock_init(&rbd_dev->lock_lists_lock);
5449 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5450 INIT_LIST_HEAD(&rbd_dev->running_list);
5451 init_completion(&rbd_dev->acquire_wait);
5452 init_completion(&rbd_dev->releasing_wait);
5453
5454 spin_lock_init(&rbd_dev->object_map_lock);
5455
5456 rbd_dev->dev.bus = &rbd_bus_type;
5457 rbd_dev->dev.type = &rbd_device_type;
5458 rbd_dev->dev.parent = &rbd_root_dev;
5459 device_initialize(&rbd_dev->dev);
5460
5461 return rbd_dev;
5462 }
5463
5464 /*
5465 * Create an rbd_dev for a mapping.
5466 */
5467 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5468 struct rbd_spec *spec,
5469 struct rbd_options *opts)
5470 {
5471 struct rbd_device *rbd_dev;
5472
5473 rbd_dev = __rbd_dev_create(spec);
5474 if (!rbd_dev)
5475 return NULL;
5476
5477 /* get an id and fill in device name */
5478 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5479 minor_to_rbd_dev_id(1 << MINORBITS),
5480 GFP_KERNEL);
5481 if (rbd_dev->dev_id < 0)
5482 goto fail_rbd_dev;
5483
5484 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5485 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5486 rbd_dev->name);
5487 if (!rbd_dev->task_wq)
5488 goto fail_dev_id;
5489
5490 /* we have a ref from do_rbd_add() */
5491 __module_get(THIS_MODULE);
5492
5493 rbd_dev->rbd_client = rbdc;
5494 rbd_dev->spec = spec;
5495 rbd_dev->opts = opts;
5496
5497 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5498 return rbd_dev;
5499
5500 fail_dev_id:
5501 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5502 fail_rbd_dev:
5503 rbd_dev_free(rbd_dev);
5504 return NULL;
5505 }
5506
5507 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5508 {
5509 if (rbd_dev)
5510 put_device(&rbd_dev->dev);
5511 }
5512
5513 /*
5514 * Get the size and object order for an image snapshot, or, if
5515 * snap_id is CEPH_NOSNAP, get this information for the base
5516 * image.
5517 */
5518 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5519 u8 *order, u64 *snap_size)
5520 {
5521 __le64 snapid = cpu_to_le64(snap_id);
5522 int ret;
5523 struct {
5524 u8 order;
5525 __le64 size;
5526 } __attribute__ ((packed)) size_buf = { 0 };
5527
5528 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5529 &rbd_dev->header_oloc, "get_size",
5530 &snapid, sizeof(snapid),
5531 &size_buf, sizeof(size_buf));
5532 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5533 if (ret < 0)
5534 return ret;
5535 if (ret < sizeof (size_buf))
5536 return -ERANGE;
5537
5538 if (order) {
5539 *order = size_buf.order;
5540 dout(" order %u", (unsigned int)*order);
5541 }
5542 *snap_size = le64_to_cpu(size_buf.size);
5543
5544 dout(" snap_id 0x%016llx snap_size = %llu\n",
5545 (unsigned long long)snap_id,
5546 (unsigned long long)*snap_size);
5547
5548 return 0;
5549 }
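/*
 * Worked example, using illustrative values: the "order" reported by
 * get_size is the log2 of the object size, so an image created with the
 * common default order of 22 uses 1 << 22 = 4194304-byte (4 MiB)
 * objects, while order 25 would mean 32 MiB objects.  snap_size is the
 * image (or snapshot) size in bytes.
 */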
5550
5551 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
5552 char **pobject_prefix)
5553 {
5554 size_t size;
5555 void *reply_buf;
5556 char *object_prefix;
5557 int ret;
5558 void *p;
5559
5560 /* Response will be an encoded string, which includes a length */
5561 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5562 reply_buf = kzalloc(size, GFP_KERNEL);
5563 if (!reply_buf)
5564 return -ENOMEM;
5565
5566 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5567 &rbd_dev->header_oloc, "get_object_prefix",
5568 NULL, 0, reply_buf, size);
5569 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5570 if (ret < 0)
5571 goto out;
5572
5573 p = reply_buf;
5574 object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
5575 GFP_NOIO);
5576 if (IS_ERR(object_prefix)) {
5577 ret = PTR_ERR(object_prefix);
5578 goto out;
5579 }
5580 ret = 0;
5581
5582 *pobject_prefix = object_prefix;
5583 dout(" object_prefix = %s\n", object_prefix);
5584 out:
5585 kfree(reply_buf);
5586
5587 return ret;
5588 }
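/*
 * For reference, the "encoded string" consumed by
 * ceph_extract_encoded_string() is a little-endian 32-bit length
 * followed by that many bytes of data, with no NUL terminator on the
 * wire.  A hypothetical prefix "rbd_data.abcd1234" (17 bytes) would
 * arrive as:
 *
 *	11 00 00 00 'r' 'b' 'd' '_' 'd' 'a' 't' 'a' '.' ...
 *
 * hence the reply buffer is sized sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX.
 */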
5589
5590 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5591 bool read_only, u64 *snap_features)
5592 {
5593 struct {
5594 __le64 snap_id;
5595 u8 read_only;
5596 } features_in;
5597 struct {
5598 __le64 features;
5599 __le64 incompat;
5600 } __attribute__ ((packed)) features_buf = { 0 };
5601 u64 unsup;
5602 int ret;
5603
5604 features_in.snap_id = cpu_to_le64(snap_id);
5605 features_in.read_only = read_only;
5606
5607 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5608 &rbd_dev->header_oloc, "get_features",
5609 &features_in, sizeof(features_in),
5610 &features_buf, sizeof(features_buf));
5611 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5612 if (ret < 0)
5613 return ret;
5614 if (ret < sizeof (features_buf))
5615 return -ERANGE;
5616
5617 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5618 if (unsup) {
5619 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5620 unsup);
5621 return -ENXIO;
5622 }
5623
5624 *snap_features = le64_to_cpu(features_buf.features);
5625
5626 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5627 (unsigned long long)snap_id,
5628 (unsigned long long)*snap_features,
5629 (unsigned long long)le64_to_cpu(features_buf.incompat));
5630
5631 return 0;
5632 }
5633
5634 /*
5635 * These are generic image flags, but since they are used only for
5636 * the object map, store them in rbd_dev->object_map_flags.
5637 *
5638 * For the same reason, this function is called only on object map
5639 * (re)load and not on header refresh.
5640 */
5641 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5642 {
5643 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5644 __le64 flags;
5645 int ret;
5646
5647 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5648 &rbd_dev->header_oloc, "get_flags",
5649 &snapid, sizeof(snapid),
5650 &flags, sizeof(flags));
5651 if (ret < 0)
5652 return ret;
5653 if (ret < sizeof(flags))
5654 return -EBADMSG;
5655
5656 rbd_dev->object_map_flags = le64_to_cpu(flags);
5657 return 0;
5658 }
5659
5660 struct parent_image_info {
5661 u64 pool_id;
5662 const char *pool_ns;
5663 const char *image_id;
5664 u64 snap_id;
5665
5666 bool has_overlap;
5667 u64 overlap;
5668 };
5669
5670 static void rbd_parent_info_cleanup(struct parent_image_info *pii)
5671 {
5672 kfree(pii->pool_ns);
5673 kfree(pii->image_id);
5674
5675 memset(pii, 0, sizeof(*pii));
5676 }
5677
5678 /*
5679 * The caller is responsible for @pii.
5680 */
5681 static int decode_parent_image_spec(void **p, void *end,
5682 struct parent_image_info *pii)
5683 {
5684 u8 struct_v;
5685 u32 struct_len;
5686 int ret;
5687
5688 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5689 &struct_v, &struct_len);
5690 if (ret)
5691 return ret;
5692
5693 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5694 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5695 if (IS_ERR(pii->pool_ns)) {
5696 ret = PTR_ERR(pii->pool_ns);
5697 pii->pool_ns = NULL;
5698 return ret;
5699 }
5700 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5701 if (IS_ERR(pii->image_id)) {
5702 ret = PTR_ERR(pii->image_id);
5703 pii->image_id = NULL;
5704 return ret;
5705 }
5706 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5707 return 0;
5708
5709 e_inval:
5710 return -EINVAL;
5711 }
5712
5713 static int __get_parent_info(struct rbd_device *rbd_dev,
5714 struct page *req_page,
5715 struct page *reply_page,
5716 struct parent_image_info *pii)
5717 {
5718 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5719 size_t reply_len = PAGE_SIZE;
5720 void *p, *end;
5721 int ret;
5722
5723 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5724 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
5725 req_page, sizeof(u64), &reply_page, &reply_len);
5726 if (ret)
5727 return ret == -EOPNOTSUPP ? 1 : ret;
5728
5729 p = page_address(reply_page);
5730 end = p + reply_len;
5731 ret = decode_parent_image_spec(&p, end, pii);
5732 if (ret)
5733 return ret;
5734
5735 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5736 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
5737 req_page, sizeof(u64), &reply_page, &reply_len);
5738 if (ret)
5739 return ret;
5740
5741 p = page_address(reply_page);
5742 end = p + reply_len;
5743 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5744 if (pii->has_overlap)
5745 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5746
5747 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5748 __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
5749 pii->has_overlap, pii->overlap);
5750 return 0;
5751
5752 e_inval:
5753 return -EINVAL;
5754 }
5755
5756 /*
5757 * The caller is responsible for @pii.
5758 */
5759 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5760 struct page *req_page,
5761 struct page *reply_page,
5762 struct parent_image_info *pii)
5763 {
5764 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5765 size_t reply_len = PAGE_SIZE;
5766 void *p, *end;
5767 int ret;
5768
5769 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5770 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
5771 req_page, sizeof(u64), &reply_page, &reply_len);
5772 if (ret)
5773 return ret;
5774
5775 p = page_address(reply_page);
5776 end = p + reply_len;
5777 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5778 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5779 if (IS_ERR(pii->image_id)) {
5780 ret = PTR_ERR(pii->image_id);
5781 pii->image_id = NULL;
5782 return ret;
5783 }
5784 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
5785 pii->has_overlap = true;
5786 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5787
5788 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5789 __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
5790 pii->has_overlap, pii->overlap);
5791 return 0;
5792
5793 e_inval:
5794 return -EINVAL;
5795 }
5796
5797 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
5798 struct parent_image_info *pii)
5799 {
5800 struct page *req_page, *reply_page;
5801 void *p;
5802 int ret;
5803
5804 req_page = alloc_page(GFP_KERNEL);
5805 if (!req_page)
5806 return -ENOMEM;
5807
5808 reply_page = alloc_page(GFP_KERNEL);
5809 if (!reply_page) {
5810 __free_page(req_page);
5811 return -ENOMEM;
5812 }
5813
5814 p = page_address(req_page);
5815 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5816 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5817 if (ret > 0)
5818 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5819 pii);
5820
5821 __free_page(req_page);
5822 __free_page(reply_page);
5823 return ret;
5824 }
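/*
 * Note on the fallback above: __get_parent_info() maps -EOPNOTSUPP
 * from the "parent_get" class method to a return value of 1, so
 * rbd_dev_v2_parent_info() retries with the legacy "get_parent"
 * method on OSDs that predate "parent_get".
 */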
5825
5826 static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
5827 {
5828 struct rbd_spec *parent_spec;
5829 struct parent_image_info pii = { 0 };
5830 int ret;
5831
5832 parent_spec = rbd_spec_alloc();
5833 if (!parent_spec)
5834 return -ENOMEM;
5835
5836 ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
5837 if (ret)
5838 goto out_err;
5839
5840 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
5841 goto out; /* No parent? No problem. */
5842
5843 /* The ceph file layout needs to fit pool id in 32 bits */
5844
5845 ret = -EIO;
5846 if (pii.pool_id > (u64)U32_MAX) {
5847 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5848 (unsigned long long)pii.pool_id, U32_MAX);
5849 goto out_err;
5850 }
5851
5852 /*
5853 * The parent won't change except when the clone is flattened,
5854 * so we only need to record the parent image spec once.
5855 */
5856 parent_spec->pool_id = pii.pool_id;
5857 if (pii.pool_ns && *pii.pool_ns) {
5858 parent_spec->pool_ns = pii.pool_ns;
5859 pii.pool_ns = NULL;
5860 }
5861 parent_spec->image_id = pii.image_id;
5862 pii.image_id = NULL;
5863 parent_spec->snap_id = pii.snap_id;
5864
5865 rbd_assert(!rbd_dev->parent_spec);
5866 rbd_dev->parent_spec = parent_spec;
5867 parent_spec = NULL; /* rbd_dev now owns this */
5868
5869 /*
5870 * Record the parent overlap. If it's zero, issue a warning as
5871 * we will proceed as if there is no parent.
5872 */
5873 if (!pii.overlap)
5874 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5875 rbd_dev->parent_overlap = pii.overlap;
5876
5877 out:
5878 ret = 0;
5879 out_err:
5880 rbd_parent_info_cleanup(&pii);
5881 rbd_spec_put(parent_spec);
5882 return ret;
5883 }
5884
5885 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
5886 u64 *stripe_unit, u64 *stripe_count)
5887 {
5888 struct {
5889 __le64 stripe_unit;
5890 __le64 stripe_count;
5891 } __attribute__ ((packed)) striping_info_buf = { 0 };
5892 size_t size = sizeof (striping_info_buf);
5893 int ret;
5894
5895 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5896 &rbd_dev->header_oloc, "get_stripe_unit_count",
5897 NULL, 0, &striping_info_buf, size);
5898 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5899 if (ret < 0)
5900 return ret;
5901 if (ret < size)
5902 return -ERANGE;
5903
5904 *stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
5905 *stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
5906 dout(" stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
5907 *stripe_count);
5908
5909 return 0;
5910 }
5911
5912 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
5913 {
5914 __le64 data_pool_buf;
5915 int ret;
5916
5917 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5918 &rbd_dev->header_oloc, "get_data_pool",
5919 NULL, 0, &data_pool_buf,
5920 sizeof(data_pool_buf));
5921 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5922 if (ret < 0)
5923 return ret;
5924 if (ret < sizeof(data_pool_buf))
5925 return -EBADMSG;
5926
5927 *data_pool_id = le64_to_cpu(data_pool_buf);
5928 dout(" data_pool_id = %lld\n", *data_pool_id);
5929 WARN_ON(*data_pool_id == CEPH_NOPOOL);
5930
5931 return 0;
5932 }
5933
5934 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5935 {
5936 CEPH_DEFINE_OID_ONSTACK(oid);
5937 size_t image_id_size;
5938 char *image_id;
5939 void *p;
5940 void *end;
5941 size_t size;
5942 void *reply_buf = NULL;
5943 size_t len = 0;
5944 char *image_name = NULL;
5945 int ret;
5946
5947 rbd_assert(!rbd_dev->spec->image_name);
5948
5949 len = strlen(rbd_dev->spec->image_id);
5950 image_id_size = sizeof (__le32) + len;
5951 image_id = kmalloc(image_id_size, GFP_KERNEL);
5952 if (!image_id)
5953 return NULL;
5954
5955 p = image_id;
5956 end = image_id + image_id_size;
5957 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5958
5959 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5960 reply_buf = kmalloc(size, GFP_KERNEL);
5961 if (!reply_buf)
5962 goto out;
5963
5964 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5965 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5966 "dir_get_name", image_id, image_id_size,
5967 reply_buf, size);
5968 if (ret < 0)
5969 goto out;
5970 p = reply_buf;
5971 end = reply_buf + ret;
5972
5973 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5974 if (IS_ERR(image_name))
5975 image_name = NULL;
5976 else
5977 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5978 out:
5979 kfree(reply_buf);
5980 kfree(image_id);
5981
5982 return image_name;
5983 }
5984
5985 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5986 {
5987 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5988 const char *snap_name;
5989 u32 which = 0;
5990
5991 /* Skip over names until we find the one we are looking for */
5992
5993 snap_name = rbd_dev->header.snap_names;
5994 while (which < snapc->num_snaps) {
5995 if (!strcmp(name, snap_name))
5996 return snapc->snaps[which];
5997 snap_name += strlen(snap_name) + 1;
5998 which++;
5999 }
6000 return CEPH_NOSNAP;
6001 }
6002
6003 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6004 {
6005 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
6006 u32 which;
6007 bool found = false;
6008 u64 snap_id;
6009
6010 for (which = 0; !found && which < snapc->num_snaps; which++) {
6011 const char *snap_name;
6012
6013 snap_id = snapc->snaps[which];
6014 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
6015 if (IS_ERR(snap_name)) {
6016 /* ignore no-longer existing snapshots */
6017 if (PTR_ERR(snap_name) == -ENOENT)
6018 continue;
6019 else
6020 break;
6021 }
6022 found = !strcmp(name, snap_name);
6023 kfree(snap_name);
6024 }
6025 return found ? snap_id : CEPH_NOSNAP;
6026 }
6027
6028 /*
6029 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
6030 * no snapshot by that name is found, or if an error occurs.
6031 */
6032 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6033 {
6034 if (rbd_dev->image_format == 1)
6035 return rbd_v1_snap_id_by_name(rbd_dev, name);
6036
6037 return rbd_v2_snap_id_by_name(rbd_dev, name);
6038 }
6039
6040 /*
6041 * An image being mapped will have everything but the snap id.
6042 */
6043 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
6044 {
6045 struct rbd_spec *spec = rbd_dev->spec;
6046
6047 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
6048 rbd_assert(spec->image_id && spec->image_name);
6049 rbd_assert(spec->snap_name);
6050
6051 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
6052 u64 snap_id;
6053
6054 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
6055 if (snap_id == CEPH_NOSNAP)
6056 return -ENOENT;
6057
6058 spec->snap_id = snap_id;
6059 } else {
6060 spec->snap_id = CEPH_NOSNAP;
6061 }
6062
6063 return 0;
6064 }
6065
6066 /*
6067 * A parent image will have all ids but none of the names.
6068 *
6069 * All names in an rbd spec are dynamically allocated. It's OK if we
6070 * can't figure out the name for an image id.
6071 */
6072 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
6073 {
6074 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6075 struct rbd_spec *spec = rbd_dev->spec;
6076 const char *pool_name;
6077 const char *image_name;
6078 const char *snap_name;
6079 int ret;
6080
6081 rbd_assert(spec->pool_id != CEPH_NOPOOL);
6082 rbd_assert(spec->image_id);
6083 rbd_assert(spec->snap_id != CEPH_NOSNAP);
6084
6085 /* Get the pool name; we have to make our own copy of this */
6086
6087 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
6088 if (!pool_name) {
6089 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6090 return -EIO;
6091 }
6092 pool_name = kstrdup(pool_name, GFP_KERNEL);
6093 if (!pool_name)
6094 return -ENOMEM;
6095
6096 /* Fetch the image name; tolerate failure here */
6097
6098 image_name = rbd_dev_image_name(rbd_dev);
6099 if (!image_name)
6100 rbd_warn(rbd_dev, "unable to get image name");
6101
6102 /* Fetch the snapshot name */
6103
6104 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6105 if (IS_ERR(snap_name)) {
6106 ret = PTR_ERR(snap_name);
6107 goto out_err;
6108 }
6109
6110 spec->pool_name = pool_name;
6111 spec->image_name = image_name;
6112 spec->snap_name = snap_name;
6113
6114 return 0;
6115
6116 out_err:
6117 kfree(image_name);
6118 kfree(pool_name);
6119 return ret;
6120 }
6121
6122 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
6123 struct ceph_snap_context **psnapc)
6124 {
6125 size_t size;
6126 int ret;
6127 void *reply_buf;
6128 void *p;
6129 void *end;
6130 u64 seq;
6131 u32 snap_count;
6132 struct ceph_snap_context *snapc;
6133 u32 i;
6134
6135 /*
6136 * We'll need room for the seq value (maximum snapshot id),
6137 * snapshot count, and array of that many snapshot ids.
6138 * For now we have a fixed upper limit on the number we're
6139 * prepared to receive.
6140 */
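/*
 * Worked out, with RBD_MAX_SNAP_COUNT of 510 this is
 * 8 + 4 + 510 * 8 = 4092 bytes, i.e. the largest snapshot context we
 * accept still fits in a single 4 KiB allocation.
 */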
6141 size = sizeof (__le64) + sizeof (__le32) +
6142 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6143 reply_buf = kzalloc(size, GFP_KERNEL);
6144 if (!reply_buf)
6145 return -ENOMEM;
6146
6147 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6148 &rbd_dev->header_oloc, "get_snapcontext",
6149 NULL, 0, reply_buf, size);
6150 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6151 if (ret < 0)
6152 goto out;
6153
6154 p = reply_buf;
6155 end = reply_buf + ret;
6156 ret = -ERANGE;
6157 ceph_decode_64_safe(&p, end, seq, out);
6158 ceph_decode_32_safe(&p, end, snap_count, out);
6159
6160 /*
6161 * Make sure the reported number of snapshot ids wouldn't go
6162 * beyond the end of our buffer. But before checking that,
6163 * make sure the computed size of the snapshot context we
6164 * allocate is representable in a size_t.
6165 */
6166 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6167 / sizeof (u64)) {
6168 ret = -EINVAL;
6169 goto out;
6170 }
6171 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6172 goto out;
6173 ret = 0;
6174
6175 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
6176 if (!snapc) {
6177 ret = -ENOMEM;
6178 goto out;
6179 }
6180 snapc->seq = seq;
6181 for (i = 0; i < snap_count; i++)
6182 snapc->snaps[i] = ceph_decode_64(&p);
6183
6184 *psnapc = snapc;
6185 dout(" snap context seq = %llu, snap_count = %u\n",
6186 (unsigned long long)seq, (unsigned int)snap_count);
6187 out:
6188 kfree(reply_buf);
6189
6190 return ret;
6191 }
6192
6193 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6194 u64 snap_id)
6195 {
6196 size_t size;
6197 void *reply_buf;
6198 __le64 snapid;
6199 int ret;
6200 void *p;
6201 void *end;
6202 char *snap_name;
6203
6204 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6205 reply_buf = kmalloc(size, GFP_KERNEL);
6206 if (!reply_buf)
6207 return ERR_PTR(-ENOMEM);
6208
6209 snapid = cpu_to_le64(snap_id);
6210 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6211 &rbd_dev->header_oloc, "get_snapshot_name",
6212 &snapid, sizeof(snapid), reply_buf, size);
6213 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6214 if (ret < 0) {
6215 snap_name = ERR_PTR(ret);
6216 goto out;
6217 }
6218
6219 p = reply_buf;
6220 end = reply_buf + ret;
6221 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
6222 if (IS_ERR(snap_name))
6223 goto out;
6224
6225 dout(" snap_id 0x%016llx snap_name = %s\n",
6226 (unsigned long long)snap_id, snap_name);
6227 out:
6228 kfree(reply_buf);
6229
6230 return snap_name;
6231 }
6232
6233 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
6234 struct rbd_image_header *header,
6235 bool first_time)
6236 {
6237 int ret;
6238
6239 ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
6240 first_time ? &header->obj_order : NULL,
6241 &header->image_size);
6242 if (ret)
6243 return ret;
6244
6245 if (first_time) {
6246 ret = rbd_dev_v2_header_onetime(rbd_dev, header);
6247 if (ret)
6248 return ret;
6249 }
6250
6251 ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
6252 if (ret)
6253 return ret;
6254
6255 return 0;
6256 }
6257
6258 static int rbd_dev_header_info(struct rbd_device *rbd_dev,
6259 struct rbd_image_header *header,
6260 bool first_time)
6261 {
6262 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6263 rbd_assert(!header->object_prefix && !header->snapc);
6264
6265 if (rbd_dev->image_format == 1)
6266 return rbd_dev_v1_header_info(rbd_dev, header, first_time);
6267
6268 return rbd_dev_v2_header_info(rbd_dev, header, first_time);
6269 }
6270
6271 /*
6272 * Skips over white space at *buf, and updates *buf to point to the
6273 * first found non-space character (if any). Returns the length of
6274 * the token (string of non-white space characters) found. Note
6275 * that *buf must be terminated with '\0'.
6276 */
6277 static inline size_t next_token(const char **buf)
6278 {
6279 /*
6280 * These are the characters that produce nonzero for
6281 * isspace() in the "C" and "POSIX" locales.
6282 */
6283 const char *spaces = " \f\n\r\t\v";
6284
6285 *buf += strspn(*buf, spaces); /* Find start of token */
6286
6287 return strcspn(*buf, spaces); /* Return token length */
6288 }
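/*
 * A minimal usage sketch with made-up input: given
 *
 *	const char *buf = "  rbd foo";
 *	size_t len = next_token(&buf);
 *
 * buf ends up pointing at "rbd foo" and len is 3 (the length of the
 * token "rbd"); the token itself is not consumed.
 */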
6289
6290 /*
6291 * Finds the next token in *buf, dynamically allocates a buffer big
6292 * enough to hold a copy of it, and copies the token into the new
6293 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6294 * that a duplicate buffer is created even for a zero-length token.
6295 *
6296 * Returns a pointer to the newly-allocated duplicate, or a null
6297 * pointer if memory for the duplicate was not available. If
6298 * the lenp argument is a non-null pointer, the length of the token
6299 * (not including the '\0') is returned in *lenp.
6300 *
6301 * If successful, the *buf pointer will be updated to point beyond
6302 * the end of the found token.
6303 *
6304 * Note: uses GFP_KERNEL for allocation.
6305 */
6306 static inline char *dup_token(const char **buf, size_t *lenp)
6307 {
6308 char *dup;
6309 size_t len;
6310
6311 len = next_token(buf);
6312 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
6313 if (!dup)
6314 return NULL;
6315 *(dup + len) = '\0';
6316 *buf += len;
6317
6318 if (lenp)
6319 *lenp = len;
6320
6321 return dup;
6322 }
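/*
 * Continuing the sketch above: starting again from "  rbd foo",
 * dup_token(&buf, &len) returns a freshly allocated "rbd" (which the
 * caller must kfree()), sets len to 3 and leaves buf pointing at
 * " foo".
 */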
6323
6324 static int rbd_parse_param(struct fs_parameter *param,
6325 struct rbd_parse_opts_ctx *pctx)
6326 {
6327 struct rbd_options *opt = pctx->opts;
6328 struct fs_parse_result result;
6329 struct p_log log = {.prefix = "rbd"};
6330 int token, ret;
6331
6332 ret = ceph_parse_param(param, pctx->copts, NULL);
6333 if (ret != -ENOPARAM)
6334 return ret;
6335
6336 token = __fs_parse(&log, rbd_parameters, param, &result);
6337 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
6338 if (token < 0) {
6339 if (token == -ENOPARAM)
6340 return inval_plog(&log, "Unknown parameter '%s'",
6341 param->key);
6342 return token;
6343 }
6344
6345 switch (token) {
6346 case Opt_queue_depth:
6347 if (result.uint_32 < 1)
6348 goto out_of_range;
6349 opt->queue_depth = result.uint_32;
6350 break;
6351 case Opt_alloc_size:
6352 if (result.uint_32 < SECTOR_SIZE)
6353 goto out_of_range;
6354 if (!is_power_of_2(result.uint_32))
6355 return inval_plog(&log, "alloc_size must be a power of 2");
6356 opt->alloc_size = result.uint_32;
6357 break;
6358 case Opt_lock_timeout:
6359 /* 0 is "wait forever" (i.e. infinite timeout) */
6360 if (result.uint_32 > INT_MAX / 1000)
6361 goto out_of_range;
6362 opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
6363 break;
6364 case Opt_pool_ns:
6365 kfree(pctx->spec->pool_ns);
6366 pctx->spec->pool_ns = param->string;
6367 param->string = NULL;
6368 break;
6369 case Opt_compression_hint:
6370 switch (result.uint_32) {
6371 case Opt_compression_hint_none:
6372 opt->alloc_hint_flags &=
6373 ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
6374 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
6375 break;
6376 case Opt_compression_hint_compressible:
6377 opt->alloc_hint_flags |=
6378 CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6379 opt->alloc_hint_flags &=
6380 ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6381 break;
6382 case Opt_compression_hint_incompressible:
6383 opt->alloc_hint_flags |=
6384 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6385 opt->alloc_hint_flags &=
6386 ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6387 break;
6388 default:
6389 BUG();
6390 }
6391 break;
6392 case Opt_read_only:
6393 opt->read_only = true;
6394 break;
6395 case Opt_read_write:
6396 opt->read_only = false;
6397 break;
6398 case Opt_lock_on_read:
6399 opt->lock_on_read = true;
6400 break;
6401 case Opt_exclusive:
6402 opt->exclusive = true;
6403 break;
6404 case Opt_notrim:
6405 opt->trim = false;
6406 break;
6407 default:
6408 BUG();
6409 }
6410
6411 return 0;
6412
6413 out_of_range:
6414 return inval_plog(&log, "%s out of range", param->key);
6415 }
6416
6417 /*
6418 * This duplicates most of generic_parse_monolithic(), untying it from
6419 * fs_context and skipping standard superblock and security options.
6420 */
6421 static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
6422 {
6423 char *key;
6424 int ret = 0;
6425
6426 dout("%s '%s'\n", __func__, options);
6427 while ((key = strsep(&options, ",")) != NULL) {
6428 if (*key) {
6429 struct fs_parameter param = {
6430 .key = key,
6431 .type = fs_value_is_flag,
6432 };
6433 char *value = strchr(key, '=');
6434 size_t v_len = 0;
6435
6436 if (value) {
6437 if (value == key)
6438 continue;
6439 *value++ = 0;
6440 v_len = strlen(value);
6441 param.string = kmemdup_nul(value, v_len,
6442 GFP_KERNEL);
6443 if (!param.string)
6444 return -ENOMEM;
6445 param.type = fs_value_is_string;
6446 }
6447 param.size = v_len;
6448
6449 ret = rbd_parse_param(&param, pctx);
6450 kfree(param.string);
6451 if (ret)
6452 break;
6453 }
6454 }
6455
6456 return ret;
6457 }
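/*
 * As an illustration (example values, not defaults), an options token
 * such as
 *
 *	"queue_depth=16,alloc_size=65536,lock_on_read,pool_ns=myns"
 *
 * is split on ',' and each piece is handed to rbd_parse_param() as a
 * key or key=value fs_parameter; anything libceph itself recognizes
 * (e.g. name= or secret=) is consumed by ceph_parse_param() first.
 */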
6458
6459 /*
6460 * Parse the options provided for an "rbd add" (i.e., rbd image
6461 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
6462 * and the data written is passed here via a NUL-terminated buffer.
6463 * Returns 0 if successful or an error code otherwise.
6464 *
6465 * The information extracted from these options is recorded in
6466 * the other parameters which return dynamically-allocated
6467 * structures:
6468 * ceph_opts
6469 * The address of a pointer that will refer to a ceph options
6470 * structure. Caller must release the returned pointer using
6471 * ceph_destroy_options() when it is no longer needed.
6472 * rbd_opts
6473 * Address of an rbd options pointer. Fully initialized by
6474 * this function; caller must release with kfree().
6475 * spec
6476 * Address of an rbd image specification pointer. Fully
6477 * initialized by this function based on parsed options.
6478 * Caller must release with rbd_spec_put().
6479 *
6480 * The options passed take this form:
6481 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
6482 * where:
6483 * <mon_addrs>
6484 * A comma-separated list of one or more monitor addresses.
6485 * A monitor address is an ip address, optionally followed
6486 * by a port number (separated by a colon).
6487 * I.e.: ip1[:port1][,ip2[:port2]...]
6488 * <options>
6489 * A comma-separated list of ceph and/or rbd options.
6490 * <pool_name>
6491 * The name of the rados pool containing the rbd image.
6492 * <image_name>
6493 * The name of the image in that pool to map.
6494 * <snap_name>
6495 * An optional snapshot name. If provided, the mapping will
6496 * present data from the image at the time that snapshot was
6497 * created. The image head is used if no snapshot name is
6498 * provided. Snapshot mappings are always read-only.
6499 */
6500 static int rbd_add_parse_args(const char *buf,
6501 struct ceph_options **ceph_opts,
6502 struct rbd_options **opts,
6503 struct rbd_spec **rbd_spec)
6504 {
6505 size_t len;
6506 char *options;
6507 const char *mon_addrs;
6508 char *snap_name;
6509 size_t mon_addrs_size;
6510 struct rbd_parse_opts_ctx pctx = { 0 };
6511 int ret;
6512
6513 /* The first four tokens are required */
6514
6515 len = next_token(&buf);
6516 if (!len) {
6517 rbd_warn(NULL, "no monitor address(es) provided");
6518 return -EINVAL;
6519 }
6520 mon_addrs = buf;
6521 mon_addrs_size = len;
6522 buf += len;
6523
6524 ret = -EINVAL;
6525 options = dup_token(&buf, NULL);
6526 if (!options)
6527 return -ENOMEM;
6528 if (!*options) {
6529 rbd_warn(NULL, "no options provided");
6530 goto out_err;
6531 }
6532
6533 pctx.spec = rbd_spec_alloc();
6534 if (!pctx.spec)
6535 goto out_mem;
6536
6537 pctx.spec->pool_name = dup_token(&buf, NULL);
6538 if (!pctx.spec->pool_name)
6539 goto out_mem;
6540 if (!*pctx.spec->pool_name) {
6541 rbd_warn(NULL, "no pool name provided");
6542 goto out_err;
6543 }
6544
6545 pctx.spec->image_name = dup_token(&buf, NULL);
6546 if (!pctx.spec->image_name)
6547 goto out_mem;
6548 if (!*pctx.spec->image_name) {
6549 rbd_warn(NULL, "no image name provided");
6550 goto out_err;
6551 }
6552
6553 /*
6554 * Snapshot name is optional; default is to use "-"
6555 * (indicating the head/no snapshot).
6556 */
6557 len = next_token(&buf);
6558 if (!len) {
6559 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6560 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
6561 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
6562 ret = -ENAMETOOLONG;
6563 goto out_err;
6564 }
6565 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6566 if (!snap_name)
6567 goto out_mem;
6568 *(snap_name + len) = '\0';
6569 pctx.spec->snap_name = snap_name;
6570
6571 pctx.copts = ceph_alloc_options();
6572 if (!pctx.copts)
6573 goto out_mem;
6574
6575 /* Initialize all rbd options to the defaults */
6576
6577 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6578 if (!pctx.opts)
6579 goto out_mem;
6580
6581 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6582 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
6583 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
6584 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6585 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6586 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6587 pctx.opts->trim = RBD_TRIM_DEFAULT;
6588
6589 ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL);
6590 if (ret)
6591 goto out_err;
6592
6593 ret = rbd_parse_options(options, &pctx);
6594 if (ret)
6595 goto out_err;
6596
6597 *ceph_opts = pctx.copts;
6598 *opts = pctx.opts;
6599 *rbd_spec = pctx.spec;
6600 kfree(options);
6601 return 0;
6602
6603 out_mem:
6604 ret = -ENOMEM;
6605 out_err:
6606 kfree(pctx.opts);
6607 ceph_destroy_options(pctx.copts);
6608 rbd_spec_put(pctx.spec);
6609 kfree(options);
6610 return ret;
6611 }
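/*
 * Putting the pieces together, a made-up buffer written to
 * /sys/bus/rbd/add could be
 *
 *	"1.2.3.4:6789,1.2.3.5:6789 name=admin,secret=AQBx... mypool myimage -"
 *
 * which parses into the monitor list "1.2.3.4:6789,1.2.3.5:6789", the
 * ceph/rbd options "name=admin,secret=AQBx...", pool "mypool", image
 * "myimage" and the image head (no snapshot) because the final token
 * is "-".
 */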
6612
6613 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6614 {
6615 down_write(&rbd_dev->lock_rwsem);
6616 if (__rbd_is_lock_owner(rbd_dev))
6617 __rbd_release_lock(rbd_dev);
6618 up_write(&rbd_dev->lock_rwsem);
6619 }
6620
6621 /*
6622 * If the wait is interrupted, an error is returned even if the lock
6623 * was successfully acquired. rbd_dev_image_unlock() will release it
6624 * if needed.
6625 */
6626 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6627 {
6628 long ret;
6629
6630 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6631 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6632 return 0;
6633
6634 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6635 return -EINVAL;
6636 }
6637
6638 if (rbd_is_ro(rbd_dev))
6639 return 0;
6640
6641 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6642 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6643 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6644 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6645 if (ret > 0) {
6646 ret = rbd_dev->acquire_err;
6647 } else {
6648 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6649 if (!ret)
6650 ret = -ETIMEDOUT;
6651
6652 rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
6653 }
6654 if (ret)
6655 return ret;
6656
6657 /*
6658 * The lock may have been released by now, unless automatic lock
6659 * transitions are disabled.
6660 */
6661 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
6662 return 0;
6663 }
6664
6665 /*
6666 * An rbd format 2 image has a unique identifier, distinct from the
6667 * name given to it by the user. Internally, that identifier is
6668 * what's used to specify the names of objects related to the image.
6669 *
6670 * A special "rbd id" object is used to map an rbd image name to its
6671 * id. If that object doesn't exist, then there is no v2 rbd image
6672 * with the supplied name.
6673 *
6674 * This function will fill in the given rbd_dev's image_id field if
6675 * it can be determined, and in that case will return 0. If any
6676 * errors occur a negative errno will be returned and the rbd_dev's
6677 * image_id field will be unchanged (and should be NULL).
6678 */
6679 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6680 {
6681 int ret;
6682 size_t size;
6683 CEPH_DEFINE_OID_ONSTACK(oid);
6684 void *response;
6685 char *image_id;
6686
6687 /*
6688 * When probing a parent image, the image id is already
6689 * known (and the image name likely is not). There's no
6690 * need to fetch the image id again in this case. We
6691 * do still need to set the image format though.
6692 */
6693 if (rbd_dev->spec->image_id) {
6694 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6695
6696 return 0;
6697 }
6698
6699 /*
6700 * First, see if the format 2 image id object exists, and if
6701 * so, get the image's persistent id from it.
6702 */
6703 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6704 rbd_dev->spec->image_name);
6705 if (ret)
6706 return ret;
6707
6708 dout("rbd id object name is %s\n", oid.name);
6709
6710 /* Response will be an encoded string, which includes a length */
6711 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6712 response = kzalloc(size, GFP_NOIO);
6713 if (!response) {
6714 ret = -ENOMEM;
6715 goto out;
6716 }
6717
6718 /* If it doesn't exist we'll assume it's a format 1 image */
6719
6720 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6721 "get_id", NULL, 0,
6722 response, size);
6723 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6724 if (ret == -ENOENT) {
6725 image_id = kstrdup("", GFP_KERNEL);
6726 ret = image_id ? 0 : -ENOMEM;
6727 if (!ret)
6728 rbd_dev->image_format = 1;
6729 } else if (ret >= 0) {
6730 void *p = response;
6731
6732 image_id = ceph_extract_encoded_string(&p, p + ret,
6733 NULL, GFP_NOIO);
6734 ret = PTR_ERR_OR_ZERO(image_id);
6735 if (!ret)
6736 rbd_dev->image_format = 2;
6737 }
6738
6739 if (!ret) {
6740 rbd_dev->spec->image_id = image_id;
6741 dout("image_id is %s\n", image_id);
6742 }
6743 out:
6744 kfree(response);
6745 ceph_oid_destroy(&oid);
6746 return ret;
6747 }
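/*
 * For a format 2 image named "myimage" the id object queried above is
 * RBD_ID_PREFIX followed by the image name ("rbd_id.myimage", assuming
 * the usual prefix from rbd_types.h); its get_id method returns the
 * persistent image id as an encoded string.
 */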
6748
6749 /*
6750 * Undo whatever state changes are made by v1 or v2 header info
6751 * call.
6752 */
6753 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6754 {
6755 rbd_dev_parent_put(rbd_dev);
6756 rbd_object_map_free(rbd_dev);
6757 rbd_dev_mapping_clear(rbd_dev);
6758
6759 /* Free dynamic fields from the header, then zero it out */
6760
6761 rbd_image_header_cleanup(&rbd_dev->header);
6762 }
6763
6764 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
6765 struct rbd_image_header *header)
6766 {
6767 int ret;
6768
6769 ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
6770 if (ret)
6771 return ret;
6772
6773 /*
6774 * Get and check the features for the image. Currently the
6775 * features are assumed to never change.
6776 */
6777 ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
6778 rbd_is_ro(rbd_dev), &header->features);
6779 if (ret)
6780 return ret;
6781
6782 /* If the image supports fancy striping, get its parameters */
6783
6784 if (header->features & RBD_FEATURE_STRIPINGV2) {
6785 ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
6786 &header->stripe_count);
6787 if (ret)
6788 return ret;
6789 }
6790
6791 if (header->features & RBD_FEATURE_DATA_POOL) {
6792 ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
6793 if (ret)
6794 return ret;
6795 }
6796
6797 return 0;
6798 }
6799
6800 /*
6801 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6802 * rbd_dev_image_probe() recursion depth, which means it's also the
6803 * length of the already discovered part of the parent chain.
6804 */
6805 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
6806 {
6807 struct rbd_device *parent = NULL;
6808 int ret;
6809
6810 if (!rbd_dev->parent_spec)
6811 return 0;
6812
6813 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
6814 pr_info("parent chain is too long (%d)\n", depth);
6815 ret = -EINVAL;
6816 goto out_err;
6817 }
6818
6819 parent = __rbd_dev_create(rbd_dev->parent_spec);
6820 if (!parent) {
6821 ret = -ENOMEM;
6822 goto out_err;
6823 }
6824
6825 /*
6826 * Images related by parent/child relationships always share
6827 * rbd_client and spec/parent_spec, so bump their refcounts.
6828 */
6829 parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
6830 parent->spec = rbd_spec_get(rbd_dev->parent_spec);
6831
6832 __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
6833
6834 ret = rbd_dev_image_probe(parent, depth);
6835 if (ret < 0)
6836 goto out_err;
6837
6838 rbd_dev->parent = parent;
6839 atomic_set(&rbd_dev->parent_ref, 1);
6840 return 0;
6841
6842 out_err:
6843 rbd_dev_unparent(rbd_dev);
6844 rbd_dev_destroy(parent);
6845 return ret;
6846 }
6847
6848 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6849 {
6850 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6851 rbd_free_disk(rbd_dev);
6852 if (!single_major)
6853 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6854 }
6855
6856 /*
6857 * rbd_dev->header_rwsem must be locked for write and will be unlocked
6858 * upon return.
6859 */
6860 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6861 {
6862 int ret;
6863
6864 /* Record our major and minor device numbers. */
6865
6866 if (!single_major) {
6867 ret = register_blkdev(0, rbd_dev->name);
6868 if (ret < 0)
6869 goto err_out_unlock;
6870
6871 rbd_dev->major = ret;
6872 rbd_dev->minor = 0;
6873 } else {
6874 rbd_dev->major = rbd_major;
6875 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6876 }
6877
6878 /* Set up the blkdev mapping. */
6879
6880 ret = rbd_init_disk(rbd_dev);
6881 if (ret)
6882 goto err_out_blkdev;
6883
6884 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6885 set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
6886
6887 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6888 if (ret)
6889 goto err_out_disk;
6890
6891 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6892 up_write(&rbd_dev->header_rwsem);
6893 return 0;
6894
6895 err_out_disk:
6896 rbd_free_disk(rbd_dev);
6897 err_out_blkdev:
6898 if (!single_major)
6899 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6900 err_out_unlock:
6901 up_write(&rbd_dev->header_rwsem);
6902 return ret;
6903 }
6904
6905 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6906 {
6907 struct rbd_spec *spec = rbd_dev->spec;
6908 int ret;
6909
6910 /* Record the header object name for this rbd image. */
6911
6912 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6913 if (rbd_dev->image_format == 1)
6914 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6915 spec->image_name, RBD_SUFFIX);
6916 else
6917 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6918 RBD_HEADER_PREFIX, spec->image_id);
6919
6920 return ret;
6921 }
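/*
 * Assuming the usual RBD_SUFFIX and RBD_HEADER_PREFIX values from
 * rbd_types.h, a format 1 image "foo" gets the header object
 * "foo.rbd", while a format 2 image with id "1234abcd" gets
 * "rbd_header.1234abcd".
 */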
6922
6923 static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
6924 {
6925 if (!is_snap) {
6926 pr_info("image %s/%s%s%s does not exist\n",
6927 rbd_dev->spec->pool_name,
6928 rbd_dev->spec->pool_ns ?: "",
6929 rbd_dev->spec->pool_ns ? "/" : "",
6930 rbd_dev->spec->image_name);
6931 } else {
6932 pr_info("snap %s/%s%s%s@%s does not exist\n",
6933 rbd_dev->spec->pool_name,
6934 rbd_dev->spec->pool_ns ?: "",
6935 rbd_dev->spec->pool_ns ? "/" : "",
6936 rbd_dev->spec->image_name,
6937 rbd_dev->spec->snap_name);
6938 }
6939 }
6940
6941 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6942 {
6943 if (!rbd_is_ro(rbd_dev))
6944 rbd_unregister_watch(rbd_dev);
6945
6946 rbd_dev_unprobe(rbd_dev);
6947 rbd_dev->image_format = 0;
6948 kfree(rbd_dev->spec->image_id);
6949 rbd_dev->spec->image_id = NULL;
6950 }
6951
6952 /*
6953 * Probe for the existence of the header object for the given rbd
6954 * device. If this image is mapped read-write (parent images and other
6955 * read-only mappings are not watched), initiate a watch on its header
6956 * object before using it to get detailed information about the rbd image.
6957 *
6958 * On success, returns with header_rwsem held for write if called
6959 * with @depth == 0.
6960 */
6961 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6962 {
6963 bool need_watch = !rbd_is_ro(rbd_dev);
6964 int ret;
6965
6966 /*
6967 * Get the id from the image id object. Unless there's an
6968 * error, rbd_dev->spec->image_id will be filled in with
6969 * a dynamically-allocated string, and rbd_dev->image_format
6970 * will be set to either 1 or 2.
6971 */
6972 ret = rbd_dev_image_id(rbd_dev);
6973 if (ret)
6974 return ret;
6975
6976 ret = rbd_dev_header_name(rbd_dev);
6977 if (ret)
6978 goto err_out_format;
6979
6980 if (need_watch) {
6981 ret = rbd_register_watch(rbd_dev);
6982 if (ret) {
6983 if (ret == -ENOENT)
6984 rbd_print_dne(rbd_dev, false);
6985 goto err_out_format;
6986 }
6987 }
6988
6989 if (!depth)
6990 down_write(&rbd_dev->header_rwsem);
6991
6992 ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
6993 if (ret) {
6994 if (ret == -ENOENT && !need_watch)
6995 rbd_print_dne(rbd_dev, false);
6996 goto err_out_probe;
6997 }
6998
6999 rbd_init_layout(rbd_dev);
7000
7001 /*
7002 * If this image is the one being mapped, we have pool name and
7003 * id, image name and id, and snap name - need to fill snap id.
7004 * Otherwise this is a parent image, identified by pool, image
7005 * and snap ids - need to fill in names for those ids.
7006 */
7007 if (!depth)
7008 ret = rbd_spec_fill_snap_id(rbd_dev);
7009 else
7010 ret = rbd_spec_fill_names(rbd_dev);
7011 if (ret) {
7012 if (ret == -ENOENT)
7013 rbd_print_dne(rbd_dev, true);
7014 goto err_out_probe;
7015 }
7016
7017 ret = rbd_dev_mapping_set(rbd_dev);
7018 if (ret)
7019 goto err_out_probe;
7020
7021 if (rbd_is_snap(rbd_dev) &&
7022 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
7023 ret = rbd_object_map_load(rbd_dev);
7024 if (ret)
7025 goto err_out_probe;
7026 }
7027
7028 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
7029 ret = rbd_dev_setup_parent(rbd_dev);
7030 if (ret)
7031 goto err_out_probe;
7032 }
7033
7034 ret = rbd_dev_probe_parent(rbd_dev, depth);
7035 if (ret)
7036 goto err_out_probe;
7037
7038 dout("discovered format %u image, header name is %s\n",
7039 rbd_dev->image_format, rbd_dev->header_oid.name);
7040 return 0;
7041
7042 err_out_probe:
7043 if (!depth)
7044 up_write(&rbd_dev->header_rwsem);
7045 if (need_watch)
7046 rbd_unregister_watch(rbd_dev);
7047 rbd_dev_unprobe(rbd_dev);
7048 err_out_format:
7049 rbd_dev->image_format = 0;
7050 kfree(rbd_dev->spec->image_id);
7051 rbd_dev->spec->image_id = NULL;
7052 return ret;
7053 }
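/*
 * In short, the probe sequence above is: rbd_dev_image_id() ->
 * rbd_dev_header_name() -> rbd_register_watch() (writable mappings
 * only) -> rbd_dev_header_info() -> rbd_init_layout() ->
 * rbd_spec_fill_snap_id() or rbd_spec_fill_names() ->
 * rbd_dev_mapping_set() -> rbd_object_map_load() (mapped snapshots
 * with an object map) -> rbd_dev_setup_parent() (layered images) ->
 * rbd_dev_probe_parent(), unwound in reverse order on error.
 */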
7054
7055 static void rbd_dev_update_header(struct rbd_device *rbd_dev,
7056 struct rbd_image_header *header)
7057 {
7058 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
7059 rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
7060
7061 if (rbd_dev->header.image_size != header->image_size) {
7062 rbd_dev->header.image_size = header->image_size;
7063
7064 if (!rbd_is_snap(rbd_dev)) {
7065 rbd_dev->mapping.size = header->image_size;
7066 rbd_dev_update_size(rbd_dev);
7067 }
7068 }
7069
7070 ceph_put_snap_context(rbd_dev->header.snapc);
7071 rbd_dev->header.snapc = header->snapc;
7072 header->snapc = NULL;
7073
7074 if (rbd_dev->image_format == 1) {
7075 kfree(rbd_dev->header.snap_names);
7076 rbd_dev->header.snap_names = header->snap_names;
7077 header->snap_names = NULL;
7078
7079 kfree(rbd_dev->header.snap_sizes);
7080 rbd_dev->header.snap_sizes = header->snap_sizes;
7081 header->snap_sizes = NULL;
7082 }
7083 }
7084
7085 static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
7086 struct parent_image_info *pii)
7087 {
7088 if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
7089 /*
7090 * Either the parent never existed, or we have a
7091 * record of it but the image got flattened so it no
7092 * longer has a parent. When the parent of a
7093 * layered image disappears we immediately set the
7094 * overlap to 0. The effect of this is that all new
7095 * requests will be treated as if the image had no
7096 * parent.
7097 *
7098 * If !pii.has_overlap, the parent image spec is not
7099 * applicable. It's there to avoid duplication in each
7100 * snapshot record.
7101 */
7102 if (rbd_dev->parent_overlap) {
7103 rbd_dev->parent_overlap = 0;
7104 rbd_dev_parent_put(rbd_dev);
7105 pr_info("%s: clone has been flattened\n",
7106 rbd_dev->disk->disk_name);
7107 }
7108 } else {
7109 rbd_assert(rbd_dev->parent_spec);
7110
7111 /*
7112 * Update the parent overlap. If it became zero, issue
7113 * a warning as we will proceed as if there is no parent.
7114 */
7115 if (!pii->overlap && rbd_dev->parent_overlap)
7116 rbd_warn(rbd_dev,
7117 "clone has become standalone (overlap 0)");
7118 rbd_dev->parent_overlap = pii->overlap;
7119 }
7120 }
7121
7122 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
7123 {
7124 struct rbd_image_header header = { 0 };
7125 struct parent_image_info pii = { 0 };
7126 int ret;
7127
7128 dout("%s rbd_dev %p\n", __func__, rbd_dev);
7129
7130 ret = rbd_dev_header_info(rbd_dev, &header, false);
7131 if (ret)
7132 goto out;
7133
7134 /*
7135 * If there is a parent, see if it has disappeared due to the
7136 * mapped image getting flattened.
7137 */
7138 if (rbd_dev->parent) {
7139 ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
7140 if (ret)
7141 goto out;
7142 }
7143
7144 down_write(&rbd_dev->header_rwsem);
7145 rbd_dev_update_header(rbd_dev, &header);
7146 if (rbd_dev->parent)
7147 rbd_dev_update_parent(rbd_dev, &pii);
7148 up_write(&rbd_dev->header_rwsem);
7149
7150 out:
7151 rbd_parent_info_cleanup(&pii);
7152 rbd_image_header_cleanup(&header);
7153 return ret;
7154 }
7155
7156 static ssize_t do_rbd_add(struct bus_type *bus,
7157 const char *buf,
7158 size_t count)
7159 {
7160 struct rbd_device *rbd_dev = NULL;
7161 struct ceph_options *ceph_opts = NULL;
7162 struct rbd_options *rbd_opts = NULL;
7163 struct rbd_spec *spec = NULL;
7164 struct rbd_client *rbdc;
7165 int rc;
7166
7167 if (!capable(CAP_SYS_ADMIN))
7168 return -EPERM;
7169
7170 if (!try_module_get(THIS_MODULE))
7171 return -ENODEV;
7172
7173 /* parse add command */
7174 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
7175 if (rc < 0)
7176 goto out;
7177
7178 rbdc = rbd_get_client(ceph_opts);
7179 if (IS_ERR(rbdc)) {
7180 rc = PTR_ERR(rbdc);
7181 goto err_out_args;
7182 }
7183
7184 /* pick the pool */
7185 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
7186 if (rc < 0) {
7187 if (rc == -ENOENT)
7188 pr_info("pool %s does not exist\n", spec->pool_name);
7189 goto err_out_client;
7190 }
7191 spec->pool_id = (u64)rc;
7192
7193 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
7194 if (!rbd_dev) {
7195 rc = -ENOMEM;
7196 goto err_out_client;
7197 }
7198 rbdc = NULL; /* rbd_dev now owns this */
7199 spec = NULL; /* rbd_dev now owns this */
7200 rbd_opts = NULL; /* rbd_dev now owns this */
7201
7202 /* if we are mapping a snapshot it will be a read-only mapping */
7203 if (rbd_dev->opts->read_only ||
7204 strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
7205 __set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
7206
7207 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7208 if (!rbd_dev->config_info) {
7209 rc = -ENOMEM;
7210 goto err_out_rbd_dev;
7211 }
7212
7213 rc = rbd_dev_image_probe(rbd_dev, 0);
7214 if (rc < 0)
7215 goto err_out_rbd_dev;
7216
7217 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7218 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7219 rbd_dev->layout.object_size);
7220 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7221 }
7222
7223 rc = rbd_dev_device_setup(rbd_dev);
7224 if (rc)
7225 goto err_out_image_probe;
7226
7227 rc = rbd_add_acquire_lock(rbd_dev);
7228 if (rc)
7229 goto err_out_image_lock;
7230
7231 /* Everything's ready. Announce the disk to the world. */
7232
7233 rc = device_add(&rbd_dev->dev);
7234 if (rc)
7235 goto err_out_image_lock;
7236
7237 device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
7238 /* see rbd_init_disk() */
7239 blk_put_queue(rbd_dev->disk->queue);
7240
7241 spin_lock(&rbd_dev_list_lock);
7242 list_add_tail(&rbd_dev->node, &rbd_dev_list);
7243 spin_unlock(&rbd_dev_list_lock);
7244
7245 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7246 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7247 rbd_dev->header.features);
7248 rc = count;
7249 out:
7250 module_put(THIS_MODULE);
7251 return rc;
7252
7253 err_out_image_lock:
7254 rbd_dev_image_unlock(rbd_dev);
7255 rbd_dev_device_release(rbd_dev);
7256 err_out_image_probe:
7257 rbd_dev_image_release(rbd_dev);
7258 err_out_rbd_dev:
7259 rbd_dev_destroy(rbd_dev);
7260 err_out_client:
7261 rbd_put_client(rbdc);
7262 err_out_args:
7263 rbd_spec_put(spec);
7264 kfree(rbd_opts);
7265 goto out;
7266 }
7267
7268 static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
7269 {
7270 if (single_major)
7271 return -EINVAL;
7272
7273 return do_rbd_add(bus, buf, count);
7274 }
7275
add_single_major_store(struct bus_type * bus,const char * buf,size_t count)7276 static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
7277 size_t count)
7278 {
7279 return do_rbd_add(bus, buf, count);
7280 }
7281
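/*
 * Tear down the parent (layering) chain of a device, deepest ancestor
 * first: on each pass, walk to the parent that has no parent of its
 * own, release it, and detach it from its child, until no parents
 * remain.
 */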
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		rbd_dev_destroy(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

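/*
 * Core of the sysfs "remove" interface: unmap the device whose id is
 * written by userspace.  An optional "force" argument fails all
 * outstanding and future I/O instead of returning -EBUSY while the
 * device is still open, e.g.:
 *
 *   echo "0 force" > /sys/bus/rbd/remove
 */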
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	char opt_buf[6];
	bool force = false;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
					  &rbd_dev->flags))
			ret = -EINPROGRESS;
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret)
		return ret;

	if (force) {
		/*
		 * Prevent new IO from being queued and wait for existing
		 * IO to complete/fail.
		 */
		blk_mq_freeze_queue(rbd_dev->disk->queue);
		blk_set_queue_dying(rbd_dev->disk->queue);
	}

	del_gendisk(rbd_dev->disk);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
	device_del(&rbd_dev->dev);

	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	rbd_dev_destroy(rbd_dev);
	return count;
}

static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int __init rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void __exit rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

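/*
 * Slab caches for the two request structures allocated on every I/O:
 * an rbd_img_request per block-layer request and an rbd_obj_request
 * per backing RADOS object that the request touches.
 */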
static int __init rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	return 0;

out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

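/*
 * Module init: check libceph compatibility, then set up the slab
 * caches, the I/O workqueue, an optional shared block major (when
 * single_major is set) and finally the sysfs interface.  Failures
 * unwind in reverse order.
 */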
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

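	/*
	 * Passing 0 to register_blkdev() asks the block layer to
	 * dynamically allocate a major number; in single_major mode it
	 * is shared by all rbd devices, with minors carved out per
	 * device and partition.
	 */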
	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

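/*
 * Module exit: release the device id allocator and undo rbd_init()
 * in reverse order.
 */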
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");