/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.


*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
#endif
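
/*
 * For illustration, a member annotated like
 *
 *	struct drbd_backing_dev *ldev __protected_by(local);
 *
 * (see struct drbd_device below) tells sparse that it may only be
 * dereferenced while a local disk reference is held, i.e. between
 * get_ldev() and put_ldev().
 */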

/* shared module parameters, defined in drbd_main.c */
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int drbd_enable_faults;
extern int drbd_fault_rate;
#endif

extern unsigned int drbd_minor_count;
extern char drbd_usermode_helper[];
extern int drbd_proc_details;


/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I chose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			     __drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			       __drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
				 __drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
				 __drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
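
/*
 * Illustrative use of the type-dispatching printk machinery above;
 * each call picks the matching helper at compile time:
 *
 *	drbd_warn(device, "Local IO failed, detaching\n");
 *	drbd_info(connection, "Handshake successful\n");
 *
 * Passing any other pointer type selects
 * drbd_printk_with_wrong_object_type(), which is declared but never
 * defined, so such a build fails at link time.
 */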

#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

/**
 * expect  -  Make an assertion
 *
 * Unlike the assert macro, this macro returns a boolean result.
 */
#define expect(exp) ({ \
		bool _bool = (exp); \
		if (!_bool) \
			drbd_err(device, "ASSERTION %s FAILED in %s\n", \
				#exp, __func__); \
		_bool; \
		})
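
/*
 * Illustrative difference between the two assertions above (both rely
 * on a "device" in scope): D_ASSERT() only logs, while expect() also
 * yields the tested value, so it can steer control flow:
 *
 *	D_ASSERT(device, list_empty(&device->resync_reads));
 *	if (!expect(size <= DRBD_MAX_BIO_SIZE))
 *		return;
 */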

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9,	/* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return drbd_fault_rate &&
		(drbd_enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}
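
/*
 * Illustrative call site (a sketch, not a quote from the driver):
 * optionally fail a meta data write when fault type DRBD_FAULT_MD_WR
 * is enabled via the drbd_enable_faults/drbd_fault_rate parameters:
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_MD_WR))
 *		bio_io_error(bio);
 *	else
 *		submit_bio(bio);
 */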

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))
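/* e.g. div_ceil(7, 2) == 4 while div_floor(7, 2) == 3; note both
 * arguments are evaluated twice, so avoid expressions with side effects. */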

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */

extern const char *cmdname(enum drbd_packet cmd);

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
		const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient.  */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
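
/*
 * Worked example: for bit_offset == 100, a 64 bit host computes
 * word_offset = 100 >> 6 = 1 (byte 8), while a 32 bit host computes
 * 100 >> 5 = 3 and rounds down to word_offset = 2 (also byte 8),
 * keeping the position 64 bit aligned on both.
 */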

extern unsigned int drbd_header_size(struct drbd_connection *connection);

/**********************************************************************/
enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

extern void lock_all_resources(void);
extern void unlock_all_resources(void);

struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_request_endio(). */
	struct bio *private_bio;

	struct drbd_interval i;

	/* epoch: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * causing a p_barrier packet to be sent, starting a new epoch.
	 *
	 * This corresponds to "barrier" in struct p_barrier[_ack],
	 * and to "barrier_nr" in struct drbd_epoch (and various
	 * comments/function parameters/local variable names).
	 */
	unsigned int epoch;

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */

	/* see struct drbd_device */
	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	/* for generic IO accounting */
	unsigned long start_jif;

	/* for DRBD internal statistics */

	/* Minimal set of time stamps to determine if we wait for activity log
	 * transactions, local disk or peer.  32 bit "jiffies" are good enough,
	 * we don't expect a DRBD request to be stalled for several months.
	 */

	/* before actual request processing */
	unsigned long in_actlog_jif;

	/* local disk */
	unsigned long pre_submit_jif;

	/* per connection */
	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	/* Possibly even more detail to track each phase:
	 *  master_completion_jif
	 *      how long did it take to complete the master bio
	 *      (application visible latency)
	 *  allocated_jif
	 *      how long the master bio was blocked until we finally allocated
	 *      a tracking struct
	 *  in_actlog_jif
	 *      how long did we wait for activity log transactions
	 *
	 *  net_queued_jif
	 *      when did we finally queue it for sending
	 *  pre_send_jif
	 *      when did we start sending it
	 *  post_send_jif
	 *      how long did we block in the network stack trying to send it
	 *  acked_jif
	 *      when did we receive (or fake, in protocol A) a remote ACK
	 *  net_done_jif
	 *      when did we receive final acknowledgement (P_BARRIER_ACK),
	 *      or decide, e.g. on connection loss, that we do no longer expect
	 *      anything from this peer for this request.
	 *
	 *  pre_submit_jif
	 *  post_sub_jif
	 *      when did we start submitting to the lower level device,
	 *      and how long did we block in that submit function
	 *  local_completion_jif
	 *      how long did it take the lower level device to complete this request
	 */


	/* once it hits 0, we may complete the master_bio */
	atomic_t completion_ref;
	/* once it hits 0, we may destroy this drbd_request object */
	struct kref kref;

	unsigned rq_state; /* see comments above _req_mod() */
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* Prototype declaration of function defined in drbd_receiver.c */
int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch; /* for writes */
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;
	/* see comments on ee flag bits below */
	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* is this a TRIM aka REQ_OP_DISCARD? */
	__EE_TRIM,
	/* explicit zero-out requested, or
	 * our lower level cannot handle trim,
	 * and we want to fall back to zeroout instead */
	__EE_ZEROOUT,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,

	/* Conflicting local requests need to be restarted after this request */
	__EE_RESTART_REQUESTS,

	/* The peer wants a write ACK for this (wire proto C) */
	__EE_SEND_WRITE_ACK,

	/* Is set when net_conf had two_primaries set while creating this peer_req */
	__EE_IN_INTERVAL_TREE,

	/* for debugfs: */
	/* has this been submitted, or does it still wait for something else? */
	__EE_SUBMITTED,

	/* this is/was a write request */
	__EE_WRITE,

	/* this is/was a write same request */
	__EE_WRITE_SAME,

	/* this originates from application on peer
	 * (not some resync or verify or other DRBD internal request) */
	__EE_APPLICATION,

	/* If it contains only 0 bytes, send back P_RS_DEALLOCATED */
	__EE_RS_THIN_REQ,
};
#define EE_CALL_AL_COMPLETE_IO	(1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC	(1<<__EE_MAY_SET_IN_SYNC)
#define EE_TRIM			(1<<__EE_TRIM)
#define EE_ZEROOUT		(1<<__EE_ZEROOUT)
#define EE_RESUBMITTED		(1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR		(1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST		(1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED		(1<<__EE_SUBMITTED)
#define EE_WRITE		(1<<__EE_WRITE)
#define EE_WRITE_SAME		(1<<__EE_WRITE_SAME)
#define EE_APPLICATION		(1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ		(1<<__EE_RS_THIN_REQ)
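
/*
 * Illustrative flag test (a sketch): only __EE_WAS_ERROR may be set
 * concurrently (from the endio callback), so reading the other flags
 * non-atomically is fine once all bios have completed:
 *
 *	if (peer_req->flags & EE_WAS_ERROR)
 *		drbd_err(peer_req->peer_device->device,
 *			 "peer request failed\n");
 */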

/* flag bits per device */
enum {
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* The user wants us to not use FUA/FLUSH on meta data dev */

	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,	/* Started bitmap IO */
	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
	WAS_READ_ERROR,		/* Local disk READ failed (set additionally to the above) */
	FORCE_DETACH,		/* Force-detach from local disk, aborting any pending local IO */
	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,	/* Ahead -> SyncSource queued */
	B_RS_H_DONE,		/* Before resync handler done (already executed) */
	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
	READ_BALANCE_RR,

	FLUSH_PENDING,		/* if set, device->flush_jif is when we submitted that flush
				 * from drbd_flush_after_epoch() */

	/* cleared only after backing device related structures have been destroyed. */
	GOING_DISKLESS,		/* Disk is being detached, because of io-error, or admin request. */

	/* to be used in drbd_device_post_work() */
	GO_DISKLESS,		/* tell worker to schedule cleanup before detach */
	DESTROY_DISK,		/* tell worker to close backing devices and destroy related structures. */
	MD_SYNC,		/* tell worker to call drbd_md_sync() */
	RS_START,		/* tell worker to start resync/OV */
	RS_PROGRESS,		/* tell worker that resync made significant progress */
	RS_DONE,		/* tell worker that resync is done */
};

struct drbd_bitmap; /* opaque for drbd_device */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET   = 0x2,
	BM_DONT_TEST  = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED  = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits allowed, but clearing bits
	 * would be unexpected.  Used during bitmap receive.  Setting new bits
	 * requires sending of "out-of-sync" information, though. */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* for drbd_bm_write_copy_pages, everything is allowed,
	 * only concurrent bulk operations are locked out. */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
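
/*
 * Illustrative combination (a sketch; see drbd_bm_lock() below): lock
 * the bitmap for a bulk write-out while still allowing queries:
 *
 *	drbd_bm_lock(device, "write_all", BM_LOCKED_TEST_ALLOWED);
 *	drbd_bm_write(device);
 *	drbd_bm_unlock(device);
 */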

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;  /* to protect the list. */
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket    *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to activity log */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* cached value of bdev->disk_conf->meta_dev_idx (see below) */
	s32 meta_dev_idx;

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;	/* last call to drbd_md_get_buffer */
	unsigned long submit_jif;	/* last _drbd_md_sync_page_io() submit */
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total; /* sum of all values */
	int values[];
};
extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);

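/* fifo_alloc() (drbd_nl.c) returns a zero-initialized buffer with room
 * for fifo_size values, or NULL on allocation failure; it backs the
 * resync controller's plan (rs_plan_s below). */
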
/* flag bits per connection */
enum {
	NET_CONGESTED,		/* The data socket is congested */
	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
	SEND_PING,
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
				 * pending, from drbd worker context.
				 * If set, bdi_write_congested() returns true,
				 * so shrink_page_list() would not recurse into,
				 * and potentially deadlock on, this drbd worker.
				 */
	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
};

enum which_state { NOW, OLD = NOW, NEW };

struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;		/* volume number to device mapping */
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;	/* mutex for read-copy-update of net_conf and disk_conf */
	struct mutex adm_mutex;		/* mutex to serialize administrative requests */
	spinlock_t req_lock;

	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};

struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;	/* volume number to peer device mapping */
	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
	struct mutex cstate_mutex;	/* Protects graceful disconnects */
	unsigned int connect_cnt;	/* Inc each time a connection is established */

	unsigned long flags;
	struct net_conf *net_conf;	/* content protected by rcu */
	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (metadata) packets */
	int agreed_pro_version;		/* actually used protocol version */
	u32 agreed_features;
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;

	struct list_head transfer_log;	/* all requests not yet fully processed */

	struct crypto_shash *cram_hmac_tfm;
	struct crypto_shash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
	struct crypto_shash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread */
	struct crypto_shash *csums_tfm;
	struct crypto_shash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	/* receiver side */
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;	/* transfer log epoch number */
	unsigned current_tle_writes;	/* writes seen within this tl epoch */

	unsigned long last_reconnect_jif;
	/* empty member on older kernels without blk_start_plug() */
	struct blk_plug receiver_plug;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread ack_receiver;
	struct workqueue_struct *ack_sender;

	/* cached pointers,
	 * so we can look up the oldest pending requests more quickly.
	 * protected by resource->req_lock */
	struct drbd_request *req_next; /* DRBD 9: todo.req_next */
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	/* sender side */
	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST	16
	unsigned int w_cb_nr; /* keeps counting up */
	unsigned int r_cb_nr; /* keeps counting up */
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		unsigned long last_sent_barrier_jif;

		/* whether this sender thread
		 * has processed a single write yet. */
		bool seen_any_write_yet;

		/* Which barrier number to send with the next P_BARRIER */
		int current_epoch_nr;

		/* how many write requests have been sent
		 * with req->epoch == current_epoch_nr.
		 * If none, no P_BARRIER will be sent. */
		unsigned current_epoch_writes;
	} send;
};

static inline bool has_net_conf(struct drbd_connection *connection)
{
	bool has_net_conf;

	rcu_read_lock();
	has_net_conf = rcu_dereference(connection->net_conf);
	rcu_read_unlock();

	return has_net_conf;
}

void __update_timing_details(
		struct drbd_thread_timing_details *tdp,
		unsigned int *cb_nr,
		void *cb,
		const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
#define update_receiver_timing_details(c, cb) \
	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	/* protected by ..->resource->req_lock */
	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
	struct work_struct send_acks_work;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};

struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	struct list_head pending_bitmap_io;

	unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
	struct dentry *debugfs_vol_ed_gen_id;
#endif

	unsigned int vnr;	/* volume number within the connection */
	unsigned int minor;	/* device minor number */

	struct kref kref;

	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;     /* partner's disk size */
	struct request_queue *rq_queue;
	struct gendisk	    *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;  /* upon each state change. */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_actlog_cnt;  /* Requests waiting for activity log */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */
	atomic_t suspend_cnt;

	/* Interval tree of pending local requests */
	struct rb_root read_requests;
	struct rb_root write_requests;

	/* for statistics and timeouts */
	/* [0] read, [1] write */
	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	/* use checksums for *this* resync */
	bool use_csums;
	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* marks' time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;
	unsigned long rs_last_bcast; /* [unit jiffies] */

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;   /* need to send P_WRITE_ACK */
	struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
	struct list_head net_ee;    /* zero-copy network send in progress */

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex own_state_mutex;
	struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, resource->conf_update) */
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	/* any requests that would block in drbd_make_request()
	 * are deferred to this single-threaded work queue */
	struct submit_worker submit;
};

struct drbd_bm_aio_ctx {
	struct drbd_device *device;
	struct list_head list; /* on device->pending_bitmap_io */
	unsigned long start_jif;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_AIO_WRITE_ALL_PAGES	4
#define BM_AIO_READ		8
	int error;
	struct kref kref;
};

struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

static inline struct drbd_peer_device *
conn_peer_device(struct drbd_connection *connection, int volume_number)
{
	return idr_find(&connection->peer_devices, volume_number);
}

#define for_each_resource(resource, _resources) \
	list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
	list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
	list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
	list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
	list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
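
/*
 * Illustrative traversal (a sketch): the _rcu variants require an RCU
 * read side critical section, the plain ones the appropriate lock:
 *
 *	rcu_read_lock();
 *	for_each_connection_rcu(connection, resource)
 *		drbd_info(connection, "connect_cnt=%u\n",
 *			  connection->connect_cnt);
 *	rcu_read_unlock();
 */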

static inline unsigned int device_to_minor(struct drbd_device *device)
{
	return device->minor;
}

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
			 unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
			    u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
			 struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
			   struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
				   int size, void *digest, int digest_size,
				   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
extern void drbd_print_uuids(struct drbd_device *device, const char *text);
extern void drbd_queue_unplug(struct drbd_device *device);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
				 int (*io_fn)(struct drbd_device *),
				 void (*done)(struct drbd_device *, int),
				 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);

/* Meta data layout
 *
 * We currently have two possible layouts.
 * Offsets in (512 byte) sectors.
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * Variants:
 *     old, indexed fixed size meta data:
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ][padding*]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  [padding*] are zero or up to 7 unused 512 Byte sectors to the
 *  end of the device, so that the [4k superblock] will be 4k aligned.
 *
 *  The activity log consists of 4k transaction blocks,
 *  which are written in a ring-buffer, or striped ring-buffer like, fashion.
 *  Its total size used to be fixed at 32kB,
 *  but is about to become configurable.
 */

/* Our old fixed size meta data layout
 * allows up to about 3.8TB, so if you want more,
 * you need to use the "flexible" meta data format. */
#define MD_128MB_SECT (128LLU << 11)  /* 128 MB, unit sectors */
#define MD_4kB_SECT	 8
#define MD_32kB_SECT	64

/* One activity log extent represents 4M of storage */
#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)

/* We could make these currently hardcoded constants configurable
 * variables at create-md time (or even re-configurable at runtime?).
 * Which will require some more changes to the DRBD "super block"
 * and attach code.
 *
 * updates per transaction:
 *	This many changes to the active set can be logged with one transaction.
 *	This number is arbitrary.
 * context per transaction:
 *	This many context extent numbers are logged with each transaction.
 *	This number results from the transaction block size (4k), the layout
 *	of the transaction header, and the number of updates per transaction.
 *	See drbd_actlog.c:struct al_transaction_on_disk
 * */
#define AL_UPDATES_PER_TRANSACTION 64	/* arbitrary */
#define AL_CONTEXT_PER_TRANSACTION 919	/* (4096 - 36 - 6*64)/4 */
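/* The 919 above falls out of struct al_transaction_on_disk: a 4096 byte
 * block minus the 36 byte header minus 64 updates of 6 bytes each
 * (2 byte slot number + 4 byte extent number) leaves 3676 bytes, i.e.
 * room for 919 32 bit context extent numbers. */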

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
	int rs_left; /* number of bits set (out of sync) in this extent. */
	int rs_failed; /* number of failed resync requests in this extent. */
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
#define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define SLEEP_TIME (HZ/10)

/* We do bitmap IO in units of 4k blocks.
 * We also still have a hardcoded 4k per bit relation. */
#define BM_BLOCK_SHIFT	12			 /* 4k per bit */
#define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)
/* mostly arbitrarily set the represented size of one bitmap extent,
 * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
 * at 4k per bit resolution) */
#define BM_EXT_SHIFT	 24	/* 16 MiB per resync extent */
#define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* bit to represented kilo byte conversion */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
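/* e.g. at the fixed 4k-per-bit resolution above: BM_SECT_TO_BIT(x) is
 * x >> 3 (eight 512 byte sectors per bit), and Bit2KB(1) == 4. */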

/* in which _bitmap_ extent (resp. sector) the bit for a certain
 * _storage_ sector is located in */
#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
#define BM_BIT_TO_EXT(x)    ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

/* first storage sector a bitmap extent corresponds to */
#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
/* how much _storage_ sectors we have per bitmap extent */
#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
/* how many bits are covered by one bitmap extent (resync extent) */
#define BM_BITS_PER_EXT     (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))

#define BM_BLOCKS_PER_BM_EXT_MASK  (BM_BITS_PER_EXT - 1)


/* in one sector of the bitmap, we have this many activity_log extents. */
#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))

/* the extent in "PER_EXTENT" below is an activity log extent
 * we need that many (long words/bytes) to store the bitmap
 * of one AL_EXTENT_SIZE chunk of storage.
 * we can store the bitmap for that many AL_EXTENTS within
 * one sector of the _on_disk_ bitmap:
 * bit	 0	  bit 37   bit 38	     bit (512*8)-1
 *	     ...|........|........|.. // ..|........|
 * sect. 0	 `296	  `304			   ^(512*8*8)-1
 *
#define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
#define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
#define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXTENT )        //   4
 */

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
/* we have a certain meta data variant that has a fixed on-disk size of 128
 * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
 * log, leaving this many sectors for the bitmap.
 */

#define DRBD_MAX_SECTORS_FIXED_BM \
	  ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
 * so we won't wrap around in drbd_bm_find_next_bit.
 * you should use a 64bit OS for that much storage, anyway. */
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
/* corresponds to (1UL << 38) bits right now. */
#endif

/* Estimate max bio size as 256 * PAGE_SIZE,
 * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
 * Since we may live in a mixed-platform cluster,
 * we limit us to a platform agnostic constant here for now.
 * A followup commit may allow even bigger BIO sizes,
 * once we thought that through. */
#define DRBD_MAX_BIO_SIZE (1U << 20)
#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)    /* Works always = 4k */

#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95    (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */

/* For now, don't allow more than half of what we can "activate" in one
 * activity log transaction to be discarded in one go. We may need to rework
 * drbd_al_begin_io() to allow for even larger discard ranges */
#define DRBD_MAX_BATCH_BIO_SIZE	 (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
#define DRBD_MAX_BBIO_SECTORS    (DRBD_MAX_BATCH_BIO_SIZE >> 9)
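/* i.e. 64/2 = 32 AL extents of 4 MiB each: 128 MiB, or 262144 sectors
 * of discard per batch. */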
1340
1341 extern int drbd_bm_init(struct drbd_device *device);
1342 extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
1343 extern void drbd_bm_cleanup(struct drbd_device *device);
1344 extern void drbd_bm_set_all(struct drbd_device *device);
1345 extern void drbd_bm_clear_all(struct drbd_device *device);
1346 /* set/clear/test only a few bits at a time */
1347 extern int drbd_bm_set_bits(
1348 struct drbd_device *device, unsigned long s, unsigned long e);
1349 extern int drbd_bm_clear_bits(
1350 struct drbd_device *device, unsigned long s, unsigned long e);
1351 extern int drbd_bm_count_bits(
1352 struct drbd_device *device, const unsigned long s, const unsigned long e);
1353 /* bm_set_bits variant for use while holding drbd_bm_lock,
1354 * may process the whole bitmap in one go */
1355 extern void _drbd_bm_set_bits(struct drbd_device *device,
1356 const unsigned long s, const unsigned long e);
1357 extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
1358 extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
1359 extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
1360 extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
1361 extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
1362 extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
1363 extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
1364 extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
1365 extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
1366 extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
1367 extern size_t drbd_bm_words(struct drbd_device *device);
1368 extern unsigned long drbd_bm_bits(struct drbd_device *device);
1369 extern sector_t drbd_bm_capacity(struct drbd_device *device);
1370
1371 #define DRBD_END_OF_BITMAP (~(unsigned long)0)
1372 extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1373 /* bm_find_next variants for use while you hold drbd_bm_lock() */
1374 extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1375 extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
1376 extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
1377 extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
1378 /* for receive_bitmap */
1379 extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
1380 size_t number, unsigned long *buffer);
1381 /* for _drbd_send_bitmap */
1382 extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
1383 size_t number, unsigned long *buffer);
1384
1385 extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
1386 extern void drbd_bm_unlock(struct drbd_device *device);
1387 /* drbd_main.c */
1388
1389 extern struct kmem_cache *drbd_request_cache;
1390 extern struct kmem_cache *drbd_ee_cache; /* peer requests */
1391 extern struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
1392 extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
1393 extern mempool_t drbd_request_mempool;
1394 extern mempool_t drbd_ee_mempool;
1395
1396 /* drbd's page pool, used to buffer data received from the peer,
1397 * or data requested by the peer.
1398 *
1399 * This does not have an emergency reserve.
1400 *
1401 * When allocating from this pool, it first takes pages from the pool.
1402 * Only if the pool is depleted will try to allocate from the system.
1403 *
1404 * The assumption is that pages taken from this pool will be processed,
1405 * and given back, "quickly", and then can be recycled, so we can avoid
1406 * frequent calls to alloc_page(), and still will be able to make progress even
1407 * under memory pressure.
1408 */
1409 extern struct page *drbd_pp_pool;
1410 extern spinlock_t drbd_pp_lock;
1411 extern int drbd_pp_vacant;
1412 extern wait_queue_head_t drbd_pp_wait;
1413
1414 /* We also need a standard (emergency-reserve backed) page pool
1415 * for meta data IO (activity log, bitmap).
1416 * We can keep it global, as long as it is used as "N pages at a time".
1417 * 128 should be plenty, currently we probably can get away with as few as 1.
1418 */
1419 #define DRBD_MIN_POOL_PAGES 128
1420 extern mempool_t drbd_md_io_page_pool;
1421
1422 /* We also need to make sure we get a bio
1423 * when we need it for housekeeping purposes */
1424 extern struct bio_set drbd_md_io_bio_set;
1425 /* to allocate from that set */
1426 extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
1427
1428 /* And a bio_set for cloning */
1429 extern struct bio_set drbd_io_bio_set;
1430
1431 extern struct mutex resources_mutex;
1432
1433 extern int conn_lowest_minor(struct drbd_connection *connection);
1434 extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
1435 extern void drbd_destroy_device(struct kref *kref);
1436 extern void drbd_delete_device(struct drbd_device *device);
1437
1438 extern struct drbd_resource *drbd_create_resource(const char *name);
1439 extern void drbd_free_resource(struct drbd_resource *resource);
1440
1441 extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
1442 extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
1443 extern void drbd_destroy_connection(struct kref *kref);
1444 extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
1445 void *peer_addr, int peer_addr_len);
1446 extern struct drbd_resource *drbd_find_resource(const char *name);
1447 extern void drbd_destroy_resource(struct kref *kref);
1448 extern void conn_free_crypto(struct drbd_connection *connection);
1449
1450 /* drbd_req */
1451 extern void do_submit(struct work_struct *ws);
1452 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
1453 extern blk_qc_t drbd_submit_bio(struct bio *bio);
1454 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
1455 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1456
1457
1458 /* drbd_nl.c */
1459
1460 extern struct mutex notification_mutex;
1461
1462 extern void drbd_suspend_io(struct drbd_device *device);
1463 extern void drbd_resume_io(struct drbd_device *device);
1464 extern char *ppsize(char *buf, unsigned long long size);
1465 extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
1466 enum determine_dev_size {
1467 DS_ERROR_SHRINK = -3,
1468 DS_ERROR_SPACE_MD = -2,
1469 DS_ERROR = -1,
1470 DS_UNCHANGED = 0,
1471 DS_SHRUNK = 1,
1472 DS_GREW = 2,
1473 DS_GREW_FROM_ZERO = 3,
1474 };
1475 extern enum determine_dev_size
1476 drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
1477 extern void resync_after_online_grow(struct drbd_device *);
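/*
 * Illustrative sketch only (compare the resize handling in drbd_nl.c);
 * "flags" here stands for the caller's enum dds_flags.  Negative
 * determine_dev_size values are errors, positive ones report a size change:
 *
 *	enum determine_dev_size dd;
 *
 *	dd = drbd_determine_dev_size(device, flags, NULL);
 *	if (dd < DS_UNCHANGED)
 *		goto fail;	// DS_ERROR, DS_ERROR_SPACE_MD, DS_ERROR_SHRINK
 *	if (dd == DS_GREW)
 *		resync_after_online_grow(device);
 */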
1478 extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
1479 struct drbd_backing_dev *bdev, struct o_qlim *o);
1480 extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
1481 enum drbd_role new_role,
1482 int force);
1483 extern bool conn_try_outdate_peer(struct drbd_connection *connection);
1484 extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
1485 extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
1486 extern int drbd_khelper(struct drbd_device *device, char *cmd);
1487
1488 /* drbd_worker.c */
1489 /* bi_end_io handlers */
1490 extern void drbd_md_endio(struct bio *bio);
1491 extern void drbd_peer_request_endio(struct bio *bio);
1492 extern void drbd_request_endio(struct bio *bio);
1493 extern int drbd_worker(struct drbd_thread *thi);
1494 enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1495 void drbd_resync_after_changed(struct drbd_device *device);
1496 extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1497 extern void resume_next_sg(struct drbd_device *device);
1498 extern void suspend_other_sg(struct drbd_device *device);
1499 extern int drbd_resync_finished(struct drbd_device *device);
1500 /* maybe rather drbd_main.c ? */
1501 extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
1502 extern void drbd_md_put_buffer(struct drbd_device *device);
1503 extern int drbd_md_sync_page_io(struct drbd_device *device,
1504 struct drbd_backing_dev *bdev, sector_t sector, int op);
1505 extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
1506 extern void wait_until_done_or_force_detached(struct drbd_device *device,
1507 struct drbd_backing_dev *bdev, unsigned int *done);
1508 extern void drbd_rs_controller_reset(struct drbd_device *device);
1509
1510 static inline void ov_out_of_sync_print(struct drbd_device *device)
1511 {
1512 if (device->ov_last_oos_size) {
1513 drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
1514 (unsigned long long)device->ov_last_oos_start,
1515 (unsigned long)device->ov_last_oos_size);
1516 }
1517 device->ov_last_oos_size = 0;
1518 }
1519
1520
1521 extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
1522 extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *,
1523 void *);
1524 /* worker callbacks */
1525 extern int w_e_end_data_req(struct drbd_work *, int);
1526 extern int w_e_end_rsdata_req(struct drbd_work *, int);
1527 extern int w_e_end_csum_rs_req(struct drbd_work *, int);
1528 extern int w_e_end_ov_reply(struct drbd_work *, int);
1529 extern int w_e_end_ov_req(struct drbd_work *, int);
1530 extern int w_ov_finished(struct drbd_work *, int);
1531 extern int w_resync_timer(struct drbd_work *, int);
1532 extern int w_send_write_hint(struct drbd_work *, int);
1533 extern int w_send_dblock(struct drbd_work *, int);
1534 extern int w_send_read_req(struct drbd_work *, int);
1535 extern int w_e_reissue(struct drbd_work *, int);
1536 extern int w_restart_disk_io(struct drbd_work *, int);
1537 extern int w_send_out_of_sync(struct drbd_work *, int);
1538 extern int w_start_resync(struct drbd_work *, int);
1539
1540 extern void resync_timer_fn(struct timer_list *t);
1541 extern void start_resync_timer_fn(struct timer_list *t);
1542
1543 extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
1544
1545 /* drbd_receiver.c */
1546 extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
1547 sector_t start, unsigned int nr_sectors, int flags);
1548 extern int drbd_receiver(struct drbd_thread *thi);
1549 extern int drbd_ack_receiver(struct drbd_thread *thi);
1550 extern void drbd_send_ping_wf(struct work_struct *ws);
1551 extern void drbd_send_acks_wf(struct work_struct *ws);
1552 extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
1553 extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
1554 bool throttle_if_app_is_waiting);
1555 extern int drbd_submit_peer_request(struct drbd_device *,
1556 struct drbd_peer_request *, const unsigned,
1557 const unsigned, const int);
1558 extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
1559 extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
1560 sector_t, unsigned int,
1561 unsigned int,
1562 gfp_t) __must_hold(local);
1563 extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
1564 int);
1565 #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
1566 #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
1567 extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
1568 extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1569 extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
1570 extern int drbd_connected(struct drbd_peer_device *);
1571
1572 /* sets the number of 512 byte sectors of our virtual device */
1573 void drbd_set_my_capacity(struct drbd_device *device, sector_t size);
1574
1575 /*
1576 * used to submit our private bio
1577 */
1578 static inline void drbd_submit_bio_noacct(struct drbd_device *device,
1579 int fault_type, struct bio *bio)
1580 {
1581 __release(local);
1582 if (!bio->bi_disk) {
1583 drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n");
1584 bio->bi_status = BLK_STS_IOERR;
1585 bio_endio(bio);
1586 return;
1587 }
1588
1589 if (drbd_insert_fault(device, fault_type))
1590 bio_io_error(bio);
1591 else
1592 submit_bio_noacct(bio);
1593 }
1594
1595 void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1596 enum write_ordering_e wo);
1597
1598 /* drbd_proc.c */
1599 extern struct proc_dir_entry *drbd_proc;
1600 int drbd_seq_show(struct seq_file *seq, void *v);
1601
1602 /* drbd_actlog.c */
1603 extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
1604 extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
1605 extern void drbd_al_begin_io_commit(struct drbd_device *device);
1606 extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
1607 extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
1608 extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1609 extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1610 extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1611 extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
1612 extern void drbd_rs_cancel_all(struct drbd_device *device);
1613 extern int drbd_rs_del_all(struct drbd_device *device);
1614 extern void drbd_rs_failed_io(struct drbd_device *device,
1615 sector_t sector, int size);
1616 extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
1617
1618 enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
1619 extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
1620 enum update_sync_bits_mode mode);
1621 #define drbd_set_in_sync(device, sector, size) \
1622 __drbd_change_sync(device, sector, size, SET_IN_SYNC)
1623 #define drbd_set_out_of_sync(device, sector, size) \
1624 __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
1625 #define drbd_rs_failed_io(device, sector, size) \
1626 __drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
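/*
 * Illustrative only: all three wrappers funnel into __drbd_change_sync();
 * "size" is in bytes, e.g. marking one 4k block out of sync after a
 * failed local write might look like:
 *
 *	drbd_set_out_of_sync(device, sector, 4096);
 */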
1627 extern void drbd_al_shrink(struct drbd_device *device);
1628 extern int drbd_al_initialize(struct drbd_device *, void *);
1629
1630 /* drbd_nl.c */
1631 /* state info broadcast */
1632 struct sib_info {
1633 enum drbd_state_info_bcast_reason sib_reason;
1634 union {
1635 struct {
1636 char *helper_name;
1637 unsigned helper_exit_code;
1638 };
1639 struct {
1640 union drbd_state os;
1641 union drbd_state ns;
1642 };
1643 };
1644 };
1645 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
1646
1647 extern int notify_resource_state(struct sk_buff *,
1648 unsigned int,
1649 struct drbd_resource *,
1650 struct resource_info *,
1651 enum drbd_notification_type);
1652 extern int notify_device_state(struct sk_buff *,
1653 unsigned int,
1654 struct drbd_device *,
1655 struct device_info *,
1656 enum drbd_notification_type);
1657 extern int notify_connection_state(struct sk_buff *,
1658 unsigned int,
1659 struct drbd_connection *,
1660 struct connection_info *,
1661 enum drbd_notification_type);
1662 extern int notify_peer_device_state(struct sk_buff *,
1663 unsigned int,
1664 struct drbd_peer_device *,
1665 struct peer_device_info *,
1666 enum drbd_notification_type);
1667 extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
1668 struct drbd_connection *, const char *, int);
1669
1670 /*
1671 * inline helper functions
1672 *************************/
1673
1674 /* see also page_chain_add and friends in drbd_receiver.c */
1675 static inline struct page *page_chain_next(struct page *page)
1676 {
1677 return (struct page *)page_private(page);
1678 }
1679 #define page_chain_for_each(page) \
1680 for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
1681 page = page_chain_next(page))
1682 #define page_chain_for_each_safe(page, n) \
1683 for (; page && ({ n = page_chain_next(page); 1; }); page = n)
1684
1685
1686 static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
1687 {
1688 struct page *page = peer_req->pages;
1689 page_chain_for_each(page) {
1690 if (page_count(page) > 1)
1691 return 1;
1692 }
1693 return 0;
1694 }
1695
1696 static inline union drbd_state drbd_read_state(struct drbd_device *device)
1697 {
1698 struct drbd_resource *resource = device->resource;
1699 union drbd_state rv;
1700
1701 rv.i = device->state.i;
1702 rv.susp = resource->susp;
1703 rv.susp_nod = resource->susp_nod;
1704 rv.susp_fen = resource->susp_fen;
1705
1706 return rv;
1707 }
1708
1709 enum drbd_force_detach_flags {
1710 DRBD_READ_ERROR,
1711 DRBD_WRITE_ERROR,
1712 DRBD_META_IO_ERROR,
1713 DRBD_FORCE_DETACH,
1714 };
1715
1716 #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
1717 static inline void __drbd_chk_io_error_(struct drbd_device *device,
1718 enum drbd_force_detach_flags df,
1719 const char *where)
1720 {
1721 enum drbd_io_error_p ep;
1722
1723 rcu_read_lock();
1724 ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
1725 rcu_read_unlock();
1726 switch (ep) {
1727 case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
1728 if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
1729 if (__ratelimit(&drbd_ratelimit_state))
1730 drbd_err(device, "Local IO failed in %s.\n", where);
1731 if (device->state.disk > D_INCONSISTENT)
1732 _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
1733 break;
1734 }
1735 fallthrough; /* for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
1736 case EP_DETACH:
1737 case EP_CALL_HELPER:
1738 /* Remember whether we saw a READ or WRITE error.
1739 *
1740 * Recovery of the affected area for WRITE failure is covered
1741 * by the activity log.
1742 * READ errors may fall outside that area though. Certain READ
1743 * errors can be "healed" by writing good data to the affected
1744 * blocks, which triggers block re-allocation in lower layers.
1745 *
1746 * If we can not write the bitmap after a READ error,
1747 * we may need to trigger a full sync (see w_go_diskless()).
1748 *
1749 * Force-detach is not really an IO error, but rather a
1750 * desperate measure to try to deal with a completely
1751 * unresponsive lower level IO stack.
1752 * Still it should be treated as a WRITE error.
1753 *
1754 * Meta IO error is always WRITE error:
1755 * we read meta data only once during attach,
1756 * which will fail in case of errors.
1757 */
1758 set_bit(WAS_IO_ERROR, &device->flags);
1759 if (df == DRBD_READ_ERROR)
1760 set_bit(WAS_READ_ERROR, &device->flags);
1761 if (df == DRBD_FORCE_DETACH)
1762 set_bit(FORCE_DETACH, &device->flags);
1763 if (device->state.disk > D_FAILED) {
1764 _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
1765 drbd_err(device,
1766 "Local IO failed in %s. Detaching...\n", where);
1767 }
1768 break;
1769 }
1770 }
1771
1772 /**
1773 * drbd_chk_io_error() - Handle the on_io_error setting; should be called from all IO completion handlers
1774 * @device: DRBD device.
1775 * @error: Error code passed to the IO completion callback
1776 * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
1777 *
1778 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
1779 */
1780 #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
1781 static inline void drbd_chk_io_error_(struct drbd_device *device,
1782 int error, enum drbd_force_detach_flags forcedetach, const char *where)
1783 {
1784 if (error) {
1785 unsigned long flags;
1786 spin_lock_irqsave(&device->resource->req_lock, flags);
1787 __drbd_chk_io_error_(device, forcedetach, where);
1788 spin_unlock_irqrestore(&device->resource->req_lock, flags);
1789 }
1790 }
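/*
 * Illustrative sketch, not a verbatim call site: typical use from a bio
 * completion handler (compare drbd_request_endio() in drbd_worker.c):
 *
 *	if (bio->bi_status)
 *		drbd_chk_io_error(device, blk_status_to_errno(bio->bi_status),
 *				  DRBD_WRITE_ERROR);
 */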
1791
1792
1793 /**
1794 * drbd_md_first_sector() - Returns the first sector number of the meta data area
1795 * @bdev: Meta data block device.
1796 *
1797 * BTW, for internal meta data, this happens to be the maximum capacity
1798 * we could agree upon with our peer node.
1799 */
1800 static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
1801 {
1802 switch (bdev->md.meta_dev_idx) {
1803 case DRBD_MD_INDEX_INTERNAL:
1804 case DRBD_MD_INDEX_FLEX_INT:
1805 return bdev->md.md_offset + bdev->md.bm_offset;
1806 case DRBD_MD_INDEX_FLEX_EXT:
1807 default:
1808 return bdev->md.md_offset;
1809 }
1810 }
1811
1812 /**
1813 * drbd_md_last_sector() - Return the last sector number of the meta data area
1814 * @bdev: Meta data block device.
1815 */
1816 static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1817 {
1818 switch (bdev->md.meta_dev_idx) {
1819 case DRBD_MD_INDEX_INTERNAL:
1820 case DRBD_MD_INDEX_FLEX_INT:
1821 return bdev->md.md_offset + MD_4kB_SECT -1;
1822 case DRBD_MD_INDEX_FLEX_EXT:
1823 default:
1824 return bdev->md.md_offset + bdev->md.md_size_sect -1;
1825 }
1826 }
1827
1828 /* Returns the number of 512 byte sectors of the device */
1829 static inline sector_t drbd_get_capacity(struct block_device *bdev)
1830 {
1831 /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
1832 return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
1833 }
1834
1835 /**
1836 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
1837 * @bdev: Meta data block device.
1838 *
1839 * Returns the capacity we announce to our peer. We clip ourselves at the
1840 * various MAX_SECTORS, because if we don't, the current implementation will
1841 * oops sooner or later.
1842 */
1843 static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
1844 {
1845 sector_t s;
1846
1847 switch (bdev->md.meta_dev_idx) {
1848 case DRBD_MD_INDEX_INTERNAL:
1849 case DRBD_MD_INDEX_FLEX_INT:
1850 s = drbd_get_capacity(bdev->backing_bdev)
1851 ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1852 drbd_md_first_sector(bdev))
1853 : 0;
1854 break;
1855 case DRBD_MD_INDEX_FLEX_EXT:
1856 s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1857 drbd_get_capacity(bdev->backing_bdev));
1858 /* clip at maximum size the meta device can support */
1859 s = min_t(sector_t, s,
1860 BM_EXT_TO_SECT(bdev->md.md_size_sect
1861 - bdev->md.bm_offset));
1862 break;
1863 default:
1864 s = min_t(sector_t, DRBD_MAX_SECTORS,
1865 drbd_get_capacity(bdev->backing_bdev));
1866 }
1867 return s;
1868 }
1869
1870 /**
1871 * drbd_md_ss() - Return the sector number of our meta data super block
1872 * @bdev: Meta data block device.
1873 */
1874 static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
1875 {
1876 const int meta_dev_idx = bdev->md.meta_dev_idx;
1877
1878 if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
1879 return 0;
1880
1881 /* Since drbd08, internal meta data is always "flexible".
1882 * position: last 4k aligned block of 4k size */
1883 if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1884 meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
1885 return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
1886
1887 /* external, some index; this is the old fixed size layout */
1888 return MD_128MB_SECT * bdev->md.meta_dev_idx;
1889 }
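/*
 * Worked example (illustrative): with internal meta data on a backing
 * device of 1000 sectors, (1000 & ~7ULL) - 8 = 992, so the super block
 * is the last 4k-aligned 4k block, occupying sectors 992..999.
 */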
1890
1891 static inline void
1892 drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1893 {
1894 unsigned long flags;
1895 spin_lock_irqsave(&q->q_lock, flags);
1896 list_add_tail(&w->list, &q->q);
1897 spin_unlock_irqrestore(&q->q_lock, flags);
1898 wake_up(&q->q_wait);
1899 }
1900
1901 static inline void
1902 drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
1903 {
1904 unsigned long flags;
1905 spin_lock_irqsave(&q->q_lock, flags);
1906 if (list_empty_careful(&w->list))
1907 list_add_tail(&w->list, &q->q);
1908 spin_unlock_irqrestore(&q->q_lock, flags);
1909 wake_up(&q->q_wait);
1910 }
1911
1912 static inline void
1913 drbd_device_post_work(struct drbd_device *device, int work_bit)
1914 {
1915 if (!test_and_set_bit(work_bit, &device->flags)) {
1916 struct drbd_connection *connection =
1917 first_peer_device(device)->connection;
1918 struct drbd_work_queue *q = &connection->sender_work;
1919 if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
1920 wake_up(&q->q_wait);
1921 }
1922 }
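/*
 * Illustrative only: queueing a prepared work item for the per-connection
 * worker; the caller is responsible for setting up the callback first:
 *
 *	w->cb = w_send_write_hint;
 *	drbd_queue_work(&connection->sender_work, w);
 */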
1923
1924 extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
1925
1926 /* To get the ack_receiver out of the blocking network stack,
1927 * so it can change its sk_rcvtimeo from idle- to ping-timeout,
1928 * and send a ping, we need to send a signal.
1929 * Which signal we send is irrelevant. */
1930 static inline void wake_ack_receiver(struct drbd_connection *connection)
1931 {
1932 struct task_struct *task = connection->ack_receiver.task;
1933 if (task && get_t_state(&connection->ack_receiver) == RUNNING)
1934 send_sig(SIGXCPU, task, 1);
1935 }
1936
1937 static inline void request_ping(struct drbd_connection *connection)
1938 {
1939 set_bit(SEND_PING, &connection->flags);
1940 wake_ack_receiver(connection);
1941 }
1942
1943 extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
1944 extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
1945 extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
1946 enum drbd_packet, unsigned int, void *,
1947 unsigned int);
1948 extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
1949 enum drbd_packet, unsigned int, void *,
1950 unsigned int);
1951
1952 extern int drbd_send_ping(struct drbd_connection *connection);
1953 extern int drbd_send_ping_ack(struct drbd_connection *connection);
1954 extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
1955 extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
1956
1957 static inline void drbd_thread_stop(struct drbd_thread *thi)
1958 {
1959 _drbd_thread_stop(thi, false, true);
1960 }
1961
1962 static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
1963 {
1964 _drbd_thread_stop(thi, false, false);
1965 }
1966
1967 static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
1968 {
1969 _drbd_thread_stop(thi, true, false);
1970 }
1971
1972 /* counts how many answer packets we expect from our peer,
1973 * for either explicit application requests,
1974 * or implicit barrier packets as necessary.
1975 * increased:
1976 * w_send_barrier
1977 * _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
1978 * it is much easier and equally valid to count what we queue for the
1979 * worker, even before it actually was queued or sent.
1980 * (drbd_make_request_common; recovery path on read io-error)
1981 * decreased:
1982 * got_BarrierAck (respective tl_clear, tl_clear_barrier)
1983 * _req_mod(req, DATA_RECEIVED)
1984 * [from receive_DataReply]
1985 * _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
1986 * [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
1987 * for some reason it is NOT decreased in got_NegAck,
1988 * but in the resulting cleanup code from report_params.
1989 * we should try to remember the reason for that...
1990 * _req_mod(req, SEND_FAILED or SEND_CANCELED)
1991 * _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
1992 * [from tl_clear_barrier]
1993 */
1994 static inline void inc_ap_pending(struct drbd_device *device)
1995 {
1996 atomic_inc(&device->ap_pending_cnt);
1997 }
1998
1999 #define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
2000 if (atomic_read(&device->which) < 0) \
2001 drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n", \
2002 func, line, \
2003 atomic_read(&device->which))
2004
2005 #define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
2006 static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
2007 {
2008 if (atomic_dec_and_test(&device->ap_pending_cnt))
2009 wake_up(&device->misc_wait);
2010 ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
2011 }
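/*
 * Illustrative pairing (simplified): the counter goes up when we queue a
 * request that expects an answer, and down when that answer arrives:
 *
 *	inc_ap_pending(device);		// e.g. on QUEUE_FOR_NET_WRITE
 *	...
 *	dec_ap_pending(device);		// e.g. on WRITE_ACKED_BY_PEER
 */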
2012
2013 /* counts how many resync-related answers we still expect from the peer
2014 * increase decrease
2015 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
2016 * C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER)
2017 * (or P_NEG_ACK with ID_SYNCER)
2018 */
2019 static inline void inc_rs_pending(struct drbd_device *device)
2020 {
2021 atomic_inc(&device->rs_pending_cnt);
2022 }
2023
2024 #define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
2025 static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
2026 {
2027 atomic_dec(&device->rs_pending_cnt);
2028 ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
2029 }
2030
2031 /* counts how many answers we still need to send to the peer.
2032 * increased on
2033 * receive_Data unless protocol A;
2034 * we need to send a P_RECV_ACK (proto B)
2035 * or P_WRITE_ACK (proto C)
2036 * receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
2037 * receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
2038 * receive_Barrier_* we need to send a P_BARRIER_ACK
2039 */
2040 static inline void inc_unacked(struct drbd_device *device)
2041 {
2042 atomic_inc(&device->unacked_cnt);
2043 }
2044
2045 #define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
2046 static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
2047 {
2048 atomic_dec(&device->unacked_cnt);
2049 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2050 }
2051
2052 #define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
2053 static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
2054 {
2055 atomic_sub(n, &device->unacked_cnt);
2056 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2057 }
2058
2059 static inline bool is_sync_target_state(enum drbd_conns connection_state)
2060 {
2061 return connection_state == C_SYNC_TARGET ||
2062 connection_state == C_PAUSED_SYNC_T;
2063 }
2064
2065 static inline bool is_sync_source_state(enum drbd_conns connection_state)
2066 {
2067 return connection_state == C_SYNC_SOURCE ||
2068 connection_state == C_PAUSED_SYNC_S;
2069 }
2070
2071 static inline bool is_sync_state(enum drbd_conns connection_state)
2072 {
2073 return is_sync_source_state(connection_state) ||
2074 is_sync_target_state(connection_state);
2075 }
2076
2077 /**
2078 * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
2079 * @_device: DRBD device.
2080 * @_min_state: Minimum device state required for success.
2081 *
2082 * You have to call put_ldev() when finished working with device->ldev.
2083 */
2084 #define get_ldev_if_state(_device, _min_state) \
2085 (_get_ldev_if_state((_device), (_min_state)) ? \
2086 ({ __acquire(x); true; }) : false)
2087 #define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
2088
2089 static inline void put_ldev(struct drbd_device *device)
2090 {
2091 enum drbd_disk_state disk_state = device->state.disk;
2092 /* We must check the state *before* the atomic_dec becomes visible,
2093 * or we have a theoretical race where someone hitting zero,
2094 * while the state is still D_FAILED, will then see D_DISKLESS in the
2095 * condition below and call into destroy, which must not happen yet. */
2096 int i = atomic_dec_return(&device->local_cnt);
2097
2098 /* This may be called from some endio handler,
2099 * so we must not sleep here. */
2100
2101 __release(local);
2102 D_ASSERT(device, i >= 0);
2103 if (i == 0) {
2104 if (disk_state == D_DISKLESS)
2105 /* even internal references gone, safe to destroy */
2106 drbd_device_post_work(device, DESTROY_DISK);
2107 if (disk_state == D_FAILED)
2108 /* all application IO references gone. */
2109 if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
2110 drbd_device_post_work(device, GO_DISKLESS);
2111 wake_up(&device->misc_wait);
2112 }
2113 }
2114
2115 #ifndef __CHECKER__
2116 static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
2117 {
2118 int io_allowed;
2119
2120 /* never get a reference while D_DISKLESS */
2121 if (device->state.disk == D_DISKLESS)
2122 return 0;
2123
2124 atomic_inc(&device->local_cnt);
2125 io_allowed = (device->state.disk >= mins);
2126 if (!io_allowed)
2127 put_ldev(device);
2128 return io_allowed;
2129 }
2130 #else
2131 extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
2132 #endif
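/*
 * Illustrative only: the canonical reference pattern.  Every successful
 * get_ldev() must be balanced by exactly one put_ldev():
 *
 *	if (get_ldev(device)) {
 *		// device->ldev may safely be dereferenced here
 *		put_ldev(device);
 *	}
 */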
2133
2134 /* this throttles on-the-fly application requests
2135 * according to max_buffers settings;
2136 * maybe re-implement using semaphores? */
2137 static inline int drbd_get_max_buffers(struct drbd_device *device)
2138 {
2139 struct net_conf *nc;
2140 int mxb;
2141
2142 rcu_read_lock();
2143 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2144 mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */
2145 rcu_read_unlock();
2146
2147 return mxb;
2148 }
2149
2150 static inline int drbd_state_is_stable(struct drbd_device *device)
2151 {
2152 union drbd_dev_state s = device->state;
2153
2154 /* DO NOT add a default clause, we want the compiler to warn us
2155 * for any newly introduced state we may have forgotten to add here */
2156
2157 switch ((enum drbd_conns)s.conn) {
2158 /* new io only accepted when there is no connection, ... */
2159 case C_STANDALONE:
2160 case C_WF_CONNECTION:
2161 /* ... or there is a well established connection. */
2162 case C_CONNECTED:
2163 case C_SYNC_SOURCE:
2164 case C_SYNC_TARGET:
2165 case C_VERIFY_S:
2166 case C_VERIFY_T:
2167 case C_PAUSED_SYNC_S:
2168 case C_PAUSED_SYNC_T:
2169 case C_AHEAD:
2170 case C_BEHIND:
2171 /* transitional states, IO allowed */
2172 case C_DISCONNECTING:
2173 case C_UNCONNECTED:
2174 case C_TIMEOUT:
2175 case C_BROKEN_PIPE:
2176 case C_NETWORK_FAILURE:
2177 case C_PROTOCOL_ERROR:
2178 case C_TEAR_DOWN:
2179 case C_WF_REPORT_PARAMS:
2180 case C_STARTING_SYNC_S:
2181 case C_STARTING_SYNC_T:
2182 break;
2183
2184 /* Allow IO in BM exchange states with new protocols */
2185 case C_WF_BITMAP_S:
2186 if (first_peer_device(device)->connection->agreed_pro_version < 96)
2187 return 0;
2188 break;
2189
2190 /* no new io accepted in these states */
2191 case C_WF_BITMAP_T:
2192 case C_WF_SYNC_UUID:
2193 case C_MASK:
2194 /* not "stable" */
2195 return 0;
2196 }
2197
2198 switch ((enum drbd_disk_state)s.disk) {
2199 case D_DISKLESS:
2200 case D_INCONSISTENT:
2201 case D_OUTDATED:
2202 case D_CONSISTENT:
2203 case D_UP_TO_DATE:
2204 case D_FAILED:
2205 /* disk state is stable as well. */
2206 break;
2207
2208 /* no new io accepted during transitional states */
2209 case D_ATTACHING:
2210 case D_NEGOTIATING:
2211 case D_UNKNOWN:
2212 case D_MASK:
2213 /* not "stable" */
2214 return 0;
2215 }
2216
2217 return 1;
2218 }
2219
2220 static inline int drbd_suspended(struct drbd_device *device)
2221 {
2222 struct drbd_resource *resource = device->resource;
2223
2224 return resource->susp || resource->susp_fen || resource->susp_nod;
2225 }
2226
2227 static inline bool may_inc_ap_bio(struct drbd_device *device)
2228 {
2229 int mxb = drbd_get_max_buffers(device);
2230
2231 if (drbd_suspended(device))
2232 return false;
2233 if (atomic_read(&device->suspend_cnt))
2234 return false;
2235
2236 /* to avoid potential deadlock or bitmap corruption,
2237 * in various places, we only allow new application io
2238 * to start during "stable" states. */
2239
2240 /* no new io accepted when attaching or detaching the disk */
2241 if (!drbd_state_is_stable(device))
2242 return false;
2243
2244 /* since some older kernels don't have atomic_add_unless,
2245 * and we are within the spinlock anyway, we have this workaround. */
2246 if (atomic_read(&device->ap_bio_cnt) > mxb)
2247 return false;
2248 if (test_bit(BITMAP_IO, &device->flags))
2249 return false;
2250 return true;
2251 }
2252
2253 static inline bool inc_ap_bio_cond(struct drbd_device *device)
2254 {
2255 bool rv = false;
2256
2257 spin_lock_irq(&device->resource->req_lock);
2258 rv = may_inc_ap_bio(device);
2259 if (rv)
2260 atomic_inc(&device->ap_bio_cnt);
2261 spin_unlock_irq(&device->resource->req_lock);
2262
2263 return rv;
2264 }
2265
2266 static inline void inc_ap_bio(struct drbd_device *device)
2267 {
2268 /* we wait here
2269 * as long as the device is suspended,
2270 * until the bitmap is no longer on the fly during the connection
2271 * handshake, and as long as we would exceed the max_buffers limit.
2272 *
2273 * to avoid races with the reconnect code,
2274 * we need to atomic_inc within the spinlock. */
2275
2276 wait_event(device->misc_wait, inc_ap_bio_cond(device));
2277 }
2278
2279 static inline void dec_ap_bio(struct drbd_device *device)
2280 {
2281 int mxb = drbd_get_max_buffers(device);
2282 int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
2283
2284 D_ASSERT(device, ap_bio >= 0);
2285
2286 if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2287 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
2288 drbd_queue_work(&first_peer_device(device)->
2289 connection->sender_work,
2290 &device->bm_io_work.w);
2291 }
2292
2293 /* this currently does wake_up for every dec_ap_bio!
2294 * maybe rather introduce some type of hysteresis?
2295 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
2296 if (ap_bio < mxb)
2297 wake_up(&device->misc_wait);
2298 }
2299
2300 static inline bool verify_can_do_stop_sector(struct drbd_device *device)
2301 {
2302 return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
2303 first_peer_device(device)->connection->agreed_pro_version != 100;
2304 }
2305
2306 static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
2307 {
2308 int changed = device->ed_uuid != val;
2309 device->ed_uuid = val;
2310 return changed;
2311 }
2312
2313 static inline int drbd_queue_order_type(struct drbd_device *device)
2314 {
2315 /* sorry, we currently have no working implementation
2316 * of distributed TCQ stuff */
2317 #ifndef QUEUE_ORDERED_NONE
2318 #define QUEUE_ORDERED_NONE 0
2319 #endif
2320 return QUEUE_ORDERED_NONE;
2321 }
2322
2323 static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
2324 {
2325 return list_first_entry_or_null(&resource->connections,
2326 struct drbd_connection, connections);
2327 }
2328
2329 #endif
2330