1 /*
2    drbd_req.c
3 
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5 
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9 
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14 
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19 
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 
24  */
25 
26 #include <linux/module.h>
27 
28 #include <linux/slab.h>
29 #include <linux/drbd.h>
30 #include "drbd_int.h"
31 #include "drbd_req.h"
32 
33 
34 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
35 
36 /* Update disk stats at start of I/O request */
37 static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
38 {
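	/* req->i.size is kept in bytes; the block layer accounting helpers
	 * want a count of 512-byte sectors, hence the >> 9. */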
39 	generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9,
40 			      &device->vdisk->part0);
41 }
42 
43 /* Update disk stats when completing request upwards */
44 static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
45 {
46 	generic_end_io_acct(bio_data_dir(req->master_bio),
47 			    &device->vdisk->part0, req->start_jif);
48 }
49 
50 static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
51 {
52 	struct drbd_request *req;
53 
54 	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
55 	if (!req)
56 		return NULL;
57 	memset(req, 0, sizeof(*req));
58 
59 	drbd_req_make_private_bio(req, bio_src);
60 	req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
61 		      | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
62 		      | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
63 	req->device = device;
64 	req->master_bio = bio_src;
65 	req->epoch = 0;
66 
67 	drbd_clear_interval(&req->i);
68 	req->i.sector     = bio_src->bi_iter.bi_sector;
69 	req->i.size      = bio_src->bi_iter.bi_size;
70 	req->i.local = true;
71 	req->i.waiting = false;
72 
73 	INIT_LIST_HEAD(&req->tl_requests);
74 	INIT_LIST_HEAD(&req->w.list);
75 	INIT_LIST_HEAD(&req->req_pending_master_completion);
76 	INIT_LIST_HEAD(&req->req_pending_local);
77 
78 	/* one reference to be put by __drbd_make_request */
79 	atomic_set(&req->completion_ref, 1);
80 	/* one kref as long as completion_ref > 0 */
81 	kref_init(&req->kref);
82 	return req;
83 }
84 
85 static void drbd_remove_request_interval(struct rb_root *root,
86 					 struct drbd_request *req)
87 {
88 	struct drbd_device *device = req->device;
89 	struct drbd_interval *i = &req->i;
90 
91 	drbd_remove_interval(root, i);
92 
93 	/* Wake up any processes waiting for this request to complete.  */
94 	if (i->waiting)
95 		wake_up(&device->misc_wait);
96 }
97 
98 void drbd_req_destroy(struct kref *kref)
99 {
100 	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
101 	struct drbd_device *device = req->device;
102 	const unsigned s = req->rq_state;
103 
104 	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
105 		atomic_read(&req->completion_ref) ||
106 		(s & RQ_LOCAL_PENDING) ||
107 		((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
108 		drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
109 				s, atomic_read(&req->completion_ref));
110 		return;
111 	}
112 
113 	/* If called from mod_rq_state (expected normal case) or
114 	 * drbd_send_and_submit (the less likely normal path), this holds the
115 	 * req_lock, and req->tl_requests will typically be on ->transfer_log,
116 	 * though it may be still empty (never added to the transfer log).
117 	 *
118 	 * If called from do_retry(), we do NOT hold the req_lock, but we are
119 	 * still allowed to unconditionally list_del(&req->tl_requests),
120 	 * because it will be on a local on-stack list only. */
121 	list_del_init(&req->tl_requests);
122 
123 	/* finally remove the request from the conflict detection
124 	 * respective block_id verification interval tree. */
125 	if (!drbd_interval_empty(&req->i)) {
126 		struct rb_root *root;
127 
128 		if (s & RQ_WRITE)
129 			root = &device->write_requests;
130 		else
131 			root = &device->read_requests;
132 		drbd_remove_request_interval(root, req);
133 	} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
134 		drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
135 			s, (unsigned long long)req->i.sector, req->i.size);
136 
137 	/* if it was a write, we may have to set the corresponding
138 	 * bit(s) out-of-sync first. If it had a local part, we need to
139 	 * release the reference to the activity log. */
140 	if (s & RQ_WRITE) {
141 		/* Set out-of-sync unless both OK flags are set
142 		 * (local only or remote failed).
143 		 * Other places where we set out-of-sync:
144 		 * READ with local io-error */
145 
146 		/* There is a special case:
147 		 * we may notice late that IO was suspended,
148 		 * and postpone, or schedule for retry, a write,
149 		 * before it even was submitted or sent.
150 		 * In that case we do not want to touch the bitmap at all.
151 		 */
152 		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
153 			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
154 				drbd_set_out_of_sync(device, req->i.sector, req->i.size);
155 
156 			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
157 				drbd_set_in_sync(device, req->i.sector, req->i.size);
158 		}
159 
160 		/* one might be tempted to move the drbd_al_complete_io
161 		 * to the local io completion callback drbd_request_endio.
162 		 * but, if this was a mirror write, we may only
163 		 * drbd_al_complete_io after this is RQ_NET_DONE,
164 		 * otherwise the extent could be dropped from the al
165 		 * before it has actually been written on the peer.
166 		 * if we crash before our peer knows about the request,
167 		 * but after the extent has been dropped from the al,
168 		 * we would forget to resync the corresponding extent.
169 		 */
170 		if (s & RQ_IN_ACT_LOG) {
171 			if (get_ldev_if_state(device, D_FAILED)) {
172 				drbd_al_complete_io(device, &req->i);
173 				put_ldev(device);
174 			} else if (__ratelimit(&drbd_ratelimit_state)) {
175 				drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
176 					 "but my Disk seems to have failed :(\n",
177 					 (unsigned long long) req->i.sector, req->i.size);
178 			}
179 		}
180 	}
181 
182 	mempool_free(req, drbd_request_mempool);
183 }
184 
185 static void wake_all_senders(struct drbd_connection *connection)
186 {
187 	wake_up(&connection->sender_work.q_wait);
188 }
189 
190 /* must hold resource->req_lock */
191 void start_new_tl_epoch(struct drbd_connection *connection)
192 {
193 	/* no point closing an epoch, if it is empty, anyways. */
194 	if (connection->current_tle_writes == 0)
195 		return;
196 
197 	connection->current_tle_writes = 0;
198 	atomic_inc(&connection->current_tle_nr);
199 	wake_all_senders(connection);
200 }
201 
202 void complete_master_bio(struct drbd_device *device,
203 		struct bio_and_error *m)
204 {
205 	m->bio->bi_error = m->error;
206 	bio_endio(m->bio);
207 	dec_ap_bio(device);
208 }
209 
210 
211 /* Helper for __req_mod().
212  * Set m->bio to the master bio, if it is fit to be completed,
213  * or leave it alone (it is initialized to NULL in __req_mod),
214  * if it has already been completed, or cannot be completed yet.
215  * If m->bio is set, the error status to be returned is placed in m->error.
216  */
217 static
218 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
219 {
220 	const unsigned s = req->rq_state;
221 	struct drbd_device *device = req->device;
222 	int error, ok;
223 
224 	/* we must not complete the master bio, while it is
225 	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
226 	 *	not yet acknowledged by the peer
227 	 *	not yet completed by the local io subsystem
228 	 * these flags may get cleared in any order by
229 	 *	the worker,
230 	 *	the receiver,
231 	 *	the bio_endio completion callbacks.
232 	 */
233 	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
234 	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
235 	    (s & RQ_COMPLETION_SUSP)) {
236 		drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
237 		return;
238 	}
239 
240 	if (!req->master_bio) {
241 		drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
242 		return;
243 	}
244 
245 	/*
246 	 * figure out whether to report success or failure.
247 	 *
248 	 * report success when at least one of the operations succeeded.
249 	 * or, to put the other way,
250 	 * only report failure, when both operations failed.
251 	 *
252 	 * what to do about the failures is handled elsewhere.
253 	 * what we need to do here is just: complete the master_bio.
254 	 *
255 	 * local completion error, if any, has been stored as ERR_PTR
256 	 * in private_bio within drbd_request_endio.
257 	 */
258 	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
259 	error = PTR_ERR(req->private_bio);
260 
261 	/* Before we can signal completion to the upper layers,
262 	 * we may need to close the current transfer log epoch.
263 	 * We are within the request lock, so we can simply compare
264 	 * the request epoch number with the current transfer log
265 	 * epoch number.  If they match, increase the current_tle_nr,
266 	 * and reset the transfer log epoch write_cnt.
267 	 */
268 	if (op_is_write(bio_op(req->master_bio)) &&
269 	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
270 		start_new_tl_epoch(first_peer_device(device)->connection);
271 
272 	/* Update disk stats */
273 	_drbd_end_io_acct(device, req);
274 
275 	/* If READ failed,
276 	 * have it be pushed back to the retry work queue,
277 	 * so it will re-enter __drbd_make_request(),
278 	 * and be re-assigned to a suitable local or remote path,
279 	 * or failed if we do not have access to good data anymore.
280 	 *
281 	 * Unless it was failed early by __drbd_make_request(),
282 	 * because no path was available, in which case
283 	 * it was not even added to the transfer_log.
284 	 *
285 	 * read-ahead may fail, and will not be retried.
286 	 *
287 	 * WRITE should have used all available paths already.
288 	 */
289 	if (!ok &&
290 	    bio_op(req->master_bio) == REQ_OP_READ &&
291 	    !(req->master_bio->bi_opf & REQ_RAHEAD) &&
292 	    !list_empty(&req->tl_requests))
293 		req->rq_state |= RQ_POSTPONED;
294 
295 	if (!(req->rq_state & RQ_POSTPONED)) {
296 		m->error = ok ? 0 : (error ?: -EIO);
297 		m->bio = req->master_bio;
298 		req->master_bio = NULL;
299 		/* We leave it in the tree, to be able to verify later
300 		 * write-acks in protocol != C during resync.
301 		 * But we mark it as "complete", so it won't be counted as
302 		 * conflict in a multi-primary setup. */
303 		req->i.completed = true;
304 	}
305 
306 	if (req->i.waiting)
307 		wake_up(&device->misc_wait);
308 
309 	/* Either we are about to complete to upper layers,
310 	 * or we will restart this request.
311 	 * In either case, the request object will be destroyed soon,
312 	 * so better remove it from all lists. */
313 	list_del_init(&req->req_pending_master_completion);
314 }
315 
316 /* still holds resource->req_lock */
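/* Drop "put" references from req->completion_ref.  Once the last one is gone,
 * the master bio is completed (or, if the request was marked RQ_POSTPONED,
 * it is queued for retry instead).  Returns 1 if the caller should now also
 * drop the kref that was held on behalf of the completion_ref, 0 otherwise. */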
317 static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
318 {
319 	struct drbd_device *device = req->device;
320 	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
321 
322 	if (!atomic_sub_and_test(put, &req->completion_ref))
323 		return 0;
324 
325 	drbd_req_complete(req, m);
326 
327 	if (req->rq_state & RQ_POSTPONED) {
328 		/* don't destroy the req object just yet,
329 		 * but queue it for retry */
330 		drbd_restart_request(req);
331 		return 0;
332 	}
333 
334 	return 1;
335 }
336 
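/* The connection keeps cached pointers (req_next, req_ack_pending,
 * req_not_net_done) to the oldest request in the transfer log that is still
 * in the respective state.  The set_if_null_* helpers establish such a
 * pointer when it is unset; the advance_* helpers move it forward to the
 * next matching request once the current one leaves that state, so readers
 * of these pointers need not walk the whole transfer log. */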
337 static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
338 {
339 	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
340 	if (!connection)
341 		return;
342 	if (connection->req_next == NULL)
343 		connection->req_next = req;
344 }
345 
346 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
347 {
348 	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
349 	if (!connection)
350 		return;
351 	if (connection->req_next != req)
352 		return;
353 	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
354 		const unsigned s = req->rq_state;
355 		if (s & RQ_NET_QUEUED)
356 			break;
357 	}
358 	if (&req->tl_requests == &connection->transfer_log)
359 		req = NULL;
360 	connection->req_next = req;
361 }
362 
363 static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
364 {
365 	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
366 	if (!connection)
367 		return;
368 	if (connection->req_ack_pending == NULL)
369 		connection->req_ack_pending = req;
370 }
371 
372 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
373 {
374 	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
375 	if (!connection)
376 		return;
377 	if (connection->req_ack_pending != req)
378 		return;
379 	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
380 		const unsigned s = req->rq_state;
381 		if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
382 			break;
383 	}
384 	if (&req->tl_requests == &connection->transfer_log)
385 		req = NULL;
386 	connection->req_ack_pending = req;
387 }
388 
389 static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
390 {
391 	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
392 	if (!connection)
393 		return;
394 	if (connection->req_not_net_done == NULL)
395 		connection->req_not_net_done = req;
396 }
397 
398 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
399 {
400 	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
401 	if (!connection)
402 		return;
403 	if (connection->req_not_net_done != req)
404 		return;
405 	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
406 		const unsigned s = req->rq_state;
407 		if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
408 			break;
409 	}
410 	if (&req->tl_requests == &connection->transfer_log)
411 		req = NULL;
412 	connection->req_not_net_done = req;
413 }
414 
415 /* I'd like this to be the only place that manipulates
416  * req->completion_ref and req->kref. */
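/* Rough sketch of the scheme below: completion_ref counts reasons why the
 * master bio cannot be completed yet (local IO pending, network send or ack
 * outstanding, completion suspended, ...), while kref counts reasons the
 * request object itself must stay around.  mod_rq_state() first applies the
 * set/clear masks, then takes new references for state bits that were just
 * set, and only afterwards drops references for bits that were cleared, so
 * a transition can never complete or free the request halfway through. */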
417 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
418 		int clear, int set)
419 {
420 	struct drbd_device *device = req->device;
421 	struct drbd_peer_device *peer_device = first_peer_device(device);
422 	unsigned s = req->rq_state;
423 	int c_put = 0;
424 	int k_put = 0;
425 
426 	if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
427 		set |= RQ_COMPLETION_SUSP;
428 
429 	/* apply */
430 
431 	req->rq_state &= ~clear;
432 	req->rq_state |= set;
433 
434 	/* no change? */
435 	if (req->rq_state == s)
436 		return;
437 
438 	/* intent: get references */
439 
440 	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
441 		atomic_inc(&req->completion_ref);
442 
443 	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
444 		inc_ap_pending(device);
445 		atomic_inc(&req->completion_ref);
446 	}
447 
448 	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
449 		atomic_inc(&req->completion_ref);
450 		set_if_null_req_next(peer_device, req);
451 	}
452 
453 	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
454 		kref_get(&req->kref); /* wait for the DONE */
455 
456 	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
457 		/* potentially already completed in the ack_receiver thread */
458 		if (!(s & RQ_NET_DONE)) {
459 			atomic_add(req->i.size >> 9, &device->ap_in_flight);
460 			set_if_null_req_not_net_done(peer_device, req);
461 		}
462 		if (req->rq_state & RQ_NET_PENDING)
463 			set_if_null_req_ack_pending(peer_device, req);
464 	}
465 
466 	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
467 		atomic_inc(&req->completion_ref);
468 
469 	/* progress: put references */
470 
471 	if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
472 		++c_put;
473 
474 	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
475 		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
476 		/* local completion may still come in later,
477 		 * we need to keep the req object around. */
478 		kref_get(&req->kref);
479 		++c_put;
480 	}
481 
482 	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
483 		if (req->rq_state & RQ_LOCAL_ABORTED)
484 			++k_put;
485 		else
486 			++c_put;
487 		list_del_init(&req->req_pending_local);
488 	}
489 
490 	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
491 		dec_ap_pending(device);
492 		++c_put;
493 		req->acked_jif = jiffies;
494 		advance_conn_req_ack_pending(peer_device, req);
495 	}
496 
497 	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
498 		++c_put;
499 		advance_conn_req_next(peer_device, req);
500 	}
501 
502 	if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
503 		if (s & RQ_NET_SENT)
504 			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
505 		if (s & RQ_EXP_BARR_ACK)
506 			++k_put;
507 		req->net_done_jif = jiffies;
508 
509 		/* in ahead/behind mode, or just in case,
510 		 * before we finally destroy this request,
511 		 * the caching pointers must not reference it anymore */
512 		advance_conn_req_next(peer_device, req);
513 		advance_conn_req_ack_pending(peer_device, req);
514 		advance_conn_req_not_net_done(peer_device, req);
515 	}
516 
517 	/* potentially complete and destroy */
518 
519 	if (k_put || c_put) {
520 	/* Completion does its own kref_put.  If we are going to
521 		 * kref_sub below, we need req to be still around then. */
522 		int at_least = k_put + !!c_put;
523 		int refcount = atomic_read(&req->kref.refcount);
524 		if (refcount < at_least)
525 			drbd_err(device,
526 				"mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
527 				s, req->rq_state, refcount, at_least);
528 	}
529 
530 	/* If we made progress, retry conflicting peer requests, if any. */
531 	if (req->i.waiting)
532 		wake_up(&device->misc_wait);
533 
534 	if (c_put)
535 		k_put += drbd_req_put_completion_ref(req, m, c_put);
536 	if (k_put)
537 		kref_sub(&req->kref, k_put, drbd_req_destroy);
538 }
539 
540 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
541 {
542         char b[BDEVNAME_SIZE];
543 
544 	if (!__ratelimit(&drbd_ratelimit_state))
545 		return;
546 
547 	drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
548 			(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
549 			(unsigned long long)req->i.sector,
550 			req->i.size >> 9,
551 			bdevname(device->ldev->backing_bdev, b));
552 }
553 
554 /* Helper for HANDED_OVER_TO_NETWORK.
555  * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
556  * Is it also still "PENDING"?
557  * --> If so, clear PENDING and set NET_OK below.
558  * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
559  * (and we must not set RQ_NET_OK) */
560 static inline bool is_pending_write_protocol_A(struct drbd_request *req)
561 {
562 	return (req->rq_state &
563 		   (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
564 		== (RQ_WRITE|RQ_NET_PENDING);
565 }
566 
567 /* obviously this could be coded as many single functions
568  * instead of one huge switch,
569  * or by putting the code directly in the respective locations
570  * (as it has been before).
571  *
572  * but having it this way
573  *  enforces that it is all in this one place, where it is easier to audit,
574  *  it makes it obvious that whatever "event" "happens" to a request should
575  *  happen "atomically" within the req_lock,
576  *  and it enforces that we have to think in a very structured manner
577  *  about the "events" that may happen to a request during its life time ...
578  */
579 int __req_mod(struct drbd_request *req, enum drbd_req_event what,
580 		struct bio_and_error *m)
581 {
582 	struct drbd_device *const device = req->device;
583 	struct drbd_peer_device *const peer_device = first_peer_device(device);
584 	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
585 	struct net_conf *nc;
586 	int p, rv = 0;
587 
588 	if (m)
589 		m->bio = NULL;
590 
591 	switch (what) {
592 	default:
593 		drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
594 		break;
595 
596 	/* does not happen...
597 	 * initialization done in drbd_req_new
598 	case CREATED:
599 		break;
600 		*/
601 
602 	case TO_BE_SENT: /* via network */
603 		/* reached via __drbd_make_request
604 		 * and from w_read_retry_remote */
605 		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
606 		rcu_read_lock();
607 		nc = rcu_dereference(connection->net_conf);
608 		p = nc->wire_protocol;
609 		rcu_read_unlock();
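		/* Record which kind of ack the wire protocol will give us:
		 * protocol C an explicit write ack (P_WRITE_ACK),
		 * protocol B a receive ack (P_RECV_ACK),
		 * protocol A neither (see HANDED_OVER_TO_NETWORK below). */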
610 		req->rq_state |=
611 			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
612 			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
613 		mod_rq_state(req, m, 0, RQ_NET_PENDING);
614 		break;
615 
616 	case TO_BE_SUBMITTED: /* locally */
617 		/* reached via __drbd_make_request */
618 		D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
619 		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
620 		break;
621 
622 	case COMPLETED_OK:
623 		if (req->rq_state & RQ_WRITE)
624 			device->writ_cnt += req->i.size >> 9;
625 		else
626 			device->read_cnt += req->i.size >> 9;
627 
628 		mod_rq_state(req, m, RQ_LOCAL_PENDING,
629 				RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
630 		break;
631 
632 	case ABORT_DISK_IO:
633 		mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
634 		break;
635 
636 	case WRITE_COMPLETED_WITH_ERROR:
637 		drbd_report_io_error(device, req);
638 		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
639 		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
640 		break;
641 
642 	case READ_COMPLETED_WITH_ERROR:
643 		drbd_set_out_of_sync(device, req->i.sector, req->i.size);
644 		drbd_report_io_error(device, req);
645 		__drbd_chk_io_error(device, DRBD_READ_ERROR);
646 		/* fall through. */
647 	case READ_AHEAD_COMPLETED_WITH_ERROR:
648 		/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
649 		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
650 		break;
651 
652 	case DISCARD_COMPLETED_NOTSUPP:
653 	case DISCARD_COMPLETED_WITH_ERROR:
654 		/* I'd rather not detach from local disk just because it
655 		 * failed a REQ_DISCARD. */
656 		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
657 		break;
658 
659 	case QUEUE_FOR_NET_READ:
660 		/* READ, and
661 		 * no local disk,
662 		 * or target area marked as invalid,
663 		 * or just got an io-error. */
664 		/* from __drbd_make_request
665 		 * or from bio_endio during read io-error recovery */
666 
667 		/* So we can verify the handle in the answer packet.
668 		 * Corresponding drbd_remove_request_interval is in
669 		 * drbd_req_complete() */
670 		D_ASSERT(device, drbd_interval_empty(&req->i));
671 		drbd_insert_interval(&device->read_requests, &req->i);
672 
673 		set_bit(UNPLUG_REMOTE, &device->flags);
674 
675 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
676 		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
677 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
678 		req->w.cb = w_send_read_req;
679 		drbd_queue_work(&connection->sender_work,
680 				&req->w);
681 		break;
682 
683 	case QUEUE_FOR_NET_WRITE:
684 		/* assert something? */
685 		/* from __drbd_make_request only */
686 
687 		/* Corresponding drbd_remove_request_interval is in
688 		 * drbd_req_complete() */
689 		D_ASSERT(device, drbd_interval_empty(&req->i));
690 		drbd_insert_interval(&device->write_requests, &req->i);
691 
692 		/* NOTE
693 		 * In case the req ended up on the transfer log before being
694 		 * queued on the worker, it could lead to this request being
695 		 * missed during cleanup after connection loss.
696 		 * So we have to do both operations here,
697 		 * within the same lock that protects the transfer log.
698 		 *
699 		 * _req_add_to_epoch(req); this has to be after the
700 		 * _maybe_start_new_epoch(req); which happened in
701 		 * __drbd_make_request, because we now may set the bit
702 		 * again ourselves to close the current epoch.
703 		 *
704 		 * Add req to the (now) current epoch (barrier). */
705 
706 		/* otherwise we may lose an unplug, which may cause some remote
707 		 * io-scheduler timeout to expire, increasing maximum latency,
708 		 * hurting performance. */
709 		set_bit(UNPLUG_REMOTE, &device->flags);
710 
711 		/* queue work item to send data */
712 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
713 		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
714 		req->w.cb =  w_send_dblock;
715 		drbd_queue_work(&connection->sender_work,
716 				&req->w);
717 
718 		/* close the epoch, in case it outgrew the limit */
719 		rcu_read_lock();
720 		nc = rcu_dereference(connection->net_conf);
721 		p = nc->max_epoch_size;
722 		rcu_read_unlock();
723 		if (connection->current_tle_writes >= p)
724 			start_new_tl_epoch(connection);
725 
726 		break;
727 
728 	case QUEUE_FOR_SEND_OOS:
729 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
730 		req->w.cb =  w_send_out_of_sync;
731 		drbd_queue_work(&connection->sender_work,
732 				&req->w);
733 		break;
734 
735 	case READ_RETRY_REMOTE_CANCELED:
736 	case SEND_CANCELED:
737 	case SEND_FAILED:
738 		/* real cleanup will be done from tl_clear.  just update flags
739 		 * so it is no longer marked as on the worker queue */
740 		mod_rq_state(req, m, RQ_NET_QUEUED, 0);
741 		break;
742 
743 	case HANDED_OVER_TO_NETWORK:
744 		/* assert something? */
745 		if (is_pending_write_protocol_A(req))
746 			/* this is what is dangerous about protocol A:
747 			 * pretend it was successfully written on the peer. */
748 			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
749 						RQ_NET_SENT|RQ_NET_OK);
750 		else
751 			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
752 		/* It is still not yet RQ_NET_DONE until the
753 		 * corresponding epoch barrier got acked as well,
754 		 * so we know what to dirty on connection loss. */
755 		break;
756 
757 	case OOS_HANDED_TO_NETWORK:
758 		/* Was not set PENDING, no longer QUEUED, so is now DONE
759 		 * as far as this connection is concerned. */
760 		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
761 		break;
762 
763 	case CONNECTION_LOST_WHILE_PENDING:
764 		/* transfer log cleanup after connection loss */
765 		mod_rq_state(req, m,
766 				RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
767 				RQ_NET_DONE);
768 		break;
769 
770 	case CONFLICT_RESOLVED:
771 		/* for superseded conflicting writes of multiple primaries,
772 		 * there is no need to keep anything in the tl, potential
773 		 * node crashes are covered by the activity log.
774 		 *
775 		 * If this request had been marked as RQ_POSTPONED before,
776 		 * it will actually not be completed, but "restarted",
777 		 * resubmitted from the retry worker context. */
778 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
779 		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
780 		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
781 		break;
782 
783 	case WRITE_ACKED_BY_PEER_AND_SIS:
784 		req->rq_state |= RQ_NET_SIS;
785 	case WRITE_ACKED_BY_PEER:
786 		/* Normal operation protocol C: successfully written on peer.
787 		 * During resync, even in protocol != C,
788 		 * we requested an explicit write ack anyways.
789 		 * Which means we cannot even assert anything here.
790 		 * Nothing more to do here.
791 		 * We want to keep the tl in place for all protocols, to cater
792 		 * for volatile write-back caches on lower level devices. */
793 		goto ack_common;
794 	case RECV_ACKED_BY_PEER:
795 		D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
796 		/* protocol B; pretends to be successfully written on peer.
797 		 * see also notes above in HANDED_OVER_TO_NETWORK about
798 		 * protocol != C */
799 	ack_common:
800 		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
801 		break;
802 
803 	case POSTPONE_WRITE:
804 		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
805 		/* If this node has already detected the write conflict, the
806 		 * worker will be waiting on misc_wait.  Wake it up once this
807 		 * request has completed locally.
808 		 */
809 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
810 		req->rq_state |= RQ_POSTPONED;
811 		if (req->i.waiting)
812 			wake_up(&device->misc_wait);
813 		/* Do not clear RQ_NET_PENDING. This request will make further
814 		 * progress via restart_conflicting_writes() or
815 		 * fail_postponed_requests(). Hopefully. */
816 		break;
817 
818 	case NEG_ACKED:
819 		mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
820 		break;
821 
822 	case FAIL_FROZEN_DISK_IO:
823 		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
824 			break;
825 		mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
826 		break;
827 
828 	case RESTART_FROZEN_DISK_IO:
829 		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
830 			break;
831 
832 		mod_rq_state(req, m,
833 				RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
834 				RQ_LOCAL_PENDING);
835 
836 		rv = MR_READ;
837 		if (bio_data_dir(req->master_bio) == WRITE)
838 			rv = MR_WRITE;
839 
840 		get_ldev(device); /* always succeeds in this call path */
841 		req->w.cb = w_restart_disk_io;
842 		drbd_queue_work(&connection->sender_work,
843 				&req->w);
844 		break;
845 
846 	case RESEND:
847 		/* Simply complete (local only) READs. */
848 		if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
849 			mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
850 			break;
851 		}
852 
853 		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
854 		   before the connection loss (B&C only); only P_BARRIER_ACK
855 		   (or the local completion?) was missing when we suspended.
856 		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
857 		   During connection handshake, we ensure that the peer was not rebooted. */
858 		if (!(req->rq_state & RQ_NET_OK)) {
859 			/* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
860 			 * in that case we must not set RQ_NET_PENDING. */
861 
862 			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
863 			if (req->w.cb) {
864 				/* w.cb expected to be w_send_dblock, or w_send_read_req */
865 				drbd_queue_work(&connection->sender_work,
866 						&req->w);
867 				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
868 			} /* else: FIXME can this happen? */
869 			break;
870 		}
871 		/* else, fall through to BARRIER_ACKED */
872 
873 	case BARRIER_ACKED:
874 		/* barrier ack for READ requests does not make sense */
875 		if (!(req->rq_state & RQ_WRITE))
876 			break;
877 
878 		if (req->rq_state & RQ_NET_PENDING) {
879 			/* barrier came in before all requests were acked.
880 			 * this is bad, because if the connection is lost now,
881 			 * we won't be able to clean them up... */
882 			drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
883 		}
884 		/* Allowed to complete requests, even while suspended.
885 		 * As this is called for all requests within a matching epoch,
886 		 * we need to filter, and only set RQ_NET_DONE for those that
887 		 * have actually been on the wire. */
888 		mod_rq_state(req, m, RQ_COMPLETION_SUSP,
889 				(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
890 		break;
891 
892 	case DATA_RECEIVED:
893 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
894 		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
895 		break;
896 
897 	case QUEUE_AS_DRBD_BARRIER:
898 		start_new_tl_epoch(connection);
899 		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
900 		break;
901 	};
902 
903 	return rv;
904 }
905 
906 /* we may do a local read if:
907  * - we are consistent (of course),
908  * - or we are generally inconsistent,
909  *   BUT we are still/already IN SYNC for this area.
910  *   since size may be bigger than BM_BLOCK_SIZE,
911  *   we may need to check several bits.
912  */
913 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
914 {
915 	unsigned long sbnr, ebnr;
916 	sector_t esector, nr_sectors;
917 
918 	if (device->state.disk == D_UP_TO_DATE)
919 		return true;
920 	if (device->state.disk != D_INCONSISTENT)
921 		return false;
922 	esector = sector + (size >> 9) - 1;
923 	nr_sectors = drbd_get_capacity(device->this_bdev);
924 	D_ASSERT(device, sector  < nr_sectors);
925 	D_ASSERT(device, esector < nr_sectors);
926 
927 	sbnr = BM_SECT_TO_BIT(sector);
928 	ebnr = BM_SECT_TO_BIT(esector);
929 
930 	return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
931 }
932 
933 static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
934 		enum drbd_read_balancing rbm)
935 {
936 	struct backing_dev_info *bdi;
937 	int stripe_shift;
938 
939 	switch (rbm) {
940 	case RB_CONGESTED_REMOTE:
941 		bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
942 		return bdi_read_congested(bdi);
943 	case RB_LEAST_PENDING:
944 		return atomic_read(&device->local_cnt) >
945 			atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
946 	case RB_32K_STRIPING:  /* stripe_shift = 15 */
947 	case RB_64K_STRIPING:
948 	case RB_128K_STRIPING:
949 	case RB_256K_STRIPING:
950 	case RB_512K_STRIPING:
951 	case RB_1M_STRIPING:   /* stripe_shift = 20 */
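		/* A sector is 512 bytes, so a stripe of 2^stripe_shift bytes
		 * covers 2^(stripe_shift - 9) sectors; bit 0 of the stripe
		 * number then alternates reads between local and remote.
		 * E.g. RB_64K_STRIPING: stripe_shift = 16, every other
		 * 64KiB-aligned chunk is read from the peer. */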
952 		stripe_shift = (rbm - RB_32K_STRIPING + 15);
953 		return (sector >> (stripe_shift - 9)) & 1;
954 	case RB_ROUND_ROBIN:
955 		return test_and_change_bit(READ_BALANCE_RR, &device->flags);
956 	case RB_PREFER_REMOTE:
957 		return true;
958 	case RB_PREFER_LOCAL:
959 	default:
960 		return false;
961 	}
962 }
963 
964 /*
965  * complete_conflicting_writes  -  wait for any conflicting write requests
966  *
967  * The write_requests tree contains all active write requests which we
968  * currently know about.  Wait for any requests to complete which conflict with
969  * the new one.
970  *
971  * Only way out: remove the conflicting intervals from the tree.
972  */
973 static void complete_conflicting_writes(struct drbd_request *req)
974 {
975 	DEFINE_WAIT(wait);
976 	struct drbd_device *device = req->device;
977 	struct drbd_interval *i;
978 	sector_t sector = req->i.sector;
979 	int size = req->i.size;
980 
981 	for (;;) {
982 		drbd_for_each_overlap(i, &device->write_requests, sector, size) {
983 			/* Ignore, if already completed to upper layers. */
984 			if (i->completed)
985 				continue;
986 			/* Handle the first found overlap.  After the schedule
987 			 * we have to restart the tree walk. */
988 			break;
989 		}
990 		if (!i)	/* if any */
991 			break;
992 
993 		/* Indicate to wake up device->misc_wait on progress.  */
994 		prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
995 		i->waiting = true;
996 		spin_unlock_irq(&device->resource->req_lock);
997 		schedule();
998 		spin_lock_irq(&device->resource->req_lock);
999 	}
1000 	finish_wait(&device->misc_wait, &wait);
1001 }
1002 
1003 /* called within req_lock */
1004 static void maybe_pull_ahead(struct drbd_device *device)
1005 {
1006 	struct drbd_connection *connection = first_peer_device(device)->connection;
1007 	struct net_conf *nc;
1008 	bool congested = false;
1009 	enum drbd_on_congestion on_congestion;
1010 
1011 	rcu_read_lock();
1012 	nc = rcu_dereference(connection->net_conf);
1013 	on_congestion = nc ? nc->on_congestion : OC_BLOCK;
1014 	rcu_read_unlock();
1015 	if (on_congestion == OC_BLOCK ||
1016 	    connection->agreed_pro_version < 96)
1017 		return;
1018 
1019 	if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
1020 		return; /* nothing to do ... */
1021 
1022 	/* If I don't even have good local storage, we can not reasonably try
1023 	 * to pull ahead of the peer. We also need the local reference to make
1024 	 * sure device->act_log is there.
1025 	 */
1026 	if (!get_ldev_if_state(device, D_UP_TO_DATE))
1027 		return;
1028 
1029 	if (nc->cong_fill &&
1030 	    atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
1031 		drbd_info(device, "Congestion-fill threshold reached\n");
1032 		congested = true;
1033 	}
1034 
1035 	if (device->act_log->used >= nc->cong_extents) {
1036 		drbd_info(device, "Congestion-extents threshold reached\n");
1037 		congested = true;
1038 	}
1039 
1040 	if (congested) {
1041 		/* start a new epoch for non-mirrored writes */
1042 		start_new_tl_epoch(first_peer_device(device)->connection);
1043 
1044 		if (on_congestion == OC_PULL_AHEAD)
1045 			_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
1046 		else  /*nc->on_congestion == OC_DISCONNECT */
1047 			_drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
1048 	}
1049 	put_ldev(device);
1050 }
1051 
1052 /* If this returns false, and req->private_bio is still set,
1053  * this should be submitted locally.
1054  *
1055  * If it returns false, but req->private_bio is not set,
1056  * we do not have access to good data :(
1057  *
1058  * Otherwise, this destroys req->private_bio, if any,
1059  * and returns true.
1060  */
1061 static bool do_remote_read(struct drbd_request *req)
1062 {
1063 	struct drbd_device *device = req->device;
1064 	enum drbd_read_balancing rbm;
1065 
1066 	if (req->private_bio) {
1067 		if (!drbd_may_do_local_read(device,
1068 					req->i.sector, req->i.size)) {
1069 			bio_put(req->private_bio);
1070 			req->private_bio = NULL;
1071 			put_ldev(device);
1072 		}
1073 	}
1074 
1075 	if (device->state.pdsk != D_UP_TO_DATE)
1076 		return false;
1077 
1078 	if (req->private_bio == NULL)
1079 		return true;
1080 
1081 	/* TODO: improve read balancing decisions, take into account drbd
1082 	 * protocol, pending requests etc. */
1083 
1084 	rcu_read_lock();
1085 	rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
1086 	rcu_read_unlock();
1087 
1088 	if (rbm == RB_PREFER_LOCAL && req->private_bio)
1089 		return false; /* submit locally */
1090 
1091 	if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
1092 		if (req->private_bio) {
1093 			bio_put(req->private_bio);
1094 			req->private_bio = NULL;
1095 			put_ldev(device);
1096 		}
1097 		return true;
1098 	}
1099 
1100 	return false;
1101 }
1102 
1103 bool drbd_should_do_remote(union drbd_dev_state s)
1104 {
1105 	return s.pdsk == D_UP_TO_DATE ||
1106 		(s.pdsk >= D_INCONSISTENT &&
1107 		 s.conn >= C_WF_BITMAP_T &&
1108 		 s.conn < C_AHEAD);
1109 	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
1110 	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
1111 	   states. */
1112 }
1113 
1114 static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
1115 {
1116 	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
1117 	/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
1118 	   since we enter state C_AHEAD only if proto >= 96 */
1119 }
1120 
1121 /* returns number of connections (== 1, for drbd 8.4)
1122  * expected to actually write this data,
1123  * which does NOT include those that we are L_AHEAD for. */
1124 static int drbd_process_write_request(struct drbd_request *req)
1125 {
1126 	struct drbd_device *device = req->device;
1127 	int remote, send_oos;
1128 
1129 	remote = drbd_should_do_remote(device->state);
1130 	send_oos = drbd_should_send_out_of_sync(device->state);
1131 
1132 	/* Need to replicate writes.  Unless it is an empty flush,
1133 	 * which is better mapped to a DRBD P_BARRIER packet,
1134 	 * also for drbd wire protocol compatibility reasons.
1135 	 * If this was a flush, just start a new epoch.
1136 	 * Unless the current epoch was empty anyways, or we are not currently
1137 	 * replicating, in which case there is no point. */
1138 	if (unlikely(req->i.size == 0)) {
1139 		/* The only size==0 bios we expect are empty flushes. */
1140 		D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
1141 		if (remote)
1142 			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
1143 		return remote;
1144 	}
1145 
1146 	if (!remote && !send_oos)
1147 		return 0;
1148 
1149 	D_ASSERT(device, !(remote && send_oos));
1150 
1151 	if (remote) {
1152 		_req_mod(req, TO_BE_SENT);
1153 		_req_mod(req, QUEUE_FOR_NET_WRITE);
1154 	} else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
1155 		_req_mod(req, QUEUE_FOR_SEND_OOS);
1156 
1157 	return remote;
1158 }
1159 
1160 static void drbd_process_discard_req(struct drbd_request *req)
1161 {
1162 	int err = drbd_issue_discard_or_zero_out(req->device,
1163 				req->i.sector, req->i.size >> 9, true);
1164 
1165 	if (err)
1166 		req->private_bio->bi_error = -EIO;
1167 	bio_endio(req->private_bio);
1168 }
1169 
1170 static void
1171 drbd_submit_req_private_bio(struct drbd_request *req)
1172 {
1173 	struct drbd_device *device = req->device;
1174 	struct bio *bio = req->private_bio;
1175 	unsigned int type;
1176 
1177 	if (bio_op(bio) != REQ_OP_READ)
1178 		type = DRBD_FAULT_DT_WR;
1179 	else if (bio->bi_opf & REQ_RAHEAD)
1180 		type = DRBD_FAULT_DT_RA;
1181 	else
1182 		type = DRBD_FAULT_DT_RD;
1183 
1184 	bio->bi_bdev = device->ldev->backing_bdev;
1185 
1186 	/* State may have changed since we grabbed our reference on the
1187 	 * ->ldev member. Double check, and short-circuit to endio.
1188 	 * In case the last activity log transaction failed to get on
1189 	 * stable storage, and this is a WRITE, we may not even submit
1190 	 * this bio. */
1191 	if (get_ldev(device)) {
1192 		if (drbd_insert_fault(device, type))
1193 			bio_io_error(bio);
1194 		else if (bio_op(bio) == REQ_OP_DISCARD)
1195 			drbd_process_discard_req(req);
1196 		else
1197 			generic_make_request(bio);
1198 		put_ldev(device);
1199 	} else
1200 		bio_io_error(bio);
1201 }
1202 
1203 static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
1204 {
1205 	spin_lock_irq(&device->resource->req_lock);
1206 	list_add_tail(&req->tl_requests, &device->submit.writes);
1207 	list_add_tail(&req->req_pending_master_completion,
1208 			&device->pending_master_completion[1 /* WRITE */]);
1209 	spin_unlock_irq(&device->resource->req_lock);
1210 	queue_work(device->submit.wq, &device->submit.worker);
1211 	/* do_submit() may sleep internally on al_wait, too */
1212 	wake_up(&device->al_wait);
1213 }
1214 
1215 /* returns the new drbd_request pointer, if the caller is expected to
1216  * drbd_send_and_submit() it (to save latency), or NULL if we queued the
1217  * request on the submitter thread.
1218  * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
1219  */
1220 static struct drbd_request *
1221 drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
1222 {
1223 	const int rw = bio_data_dir(bio);
1224 	struct drbd_request *req;
1225 
1226 	/* allocate outside of all locks; */
1227 	req = drbd_req_new(device, bio);
1228 	if (!req) {
1229 		dec_ap_bio(device);
1230 		/* only pass the error to the upper layers.
1231 		 * if user cannot handle io errors, that's not our business. */
1232 		drbd_err(device, "could not kmalloc() req\n");
1233 		bio->bi_error = -ENOMEM;
1234 		bio_endio(bio);
1235 		return ERR_PTR(-ENOMEM);
1236 	}
1237 	req->start_jif = start_jif;
1238 
1239 	if (!get_ldev(device)) {
1240 		bio_put(req->private_bio);
1241 		req->private_bio = NULL;
1242 	}
1243 
1244 	/* Update disk stats */
1245 	_drbd_start_io_acct(device, req);
1246 
1247 	/* process discards always from our submitter thread */
1248 	if (bio_op(bio) == REQ_OP_DISCARD)
1249 		goto queue_for_submitter_thread;
1250 
1251 	if (rw == WRITE && req->private_bio && req->i.size
1252 	&& !test_bit(AL_SUSPENDED, &device->flags)) {
1253 		if (!drbd_al_begin_io_fastpath(device, &req->i))
1254 			goto queue_for_submitter_thread;
1255 		req->rq_state |= RQ_IN_ACT_LOG;
1256 		req->in_actlog_jif = jiffies;
1257 	}
1258 	return req;
1259 
1260  queue_for_submitter_thread:
1261 	atomic_inc(&device->ap_actlog_cnt);
1262 	drbd_queue_write(device, req);
1263 	return NULL;
1264 }
1265 
1266 /* Require at least one path to current data.
1267  * We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
1268  * We would not allow to read what was written,
1269  * we would not have bumped the data generation uuids,
1270  * we would cause data divergence for all the wrong reasons.
1271  *
1272  * If we don't see at least one D_UP_TO_DATE, we will fail this request,
1273  * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO,
1274  * and queues for retry later.
1275  */
1276 static bool may_do_writes(struct drbd_device *device)
1277 {
1278 	const union drbd_dev_state s = device->state;
1279 	return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
1280 }
1281 
1282 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
1283 {
1284 	struct drbd_resource *resource = device->resource;
1285 	const int rw = bio_data_dir(req->master_bio);
1286 	struct bio_and_error m = { NULL, };
1287 	bool no_remote = false;
1288 	bool submit_private_bio = false;
1289 
1290 	spin_lock_irq(&resource->req_lock);
1291 	if (rw == WRITE) {
1292 		/* This may temporarily give up the req_lock,
1293 		 * but will re-acquire it before it returns here.
1294 		 * Needs to be before the check on drbd_suspended() */
1295 		complete_conflicting_writes(req);
1296 		/* no more giving up req_lock from now on! */
1297 
1298 		/* check for congestion, and potentially stop sending
1299 		 * full data updates, but start sending "dirty bits" only. */
1300 		maybe_pull_ahead(device);
1301 	}
1302 
1303 
1304 	if (drbd_suspended(device)) {
1305 		/* push back and retry: */
1306 		req->rq_state |= RQ_POSTPONED;
1307 		if (req->private_bio) {
1308 			bio_put(req->private_bio);
1309 			req->private_bio = NULL;
1310 			put_ldev(device);
1311 		}
1312 		goto out;
1313 	}
1314 
1315 	/* We fail READ early, if we can not serve it.
1316 	 * We must do this before req is registered on any lists.
1317 	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
1318 	if (rw != WRITE) {
1319 		if (!do_remote_read(req) && !req->private_bio)
1320 			goto nodata;
1321 	}
1322 
1323 	/* which transfer log epoch does this belong to? */
1324 	req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);
1325 
1326 	/* no point in adding empty flushes to the transfer log,
1327 	 * they are mapped to drbd barriers already. */
1328 	if (likely(req->i.size!=0)) {
1329 		if (rw == WRITE)
1330 			first_peer_device(device)->connection->current_tle_writes++;
1331 
1332 		list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
1333 	}
1334 
1335 	if (rw == WRITE) {
1336 		if (req->private_bio && !may_do_writes(device)) {
1337 			bio_put(req->private_bio);
1338 			req->private_bio = NULL;
1339 			put_ldev(device);
1340 			goto nodata;
1341 		}
1342 		if (!drbd_process_write_request(req))
1343 			no_remote = true;
1344 	} else {
1345 		/* We either have a private_bio, or we can read from remote.
1346 		 * Otherwise we had done the goto nodata above. */
1347 		if (req->private_bio == NULL) {
1348 			_req_mod(req, TO_BE_SENT);
1349 			_req_mod(req, QUEUE_FOR_NET_READ);
1350 		} else
1351 			no_remote = true;
1352 	}
1353 
1354 	/* If it took the fast path in drbd_request_prepare, add it here.
1355 	 * The slow path has added it already. */
1356 	if (list_empty(&req->req_pending_master_completion))
1357 		list_add_tail(&req->req_pending_master_completion,
1358 			&device->pending_master_completion[rw == WRITE]);
1359 	if (req->private_bio) {
1360 		/* needs to be marked within the same spinlock */
1361 		req->pre_submit_jif = jiffies;
1362 		list_add_tail(&req->req_pending_local,
1363 			&device->pending_completion[rw == WRITE]);
1364 		_req_mod(req, TO_BE_SUBMITTED);
1365 		/* but we need to give up the spinlock to submit */
1366 		submit_private_bio = true;
1367 	} else if (no_remote) {
1368 nodata:
1369 		if (__ratelimit(&drbd_ratelimit_state))
1370 			drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
1371 					(unsigned long long)req->i.sector, req->i.size >> 9);
1372 		/* A write may have been queued for send_oos, however.
1373 		 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
1374 	}
1375 
1376 out:
1377 	if (drbd_req_put_completion_ref(req, &m, 1))
1378 		kref_put(&req->kref, drbd_req_destroy);
1379 	spin_unlock_irq(&resource->req_lock);
1380 
1381 	/* Even though above is a kref_put(), this is safe.
1382 	 * As long as we still need to submit our private bio,
1383 	 * we hold a completion ref, and the request cannot disappear.
1384 	 * If however this request did not even have a private bio to submit
1385 	 * (e.g. remote read), req may already be invalid now.
1386 	 * That's why we cannot check on req->private_bio. */
1387 	if (submit_private_bio)
1388 		drbd_submit_req_private_bio(req);
1389 	if (m.bio)
1390 		complete_master_bio(device, &m);
1391 }
1392 
1393 void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
1394 {
1395 	struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
1396 	if (IS_ERR_OR_NULL(req))
1397 		return;
1398 	drbd_send_and_submit(device, req);
1399 }
1400 
1401 static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
1402 {
1403 	struct drbd_request *req, *tmp;
1404 	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
1405 		const int rw = bio_data_dir(req->master_bio);
1406 
1407 		if (rw == WRITE /* rw != WRITE should not even end up here! */
1408 		&& req->private_bio && req->i.size
1409 		&& !test_bit(AL_SUSPENDED, &device->flags)) {
1410 			if (!drbd_al_begin_io_fastpath(device, &req->i))
1411 				continue;
1412 
1413 			req->rq_state |= RQ_IN_ACT_LOG;
1414 			req->in_actlog_jif = jiffies;
1415 			atomic_dec(&device->ap_actlog_cnt);
1416 		}
1417 
1418 		list_del_init(&req->tl_requests);
1419 		drbd_send_and_submit(device, req);
1420 	}
1421 }
1422 
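/* Try to cover the intervals of the incoming requests with activity log
 * extents without blocking.  Requests whose extents could be activated move
 * to "pending"; requests blocked for now (-EBUSY, e.g. by resync) move to
 * "later"; -ENOBUFS means the current AL transaction has no free slots left,
 * so we stop early and leave the rest on "incoming".  Returns true if
 * anything became pending. */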
1423 static bool prepare_al_transaction_nonblock(struct drbd_device *device,
1424 					    struct list_head *incoming,
1425 					    struct list_head *pending,
1426 					    struct list_head *later)
1427 {
1428 	struct drbd_request *req, *tmp;
1429 	int wake = 0;
1430 	int err;
1431 
1432 	spin_lock_irq(&device->al_lock);
1433 	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
1434 		err = drbd_al_begin_io_nonblock(device, &req->i);
1435 		if (err == -ENOBUFS)
1436 			break;
1437 		if (err == -EBUSY)
1438 			wake = 1;
1439 		if (err)
1440 			list_move_tail(&req->tl_requests, later);
1441 		else
1442 			list_move_tail(&req->tl_requests, pending);
1443 	}
1444 	spin_unlock_irq(&device->al_lock);
1445 	if (wake)
1446 		wake_up(&device->al_wait);
1447 	return !list_empty(pending);
1448 }
1449 
1450 void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
1451 {
1452 	struct drbd_request *req, *tmp;
1453 
1454 	list_for_each_entry_safe(req, tmp, pending, tl_requests) {
1455 		req->rq_state |= RQ_IN_ACT_LOG;
1456 		req->in_actlog_jif = jiffies;
1457 		atomic_dec(&device->ap_actlog_cnt);
1458 		list_del_init(&req->tl_requests);
1459 		drbd_send_and_submit(device, req);
1460 	}
1461 }
1462 
1463 void do_submit(struct work_struct *ws)
1464 {
1465 	struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
1466 	LIST_HEAD(incoming);	/* from drbd_make_request() */
1467 	LIST_HEAD(pending);	/* to be submitted after next AL-transaction commit */
1468 	LIST_HEAD(busy);	/* blocked by resync requests */
1469 
1470 	/* grab new incoming requests */
1471 	spin_lock_irq(&device->resource->req_lock);
1472 	list_splice_tail_init(&device->submit.writes, &incoming);
1473 	spin_unlock_irq(&device->resource->req_lock);
1474 
1475 	for (;;) {
1476 		DEFINE_WAIT(wait);
1477 
1478 		/* move used-to-be-busy back to front of incoming */
1479 		list_splice_init(&busy, &incoming);
1480 		submit_fast_path(device, &incoming);
1481 		if (list_empty(&incoming))
1482 			break;
1483 
1484 		for (;;) {
1485 			prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);
1486 
1487 			list_splice_init(&busy, &incoming);
1488 			prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
1489 			if (!list_empty(&pending))
1490 				break;
1491 
1492 			schedule();
1493 
1494 			/* If all currently "hot" activity log extents are kept busy by
1495 			 * incoming requests, we still must not totally starve new
1496 			 * requests to "cold" extents.
1497 			 * Something left on &incoming means there had not been
1498 			 * enough update slots available, and the activity log
1499 			 * has been marked as "starving".
1500 			 *
1501 			 * Try again now, without looking for new requests,
1502 			 * effectively blocking all new requests until we made
1503 			 * at least _some_ progress with what we currently have.
1504 			 */
1505 			if (!list_empty(&incoming))
1506 				continue;
1507 
1508 			/* Nothing moved to pending, but nothing left
1509 			 * on incoming: all moved to busy!
1510 			 * Grab new and iterate. */
1511 			spin_lock_irq(&device->resource->req_lock);
1512 			list_splice_tail_init(&device->submit.writes, &incoming);
1513 			spin_unlock_irq(&device->resource->req_lock);
1514 		}
1515 		finish_wait(&device->al_wait, &wait);
1516 
1517 		/* If the transaction was full, before all incoming requests
1518 		 * had been processed, skip ahead to commit, and iterate
1519 		 * without splicing in more incoming requests from upper layers.
1520 		 *
1521 		 * Else, if all incoming have been processed,
1522 		 * they have become either "pending" (to be submitted after
1523 		 * next transaction commit) or "busy" (blocked by resync).
1524 		 *
1525 		 * Maybe more was queued, while we prepared the transaction?
1526 		 * Try to stuff those into this transaction as well.
1527 		 * Be strictly non-blocking here,
1528 		 * we already have something to commit.
1529 		 *
1530 		 * Commit if we don't make any more progress.
1531 		 */
1532 
1533 		while (list_empty(&incoming)) {
1534 			LIST_HEAD(more_pending);
1535 			LIST_HEAD(more_incoming);
1536 			bool made_progress;
1537 
1538 			/* It is ok to look outside the lock,
1539 			 * it's only an optimization anyway. */
1540 			if (list_empty(&device->submit.writes))
1541 				break;
1542 
1543 			spin_lock_irq(&device->resource->req_lock);
1544 			list_splice_tail_init(&device->submit.writes, &more_incoming);
1545 			spin_unlock_irq(&device->resource->req_lock);
1546 
1547 			if (list_empty(&more_incoming))
1548 				break;
1549 
1550 			made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);
1551 
1552 			list_splice_tail_init(&more_pending, &pending);
1553 			list_splice_tail_init(&more_incoming, &incoming);
1554 			if (!made_progress)
1555 				break;
1556 		}
1557 
1558 		drbd_al_begin_io_commit(device);
1559 		send_and_submit_pending(device, &pending);
1560 	}
1561 }
1562 
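/* Entry point from the block layer: split the bio to fit our limits if
 * necessary, note the submission time, take an application-bio reference
 * (inc_ap_bio) and hand the bio on to __drbd_make_request(). */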
1563 blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
1564 {
1565 	struct drbd_device *device = (struct drbd_device *) q->queuedata;
1566 	unsigned long start_jif;
1567 
1568 	blk_queue_split(q, &bio, q->bio_split);
1569 
1570 	start_jif = jiffies;
1571 
1572 	/*
1573 	 * what we "blindly" assume:
1574 	 */
1575 	D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
1576 
1577 	inc_ap_bio(device);
1578 	__drbd_make_request(device, bio, start_jif);
1579 	return BLK_QC_T_NONE;
1580 }
1581 
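/* Decide whether the oldest request still waiting on the network has
 * exceeded the effective timeout @ent (ko-count * timeout).  We do not
 * time out within a grace period of @ent after a reconnect, and we do
 * not blame the peer if we have not even sent the epoch closing barrier
 * yet.  Returns true if the connection should be considered timed out. */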
1582 static bool net_timeout_reached(struct drbd_request *net_req,
1583 		struct drbd_connection *connection,
1584 		unsigned long now, unsigned long ent,
1585 		unsigned int ko_count, unsigned int timeout)
1586 {
1587 	struct drbd_device *device = net_req->device;
1588 
1589 	if (!time_after(now, net_req->pre_send_jif + ent))
1590 		return false;
1591 
1592 	if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent))
1593 		return false;
1594 
1595 	if (net_req->rq_state & RQ_NET_PENDING) {
1596 		drbd_warn(device, "Remote failed to finish a request within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
1597 			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
1598 		return true;
1599 	}
1600 
1601 	/* We received an ACK already (or are using protocol A),
1602 	 * but are waiting for the epoch closing barrier ack.
1603 	 * Check if we sent the barrier already.  We should not blame the peer
1604 	 * for being unresponsive, if we did not even ask it yet. */
1605 	if (net_req->epoch == connection->send.current_epoch_nr) {
1606 		drbd_warn(device,
1607 			"We did not send a P_BARRIER for %ums > ko-count (%u) * timeout (%u * 0.1s); drbd kernel thread blocked?\n",
1608 			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
1609 		return false;
1610 	}
1611 
1612 	/* Worst case: we may have been blocked for whatever reason, then
1613 	 * suddenly are able to send a lot of requests (and epoch separating
1614 	 * barriers) in quick succession.
1615 	 * The timestamp of the net_req may be much too old and not correspond
1616 	 * to the sending time of the relevant unack'ed barrier packet, so it
1617 	 * would trigger a spurious timeout.  The latest barrier packet may
1618 	 * have too recent a timestamp to trigger the timeout, so we could
1619 	 * miss a real timeout.  Right now we don't have a place to
1620 	 * conveniently store these timestamps.
1621 	 * But in this particular situation, the application requests are still
1622 	 * completed to upper layers, so DRBD should still "feel" responsive.
1623 	 * No need yet to kill this connection, it may still recover.
1624 	 * If not, eventually we will have queued enough into the network for
1625 	 * us to block. From that point of view, the timestamp of the last sent
1626 	 * barrier packet is relevant enough.
1627 	 */
1628 	if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
1629 		drbd_warn(device, "Remote failed to answer a P_BARRIER (sent at %lu jif; now=%lu jif) within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
1630 			connection->send.last_sent_barrier_jif, now,
1631 			jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout);
1632 		return true;
1633 	}
1634 	return false;
1635 }
1636 
1637 /* A request is considered timed out, if
1638  * - we have some effective timeout from the configuration,
1639  *   with some state restrictions applied,
1640  * - the oldest request is waiting for a response from the network
1641  *   (or from the local disk, respectively),
1642  * - the oldest request is in fact older than the effective timeout,
1643  * - the connection was established (or the disk was attached, respectively)
1644  *   for longer than the timeout already.
1645  * Note that for 32bit jiffies and very stable connections/disks,
1646  * we may have a wrap-around, which is caught by
1647  *   !time_in_range(now, last_..._jif, last_..._jif + timeout).
1648  *
1649  * Side effect: once per 32bit wrap-around interval, which means every
1650  * ~198 days with 250 HZ, we have a window where the timeout would need
1651  * to expire twice (worst case) to become effective. Good enough.
1652  */
1653 
1654 void request_timer_fn(unsigned long data)
1655 {
1656 	struct drbd_device *device = (struct drbd_device *) data;
1657 	struct drbd_connection *connection = first_peer_device(device)->connection;
1658 	struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
1659 	struct net_conf *nc;
1660 	unsigned long oldest_submit_jif;
1661 	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
1662 	unsigned long now;
1663 	unsigned int ko_count = 0, timeout = 0;
1664 
1665 	rcu_read_lock();
1666 	nc = rcu_dereference(connection->net_conf);
1667 	if (nc && device->state.conn >= C_WF_REPORT_PARAMS) {
1668 		ko_count = nc->ko_count;
1669 		timeout = nc->timeout;
1670 	}
1671 
1672 	if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
1673 		dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
1674 		put_ldev(device);
1675 	}
1676 	rcu_read_unlock();
1677 
1678 
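	/* Both timeouts are configured in tenths of a second.  For example,
	 * timeout=60 (6.0 s) and ko-count=7 give an effective network
	 * timeout of ent = 60 * HZ/10 * 7 = 42*HZ, i.e. 42 seconds. */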
1679 	ent = timeout * HZ/10 * ko_count;
1680 	et = min_not_zero(dt, ent);
1681 
1682 	if (!et)
1683 		return; /* Recurring timer stopped */
1684 
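	/* Default next expiry: one full effective timeout from now;
	 * refined below to the nearest not yet expired deadline. */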
1685 	now = jiffies;
1686 	nt = now + et;
1687 
1688 	spin_lock_irq(&device->resource->req_lock);
1689 	req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
1690 	req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);
1691 
1692 	/* maybe the oldest request waiting for the peer is in fact still
1693 	 * blocking in tcp sendmsg.  That's ok, though: that's handled via the
1694 	 * socket send timeout, requesting a ping, and bumping ko-count in
1695 	 * we_should_drop_the_connection().
1696 	 */
1697 
1698 	/* check the oldest request we successfully sent,
1699 	 * but which is still waiting for an ACK. */
1700 	req_peer = connection->req_ack_pending;
1701 
1702 	/* if we don't have such a request (e.g. protocol A),
1703 	 * check the oldest request which is still waiting on its epoch
1704 	 * closing barrier ack. */
1705 	if (!req_peer)
1706 		req_peer = connection->req_not_net_done;
1707 
1708 	/* evaluate the oldest peer request only in one timer! */
1709 	if (req_peer && req_peer->device != device)
1710 		req_peer = NULL;
1711 
1712 	/* do we have something to evaluate? */
1713 	if (req_peer == NULL && req_write == NULL && req_read == NULL)
1714 		goto out;
1715 
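	/* Oldest local submission time: the earlier pre_submit_jif of the
	 * oldest pending write and the oldest pending read; "now" if there
	 * is no pending local I/O, which then skips the disk-timeout check
	 * (see the oldest_submit_jif != now tests below). */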
1716 	oldest_submit_jif =
1717 		(req_write && req_read)
1718 		? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
1719 		  ? req_write->pre_submit_jif : req_read->pre_submit_jif )
1720 		: req_write ? req_write->pre_submit_jif
1721 		: req_read ? req_read->pre_submit_jif : now;
1722 
1723 	if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout))
1724 		_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);
1725 
1726 	if (dt && oldest_submit_jif != now &&
1727 		 time_after(now, oldest_submit_jif + dt) &&
1728 		!time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
1729 		drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
1730 		__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
1731 	}
1732 
1733 	/* Reschedule timer for the nearest not already expired timeout.
1734 	 * Fall back to now + min(effective network timeout, disk timeout). */
1735 	ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
1736 		? req_peer->pre_send_jif + ent : now + et;
1737 	dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
1738 		? oldest_submit_jif + dt : now + et;
1739 	nt = time_before(ent, dt) ? ent : dt;
1740 out:
1741 	spin_unlock_irq(&device->resource->req_lock);
1742 	mod_timer(&device->request_timer, nt);
1743 }
1744