Lines Matching refs:req

37 static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req)  in _drbd_start_io_acct()  argument
39 const int rw = bio_data_dir(req->master_bio); in _drbd_start_io_acct()
44 part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], req->i.size >> 9); in _drbd_start_io_acct()
52 static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req) in _drbd_end_io_acct() argument
54 int rw = bio_data_dir(req->master_bio); in _drbd_end_io_acct()
55 unsigned long duration = jiffies - req->start_time; in _drbd_end_io_acct()
67 struct drbd_request *req; in drbd_req_new() local
69 req = mempool_alloc(drbd_request_mempool, GFP_NOIO); in drbd_req_new()
70 if (!req) in drbd_req_new()
73 drbd_req_make_private_bio(req, bio_src); in drbd_req_new()
74 req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0; in drbd_req_new()
75 req->w.mdev = mdev; in drbd_req_new()
76 req->master_bio = bio_src; in drbd_req_new()
77 req->epoch = 0; in drbd_req_new()
79 drbd_clear_interval(&req->i); in drbd_req_new()
80 req->i.sector = bio_src->bi_sector; in drbd_req_new()
81 req->i.size = bio_src->bi_size; in drbd_req_new()
82 req->i.local = true; in drbd_req_new()
83 req->i.waiting = false; in drbd_req_new()
85 INIT_LIST_HEAD(&req->tl_requests); in drbd_req_new()
86 INIT_LIST_HEAD(&req->w.list); in drbd_req_new()
89 atomic_set(&req->completion_ref, 1); in drbd_req_new()
91 kref_init(&req->kref); in drbd_req_new()
92 return req; in drbd_req_new()
97 struct drbd_request *req = container_of(kref, struct drbd_request, kref); in drbd_req_destroy() local
98 struct drbd_conf *mdev = req->w.mdev; in drbd_req_destroy()
99 const unsigned s = req->rq_state; in drbd_req_destroy()
101 if ((req->master_bio && !(s & RQ_POSTPONED)) || in drbd_req_destroy()
102 atomic_read(&req->completion_ref) || in drbd_req_destroy()
106 s, atomic_read(&req->completion_ref)); in drbd_req_destroy()
116 list_del_init(&req->tl_requests); in drbd_req_destroy()
135 drbd_set_out_of_sync(mdev, req->i.sector, req->i.size); in drbd_req_destroy()
138 drbd_set_in_sync(mdev, req->i.sector, req->i.size); in drbd_req_destroy()
153 drbd_al_complete_io(mdev, &req->i); in drbd_req_destroy()
158 (unsigned long long) req->i.sector, req->i.size); in drbd_req_destroy()
163 mempool_free(req, drbd_request_mempool); in drbd_req_destroy()
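
The drbd_req_new()/drbd_req_destroy() lines above bracket a request's lifetime: a mempool allocation, a kref initialised to 1 for the object's lifetime, a separate completion_ref initialised to 1 that gates completion of the master bio, and a release path that hands the object back to the mempool. A minimal user-space sketch of that dual-counter idea, assuming C11 atomics and using hypothetical names rather than DRBD's own types:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct drbd_request: one counter gates
 * "completion" of the caller-visible work, a second counter gates the
 * lifetime of the object itself (the kref role). */
struct request {
	atomic_int completion_ref;	/* hits 0: complete the request */
	atomic_int refcount;		/* hits 0: free the object */
};

static struct request *request_new(void)
{
	struct request *req = malloc(sizeof(*req));	/* mempool_alloc() stand-in */

	if (!req)
		return NULL;
	atomic_init(&req->completion_ref, 1);
	atomic_init(&req->refcount, 1);
	return req;
}

static void request_destroy(struct request *req)
{
	printf("destroy: freeing request\n");
	free(req);					/* mempool_free() stand-in */
}

/* kref_put()-style drop of one or more lifetime references. */
static void request_put(struct request *req, int n)
{
	if (atomic_fetch_sub(&req->refcount, n) == n)
		request_destroy(req);
}

/* Drop 'put' completion references; complete once the last one is gone. */
static void request_put_completion_ref(struct request *req, int put)
{
	if (atomic_fetch_sub(&req->completion_ref, put) != put)
		return;
	printf("complete: handing the result back to the submitter\n");
}

int main(void)
{
	struct request *req = request_new();

	if (!req)
		return 1;
	request_put_completion_ref(req, 1);	/* completes the request */
	request_put(req, 1);			/* frees the object */
	return 0;
}
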
191 struct drbd_request *req) in drbd_remove_request_interval() argument
193 struct drbd_conf *mdev = req->w.mdev; in drbd_remove_request_interval()
194 struct drbd_interval *i = &req->i; in drbd_remove_request_interval()
210 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) in drbd_req_complete() argument
212 const unsigned s = req->rq_state; in drbd_req_complete()
213 struct drbd_conf *mdev = req->w.mdev; in drbd_req_complete()
233 if (!req->master_bio) { in drbd_req_complete()
238 rw = bio_rw(req->master_bio); in drbd_req_complete()
254 error = PTR_ERR(req->private_bio); in drbd_req_complete()
258 if (!drbd_interval_empty(&req->i)) { in drbd_req_complete()
265 drbd_remove_request_interval(root, req); in drbd_req_complete()
276 req->epoch == atomic_read(&mdev->tconn->current_tle_nr)) in drbd_req_complete()
280 _drbd_end_io_acct(mdev, req); in drbd_req_complete()
296 if (!ok && rw == READ && !list_empty(&req->tl_requests)) in drbd_req_complete()
297 req->rq_state |= RQ_POSTPONED; in drbd_req_complete()
299 if (!(req->rq_state & RQ_POSTPONED)) { in drbd_req_complete()
301 m->bio = req->master_bio; in drbd_req_complete()
302 req->master_bio = NULL; in drbd_req_complete()
306 static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) in drbd_req_put_completion_ref() argument
308 struct drbd_conf *mdev = req->w.mdev; in drbd_req_put_completion_ref()
309 D_ASSERT(m || (req->rq_state & RQ_POSTPONED)); in drbd_req_put_completion_ref()
311 if (!atomic_sub_and_test(put, &req->completion_ref)) in drbd_req_put_completion_ref()
314 drbd_req_complete(req, m); in drbd_req_put_completion_ref()
316 if (req->rq_state & RQ_POSTPONED) { in drbd_req_put_completion_ref()
319 drbd_restart_request(req); in drbd_req_put_completion_ref()
328 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, in mod_rq_state() argument
331 struct drbd_conf *mdev = req->w.mdev; in mod_rq_state()
332 unsigned s = req->rq_state; in mod_rq_state()
341 req->rq_state &= ~clear; in mod_rq_state()
342 req->rq_state |= set; in mod_rq_state()
345 if (req->rq_state == s) in mod_rq_state()
351 atomic_inc(&req->completion_ref); in mod_rq_state()
355 atomic_inc(&req->completion_ref); in mod_rq_state()
359 atomic_inc(&req->completion_ref); in mod_rq_state()
362 kref_get(&req->kref); /* wait for the DONE */ in mod_rq_state()
365 atomic_add(req->i.size >> 9, &mdev->ap_in_flight); in mod_rq_state()
368 atomic_inc(&req->completion_ref); in mod_rq_state()
376 D_ASSERT(req->rq_state & RQ_LOCAL_PENDING); in mod_rq_state()
379 kref_get(&req->kref); in mod_rq_state()
384 if (req->rq_state & RQ_LOCAL_ABORTED) in mod_rq_state()
399 if (req->rq_state & RQ_NET_SENT) in mod_rq_state()
400 atomic_sub(req->i.size >> 9, &mdev->ap_in_flight); in mod_rq_state()
410 int refcount = atomic_read(&req->kref.refcount); in mod_rq_state()
414 s, req->rq_state, refcount, at_least); in mod_rq_state()
418 if (req->i.waiting) in mod_rq_state()
422 k_put += drbd_req_put_completion_ref(req, m, c_put); in mod_rq_state()
424 kref_sub(&req->kref, k_put, drbd_req_destroy); in mod_rq_state()
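
mod_rq_state() above applies a clear mask and a set mask to rq_state, bails out early when nothing actually changed, and takes or drops references only for bits that really flipped. A small user-space sketch of that clear/set idiom; the flag names here are invented for illustration:

#include <stdio.h>

/* Hypothetical request-state flags in the style of the RQ_* bits. */
#define FLAG_LOCAL_PENDING	(1u << 0)
#define FLAG_NET_PENDING	(1u << 1)
#define FLAG_NET_OK		(1u << 2)

/* Apply a clear mask then a set mask; return the bits that changed so the
 * caller can adjust reference counts only for real transitions. */
static unsigned int mod_state(unsigned int *state, unsigned int clear,
			      unsigned int set)
{
	unsigned int old = *state;
	unsigned int new = (old & ~clear) | set;

	if (new == old)
		return 0;	/* nothing changed: nothing to account for */
	*state = new;
	return old ^ new;
}

int main(void)
{
	unsigned int state = FLAG_NET_PENDING;
	unsigned int changed = mod_state(&state, FLAG_NET_PENDING, FLAG_NET_OK);

	printf("state=%#x changed=%#x\n", state, changed);
	return 0;
}
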
427 static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *req) in drbd_report_io_error() argument
435 (req->rq_state & RQ_WRITE) ? "WRITE" : "READ", in drbd_report_io_error()
436 (unsigned long long)req->i.sector, in drbd_report_io_error()
437 req->i.size >> 9, in drbd_report_io_error()
453 int __req_mod(struct drbd_request *req, enum drbd_req_event what, in __req_mod() argument
456 struct drbd_conf *mdev = req->w.mdev; in __req_mod()
477 D_ASSERT(!(req->rq_state & RQ_NET_MASK)); in __req_mod()
482 req->rq_state |= in __req_mod()
485 mod_rq_state(req, m, 0, RQ_NET_PENDING); in __req_mod()
490 D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK)); in __req_mod()
491 mod_rq_state(req, m, 0, RQ_LOCAL_PENDING); in __req_mod()
495 if (req->rq_state & RQ_WRITE) in __req_mod()
496 mdev->writ_cnt += req->i.size >> 9; in __req_mod()
498 mdev->read_cnt += req->i.size >> 9; in __req_mod()
500 mod_rq_state(req, m, RQ_LOCAL_PENDING, in __req_mod()
505 mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED); in __req_mod()
509 drbd_report_io_error(mdev, req); in __req_mod()
511 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); in __req_mod()
515 drbd_set_out_of_sync(mdev, req->i.sector, req->i.size); in __req_mod()
516 drbd_report_io_error(mdev, req); in __req_mod()
521 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); in __req_mod()
535 D_ASSERT(drbd_interval_empty(&req->i)); in __req_mod()
536 drbd_insert_interval(&mdev->read_requests, &req->i); in __req_mod()
540 D_ASSERT(req->rq_state & RQ_NET_PENDING); in __req_mod()
541 D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0); in __req_mod()
542 mod_rq_state(req, m, 0, RQ_NET_QUEUED); in __req_mod()
543 req->w.cb = w_send_read_req; in __req_mod()
544 drbd_queue_work(&mdev->tconn->sender_work, &req->w); in __req_mod()
553 D_ASSERT(drbd_interval_empty(&req->i)); in __req_mod()
554 drbd_insert_interval(&mdev->write_requests, &req->i); in __req_mod()
576 D_ASSERT(req->rq_state & RQ_NET_PENDING); in __req_mod()
577 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); in __req_mod()
578 req->w.cb = w_send_dblock; in __req_mod()
579 drbd_queue_work(&mdev->tconn->sender_work, &req->w); in __req_mod()
592 mod_rq_state(req, m, 0, RQ_NET_QUEUED); in __req_mod()
593 req->w.cb = w_send_out_of_sync; in __req_mod()
594 drbd_queue_work(&mdev->tconn->sender_work, &req->w); in __req_mod()
602 mod_rq_state(req, m, RQ_NET_QUEUED, 0); in __req_mod()
607 if (bio_data_dir(req->master_bio) == WRITE && in __req_mod()
608 !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) { in __req_mod()
611 if (req->rq_state & RQ_NET_PENDING) in __req_mod()
612 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); in __req_mod()
618 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT); in __req_mod()
624 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE); in __req_mod()
629 mod_rq_state(req, m, in __req_mod()
642 D_ASSERT(req->rq_state & RQ_NET_PENDING); in __req_mod()
643 D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK); in __req_mod()
644 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK); in __req_mod()
648 req->rq_state |= RQ_NET_SIS; in __req_mod()
650 D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK); in __req_mod()
658 D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK); in __req_mod()
663 D_ASSERT(req->rq_state & RQ_NET_PENDING); in __req_mod()
664 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); in __req_mod()
668 D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK); in __req_mod()
673 D_ASSERT(req->rq_state & RQ_NET_PENDING); in __req_mod()
674 req->rq_state |= RQ_POSTPONED; in __req_mod()
675 if (req->i.waiting) in __req_mod()
683 mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0); in __req_mod()
687 if (!(req->rq_state & RQ_LOCAL_COMPLETED)) in __req_mod()
689 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); in __req_mod()
693 if (!(req->rq_state & RQ_LOCAL_COMPLETED)) in __req_mod()
696 mod_rq_state(req, m, in __req_mod()
701 if (bio_data_dir(req->master_bio) == WRITE) in __req_mod()
705 req->w.cb = w_restart_disk_io; in __req_mod()
706 drbd_queue_work(&mdev->tconn->sender_work, &req->w); in __req_mod()
711 if (!(req->rq_state & RQ_WRITE) && !req->w.cb) { in __req_mod()
712 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); in __req_mod()
721 if (!(req->rq_state & RQ_NET_OK)) { in __req_mod()
725 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); in __req_mod()
726 if (req->w.cb) { in __req_mod()
727 drbd_queue_work(&mdev->tconn->sender_work, &req->w); in __req_mod()
728 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; in __req_mod()
736 if (!(req->rq_state & RQ_WRITE)) in __req_mod()
739 if (req->rq_state & RQ_NET_PENDING) { in __req_mod()
749 mod_rq_state(req, m, RQ_COMPLETION_SUSP, in __req_mod()
750 (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0); in __req_mod()
754 D_ASSERT(req->rq_state & RQ_NET_PENDING); in __req_mod()
755 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE); in __req_mod()
760 mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); in __req_mod()
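
The __req_mod() lines above form an event dispatcher: each drbd_req_event case largely translates into a (clear, set) mask pair for mod_rq_state(), sometimes followed by setting a work callback and queueing it on the sender. A toy sketch of that dispatch shape, with event and flag names invented for illustration and the side effects reduced to the state update:

#include <stdio.h>

/* Hypothetical flags and events echoing the RQ_* / drbd_req_event style. */
#define FLAG_NET_PENDING	(1u << 0)
#define FLAG_NET_QUEUED		(1u << 1)
#define FLAG_NET_SENT		(1u << 2)
#define FLAG_NET_OK		(1u << 3)

enum req_event {
	EV_TO_BE_SENT,
	EV_QUEUE_FOR_NET,
	EV_SEND_COMPLETED,
	EV_ACK_RECEIVED,
};

struct request {
	unsigned int state;
};

/* Each event maps to a (clear, set) pair; side effects such as queueing
 * sender work would hang off the individual cases. */
static void req_mod(struct request *req, enum req_event ev)
{
	unsigned int clear = 0, set = 0;

	switch (ev) {
	case EV_TO_BE_SENT:
		set = FLAG_NET_PENDING;
		break;
	case EV_QUEUE_FOR_NET:
		set = FLAG_NET_QUEUED;
		break;
	case EV_SEND_COMPLETED:
		clear = FLAG_NET_QUEUED;
		set = FLAG_NET_SENT;
		break;
	case EV_ACK_RECEIVED:
		clear = FLAG_NET_PENDING;
		set = FLAG_NET_OK;
		break;
	}
	req->state = (req->state & ~clear) | set;
}

int main(void)
{
	struct request req = { 0 };

	req_mod(&req, EV_TO_BE_SENT);
	req_mod(&req, EV_QUEUE_FOR_NET);
	req_mod(&req, EV_SEND_COMPLETED);
	req_mod(&req, EV_ACK_RECEIVED);
	printf("final state=%#x\n", req.state);
	return 0;
}
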
834 static void complete_conflicting_writes(struct drbd_request *req) in complete_conflicting_writes() argument
837 struct drbd_conf *mdev = req->w.mdev; in complete_conflicting_writes()
839 sector_t sector = req->i.sector; in complete_conflicting_writes()
840 int size = req->i.size; in complete_conflicting_writes()
915 static bool do_remote_read(struct drbd_request *req) in do_remote_read() argument
917 struct drbd_conf *mdev = req->w.mdev; in do_remote_read()
920 if (req->private_bio) { in do_remote_read()
922 req->i.sector, req->i.size)) { in do_remote_read()
923 bio_put(req->private_bio); in do_remote_read()
924 req->private_bio = NULL; in do_remote_read()
932 if (req->private_bio == NULL) in do_remote_read()
942 if (rbm == RB_PREFER_LOCAL && req->private_bio) in do_remote_read()
945 if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) { in do_remote_read()
946 if (req->private_bio) { in do_remote_read()
947 bio_put(req->private_bio); in do_remote_read()
948 req->private_bio = NULL; in do_remote_read()
960 static int drbd_process_write_request(struct drbd_request *req) in drbd_process_write_request() argument
962 struct drbd_conf *mdev = req->w.mdev; in drbd_process_write_request()
974 if (unlikely(req->i.size == 0)) { in drbd_process_write_request()
976 D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH); in drbd_process_write_request()
978 _req_mod(req, QUEUE_AS_DRBD_BARRIER); in drbd_process_write_request()
988 _req_mod(req, TO_BE_SENT); in drbd_process_write_request()
989 _req_mod(req, QUEUE_FOR_NET_WRITE); in drbd_process_write_request()
990 } else if (drbd_set_out_of_sync(mdev, req->i.sector, req->i.size)) in drbd_process_write_request()
991 _req_mod(req, QUEUE_FOR_SEND_OOS); in drbd_process_write_request()
997 drbd_submit_req_private_bio(struct drbd_request *req) in drbd_submit_req_private_bio() argument
999 struct drbd_conf *mdev = req->w.mdev; in drbd_submit_req_private_bio()
1000 struct bio *bio = req->private_bio; in drbd_submit_req_private_bio()
1023 static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req) in drbd_queue_write() argument
1026 list_add_tail(&req->tl_requests, &mdev->submit.writes); in drbd_queue_write()
1040 struct drbd_request *req; in drbd_request_prepare() local
1043 req = drbd_req_new(mdev, bio); in drbd_request_prepare()
1044 if (!req) { in drbd_request_prepare()
1052 req->start_time = start_time; in drbd_request_prepare()
1055 bio_put(req->private_bio); in drbd_request_prepare()
1056 req->private_bio = NULL; in drbd_request_prepare()
1060 _drbd_start_io_acct(mdev, req); in drbd_request_prepare()
1062 if (rw == WRITE && req->private_bio && req->i.size in drbd_request_prepare()
1064 if (!drbd_al_begin_io_fastpath(mdev, &req->i)) { in drbd_request_prepare()
1065 drbd_queue_write(mdev, req); in drbd_request_prepare()
1068 req->rq_state |= RQ_IN_ACT_LOG; in drbd_request_prepare()
1071 return req; in drbd_request_prepare()
1074 static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *req) in drbd_send_and_submit() argument
1076 const int rw = bio_rw(req->master_bio); in drbd_send_and_submit()
1085 complete_conflicting_writes(req); in drbd_send_and_submit()
1096 req->rq_state |= RQ_POSTPONED; in drbd_send_and_submit()
1097 if (req->private_bio) { in drbd_send_and_submit()
1098 bio_put(req->private_bio); in drbd_send_and_submit()
1099 req->private_bio = NULL; in drbd_send_and_submit()
1109 if (!do_remote_read(req) && !req->private_bio) in drbd_send_and_submit()
1114 req->epoch = atomic_read(&mdev->tconn->current_tle_nr); in drbd_send_and_submit()
1118 if (likely(req->i.size!=0)) { in drbd_send_and_submit()
1122 list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log); in drbd_send_and_submit()
1126 if (!drbd_process_write_request(req)) in drbd_send_and_submit()
1131 if (req->private_bio == NULL) { in drbd_send_and_submit()
1132 _req_mod(req, TO_BE_SENT); in drbd_send_and_submit()
1133 _req_mod(req, QUEUE_FOR_NET_READ); in drbd_send_and_submit()
1138 if (req->private_bio) { in drbd_send_and_submit()
1140 _req_mod(req, TO_BE_SUBMITTED); in drbd_send_and_submit()
1143 drbd_submit_req_private_bio(req); in drbd_send_and_submit()
1149 (unsigned long long)req->i.sector, req->i.size >> 9); in drbd_send_and_submit()
1155 if (drbd_req_put_completion_ref(req, &m, 1)) in drbd_send_and_submit()
1156 kref_put(&req->kref, drbd_req_destroy); in drbd_send_and_submit()
1165 struct drbd_request *req = drbd_request_prepare(mdev, bio, start_time); in __drbd_make_request() local
1166 if (IS_ERR_OR_NULL(req)) in __drbd_make_request()
1168 drbd_send_and_submit(mdev, req); in __drbd_make_request()
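
__drbd_make_request() above splits the work into a prepare step, whose result may come back as NULL or as an error pointer (hence the IS_ERR_OR_NULL() check), and a separate send-and-submit step. A user-space sketch of that split, with minimal stand-ins for the kernel's ERR_PTR()/IS_ERR_OR_NULL() helpers; the request fields here are hypothetical:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for the kernel's error-pointer helpers: small negative
 * errno values are encoded at the very top of the pointer range. */
#define MAX_ERRNO	4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline int IS_ERR_OR_NULL(const void *p)
{
	return !p || (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct request {
	unsigned long start_time;
};

/* Prepare: allocate and initialise; may fail with an error pointer, or (in
 * the real code) consume the request internally and return NULL. */
static struct request *request_prepare(unsigned long start_time)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return ERR_PTR(-ENOMEM);
	req->start_time = start_time;
	return req;
}

static void send_and_submit(struct request *req)
{
	printf("submitting request started at %lu\n", req->start_time);
	free(req);
}

int main(void)
{
	struct request *req = request_prepare(42);

	if (IS_ERR_OR_NULL(req))
		return 1;
	send_and_submit(req);
	return 0;
}
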
1173 struct drbd_request *req, *tmp; in submit_fast_path() local
1174 list_for_each_entry_safe(req, tmp, incoming, tl_requests) { in submit_fast_path()
1175 const int rw = bio_data_dir(req->master_bio); in submit_fast_path()
1178 && req->private_bio && req->i.size in submit_fast_path()
1180 if (!drbd_al_begin_io_fastpath(mdev, &req->i)) in submit_fast_path()
1183 req->rq_state |= RQ_IN_ACT_LOG; in submit_fast_path()
1186 list_del_init(&req->tl_requests); in submit_fast_path()
1187 drbd_send_and_submit(mdev, req); in submit_fast_path()
1195 struct drbd_request *req, *tmp; in prepare_al_transaction_nonblock() local
1200 list_for_each_entry_safe(req, tmp, incoming, tl_requests) { in prepare_al_transaction_nonblock()
1201 err = drbd_al_begin_io_nonblock(mdev, &req->i); in prepare_al_transaction_nonblock()
1206 req->rq_state |= RQ_IN_ACT_LOG; in prepare_al_transaction_nonblock()
1207 list_move_tail(&req->tl_requests, pending); in prepare_al_transaction_nonblock()
1221 struct drbd_request *req, *tmp; in do_submit() local
1266 list_for_each_entry_safe(req, tmp, &pending, tl_requests) { in do_submit()
1267 list_del_init(&req->tl_requests); in do_submit()
1268 drbd_send_and_submit(mdev, req); in do_submit()
1336 struct drbd_request *req; /* oldest request */ in request_timer_fn() local
1360 req = find_oldest_request(tconn); in request_timer_fn()
1361 if (!req) { in request_timer_fn()
1383 if (ent && req->rq_state & RQ_NET_PENDING && in request_timer_fn()
1384 time_after(now, req->start_time + ent) && in request_timer_fn()
1389 if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev && in request_timer_fn()
1390 time_after(now, req->start_time + dt) && in request_timer_fn()
1395 nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et; in request_timer_fn()
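
The request_timer_fn() lines above decide whether the oldest request has exceeded its network (ent) or local-disk (dt) timeout by comparing jiffies with time_after(). The kernel's time_after() is a wrap-safe signed comparison; a user-space sketch of the same trick, assuming a free-running jiffies-like unsigned counter:

#include <stdio.h>

/* Wrap-safe "a is later than b" for a free-running unsigned tick counter,
 * following the kernel's time_after() idiom. */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long now = 10UL;	/* counter just wrapped past 0 */
	unsigned long deadline = -5UL;	/* was set shortly before the wrap */

	/* A plain 'now > deadline' claims the deadline is far in the
	 * future; the signed difference sees that it already passed. */
	printf("naive:      %d\n", now > deadline);
	printf("time_after: %d\n", time_after(now, deadline));
	return 0;
}
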