Lines Matching refs:mdev
34 static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
37 static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req) in _drbd_start_io_acct() argument
42 part_round_stats(cpu, &mdev->vdisk->part0); in _drbd_start_io_acct()
43 part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]); in _drbd_start_io_acct()
44 part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], req->i.size >> 9); in _drbd_start_io_acct()
47 part_inc_in_flight(&mdev->vdisk->part0, rw); in _drbd_start_io_acct()
52 static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req) in _drbd_end_io_acct() argument
58 part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration); in _drbd_end_io_acct()
59 part_round_stats(cpu, &mdev->vdisk->part0); in _drbd_end_io_acct()
60 part_dec_in_flight(&mdev->vdisk->part0, rw); in _drbd_end_io_acct()
64 static struct drbd_request *drbd_req_new(struct drbd_conf *mdev, in drbd_req_new() argument
75 req->w.mdev = mdev; in drbd_req_new()
98 struct drbd_conf *mdev = req->w.mdev; in drbd_req_destroy() local
135 drbd_set_out_of_sync(mdev, req->i.sector, req->i.size); in drbd_req_destroy()
138 drbd_set_in_sync(mdev, req->i.sector, req->i.size); in drbd_req_destroy()
152 if (get_ldev_if_state(mdev, D_FAILED)) { in drbd_req_destroy()
153 drbd_al_complete_io(mdev, &req->i); in drbd_req_destroy()
154 put_ldev(mdev); in drbd_req_destroy()
182 void complete_master_bio(struct drbd_conf *mdev, in complete_master_bio() argument
186 dec_ap_bio(mdev); in complete_master_bio()
193 struct drbd_conf *mdev = req->w.mdev; in drbd_remove_request_interval() local
200 wake_up(&mdev->misc_wait); in drbd_remove_request_interval()
213 struct drbd_conf *mdev = req->w.mdev; in drbd_req_complete() local
262 root = &mdev->write_requests; in drbd_req_complete()
264 root = &mdev->read_requests; in drbd_req_complete()
276 req->epoch == atomic_read(&mdev->tconn->current_tle_nr)) in drbd_req_complete()
277 start_new_tl_epoch(mdev->tconn); in drbd_req_complete()
280 _drbd_end_io_acct(mdev, req); in drbd_req_complete()
308 struct drbd_conf *mdev = req->w.mdev; in drbd_req_put_completion_ref() local
331 struct drbd_conf *mdev = req->w.mdev; in mod_rq_state() local
336 if (drbd_suspended(mdev) && !((s | clear) & RQ_COMPLETION_SUSP)) in mod_rq_state()
354 inc_ap_pending(mdev); in mod_rq_state()
365 atomic_add(req->i.size >> 9, &mdev->ap_in_flight); in mod_rq_state()
391 dec_ap_pending(mdev); in mod_rq_state()
400 atomic_sub(req->i.size >> 9, &mdev->ap_in_flight); in mod_rq_state()
419 wake_up(&mdev->misc_wait); in mod_rq_state()
427 static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *req) in drbd_report_io_error() argument
438 bdevname(mdev->ldev->backing_bdev, b)); in drbd_report_io_error()
456 struct drbd_conf *mdev = req->w.mdev; in __req_mod() local
479 nc = rcu_dereference(mdev->tconn->net_conf); in __req_mod()
496 mdev->writ_cnt += req->i.size >> 9; in __req_mod()
498 mdev->read_cnt += req->i.size >> 9; in __req_mod()
509 drbd_report_io_error(mdev, req); in __req_mod()
510 __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR); in __req_mod()
515 drbd_set_out_of_sync(mdev, req->i.sector, req->i.size); in __req_mod()
516 drbd_report_io_error(mdev, req); in __req_mod()
517 __drbd_chk_io_error(mdev, DRBD_READ_ERROR); in __req_mod()
536 drbd_insert_interval(&mdev->read_requests, &req->i); in __req_mod()
538 set_bit(UNPLUG_REMOTE, &mdev->flags); in __req_mod()
544 drbd_queue_work(&mdev->tconn->sender_work, &req->w); in __req_mod()
554 drbd_insert_interval(&mdev->write_requests, &req->i); in __req_mod()
573 set_bit(UNPLUG_REMOTE, &mdev->flags); in __req_mod()
579 drbd_queue_work(&mdev->tconn->sender_work, &req->w); in __req_mod()
583 nc = rcu_dereference(mdev->tconn->net_conf); in __req_mod()
586 if (mdev->tconn->current_tle_writes >= p) in __req_mod()
587 start_new_tl_epoch(mdev->tconn); in __req_mod()
594 drbd_queue_work(&mdev->tconn->sender_work, &req->w); in __req_mod()
676 wake_up(&mdev->misc_wait); in __req_mod()
704 get_ldev(mdev); /* always succeeds in this call path */ in __req_mod()
706 drbd_queue_work(&mdev->tconn->sender_work, &req->w); in __req_mod()
727 drbd_queue_work(&mdev->tconn->sender_work, &req->w); in __req_mod()
759 start_new_tl_epoch(mdev->tconn); in __req_mod()
774 static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size) in drbd_may_do_local_read() argument
779 if (mdev->state.disk == D_UP_TO_DATE) in drbd_may_do_local_read()
781 if (mdev->state.disk != D_INCONSISTENT) in drbd_may_do_local_read()
784 nr_sectors = drbd_get_capacity(mdev->this_bdev); in drbd_may_do_local_read()
791 return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0; in drbd_may_do_local_read()
794 static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector, in remote_due_to_read_balancing() argument
802 bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info; in remote_due_to_read_balancing()
805 return atomic_read(&mdev->local_cnt) > in remote_due_to_read_balancing()
806 atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt); in remote_due_to_read_balancing()
816 return test_and_change_bit(READ_BALANCE_RR, &mdev->flags); in remote_due_to_read_balancing()
837 struct drbd_conf *mdev = req->w.mdev; in complete_conflicting_writes() local
842 i = drbd_find_overlap(&mdev->write_requests, sector, size); in complete_conflicting_writes()
847 prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE); in complete_conflicting_writes()
848 i = drbd_find_overlap(&mdev->write_requests, sector, size); in complete_conflicting_writes()
853 spin_unlock_irq(&mdev->tconn->req_lock); in complete_conflicting_writes()
855 spin_lock_irq(&mdev->tconn->req_lock); in complete_conflicting_writes()
857 finish_wait(&mdev->misc_wait, &wait); in complete_conflicting_writes()
861 static void maybe_pull_ahead(struct drbd_conf *mdev) in maybe_pull_ahead() argument
863 struct drbd_tconn *tconn = mdev->tconn; in maybe_pull_ahead()
880 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) in maybe_pull_ahead()
884 atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) { in maybe_pull_ahead()
889 if (mdev->act_log->used >= nc->cong_extents) { in maybe_pull_ahead()
896 start_new_tl_epoch(mdev->tconn); in maybe_pull_ahead()
899 _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL); in maybe_pull_ahead()
901 _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL); in maybe_pull_ahead()
903 put_ldev(mdev); in maybe_pull_ahead()
917 struct drbd_conf *mdev = req->w.mdev; in do_remote_read() local
921 if (!drbd_may_do_local_read(mdev, in do_remote_read()
925 put_ldev(mdev); in do_remote_read()
929 if (mdev->state.pdsk != D_UP_TO_DATE) in do_remote_read()
939 rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing; in do_remote_read()
945 if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) { in do_remote_read()
949 put_ldev(mdev); in do_remote_read()
962 struct drbd_conf *mdev = req->w.mdev; in drbd_process_write_request() local
965 remote = drbd_should_do_remote(mdev->state); in drbd_process_write_request()
966 send_oos = drbd_should_send_out_of_sync(mdev->state); in drbd_process_write_request()
990 } else if (drbd_set_out_of_sync(mdev, req->i.sector, req->i.size)) in drbd_process_write_request()
999 struct drbd_conf *mdev = req->w.mdev; in drbd_submit_req_private_bio() local
1003 bio->bi_bdev = mdev->ldev->backing_bdev; in drbd_submit_req_private_bio()
1010 if (get_ldev(mdev)) { in drbd_submit_req_private_bio()
1011 if (drbd_insert_fault(mdev, in drbd_submit_req_private_bio()
1018 put_ldev(mdev); in drbd_submit_req_private_bio()
1023 static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req) in drbd_queue_write() argument
1025 spin_lock(&mdev->submit.lock); in drbd_queue_write()
1026 list_add_tail(&req->tl_requests, &mdev->submit.writes); in drbd_queue_write()
1027 spin_unlock(&mdev->submit.lock); in drbd_queue_write()
1028 queue_work(mdev->submit.wq, &mdev->submit.worker); in drbd_queue_write()
1037 drbd_request_prepare(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) in drbd_request_prepare() argument
1043 req = drbd_req_new(mdev, bio); in drbd_request_prepare()
1045 dec_ap_bio(mdev); in drbd_request_prepare()
1054 if (!get_ldev(mdev)) { in drbd_request_prepare()
1060 _drbd_start_io_acct(mdev, req); in drbd_request_prepare()
1063 && !test_bit(AL_SUSPENDED, &mdev->flags)) { in drbd_request_prepare()
1064 if (!drbd_al_begin_io_fastpath(mdev, &req->i)) { in drbd_request_prepare()
1065 drbd_queue_write(mdev, req); in drbd_request_prepare()
1074 static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *req) in drbd_send_and_submit() argument
1080 spin_lock_irq(&mdev->tconn->req_lock); in drbd_send_and_submit()
1090 maybe_pull_ahead(mdev); in drbd_send_and_submit()
1094 if (drbd_suspended(mdev)) { in drbd_send_and_submit()
1100 put_ldev(mdev); in drbd_send_and_submit()
1114 req->epoch = atomic_read(&mdev->tconn->current_tle_nr); in drbd_send_and_submit()
1120 mdev->tconn->current_tle_writes++; in drbd_send_and_submit()
1122 list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log); in drbd_send_and_submit()
1142 spin_unlock_irq(&mdev->tconn->req_lock); in drbd_send_and_submit()
1144 spin_lock_irq(&mdev->tconn->req_lock); in drbd_send_and_submit()
1157 spin_unlock_irq(&mdev->tconn->req_lock); in drbd_send_and_submit()
1160 complete_master_bio(mdev, &m); in drbd_send_and_submit()
1163 void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) in __drbd_make_request() argument
1165 struct drbd_request *req = drbd_request_prepare(mdev, bio, start_time); in __drbd_make_request()
1168 drbd_send_and_submit(mdev, req); in __drbd_make_request()
1171 static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming) in submit_fast_path() argument
1179 && !test_bit(AL_SUSPENDED, &mdev->flags)) { in submit_fast_path()
1180 if (!drbd_al_begin_io_fastpath(mdev, &req->i)) in submit_fast_path()
1187 drbd_send_and_submit(mdev, req); in submit_fast_path()
1191 static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev, in prepare_al_transaction_nonblock() argument
1199 spin_lock_irq(&mdev->al_lock); in prepare_al_transaction_nonblock()
1201 err = drbd_al_begin_io_nonblock(mdev, &req->i); in prepare_al_transaction_nonblock()
1209 spin_unlock_irq(&mdev->al_lock); in prepare_al_transaction_nonblock()
1211 wake_up(&mdev->al_wait); in prepare_al_transaction_nonblock()
1218 struct drbd_conf *mdev = container_of(ws, struct drbd_conf, submit.worker); in do_submit() local
1224 spin_lock(&mdev->submit.lock); in do_submit()
1225 list_splice_tail_init(&mdev->submit.writes, &incoming); in do_submit()
1226 spin_unlock(&mdev->submit.lock); in do_submit()
1228 submit_fast_path(mdev, &incoming); in do_submit()
1232 wait_event(mdev->al_wait, prepare_al_transaction_nonblock(mdev, &incoming, &pending)); in do_submit()
1246 if (list_empty(&mdev->submit.writes)) in do_submit()
1249 spin_lock(&mdev->submit.lock); in do_submit()
1250 list_splice_tail_init(&mdev->submit.writes, &more_incoming); in do_submit()
1251 spin_unlock(&mdev->submit.lock); in do_submit()
1256 made_progress = prepare_al_transaction_nonblock(mdev, &more_incoming, &more_pending); in do_submit()
1264 drbd_al_begin_io_commit(mdev, false); in do_submit()
1268 drbd_send_and_submit(mdev, req); in do_submit()
1275 struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; in drbd_make_request() local
1285 inc_ap_bio(mdev); in drbd_make_request()
1286 __drbd_make_request(mdev, bio, start_time); in drbd_make_request()
1303 struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; in drbd_merge_bvec() local
1308 if (bio_size && get_ldev(mdev)) { in drbd_merge_bvec()
1310 mdev->ldev->backing_bdev->bd_disk->queue; in drbd_merge_bvec()
1315 put_ldev(mdev); in drbd_merge_bvec()
1334 struct drbd_conf *mdev = (struct drbd_conf *) data; in request_timer_fn() local
1335 struct drbd_tconn *tconn = mdev->tconn; in request_timer_fn()
1343 if (nc && mdev->state.conn >= C_WF_REPORT_PARAMS) in request_timer_fn()
1346 if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */ in request_timer_fn()
1347 dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10; in request_timer_fn()
1348 put_ldev(mdev); in request_timer_fn()
1363 mod_timer(&mdev->request_timer, now + et); in request_timer_fn()
1387 _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); in request_timer_fn()
1389 if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev && in request_timer_fn()
1391 !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) { in request_timer_fn()
1393 __drbd_chk_io_error(mdev, DRBD_FORCE_DETACH); in request_timer_fn()
1397 mod_timer(&mdev->request_timer, nt); in request_timer_fn()
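
Most of the matches above share one pattern: code that touches the local backing device takes a temporary reference with get_ldev()/get_ldev_if_state() before dereferencing mdev->ldev, and drops it again with put_ldev() (see e.g. the drbd_req_destroy() hits at lines 152-154 or drbd_submit_req_private_bio() at lines 1010-1018). A minimal sketch of that pattern follows, assuming the DRBD internal headers of this kernel era; example_complete_local_io() is a hypothetical helper for illustration, not a function from the listing:

	/* Illustrative sketch only: mirrors the get_ldev_if_state()/put_ldev()
	 * reference pattern visible above (drbd_req_destroy(), lines 152-154).
	 * example_complete_local_io() is hypothetical, not part of DRBD. */
	static void example_complete_local_io(struct drbd_conf *mdev, struct drbd_request *req)
	{
		/* Only touch the local disk if it is at least D_FAILED; on success
		 * this also holds a reference that pins mdev->ldev until put_ldev(). */
		if (get_ldev_if_state(mdev, D_FAILED)) {
			drbd_al_complete_io(mdev, &req->i);	/* release the activity-log extent */
			put_ldev(mdev);				/* drop the local-disk reference */
		}
	}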