Lines Matching refs:mdev

66 static int drbd_disconnected(struct drbd_conf *mdev);
153 static struct page *__drbd_alloc_pages(struct drbd_conf *mdev, in __drbd_alloc_pages() argument
199 static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev, in reclaim_finished_net_peer_reqs() argument
210 list_for_each_safe(le, tle, &mdev->net_ee) { in reclaim_finished_net_peer_reqs()
218 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev) in drbd_kick_lo_and_reclaim_net() argument
223 spin_lock_irq(&mdev->tconn->req_lock); in drbd_kick_lo_and_reclaim_net()
224 reclaim_finished_net_peer_reqs(mdev, &reclaimed); in drbd_kick_lo_and_reclaim_net()
225 spin_unlock_irq(&mdev->tconn->req_lock); in drbd_kick_lo_and_reclaim_net()
228 drbd_free_net_peer_req(mdev, peer_req); in drbd_kick_lo_and_reclaim_net()
243 struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number, in drbd_alloc_pages() argument
254 nc = rcu_dereference(mdev->tconn->net_conf); in drbd_alloc_pages()
258 if (atomic_read(&mdev->pp_in_use) < mxb) in drbd_alloc_pages()
259 page = __drbd_alloc_pages(mdev, number); in drbd_alloc_pages()
264 drbd_kick_lo_and_reclaim_net(mdev); in drbd_alloc_pages()
266 if (atomic_read(&mdev->pp_in_use) < mxb) { in drbd_alloc_pages()
267 page = __drbd_alloc_pages(mdev, number); in drbd_alloc_pages()
285 atomic_add(number, &mdev->pp_in_use); in drbd_alloc_pages()
293 static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net) in drbd_free_pages() argument
295 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; in drbd_free_pages()
333 drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector, in drbd_alloc_peer_req() argument
340 if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE)) in drbd_alloc_peer_req()
351 page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT)); in drbd_alloc_peer_req()
363 peer_req->w.mdev = mdev; in drbd_alloc_peer_req()
380 void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req, in __drbd_free_peer_req() argument
385 drbd_free_pages(mdev, peer_req->pages, is_net); in __drbd_free_peer_req()
391 int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list) in drbd_free_peer_reqs() argument
396 int is_net = list == &mdev->net_ee; in drbd_free_peer_reqs()
398 spin_lock_irq(&mdev->tconn->req_lock); in drbd_free_peer_reqs()
400 spin_unlock_irq(&mdev->tconn->req_lock); in drbd_free_peer_reqs()
403 __drbd_free_peer_req(mdev, peer_req, is_net); in drbd_free_peer_reqs()
412 static int drbd_finish_peer_reqs(struct drbd_conf *mdev) in drbd_finish_peer_reqs() argument
419 spin_lock_irq(&mdev->tconn->req_lock); in drbd_finish_peer_reqs()
420 reclaim_finished_net_peer_reqs(mdev, &reclaimed); in drbd_finish_peer_reqs()
421 list_splice_init(&mdev->done_ee, &work_list); in drbd_finish_peer_reqs()
422 spin_unlock_irq(&mdev->tconn->req_lock); in drbd_finish_peer_reqs()
425 drbd_free_net_peer_req(mdev, peer_req); in drbd_finish_peer_reqs()
438 drbd_free_peer_req(mdev, peer_req); in drbd_finish_peer_reqs()
440 wake_up(&mdev->ee_wait); in drbd_finish_peer_reqs()
445 static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, in _drbd_wait_ee_list_empty() argument
453 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE); in _drbd_wait_ee_list_empty()
454 spin_unlock_irq(&mdev->tconn->req_lock); in _drbd_wait_ee_list_empty()
456 finish_wait(&mdev->ee_wait, &wait); in _drbd_wait_ee_list_empty()
457 spin_lock_irq(&mdev->tconn->req_lock); in _drbd_wait_ee_list_empty()
461 static void drbd_wait_ee_list_empty(struct drbd_conf *mdev, in drbd_wait_ee_list_empty() argument
464 spin_lock_irq(&mdev->tconn->req_lock); in drbd_wait_ee_list_empty()
465 _drbd_wait_ee_list_empty(mdev, head); in drbd_wait_ee_list_empty()
466 spin_unlock_irq(&mdev->tconn->req_lock); in drbd_wait_ee_list_empty()
833 int drbd_connected(struct drbd_conf *mdev) in drbd_connected() argument
837 atomic_set(&mdev->packet_seq, 0); in drbd_connected()
838 mdev->peer_seq = 0; in drbd_connected()
840 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ? in drbd_connected()
841 &mdev->tconn->cstate_mutex : in drbd_connected()
842 &mdev->own_state_mutex; in drbd_connected()
844 err = drbd_send_sync_param(mdev); in drbd_connected()
846 err = drbd_send_sizes(mdev, 0, 0); in drbd_connected()
848 err = drbd_send_uuids(mdev); in drbd_connected()
850 err = drbd_send_current_state(mdev); in drbd_connected()
851 clear_bit(USE_DEGR_WFC_T, &mdev->flags); in drbd_connected()
852 clear_bit(RESIZE_PENDING, &mdev->flags); in drbd_connected()
853 atomic_set(&mdev->ap_in_flight, 0); in drbd_connected()
854 mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */ in drbd_connected()
869 struct drbd_conf *mdev; in conn_connect() local
1040 idr_for_each_entry(&tconn->volumes, mdev, vnr) { in conn_connect()
1041 kref_get(&mdev->kref); in conn_connect()
1049 mutex_lock(mdev->state_mutex); in conn_connect()
1050 mutex_unlock(mdev->state_mutex); in conn_connect()
1055 set_bit(DISCARD_MY_DATA, &mdev->flags); in conn_connect()
1057 clear_bit(DISCARD_MY_DATA, &mdev->flags); in conn_connect()
1059 drbd_connected(mdev); in conn_connect()
1060 kref_put(&mdev->kref, &drbd_minor_destroy); in conn_connect()
1147 struct drbd_conf *mdev; in drbd_flush() local
1152 idr_for_each_entry(&tconn->volumes, mdev, vnr) { in drbd_flush()
1153 if (!get_ldev(mdev)) in drbd_flush()
1155 kref_get(&mdev->kref); in drbd_flush()
1158 rv = blkdev_issue_flush(mdev->ldev->backing_bdev, in drbd_flush()
1167 put_ldev(mdev); in drbd_flush()
1168 kref_put(&mdev->kref, &drbd_minor_destroy); in drbd_flush()
1262 struct drbd_conf *mdev; in drbd_bump_write_ordering() local
1274 idr_for_each_entry(&tconn->volumes, mdev, vnr) { in drbd_bump_write_ordering()
1275 if (!get_ldev_if_state(mdev, D_ATTACHING)) in drbd_bump_write_ordering()
1277 dc = rcu_dereference(mdev->ldev->disk_conf); in drbd_bump_write_ordering()
1283 put_ldev(mdev); in drbd_bump_write_ordering()
1308 int drbd_submit_peer_request(struct drbd_conf *mdev, in drbd_submit_peer_request() argument
1337 bio->bi_bdev = mdev->ldev->backing_bdev; in drbd_submit_peer_request()
1375 drbd_generic_make_request(mdev, fault_type, bio); in drbd_submit_peer_request()
1388 static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev, in drbd_remove_epoch_entry_interval() argument
1393 drbd_remove_interval(&mdev->write_requests, i); in drbd_remove_epoch_entry_interval()
1398 wake_up(&mdev->misc_wait); in drbd_remove_epoch_entry_interval()
1403 struct drbd_conf *mdev; in conn_wait_active_ee_empty() local
1407 idr_for_each_entry(&tconn->volumes, mdev, vnr) { in conn_wait_active_ee_empty()
1408 kref_get(&mdev->kref); in conn_wait_active_ee_empty()
1410 drbd_wait_ee_list_empty(mdev, &mdev->active_ee); in conn_wait_active_ee_empty()
1411 kref_put(&mdev->kref, &drbd_minor_destroy); in conn_wait_active_ee_empty()
1487 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, in read_in_block() argument
1490 const sector_t capacity = drbd_get_capacity(mdev->this_bdev); in read_in_block()
1494 void *dig_in = mdev->tconn->int_dig_in; in read_in_block()
1495 void *dig_vv = mdev->tconn->int_dig_vv; in read_in_block()
1499 if (mdev->tconn->peer_integrity_tfm) { in read_in_block()
1500 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm); in read_in_block()
1505 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs); in read_in_block()
1529 peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO); in read_in_block()
1541 err = drbd_recv_all_warn(mdev->tconn, data, len); in read_in_block()
1542 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) { in read_in_block()
1548 drbd_free_peer_req(mdev, peer_req); in read_in_block()
1555 drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv); in read_in_block()
1559 drbd_free_peer_req(mdev, peer_req); in read_in_block()
1563 mdev->recv_cnt += data_size>>9; in read_in_block()
1570 static int drbd_drain_block(struct drbd_conf *mdev, int data_size) in drbd_drain_block() argument
1579 page = drbd_alloc_pages(mdev, 1, 1); in drbd_drain_block()
1585 err = drbd_recv_all_warn(mdev->tconn, data, len); in drbd_drain_block()
1591 drbd_free_pages(mdev, page, 0); in drbd_drain_block()
1595 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, in recv_dless_read() argument
1601 void *dig_in = mdev->tconn->int_dig_in; in recv_dless_read()
1602 void *dig_vv = mdev->tconn->int_dig_vv; in recv_dless_read()
1605 if (mdev->tconn->peer_integrity_tfm) { in recv_dless_read()
1606 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm); in recv_dless_read()
1607 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs); in recv_dless_read()
1615 mdev->recv_cnt += data_size>>9; in recv_dless_read()
1623 err = drbd_recv_all_warn(mdev->tconn, mapped, expect); in recv_dless_read()
1631 drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv); in recv_dless_read()
1650 struct drbd_conf *mdev = w->mdev; in e_end_resync_block() local
1657 drbd_set_in_sync(mdev, sector, peer_req->i.size); in e_end_resync_block()
1658 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req); in e_end_resync_block()
1661 drbd_rs_failed_io(mdev, sector, peer_req->i.size); in e_end_resync_block()
1663 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req); in e_end_resync_block()
1665 dec_unacked(mdev); in e_end_resync_block()
1670 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(loca… in recv_resync_read() argument
1674 peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size); in recv_resync_read()
1678 dec_rs_pending(mdev); in recv_resync_read()
1680 inc_unacked(mdev); in recv_resync_read()
1686 spin_lock_irq(&mdev->tconn->req_lock); in recv_resync_read()
1687 list_add(&peer_req->w.list, &mdev->sync_ee); in recv_resync_read()
1688 spin_unlock_irq(&mdev->tconn->req_lock); in recv_resync_read()
1690 atomic_add(data_size >> 9, &mdev->rs_sect_ev); in recv_resync_read()
1691 if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0) in recv_resync_read()
1696 spin_lock_irq(&mdev->tconn->req_lock); in recv_resync_read()
1698 spin_unlock_irq(&mdev->tconn->req_lock); in recv_resync_read()
1700 drbd_free_peer_req(mdev, peer_req); in recv_resync_read()
1702 put_ldev(mdev); in recv_resync_read()
1707 find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id, in find_request() argument
1725 struct drbd_conf *mdev; in receive_DataReply() local
1731 mdev = vnr_to_mdev(tconn, pi->vnr); in receive_DataReply()
1732 if (!mdev) in receive_DataReply()
1737 spin_lock_irq(&mdev->tconn->req_lock); in receive_DataReply()
1738 req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__); in receive_DataReply()
1739 spin_unlock_irq(&mdev->tconn->req_lock); in receive_DataReply()
1746 err = recv_dless_read(mdev, req, sector, pi->size); in receive_DataReply()
1758 struct drbd_conf *mdev; in receive_RSDataReply() local
1763 mdev = vnr_to_mdev(tconn, pi->vnr); in receive_RSDataReply()
1764 if (!mdev) in receive_RSDataReply()
1770 if (get_ldev(mdev)) { in receive_RSDataReply()
1774 err = recv_resync_read(mdev, sector, pi->size); in receive_RSDataReply()
1779 err = drbd_drain_block(mdev, pi->size); in receive_RSDataReply()
1781 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size); in receive_RSDataReply()
1784 atomic_add(pi->size >> 9, &mdev->rs_sect_in); in receive_RSDataReply()
1789 static void restart_conflicting_writes(struct drbd_conf *mdev, in restart_conflicting_writes() argument
1795 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) { in restart_conflicting_writes()
1815 struct drbd_conf *mdev = w->mdev; in e_end_block() local
1821 pcmd = (mdev->state.conn >= C_SYNC_SOURCE && in e_end_block()
1822 mdev->state.conn <= C_PAUSED_SYNC_T && in e_end_block()
1825 err = drbd_send_ack(mdev, pcmd, peer_req); in e_end_block()
1827 drbd_set_in_sync(mdev, sector, peer_req->i.size); in e_end_block()
1829 err = drbd_send_ack(mdev, P_NEG_ACK, peer_req); in e_end_block()
1833 dec_unacked(mdev); in e_end_block()
1838 spin_lock_irq(&mdev->tconn->req_lock); in e_end_block()
1840 drbd_remove_epoch_entry_interval(mdev, peer_req); in e_end_block()
1842 restart_conflicting_writes(mdev, sector, peer_req->i.size); in e_end_block()
1843 spin_unlock_irq(&mdev->tconn->req_lock); in e_end_block()
1847 drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); in e_end_block()
1854 struct drbd_conf *mdev = w->mdev; in e_send_ack() local
1859 err = drbd_send_ack(mdev, ack, peer_req); in e_send_ack()
1860 dec_unacked(mdev); in e_send_ack()
1872 struct drbd_tconn *tconn = w->mdev->tconn; in e_send_retry_write()
1893 static bool need_peer_seq(struct drbd_conf *mdev) in need_peer_seq() argument
1895 struct drbd_tconn *tconn = mdev->tconn; in need_peer_seq()
1905 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries; in need_peer_seq()
1911 static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq) in update_peer_seq() argument
1915 if (need_peer_seq(mdev)) { in update_peer_seq()
1916 spin_lock(&mdev->peer_seq_lock); in update_peer_seq()
1917 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq); in update_peer_seq()
1918 mdev->peer_seq = newest_peer_seq; in update_peer_seq()
1919 spin_unlock(&mdev->peer_seq_lock); in update_peer_seq()
1922 wake_up(&mdev->seq_wait); in update_peer_seq()
1932 static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req) in overlapping_resync_write() argument
1937 spin_lock_irq(&mdev->tconn->req_lock); in overlapping_resync_write()
1938 list_for_each_entry(rs_req, &mdev->sync_ee, w.list) { in overlapping_resync_write()
1945 spin_unlock_irq(&mdev->tconn->req_lock); in overlapping_resync_write()
1971 static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq) in wait_for_and_update_peer_seq() argument
1977 if (!need_peer_seq(mdev)) in wait_for_and_update_peer_seq()
1980 spin_lock(&mdev->peer_seq_lock); in wait_for_and_update_peer_seq()
1982 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) { in wait_for_and_update_peer_seq()
1983 mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq); in wait_for_and_update_peer_seq()
1991 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE); in wait_for_and_update_peer_seq()
1992 spin_unlock(&mdev->peer_seq_lock); in wait_for_and_update_peer_seq()
1994 timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10; in wait_for_and_update_peer_seq()
1997 spin_lock(&mdev->peer_seq_lock); in wait_for_and_update_peer_seq()
2004 spin_unlock(&mdev->peer_seq_lock); in wait_for_and_update_peer_seq()
2005 finish_wait(&mdev->seq_wait, &wait); in wait_for_and_update_peer_seq()
2012 static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf) in wire_flags_to_bio() argument
2020 static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector, in fail_postponed_requests() argument
2026 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) { in fail_postponed_requests()
2037 spin_unlock_irq(&mdev->tconn->req_lock); in fail_postponed_requests()
2039 complete_master_bio(mdev, &m); in fail_postponed_requests()
2040 spin_lock_irq(&mdev->tconn->req_lock); in fail_postponed_requests()
2045 static int handle_write_conflicts(struct drbd_conf *mdev, in handle_write_conflicts() argument
2048 struct drbd_tconn *tconn = mdev->tconn; in handle_write_conflicts()
2060 drbd_insert_interval(&mdev->write_requests, &peer_req->i); in handle_write_conflicts()
2063 drbd_for_each_overlap(i, &mdev->write_requests, sector, size) { in handle_write_conflicts()
2073 err = drbd_wait_misc(mdev, i); in handle_write_conflicts()
2098 inc_unacked(mdev); in handle_write_conflicts()
2101 list_add_tail(&peer_req->w.list, &mdev->done_ee); in handle_write_conflicts()
2102 wake_asender(mdev->tconn); in handle_write_conflicts()
2129 err = drbd_wait_misc(mdev, &req->i); in handle_write_conflicts()
2131 _conn_request_state(mdev->tconn, in handle_write_conflicts()
2134 fail_postponed_requests(mdev, sector, size); in handle_write_conflicts()
2150 drbd_remove_epoch_entry_interval(mdev, peer_req); in handle_write_conflicts()
2157 struct drbd_conf *mdev; in receive_Data() local
2166 mdev = vnr_to_mdev(tconn, pi->vnr); in receive_Data()
2167 if (!mdev) in receive_Data()
2170 if (!get_ldev(mdev)) { in receive_Data()
2173 err = wait_for_and_update_peer_seq(mdev, peer_seq); in receive_Data()
2174 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size); in receive_Data()
2176 err2 = drbd_drain_block(mdev, pi->size); in receive_Data()
2189 peer_req = read_in_block(mdev, p->block_id, sector, pi->size); in receive_Data()
2191 put_ldev(mdev); in receive_Data()
2198 rw |= wire_flags_to_bio(mdev, dp_flags); in receive_Data()
2214 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries; in receive_Data()
2218 err = wait_for_and_update_peer_seq(mdev, peer_seq); in receive_Data()
2221 spin_lock_irq(&mdev->tconn->req_lock); in receive_Data()
2222 err = handle_write_conflicts(mdev, peer_req); in receive_Data()
2224 spin_unlock_irq(&mdev->tconn->req_lock); in receive_Data()
2226 put_ldev(mdev); in receive_Data()
2232 spin_lock_irq(&mdev->tconn->req_lock); in receive_Data()
2233 list_add(&peer_req->w.list, &mdev->active_ee); in receive_Data()
2234 spin_unlock_irq(&mdev->tconn->req_lock); in receive_Data()
2236 if (mdev->state.conn == C_SYNC_TARGET) in receive_Data()
2237 wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req)); in receive_Data()
2239 if (mdev->tconn->agreed_pro_version < 100) { in receive_Data()
2241 switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) { in receive_Data()
2254 inc_unacked(mdev); in receive_Data()
2262 drbd_send_ack(mdev, P_RECV_ACK, peer_req); in receive_Data()
2265 if (mdev->state.pdsk < D_INCONSISTENT) { in receive_Data()
2267 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size); in receive_Data()
2270 drbd_al_begin_io(mdev, &peer_req->i, true); in receive_Data()
2273 err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR); in receive_Data()
2279 spin_lock_irq(&mdev->tconn->req_lock); in receive_Data()
2281 drbd_remove_epoch_entry_interval(mdev, peer_req); in receive_Data()
2282 spin_unlock_irq(&mdev->tconn->req_lock); in receive_Data()
2284 drbd_al_complete_io(mdev, &peer_req->i); in receive_Data()
2288 put_ldev(mdev); in receive_Data()
2289 drbd_free_peer_req(mdev, peer_req); in receive_Data()
2304 int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector) in drbd_rs_should_slow_down() argument
2306 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk; in drbd_rs_should_slow_down()
2314 c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate; in drbd_rs_should_slow_down()
2321 spin_lock_irq(&mdev->al_lock); in drbd_rs_should_slow_down()
2322 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector)); in drbd_rs_should_slow_down()
2326 spin_unlock_irq(&mdev->al_lock); in drbd_rs_should_slow_down()
2331 spin_unlock_irq(&mdev->al_lock); in drbd_rs_should_slow_down()
2335 atomic_read(&mdev->rs_sect_ev); in drbd_rs_should_slow_down()
2337 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) { in drbd_rs_should_slow_down()
2341 mdev->rs_last_events = curr_events; in drbd_rs_should_slow_down()
2345 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; in drbd_rs_should_slow_down()
2347 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) in drbd_rs_should_slow_down()
2348 rs_left = mdev->ov_left; in drbd_rs_should_slow_down()
2350 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; in drbd_rs_should_slow_down()
2352 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; in drbd_rs_should_slow_down()
2355 db = mdev->rs_mark_left[i] - rs_left; in drbd_rs_should_slow_down()
2367 struct drbd_conf *mdev; in receive_DataRequest() local
2376 mdev = vnr_to_mdev(tconn, pi->vnr); in receive_DataRequest()
2377 if (!mdev) in receive_DataRequest()
2379 capacity = drbd_get_capacity(mdev->this_bdev); in receive_DataRequest()
2395 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { in receive_DataRequest()
2399 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p); in receive_DataRequest()
2404 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p); in receive_DataRequest()
2408 dec_rs_pending(mdev); in receive_DataRequest()
2409 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC); in receive_DataRequest()
2419 return drbd_drain_block(mdev, pi->size); in receive_DataRequest()
2425 peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO); in receive_DataRequest()
2427 put_ldev(mdev); in receive_DataRequest()
2442 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); in receive_DataRequest()
2458 if (drbd_recv_all(mdev->tconn, di->digest, pi->size)) in receive_DataRequest()
2462 D_ASSERT(mdev->tconn->agreed_pro_version >= 89); in receive_DataRequest()
2465 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); in receive_DataRequest()
2468 atomic_add(size >> 9, &mdev->rs_sect_in); in receive_DataRequest()
2470 dec_rs_pending(mdev); in receive_DataRequest()
2478 if (mdev->ov_start_sector == ~(sector_t)0 && in receive_DataRequest()
2479 mdev->tconn->agreed_pro_version >= 90) { in receive_DataRequest()
2482 mdev->ov_start_sector = sector; in receive_DataRequest()
2483 mdev->ov_position = sector; in receive_DataRequest()
2484 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector); in receive_DataRequest()
2485 mdev->rs_total = mdev->ov_left; in receive_DataRequest()
2487 mdev->rs_mark_left[i] = mdev->ov_left; in receive_DataRequest()
2488 mdev->rs_mark_time[i] = now; in receive_DataRequest()
2523 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector)) in receive_DataRequest()
2525 if (drbd_rs_begin_io(mdev, sector)) in receive_DataRequest()
2529 atomic_add(size >> 9, &mdev->rs_sect_ev); in receive_DataRequest()
2532 inc_unacked(mdev); in receive_DataRequest()
2533 spin_lock_irq(&mdev->tconn->req_lock); in receive_DataRequest()
2534 list_add_tail(&peer_req->w.list, &mdev->read_ee); in receive_DataRequest()
2535 spin_unlock_irq(&mdev->tconn->req_lock); in receive_DataRequest()
2537 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0) in receive_DataRequest()
2542 spin_lock_irq(&mdev->tconn->req_lock); in receive_DataRequest()
2544 spin_unlock_irq(&mdev->tconn->req_lock); in receive_DataRequest()
2548 put_ldev(mdev); in receive_DataRequest()
2549 drbd_free_peer_req(mdev, peer_req); in receive_DataRequest()
2553 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) in drbd_asb_recover_0p() argument
2559 self = mdev->ldev->md.uuid[UI_BITMAP] & 1; in drbd_asb_recover_0p()
2560 peer = mdev->p_uuid[UI_BITMAP] & 1; in drbd_asb_recover_0p()
2562 ch_peer = mdev->p_uuid[UI_SIZE]; in drbd_asb_recover_0p()
2563 ch_self = mdev->comm_bm_set; in drbd_asb_recover_0p()
2566 after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p; in drbd_asb_recover_0p()
2601 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) in drbd_asb_recover_0p()
2617 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) in drbd_asb_recover_0p()
2630 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) in drbd_asb_recover_1p() argument
2636 after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p; in drbd_asb_recover_1p()
2650 hg = drbd_asb_recover_0p(mdev); in drbd_asb_recover_1p()
2651 if (hg == -1 && mdev->state.role == R_SECONDARY) in drbd_asb_recover_1p()
2653 if (hg == 1 && mdev->state.role == R_PRIMARY) in drbd_asb_recover_1p()
2657 rv = drbd_asb_recover_0p(mdev); in drbd_asb_recover_1p()
2660 return mdev->state.role == R_PRIMARY ? 1 : -1; in drbd_asb_recover_1p()
2662 hg = drbd_asb_recover_0p(mdev); in drbd_asb_recover_1p()
2663 if (hg == -1 && mdev->state.role == R_PRIMARY) { in drbd_asb_recover_1p()
2669 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); in drbd_asb_recover_1p()
2671 drbd_khelper(mdev, "pri-lost-after-sb"); in drbd_asb_recover_1p()
2683 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) in drbd_asb_recover_2p() argument
2689 after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p; in drbd_asb_recover_2p()
2703 rv = drbd_asb_recover_0p(mdev); in drbd_asb_recover_2p()
2708 hg = drbd_asb_recover_0p(mdev); in drbd_asb_recover_2p()
2715 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); in drbd_asb_recover_2p()
2717 drbd_khelper(mdev, "pri-lost-after-sb"); in drbd_asb_recover_2p()
2729 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid, in drbd_uuid_dump() argument
2758 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) in drbd_uuid_compare() argument
2763 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); in drbd_uuid_compare()
2764 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); in drbd_uuid_compare()
2783 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { in drbd_uuid_compare()
2785 if (mdev->tconn->agreed_pro_version < 91) in drbd_uuid_compare()
2788 …if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && in drbd_uuid_compare()
2789 …(mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u6… in drbd_uuid_compare()
2791 drbd_uuid_move_history(mdev); in drbd_uuid_compare()
2792 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; in drbd_uuid_compare()
2793 mdev->ldev->md.uuid[UI_BITMAP] = 0; in drbd_uuid_compare()
2795 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, in drbd_uuid_compare()
2796 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); in drbd_uuid_compare()
2806 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { in drbd_uuid_compare()
2808 if (mdev->tconn->agreed_pro_version < 91) in drbd_uuid_compare()
2811 …if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && in drbd_uuid_compare()
2812 …(mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u6… in drbd_uuid_compare()
2815 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START]; in drbd_uuid_compare()
2816 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP]; in drbd_uuid_compare()
2817 mdev->p_uuid[UI_BITMAP] = 0UL; in drbd_uuid_compare()
2819 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); in drbd_uuid_compare()
2830 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) + in drbd_uuid_compare()
2831 (mdev->p_uuid[UI_FLAGS] & 2); in drbd_uuid_compare()
2841 dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags); in drbd_uuid_compare()
2847 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1); in drbd_uuid_compare()
2852 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); in drbd_uuid_compare()
2854 if (mdev->tconn->agreed_pro_version < 96 ? in drbd_uuid_compare()
2855 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == in drbd_uuid_compare()
2856 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : in drbd_uuid_compare()
2857 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) { in drbd_uuid_compare()
2861 if (mdev->tconn->agreed_pro_version < 91) in drbd_uuid_compare()
2864 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START]; in drbd_uuid_compare()
2865 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1]; in drbd_uuid_compare()
2868 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); in drbd_uuid_compare()
2875 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); in drbd_uuid_compare()
2877 peer = mdev->p_uuid[i] & ~((u64)1); in drbd_uuid_compare()
2883 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1); in drbd_uuid_compare()
2884 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); in drbd_uuid_compare()
2889 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); in drbd_uuid_compare()
2891 if (mdev->tconn->agreed_pro_version < 96 ? in drbd_uuid_compare()
2892 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == in drbd_uuid_compare()
2893 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) : in drbd_uuid_compare()
2894 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { in drbd_uuid_compare()
2898 if (mdev->tconn->agreed_pro_version < 91) in drbd_uuid_compare()
2901 __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]); in drbd_uuid_compare()
2902 __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]); in drbd_uuid_compare()
2905 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, in drbd_uuid_compare()
2906 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); in drbd_uuid_compare()
2914 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); in drbd_uuid_compare()
2916 self = mdev->ldev->md.uuid[i] & ~((u64)1); in drbd_uuid_compare()
2922 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1); in drbd_uuid_compare()
2923 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1); in drbd_uuid_compare()
2929 self = mdev->ldev->md.uuid[i] & ~((u64)1); in drbd_uuid_compare()
2931 peer = mdev->p_uuid[j] & ~((u64)1); in drbd_uuid_compare()
2943 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role, in drbd_sync_handshake() argument
2951 mydisk = mdev->state.disk; in drbd_sync_handshake()
2953 mydisk = mdev->new_state_tmp.disk; in drbd_sync_handshake()
2957 spin_lock_irq(&mdev->ldev->md.uuid_lock); in drbd_sync_handshake()
2958 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0); in drbd_sync_handshake()
2959 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, in drbd_sync_handshake()
2960 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); in drbd_sync_handshake()
2962 hg = drbd_uuid_compare(mdev, &rule_nr); in drbd_sync_handshake()
2963 spin_unlock_irq(&mdev->ldev->md.uuid_lock); in drbd_sync_handshake()
2987 drbd_khelper(mdev, "initial-split-brain"); in drbd_sync_handshake()
2990 nc = rcu_dereference(mdev->tconn->net_conf); in drbd_sync_handshake()
2993 int pcount = (mdev->state.role == R_PRIMARY) in drbd_sync_handshake()
2999 hg = drbd_asb_recover_0p(mdev); in drbd_sync_handshake()
3002 hg = drbd_asb_recover_1p(mdev); in drbd_sync_handshake()
3005 hg = drbd_asb_recover_2p(mdev); in drbd_sync_handshake()
3021 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1)) in drbd_sync_handshake()
3023 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1)) in drbd_sync_handshake()
3041 drbd_khelper(mdev, "split-brain"); in drbd_sync_handshake()
3051 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) { in drbd_sync_handshake()
3054 drbd_khelper(mdev, "pri-lost"); in drbd_sync_handshake()
3065 if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) { in drbd_sync_handshake()
3077 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake", in drbd_sync_handshake()
3088 if (drbd_bm_total_weight(mdev)) { in drbd_sync_handshake()
3090 drbd_bm_total_weight(mdev)); in drbd_sync_handshake()
3266 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev, in drbd_crypto_alloc_digest_safe() argument
3323 struct drbd_conf *mdev; in receive_SyncParam() local
3335 mdev = vnr_to_mdev(tconn, pi->vnr); in receive_SyncParam()
3336 if (!mdev) in receive_SyncParam()
3368 err = drbd_recv_all(mdev->tconn, p, header_size); in receive_SyncParam()
3372 mutex_lock(&mdev->tconn->conf_update); in receive_SyncParam()
3373 old_net_conf = mdev->tconn->net_conf; in receive_SyncParam()
3374 if (get_ldev(mdev)) { in receive_SyncParam()
3377 put_ldev(mdev); in receive_SyncParam()
3378 mutex_unlock(&mdev->tconn->conf_update); in receive_SyncParam()
3383 old_disk_conf = mdev->ldev->disk_conf; in receive_SyncParam()
3399 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size); in receive_SyncParam()
3417 if (mdev->state.conn == C_WF_REPORT_PARAMS) { in receive_SyncParam()
3422 verify_tfm = drbd_crypto_alloc_digest_safe(mdev, in receive_SyncParam()
3431 if (mdev->state.conn == C_WF_REPORT_PARAMS) { in receive_SyncParam()
3436 csums_tfm = drbd_crypto_alloc_digest_safe(mdev, in receive_SyncParam()
3451 if (fifo_size != mdev->rs_plan_s->size) { in receive_SyncParam()
3455 put_ldev(mdev); in receive_SyncParam()
3473 crypto_free_hash(mdev->tconn->verify_tfm); in receive_SyncParam()
3474 mdev->tconn->verify_tfm = verify_tfm; in receive_SyncParam()
3480 crypto_free_hash(mdev->tconn->csums_tfm); in receive_SyncParam()
3481 mdev->tconn->csums_tfm = csums_tfm; in receive_SyncParam()
3489 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf); in receive_SyncParam()
3490 put_ldev(mdev); in receive_SyncParam()
3494 old_plan = mdev->rs_plan_s; in receive_SyncParam()
3495 rcu_assign_pointer(mdev->rs_plan_s, new_plan); in receive_SyncParam()
3498 mutex_unlock(&mdev->tconn->conf_update); in receive_SyncParam()
3509 put_ldev(mdev); in receive_SyncParam()
3512 mutex_unlock(&mdev->tconn->conf_update); in receive_SyncParam()
3518 put_ldev(mdev); in receive_SyncParam()
3521 mutex_unlock(&mdev->tconn->conf_update); in receive_SyncParam()
3527 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD); in receive_SyncParam()
3532 static void warn_if_differ_considerably(struct drbd_conf *mdev, in warn_if_differ_considerably() argument
3546 struct drbd_conf *mdev; in receive_sizes() local
3553 mdev = vnr_to_mdev(tconn, pi->vnr); in receive_sizes()
3554 if (!mdev) in receive_sizes()
3562 mdev->p_size = p_size; in receive_sizes()
3564 if (get_ldev(mdev)) { in receive_sizes()
3566 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size; in receive_sizes()
3569 warn_if_differ_considerably(mdev, "lower level device sizes", in receive_sizes()
3570 p_size, drbd_get_max_capacity(mdev->ldev)); in receive_sizes()
3571 warn_if_differ_considerably(mdev, "user requested size", in receive_sizes()
3576 if (mdev->state.conn == C_WF_REPORT_PARAMS) in receive_sizes()
3581 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) < in receive_sizes()
3582 drbd_get_capacity(mdev->this_bdev) && in receive_sizes()
3583 mdev->state.disk >= D_OUTDATED && in receive_sizes()
3584 mdev->state.conn < C_CONNECTED) { in receive_sizes()
3586 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD); in receive_sizes()
3587 put_ldev(mdev); in receive_sizes()
3597 put_ldev(mdev); in receive_sizes()
3601 mutex_lock(&mdev->tconn->conf_update); in receive_sizes()
3602 old_disk_conf = mdev->ldev->disk_conf; in receive_sizes()
3606 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf); in receive_sizes()
3607 mutex_unlock(&mdev->tconn->conf_update); in receive_sizes()
3615 put_ldev(mdev); in receive_sizes()
3619 if (get_ldev(mdev)) { in receive_sizes()
3620 dd = drbd_determine_dev_size(mdev, ddsf); in receive_sizes()
3621 put_ldev(mdev); in receive_sizes()
3624 drbd_md_sync(mdev); in receive_sizes()
3627 drbd_set_my_capacity(mdev, p_size); in receive_sizes()
3630 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size); in receive_sizes()
3631 drbd_reconsider_max_bio_size(mdev); in receive_sizes()
3633 if (get_ldev(mdev)) { in receive_sizes()
3634 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { in receive_sizes()
3635 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); in receive_sizes()
3639 put_ldev(mdev); in receive_sizes()
3642 if (mdev->state.conn > C_WF_REPORT_PARAMS) { in receive_sizes()
3644 drbd_get_capacity(mdev->this_bdev) || ldsc) { in receive_sizes()
3647 drbd_send_sizes(mdev, 0, ddsf); in receive_sizes()
3649 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) || in receive_sizes()
3650 (dd == grew && mdev->state.conn == C_CONNECTED)) { in receive_sizes()
3651 if (mdev->state.pdsk >= D_INCONSISTENT && in receive_sizes()
3652 mdev->state.disk >= D_INCONSISTENT) { in receive_sizes()
3656 resync_after_online_grow(mdev); in receive_sizes()
3658 set_bit(RESYNC_AFTER_NEG, &mdev->flags); in receive_sizes()
3667 struct drbd_conf *mdev; in receive_uuids() local
3672 mdev = vnr_to_mdev(tconn, pi->vnr); in receive_uuids()
3673 if (!mdev) in receive_uuids()
3685 kfree(mdev->p_uuid); in receive_uuids()
3686 mdev->p_uuid = p_uuid; in receive_uuids()
3688 if (mdev->state.conn < C_CONNECTED && in receive_uuids()
3689 mdev->state.disk < D_INCONSISTENT && in receive_uuids()
3690 mdev->state.role == R_PRIMARY && in receive_uuids()
3691 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { in receive_uuids()
3693 (unsigned long long)mdev->ed_uuid); in receive_uuids()
3694 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD); in receive_uuids()
3698 if (get_ldev(mdev)) { in receive_uuids()
3700 mdev->state.conn == C_CONNECTED && in receive_uuids()
3701 mdev->tconn->agreed_pro_version >= 90 && in receive_uuids()
3702 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && in receive_uuids()
3706 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, in receive_uuids()
3709 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); in receive_uuids()
3710 _drbd_uuid_set(mdev, UI_BITMAP, 0); in receive_uuids()
3711 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), in receive_uuids()
3713 drbd_md_sync(mdev); in receive_uuids()
3716 put_ldev(mdev); in receive_uuids()
3717 } else if (mdev->state.disk < D_INCONSISTENT && in receive_uuids()
3718 mdev->state.role == R_PRIMARY) { in receive_uuids()
3721 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); in receive_uuids()
3728 mutex_lock(mdev->state_mutex); in receive_uuids()
3729 mutex_unlock(mdev->state_mutex); in receive_uuids()
3730 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) in receive_uuids()
3731 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); in receive_uuids()
3734 drbd_print_uuids(mdev, "receiver updated UUIDs to"); in receive_uuids()
3772 struct drbd_conf *mdev; in receive_req_state() local
3777 mdev = vnr_to_mdev(tconn, pi->vnr); in receive_req_state()
3778 if (!mdev) in receive_req_state()
3784 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) && in receive_req_state()
3785 mutex_is_locked(mdev->state_mutex)) { in receive_req_state()
3786 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); in receive_req_state()
3793 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val); in receive_req_state()
3794 drbd_send_sr_reply(mdev, rv); in receive_req_state()
3796 drbd_md_sync(mdev); in receive_req_state()
3827 struct drbd_conf *mdev; in receive_state() local
3834 mdev = vnr_to_mdev(tconn, pi->vnr); in receive_state()
3835 if (!mdev) in receive_state()
3842 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT; in receive_state()
3846 spin_lock_irq(&mdev->tconn->req_lock); in receive_state()
3848 os = ns = drbd_read_state(mdev); in receive_state()
3849 spin_unlock_irq(&mdev->tconn->req_lock); in receive_state()
3881 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) in receive_state()
3882 drbd_resync_finished(mdev); in receive_state()
3890 ov_out_of_sync_print(mdev); in receive_state()
3891 drbd_resync_finished(mdev); in receive_state()
3910 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && in receive_state()
3911 get_ldev_if_state(mdev, D_NEGOTIATING)) { in receive_state()
3923 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags); in receive_state()
3931 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk); in receive_state()
3933 put_ldev(mdev); in receive_state()
3936 if (mdev->state.disk == D_NEGOTIATING) { in receive_state()
3937 drbd_force_state(mdev, NS(disk, D_FAILED)); in receive_state()
3943 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags)) in receive_state()
3946 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD); in receive_state()
3952 spin_lock_irq(&mdev->tconn->req_lock); in receive_state()
3953 if (os.i != drbd_read_state(mdev).i) in receive_state()
3955 clear_bit(CONSIDER_RESYNC, &mdev->flags); in receive_state()
3960 ns.disk = mdev->new_state_tmp.disk; in receive_state()
3962 …if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNE… in receive_state()
3963 test_bit(NEW_CUR_UUID, &mdev->flags)) { in receive_state()
3966 spin_unlock_irq(&mdev->tconn->req_lock); in receive_state()
3968 tl_clear(mdev->tconn); in receive_state()
3969 drbd_uuid_new_current(mdev); in receive_state()
3970 clear_bit(NEW_CUR_UUID, &mdev->flags); in receive_state()
3971 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD); in receive_state()
3974 rv = _drbd_set_state(mdev, ns, cs_flags, NULL); in receive_state()
3975 ns = drbd_read_state(mdev); in receive_state()
3976 spin_unlock_irq(&mdev->tconn->req_lock); in receive_state()
3979 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD); in receive_state()
3989 drbd_send_uuids(mdev); in receive_state()
3990 drbd_send_current_state(mdev); in receive_state()
3994 clear_bit(DISCARD_MY_DATA, &mdev->flags); in receive_state()
3996 drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */ in receive_state()
4003 struct drbd_conf *mdev; in receive_sync_uuid() local
4006 mdev = vnr_to_mdev(tconn, pi->vnr); in receive_sync_uuid()
4007 if (!mdev) in receive_sync_uuid()
4010 wait_event(mdev->misc_wait, in receive_sync_uuid()
4011 mdev->state.conn == C_WF_SYNC_UUID || in receive_sync_uuid()
4012 mdev->state.conn == C_BEHIND || in receive_sync_uuid()
4013 mdev->state.conn < C_CONNECTED || in receive_sync_uuid()
4014 mdev->state.disk < D_NEGOTIATING); in receive_sync_uuid()
4020 if (get_ldev_if_state(mdev, D_NEGOTIATING)) { in receive_sync_uuid()
4021 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); in receive_sync_uuid()
4022 _drbd_uuid_set(mdev, UI_BITMAP, 0UL); in receive_sync_uuid()
4024 drbd_print_uuids(mdev, "updated sync uuid"); in receive_sync_uuid()
4025 drbd_start_resync(mdev, C_SYNC_TARGET); in receive_sync_uuid()
4027 put_ldev(mdev); in receive_sync_uuid()
4041 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size, in receive_bitmap_plain() argument
4045 drbd_header_size(mdev->tconn); in receive_bitmap_plain()
4057 err = drbd_recv_all(mdev->tconn, p, want); in receive_bitmap_plain()
4061 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p); in receive_bitmap_plain()
4093 recv_bm_rle_bits(struct drbd_conf *mdev, in recv_bm_rle_bits() argument
4125 _drbd_bm_set_bits(mdev, s, e); in recv_bm_rle_bits()
4158 decode_bitmap_c(struct drbd_conf *mdev, in decode_bitmap_c() argument
4164 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p)); in decode_bitmap_c()
4171 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD); in decode_bitmap_c()
4175 void INFO_bm_xfer_stats(struct drbd_conf *mdev, in INFO_bm_xfer_stats() argument
4179 unsigned int header_size = drbd_header_size(mdev->tconn); in INFO_bm_xfer_stats()
4221 struct drbd_conf *mdev; in receive_bitmap() local
4225 mdev = vnr_to_mdev(tconn, pi->vnr); in receive_bitmap()
4226 if (!mdev) in receive_bitmap()
4229 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED); in receive_bitmap()
4234 .bm_bits = drbd_bm_bits(mdev), in receive_bitmap()
4235 .bm_words = drbd_bm_words(mdev), in receive_bitmap()
4240 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c); in receive_bitmap()
4256 err = drbd_recv_all(mdev->tconn, p, pi->size); in receive_bitmap()
4259 err = decode_bitmap_c(mdev, p, &c, pi->size); in receive_bitmap()
4274 err = drbd_recv_header(mdev->tconn, pi); in receive_bitmap()
4279 INFO_bm_xfer_stats(mdev, "receive", &c); in receive_bitmap()
4281 if (mdev->state.conn == C_WF_BITMAP_T) { in receive_bitmap()
4284 err = drbd_send_bitmap(mdev); in receive_bitmap()
4288 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); in receive_bitmap()
4290 } else if (mdev->state.conn != C_WF_BITMAP_S) { in receive_bitmap()
4294 drbd_conn_str(mdev->state.conn)); in receive_bitmap()
4299 drbd_bm_unlock(mdev); in receive_bitmap()
4300 if (!err && mdev->state.conn == C_WF_BITMAP_S) in receive_bitmap()
4301 drbd_start_resync(mdev, C_SYNC_SOURCE); in receive_bitmap()
4324 struct drbd_conf *mdev; in receive_out_of_sync() local
4327 mdev = vnr_to_mdev(tconn, pi->vnr); in receive_out_of_sync()
4328 if (!mdev) in receive_out_of_sync()
4331 switch (mdev->state.conn) { in receive_out_of_sync()
4338 drbd_conn_str(mdev->state.conn)); in receive_out_of_sync()
4341 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); in receive_out_of_sync()
4439 struct drbd_conf *mdev; in conn_disconnect() local
4458 idr_for_each_entry(&tconn->volumes, mdev, vnr) { in conn_disconnect()
4459 kref_get(&mdev->kref); in conn_disconnect()
4461 drbd_disconnected(mdev); in conn_disconnect()
4462 kref_put(&mdev->kref, &drbd_minor_destroy); in conn_disconnect()
4489 static int drbd_disconnected(struct drbd_conf *mdev) in drbd_disconnected() argument
4494 spin_lock_irq(&mdev->tconn->req_lock); in drbd_disconnected()
4495 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); in drbd_disconnected()
4496 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee); in drbd_disconnected()
4497 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee); in drbd_disconnected()
4498 spin_unlock_irq(&mdev->tconn->req_lock); in drbd_disconnected()
4510 drbd_rs_cancel_all(mdev); in drbd_disconnected()
4511 mdev->rs_total = 0; in drbd_disconnected()
4512 mdev->rs_failed = 0; in drbd_disconnected()
4513 atomic_set(&mdev->rs_pending_cnt, 0); in drbd_disconnected()
4514 wake_up(&mdev->misc_wait); in drbd_disconnected()
4516 del_timer_sync(&mdev->resync_timer); in drbd_disconnected()
4517 resync_timer_fn((unsigned long)mdev); in drbd_disconnected()
4522 drbd_flush_workqueue(mdev); in drbd_disconnected()
4524 drbd_finish_peer_reqs(mdev); in drbd_disconnected()
4529 drbd_flush_workqueue(mdev); in drbd_disconnected()
4533 drbd_rs_cancel_all(mdev); in drbd_disconnected()
4535 kfree(mdev->p_uuid); in drbd_disconnected()
4536 mdev->p_uuid = NULL; in drbd_disconnected()
4538 if (!drbd_suspended(mdev)) in drbd_disconnected()
4539 tl_clear(mdev->tconn); in drbd_disconnected()
4541 drbd_md_sync(mdev); in drbd_disconnected()
4545 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); in drbd_disconnected()
4554 i = drbd_free_peer_reqs(mdev, &mdev->net_ee); in drbd_disconnected()
4557 i = atomic_read(&mdev->pp_in_use_by_net); in drbd_disconnected()
4560 i = atomic_read(&mdev->pp_in_use); in drbd_disconnected()
4564 D_ASSERT(list_empty(&mdev->read_ee)); in drbd_disconnected()
4565 D_ASSERT(list_empty(&mdev->active_ee)); in drbd_disconnected()
4566 D_ASSERT(list_empty(&mdev->sync_ee)); in drbd_disconnected()
4567 D_ASSERT(list_empty(&mdev->done_ee)); in drbd_disconnected()
4888 struct drbd_conf *mdev; in got_RqSReply() local
4892 mdev = vnr_to_mdev(tconn, pi->vnr); in got_RqSReply()
4893 if (!mdev) in got_RqSReply()
4902 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags); in got_RqSReply()
4904 set_bit(CL_ST_CHG_FAIL, &mdev->flags); in got_RqSReply()
4908 wake_up(&mdev->state_wait); in got_RqSReply()
4931 struct drbd_conf *mdev; in got_IsInSync() local
4936 mdev = vnr_to_mdev(tconn, pi->vnr); in got_IsInSync()
4937 if (!mdev) in got_IsInSync()
4940 D_ASSERT(mdev->tconn->agreed_pro_version >= 89); in got_IsInSync()
4942 update_peer_seq(mdev, be32_to_cpu(p->seq_num)); in got_IsInSync()
4944 if (get_ldev(mdev)) { in got_IsInSync()
4945 drbd_rs_complete_io(mdev, sector); in got_IsInSync()
4946 drbd_set_in_sync(mdev, sector, blksize); in got_IsInSync()
4948 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT); in got_IsInSync()
4949 put_ldev(mdev); in got_IsInSync()
4951 dec_rs_pending(mdev); in got_IsInSync()
4952 atomic_add(blksize >> 9, &mdev->rs_sect_in); in got_IsInSync()
4958 validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector, in validate_req_change_req_state() argument
4965 spin_lock_irq(&mdev->tconn->req_lock); in validate_req_change_req_state()
4966 req = find_request(mdev, root, id, sector, missing_ok, func); in validate_req_change_req_state()
4968 spin_unlock_irq(&mdev->tconn->req_lock); in validate_req_change_req_state()
4972 spin_unlock_irq(&mdev->tconn->req_lock); in validate_req_change_req_state()
4975 complete_master_bio(mdev, &m); in validate_req_change_req_state()
4981 struct drbd_conf *mdev; in got_BlockAck() local
4987 mdev = vnr_to_mdev(tconn, pi->vnr); in got_BlockAck()
4988 if (!mdev) in got_BlockAck()
4991 update_peer_seq(mdev, be32_to_cpu(p->seq_num)); in got_BlockAck()
4994 drbd_set_in_sync(mdev, sector, blksize); in got_BlockAck()
4995 dec_rs_pending(mdev); in got_BlockAck()
5018 return validate_req_change_req_state(mdev, p->block_id, sector, in got_BlockAck()
5019 &mdev->write_requests, __func__, in got_BlockAck()
5025 struct drbd_conf *mdev; in got_NegAck() local
5031 mdev = vnr_to_mdev(tconn, pi->vnr); in got_NegAck()
5032 if (!mdev) in got_NegAck()
5035 update_peer_seq(mdev, be32_to_cpu(p->seq_num)); in got_NegAck()
5038 dec_rs_pending(mdev); in got_NegAck()
5039 drbd_rs_failed_io(mdev, sector, size); in got_NegAck()
5043 err = validate_req_change_req_state(mdev, p->block_id, sector, in got_NegAck()
5044 &mdev->write_requests, __func__, in got_NegAck()
5052 drbd_set_out_of_sync(mdev, sector, size); in got_NegAck()
5059 struct drbd_conf *mdev; in got_NegDReply() local
5063 mdev = vnr_to_mdev(tconn, pi->vnr); in got_NegDReply()
5064 if (!mdev) in got_NegDReply()
5067 update_peer_seq(mdev, be32_to_cpu(p->seq_num)); in got_NegDReply()
5072 return validate_req_change_req_state(mdev, p->block_id, sector, in got_NegDReply()
5073 &mdev->read_requests, __func__, in got_NegDReply()
5079 struct drbd_conf *mdev; in got_NegRSDReply() local
5084 mdev = vnr_to_mdev(tconn, pi->vnr); in got_NegRSDReply()
5085 if (!mdev) in got_NegRSDReply()
5091 update_peer_seq(mdev, be32_to_cpu(p->seq_num)); in got_NegRSDReply()
5093 dec_rs_pending(mdev); in got_NegRSDReply()
5095 if (get_ldev_if_state(mdev, D_FAILED)) { in got_NegRSDReply()
5096 drbd_rs_complete_io(mdev, sector); in got_NegRSDReply()
5099 drbd_rs_failed_io(mdev, sector, size); in got_NegRSDReply()
5105 put_ldev(mdev); in got_NegRSDReply()
5114 struct drbd_conf *mdev; in got_BarrierAck() local
5120 idr_for_each_entry(&tconn->volumes, mdev, vnr) { in got_BarrierAck()
5121 if (mdev->state.conn == C_AHEAD && in got_BarrierAck()
5122 atomic_read(&mdev->ap_in_flight) == 0 && in got_BarrierAck()
5123 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) { in got_BarrierAck()
5124 mdev->start_resync_timer.expires = jiffies + HZ; in got_BarrierAck()
5125 add_timer(&mdev->start_resync_timer); in got_BarrierAck()
5135 struct drbd_conf *mdev; in got_OVResult() local
5141 mdev = vnr_to_mdev(tconn, pi->vnr); in got_OVResult()
5142 if (!mdev) in got_OVResult()
5148 update_peer_seq(mdev, be32_to_cpu(p->seq_num)); in got_OVResult()
5151 drbd_ov_out_of_sync_found(mdev, sector, size); in got_OVResult()
5153 ov_out_of_sync_print(mdev); in got_OVResult()
5155 if (!get_ldev(mdev)) in got_OVResult()
5158 drbd_rs_complete_io(mdev, sector); in got_OVResult()
5159 dec_rs_pending(mdev); in got_OVResult()
5161 --mdev->ov_left; in got_OVResult()
5164 if ((mdev->ov_left & 0x200) == 0x200) in got_OVResult()
5165 drbd_advance_rs_marks(mdev, mdev->ov_left); in got_OVResult()
5167 if (mdev->ov_left == 0) { in got_OVResult()
5171 w->mdev = mdev; in got_OVResult()
5172 drbd_queue_work(&mdev->tconn->sender_work, w); in got_OVResult()
5175 ov_out_of_sync_print(mdev); in got_OVResult()
5176 drbd_resync_finished(mdev); in got_OVResult()
5179 put_ldev(mdev); in got_OVResult()
5190 struct drbd_conf *mdev; in tconn_finish_peer_reqs() local
5198 idr_for_each_entry(&tconn->volumes, mdev, vnr) { in tconn_finish_peer_reqs()
5199 kref_get(&mdev->kref); in tconn_finish_peer_reqs()
5201 if (drbd_finish_peer_reqs(mdev)) { in tconn_finish_peer_reqs()
5202 kref_put(&mdev->kref, &drbd_minor_destroy); in tconn_finish_peer_reqs()
5205 kref_put(&mdev->kref, &drbd_minor_destroy); in tconn_finish_peer_reqs()
5211 idr_for_each_entry(&tconn->volumes, mdev, vnr) { in tconn_finish_peer_reqs()
5212 not_empty = !list_empty(&mdev->done_ee); in tconn_finish_peer_reqs()