Lines matching refs: mdev
169 int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins) in _get_ldev_if_state() argument
173 atomic_inc(&mdev->local_cnt); in _get_ldev_if_state()
174 io_allowed = (mdev->state.disk >= mins); in _get_ldev_if_state()
176 if (atomic_dec_and_test(&mdev->local_cnt)) in _get_ldev_if_state()
177 wake_up(&mdev->misc_wait); in _get_ldev_if_state()
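
The _get_ldev_if_state() fragments above show the local-device pinning pattern: local_cnt is raised before the disk state is sampled, and if the state is below the requested minimum the count is dropped again, waking misc_wait when the last reference goes away. A reconstruction of that helper from the fragments; the if (!io_allowed) structure and the declarations are inferred:

    int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
    {
        int io_allowed;

        atomic_inc(&mdev->local_cnt);            /* pin before looking at the state */
        io_allowed = (mdev->state.disk >= mins);
        if (!io_allowed) {
            /* state too low: undo the pin; the last dropper wakes waiters */
            if (atomic_dec_and_test(&mdev->local_cnt))
                wake_up(&mdev->misc_wait);
        }
        return io_allowed;
    }
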
310 void tl_abort_disk_io(struct drbd_conf *mdev) in tl_abort_disk_io() argument
312 struct drbd_tconn *tconn = mdev->tconn; in tl_abort_disk_io()
319 if (req->w.mdev != mdev) in tl_abort_disk_io()
499 struct drbd_conf *mdev; in conn_lowest_minor() local
503 mdev = idr_get_next(&tconn->volumes, &vnr); in conn_lowest_minor()
504 m = mdev ? mdev_to_minor(mdev) : -1; in conn_lowest_minor()
635 void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock) in drbd_prepare_command() argument
637 return conn_prepare_command(mdev->tconn, sock); in drbd_prepare_command()
684 int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock, in drbd_send_command() argument
690 err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size, in drbd_send_command()
716 int drbd_send_sync_param(struct drbd_conf *mdev) in drbd_send_sync_param() argument
721 const int apv = mdev->tconn->agreed_pro_version; in drbd_send_sync_param()
726 sock = &mdev->tconn->data; in drbd_send_sync_param()
727 p = drbd_prepare_command(mdev, sock); in drbd_send_sync_param()
732 nc = rcu_dereference(mdev->tconn->net_conf); in drbd_send_sync_param()
745 if (get_ldev(mdev)) { in drbd_send_sync_param()
746 dc = rcu_dereference(mdev->ldev->disk_conf); in drbd_send_sync_param()
752 put_ldev(mdev); in drbd_send_sync_param()
767 return drbd_send_command(mdev, sock, cmd, size, NULL, 0); in drbd_send_sync_param()
826 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags) in _drbd_send_uuids() argument
832 if (!get_ldev_if_state(mdev, D_NEGOTIATING)) in _drbd_send_uuids()
835 sock = &mdev->tconn->data; in _drbd_send_uuids()
836 p = drbd_prepare_command(mdev, sock); in _drbd_send_uuids()
838 put_ldev(mdev); in _drbd_send_uuids()
841 spin_lock_irq(&mdev->ldev->md.uuid_lock); in _drbd_send_uuids()
843 p->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]); in _drbd_send_uuids()
844 spin_unlock_irq(&mdev->ldev->md.uuid_lock); in _drbd_send_uuids()
846 mdev->comm_bm_set = drbd_bm_total_weight(mdev); in _drbd_send_uuids()
847 p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set); in _drbd_send_uuids()
849 uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0; in _drbd_send_uuids()
851 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0; in _drbd_send_uuids()
852 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0; in _drbd_send_uuids()
855 put_ldev(mdev); in _drbd_send_uuids()
856 return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0); in _drbd_send_uuids()
859 int drbd_send_uuids(struct drbd_conf *mdev) in drbd_send_uuids() argument
861 return _drbd_send_uuids(mdev, 0); in drbd_send_uuids()
864 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev) in drbd_send_uuids_skip_initial_sync() argument
866 return _drbd_send_uuids(mdev, 8); in drbd_send_uuids_skip_initial_sync()
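
_drbd_send_uuids() packs a few hints into its uuid_flags bitmask: the literal 1 comes from net_conf->discard_my_data, 2 from the CRASHED_PRIMARY device flag, 4 when new_state_tmp.disk is D_INCONSISTENT, and the 8 passed by drbd_send_uuids_skip_initial_sync() marks a skipped initial sync (plain drbd_send_uuids() passes 0). An illustrative enum for those bits; the numeric values come from the lines above, the identifier names are made up here:

    /* Hypothetical names for the uuid_flags bits; values as seen above. */
    enum example_uuid_flags {
        UUID_FLAG_DISCARD_MY_DATA   = 1, /* net_conf->discard_my_data set */
        UUID_FLAG_CRASHED_PRIMARY   = 2, /* CRASHED_PRIMARY device flag set */
        UUID_FLAG_INCONSISTENT_DISK = 4, /* new_state_tmp.disk == D_INCONSISTENT */
        UUID_FLAG_SKIP_INITIAL_SYNC = 8, /* drbd_send_uuids_skip_initial_sync() */
    };
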
869 void drbd_print_uuids(struct drbd_conf *mdev, const char *text) in drbd_print_uuids() argument
871 if (get_ldev_if_state(mdev, D_NEGOTIATING)) { in drbd_print_uuids()
872 u64 *uuid = mdev->ldev->md.uuid; in drbd_print_uuids()
879 put_ldev(mdev); in drbd_print_uuids()
883 (unsigned long long)mdev->ed_uuid); in drbd_print_uuids()
887 void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev) in drbd_gen_and_send_sync_uuid() argument
893 D_ASSERT(mdev->state.disk == D_UP_TO_DATE); in drbd_gen_and_send_sync_uuid()
895 uuid = mdev->ldev->md.uuid[UI_BITMAP]; in drbd_gen_and_send_sync_uuid()
900 drbd_uuid_set(mdev, UI_BITMAP, uuid); in drbd_gen_and_send_sync_uuid()
901 drbd_print_uuids(mdev, "updated sync UUID"); in drbd_gen_and_send_sync_uuid()
902 drbd_md_sync(mdev); in drbd_gen_and_send_sync_uuid()
904 sock = &mdev->tconn->data; in drbd_gen_and_send_sync_uuid()
905 p = drbd_prepare_command(mdev, sock); in drbd_gen_and_send_sync_uuid()
908 drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0); in drbd_gen_and_send_sync_uuid()
912 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags) in drbd_send_sizes() argument
920 if (get_ldev_if_state(mdev, D_NEGOTIATING)) { in drbd_send_sizes()
921 D_ASSERT(mdev->ldev->backing_bdev); in drbd_send_sizes()
922 d_size = drbd_get_max_capacity(mdev->ldev); in drbd_send_sizes()
924 u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size; in drbd_send_sizes()
926 q_order_type = drbd_queue_order_type(mdev); in drbd_send_sizes()
927 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9; in drbd_send_sizes()
929 put_ldev(mdev); in drbd_send_sizes()
937 sock = &mdev->tconn->data; in drbd_send_sizes()
938 p = drbd_prepare_command(mdev, sock); in drbd_send_sizes()
942 if (mdev->tconn->agreed_pro_version <= 94) in drbd_send_sizes()
944 else if (mdev->tconn->agreed_pro_version < 100) in drbd_send_sizes()
949 p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev)); in drbd_send_sizes()
953 return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0); in drbd_send_sizes()
960 int drbd_send_current_state(struct drbd_conf *mdev) in drbd_send_current_state() argument
965 sock = &mdev->tconn->data; in drbd_send_current_state()
966 p = drbd_prepare_command(mdev, sock); in drbd_send_current_state()
969 p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */ in drbd_send_current_state()
970 return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0); in drbd_send_current_state()
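
drbd_send_current_state() is the smallest example of the sender pattern used throughout this file: pick the data socket, reserve its send buffer with drbd_prepare_command(), fill the on-wire packet in big-endian, and let drbd_send_command() attach the header for this volume and release the buffer. A reconstruction from the fragments above; the p_state layout and the failure check on the prepared buffer are inferred:

    int drbd_send_current_state(struct drbd_conf *mdev)
    {
        struct drbd_socket *sock = &mdev->tconn->data;
        struct p_state *p;                      /* assumed: a single be32 state word */

        p = drbd_prepare_command(mdev, sock);   /* reserves the socket's send buffer */
        if (!p)                                 /* assumed error path */
            return -EIO;
        p->state = cpu_to_be32(mdev->state.i);  /* within the send mutex */
        return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
    }
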
983 int drbd_send_state(struct drbd_conf *mdev, union drbd_state state) in drbd_send_state() argument
988 sock = &mdev->tconn->data; in drbd_send_state()
989 p = drbd_prepare_command(mdev, sock); in drbd_send_state()
993 return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0); in drbd_send_state()
996 int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val) in drbd_send_state_req() argument
1001 sock = &mdev->tconn->data; in drbd_send_state_req()
1002 p = drbd_prepare_command(mdev, sock); in drbd_send_state_req()
1007 return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0); in drbd_send_state_req()
1026 void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode) in drbd_send_sr_reply() argument
1031 sock = &mdev->tconn->meta; in drbd_send_sr_reply()
1032 p = drbd_prepare_command(mdev, sock); in drbd_send_sr_reply()
1035 drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0); in drbd_send_sr_reply()
1070 int fill_bitmap_rle_bits(struct drbd_conf *mdev, in fill_bitmap_rle_bits() argument
1085 use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle; in fill_bitmap_rle_bits()
1087 if (!use_rle || mdev->tconn->agreed_pro_version < 90) in fill_bitmap_rle_bits()
1107 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset) in fill_bitmap_rle_bits()
1108 : _drbd_bm_find_next(mdev, c->bit_offset); in fill_bitmap_rle_bits()
1174 send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c) in send_bitmap_rle_or_plain() argument
1176 struct drbd_socket *sock = &mdev->tconn->data; in send_bitmap_rle_or_plain()
1177 unsigned int header_size = drbd_header_size(mdev->tconn); in send_bitmap_rle_or_plain()
1181 len = fill_bitmap_rle_bits(mdev, p, in send_bitmap_rle_or_plain()
1188 err = __send_command(mdev->tconn, mdev->vnr, sock, in send_bitmap_rle_or_plain()
1208 drbd_bm_get_lel(mdev, c->word_offset, num_words, p); in send_bitmap_rle_or_plain()
1209 err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0); in send_bitmap_rle_or_plain()
1221 INFO_bm_xfer_stats(mdev, "send", c); in send_bitmap_rle_or_plain()
1230 static int _drbd_send_bitmap(struct drbd_conf *mdev) in _drbd_send_bitmap() argument
1235 if (!expect(mdev->bitmap)) in _drbd_send_bitmap()
1238 if (get_ldev(mdev)) { in _drbd_send_bitmap()
1239 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { in _drbd_send_bitmap()
1241 drbd_bm_set_all(mdev); in _drbd_send_bitmap()
1242 if (drbd_bm_write(mdev)) { in _drbd_send_bitmap()
1248 drbd_md_clear_flag(mdev, MDF_FULL_SYNC); in _drbd_send_bitmap()
1249 drbd_md_sync(mdev); in _drbd_send_bitmap()
1252 put_ldev(mdev); in _drbd_send_bitmap()
1256 .bm_bits = drbd_bm_bits(mdev), in _drbd_send_bitmap()
1257 .bm_words = drbd_bm_words(mdev), in _drbd_send_bitmap()
1261 err = send_bitmap_rle_or_plain(mdev, &c); in _drbd_send_bitmap()
1267 int drbd_send_bitmap(struct drbd_conf *mdev) in drbd_send_bitmap() argument
1269 struct drbd_socket *sock = &mdev->tconn->data; in drbd_send_bitmap()
1274 err = !_drbd_send_bitmap(mdev); in drbd_send_bitmap()
1304 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd, in _drbd_send_ack() argument
1310 if (mdev->state.conn < C_CONNECTED) in _drbd_send_ack()
1313 sock = &mdev->tconn->meta; in _drbd_send_ack()
1314 p = drbd_prepare_command(mdev, sock); in _drbd_send_ack()
1320 p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq)); in _drbd_send_ack()
1321 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0); in _drbd_send_ack()
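
Acknowledgements differ from the data-path senders in two ways visible above: they go out on the meta socket, and each one carries the next value of the per-device packet_seq counter. A hedged reconstruction of _drbd_send_ack(); the sector/blksize/block_id parameters and packet fields are inferred from the drbd_send_ack_dp()/drbd_send_ack_rp() callers listed just below and may not match the exact upstream types:

    static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
                              u64 sector, u32 blksize, u64 block_id)
    {
        struct drbd_socket *sock;
        struct p_block_ack *p;             /* assumed packet layout */

        if (mdev->state.conn < C_CONNECTED)
            return -EIO;                   /* peer gone, nothing to ack */

        sock = &mdev->tconn->meta;         /* acks bypass the data socket */
        p = drbd_prepare_command(mdev, sock);
        if (!p)
            return -EIO;
        p->sector = sector;
        p->block_id = block_id;
        p->blksize = blksize;
        p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
        return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
    }
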
1327 void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd, in drbd_send_ack_dp() argument
1330 if (mdev->tconn->peer_integrity_tfm) in drbd_send_ack_dp()
1331 data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm); in drbd_send_ack_dp()
1332 _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size), in drbd_send_ack_dp()
1336 void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd, in drbd_send_ack_rp() argument
1339 _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id); in drbd_send_ack_rp()
1348 int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd, in drbd_send_ack() argument
1351 return _drbd_send_ack(mdev, cmd, in drbd_send_ack()
1359 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd, in drbd_send_ack_ex() argument
1362 return _drbd_send_ack(mdev, cmd, in drbd_send_ack_ex()
1368 int drbd_send_drequest(struct drbd_conf *mdev, int cmd, in drbd_send_drequest() argument
1374 sock = &mdev->tconn->data; in drbd_send_drequest()
1375 p = drbd_prepare_command(mdev, sock); in drbd_send_drequest()
1381 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0); in drbd_send_drequest()
1384 int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size, in drbd_send_drequest_csum() argument
1392 sock = &mdev->tconn->data; in drbd_send_drequest_csum()
1393 p = drbd_prepare_command(mdev, sock); in drbd_send_drequest_csum()
1399 return drbd_send_command(mdev, sock, cmd, sizeof(*p), in drbd_send_drequest_csum()
1403 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size) in drbd_send_ov_request() argument
1408 sock = &mdev->tconn->data; in drbd_send_ov_request()
1409 p = drbd_prepare_command(mdev, sock); in drbd_send_ov_request()
1415 return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0); in drbd_send_ov_request()
1473 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page, in _drbd_no_send_page() argument
1480 socket = mdev->tconn->data.socket; in _drbd_no_send_page()
1482 err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags); in _drbd_no_send_page()
1485 mdev->send_cnt += size >> 9; in _drbd_no_send_page()
1489 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page, in _drbd_send_page() argument
1492 struct socket *socket = mdev->tconn->data.socket; in _drbd_send_page()
1504 return _drbd_no_send_page(mdev, page, offset, size, msg_flags); in _drbd_send_page()
1507 drbd_update_congested(mdev->tconn); in _drbd_send_page()
1515 if (we_should_drop_the_connection(mdev->tconn, socket)) in _drbd_send_page()
1529 clear_bit(NET_CONGESTED, &mdev->tconn->flags); in _drbd_send_page()
1533 mdev->send_cnt += size >> 9; in _drbd_send_page()
1538 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) in _drbd_send_bio() argument
1546 err = _drbd_no_send_page(mdev, bvec->bv_page, in _drbd_send_bio()
1555 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio) in _drbd_send_zc_bio() argument
1563 err = _drbd_send_page(mdev, bvec->bv_page, in _drbd_send_zc_bio()
1572 static int _drbd_send_zc_ee(struct drbd_conf *mdev, in _drbd_send_zc_ee() argument
1583 err = _drbd_send_page(mdev, page, 0, l, in _drbd_send_zc_ee()
1592 static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw) in bio_flags_to_wire() argument
1594 if (mdev->tconn->agreed_pro_version >= 95) in bio_flags_to_wire()
1606 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) in drbd_send_dblock() argument
1614 sock = &mdev->tconn->data; in drbd_send_dblock()
1615 p = drbd_prepare_command(mdev, sock); in drbd_send_dblock()
1616 dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0; in drbd_send_dblock()
1622 p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq)); in drbd_send_dblock()
1623 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw); in drbd_send_dblock()
1624 if (mdev->state.conn >= C_SYNC_SOURCE && in drbd_send_dblock()
1625 mdev->state.conn <= C_PAUSED_SYNC_T) in drbd_send_dblock()
1627 if (mdev->tconn->agreed_pro_version >= 100) { in drbd_send_dblock()
1635 drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1); in drbd_send_dblock()
1636 err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size); in drbd_send_dblock()
1650 err = _drbd_send_bio(mdev, req->master_bio); in drbd_send_dblock()
1652 err = _drbd_send_zc_bio(mdev, req->master_bio); in drbd_send_dblock()
1659 drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest); in drbd_send_dblock()
1678 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd, in drbd_send_block() argument
1686 sock = &mdev->tconn->data; in drbd_send_block()
1687 p = drbd_prepare_command(mdev, sock); in drbd_send_block()
1689 dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0; in drbd_send_block()
1698 drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1); in drbd_send_block()
1699 err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size); in drbd_send_block()
1701 err = _drbd_send_zc_ee(mdev, peer_req); in drbd_send_block()
1707 int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req) in drbd_send_out_of_sync() argument
1712 sock = &mdev->tconn->data; in drbd_send_out_of_sync()
1713 p = drbd_prepare_command(mdev, sock); in drbd_send_out_of_sync()
1718 return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0); in drbd_send_out_of_sync()
1828 struct drbd_conf *mdev = bdev->bd_disk->private_data; in drbd_open() local
1833 spin_lock_irqsave(&mdev->tconn->req_lock, flags); in drbd_open()
1837 if (mdev->state.role != R_PRIMARY) { in drbd_open()
1845 mdev->open_cnt++; in drbd_open()
1846 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags); in drbd_open()
1854 struct drbd_conf *mdev = gd->private_data; in drbd_release() local
1856 mdev->open_cnt--; in drbd_release()
1860 static void drbd_set_defaults(struct drbd_conf *mdev) in drbd_set_defaults() argument
1864 mdev->state = (union drbd_dev_state) { in drbd_set_defaults()
1873 void drbd_init_set_defaults(struct drbd_conf *mdev) in drbd_init_set_defaults() argument
1878 drbd_set_defaults(mdev); in drbd_init_set_defaults()
1880 atomic_set(&mdev->ap_bio_cnt, 0); in drbd_init_set_defaults()
1881 atomic_set(&mdev->ap_pending_cnt, 0); in drbd_init_set_defaults()
1882 atomic_set(&mdev->rs_pending_cnt, 0); in drbd_init_set_defaults()
1883 atomic_set(&mdev->unacked_cnt, 0); in drbd_init_set_defaults()
1884 atomic_set(&mdev->local_cnt, 0); in drbd_init_set_defaults()
1885 atomic_set(&mdev->pp_in_use_by_net, 0); in drbd_init_set_defaults()
1886 atomic_set(&mdev->rs_sect_in, 0); in drbd_init_set_defaults()
1887 atomic_set(&mdev->rs_sect_ev, 0); in drbd_init_set_defaults()
1888 atomic_set(&mdev->ap_in_flight, 0); in drbd_init_set_defaults()
1889 atomic_set(&mdev->md_io_in_use, 0); in drbd_init_set_defaults()
1891 mutex_init(&mdev->own_state_mutex); in drbd_init_set_defaults()
1892 mdev->state_mutex = &mdev->own_state_mutex; in drbd_init_set_defaults()
1894 spin_lock_init(&mdev->al_lock); in drbd_init_set_defaults()
1895 spin_lock_init(&mdev->peer_seq_lock); in drbd_init_set_defaults()
1897 INIT_LIST_HEAD(&mdev->active_ee); in drbd_init_set_defaults()
1898 INIT_LIST_HEAD(&mdev->sync_ee); in drbd_init_set_defaults()
1899 INIT_LIST_HEAD(&mdev->done_ee); in drbd_init_set_defaults()
1900 INIT_LIST_HEAD(&mdev->read_ee); in drbd_init_set_defaults()
1901 INIT_LIST_HEAD(&mdev->net_ee); in drbd_init_set_defaults()
1902 INIT_LIST_HEAD(&mdev->resync_reads); in drbd_init_set_defaults()
1903 INIT_LIST_HEAD(&mdev->resync_work.list); in drbd_init_set_defaults()
1904 INIT_LIST_HEAD(&mdev->unplug_work.list); in drbd_init_set_defaults()
1905 INIT_LIST_HEAD(&mdev->go_diskless.list); in drbd_init_set_defaults()
1906 INIT_LIST_HEAD(&mdev->md_sync_work.list); in drbd_init_set_defaults()
1907 INIT_LIST_HEAD(&mdev->start_resync_work.list); in drbd_init_set_defaults()
1908 INIT_LIST_HEAD(&mdev->bm_io_work.w.list); in drbd_init_set_defaults()
1910 mdev->resync_work.cb = w_resync_timer; in drbd_init_set_defaults()
1911 mdev->unplug_work.cb = w_send_write_hint; in drbd_init_set_defaults()
1912 mdev->go_diskless.cb = w_go_diskless; in drbd_init_set_defaults()
1913 mdev->md_sync_work.cb = w_md_sync; in drbd_init_set_defaults()
1914 mdev->bm_io_work.w.cb = w_bitmap_io; in drbd_init_set_defaults()
1915 mdev->start_resync_work.cb = w_start_resync; in drbd_init_set_defaults()
1917 mdev->resync_work.mdev = mdev; in drbd_init_set_defaults()
1918 mdev->unplug_work.mdev = mdev; in drbd_init_set_defaults()
1919 mdev->go_diskless.mdev = mdev; in drbd_init_set_defaults()
1920 mdev->md_sync_work.mdev = mdev; in drbd_init_set_defaults()
1921 mdev->bm_io_work.w.mdev = mdev; in drbd_init_set_defaults()
1922 mdev->start_resync_work.mdev = mdev; in drbd_init_set_defaults()
1924 init_timer(&mdev->resync_timer); in drbd_init_set_defaults()
1925 init_timer(&mdev->md_sync_timer); in drbd_init_set_defaults()
1926 init_timer(&mdev->start_resync_timer); in drbd_init_set_defaults()
1927 init_timer(&mdev->request_timer); in drbd_init_set_defaults()
1928 mdev->resync_timer.function = resync_timer_fn; in drbd_init_set_defaults()
1929 mdev->resync_timer.data = (unsigned long) mdev; in drbd_init_set_defaults()
1930 mdev->md_sync_timer.function = md_sync_timer_fn; in drbd_init_set_defaults()
1931 mdev->md_sync_timer.data = (unsigned long) mdev; in drbd_init_set_defaults()
1932 mdev->start_resync_timer.function = start_resync_timer_fn; in drbd_init_set_defaults()
1933 mdev->start_resync_timer.data = (unsigned long) mdev; in drbd_init_set_defaults()
1934 mdev->request_timer.function = request_timer_fn; in drbd_init_set_defaults()
1935 mdev->request_timer.data = (unsigned long) mdev; in drbd_init_set_defaults()
1937 init_waitqueue_head(&mdev->misc_wait); in drbd_init_set_defaults()
1938 init_waitqueue_head(&mdev->state_wait); in drbd_init_set_defaults()
1939 init_waitqueue_head(&mdev->ee_wait); in drbd_init_set_defaults()
1940 init_waitqueue_head(&mdev->al_wait); in drbd_init_set_defaults()
1941 init_waitqueue_head(&mdev->seq_wait); in drbd_init_set_defaults()
1943 mdev->resync_wenr = LC_FREE; in drbd_init_set_defaults()
1944 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; in drbd_init_set_defaults()
1945 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; in drbd_init_set_defaults()
1948 void drbd_mdev_cleanup(struct drbd_conf *mdev) in drbd_mdev_cleanup() argument
1951 if (mdev->tconn->receiver.t_state != NONE) in drbd_mdev_cleanup()
1953 mdev->tconn->receiver.t_state); in drbd_mdev_cleanup()
1955 mdev->al_writ_cnt = in drbd_mdev_cleanup()
1956 mdev->bm_writ_cnt = in drbd_mdev_cleanup()
1957 mdev->read_cnt = in drbd_mdev_cleanup()
1958 mdev->recv_cnt = in drbd_mdev_cleanup()
1959 mdev->send_cnt = in drbd_mdev_cleanup()
1960 mdev->writ_cnt = in drbd_mdev_cleanup()
1961 mdev->p_size = in drbd_mdev_cleanup()
1962 mdev->rs_start = in drbd_mdev_cleanup()
1963 mdev->rs_total = in drbd_mdev_cleanup()
1964 mdev->rs_failed = 0; in drbd_mdev_cleanup()
1965 mdev->rs_last_events = 0; in drbd_mdev_cleanup()
1966 mdev->rs_last_sect_ev = 0; in drbd_mdev_cleanup()
1968 mdev->rs_mark_left[i] = 0; in drbd_mdev_cleanup()
1969 mdev->rs_mark_time[i] = 0; in drbd_mdev_cleanup()
1971 D_ASSERT(mdev->tconn->net_conf == NULL); in drbd_mdev_cleanup()
1973 drbd_set_my_capacity(mdev, 0); in drbd_mdev_cleanup()
1974 if (mdev->bitmap) { in drbd_mdev_cleanup()
1976 drbd_bm_resize(mdev, 0, 1); in drbd_mdev_cleanup()
1977 drbd_bm_cleanup(mdev); in drbd_mdev_cleanup()
1980 drbd_free_bc(mdev->ldev); in drbd_mdev_cleanup()
1981 mdev->ldev = NULL; in drbd_mdev_cleanup()
1983 clear_bit(AL_SUSPENDED, &mdev->flags); in drbd_mdev_cleanup()
1985 D_ASSERT(list_empty(&mdev->active_ee)); in drbd_mdev_cleanup()
1986 D_ASSERT(list_empty(&mdev->sync_ee)); in drbd_mdev_cleanup()
1987 D_ASSERT(list_empty(&mdev->done_ee)); in drbd_mdev_cleanup()
1988 D_ASSERT(list_empty(&mdev->read_ee)); in drbd_mdev_cleanup()
1989 D_ASSERT(list_empty(&mdev->net_ee)); in drbd_mdev_cleanup()
1990 D_ASSERT(list_empty(&mdev->resync_reads)); in drbd_mdev_cleanup()
1991 D_ASSERT(list_empty(&mdev->tconn->sender_work.q)); in drbd_mdev_cleanup()
1992 D_ASSERT(list_empty(&mdev->resync_work.list)); in drbd_mdev_cleanup()
1993 D_ASSERT(list_empty(&mdev->unplug_work.list)); in drbd_mdev_cleanup()
1994 D_ASSERT(list_empty(&mdev->go_diskless.list)); in drbd_mdev_cleanup()
1996 drbd_set_defaults(mdev); in drbd_mdev_cleanup()
2131 static void drbd_release_all_peer_reqs(struct drbd_conf *mdev) in drbd_release_all_peer_reqs() argument
2135 rr = drbd_free_peer_reqs(mdev, &mdev->active_ee); in drbd_release_all_peer_reqs()
2139 rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee); in drbd_release_all_peer_reqs()
2143 rr = drbd_free_peer_reqs(mdev, &mdev->read_ee); in drbd_release_all_peer_reqs()
2147 rr = drbd_free_peer_reqs(mdev, &mdev->done_ee); in drbd_release_all_peer_reqs()
2151 rr = drbd_free_peer_reqs(mdev, &mdev->net_ee); in drbd_release_all_peer_reqs()
2159 struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref); in drbd_minor_destroy() local
2160 struct drbd_tconn *tconn = mdev->tconn; in drbd_minor_destroy()
2162 del_timer_sync(&mdev->request_timer); in drbd_minor_destroy()
2165 D_ASSERT(mdev->open_cnt == 0); in drbd_minor_destroy()
2171 if (mdev->this_bdev) in drbd_minor_destroy()
2172 bdput(mdev->this_bdev); in drbd_minor_destroy()
2174 drbd_free_bc(mdev->ldev); in drbd_minor_destroy()
2175 mdev->ldev = NULL; in drbd_minor_destroy()
2177 drbd_release_all_peer_reqs(mdev); in drbd_minor_destroy()
2179 lc_destroy(mdev->act_log); in drbd_minor_destroy()
2180 lc_destroy(mdev->resync); in drbd_minor_destroy()
2182 kfree(mdev->p_uuid); in drbd_minor_destroy()
2185 if (mdev->bitmap) /* should no longer be there. */ in drbd_minor_destroy()
2186 drbd_bm_cleanup(mdev); in drbd_minor_destroy()
2187 __free_page(mdev->md_io_page); in drbd_minor_destroy()
2188 put_disk(mdev->vdisk); in drbd_minor_destroy()
2189 blk_cleanup_queue(mdev->rq_queue); in drbd_minor_destroy()
2190 kfree(mdev->rs_plan_s); in drbd_minor_destroy()
2191 kfree(mdev); in drbd_minor_destroy()
2218 struct drbd_conf *mdev = req->w.mdev; in do_retry() local
2254 inc_ap_bio(mdev); in do_retry()
2255 __drbd_make_request(mdev, bio, start_time); in do_retry()
2269 dec_ap_bio(req->w.mdev); in drbd_restart_request()
2278 struct drbd_conf *mdev; in drbd_cleanup() local
2299 idr_for_each_entry(&minors, mdev, i) { in drbd_cleanup()
2300 idr_remove(&minors, mdev_to_minor(mdev)); in drbd_cleanup()
2301 idr_remove(&mdev->tconn->volumes, mdev->vnr); in drbd_cleanup()
2302 destroy_workqueue(mdev->submit.wq); in drbd_cleanup()
2303 del_gendisk(mdev->vdisk); in drbd_cleanup()
2305 kref_put(&mdev->kref, &drbd_minor_destroy); in drbd_cleanup()
2332 struct drbd_conf *mdev = congested_data; in drbd_congested() local
2337 if (!may_inc_ap_bio(mdev)) { in drbd_congested()
2344 if (test_bit(CALLBACK_PENDING, &mdev->tconn->flags)) { in drbd_congested()
2351 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) in drbd_congested()
2354 put_ldev(mdev); in drbd_congested()
2360 if (get_ldev(mdev)) { in drbd_congested()
2361 q = bdev_get_queue(mdev->ldev->backing_bdev); in drbd_congested()
2363 put_ldev(mdev); in drbd_congested()
2368 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) { in drbd_congested()
2374 mdev->congestion_reason = reason; in drbd_congested()
2592 int init_submitter(struct drbd_conf *mdev) in init_submitter() argument
2596 mdev->submit.wq = alloc_workqueue("drbd%u_submit", in init_submitter()
2597 WQ_UNBOUND | WQ_MEM_RECLAIM, 1, mdev->minor); in init_submitter()
2598 if (!mdev->submit.wq) in init_submitter()
2601 INIT_WORK(&mdev->submit.worker, do_submit); in init_submitter()
2602 spin_lock_init(&mdev->submit.lock); in init_submitter()
2603 INIT_LIST_HEAD(&mdev->submit.writes); in init_submitter()
2609 struct drbd_conf *mdev; in conn_new_minor() local
2616 mdev = minor_to_mdev(minor); in conn_new_minor()
2617 if (mdev) in conn_new_minor()
2621 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL); in conn_new_minor()
2622 if (!mdev) in conn_new_minor()
2626 mdev->tconn = tconn; in conn_new_minor()
2628 mdev->minor = minor; in conn_new_minor()
2629 mdev->vnr = vnr; in conn_new_minor()
2631 drbd_init_set_defaults(mdev); in conn_new_minor()
2636 mdev->rq_queue = q; in conn_new_minor()
2637 q->queuedata = mdev; in conn_new_minor()
2642 mdev->vdisk = disk; in conn_new_minor()
2651 disk->private_data = mdev; in conn_new_minor()
2653 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor)); in conn_new_minor()
2655 mdev->this_bdev->bd_contains = mdev->this_bdev; in conn_new_minor()
2658 q->backing_dev_info.congested_data = mdev; in conn_new_minor()
2667 q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */ in conn_new_minor()
2669 mdev->md_io_page = alloc_page(GFP_KERNEL); in conn_new_minor()
2670 if (!mdev->md_io_page) in conn_new_minor()
2673 if (drbd_bm_init(mdev)) in conn_new_minor()
2675 mdev->read_requests = RB_ROOT; in conn_new_minor()
2676 mdev->write_requests = RB_ROOT; in conn_new_minor()
2678 minor_got = idr_alloc(&minors, mdev, minor, minor + 1, GFP_KERNEL); in conn_new_minor()
2687 vnr_got = idr_alloc(&tconn->volumes, mdev, vnr, vnr + 1, GFP_KERNEL); in conn_new_minor()
2696 if (init_submitter(mdev)) { in conn_new_minor()
2703 kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */ in conn_new_minor()
2706 mdev->state.conn = tconn->cstate; in conn_new_minor()
2707 if (mdev->state.conn == C_WF_REPORT_PARAMS) in conn_new_minor()
2708 drbd_connected(mdev); in conn_new_minor()
2718 drbd_bm_cleanup(mdev); in conn_new_minor()
2720 __free_page(mdev->md_io_page); in conn_new_minor()
2726 kfree(mdev); in conn_new_minor()
2847 struct drbd_conf *mdev; in conn_md_sync() local
2851 idr_for_each_entry(&tconn->volumes, mdev, vnr) { in conn_md_sync()
2852 kref_get(&mdev->kref); in conn_md_sync()
2854 drbd_md_sync(mdev); in conn_md_sync()
2855 kref_put(&mdev->kref, &drbd_minor_destroy); in conn_md_sync()
2888 void drbd_md_sync(struct drbd_conf *mdev) in drbd_md_sync() argument
2898 del_timer(&mdev->md_sync_timer); in drbd_md_sync()
2900 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags)) in drbd_md_sync()
2905 if (!get_ldev_if_state(mdev, D_FAILED)) in drbd_md_sync()
2908 buffer = drbd_md_get_buffer(mdev); in drbd_md_sync()
2914 buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev)); in drbd_md_sync()
2916 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]); in drbd_md_sync()
2917 buffer->flags = cpu_to_be32(mdev->ldev->md.flags); in drbd_md_sync()
2920 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect); in drbd_md_sync()
2921 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset); in drbd_md_sync()
2922 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements); in drbd_md_sync()
2924 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid); in drbd_md_sync()
2926 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset); in drbd_md_sync()
2927 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size); in drbd_md_sync()
2929 buffer->al_stripes = cpu_to_be32(mdev->ldev->md.al_stripes); in drbd_md_sync()
2930 buffer->al_stripe_size_4k = cpu_to_be32(mdev->ldev->md.al_stripe_size_4k); in drbd_md_sync()
2932 D_ASSERT(drbd_md_ss(mdev->ldev) == mdev->ldev->md.md_offset); in drbd_md_sync()
2933 sector = mdev->ldev->md.md_offset; in drbd_md_sync()
2935 if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { in drbd_md_sync()
2938 drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); in drbd_md_sync()
2943 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev); in drbd_md_sync()
2945 drbd_md_put_buffer(mdev); in drbd_md_sync()
2947 put_ldev(mdev); in drbd_md_sync()
2950 static int check_activity_log_stripe_size(struct drbd_conf *mdev, in check_activity_log_stripe_size() argument
2995 static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) in check_offsets_and_sizes() argument
3086 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) in drbd_md_read() argument
3092 if (mdev->state.disk != D_DISKLESS) in drbd_md_read()
3095 buffer = drbd_md_get_buffer(mdev); in drbd_md_read()
3104 if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) { in drbd_md_read()
3149 if (check_activity_log_stripe_size(mdev, buffer, &bdev->md)) in drbd_md_read()
3151 if (check_offsets_and_sizes(mdev, bdev)) in drbd_md_read()
3167 spin_lock_irq(&mdev->tconn->req_lock); in drbd_md_read()
3168 if (mdev->state.conn < C_CONNECTED) { in drbd_md_read()
3172 mdev->peer_max_bio_size = peer; in drbd_md_read()
3174 spin_unlock_irq(&mdev->tconn->req_lock); in drbd_md_read()
3177 drbd_md_put_buffer(mdev); in drbd_md_read()
3191 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func) in drbd_md_mark_dirty_() argument
3193 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) { in drbd_md_mark_dirty_()
3194 mod_timer(&mdev->md_sync_timer, jiffies + HZ); in drbd_md_mark_dirty_()
3195 mdev->last_md_mark_dirty.line = line; in drbd_md_mark_dirty_()
3196 mdev->last_md_mark_dirty.func = func; in drbd_md_mark_dirty_()
3200 void drbd_md_mark_dirty(struct drbd_conf *mdev) in drbd_md_mark_dirty() argument
3202 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) in drbd_md_mark_dirty()
3203 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ); in drbd_md_mark_dirty()
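
drbd_md_mark_dirty() is a debounce rather than an immediate write: only the caller that flips MD_DIRTY from clear to set arms md_sync_timer (five seconds here, one second in the underscore debug variant, which also records the dirtying caller), and md_sync_timer_fn()/w_md_sync() further down eventually perform the drbd_md_sync(). Reconstructed from the two lines above:

    void drbd_md_mark_dirty(struct drbd_conf *mdev)
    {
        /* first dirtier arms the timer; later callers find the bit already set */
        if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
            mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
    }
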
3207 void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local) in drbd_uuid_move_history() argument
3212 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; in drbd_uuid_move_history()
3215 void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) in __drbd_uuid_set() argument
3218 if (mdev->state.role == R_PRIMARY) in __drbd_uuid_set()
3223 drbd_set_ed_uuid(mdev, val); in __drbd_uuid_set()
3226 mdev->ldev->md.uuid[idx] = val; in __drbd_uuid_set()
3227 drbd_md_mark_dirty(mdev); in __drbd_uuid_set()
3230 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) in _drbd_uuid_set() argument
3233 spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags); in _drbd_uuid_set()
3234 __drbd_uuid_set(mdev, idx, val); in _drbd_uuid_set()
3235 spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags); in _drbd_uuid_set()
3238 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) in drbd_uuid_set() argument
3241 spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags); in drbd_uuid_set()
3242 if (mdev->ldev->md.uuid[idx]) { in drbd_uuid_set()
3243 drbd_uuid_move_history(mdev); in drbd_uuid_set()
3244 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; in drbd_uuid_set()
3246 __drbd_uuid_set(mdev, idx, val); in drbd_uuid_set()
3247 spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags); in drbd_uuid_set()
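
drbd_uuid_set() never silently overwrites a UUID: under uuid_lock it first rotates the history slots and parks the old value at UI_HISTORY_START, then installs the new value through __drbd_uuid_set(), which also marks the metadata dirty. Reconstruction from the fragments above (the original's __must_hold(local) annotation is dropped):

    void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val)
    {
        unsigned long flags;

        spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
        if (mdev->ldev->md.uuid[idx]) {
            /* preserve what is about to be overwritten */
            drbd_uuid_move_history(mdev);
            mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
        }
        __drbd_uuid_set(mdev, idx, val);   /* also marks the metadata dirty */
        spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
    }
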
3257 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) in drbd_uuid_new_current() argument
3264 spin_lock_irq(&mdev->ldev->md.uuid_lock); in drbd_uuid_new_current()
3265 bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; in drbd_uuid_new_current()
3270 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; in drbd_uuid_new_current()
3271 __drbd_uuid_set(mdev, UI_CURRENT, val); in drbd_uuid_new_current()
3272 spin_unlock_irq(&mdev->ldev->md.uuid_lock); in drbd_uuid_new_current()
3274 drbd_print_uuids(mdev, "new current UUID"); in drbd_uuid_new_current()
3276 drbd_md_sync(mdev); in drbd_uuid_new_current()
3279 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) in drbd_uuid_set_bm() argument
3282 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) in drbd_uuid_set_bm()
3285 spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags); in drbd_uuid_set_bm()
3287 drbd_uuid_move_history(mdev); in drbd_uuid_set_bm()
3288 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; in drbd_uuid_set_bm()
3289 mdev->ldev->md.uuid[UI_BITMAP] = 0; in drbd_uuid_set_bm()
3291 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; in drbd_uuid_set_bm()
3295 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); in drbd_uuid_set_bm()
3297 spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags); in drbd_uuid_set_bm()
3299 drbd_md_mark_dirty(mdev); in drbd_uuid_set_bm()
3308 int drbd_bmio_set_n_write(struct drbd_conf *mdev) in drbd_bmio_set_n_write() argument
3312 if (get_ldev_if_state(mdev, D_ATTACHING)) { in drbd_bmio_set_n_write()
3313 drbd_md_set_flag(mdev, MDF_FULL_SYNC); in drbd_bmio_set_n_write()
3314 drbd_md_sync(mdev); in drbd_bmio_set_n_write()
3315 drbd_bm_set_all(mdev); in drbd_bmio_set_n_write()
3317 rv = drbd_bm_write(mdev); in drbd_bmio_set_n_write()
3320 drbd_md_clear_flag(mdev, MDF_FULL_SYNC); in drbd_bmio_set_n_write()
3321 drbd_md_sync(mdev); in drbd_bmio_set_n_write()
3324 put_ldev(mdev); in drbd_bmio_set_n_write()
3336 int drbd_bmio_clear_n_write(struct drbd_conf *mdev) in drbd_bmio_clear_n_write() argument
3340 drbd_resume_al(mdev); in drbd_bmio_clear_n_write()
3341 if (get_ldev_if_state(mdev, D_ATTACHING)) { in drbd_bmio_clear_n_write()
3342 drbd_bm_clear_all(mdev); in drbd_bmio_clear_n_write()
3343 rv = drbd_bm_write(mdev); in drbd_bmio_clear_n_write()
3344 put_ldev(mdev); in drbd_bmio_clear_n_write()
3353 struct drbd_conf *mdev = w->mdev; in w_bitmap_io() local
3356 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); in w_bitmap_io()
3358 if (get_ldev(mdev)) { in w_bitmap_io()
3359 drbd_bm_lock(mdev, work->why, work->flags); in w_bitmap_io()
3360 rv = work->io_fn(mdev); in w_bitmap_io()
3361 drbd_bm_unlock(mdev); in w_bitmap_io()
3362 put_ldev(mdev); in w_bitmap_io()
3365 clear_bit_unlock(BITMAP_IO, &mdev->flags); in w_bitmap_io()
3366 wake_up(&mdev->misc_wait); in w_bitmap_io()
3369 work->done(mdev, rv); in w_bitmap_io()
3371 clear_bit(BITMAP_IO_QUEUED, &mdev->flags); in w_bitmap_io()
3378 void drbd_ldev_destroy(struct drbd_conf *mdev) in drbd_ldev_destroy() argument
3380 lc_destroy(mdev->resync); in drbd_ldev_destroy()
3381 mdev->resync = NULL; in drbd_ldev_destroy()
3382 lc_destroy(mdev->act_log); in drbd_ldev_destroy()
3383 mdev->act_log = NULL; in drbd_ldev_destroy()
3385 drbd_free_bc(mdev->ldev); in drbd_ldev_destroy()
3386 mdev->ldev = NULL;); in drbd_ldev_destroy()
3388 clear_bit(GO_DISKLESS, &mdev->flags); in drbd_ldev_destroy()
3393 struct drbd_conf *mdev = w->mdev; in w_go_diskless() local
3395 D_ASSERT(mdev->state.disk == D_FAILED); in w_go_diskless()
3414 if (mdev->bitmap && mdev->ldev) { in w_go_diskless()
3419 if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write, in w_go_diskless()
3421 if (test_bit(WAS_READ_ERROR, &mdev->flags)) { in w_go_diskless()
3422 drbd_md_set_flag(mdev, MDF_FULL_SYNC); in w_go_diskless()
3423 drbd_md_sync(mdev); in w_go_diskless()
3428 drbd_force_state(mdev, NS(disk, D_DISKLESS)); in w_go_diskless()
3444 void drbd_queue_bitmap_io(struct drbd_conf *mdev, in drbd_queue_bitmap_io() argument
3449 D_ASSERT(current == mdev->tconn->worker.task); in drbd_queue_bitmap_io()
3451 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags)); in drbd_queue_bitmap_io()
3452 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags)); in drbd_queue_bitmap_io()
3453 D_ASSERT(list_empty(&mdev->bm_io_work.w.list)); in drbd_queue_bitmap_io()
3454 if (mdev->bm_io_work.why) in drbd_queue_bitmap_io()
3456 why, mdev->bm_io_work.why); in drbd_queue_bitmap_io()
3458 mdev->bm_io_work.io_fn = io_fn; in drbd_queue_bitmap_io()
3459 mdev->bm_io_work.done = done; in drbd_queue_bitmap_io()
3460 mdev->bm_io_work.why = why; in drbd_queue_bitmap_io()
3461 mdev->bm_io_work.flags = flags; in drbd_queue_bitmap_io()
3463 spin_lock_irq(&mdev->tconn->req_lock); in drbd_queue_bitmap_io()
3464 set_bit(BITMAP_IO, &mdev->flags); in drbd_queue_bitmap_io()
3465 if (atomic_read(&mdev->ap_bio_cnt) == 0) { in drbd_queue_bitmap_io()
3466 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) in drbd_queue_bitmap_io()
3467 drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w); in drbd_queue_bitmap_io()
3469 spin_unlock_irq(&mdev->tconn->req_lock); in drbd_queue_bitmap_io()
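
drbd_queue_bitmap_io() is a deferred handoff: the worker records the io_fn/done callbacks in bm_io_work, sets BITMAP_IO under req_lock, and only queues the work immediately if no application bios are in flight (ap_bio_cnt == 0); otherwise BITMAP_IO_QUEUED is set later once the in-flight count drains. A sketch of the core, with the leading assertions omitted and the done/why/flags parameter types inferred:

    void drbd_queue_bitmap_io(struct drbd_conf *mdev,
                              int (*io_fn)(struct drbd_conf *),
                              void (*done)(struct drbd_conf *, int),
                              char *why, enum bm_flag flags)
    {
        mdev->bm_io_work.io_fn = io_fn;
        mdev->bm_io_work.done  = done;
        mdev->bm_io_work.why   = why;
        mdev->bm_io_work.flags = flags;

        spin_lock_irq(&mdev->tconn->req_lock);
        set_bit(BITMAP_IO, &mdev->flags);
        if (atomic_read(&mdev->ap_bio_cnt) == 0) {
            /* nothing in flight: let the sender run w_bitmap_io right away */
            if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
                drbd_queue_work(&mdev->tconn->sender_work,
                                &mdev->bm_io_work.w);
        }
        spin_unlock_irq(&mdev->tconn->req_lock);
    }
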
3481 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), in drbd_bitmap_io() argument
3486 D_ASSERT(current != mdev->tconn->worker.task); in drbd_bitmap_io()
3489 drbd_suspend_io(mdev); in drbd_bitmap_io()
3491 drbd_bm_lock(mdev, why, flags); in drbd_bitmap_io()
3492 rv = io_fn(mdev); in drbd_bitmap_io()
3493 drbd_bm_unlock(mdev); in drbd_bitmap_io()
3496 drbd_resume_io(mdev); in drbd_bitmap_io()
3501 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local) in drbd_md_set_flag() argument
3503 if ((mdev->ldev->md.flags & flag) != flag) { in drbd_md_set_flag()
3504 drbd_md_mark_dirty(mdev); in drbd_md_set_flag()
3505 mdev->ldev->md.flags |= flag; in drbd_md_set_flag()
3509 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local) in drbd_md_clear_flag() argument
3511 if ((mdev->ldev->md.flags & flag) != 0) { in drbd_md_clear_flag()
3512 drbd_md_mark_dirty(mdev); in drbd_md_clear_flag()
3513 mdev->ldev->md.flags &= ~flag; in drbd_md_clear_flag()
3523 struct drbd_conf *mdev = (struct drbd_conf *) data; in md_sync_timer_fn() local
3526 if (list_empty(&mdev->md_sync_work.list)) in md_sync_timer_fn()
3527 drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work); in md_sync_timer_fn()
3532 struct drbd_conf *mdev = w->mdev; in w_md_sync() local
3537 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line); in w_md_sync()
3539 drbd_md_sync(mdev); in w_md_sync()
3619 int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i) in drbd_wait_misc() argument
3626 nc = rcu_dereference(mdev->tconn->net_conf); in drbd_wait_misc()
3636 prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE); in drbd_wait_misc()
3637 spin_unlock_irq(&mdev->tconn->req_lock); in drbd_wait_misc()
3639 finish_wait(&mdev->misc_wait, &wait); in drbd_wait_misc()
3640 spin_lock_irq(&mdev->tconn->req_lock); in drbd_wait_misc()
3641 if (!timeout || mdev->state.conn < C_CONNECTED) in drbd_wait_misc()
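
drbd_wait_misc() is entered with req_lock held and sleeps on misc_wait without keeping that lock: prepare_to_wait, drop req_lock, sleep, finish_wait, re-take req_lock, then report a timeout if the wait expired or the connection fell below C_CONNECTED. The lock choreography below is taken from the fragments above; the timeout value (derived from net_conf) and the schedule_timeout() call are assumptions:

    /* core of drbd_wait_misc(); the caller holds mdev->tconn->req_lock */
    DEFINE_WAIT(wait);

    prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
    spin_unlock_irq(&mdev->tconn->req_lock);   /* give up the lock to sleep */
    timeout = schedule_timeout(timeout);       /* assumed sleep step */
    finish_wait(&mdev->misc_wait, &wait);
    spin_lock_irq(&mdev->tconn->req_lock);     /* re-take before returning */
    if (!timeout || mdev->state.conn < C_CONNECTED)
        return -ETIMEDOUT;
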
3697 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) in _drbd_insert_fault() argument
3703 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) && in _drbd_insert_fault()