1 /*
2 drbd_nl.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24 */
25
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28 #include <linux/module.h>
29 #include <linux/drbd.h>
30 #include <linux/in.h>
31 #include <linux/fs.h>
32 #include <linux/file.h>
33 #include <linux/slab.h>
34 #include <linux/blkpg.h>
35 #include <linux/cpumask.h>
36 #include "drbd_int.h"
37 #include "drbd_protocol.h"
38 #include "drbd_req.h"
39 #include <asm/unaligned.h>
40 #include <linux/drbd_limits.h>
41 #include <linux/kthread.h>
42
43 #include <net/genetlink.h>
44
45 /* .doit */
46 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
47 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
48
49 int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
50 int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);
51
52 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
53 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
54 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
55
56 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
57 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
58 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
59 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
60 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
61 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
62 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
63 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
64 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
65 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
66 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
67 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
68 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
69 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
70 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
71 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
72 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
73 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
74 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
75 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
76 /* .dumpit */
77 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
78
79 #include <linux/drbd_genl_api.h>
80 #include "drbd_nla.h"
81 #include <linux/genl_magic_func.h>
82
83 /* used with blkdev_get_by_path, to claim our meta data device(s) */
84 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
85
86 static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
87 {
88 genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
89 if (genlmsg_reply(skb, info))
90 pr_err("error sending genl reply\n");
91 }
92
93 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
94 * reason it could fail would be lack of space in the skb, and there are 4k available. */
95 static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
96 {
97 struct nlattr *nla;
98 int err = -EMSGSIZE;
99
100 if (!info || !info[0])
101 return 0;
102
103 nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
104 if (!nla)
105 return err;
106
107 err = nla_put_string(skb, T_info_text, info);
108 if (err) {
109 nla_nest_cancel(skb, nla);
110 return err;
111 } else
112 nla_nest_end(skb, nla);
113 return 0;
114 }
115
116 /* This would be a good candidate for a "pre_doit" hook,
117 * and per-family private info->pointers.
118 * But we need to stay compatible with older kernels.
119 * If it returns successfully, adm_ctx members are valid.
120 *
121 * At this point, we still rely on the global genl_lock().
122 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
123 * to add additional synchronization against object destruction/modification.
124 */
125 #define DRBD_ADM_NEED_MINOR 1
126 #define DRBD_ADM_NEED_RESOURCE 2
127 #define DRBD_ADM_NEED_CONNECTION 4
128 static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
129 struct sk_buff *skb, struct genl_info *info, unsigned flags)
130 {
131 struct drbd_genlmsghdr *d_in = info->userhdr;
132 const u8 cmd = info->genlhdr->cmd;
133 int err;
134
135 memset(adm_ctx, 0, sizeof(*adm_ctx));
136
137 /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
138 if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
139 return -EPERM;
140
141 adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
142 if (!adm_ctx->reply_skb) {
143 err = -ENOMEM;
144 goto fail;
145 }
146
147 adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
148 info, &drbd_genl_family, 0, cmd);
149 /* Putting a few bytes into a fresh skb of >= 4k will always succeed,
150 * but check anyway. */
151 if (!adm_ctx->reply_dh) {
152 err = -ENOMEM;
153 goto fail;
154 }
155
156 adm_ctx->reply_dh->minor = d_in->minor;
157 adm_ctx->reply_dh->ret_code = NO_ERROR;
158
159 adm_ctx->volume = VOLUME_UNSPECIFIED;
160 if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
161 struct nlattr *nla;
162 /* parse and validate only */
163 err = drbd_cfg_context_from_attrs(NULL, info);
164 if (err)
165 goto fail;
166
167 /* It was present, and valid,
168 * copy it over to the reply skb. */
169 err = nla_put_nohdr(adm_ctx->reply_skb,
170 info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
171 info->attrs[DRBD_NLA_CFG_CONTEXT]);
172 if (err)
173 goto fail;
174
175 /* and assign stuff to the adm_ctx */
176 nla = nested_attr_tb[__nla_type(T_ctx_volume)];
177 if (nla)
178 adm_ctx->volume = nla_get_u32(nla);
179 nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
180 if (nla)
181 adm_ctx->resource_name = nla_data(nla);
182 adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
183 adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
184 if ((adm_ctx->my_addr &&
185 nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
186 (adm_ctx->peer_addr &&
187 nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
188 err = -EINVAL;
189 goto fail;
190 }
191 }
192
193 adm_ctx->minor = d_in->minor;
194 adm_ctx->device = minor_to_device(d_in->minor);
195
196 /* We are protected by the global genl_lock().
197 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
198 * so make sure this object stays around. */
199 if (adm_ctx->device)
200 kref_get(&adm_ctx->device->kref);
201
202 if (adm_ctx->resource_name) {
203 adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
204 }
205
206 if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
207 drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
208 return ERR_MINOR_INVALID;
209 }
210 if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
211 drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
212 if (adm_ctx->resource_name)
213 return ERR_RES_NOT_KNOWN;
214 return ERR_INVALID_REQUEST;
215 }
216
217 if (flags & DRBD_ADM_NEED_CONNECTION) {
218 if (adm_ctx->resource) {
219 drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
220 return ERR_INVALID_REQUEST;
221 }
222 if (adm_ctx->device) {
223 drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
224 return ERR_INVALID_REQUEST;
225 }
226 if (adm_ctx->my_addr && adm_ctx->peer_addr)
227 adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
228 nla_len(adm_ctx->my_addr),
229 nla_data(adm_ctx->peer_addr),
230 nla_len(adm_ctx->peer_addr));
231 if (!adm_ctx->connection) {
232 drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
233 return ERR_INVALID_REQUEST;
234 }
235 }
236
237 /* some more paranoia, if the request was over-determined */
238 if (adm_ctx->device && adm_ctx->resource &&
239 adm_ctx->device->resource != adm_ctx->resource) {
240 pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
241 adm_ctx->minor, adm_ctx->resource->name,
242 adm_ctx->device->resource->name);
243 drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
244 return ERR_INVALID_REQUEST;
245 }
246 if (adm_ctx->device &&
247 adm_ctx->volume != VOLUME_UNSPECIFIED &&
248 adm_ctx->volume != adm_ctx->device->vnr) {
249 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
250 adm_ctx->minor, adm_ctx->volume,
251 adm_ctx->device->vnr,
252 adm_ctx->device->resource->name);
253 drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
254 return ERR_INVALID_REQUEST;
255 }
256
257 /* still, provide adm_ctx->resource always, if possible. */
258 if (!adm_ctx->resource) {
259 adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
260 : adm_ctx->connection ? adm_ctx->connection->resource : NULL;
261 if (adm_ctx->resource)
262 kref_get(&adm_ctx->resource->kref);
263 }
264
265 return NO_ERROR;
266
267 fail:
268 nlmsg_free(adm_ctx->reply_skb);
269 adm_ctx->reply_skb = NULL;
270 return err;
271 }
272
273 static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
274 struct genl_info *info, int retcode)
275 {
276 if (adm_ctx->device) {
277 kref_put(&adm_ctx->device->kref, drbd_destroy_device);
278 adm_ctx->device = NULL;
279 }
280 if (adm_ctx->connection) {
281 kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
282 adm_ctx->connection = NULL;
283 }
284 if (adm_ctx->resource) {
285 kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
286 adm_ctx->resource = NULL;
287 }
288
289 if (!adm_ctx->reply_skb)
290 return -ENOMEM;
291
292 adm_ctx->reply_dh->ret_code = retcode;
293 drbd_adm_send_reply(adm_ctx->reply_skb, info);
294 return 0;
295 }
296
297 static void setup_khelper_env(struct drbd_connection *connection, char **envp)
298 {
299 char *afs;
300
301 /* FIXME: A future version will not allow this case. */
302 if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
303 return;
304
305 switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
306 case AF_INET6:
307 afs = "ipv6";
308 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
309 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
310 break;
311 case AF_INET:
312 afs = "ipv4";
313 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
314 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
315 break;
316 default:
317 afs = "ssocks";
318 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
319 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
320 }
321 snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
322 }
323
324 int drbd_khelper(struct drbd_device *device, char *cmd)
325 {
326 char *envp[] = { "HOME=/",
327 "TERM=linux",
328 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
329 (char[20]) { }, /* address family */
330 (char[60]) { }, /* address */
331 NULL };
332 char mb[12];
333 char *argv[] = {usermode_helper, cmd, mb, NULL };
334 struct drbd_connection *connection = first_peer_device(device)->connection;
335 struct sib_info sib;
336 int ret;
337
338 if (current == connection->worker.task)
339 set_bit(CALLBACK_PENDING, &connection->flags);
340
341 snprintf(mb, 12, "minor-%d", device_to_minor(device));
342 setup_khelper_env(connection, envp);
343
344 /* The helper may take some time.
345 * write out any unsynced meta data changes now */
346 drbd_md_sync(device);
347
348 drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
349 sib.sib_reason = SIB_HELPER_PRE;
350 sib.helper_name = cmd;
351 drbd_bcast_event(device, &sib);
352 ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
353 if (ret)
354 drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
355 usermode_helper, cmd, mb,
356 (ret >> 8) & 0xff, ret);
357 else
358 drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
359 usermode_helper, cmd, mb,
360 (ret >> 8) & 0xff, ret);
361 sib.sib_reason = SIB_HELPER_POST;
362 sib.helper_exit_code = ret;
363 drbd_bcast_event(device, &sib);
364
365 if (current == connection->worker.task)
366 clear_bit(CALLBACK_PENDING, &connection->flags);
367
368 if (ret < 0) /* Ignore any ERRNOs we got. */
369 ret = 0;
370
371 return ret;
372 }
373
374 static int conn_khelper(struct drbd_connection *connection, char *cmd)
375 {
376 char *envp[] = { "HOME=/",
377 "TERM=linux",
378 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
379 (char[20]) { }, /* address family */
380 (char[60]) { }, /* address */
381 NULL };
382 char *resource_name = connection->resource->name;
383 char *argv[] = {usermode_helper, cmd, resource_name, NULL };
384 int ret;
385
386 setup_khelper_env(connection, envp);
387 conn_md_sync(connection);
388
389 drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
390 /* TODO: conn_bcast_event() ?? */
391
392 ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
393 if (ret)
394 drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
395 usermode_helper, cmd, resource_name,
396 (ret >> 8) & 0xff, ret);
397 else
398 drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
399 usermode_helper, cmd, resource_name,
400 (ret >> 8) & 0xff, ret);
401 /* TODO: conn_bcast_event() ?? */
402
403 if (ret < 0) /* Ignore any ERRNOs we got. */
404 ret = 0;
405
406 return ret;
407 }
408
409 static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
410 {
411 enum drbd_fencing_p fp = FP_NOT_AVAIL;
412 struct drbd_peer_device *peer_device;
413 int vnr;
414
415 rcu_read_lock();
416 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
417 struct drbd_device *device = peer_device->device;
418 if (get_ldev_if_state(device, D_CONSISTENT)) {
419 struct disk_conf *disk_conf =
420 rcu_dereference(peer_device->device->ldev->disk_conf);
421 fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
422 put_ldev(device);
423 }
424 }
425 rcu_read_unlock();
426
427 if (fp == FP_NOT_AVAIL) {
428 /* IO Suspending works on the whole resource.
429 Do it only for one device. */
430 vnr = 0;
431 peer_device = idr_get_next(&connection->peer_devices, &vnr);
432 drbd_change_state(peer_device->device, CS_VERBOSE | CS_HARD, NS(susp_fen, 0));
433 }
434
435 return fp;
436 }
437
438 bool conn_try_outdate_peer(struct drbd_connection *connection)
439 {
440 unsigned int connect_cnt;
441 union drbd_state mask = { };
442 union drbd_state val = { };
443 enum drbd_fencing_p fp;
444 char *ex_to_string;
445 int r;
446
447 spin_lock_irq(&connection->resource->req_lock);
448 if (connection->cstate >= C_WF_REPORT_PARAMS) {
449 drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
450 spin_unlock_irq(&connection->resource->req_lock);
451 return false;
452 }
453
454 connect_cnt = connection->connect_cnt;
455 spin_unlock_irq(&connection->resource->req_lock);
456
457 fp = highest_fencing_policy(connection);
458 switch (fp) {
459 case FP_NOT_AVAIL:
460 drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
461 goto out;
462 case FP_DONT_CARE:
463 return true;
464 default: ;
465 }
466
467 r = conn_khelper(connection, "fence-peer");
468
469 switch ((r>>8) & 0xff) {
470 case 3: /* peer is inconsistent */
471 ex_to_string = "peer is inconsistent or worse";
472 mask.pdsk = D_MASK;
473 val.pdsk = D_INCONSISTENT;
474 break;
475 case 4: /* peer got outdated, or was already outdated */
476 ex_to_string = "peer was fenced";
477 mask.pdsk = D_MASK;
478 val.pdsk = D_OUTDATED;
479 break;
480 case 5: /* peer was down */
481 if (conn_highest_disk(connection) == D_UP_TO_DATE) {
482 /* we will(have) create(d) a new UUID anyways... */
483 ex_to_string = "peer is unreachable, assumed to be dead";
484 mask.pdsk = D_MASK;
485 val.pdsk = D_OUTDATED;
486 } else {
487 ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
488 }
489 break;
490 case 6: /* Peer is primary, voluntarily outdate myself.
491 * This is useful when an unconnected R_SECONDARY is asked to
492 * become R_PRIMARY, but finds the other peer being active. */
493 ex_to_string = "peer is active";
494 drbd_warn(connection, "Peer is primary, outdating myself.\n");
495 mask.disk = D_MASK;
496 val.disk = D_OUTDATED;
497 break;
498 case 7:
499 if (fp != FP_STONITH)
500 drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
501 ex_to_string = "peer was stonithed";
502 mask.pdsk = D_MASK;
503 val.pdsk = D_OUTDATED;
504 break;
505 default:
506 /* The script is broken ... */
507 drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
508 return false; /* Eventually leave IO frozen */
509 }
510
511 drbd_info(connection, "fence-peer helper returned %d (%s)\n",
512 (r>>8) & 0xff, ex_to_string);
513
514 out:
515
516 /* Not using
517 conn_request_state(connection, mask, val, CS_VERBOSE);
518 here, because we might have been able to re-establish the connection in the
519 meantime. */
520 spin_lock_irq(&connection->resource->req_lock);
521 if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
522 if (connection->connect_cnt != connect_cnt)
523 /* In case the connection was established and dropped
524 while the fence-peer handler was running, ignore it */
525 drbd_info(connection, "Ignoring fence-peer exit code\n");
526 else
527 _conn_request_state(connection, mask, val, CS_VERBOSE);
528 }
529 spin_unlock_irq(&connection->resource->req_lock);
530
531 return conn_highest_pdsk(connection) <= D_OUTDATED;
532 }
533
534 static int _try_outdate_peer_async(void *data)
535 {
536 struct drbd_connection *connection = (struct drbd_connection *)data;
537
538 conn_try_outdate_peer(connection);
539
540 kref_put(&connection->kref, drbd_destroy_connection);
541 return 0;
542 }
543
544 void conn_try_outdate_peer_async(struct drbd_connection *connection)
545 {
546 struct task_struct *opa;
547
548 kref_get(&connection->kref);
549 /* We may just have force_sig()'ed this thread
550 * to get it out of some blocking network function.
551 * Clear signals; otherwise kthread_run(), which internally uses
552 * wait_on_completion_killable(), will mistake our pending signal
553 * for a new fatal signal and fail. */
554 flush_signals(current);
555 opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
556 if (IS_ERR(opa)) {
557 drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
558 kref_put(&connection->kref, drbd_destroy_connection);
559 }
560 }
561
562 enum drbd_state_rv
563 drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
564 {
565 struct drbd_peer_device *const peer_device = first_peer_device(device);
566 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
567 const int max_tries = 4;
568 enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
569 struct net_conf *nc;
570 int try = 0;
571 int forced = 0;
572 union drbd_state mask, val;
573
574 if (new_role == R_PRIMARY) {
575 struct drbd_connection *connection;
576
577 /* Detect dead peers as soon as possible. */
578
579 rcu_read_lock();
580 for_each_connection(connection, device->resource)
581 request_ping(connection);
582 rcu_read_unlock();
583 }
584
585 mutex_lock(device->state_mutex);
586
587 mask.i = 0; mask.role = R_MASK;
588 val.i = 0; val.role = new_role;
589
590 while (try++ < max_tries) {
591 rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);
592
593 /* in case we first succeeded in outdating the peer,
594 * but now suddenly could establish a connection */
595 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
596 val.pdsk = 0;
597 mask.pdsk = 0;
598 continue;
599 }
600
601 if (rv == SS_NO_UP_TO_DATE_DISK && force &&
602 (device->state.disk < D_UP_TO_DATE &&
603 device->state.disk >= D_INCONSISTENT)) {
604 mask.disk = D_MASK;
605 val.disk = D_UP_TO_DATE;
606 forced = 1;
607 continue;
608 }
609
610 if (rv == SS_NO_UP_TO_DATE_DISK &&
611 device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
612 D_ASSERT(device, device->state.pdsk == D_UNKNOWN);
613
614 if (conn_try_outdate_peer(connection)) {
615 val.disk = D_UP_TO_DATE;
616 mask.disk = D_MASK;
617 }
618 continue;
619 }
620
621 if (rv == SS_NOTHING_TO_DO)
622 goto out;
623 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
624 if (!conn_try_outdate_peer(connection) && force) {
625 drbd_warn(device, "Forced into split brain situation!\n");
626 mask.pdsk = D_MASK;
627 val.pdsk = D_OUTDATED;
628
629 }
630 continue;
631 }
632 if (rv == SS_TWO_PRIMARIES) {
633 /* Maybe the peer is detected as dead very soon...
634 retry at most once more in this case. */
635 if (try < max_tries) {
636 int timeo;
637 try = max_tries - 1;
638 rcu_read_lock();
639 nc = rcu_dereference(connection->net_conf);
640 timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
641 rcu_read_unlock();
642 schedule_timeout_interruptible(timeo);
643 }
644 continue;
645 }
646 if (rv < SS_SUCCESS) {
647 rv = _drbd_request_state(device, mask, val,
648 CS_VERBOSE + CS_WAIT_COMPLETE);
649 if (rv < SS_SUCCESS)
650 goto out;
651 }
652 break;
653 }
654
655 if (rv < SS_SUCCESS)
656 goto out;
657
658 if (forced)
659 drbd_warn(device, "Forced to consider local data as UpToDate!\n");
660
661 /* Wait until nothing is on the fly :) */
662 wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);
663
664 /* FIXME also wait for all pending P_BARRIER_ACK? */
665
666 if (new_role == R_SECONDARY) {
667 if (get_ldev(device)) {
668 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
669 put_ldev(device);
670 }
671 } else {
672 mutex_lock(&device->resource->conf_update);
673 nc = connection->net_conf;
674 if (nc)
675 nc->discard_my_data = 0; /* without copy; single bit op is atomic */
676 mutex_unlock(&device->resource->conf_update);
677
678 if (get_ldev(device)) {
679 if (((device->state.conn < C_CONNECTED ||
680 device->state.pdsk <= D_FAILED)
681 && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
682 drbd_uuid_new_current(device);
683
684 device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
685 put_ldev(device);
686 }
687 }
688
689 /* writeout of activity log covered areas of the bitmap
690 * to stable storage is already done in the after-state-change work */
691
692 if (device->state.conn >= C_WF_REPORT_PARAMS) {
693 /* if this was forced, we should consider sync */
694 if (forced)
695 drbd_send_uuids(peer_device);
696 drbd_send_current_state(peer_device);
697 }
698
699 drbd_md_sync(device);
700 set_disk_ro(device->vdisk, new_role == R_SECONDARY);
701 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
702 out:
703 mutex_unlock(device->state_mutex);
704 return rv;
705 }
706
707 static const char *from_attrs_err_to_txt(int err)
708 {
709 return err == -ENOMSG ? "required attribute missing" :
710 err == -EOPNOTSUPP ? "unknown mandatory attribute" :
711 err == -EEXIST ? "can not change invariant setting" :
712 "invalid attribute value";
713 }
714
715 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
716 {
717 struct drbd_config_context adm_ctx;
718 struct set_role_parms parms;
719 int err;
720 enum drbd_ret_code retcode;
721
722 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
723 if (!adm_ctx.reply_skb)
724 return retcode;
725 if (retcode != NO_ERROR)
726 goto out;
727
728 memset(&parms, 0, sizeof(parms));
729 if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
730 err = set_role_parms_from_attrs(&parms, info);
731 if (err) {
732 retcode = ERR_MANDATORY_TAG;
733 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
734 goto out;
735 }
736 }
737 genl_unlock();
738 mutex_lock(&adm_ctx.resource->adm_mutex);
739
740 if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
741 retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
742 else
743 retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
744
745 mutex_unlock(&adm_ctx.resource->adm_mutex);
746 genl_lock();
747 out:
748 drbd_adm_finish(&adm_ctx, info, retcode);
749 return 0;
750 }
751
752 /* Initializes the md.*_offset members, so we are able to find
753 * the on disk meta data.
754 *
755 * We currently have two possible layouts:
756 * external:
757 * |----------- md_size_sect ------------------|
758 * [ 4k superblock ][ activity log ][ Bitmap ]
759 * | al_offset == 8 |
760 * | bm_offset = al_offset + X |
761 * ==> bitmap sectors = md_size_sect - bm_offset
762 *
763 * internal:
764 * |----------- md_size_sect ------------------|
765 * [data.....][ Bitmap ][ activity log ][ 4k superblock ]
766 * | al_offset < 0 |
767 * | bm_offset = al_offset - Y |
768 * ==> bitmap sectors = Y = al_offset - bm_offset
769 *
770 * Activity log size used to be fixed 32kB,
771 * but is about to become configurable.
772 */
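/* Illustrative sizing example (an assumption for clarity, not from the
 * original source): with internal meta data and the default 32kB activity
 * log (al_size_sect == 64), a 1 TiB backing device needs roughly 32 MiB of
 * bitmap (one bit per 4kB of data), i.e. about 65536 bitmap sectors; adding
 * the 8 sector superblock and the 64 sector activity log (plus alignment)
 * gives md_size_sect, and al_offset/bm_offset end up negative, counted back
 * from the end of the device. */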
773 static void drbd_md_set_sector_offsets(struct drbd_device *device,
774 struct drbd_backing_dev *bdev)
775 {
776 sector_t md_size_sect = 0;
777 unsigned int al_size_sect = bdev->md.al_size_4k * 8;
778
779 bdev->md.md_offset = drbd_md_ss(bdev);
780
781 switch (bdev->md.meta_dev_idx) {
782 default:
783 /* v07 style fixed size indexed meta data */
784 bdev->md.md_size_sect = MD_128MB_SECT;
785 bdev->md.al_offset = MD_4kB_SECT;
786 bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
787 break;
788 case DRBD_MD_INDEX_FLEX_EXT:
789 /* just occupy the full device; unit: sectors */
790 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
791 bdev->md.al_offset = MD_4kB_SECT;
792 bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
793 break;
794 case DRBD_MD_INDEX_INTERNAL:
795 case DRBD_MD_INDEX_FLEX_INT:
796 /* al size is still fixed */
797 bdev->md.al_offset = -al_size_sect;
798 /* we need (slightly less than) ~ this many bitmap sectors: */
799 md_size_sect = drbd_get_capacity(bdev->backing_bdev);
800 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
801 md_size_sect = BM_SECT_TO_EXT(md_size_sect);
802 md_size_sect = ALIGN(md_size_sect, 8);
803
804 /* plus the "drbd meta data super block",
805 * and the activity log; */
806 md_size_sect += MD_4kB_SECT + al_size_sect;
807
808 bdev->md.md_size_sect = md_size_sect;
809 /* bitmap offset is adjusted by 'super' block size */
810 bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
811 break;
812 }
813 }
814
815 /* input size is expected to be in KB */
816 char *ppsize(char *buf, unsigned long long size)
817 {
818 /* Needs 9 bytes at max including trailing NUL:
819 * -1ULL ==> "16384 EB" */
820 static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
821 int base = 0;
822 while (size >= 10000 && base < sizeof(units)-1) {
823 /* shift + round */
824 size = (size >> 10) + !!(size & (1<<9));
825 base++;
826 }
827 sprintf(buf, "%u %cB", (unsigned)size, units[base]);
828
829 return buf;
830 }
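/* Usage sketch (illustrative, not part of the original source): the input is
 * in KB, and the value is shifted down by 10 bits (with rounding) until it
 * drops below 10000, bumping the unit each time, so for example
 *   ppsize(buf, 1048576ULL)  yields "1024 MB"
 *   ppsize(buf, 10485760ULL) yields "10 GB"
 */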
831
832 /* there is still a theoretical deadlock when called from receiver
833 * on a D_INCONSISTENT R_PRIMARY:
834 * remote READ does inc_ap_bio, receiver would need to receive answer
835 * packet from remote to dec_ap_bio again.
836 * receiver receive_sizes(), comes here,
837 * waits for ap_bio_cnt == 0. -> deadlock.
838 * but this cannot happen, actually, because:
839 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
840 * (not connected, or bad/no disk on peer):
841 * see drbd_fail_request_early, ap_bio_cnt is zero.
842 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
843 * peer may not initiate a resize.
844 */
845 /* Note these are not to be confused with
846 * drbd_adm_suspend_io/drbd_adm_resume_io,
847 * which are (sub) state changes triggered by admin (drbdsetup),
848 * and can be long lived.
849 * This changes a device->flag, is triggered by drbd internals,
850 * and should be short-lived. */
851 void drbd_suspend_io(struct drbd_device *device)
852 {
853 set_bit(SUSPEND_IO, &device->flags);
854 if (drbd_suspended(device))
855 return;
856 wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
857 }
858
859 void drbd_resume_io(struct drbd_device *device)
860 {
861 clear_bit(SUSPEND_IO, &device->flags);
862 wake_up(&device->misc_wait);
863 }
864
865 /**
866 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
867 * @device: DRBD device.
868 *
869 * Returns 0 on success, negative return values indicate errors.
870 * You should call drbd_md_sync() after calling this function.
871 */
872 enum determine_dev_size
873 drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
874 {
875 sector_t prev_first_sect, prev_size; /* previous meta location */
876 sector_t la_size_sect, u_size;
877 struct drbd_md *md = &device->ldev->md;
878 u32 prev_al_stripe_size_4k;
879 u32 prev_al_stripes;
880 sector_t size;
881 char ppb[10];
882 void *buffer;
883
884 int md_moved, la_size_changed;
885 enum determine_dev_size rv = DS_UNCHANGED;
886
887 /* race:
888 * application request passes inc_ap_bio,
889 * but then cannot get an AL-reference.
890 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
891 *
892 * to avoid that:
893 * Suspend IO right here.
894 * still lock the act_log to not trigger ASSERTs there.
895 */
896 drbd_suspend_io(device);
897 buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
898 if (!buffer) {
899 drbd_resume_io(device);
900 return DS_ERROR;
901 }
902
903 /* no wait necessary anymore, actually we could assert that */
904 wait_event(device->al_wait, lc_try_lock(device->act_log));
905
906 prev_first_sect = drbd_md_first_sector(device->ldev);
907 prev_size = device->ldev->md.md_size_sect;
908 la_size_sect = device->ldev->md.la_size_sect;
909
910 if (rs) {
911 /* rs is non NULL if we should change the AL layout only */
912
913 prev_al_stripes = md->al_stripes;
914 prev_al_stripe_size_4k = md->al_stripe_size_4k;
915
916 md->al_stripes = rs->al_stripes;
917 md->al_stripe_size_4k = rs->al_stripe_size / 4;
918 md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
919 }
920
921 drbd_md_set_sector_offsets(device, device->ldev);
922
923 rcu_read_lock();
924 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
925 rcu_read_unlock();
926 size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);
927
928 if (size < la_size_sect) {
929 if (rs && u_size == 0) {
930 /* Remove "rs &&" later. This check should always be active, but
931 right now the receiver expects the permissive behavior */
932 drbd_warn(device, "Implicit shrink not allowed. "
933 "Use --size=%llus for explicit shrink.\n",
934 (unsigned long long)size);
935 rv = DS_ERROR_SHRINK;
936 }
937 if (u_size > size)
938 rv = DS_ERROR_SPACE_MD;
939 if (rv != DS_UNCHANGED)
940 goto err_out;
941 }
942
943 if (drbd_get_capacity(device->this_bdev) != size ||
944 drbd_bm_capacity(device) != size) {
945 int err;
946 err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
947 if (unlikely(err)) {
948 /* currently there is only one error: ENOMEM! */
949 size = drbd_bm_capacity(device)>>1;
950 if (size == 0) {
951 drbd_err(device, "OUT OF MEMORY! "
952 "Could not allocate bitmap!\n");
953 } else {
954 drbd_err(device, "BM resizing failed. "
955 "Leaving size unchanged at size = %lu KB\n",
956 (unsigned long)size);
957 }
958 rv = DS_ERROR;
959 }
960 /* racy, see comments above. */
961 drbd_set_my_capacity(device, size);
962 device->ldev->md.la_size_sect = size;
963 drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
964 (unsigned long long)size>>1);
965 }
966 if (rv <= DS_ERROR)
967 goto err_out;
968
969 la_size_changed = (la_size_sect != device->ldev->md.la_size_sect);
970
971 md_moved = prev_first_sect != drbd_md_first_sector(device->ldev)
972 || prev_size != device->ldev->md.md_size_sect;
973
974 if (la_size_changed || md_moved || rs) {
975 u32 prev_flags;
976
977 /* We do some synchronous IO below, which may take some time.
978 * Clear the timer, to avoid scary "timer expired!" messages;
979 * the "Superblock" is written out at least twice below anyway. */
980 del_timer(&device->md_sync_timer);
981 drbd_al_shrink(device); /* All extents inactive. */
982
983 prev_flags = md->flags;
984 md->flags &= ~MDF_PRIMARY_IND;
985 drbd_md_write(device, buffer);
986
987 drbd_info(device, "Writing the whole bitmap, %s\n",
988 la_size_changed && md_moved ? "size changed and md moved" :
989 la_size_changed ? "size changed" : "md moved");
990 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
991 drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
992 "size changed", BM_LOCKED_MASK);
993 drbd_initialize_al(device, buffer);
994
995 md->flags = prev_flags;
996 drbd_md_write(device, buffer);
997
998 if (rs)
999 drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
1000 md->al_stripes, md->al_stripe_size_4k * 4);
1001 }
1002
1003 if (size > la_size_sect)
1004 rv = la_size_sect ? DS_GREW : DS_GREW_FROM_ZERO;
1005 if (size < la_size_sect)
1006 rv = DS_SHRUNK;
1007
1008 if (0) {
1009 err_out:
1010 if (rs) {
1011 md->al_stripes = prev_al_stripes;
1012 md->al_stripe_size_4k = prev_al_stripe_size_4k;
1013 md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k;
1014
1015 drbd_md_set_sector_offsets(device, device->ldev);
1016 }
1017 }
1018 lc_unlock(device->act_log);
1019 wake_up(&device->al_wait);
1020 drbd_md_put_buffer(device);
1021 drbd_resume_io(device);
1022
1023 return rv;
1024 }
1025
1026 sector_t
1027 drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
1028 sector_t u_size, int assume_peer_has_space)
1029 {
1030 sector_t p_size = device->p_size; /* partner's disk size. */
1031 sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
1032 sector_t m_size; /* my size */
1033 sector_t size = 0;
1034
1035 m_size = drbd_get_max_capacity(bdev);
1036
1037 if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
1038 drbd_warn(device, "Resize while not connected was forced by the user!\n");
1039 p_size = m_size;
1040 }
1041
1042 if (p_size && m_size) {
1043 size = min_t(sector_t, p_size, m_size);
1044 } else {
1045 if (la_size_sect) {
1046 size = la_size_sect;
1047 if (m_size && m_size < size)
1048 size = m_size;
1049 if (p_size && p_size < size)
1050 size = p_size;
1051 } else {
1052 if (m_size)
1053 size = m_size;
1054 if (p_size)
1055 size = p_size;
1056 }
1057 }
1058
1059 if (size == 0)
1060 drbd_err(device, "Both nodes diskless!\n");
1061
1062 if (u_size) {
1063 if (u_size > size)
1064 drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
1065 (unsigned long)u_size>>1, (unsigned long)size>>1);
1066 else
1067 size = u_size;
1068 }
1069
1070 return size;
1071 }
1072
1073 /**
1074 * drbd_check_al_size() - Ensures that the AL is of the right size
1075 * @device: DRBD device.
1076 *
1077 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
1078 * failed, and 0 on success. You should call drbd_md_sync() after you called
1079 * this function.
1080 */
1081 static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
1082 {
1083 struct lru_cache *n, *t;
1084 struct lc_element *e;
1085 unsigned int in_use;
1086 int i;
1087
1088 if (device->act_log &&
1089 device->act_log->nr_elements == dc->al_extents)
1090 return 0;
1091
1092 in_use = 0;
1093 t = device->act_log;
1094 n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
1095 dc->al_extents, sizeof(struct lc_element), 0);
1096
1097 if (n == NULL) {
1098 drbd_err(device, "Cannot allocate act_log lru!\n");
1099 return -ENOMEM;
1100 }
1101 spin_lock_irq(&device->al_lock);
1102 if (t) {
1103 for (i = 0; i < t->nr_elements; i++) {
1104 e = lc_element_by_index(t, i);
1105 if (e->refcnt)
1106 drbd_err(device, "refcnt(%d)==%d\n",
1107 e->lc_number, e->refcnt);
1108 in_use += e->refcnt;
1109 }
1110 }
1111 if (!in_use)
1112 device->act_log = n;
1113 spin_unlock_irq(&device->al_lock);
1114 if (in_use) {
1115 drbd_err(device, "Activity log still in use!\n");
1116 lc_destroy(n);
1117 return -EBUSY;
1118 } else {
1119 if (t)
1120 lc_destroy(t);
1121 }
1122 drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
1123 return 0;
1124 }
1125
1126 static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
1127 unsigned int max_bio_size)
1128 {
1129 struct request_queue * const q = device->rq_queue;
1130 unsigned int max_hw_sectors = max_bio_size >> 9;
1131 unsigned int max_segments = 0;
1132 struct request_queue *b = NULL;
1133
1134 if (bdev) {
1135 b = bdev->backing_bdev->bd_disk->queue;
1136
1137 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
1138 rcu_read_lock();
1139 max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
1140 rcu_read_unlock();
1141
1142 blk_set_stacking_limits(&q->limits);
1143 blk_queue_max_write_same_sectors(q, 0);
1144 }
1145
1146 blk_queue_logical_block_size(q, 512);
1147 blk_queue_max_hw_sectors(q, max_hw_sectors);
1148 /* This is the workaround for "bio would need to, but cannot, be split" */
1149 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
1150 blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
1151
1152 if (b) {
1153 struct drbd_connection *connection = first_peer_device(device)->connection;
1154
1155 if (blk_queue_discard(b) &&
1156 (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) {
1157 /* For now, don't allow more than one activity log extent worth of data
1158 * to be discarded in one go. We may need to rework drbd_al_begin_io()
1159 * to allow for even larger discard ranges */
1160 blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS);
1161
1162 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1163 /* REALLY? Is stacking secdiscard "legal"? */
1164 if (blk_queue_secdiscard(b))
1165 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
1166 } else {
1167 blk_queue_max_discard_sectors(q, 0);
1168 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
1169 queue_flag_clear_unlocked(QUEUE_FLAG_SECDISCARD, q);
1170 }
1171
1172 blk_queue_stack_limits(q, b);
1173
1174 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
1175 drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
1176 q->backing_dev_info.ra_pages,
1177 b->backing_dev_info.ra_pages);
1178 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
1179 }
1180 }
1181 }
1182
1183 void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev)
1184 {
1185 unsigned int now, new, local, peer;
1186
1187 now = queue_max_hw_sectors(device->rq_queue) << 9;
1188 local = device->local_max_bio_size; /* Possibly the last known value, from volatile memory */
1189 peer = device->peer_max_bio_size; /* Possibly the last known value, from meta data */
1190
1191 if (bdev) {
1192 local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
1193 device->local_max_bio_size = local;
1194 }
1195 local = min(local, DRBD_MAX_BIO_SIZE);
1196
1197 /* We may ignore peer limits if the peer is modern enough.
1198 From 8.3.8 onwards the peer can use multiple
1199 BIOs for a single peer_request */
1200 if (device->state.conn >= C_WF_REPORT_PARAMS) {
1201 if (first_peer_device(device)->connection->agreed_pro_version < 94)
1202 peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
1203 /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
1204 else if (first_peer_device(device)->connection->agreed_pro_version == 94)
1205 peer = DRBD_MAX_SIZE_H80_PACKET;
1206 else if (first_peer_device(device)->connection->agreed_pro_version < 100)
1207 peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
1208 else
1209 peer = DRBD_MAX_BIO_SIZE;
1210
1211 /* We may later detach and re-attach on a disconnected Primary.
1212 * Avoid this setting to jump back in that case.
1213 * We want to store what we know the peer DRBD can handle,
1214 * not what the peer IO backend can handle. */
1215 if (peer > device->peer_max_bio_size)
1216 device->peer_max_bio_size = peer;
1217 }
1218 new = min(local, peer);
1219
1220 if (device->state.role == R_PRIMARY && new < now)
1221 drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
1222
1223 if (new != now)
1224 drbd_info(device, "max BIO size = %u\n", new);
1225
1226 drbd_setup_queue_param(device, bdev, new);
1227 }
1228
1229 /* Starts the worker thread */
1230 static void conn_reconfig_start(struct drbd_connection *connection)
1231 {
1232 drbd_thread_start(&connection->worker);
1233 drbd_flush_workqueue(&connection->sender_work);
1234 }
1235
1236 /* if still unconfigured, stops worker again. */
1237 static void conn_reconfig_done(struct drbd_connection *connection)
1238 {
1239 bool stop_threads;
1240 spin_lock_irq(&connection->resource->req_lock);
1241 stop_threads = conn_all_vols_unconf(connection) &&
1242 connection->cstate == C_STANDALONE;
1243 spin_unlock_irq(&connection->resource->req_lock);
1244 if (stop_threads) {
1245 /* asender is implicitly stopped by receiver
1246 * in conn_disconnect() */
1247 drbd_thread_stop(&connection->receiver);
1248 drbd_thread_stop(&connection->worker);
1249 }
1250 }
1251
1252 /* Make sure IO is suspended before calling this function. */
1253 static void drbd_suspend_al(struct drbd_device *device)
1254 {
1255 int s = 0;
1256
1257 if (!lc_try_lock(device->act_log)) {
1258 drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
1259 return;
1260 }
1261
1262 drbd_al_shrink(device);
1263 spin_lock_irq(&device->resource->req_lock);
1264 if (device->state.conn < C_CONNECTED)
1265 s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
1266 spin_unlock_irq(&device->resource->req_lock);
1267 lc_unlock(device->act_log);
1268
1269 if (s)
1270 drbd_info(device, "Suspended AL updates\n");
1271 }
1272
1273
1274 static bool should_set_defaults(struct genl_info *info)
1275 {
1276 unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
1277 return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
1278 }
1279
1280 static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
1281 {
1282 /* This is limited by 16 bit "slot" numbers,
1283 * and by available on-disk context storage.
1284 *
1285 * Also (u16)~0 is special (denotes a "free" extent).
1286 *
1287 * One transaction occupies one 4kB on-disk block,
1288 * we have n such blocks in the on disk ring buffer,
1289 * the "current" transaction may fail (n-1),
1290 * and there are 919 context slot numbers per transaction.
1291 *
1292 * 72 transaction blocks amount to more than 2**16 context slots,
1293 * so cap there first.
1294 */
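/* Worked example (illustrative, assuming the historical default of a
 * single 32kB stripe, i.e. al_size_4k == 8): one block is reserved for the
 * in-flight transaction, so (8 - 1) * 919 = 6433 context slots remain
 * usable. */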
1295 const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
1296 const unsigned int sufficient_on_disk =
1297 (max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
1298 /AL_CONTEXT_PER_TRANSACTION;
1299
1300 unsigned int al_size_4k = bdev->md.al_size_4k;
1301
1302 if (al_size_4k > sufficient_on_disk)
1303 return max_al_nr;
1304
1305 return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
1306 }
1307
1308 static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
1309 {
1310 return a->disk_barrier != b->disk_barrier ||
1311 a->disk_flushes != b->disk_flushes ||
1312 a->disk_drain != b->disk_drain;
1313 }
1314
1315 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1316 {
1317 struct drbd_config_context adm_ctx;
1318 enum drbd_ret_code retcode;
1319 struct drbd_device *device;
1320 struct disk_conf *new_disk_conf, *old_disk_conf;
1321 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
1322 int err, fifo_size;
1323
1324 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1325 if (!adm_ctx.reply_skb)
1326 return retcode;
1327 if (retcode != NO_ERROR)
1328 goto finish;
1329
1330 device = adm_ctx.device;
1331 mutex_lock(&adm_ctx.resource->adm_mutex);
1332
1333 /* we also need a disk
1334 * to change the options on */
1335 if (!get_ldev(device)) {
1336 retcode = ERR_NO_DISK;
1337 goto out;
1338 }
1339
1340 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
1341 if (!new_disk_conf) {
1342 retcode = ERR_NOMEM;
1343 goto fail;
1344 }
1345
1346 mutex_lock(&device->resource->conf_update);
1347 old_disk_conf = device->ldev->disk_conf;
1348 *new_disk_conf = *old_disk_conf;
1349 if (should_set_defaults(info))
1350 set_disk_conf_defaults(new_disk_conf);
1351
1352 err = disk_conf_from_attrs_for_change(new_disk_conf, info);
1353 if (err && err != -ENOMSG) {
1354 retcode = ERR_MANDATORY_TAG;
1355 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1356 goto fail_unlock;
1357 }
1358
1359 if (!expect(new_disk_conf->resync_rate >= 1))
1360 new_disk_conf->resync_rate = 1;
1361
1362 if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
1363 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1364 if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev))
1365 new_disk_conf->al_extents = drbd_al_extents_max(device->ldev);
1366
1367 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1368 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1369
1370 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1371 if (fifo_size != device->rs_plan_s->size) {
1372 new_plan = fifo_alloc(fifo_size);
1373 if (!new_plan) {
1374 drbd_err(device, "kmalloc of fifo_buffer failed");
1375 retcode = ERR_NOMEM;
1376 goto fail_unlock;
1377 }
1378 }
1379
1380 drbd_suspend_io(device);
1381 wait_event(device->al_wait, lc_try_lock(device->act_log));
1382 drbd_al_shrink(device);
1383 err = drbd_check_al_size(device, new_disk_conf);
1384 lc_unlock(device->act_log);
1385 wake_up(&device->al_wait);
1386 drbd_resume_io(device);
1387
1388 if (err) {
1389 retcode = ERR_NOMEM;
1390 goto fail_unlock;
1391 }
1392
1393 write_lock_irq(&global_state_lock);
1394 retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1395 if (retcode == NO_ERROR) {
1396 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
1397 drbd_resync_after_changed(device);
1398 }
1399 write_unlock_irq(&global_state_lock);
1400
1401 if (retcode != NO_ERROR)
1402 goto fail_unlock;
1403
1404 if (new_plan) {
1405 old_plan = device->rs_plan_s;
1406 rcu_assign_pointer(device->rs_plan_s, new_plan);
1407 }
1408
1409 mutex_unlock(&device->resource->conf_update);
1410
1411 if (new_disk_conf->al_updates)
1412 device->ldev->md.flags &= ~MDF_AL_DISABLED;
1413 else
1414 device->ldev->md.flags |= MDF_AL_DISABLED;
1415
1416 if (new_disk_conf->md_flushes)
1417 clear_bit(MD_NO_FUA, &device->flags);
1418 else
1419 set_bit(MD_NO_FUA, &device->flags);
1420
1421 if (write_ordering_changed(old_disk_conf, new_disk_conf))
1422 drbd_bump_write_ordering(device->resource, NULL, WO_bdev_flush);
1423
1424 drbd_md_sync(device);
1425
1426 if (device->state.conn >= C_CONNECTED) {
1427 struct drbd_peer_device *peer_device;
1428
1429 for_each_peer_device(peer_device, device)
1430 drbd_send_sync_param(peer_device);
1431 }
1432
1433 synchronize_rcu();
1434 kfree(old_disk_conf);
1435 kfree(old_plan);
1436 mod_timer(&device->request_timer, jiffies + HZ);
1437 goto success;
1438
1439 fail_unlock:
1440 mutex_unlock(&device->resource->conf_update);
1441 fail:
1442 kfree(new_disk_conf);
1443 kfree(new_plan);
1444 success:
1445 put_ldev(device);
1446 out:
1447 mutex_unlock(&adm_ctx.resource->adm_mutex);
1448 finish:
1449 drbd_adm_finish(&adm_ctx, info, retcode);
1450 return 0;
1451 }
1452
1453 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1454 {
1455 struct drbd_config_context adm_ctx;
1456 struct drbd_device *device;
1457 struct drbd_peer_device *peer_device;
1458 struct drbd_connection *connection;
1459 int err;
1460 enum drbd_ret_code retcode;
1461 enum determine_dev_size dd;
1462 sector_t max_possible_sectors;
1463 sector_t min_md_device_sectors;
1464 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1465 struct disk_conf *new_disk_conf = NULL;
1466 struct block_device *bdev;
1467 struct lru_cache *resync_lru = NULL;
1468 struct fifo_buffer *new_plan = NULL;
1469 union drbd_state ns, os;
1470 enum drbd_state_rv rv;
1471 struct net_conf *nc;
1472
1473 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1474 if (!adm_ctx.reply_skb)
1475 return retcode;
1476 if (retcode != NO_ERROR)
1477 goto finish;
1478
1479 device = adm_ctx.device;
1480 mutex_lock(&adm_ctx.resource->adm_mutex);
1481 peer_device = first_peer_device(device);
1482 connection = peer_device ? peer_device->connection : NULL;
1483 conn_reconfig_start(connection);
1484
1485 /* if you want to reconfigure, please tear down first */
1486 if (device->state.disk > D_DISKLESS) {
1487 retcode = ERR_DISK_CONFIGURED;
1488 goto fail;
1489 }
1490 /* It may just now have detached because of IO error. Make sure
1491 * drbd_ldev_destroy is done already, we may end up here very fast,
1492 * e.g. if someone calls attach from the on-io-error handler,
1493 * to realize a "hot spare" feature (not that I'd recommend that) */
1494 wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
1495
1496 /* make sure there is no leftover from previous force-detach attempts */
1497 clear_bit(FORCE_DETACH, &device->flags);
1498 clear_bit(WAS_IO_ERROR, &device->flags);
1499 clear_bit(WAS_READ_ERROR, &device->flags);
1500
1501 /* and no leftover from previously aborted resync or verify, either */
1502 device->rs_total = 0;
1503 device->rs_failed = 0;
1504 atomic_set(&device->rs_pending_cnt, 0);
1505
1506 /* allocation not in the IO path, drbdsetup context */
1507 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1508 if (!nbc) {
1509 retcode = ERR_NOMEM;
1510 goto fail;
1511 }
1512 spin_lock_init(&nbc->md.uuid_lock);
1513
1514 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1515 if (!new_disk_conf) {
1516 retcode = ERR_NOMEM;
1517 goto fail;
1518 }
1519 nbc->disk_conf = new_disk_conf;
1520
1521 set_disk_conf_defaults(new_disk_conf);
1522 err = disk_conf_from_attrs(new_disk_conf, info);
1523 if (err) {
1524 retcode = ERR_MANDATORY_TAG;
1525 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1526 goto fail;
1527 }
1528
1529 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1530 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1531
1532 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1533 if (!new_plan) {
1534 retcode = ERR_NOMEM;
1535 goto fail;
1536 }
1537
1538 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1539 retcode = ERR_MD_IDX_INVALID;
1540 goto fail;
1541 }
1542
1543 write_lock_irq(&global_state_lock);
1544 retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1545 write_unlock_irq(&global_state_lock);
1546 if (retcode != NO_ERROR)
1547 goto fail;
1548
1549 rcu_read_lock();
1550 nc = rcu_dereference(connection->net_conf);
1551 if (nc) {
1552 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1553 rcu_read_unlock();
1554 retcode = ERR_STONITH_AND_PROT_A;
1555 goto fail;
1556 }
1557 }
1558 rcu_read_unlock();
1559
1560 bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
1561 FMODE_READ | FMODE_WRITE | FMODE_EXCL, device);
1562 if (IS_ERR(bdev)) {
1563 drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
1564 PTR_ERR(bdev));
1565 retcode = ERR_OPEN_DISK;
1566 goto fail;
1567 }
1568 nbc->backing_bdev = bdev;
1569
1570 /*
1571 * meta_dev_idx >= 0: external fixed size, possibly multiple
1572 * drbd sharing one meta device. TODO in that case, paranoia
1573 * check that [md_bdev, meta_dev_idx] is not yet used by some
1574 * other drbd minor! (if you use drbd.conf + drbdadm, that
1575 * should check it for you already; but if you don't, or
1576 * someone fooled it, we need to double check here)
1577 */
1578 bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
1579 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1580 (new_disk_conf->meta_dev_idx < 0) ?
1581 (void *)device : (void *)drbd_m_holder);
1582 if (IS_ERR(bdev)) {
1583 drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
1584 PTR_ERR(bdev));
1585 retcode = ERR_OPEN_MD_DISK;
1586 goto fail;
1587 }
1588 nbc->md_bdev = bdev;
1589
1590 if ((nbc->backing_bdev == nbc->md_bdev) !=
1591 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1592 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1593 retcode = ERR_MD_IDX_INVALID;
1594 goto fail;
1595 }
1596
1597 resync_lru = lc_create("resync", drbd_bm_ext_cache,
1598 1, 61, sizeof(struct bm_extent),
1599 offsetof(struct bm_extent, lce));
1600 if (!resync_lru) {
1601 retcode = ERR_NOMEM;
1602 goto fail;
1603 }
1604
1605 /* Read our meta data super block early.
1606 * This also sets other on-disk offsets. */
1607 retcode = drbd_md_read(device, nbc);
1608 if (retcode != NO_ERROR)
1609 goto fail;
1610
1611 if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
1612 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1613 if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
1614 new_disk_conf->al_extents = drbd_al_extents_max(nbc);
1615
1616 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1617 drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
1618 (unsigned long long) drbd_get_max_capacity(nbc),
1619 (unsigned long long) new_disk_conf->disk_size);
1620 retcode = ERR_DISK_TOO_SMALL;
1621 goto fail;
1622 }
1623
1624 if (new_disk_conf->meta_dev_idx < 0) {
1625 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1626 /* at least one MB, otherwise it does not make sense */
1627 min_md_device_sectors = (2<<10);
1628 } else {
1629 max_possible_sectors = DRBD_MAX_SECTORS;
1630 min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
1631 }
1632
1633 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1634 retcode = ERR_MD_DISK_TOO_SMALL;
1635 drbd_warn(device, "refusing attach: md-device too small, "
1636 "at least %llu sectors needed for this meta-disk type\n",
1637 (unsigned long long) min_md_device_sectors);
1638 goto fail;
1639 }
1640
1641 /* Make sure the new disk is big enough
1642 * (we may currently be R_PRIMARY with no local disk...) */
1643 if (drbd_get_max_capacity(nbc) <
1644 drbd_get_capacity(device->this_bdev)) {
1645 retcode = ERR_DISK_TOO_SMALL;
1646 goto fail;
1647 }
1648
1649 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1650
1651 if (nbc->known_size > max_possible_sectors) {
1652 drbd_warn(device, "==> truncating very big lower level device "
1653 "to currently maximum possible %llu sectors <==\n",
1654 (unsigned long long) max_possible_sectors);
1655 if (new_disk_conf->meta_dev_idx >= 0)
1656 drbd_warn(device, "==>> using internal or flexible "
1657 "meta data may help <<==\n");
1658 }
1659
1660 drbd_suspend_io(device);
1661 /* also wait for the last barrier ack. */
1662 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1663 * We need a way to either ignore barrier acks for barriers sent before a device
1664 * was attached, or a way to wait for all pending barrier acks to come in.
1665 * As barriers are counted per resource,
1666 * we'd need to suspend io on all devices of a resource.
1667 */
1668 wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
1669 /* and for any other previously queued work */
1670 drbd_flush_workqueue(&connection->sender_work);
1671
1672 rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
1673 retcode = rv; /* FIXME: Type mismatch. */
1674 drbd_resume_io(device);
1675 if (rv < SS_SUCCESS)
1676 goto fail;
1677
1678 if (!get_ldev_if_state(device, D_ATTACHING))
1679 goto force_diskless;
1680
1681 if (!device->bitmap) {
1682 if (drbd_bm_init(device)) {
1683 retcode = ERR_NOMEM;
1684 goto force_diskless_dec;
1685 }
1686 }
1687
1688 if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
1689 (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
1690 (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1691 drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
1692 (unsigned long long)device->ed_uuid);
1693 retcode = ERR_DATA_NOT_CURRENT;
1694 goto force_diskless_dec;
1695 }
1696
1697 /* Since we are diskless, fix the activity log first... */
1698 if (drbd_check_al_size(device, new_disk_conf)) {
1699 retcode = ERR_NOMEM;
1700 goto force_diskless_dec;
1701 }
1702
1703 	/* Prevent shrinking of consistent devices! */
1704 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1705 drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
1706 drbd_warn(device, "refusing to truncate a consistent device\n");
1707 retcode = ERR_DISK_TOO_SMALL;
1708 goto force_diskless_dec;
1709 }
1710
1711 /* Reset the "barriers don't work" bits here, then force meta data to
1712 * be written, to ensure we determine if barriers are supported. */
1713 if (new_disk_conf->md_flushes)
1714 clear_bit(MD_NO_FUA, &device->flags);
1715 else
1716 set_bit(MD_NO_FUA, &device->flags);
1717
1718 /* Point of no return reached.
1719 	 * Devices and memory are no longer released by the error cleanup below.
1720 	 * From now on the device takes over responsibility, and the state engine
1721 	 * will clean them up later. */
1722 D_ASSERT(device, device->ldev == NULL);
1723 device->ldev = nbc;
1724 device->resync = resync_lru;
1725 device->rs_plan_s = new_plan;
1726 nbc = NULL;
1727 resync_lru = NULL;
1728 new_disk_conf = NULL;
1729 new_plan = NULL;
1730
1731 drbd_bump_write_ordering(device->resource, device->ldev, WO_bdev_flush);
1732
1733 if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
1734 set_bit(CRASHED_PRIMARY, &device->flags);
1735 else
1736 clear_bit(CRASHED_PRIMARY, &device->flags);
1737
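	/* If the meta data claims we were Primary, but we are not a currently
	 * suspended Primary (susp_nod), assume we crashed while Primary. */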
1738 if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
1739 !(device->state.role == R_PRIMARY && device->resource->susp_nod))
1740 set_bit(CRASHED_PRIMARY, &device->flags);
1741
1742 device->send_cnt = 0;
1743 device->recv_cnt = 0;
1744 device->read_cnt = 0;
1745 device->writ_cnt = 0;
1746
1747 drbd_reconsider_max_bio_size(device, device->ldev);
1748
1749 /* If I am currently not R_PRIMARY,
1750 * but meta data primary indicator is set,
1751 * I just now recover from a hard crash,
1752 * and have been R_PRIMARY before that crash.
1753 *
1754 * Now, if I had no connection before that crash
1755 * (have been degraded R_PRIMARY), chances are that
1756 * I won't find my peer now either.
1757 *
1758 * In that case, and _only_ in that case,
1759 * we use the degr-wfc-timeout instead of the default,
1760 * so we can automatically recover from a crash of a
1761 * degraded but active "cluster" after a certain timeout.
1762 */
1763 clear_bit(USE_DEGR_WFC_T, &device->flags);
1764 if (device->state.role != R_PRIMARY &&
1765 drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
1766 !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
1767 set_bit(USE_DEGR_WFC_T, &device->flags);
1768
1769 dd = drbd_determine_dev_size(device, 0, NULL);
1770 if (dd <= DS_ERROR) {
1771 retcode = ERR_NOMEM_BITMAP;
1772 goto force_diskless_dec;
1773 } else if (dd == DS_GREW)
1774 set_bit(RESYNC_AFTER_NEG, &device->flags);
1775
1776 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
1777 (test_bit(CRASHED_PRIMARY, &device->flags) &&
1778 drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
1779 drbd_info(device, "Assuming that all blocks are out of sync "
1780 "(aka FullSync)\n");
1781 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
1782 "set_n_write from attaching", BM_LOCKED_MASK)) {
1783 retcode = ERR_IO_MD_DISK;
1784 goto force_diskless_dec;
1785 }
1786 } else {
1787 if (drbd_bitmap_io(device, &drbd_bm_read,
1788 "read from attaching", BM_LOCKED_MASK)) {
1789 retcode = ERR_IO_MD_DISK;
1790 goto force_diskless_dec;
1791 }
1792 }
1793
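	/* If every bit is set anyway (full sync pending), per-extent activity
	 * log tracking adds nothing; suspend it (see drbd_suspend_al()). */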
1794 if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
1795 drbd_suspend_al(device); /* IO is still suspended here... */
1796
1797 spin_lock_irq(&device->resource->req_lock);
1798 os = drbd_read_state(device);
1799 ns = os;
1800 /* If MDF_CONSISTENT is not set go into inconsistent state,
1801 	   otherwise investigate MDF_WAS_UP_TO_DATE...
1802 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1803 otherwise into D_CONSISTENT state.
1804 */
1805 if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
1806 if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
1807 ns.disk = D_CONSISTENT;
1808 else
1809 ns.disk = D_OUTDATED;
1810 } else {
1811 ns.disk = D_INCONSISTENT;
1812 }
1813
1814 if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
1815 ns.pdsk = D_OUTDATED;
1816
1817 rcu_read_lock();
1818 if (ns.disk == D_CONSISTENT &&
1819 (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
1820 ns.disk = D_UP_TO_DATE;
1821
1822 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1823 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1824 this point, because drbd_request_state() modifies these
1825 flags. */
1826
1827 if (rcu_dereference(device->ldev->disk_conf)->al_updates)
1828 device->ldev->md.flags &= ~MDF_AL_DISABLED;
1829 else
1830 device->ldev->md.flags |= MDF_AL_DISABLED;
1831
1832 rcu_read_unlock();
1833
1834 	/* In case we are C_CONNECTED, postpone any decision on the new disk
1835 	   state until after the negotiation phase. */
1836 if (device->state.conn == C_CONNECTED) {
1837 device->new_state_tmp.i = ns.i;
1838 ns.i = os.i;
1839 ns.disk = D_NEGOTIATING;
1840
1841 /* We expect to receive up-to-date UUIDs soon.
1842 To avoid a race in receive_state, free p_uuid while
1843 holding req_lock. I.e. atomic with the state change */
1844 kfree(device->p_uuid);
1845 device->p_uuid = NULL;
1846 }
1847
1848 rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
1849 spin_unlock_irq(&device->resource->req_lock);
1850
1851 if (rv < SS_SUCCESS)
1852 goto force_diskless_dec;
1853
1854 mod_timer(&device->request_timer, jiffies + HZ);
1855
1856 if (device->state.role == R_PRIMARY)
1857 device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
1858 else
1859 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1860
1861 drbd_md_mark_dirty(device);
1862 drbd_md_sync(device);
1863
1864 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
1865 put_ldev(device);
1866 conn_reconfig_done(connection);
1867 mutex_unlock(&adm_ctx.resource->adm_mutex);
1868 drbd_adm_finish(&adm_ctx, info, retcode);
1869 return 0;
1870
1871 force_diskless_dec:
1872 put_ldev(device);
1873 force_diskless:
1874 drbd_force_state(device, NS(disk, D_DISKLESS));
1875 drbd_md_sync(device);
1876 fail:
1877 conn_reconfig_done(connection);
1878 if (nbc) {
1879 if (nbc->backing_bdev)
1880 blkdev_put(nbc->backing_bdev,
1881 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1882 if (nbc->md_bdev)
1883 blkdev_put(nbc->md_bdev,
1884 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1885 kfree(nbc);
1886 }
1887 kfree(new_disk_conf);
1888 lc_destroy(resync_lru);
1889 kfree(new_plan);
1890 mutex_unlock(&adm_ctx.resource->adm_mutex);
1891 finish:
1892 drbd_adm_finish(&adm_ctx, info, retcode);
1893 return 0;
1894 }
1895
1896 static int adm_detach(struct drbd_device *device, int force)
1897 {
1898 enum drbd_state_rv retcode;
1899 int ret;
1900
1901 if (force) {
1902 set_bit(FORCE_DETACH, &device->flags);
1903 drbd_force_state(device, NS(disk, D_FAILED));
1904 retcode = SS_SUCCESS;
1905 goto out;
1906 }
1907
1908 drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
1909 drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */
1910 retcode = drbd_request_state(device, NS(disk, D_FAILED));
1911 drbd_md_put_buffer(device);
1912 /* D_FAILED will transition to DISKLESS. */
1913 ret = wait_event_interruptible(device->misc_wait,
1914 device->state.disk != D_FAILED);
1915 drbd_resume_io(device);
1916 if ((int)retcode == (int)SS_IS_DISKLESS)
1917 retcode = SS_NOTHING_TO_DO;
1918 if (ret)
1919 retcode = ERR_INTR;
1920 out:
1921 return retcode;
1922 }
1923
1924 /* Detaching the disk is a process in multiple stages. First we need to lock
1925 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1926 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1927 * internal references as well.
1928 * Only then we have finally detached. */
1929 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1930 {
1931 struct drbd_config_context adm_ctx;
1932 enum drbd_ret_code retcode;
1933 struct detach_parms parms = { };
1934 int err;
1935
1936 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1937 if (!adm_ctx.reply_skb)
1938 return retcode;
1939 if (retcode != NO_ERROR)
1940 goto out;
1941
1942 if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
1943 err = detach_parms_from_attrs(&parms, info);
1944 if (err) {
1945 retcode = ERR_MANDATORY_TAG;
1946 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1947 goto out;
1948 }
1949 }
1950
1951 mutex_lock(&adm_ctx.resource->adm_mutex);
1952 retcode = adm_detach(adm_ctx.device, parms.force_detach);
1953 mutex_unlock(&adm_ctx.resource->adm_mutex);
1954 out:
1955 drbd_adm_finish(&adm_ctx, info, retcode);
1956 return 0;
1957 }
1958
1959 static bool conn_resync_running(struct drbd_connection *connection)
1960 {
1961 struct drbd_peer_device *peer_device;
1962 bool rv = false;
1963 int vnr;
1964
1965 rcu_read_lock();
1966 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1967 struct drbd_device *device = peer_device->device;
1968 if (device->state.conn == C_SYNC_SOURCE ||
1969 device->state.conn == C_SYNC_TARGET ||
1970 device->state.conn == C_PAUSED_SYNC_S ||
1971 device->state.conn == C_PAUSED_SYNC_T) {
1972 rv = true;
1973 break;
1974 }
1975 }
1976 rcu_read_unlock();
1977
1978 return rv;
1979 }
1980
1981 static bool conn_ov_running(struct drbd_connection *connection)
1982 {
1983 struct drbd_peer_device *peer_device;
1984 bool rv = false;
1985 int vnr;
1986
1987 rcu_read_lock();
1988 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1989 struct drbd_device *device = peer_device->device;
1990 if (device->state.conn == C_VERIFY_S ||
1991 device->state.conn == C_VERIFY_T) {
1992 rv = true;
1993 break;
1994 }
1995 }
1996 rcu_read_unlock();
1997
1998 return rv;
1999 }
2000
2001 static enum drbd_ret_code
2002 _check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
2003 {
2004 struct drbd_peer_device *peer_device;
2005 int i;
2006
2007 if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
2008 if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
2009 return ERR_NEED_APV_100;
2010
2011 if (new_net_conf->two_primaries != old_net_conf->two_primaries)
2012 return ERR_NEED_APV_100;
2013
2014 if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
2015 return ERR_NEED_APV_100;
2016 }
2017
2018 if (!new_net_conf->two_primaries &&
2019 conn_highest_role(connection) == R_PRIMARY &&
2020 conn_highest_peer(connection) == R_PRIMARY)
2021 return ERR_NEED_ALLOW_TWO_PRI;
2022
2023 if (new_net_conf->two_primaries &&
2024 (new_net_conf->wire_protocol != DRBD_PROT_C))
2025 return ERR_NOT_PROTO_C;
2026
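	/* Per-volume checks: protocol A cannot be combined with the
	 * "resource-and-stonith" fencing policy, and discard-my-data makes no
	 * sense on the node that is currently Primary. */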
2027 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2028 struct drbd_device *device = peer_device->device;
2029 if (get_ldev(device)) {
2030 enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
2031 put_ldev(device);
2032 if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
2033 return ERR_STONITH_AND_PROT_A;
2034 }
2035 if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
2036 return ERR_DISCARD_IMPOSSIBLE;
2037 }
2038
2039 if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
2040 return ERR_CONG_NOT_PROTO_A;
2041
2042 return NO_ERROR;
2043 }
2044
2045 static enum drbd_ret_code
2046 check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
2047 {
2048 static enum drbd_ret_code rv;
2049 struct drbd_peer_device *peer_device;
2050 int i;
2051
2052 rcu_read_lock();
2053 rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
2054 rcu_read_unlock();
2055
2056 /* connection->peer_devices protected by genl_lock() here */
2057 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2058 struct drbd_device *device = peer_device->device;
2059 if (!device->bitmap) {
2060 if (drbd_bm_init(device))
2061 return ERR_NOMEM;
2062 }
2063 }
2064
2065 return rv;
2066 }
2067
2068 struct crypto {
2069 struct crypto_hash *verify_tfm;
2070 struct crypto_hash *csums_tfm;
2071 struct crypto_hash *cram_hmac_tfm;
2072 struct crypto_hash *integrity_tfm;
2073 };
2074
2075 static int
2076 alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
2077 {
2078 if (!tfm_name[0])
2079 return NO_ERROR;
2080
2081 *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
2082 if (IS_ERR(*tfm)) {
2083 *tfm = NULL;
2084 return err_alg;
2085 }
2086
2087 return NO_ERROR;
2088 }
2089
2090 static enum drbd_ret_code
2091 alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
2092 {
2093 char hmac_name[CRYPTO_MAX_ALG_NAME];
2094 enum drbd_ret_code rv;
2095
2096 rv = alloc_hash(&crypto->csums_tfm, new_net_conf->csums_alg,
2097 ERR_CSUMS_ALG);
2098 if (rv != NO_ERROR)
2099 return rv;
2100 rv = alloc_hash(&crypto->verify_tfm, new_net_conf->verify_alg,
2101 ERR_VERIFY_ALG);
2102 if (rv != NO_ERROR)
2103 return rv;
2104 rv = alloc_hash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
2105 ERR_INTEGRITY_ALG);
2106 if (rv != NO_ERROR)
2107 return rv;
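	/* The kernel crypto API expects template syntax for keyed hashes,
	 * e.g. "hmac(sha256)", built here from the configured cram-hmac-alg. */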
2108 if (new_net_conf->cram_hmac_alg[0] != 0) {
2109 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
2110 new_net_conf->cram_hmac_alg);
2111
2112 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
2113 ERR_AUTH_ALG);
2114 }
2115
2116 return rv;
2117 }
2118
2119 static void free_crypto(struct crypto *crypto)
2120 {
2121 crypto_free_hash(crypto->cram_hmac_tfm);
2122 crypto_free_hash(crypto->integrity_tfm);
2123 crypto_free_hash(crypto->csums_tfm);
2124 crypto_free_hash(crypto->verify_tfm);
2125 }
2126
2127 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2128 {
2129 struct drbd_config_context adm_ctx;
2130 enum drbd_ret_code retcode;
2131 struct drbd_connection *connection;
2132 struct net_conf *old_net_conf, *new_net_conf = NULL;
2133 int err;
2134 int ovr; /* online verify running */
2135 int rsr; /* re-sync running */
2136 struct crypto crypto = { };
2137
2138 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2139 if (!adm_ctx.reply_skb)
2140 return retcode;
2141 if (retcode != NO_ERROR)
2142 goto finish;
2143
2144 connection = adm_ctx.connection;
2145 mutex_lock(&adm_ctx.resource->adm_mutex);
2146
2147 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2148 if (!new_net_conf) {
2149 retcode = ERR_NOMEM;
2150 goto out;
2151 }
2152
2153 conn_reconfig_start(connection);
2154
2155 mutex_lock(&connection->data.mutex);
2156 mutex_lock(&connection->resource->conf_update);
2157 old_net_conf = connection->net_conf;
2158
2159 if (!old_net_conf) {
2160 drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
2161 retcode = ERR_INVALID_REQUEST;
2162 goto fail;
2163 }
2164
2165 *new_net_conf = *old_net_conf;
2166 if (should_set_defaults(info))
2167 set_net_conf_defaults(new_net_conf);
2168
2169 err = net_conf_from_attrs_for_change(new_net_conf, info);
2170 if (err && err != -ENOMSG) {
2171 retcode = ERR_MANDATORY_TAG;
2172 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2173 goto fail;
2174 }
2175
2176 retcode = check_net_options(connection, new_net_conf);
2177 if (retcode != NO_ERROR)
2178 goto fail;
2179
2180 /* re-sync running */
2181 rsr = conn_resync_running(connection);
2182 if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
2183 retcode = ERR_CSUMS_RESYNC_RUNNING;
2184 goto fail;
2185 }
2186
2187 /* online verify running */
2188 ovr = conn_ov_running(connection);
2189 if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
2190 retcode = ERR_VERIFY_RUNNING;
2191 goto fail;
2192 }
2193
2194 retcode = alloc_crypto(&crypto, new_net_conf);
2195 if (retcode != NO_ERROR)
2196 goto fail;
2197
2198 rcu_assign_pointer(connection->net_conf, new_net_conf);
2199
2200 if (!rsr) {
2201 crypto_free_hash(connection->csums_tfm);
2202 connection->csums_tfm = crypto.csums_tfm;
2203 crypto.csums_tfm = NULL;
2204 }
2205 if (!ovr) {
2206 crypto_free_hash(connection->verify_tfm);
2207 connection->verify_tfm = crypto.verify_tfm;
2208 crypto.verify_tfm = NULL;
2209 }
2210
2211 crypto_free_hash(connection->integrity_tfm);
2212 connection->integrity_tfm = crypto.integrity_tfm;
2213 if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
2214 /* Do this without trying to take connection->data.mutex again. */
2215 __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
2216
2217 crypto_free_hash(connection->cram_hmac_tfm);
2218 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2219
2220 mutex_unlock(&connection->resource->conf_update);
2221 mutex_unlock(&connection->data.mutex);
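	/* new_net_conf was published with rcu_assign_pointer() above; wait for
	 * all RCU readers still holding old_net_conf before freeing it. */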
2222 synchronize_rcu();
2223 kfree(old_net_conf);
2224
2225 if (connection->cstate >= C_WF_REPORT_PARAMS) {
2226 struct drbd_peer_device *peer_device;
2227 int vnr;
2228
2229 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
2230 drbd_send_sync_param(peer_device);
2231 }
2232
2233 goto done;
2234
2235 fail:
2236 mutex_unlock(&connection->resource->conf_update);
2237 mutex_unlock(&connection->data.mutex);
2238 free_crypto(&crypto);
2239 kfree(new_net_conf);
2240 done:
2241 conn_reconfig_done(connection);
2242 out:
2243 mutex_unlock(&adm_ctx.resource->adm_mutex);
2244 finish:
2245 drbd_adm_finish(&adm_ctx, info, retcode);
2246 return 0;
2247 }
2248
2249 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2250 {
2251 struct drbd_config_context adm_ctx;
2252 struct drbd_peer_device *peer_device;
2253 struct net_conf *old_net_conf, *new_net_conf = NULL;
2254 struct crypto crypto = { };
2255 struct drbd_resource *resource;
2256 struct drbd_connection *connection;
2257 enum drbd_ret_code retcode;
2258 int i;
2259 int err;
2260
2261 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2262
2263 if (!adm_ctx.reply_skb)
2264 return retcode;
2265 if (retcode != NO_ERROR)
2266 goto out;
2267 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2268 drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
2269 retcode = ERR_INVALID_REQUEST;
2270 goto out;
2271 }
2272
2273 /* No need for _rcu here. All reconfiguration is
2274 * strictly serialized on genl_lock(). We are protected against
2275 * concurrent reconfiguration/addition/deletion */
2276 for_each_resource(resource, &drbd_resources) {
2277 for_each_connection(connection, resource) {
2278 if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
2279 !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
2280 connection->my_addr_len)) {
2281 retcode = ERR_LOCAL_ADDR;
2282 goto out;
2283 }
2284
2285 if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
2286 !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
2287 connection->peer_addr_len)) {
2288 retcode = ERR_PEER_ADDR;
2289 goto out;
2290 }
2291 }
2292 }
2293
2294 mutex_lock(&adm_ctx.resource->adm_mutex);
2295 connection = first_connection(adm_ctx.resource);
2296 conn_reconfig_start(connection);
2297
2298 if (connection->cstate > C_STANDALONE) {
2299 retcode = ERR_NET_CONFIGURED;
2300 goto fail;
2301 }
2302
2303 /* allocation not in the IO path, drbdsetup / netlink process context */
2304 new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
2305 if (!new_net_conf) {
2306 retcode = ERR_NOMEM;
2307 goto fail;
2308 }
2309
2310 set_net_conf_defaults(new_net_conf);
2311
2312 err = net_conf_from_attrs(new_net_conf, info);
2313 if (err && err != -ENOMSG) {
2314 retcode = ERR_MANDATORY_TAG;
2315 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2316 goto fail;
2317 }
2318
2319 retcode = check_net_options(connection, new_net_conf);
2320 if (retcode != NO_ERROR)
2321 goto fail;
2322
2323 retcode = alloc_crypto(&crypto, new_net_conf);
2324 if (retcode != NO_ERROR)
2325 goto fail;
2326
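	/* Defensive: force NUL termination of the shared secret, whatever
	 * userspace handed us. */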
2327 ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2328
2329 drbd_flush_workqueue(&connection->sender_work);
2330
2331 mutex_lock(&adm_ctx.resource->conf_update);
2332 old_net_conf = connection->net_conf;
2333 if (old_net_conf) {
2334 retcode = ERR_NET_CONFIGURED;
2335 mutex_unlock(&adm_ctx.resource->conf_update);
2336 goto fail;
2337 }
2338 rcu_assign_pointer(connection->net_conf, new_net_conf);
2339
2340 conn_free_crypto(connection);
2341 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2342 connection->integrity_tfm = crypto.integrity_tfm;
2343 connection->csums_tfm = crypto.csums_tfm;
2344 connection->verify_tfm = crypto.verify_tfm;
2345
2346 connection->my_addr_len = nla_len(adm_ctx.my_addr);
2347 memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
2348 connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
2349 memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
2350
2351 mutex_unlock(&adm_ctx.resource->conf_update);
2352
2353 rcu_read_lock();
2354 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2355 struct drbd_device *device = peer_device->device;
2356 device->send_cnt = 0;
2357 device->recv_cnt = 0;
2358 }
2359 rcu_read_unlock();
2360
2361 retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2362
2363 conn_reconfig_done(connection);
2364 mutex_unlock(&adm_ctx.resource->adm_mutex);
2365 drbd_adm_finish(&adm_ctx, info, retcode);
2366 return 0;
2367
2368 fail:
2369 free_crypto(&crypto);
2370 kfree(new_net_conf);
2371
2372 conn_reconfig_done(connection);
2373 mutex_unlock(&adm_ctx.resource->adm_mutex);
2374 out:
2375 drbd_adm_finish(&adm_ctx, info, retcode);
2376 return 0;
2377 }
2378
2379 static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
2380 {
2381 enum drbd_state_rv rv;
2382
2383 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2384 force ? CS_HARD : 0);
2385
2386 switch (rv) {
2387 case SS_NOTHING_TO_DO:
2388 break;
2389 case SS_ALREADY_STANDALONE:
2390 return SS_SUCCESS;
2391 case SS_PRIMARY_NOP:
2392 /* Our state checking code wants to see the peer outdated. */
2393 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2394
2395 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2396 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2397
2398 break;
2399 case SS_CW_FAILED_BY_PEER:
2400 /* The peer probably wants to see us outdated. */
2401 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
2402 disk, D_OUTDATED), 0);
2403 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2404 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2405 CS_HARD);
2406 }
2407 break;
2408 default:;
2409 /* no special handling necessary */
2410 }
2411
2412 if (rv >= SS_SUCCESS) {
2413 enum drbd_state_rv rv2;
2414 /* No one else can reconfigure the network while I am here.
2415 * The state handling only uses drbd_thread_stop_nowait(),
2416 		 * so here we really want to wait until the receiver thread is gone.
2417 */
2418 drbd_thread_stop(&connection->receiver);
2419
2420 /* Race breaker. This additional state change request may be
2421 * necessary, if this was a forced disconnect during a receiver
2422 * restart. We may have "killed" the receiver thread just
2423 * after drbd_receiver() returned. Typically, we should be
2424 * C_STANDALONE already, now, and this becomes a no-op.
2425 */
2426 rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
2427 CS_VERBOSE | CS_HARD);
2428 if (rv2 < SS_SUCCESS)
2429 drbd_err(connection,
2430 "unexpected rv2=%d in conn_try_disconnect()\n",
2431 rv2);
2432 }
2433 return rv;
2434 }
2435
2436 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2437 {
2438 struct drbd_config_context adm_ctx;
2439 struct disconnect_parms parms;
2440 struct drbd_connection *connection;
2441 enum drbd_state_rv rv;
2442 enum drbd_ret_code retcode;
2443 int err;
2444
2445 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2446 if (!adm_ctx.reply_skb)
2447 return retcode;
2448 if (retcode != NO_ERROR)
2449 goto fail;
2450
2451 connection = adm_ctx.connection;
2452 memset(&parms, 0, sizeof(parms));
2453 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2454 err = disconnect_parms_from_attrs(&parms, info);
2455 if (err) {
2456 retcode = ERR_MANDATORY_TAG;
2457 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2458 goto fail;
2459 }
2460 }
2461
2462 mutex_lock(&adm_ctx.resource->adm_mutex);
2463 rv = conn_try_disconnect(connection, parms.force_disconnect);
2464 if (rv < SS_SUCCESS)
2465 retcode = rv; /* FIXME: Type mismatch. */
2466 else
2467 retcode = NO_ERROR;
2468 mutex_unlock(&adm_ctx.resource->adm_mutex);
2469 fail:
2470 drbd_adm_finish(&adm_ctx, info, retcode);
2471 return 0;
2472 }
2473
2474 void resync_after_online_grow(struct drbd_device *device)
2475 {
2476 int iass; /* I am sync source */
2477
2478 drbd_info(device, "Resync of new storage after online grow\n");
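	/* If the roles differ, the Primary becomes sync source; otherwise the
	 * tie is broken by the RESOLVE_CONFLICTS connection flag (presumably
	 * settled during the connection handshake). */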
2479 if (device->state.role != device->state.peer)
2480 iass = (device->state.role == R_PRIMARY);
2481 else
2482 iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2483
2484 if (iass)
2485 drbd_start_resync(device, C_SYNC_SOURCE);
2486 else
2487 _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2488 }
2489
2490 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2491 {
2492 struct drbd_config_context adm_ctx;
2493 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2494 struct resize_parms rs;
2495 struct drbd_device *device;
2496 enum drbd_ret_code retcode;
2497 enum determine_dev_size dd;
2498 bool change_al_layout = false;
2499 enum dds_flags ddsf;
2500 sector_t u_size;
2501 int err;
2502
2503 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2504 if (!adm_ctx.reply_skb)
2505 return retcode;
2506 if (retcode != NO_ERROR)
2507 goto finish;
2508
2509 mutex_lock(&adm_ctx.resource->adm_mutex);
2510 device = adm_ctx.device;
2511 if (!get_ldev(device)) {
2512 retcode = ERR_NO_DISK;
2513 goto fail;
2514 }
2515
2516 memset(&rs, 0, sizeof(struct resize_parms));
2517 rs.al_stripes = device->ldev->md.al_stripes;
2518 rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
2519 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2520 err = resize_parms_from_attrs(&rs, info);
2521 if (err) {
2522 retcode = ERR_MANDATORY_TAG;
2523 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2524 goto fail_ldev;
2525 }
2526 }
2527
2528 if (device->state.conn > C_CONNECTED) {
2529 retcode = ERR_RESIZE_RESYNC;
2530 goto fail_ldev;
2531 }
2532
2533 if (device->state.role == R_SECONDARY &&
2534 device->state.peer == R_SECONDARY) {
2535 retcode = ERR_NO_PRIMARY;
2536 goto fail_ldev;
2537 }
2538
2539 if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
2540 retcode = ERR_NEED_APV_93;
2541 goto fail_ldev;
2542 }
2543
2544 rcu_read_lock();
2545 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
2546 rcu_read_unlock();
2547 if (u_size != (sector_t)rs.resize_size) {
2548 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2549 if (!new_disk_conf) {
2550 retcode = ERR_NOMEM;
2551 goto fail_ldev;
2552 }
2553 }
2554
2555 if (device->ldev->md.al_stripes != rs.al_stripes ||
2556 device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
2557 u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
2558
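		/* Assuming al_stripe_size is in KiB (al_stripe_size_4k * 4 above),
		 * al_size_k is the total activity log size in KiB: capped at
		 * 16 GiB, and at least MD_32kB_SECT/2, i.e. 32 KiB. */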
2559 if (al_size_k > (16 * 1024 * 1024)) {
2560 retcode = ERR_MD_LAYOUT_TOO_BIG;
2561 goto fail_ldev;
2562 }
2563
2564 if (al_size_k < MD_32kB_SECT/2) {
2565 retcode = ERR_MD_LAYOUT_TOO_SMALL;
2566 goto fail_ldev;
2567 }
2568
2569 if (device->state.conn != C_CONNECTED && !rs.resize_force) {
2570 retcode = ERR_MD_LAYOUT_CONNECTED;
2571 goto fail_ldev;
2572 }
2573
2574 change_al_layout = true;
2575 }
2576
2577 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
2578 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
2579
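	/* Publish the new disk_conf under conf_update, then wait out RCU
	 * readers of the old one before freeing it. */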
2580 if (new_disk_conf) {
2581 mutex_lock(&device->resource->conf_update);
2582 old_disk_conf = device->ldev->disk_conf;
2583 *new_disk_conf = *old_disk_conf;
2584 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2585 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
2586 mutex_unlock(&device->resource->conf_update);
2587 synchronize_rcu();
2588 kfree(old_disk_conf);
2589 }
2590
2591 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2592 dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
2593 drbd_md_sync(device);
2594 put_ldev(device);
2595 if (dd == DS_ERROR) {
2596 retcode = ERR_NOMEM_BITMAP;
2597 goto fail;
2598 } else if (dd == DS_ERROR_SPACE_MD) {
2599 retcode = ERR_MD_LAYOUT_NO_FIT;
2600 goto fail;
2601 } else if (dd == DS_ERROR_SHRINK) {
2602 retcode = ERR_IMPLICIT_SHRINK;
2603 goto fail;
2604 }
2605
2606 if (device->state.conn == C_CONNECTED) {
2607 if (dd == DS_GREW)
2608 set_bit(RESIZE_PENDING, &device->flags);
2609
2610 drbd_send_uuids(first_peer_device(device));
2611 drbd_send_sizes(first_peer_device(device), 1, ddsf);
2612 }
2613
2614 fail:
2615 mutex_unlock(&adm_ctx.resource->adm_mutex);
2616 finish:
2617 drbd_adm_finish(&adm_ctx, info, retcode);
2618 return 0;
2619
2620 fail_ldev:
2621 put_ldev(device);
2622 goto fail;
2623 }
2624
2625 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2626 {
2627 struct drbd_config_context adm_ctx;
2628 enum drbd_ret_code retcode;
2629 struct res_opts res_opts;
2630 int err;
2631
2632 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2633 if (!adm_ctx.reply_skb)
2634 return retcode;
2635 if (retcode != NO_ERROR)
2636 goto fail;
2637
2638 res_opts = adm_ctx.resource->res_opts;
2639 if (should_set_defaults(info))
2640 set_res_opts_defaults(&res_opts);
2641
2642 err = res_opts_from_attrs(&res_opts, info);
2643 if (err && err != -ENOMSG) {
2644 retcode = ERR_MANDATORY_TAG;
2645 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2646 goto fail;
2647 }
2648
2649 mutex_lock(&adm_ctx.resource->adm_mutex);
2650 err = set_resource_options(adm_ctx.resource, &res_opts);
2651 if (err) {
2652 retcode = ERR_INVALID_REQUEST;
2653 if (err == -ENOMEM)
2654 retcode = ERR_NOMEM;
2655 }
2656 mutex_unlock(&adm_ctx.resource->adm_mutex);
2657
2658 fail:
2659 drbd_adm_finish(&adm_ctx, info, retcode);
2660 return 0;
2661 }
2662
2663 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2664 {
2665 struct drbd_config_context adm_ctx;
2666 struct drbd_device *device;
2667 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2668
2669 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2670 if (!adm_ctx.reply_skb)
2671 return retcode;
2672 if (retcode != NO_ERROR)
2673 goto out;
2674
2675 device = adm_ctx.device;
2676 if (!get_ldev(device)) {
2677 retcode = ERR_NO_DISK;
2678 goto out;
2679 }
2680
2681 mutex_lock(&adm_ctx.resource->adm_mutex);
2682
2683 /* If there is still bitmap IO pending, probably because of a previous
2684 * resync just being finished, wait for it before requesting a new resync.
2685 	 * Also wait for its after_state_ch(). */
2686 drbd_suspend_io(device);
2687 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
2688 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
2689
2690 /* If we happen to be C_STANDALONE R_SECONDARY, just change to
2691 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
2692 * try to start a resync handshake as sync target for full sync.
2693 */
2694 if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
2695 retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
2696 if (retcode >= SS_SUCCESS) {
2697 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2698 "set_n_write from invalidate", BM_LOCKED_MASK))
2699 retcode = ERR_IO_MD_DISK;
2700 }
2701 } else
2702 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
2703 drbd_resume_io(device);
2704 mutex_unlock(&adm_ctx.resource->adm_mutex);
2705 put_ldev(device);
2706 out:
2707 drbd_adm_finish(&adm_ctx, info, retcode);
2708 return 0;
2709 }
2710
2711 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2712 union drbd_state mask, union drbd_state val)
2713 {
2714 struct drbd_config_context adm_ctx;
2715 enum drbd_ret_code retcode;
2716
2717 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2718 if (!adm_ctx.reply_skb)
2719 return retcode;
2720 if (retcode != NO_ERROR)
2721 goto out;
2722
2723 mutex_lock(&adm_ctx.resource->adm_mutex);
2724 retcode = drbd_request_state(adm_ctx.device, mask, val);
2725 mutex_unlock(&adm_ctx.resource->adm_mutex);
2726 out:
2727 drbd_adm_finish(&adm_ctx, info, retcode);
2728 return 0;
2729 }
2730
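/* Bitmap IO worker for invalidate-peer below: set all bits in the bitmap
 * and also suspend activity log updates while everything is out of sync. */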
2731 static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
2732 {
2733 int rv;
2734
2735 rv = drbd_bmio_set_n_write(device);
2736 drbd_suspend_al(device);
2737 return rv;
2738 }
2739
2740 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2741 {
2742 struct drbd_config_context adm_ctx;
2743 int retcode; /* drbd_ret_code, drbd_state_rv */
2744 struct drbd_device *device;
2745
2746 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2747 if (!adm_ctx.reply_skb)
2748 return retcode;
2749 if (retcode != NO_ERROR)
2750 goto out;
2751
2752 device = adm_ctx.device;
2753 if (!get_ldev(device)) {
2754 retcode = ERR_NO_DISK;
2755 goto out;
2756 }
2757
2758 mutex_lock(&adm_ctx.resource->adm_mutex);
2759
2760 /* If there is still bitmap IO pending, probably because of a previous
2761 * resync just being finished, wait for it before requesting a new resync.
2762 	 * Also wait for its after_state_ch(). */
2763 drbd_suspend_io(device);
2764 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
2765 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
2766
2767 /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
2768 * in the bitmap. Otherwise, try to start a resync handshake
2769 * as sync source for full sync.
2770 */
2771 if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
2772 		/* The peer will get a resync upon connect anyway. Just make that
2773 into a full resync. */
2774 retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
2775 if (retcode >= SS_SUCCESS) {
2776 if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
2777 "set_n_write from invalidate_peer",
2778 BM_LOCKED_SET_ALLOWED))
2779 retcode = ERR_IO_MD_DISK;
2780 }
2781 } else
2782 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
2783 drbd_resume_io(device);
2784 mutex_unlock(&adm_ctx.resource->adm_mutex);
2785 put_ldev(device);
2786 out:
2787 drbd_adm_finish(&adm_ctx, info, retcode);
2788 return 0;
2789 }
2790
2791 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2792 {
2793 struct drbd_config_context adm_ctx;
2794 enum drbd_ret_code retcode;
2795
2796 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2797 if (!adm_ctx.reply_skb)
2798 return retcode;
2799 if (retcode != NO_ERROR)
2800 goto out;
2801
2802 mutex_lock(&adm_ctx.resource->adm_mutex);
2803 if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2804 retcode = ERR_PAUSE_IS_SET;
2805 mutex_unlock(&adm_ctx.resource->adm_mutex);
2806 out:
2807 drbd_adm_finish(&adm_ctx, info, retcode);
2808 return 0;
2809 }
2810
2811 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2812 {
2813 struct drbd_config_context adm_ctx;
2814 union drbd_dev_state s;
2815 enum drbd_ret_code retcode;
2816
2817 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2818 if (!adm_ctx.reply_skb)
2819 return retcode;
2820 if (retcode != NO_ERROR)
2821 goto out;
2822
2823 mutex_lock(&adm_ctx.resource->adm_mutex);
2824 if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2825 s = adm_ctx.device->state;
2826 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2827 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2828 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2829 } else {
2830 retcode = ERR_PAUSE_IS_CLEAR;
2831 }
2832 }
2833 mutex_unlock(&adm_ctx.resource->adm_mutex);
2834 out:
2835 drbd_adm_finish(&adm_ctx, info, retcode);
2836 return 0;
2837 }
2838
2839 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2840 {
2841 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2842 }
2843
2844 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2845 {
2846 struct drbd_config_context adm_ctx;
2847 struct drbd_device *device;
2848 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2849
2850 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2851 if (!adm_ctx.reply_skb)
2852 return retcode;
2853 if (retcode != NO_ERROR)
2854 goto out;
2855
2856 mutex_lock(&adm_ctx.resource->adm_mutex);
2857 device = adm_ctx.device;
2858 if (test_bit(NEW_CUR_UUID, &device->flags)) {
2859 drbd_uuid_new_current(device);
2860 clear_bit(NEW_CUR_UUID, &device->flags);
2861 }
2862 drbd_suspend_io(device);
2863 retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2864 if (retcode == SS_SUCCESS) {
2865 if (device->state.conn < C_CONNECTED)
2866 tl_clear(first_peer_device(device)->connection);
2867 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
2868 tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
2869 }
2870 drbd_resume_io(device);
2871 mutex_unlock(&adm_ctx.resource->adm_mutex);
2872 out:
2873 drbd_adm_finish(&adm_ctx, info, retcode);
2874 return 0;
2875 }
2876
2877 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2878 {
2879 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2880 }
2881
2882 static int nla_put_drbd_cfg_context(struct sk_buff *skb,
2883 struct drbd_resource *resource,
2884 struct drbd_connection *connection,
2885 struct drbd_device *device)
2886 {
2887 struct nlattr *nla;
2888 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2889 if (!nla)
2890 goto nla_put_failure;
2891 if (device &&
2892 nla_put_u32(skb, T_ctx_volume, device->vnr))
2893 goto nla_put_failure;
2894 if (nla_put_string(skb, T_ctx_resource_name, resource->name))
2895 goto nla_put_failure;
2896 if (connection) {
2897 if (connection->my_addr_len &&
2898 nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
2899 goto nla_put_failure;
2900 if (connection->peer_addr_len &&
2901 nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
2902 goto nla_put_failure;
2903 }
2904 nla_nest_end(skb, nla);
2905 return 0;
2906
2907 nla_put_failure:
2908 if (nla)
2909 nla_nest_cancel(skb, nla);
2910 return -EMSGSIZE;
2911 }
2912
2913 /*
2914 * Return the connection of @resource if @resource has exactly one connection.
2915 */
2916 static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
2917 {
2918 struct list_head *connections = &resource->connections;
2919
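	/* Exactly one entry: starting from the list head, one hop forward and
	 * the next pointer is back at the head. */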
2920 if (list_empty(connections) || connections->next->next != connections)
2921 return NULL;
2922 return list_first_entry(&resource->connections, struct drbd_connection, connections);
2923 }
2924
2925 static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
2926 const struct sib_info *sib)
2927 {
2928 struct drbd_resource *resource = device->resource;
2929 struct state_info *si = NULL; /* for sizeof(si->member); */
2930 struct nlattr *nla;
2931 int got_ldev;
2932 int err = 0;
2933 int exclude_sensitive;
2934
2935 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2936 * to. So we better exclude_sensitive information.
2937 *
2938 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2939 * in the context of the requesting user process. Exclude sensitive
2940 	 * information, unless current has CAP_SYS_ADMIN.
2941 *
2942 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2943 * relies on the current implementation of netlink_dump(), which
2944 * executes the dump callback successively from netlink_recvmsg(),
2945 * always in the context of the receiving process */
2946 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
2947
2948 got_ldev = get_ldev(device);
2949
2950 /* We need to add connection name and volume number information still.
2951 * Minor number is in drbd_genlmsghdr. */
2952 if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
2953 goto nla_put_failure;
2954
2955 if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
2956 goto nla_put_failure;
2957
2958 rcu_read_lock();
2959 if (got_ldev) {
2960 struct disk_conf *disk_conf;
2961
2962 disk_conf = rcu_dereference(device->ldev->disk_conf);
2963 err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
2964 }
2965 if (!err) {
2966 struct net_conf *nc;
2967
2968 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2969 if (nc)
2970 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2971 }
2972 rcu_read_unlock();
2973 if (err)
2974 goto nla_put_failure;
2975
2976 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2977 if (!nla)
2978 goto nla_put_failure;
2979 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
2980 nla_put_u32(skb, T_current_state, device->state.i) ||
2981 nla_put_u64(skb, T_ed_uuid, device->ed_uuid) ||
2982 nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) ||
2983 nla_put_u64(skb, T_send_cnt, device->send_cnt) ||
2984 nla_put_u64(skb, T_recv_cnt, device->recv_cnt) ||
2985 nla_put_u64(skb, T_read_cnt, device->read_cnt) ||
2986 nla_put_u64(skb, T_writ_cnt, device->writ_cnt) ||
2987 nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) ||
2988 nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
2989 nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
2990 nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
2991 nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
2992 goto nla_put_failure;
2993
2994 if (got_ldev) {
2995 int err;
2996
2997 spin_lock_irq(&device->ldev->md.uuid_lock);
2998 err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
2999 spin_unlock_irq(&device->ldev->md.uuid_lock);
3000
3001 if (err)
3002 goto nla_put_failure;
3003
3004 if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
3005 nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) ||
3006 nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device)))
3007 goto nla_put_failure;
3008 if (C_SYNC_SOURCE <= device->state.conn &&
3009 C_PAUSED_SYNC_T >= device->state.conn) {
3010 if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) ||
3011 nla_put_u64(skb, T_bits_rs_failed, device->rs_failed))
3012 goto nla_put_failure;
3013 }
3014 }
3015
3016 if (sib) {
3017 switch(sib->sib_reason) {
3018 case SIB_SYNC_PROGRESS:
3019 case SIB_GET_STATUS_REPLY:
3020 break;
3021 case SIB_STATE_CHANGE:
3022 if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
3023 nla_put_u32(skb, T_new_state, sib->ns.i))
3024 goto nla_put_failure;
3025 break;
3026 case SIB_HELPER_POST:
3027 if (nla_put_u32(skb, T_helper_exit_code,
3028 sib->helper_exit_code))
3029 goto nla_put_failure;
3030 /* fall through */
3031 case SIB_HELPER_PRE:
3032 if (nla_put_string(skb, T_helper, sib->helper_name))
3033 goto nla_put_failure;
3034 break;
3035 }
3036 }
3037 nla_nest_end(skb, nla);
3038
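	/* "if (0)" label idiom: normal flow skips the error assignment below;
	 * the nla_put_failure gotos above jump straight into it. */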
3039 if (0)
3040 nla_put_failure:
3041 err = -EMSGSIZE;
3042 if (got_ldev)
3043 put_ldev(device);
3044 return err;
3045 }
3046
3047 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
3048 {
3049 struct drbd_config_context adm_ctx;
3050 enum drbd_ret_code retcode;
3051 int err;
3052
3053 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3054 if (!adm_ctx.reply_skb)
3055 return retcode;
3056 if (retcode != NO_ERROR)
3057 goto out;
3058
3059 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
3060 if (err) {
3061 nlmsg_free(adm_ctx.reply_skb);
3062 return err;
3063 }
3064 out:
3065 drbd_adm_finish(&adm_ctx, info, retcode);
3066 return 0;
3067 }
3068
3069 static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
3070 {
3071 struct drbd_device *device;
3072 struct drbd_genlmsghdr *dh;
3073 struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
3074 struct drbd_resource *resource = NULL;
3075 struct drbd_resource *tmp;
3076 unsigned volume = cb->args[1];
3077
3078 /* Open coded, deferred, iteration:
3079 * for_each_resource_safe(resource, tmp, &drbd_resources) {
3080 * connection = "first connection of resource or undefined";
3081 * idr_for_each_entry(&resource->devices, device, i) {
3082 * ...
3083 * }
3084 * }
3085 * where resource is cb->args[0];
3086 * and i is cb->args[1];
3087 *
3088 * cb->args[2] indicates if we shall loop over all resources,
3089 * or just dump all volumes of a single resource.
3090 *
3091 * This may miss entries inserted after this dump started,
3092 * or entries deleted before they are reached.
3093 *
3094 * We need to make sure the device won't disappear while
3095 * we are looking at it, and revalidate our iterators
3096 * on each iteration.
3097 */
3098
3099 /* synchronize with conn_create()/drbd_destroy_connection() */
3100 rcu_read_lock();
3101 /* revalidate iterator position */
3102 for_each_resource_rcu(tmp, &drbd_resources) {
3103 if (pos == NULL) {
3104 /* first iteration */
3105 pos = tmp;
3106 resource = pos;
3107 break;
3108 }
3109 if (tmp == pos) {
3110 resource = pos;
3111 break;
3112 }
3113 }
3114 if (resource) {
3115 next_resource:
3116 device = idr_get_next(&resource->devices, &volume);
3117 if (!device) {
3118 /* No more volumes to dump on this resource.
3119 * Advance resource iterator. */
3120 pos = list_entry_rcu(resource->resources.next,
3121 struct drbd_resource, resources);
3122 /* Did we dump any volume of this resource yet? */
3123 if (volume != 0) {
3124 /* If we reached the end of the list,
3125 * or only a single resource dump was requested,
3126 * we are done. */
3127 if (&pos->resources == &drbd_resources || cb->args[2])
3128 goto out;
3129 volume = 0;
3130 resource = pos;
3131 goto next_resource;
3132 }
3133 }
3134
3135 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3136 cb->nlh->nlmsg_seq, &drbd_genl_family,
3137 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
3138 if (!dh)
3139 goto out;
3140
3141 if (!device) {
3142 /* This is a connection without a single volume.
3143 			 * Surprisingly enough, it may have a network
3144 * configuration. */
3145 struct drbd_connection *connection;
3146
3147 dh->minor = -1U;
3148 dh->ret_code = NO_ERROR;
3149 connection = the_only_connection(resource);
3150 if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
3151 goto cancel;
3152 if (connection) {
3153 struct net_conf *nc;
3154
3155 nc = rcu_dereference(connection->net_conf);
3156 if (nc && net_conf_to_skb(skb, nc, 1) != 0)
3157 goto cancel;
3158 }
3159 goto done;
3160 }
3161
3162 D_ASSERT(device, device->vnr == volume);
3163 D_ASSERT(device, device->resource == resource);
3164
3165 dh->minor = device_to_minor(device);
3166 dh->ret_code = NO_ERROR;
3167
3168 if (nla_put_status_info(skb, device, NULL)) {
3169 cancel:
3170 genlmsg_cancel(skb, dh);
3171 goto out;
3172 }
3173 done:
3174 genlmsg_end(skb, dh);
3175 }
3176
3177 out:
3178 rcu_read_unlock();
3179 /* where to start the next iteration */
3180 cb->args[0] = (long)pos;
3181 cb->args[1] = (pos == resource) ? volume + 1 : 0;
3182
3183 	/* No more resources/volumes/minors found results in an empty skb,
3184 	 * which terminates the dump. */
3185 return skb->len;
3186 }
3187
3188 /*
3189 * Request status of all resources, or of all volumes within a single resource.
3190 *
3191 * This is a dump, as the answer may not fit in a single reply skb otherwise.
3192 * Which means we cannot use the family->attrbuf or other such members, because
3193 * dump is NOT protected by the genl_lock(). During dump, we only have access
3194 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
3195 *
3196 * Once things are setup properly, we call into get_one_status().
3197 */
3198 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
3199 {
3200 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3201 struct nlattr *nla;
3202 const char *resource_name;
3203 struct drbd_resource *resource;
3204 int maxtype;
3205
3206 /* Is this a followup call? */
3207 if (cb->args[0]) {
3208 /* ... of a single resource dump,
3209 * and the resource iterator has been advanced already? */
3210 if (cb->args[2] && cb->args[2] != cb->args[0])
3211 return 0; /* DONE. */
3212 goto dump;
3213 }
3214
3215 /* First call (from netlink_dump_start). We need to figure out
3216 * which resource(s) the user wants us to dump. */
3217 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
3218 nlmsg_attrlen(cb->nlh, hdrlen),
3219 DRBD_NLA_CFG_CONTEXT);
3220
3221 /* No explicit context given. Dump all. */
3222 if (!nla)
3223 goto dump;
3224 maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
3225 nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
3226 if (IS_ERR(nla))
3227 return PTR_ERR(nla);
3228 /* context given, but no name present? */
3229 if (!nla)
3230 return -EINVAL;
3231 resource_name = nla_data(nla);
3232 if (!*resource_name)
3233 return -ENODEV;
3234 resource = drbd_find_resource(resource_name);
3235 if (!resource)
3236 return -ENODEV;
3237
3238 kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
3239
3240 /* prime iterators, and set "filter" mode mark:
3241 	 * only dump this resource. */
3242 cb->args[0] = (long)resource;
3243 /* cb->args[1] = 0; passed in this way. */
3244 cb->args[2] = (long)resource;
3245
3246 dump:
3247 return get_one_status(skb, cb);
3248 }
3249
3250 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
3251 {
3252 struct drbd_config_context adm_ctx;
3253 enum drbd_ret_code retcode;
3254 struct timeout_parms tp;
3255 int err;
3256
3257 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3258 if (!adm_ctx.reply_skb)
3259 return retcode;
3260 if (retcode != NO_ERROR)
3261 goto out;
3262
3263 tp.timeout_type =
3264 adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
3265 test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
3266 UT_DEFAULT;
3267
3268 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
3269 if (err) {
3270 nlmsg_free(adm_ctx.reply_skb);
3271 return err;
3272 }
3273 out:
3274 drbd_adm_finish(&adm_ctx, info, retcode);
3275 return 0;
3276 }
3277
3278 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
3279 {
3280 struct drbd_config_context adm_ctx;
3281 struct drbd_device *device;
3282 enum drbd_ret_code retcode;
3283 struct start_ov_parms parms;
3284
3285 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3286 if (!adm_ctx.reply_skb)
3287 return retcode;
3288 if (retcode != NO_ERROR)
3289 goto out;
3290
3291 device = adm_ctx.device;
3292
3293 /* resume from last known position, if possible */
3294 parms.ov_start_sector = device->ov_start_sector;
3295 parms.ov_stop_sector = ULLONG_MAX;
3296 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
3297 int err = start_ov_parms_from_attrs(&parms, info);
3298 if (err) {
3299 retcode = ERR_MANDATORY_TAG;
3300 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
3301 goto out;
3302 }
3303 }
3304 mutex_lock(&adm_ctx.resource->adm_mutex);
3305
3306 /* w_make_ov_request expects position to be aligned */
3307 device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
3308 device->ov_stop_sector = parms.ov_stop_sector;
3309
3310 /* If there is still bitmap IO pending, e.g. previous resync or verify
3311 * just being finished, wait for it before requesting a new resync. */
3312 drbd_suspend_io(device);
3313 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3314 retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
3315 drbd_resume_io(device);
3316
3317 mutex_unlock(&adm_ctx.resource->adm_mutex);
3318 out:
3319 drbd_adm_finish(&adm_ctx, info, retcode);
3320 return 0;
3321 }
3322
3323
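/* Generate a new current data generation UUID. With clear_bm set this also
 * clears the whole bitmap, and on a connected, freshly created device
 * (UI_CURRENT == UUID_JUST_CREATED, agreed protocol >= 90) it declares both
 * sides UpToDate, i.e. it skips the initial full sync. (Typically reached via
 * "drbdsetup new-current-uuid --clear-bitmap"; the userspace command name is
 * an assumption.) */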
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	device = adm_ctx.device;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync"; the data is assumed to be clean */
	if (device->state.conn == C_CONNECTED &&
	    first_peer_device(device)->connection->agreed_pro_version >= 90 &&
	    device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		drbd_info(device, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (device->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			drbd_err(device, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(first_peer_device(device));
			_drbd_uuid_set(device, UI_BITMAP, 0);
			drbd_print_uuids(device, "cleared bitmap UUID");
			spin_lock_irq(&device->resource->req_lock);
			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&device->resource->req_lock);
		}
	}

	drbd_md_sync(device);
out_dec:
	put_ldev(device);
out:
	mutex_unlock(device->state_mutex);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out_nolock:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

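/* A valid resource name is any non-empty string that contains no '/',
 * e.g. "r0"; "" and "home/r0" are rejected (the latter so the name stays
 * usable as a sysfs/configfs/debugfs directory entry later on). */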
static enum drbd_ret_code
drbd_check_resource_name(struct drbd_config_context *adm_ctx)
{
	const char *name = adm_ctx->resource_name;
	if (!name || !name[0]) {
		drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

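/* Create a new, empty resource (connection and worker infrastructure, no
 * minors yet) with the given name and resource options. Creating a resource
 * that already exists only fails when NLM_F_EXCL was set in the request, so
 * the operation is effectively idempotent for userspace. (Typically reached
 * via "drbdsetup new-resource"; the command name is an assumption.) */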
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(&adm_ctx);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.resource) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	/* not yet safe for genl_family.parallel_ops */
	if (!conn_create(adm_ctx.resource_name, &res_opts))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

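/* Add a volume (a /dev/drbd<minor> device) to an existing resource. The
 * requested minor number and volume id are range checked here; as with
 * new-resource, recreating an existing minor only fails with NLM_F_EXCL.
 * (Typically reached via "drbdsetup new-minor"; the command name is an
 * assumption.) */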
int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that first_peer_device(device)->connection and device->vnr match the request. */
	if (adm_ctx.device) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_OR_VOLUME_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = drbd_create_device(&adm_ctx, dh->minor);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

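/* Remove a single volume from its resource. This is only allowed while the
 * device is Diskless and Secondary; it may still be connected, since removing
 * a minor from a live replication group is a supported operation. */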
static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
{
	if (device->state.disk == D_DISKLESS &&
	    /* no need to be device->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    device->state.role == R_SECONDARY) {
		_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);
		drbd_delete_device(device);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = adm_del_minor(adm_ctx.device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

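/* Tear down a resource that has no minors left and no established connections:
 * unlink it from the global resources list, make sure its worker threads have
 * really exited (the state machine only issues drbd_thread_stop_nowait()),
 * and free it after an RCU grace period. */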
static int adm_del_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection;

	for_each_connection(connection, resource) {
		if (connection->cstate > C_STANDALONE)
			return ERR_NET_CONFIGURED;
	}
	if (!idr_is_empty(&resource->devices))
		return ERR_RES_IN_USE;

	list_del_rcu(&resource->resources);
	/* Make sure all threads have actually stopped: state handling only
	 * does drbd_thread_stop_nowait(). */
	list_for_each_entry(connection, &resource->connections, connections)
		drbd_thread_stop(&connection->worker);
	synchronize_rcu();
	drbd_free_resource(resource);
	return NO_ERROR;
}

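/* Take a whole resource down in one request: demote every volume to Secondary,
 * disconnect every connection, detach and delete every volume, and finally
 * delete the resource itself. Any failing step aborts the sequence and is
 * reported back through the reply skb. (This is what "drbdadm down" /
 * "drbdsetup down" end up invoking; the command names are an assumption.) */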
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	int retcode; /* holds an enum drbd_ret_code or an enum drbd_state_rv */
	unsigned i;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	resource = adm_ctx.resource;
	mutex_lock(&resource->adm_mutex);
	/* demote */
	for_each_connection(connection, resource) {
		struct drbd_peer_device *peer_device;

		idr_for_each_entry(&connection->peer_devices, peer_device, i) {
			retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
			if (retcode < SS_SUCCESS) {
				drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
				goto out;
			}
		}

		retcode = conn_try_disconnect(connection, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
			goto out;
		}
	}

	/* detach */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_detach(device, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
			goto out;
		}
	}

	/* delete volumes */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_del_minor(device);
		if (retcode != NO_ERROR) {
			/* "cannot happen" */
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
			goto out;
		}
	}

	retcode = adm_del_resource(resource);
out:
	mutex_unlock(&resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

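/* Delete an existing but unused resource (all connections StandAlone, no
 * minors left); thin wrapper around adm_del_resource() under adm_mutex.
 * (Typically reached via "drbdsetup del-resource"; the command name is an
 * assumption.) */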
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;
	resource = adm_ctx.resource;

	mutex_lock(&resource->adm_mutex);
	retcode = adm_del_resource(resource);
	mutex_unlock(&resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

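/* Broadcast a state change or similar event to the DRBD events multicast group
 * so that listeners (e.g. "drbdsetup events", an assumption about the
 * consumer) can pick it up. Allocation uses GFP_NOIO because this may run from
 * the state machine while I/O is blocked; -ESRCH (no listeners) is not treated
 * as an error. */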
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyway. */
		goto nla_put_failure;
	d_out->minor = device_to_minor(device);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, device, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	drbd_err(device, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}