/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/fid/fid_request.c
 *
 * Lustre Sequence Manager
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_FID

#include "../../include/linux/libcfs/libcfs.h"
#include <linux/module.h>

#include "../include/obd.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
/* mdc RPC locks */
#include "../include/lustre_mdc.h"
#include "fid_internal.h"

static struct dentry *seq_debugfs_dir;

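/*
 * Send a SEQ_QUERY RPC of type @opc to the sequence server behind
 * @seq->lcs_exp and copy the range it grants into @output.  The received
 * range is validated before being returned to the caller.
 */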
static int seq_client_rpc(struct lu_client_seq *seq,
			  struct lu_seq_range *output, __u32 opc,
			  const char *opcname)
{
	struct obd_export *exp = seq->lcs_exp;
	struct ptlrpc_request *req;
	struct lu_seq_range *out, *in;
	__u32 *op;
	unsigned int debug_mask;
	int rc;

	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
					LUSTRE_MDS_VERSION, SEQ_QUERY);
	if (req == NULL)
		return -ENOMEM;

	/* Init operation code */
	op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
	*op = opc;

	/* Zero out input range, this is not recovery yet. */
	in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
	range_init(in);

	ptlrpc_request_set_replen(req);

	in->lsr_index = seq->lcs_space.lsr_index;
	if (seq->lcs_type == LUSTRE_SEQ_METADATA)
		fld_range_set_mdt(in);
	else
		fld_range_set_ost(in);

	if (opc == SEQ_ALLOC_SUPER) {
		req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
		req->rq_reply_portal = MDC_REPLY_PORTAL;
		/* While allocating a super sequence for a data object,
		 * the current thread may hold the export of MDT0 (MDT0
		 * is precreating objects on this OST) and sends the
		 * request to MDT0 here, so we must not keep resending
		 * the request; otherwise, if MDT0 fails (is unmounted),
		 * it cannot release the export of MDT0.
		 */
		if (seq->lcs_type == LUSTRE_SEQ_DATA)
			req->rq_no_delay = req->rq_no_resend = 1;
		debug_mask = D_CONSOLE;
	} else {
		if (seq->lcs_type == LUSTRE_SEQ_METADATA)
			req->rq_request_portal = SEQ_METADATA_PORTAL;
		else
			req->rq_request_portal = SEQ_DATA_PORTAL;
		debug_mask = D_INFO;
	}

	ptlrpc_at_set_req_timeout(req);

	if (seq->lcs_type == LUSTRE_SEQ_METADATA)
		mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
	rc = ptlrpc_queue_wait(req);
	if (seq->lcs_type == LUSTRE_SEQ_METADATA)
		mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
	if (rc)
		goto out_req;

	out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
	*output = *out;

	if (!range_is_sane(output)) {
		CERROR("%s: Invalid range received from server: "
		       DRANGE"\n", seq->lcs_name, PRANGE(output));
		rc = -EINVAL;
		goto out_req;
	}

	if (range_is_exhausted(output)) {
		CERROR("%s: Range received from server is exhausted: "
		       DRANGE"\n", seq->lcs_name, PRANGE(output));
		rc = -EINVAL;
		goto out_req;
	}

	CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence "DRANGE"\n",
		     seq->lcs_name, opcname, PRANGE(output));

out_req:
	ptlrpc_req_finished(req);
	return rc;
}

/* Request sequence-controller node to allocate new super-sequence. */
int seq_client_alloc_super(struct lu_client_seq *seq,
			   const struct lu_env *env)
{
	int rc;

	mutex_lock(&seq->lcs_mutex);

	/* Check whether the connection to the seq controller has been
	 * set up (lcs_exp != NULL).
	 */
	if (!seq->lcs_exp) {
		mutex_unlock(&seq->lcs_mutex);
		return -EINPROGRESS;
	}

	rc = seq_client_rpc(seq, &seq->lcs_space,
			    SEQ_ALLOC_SUPER, "super");
	mutex_unlock(&seq->lcs_mutex);
	return rc;
}

/* Request sequence-controller node to allocate new meta-sequence. */
static int seq_client_alloc_meta(const struct lu_env *env,
				 struct lu_client_seq *seq)
{
	int rc;

	do {
		/* If the meta server returns -EINPROGRESS or -EAGAIN,
		 * it might not yet be ready to allocate a super
		 * sequence from the sequence controller (MDT0).
		 */
		rc = seq_client_rpc(seq, &seq->lcs_space,
				    SEQ_ALLOC_META, "meta");
	} while (rc == -EINPROGRESS || rc == -EAGAIN);

	return rc;
}

/* Allocate new sequence for client. */
static int seq_client_alloc_seq(const struct lu_env *env,
				struct lu_client_seq *seq, u64 *seqnr)
{
	int rc;

	LASSERT(range_is_sane(&seq->lcs_space));

	if (range_is_exhausted(&seq->lcs_space)) {
		rc = seq_client_alloc_meta(env, seq);
		if (rc) {
			CERROR("%s: Can't allocate new meta-sequence, rc %d\n",
			       seq->lcs_name, rc);
			return rc;
		}
		CDEBUG(D_INFO, "%s: New range - "DRANGE"\n",
		       seq->lcs_name, PRANGE(&seq->lcs_space));
	} else {
		rc = 0;
	}

	LASSERT(!range_is_exhausted(&seq->lcs_space));
	*seqnr = seq->lcs_space.lsr_start;
	seq->lcs_space.lsr_start += 1;

	CDEBUG(D_INFO, "%s: Allocated sequence [%#llx]\n", seq->lcs_name,
	       *seqnr);

	return rc;
}

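/*
 * Serialize sequence updates: if another thread is already updating the
 * sequence, sleep on lcs_waitq and return -EAGAIN so the caller rechecks
 * the FID state; otherwise mark the update in progress, drop lcs_mutex
 * and return 0.
 */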
static int seq_fid_alloc_prep(struct lu_client_seq *seq,
			      wait_queue_t *link)
{
	if (seq->lcs_update) {
		add_wait_queue(&seq->lcs_waitq, link);
		set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&seq->lcs_mutex);

		schedule();

		mutex_lock(&seq->lcs_mutex);
		remove_wait_queue(&seq->lcs_waitq, link);
		set_current_state(TASK_RUNNING);
		return -EAGAIN;
	}
	++seq->lcs_update;
	mutex_unlock(&seq->lcs_mutex);
	return 0;
}

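/*
 * Finish the update started in seq_fid_alloc_prep(): retake lcs_mutex,
 * clear lcs_update and wake up any waiters.
 */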
static void seq_fid_alloc_fini(struct lu_client_seq *seq)
{
	LASSERT(seq->lcs_update == 1);
	mutex_lock(&seq->lcs_mutex);
	--seq->lcs_update;
	wake_up(&seq->lcs_waitq);
}

/* Allocate a new FID from the client sequence @seq and save it in @fid. */
int seq_client_alloc_fid(const struct lu_env *env,
			 struct lu_client_seq *seq, struct lu_fid *fid)
{
	wait_queue_t link;
	int rc;

	LASSERT(seq != NULL);
	LASSERT(fid != NULL);

	init_waitqueue_entry(&link, current);
	mutex_lock(&seq->lcs_mutex);

	if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
		seq->lcs_fid.f_oid = seq->lcs_width;

	while (1) {
		u64 seqnr;

		if (!fid_is_zero(&seq->lcs_fid) &&
		    fid_oid(&seq->lcs_fid) < seq->lcs_width) {
			/* Just bump last allocated fid and return to caller. */
			seq->lcs_fid.f_oid += 1;
			rc = 0;
			break;
		}

		rc = seq_fid_alloc_prep(seq, &link);
		if (rc)
			continue;

		rc = seq_client_alloc_seq(env, seq, &seqnr);
		if (rc) {
			CERROR("%s: Can't allocate new sequence, rc %d\n",
			       seq->lcs_name, rc);
			seq_fid_alloc_fini(seq);
			mutex_unlock(&seq->lcs_mutex);
			return rc;
		}

		CDEBUG(D_INFO, "%s: Switch to sequence [0x%16.16Lx]\n",
		       seq->lcs_name, seqnr);

		seq->lcs_fid.f_oid = LUSTRE_FID_INIT_OID;
		seq->lcs_fid.f_seq = seqnr;
		seq->lcs_fid.f_ver = 0;

		/*
		 * Inform the caller that a sequence switch was performed,
		 * so it can set up the FLD for the new sequence.
		 */
		rc = 1;

		seq_fid_alloc_fini(seq);
		break;
	}

	*fid = seq->lcs_fid;
	mutex_unlock(&seq->lcs_mutex);

	CDEBUG(D_INFO, "%s: Allocated FID "DFID"\n", seq->lcs_name, PFID(fid));
	return rc;
}
EXPORT_SYMBOL(seq_client_alloc_fid);

/*
 * Finish the current sequence due to disconnect.
 * See mdc_import_event()
 */
void seq_client_flush(struct lu_client_seq *seq)
{
	wait_queue_t link;

	LASSERT(seq != NULL);
	init_waitqueue_entry(&link, current);
	mutex_lock(&seq->lcs_mutex);

	while (seq->lcs_update) {
		add_wait_queue(&seq->lcs_waitq, &link);
		set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&seq->lcs_mutex);

		schedule();

		mutex_lock(&seq->lcs_mutex);
		remove_wait_queue(&seq->lcs_waitq, &link);
		set_current_state(TASK_RUNNING);
	}

	fid_zero(&seq->lcs_fid);
	/*
	 * This index should not be used for sequence range allocation;
	 * set it to -1 as a debug check.
	 */
	seq->lcs_space.lsr_index = -1;

	range_init(&seq->lcs_space);
	mutex_unlock(&seq->lcs_mutex);
}
EXPORT_SYMBOL(seq_client_flush);

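/* Remove the per-client sequence debugfs entry, if one was created. */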
static void seq_client_debugfs_fini(struct lu_client_seq *seq)
{
	if (!IS_ERR_OR_NULL(seq->lcs_debugfs_entry))
		ldebugfs_remove(&seq->lcs_debugfs_entry);
}

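/*
 * Create the per-client debugfs entry under seq_debugfs_dir and
 * populate it with the seq_client_debugfs_list variables.
 */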
static int seq_client_debugfs_init(struct lu_client_seq *seq)
{
	int rc;

	seq->lcs_debugfs_entry = ldebugfs_register(seq->lcs_name,
						   seq_debugfs_dir,
						   NULL, NULL);

	if (IS_ERR_OR_NULL(seq->lcs_debugfs_entry)) {
		CERROR("%s: LdebugFS failed in seq-init\n", seq->lcs_name);
		rc = seq->lcs_debugfs_entry ? PTR_ERR(seq->lcs_debugfs_entry)
					    : -ENOMEM;
		seq->lcs_debugfs_entry = NULL;
		return rc;
	}

	rc = ldebugfs_add_vars(seq->lcs_debugfs_entry,
			       seq_client_debugfs_list, seq);
	if (rc) {
		CERROR("%s: Can't init sequence manager debugfs, rc %d\n",
		       seq->lcs_name, rc);
		goto out_cleanup;
	}

	return 0;

out_cleanup:
	seq_client_debugfs_fini(seq);
	return rc;
}

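/*
 * Release the debugfs entry and the export reference taken in
 * seq_client_init().
 */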
static void seq_client_fini(struct lu_client_seq *seq)
{
	seq_client_debugfs_fini(seq);

	if (seq->lcs_exp) {
		class_export_put(seq->lcs_exp);
		seq->lcs_exp = NULL;
	}
}

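/*
 * Initialize a client sequence manager: set its type and allocation
 * width, flush any stale state, take a reference on @exp and register
 * the debugfs entries.
 */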
static int seq_client_init(struct lu_client_seq *seq,
			   struct obd_export *exp,
			   enum lu_cli_type type,
			   const char *prefix)
{
	int rc;

	LASSERT(seq != NULL);
	LASSERT(prefix != NULL);

	seq->lcs_type = type;

	mutex_init(&seq->lcs_mutex);
	if (type == LUSTRE_SEQ_METADATA)
		seq->lcs_width = LUSTRE_METADATA_SEQ_MAX_WIDTH;
	else
		seq->lcs_width = LUSTRE_DATA_SEQ_MAX_WIDTH;

	init_waitqueue_head(&seq->lcs_waitq);
	/* Make sure that things are clear before work is started. */
	seq_client_flush(seq);

	seq->lcs_exp = class_export_get(exp);

	snprintf(seq->lcs_name, sizeof(seq->lcs_name),
		 "cli-%s", prefix);

	rc = seq_client_debugfs_init(seq);
	if (rc)
		seq_client_fini(seq);
	return rc;
}

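/*
 * Allocate and initialize the per-device client sequence manager
 * (obd->u.cli.cl_seq) on top of the export @exp.
 */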
int client_fid_init(struct obd_device *obd,
		    struct obd_export *exp, enum lu_cli_type type)
{
	struct client_obd *cli = &obd->u.cli;
	char *prefix;
	int rc;

	cli->cl_seq = kzalloc(sizeof(*cli->cl_seq), GFP_NOFS);
	if (!cli->cl_seq)
		return -ENOMEM;

	prefix = kzalloc(MAX_OBD_NAME + 5, GFP_NOFS);
	if (!prefix) {
		rc = -ENOMEM;
		goto out_free_seq;
	}

	snprintf(prefix, MAX_OBD_NAME + 5, "cli-%s", obd->obd_name);

	/* Init client side sequence-manager */
	rc = seq_client_init(cli->cl_seq, exp, type, prefix);
	kfree(prefix);
	if (rc)
		goto out_free_seq;

	return rc;
out_free_seq:
	kfree(cli->cl_seq);
	cli->cl_seq = NULL;
	return rc;
}
EXPORT_SYMBOL(client_fid_init);

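/* Tear down the client sequence manager set up by client_fid_init(). */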
int client_fid_fini(struct obd_device *obd)
{
	struct client_obd *cli = &obd->u.cli;

	if (cli->cl_seq != NULL) {
		seq_client_fini(cli->cl_seq);
		kfree(cli->cl_seq);
		cli->cl_seq = NULL;
	}

	return 0;
}
EXPORT_SYMBOL(client_fid_fini);

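/* Create the top-level LUSTRE_SEQ_NAME debugfs directory on module load. */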
static int __init fid_mod_init(void)
{
	seq_debugfs_dir = ldebugfs_register(LUSTRE_SEQ_NAME,
					    debugfs_lustre_root,
					    NULL, NULL);
	return PTR_ERR_OR_ZERO(seq_debugfs_dir);
}

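/* Remove the top-level debugfs directory on module unload. */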
static void __exit fid_mod_exit(void)
{
	if (!IS_ERR_OR_NULL(seq_debugfs_dir))
		ldebugfs_remove(&seq_debugfs_dir);
}

MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre FID Module");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1.0");

module_init(fid_mod_init);
module_exit(fid_mod_exit);