/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include "../../include/linux/libcfs/libcfs.h"

/* XXX move things up to the top, comment */
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd);

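/*
 * A cfs_trace_page ("tage") pairs one kernel page of formatted debug
 * messages with list linkage and ownership information (CPU and buffer
 * type), so pages can migrate between the per-CPU trace lists, the
 * pre-allocated stock, and the debug daemon's ringbuffer.
 */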
static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
	return list_entry(list, struct cfs_trace_page, linkage);
}

static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
	struct page	      *page;
	struct cfs_trace_page *tage;

	/* My caller is trying to free memory */
	if (!in_interrupt() && memory_pressure_get())
		return NULL;

	/*
	 * Don't spam console with allocation failures: they will be reported
	 * by upper layer anyway.
	 */
	gfp |= __GFP_NOWARN;
	page = alloc_page(gfp);
	if (page == NULL)
		return NULL;

	tage = kmalloc(sizeof(*tage), gfp);
	if (tage == NULL) {
		__free_page(page);
		return NULL;
	}

	tage->page = page;
	atomic_inc(&cfs_tage_allocated);
	return tage;
}

static void cfs_tage_free(struct cfs_trace_page *tage)
{
	__LASSERT(tage != NULL);
	__LASSERT(tage->page != NULL);

	__free_page(tage->page);
	kfree(tage);
	atomic_dec(&cfs_tage_allocated);
}

static void cfs_tage_to_tail(struct cfs_trace_page *tage,
			     struct list_head *queue)
{
	__LASSERT(tage != NULL);
	__LASSERT(queue != NULL);

	list_move_tail(&tage->linkage, queue);
}

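/*
 * Top up the per-CPU stock of pre-allocated trace pages: allocate onto
 * @stock until the stock would hold TCD_STOCK_PAGES pages in total, and
 * return the number of pages actually added (an allocation failure stops
 * the refill early).
 */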
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
			   struct list_head *stock)
{
	int i;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
		struct cfs_trace_page *tage;

		tage = cfs_tage_alloc(gfp);
		if (tage == NULL)
			break;
		list_add_tail(&tage->linkage, stock);
	}
	return i;
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
	struct cfs_trace_page *tage;

	if (tcd->tcd_cur_pages > 0) {
		__LASSERT(!list_empty(&tcd->tcd_pages));
		tage = cfs_tage_from_list(tcd->tcd_pages.prev);
		if (tage->used + len <= PAGE_CACHE_SIZE)
			return tage;
	}

	if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
		if (tcd->tcd_cur_stock_pages > 0) {
			tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
			--tcd->tcd_cur_stock_pages;
			list_del_init(&tage->linkage);
		} else {
			tage = cfs_tage_alloc(GFP_ATOMIC);
			if (unlikely(tage == NULL)) {
				if ((!memory_pressure_get() ||
				     in_interrupt()) && printk_ratelimit())
					printk(KERN_WARNING
					       "cannot allocate a tage (%ld)\n",
					       tcd->tcd_cur_pages);
				return NULL;
			}
		}

		tage->used = 0;
		tage->cpu = smp_processor_id();
		tage->type = tcd->tcd_type;
		list_add_tail(&tage->linkage, &tcd->tcd_pages);
		tcd->tcd_cur_pages++;

		if (tcd->tcd_cur_pages > 8 && thread_running) {
			struct tracefiled_ctl *tctl = &trace_tctl;
			/*
			 * wake up tracefiled to process some pages.
			 */
			wake_up(&tctl->tctl_waitq);
		}
		return tage;
	}
	return NULL;
}

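/*
 * The buffer is full: discard roughly the oldest 10% of the pages on this
 * CPU's trace list onto the daemon ringbuffer so that new messages can
 * still be recorded.
 */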
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
	int pgcount = tcd->tcd_cur_pages / 10;
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	if (printk_ratelimit())
		printk(KERN_WARNING "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
		       pgcount + 1, tcd->tcd_cur_pages);

	INIT_LIST_HEAD(&pc.pc_pages);
	spin_lock_init(&pc.pc_lock);

	list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
		if (pgcount-- == 0)
			break;

		list_move_tail(&tage->linkage, &pc.pc_pages);
		tcd->tcd_cur_pages--;
	}
	put_pages_on_tcd_daemon_list(&pc, tcd);
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
						 unsigned long len)
{
	struct cfs_trace_page *tage;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	if (len > PAGE_CACHE_SIZE) {
		pr_err("cowardly refusing to write %lu bytes in a page\n", len);
		return NULL;
	}

	tage = cfs_trace_get_tage_try(tcd, len);
	if (tage != NULL)
		return tage;
	if (thread_running)
		cfs_tcd_shrink(tcd);
	if (tcd->tcd_cur_pages > 0) {
		tage = cfs_tage_from_list(tcd->tcd_pages.next);
		tage->used = 0;
		cfs_tage_to_tail(tage, &tcd->tcd_pages);
	}
	return tage;
}

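/*
 * Note: callers normally reach libcfs_debug_msg() through the CDEBUG()/
 * CERROR() macro family, which builds the struct libcfs_debug_msg_data
 * (subsystem, mask, file, function, line, ratelimit state) on the
 * caller's behalf.
 */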
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
		     const char *format, ...)
{
	va_list args;
	int     rc;

	va_start(args, format);
	rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
	va_end(args);

	return rc;
}
EXPORT_SYMBOL(libcfs_debug_msg);

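/*
 * Format a debug message into the per-CPU trace buffer and, when the
 * mask warrants it, echo it to the console.  @format1 is printed with
 * the caller-supplied @args; @format2 (which may be NULL) is printed
 * with this function's own varargs and appended to the same record.
 * Returns 0 if the message reached the console path, 1 if console
 * output was not requested or was ratelimited away.
 */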
int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
		       const char *format1, va_list args,
		       const char *format2, ...)
{
	struct cfs_trace_cpu_data *tcd = NULL;
	struct ptldebug_header	   header = {0};
	struct cfs_trace_page	  *tage;
	/* string_buf is used only if tcd != NULL, and is always set then */
	char			  *string_buf = NULL;
	char			  *debug_buf;
	int			   known_size;
	int			   needed = 85; /* average message length */
	int			   max_nob;
	va_list			   ap;
	int			   depth;
	int			   i;
	int			   remain;
	int			   mask = msgdata->msg_mask;
	const char		  *file = kbasename(msgdata->msg_file);
	struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;

	tcd = cfs_trace_get_tcd();

	/* cfs_trace_get_tcd() grabs a lock, which disables preemption and
	 * pins us to a particular CPU.  This avoids an smp_processor_id()
	 * warning on Linux when debugging is enabled. */
	cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

	if (tcd == NULL)		/* arch may not log in IRQ context */
		goto console;

	if (tcd->tcd_cur_pages == 0)
		header.ph_flags |= PH_FLAG_FIRST_RECORD;

	if (tcd->tcd_shutting_down) {
		cfs_trace_put_tcd(tcd);
		tcd = NULL;
		goto console;
	}

	depth = __current_nesting_level();
	known_size = strlen(file) + 1 + depth;
	if (msgdata->msg_fn)
		known_size += strlen(msgdata->msg_fn) + 1;

	if (libcfs_debug_binary)
		known_size += sizeof(header);

	/*
	 * Loop at most twice: vsnprintf() returns the size the output
	 * really needs, _without_ the terminating NUL, so if the first
	 * pass ran out of room we retry once with the exact size.
	 */
	for (i = 0; i < 2; i++) {
		tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
		if (tage == NULL) {
			if (needed + known_size > PAGE_CACHE_SIZE)
				mask |= D_ERROR;

			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		string_buf = (char *)page_address(tage->page) +
					tage->used + known_size;

		max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
		if (max_nob <= 0) {
			printk(KERN_EMERG "negative max_nob: %d\n",
			       max_nob);
			mask |= D_ERROR;
			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		needed = 0;
		if (format1) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf, max_nob, format1, ap);
			va_end(ap);
		}

		if (format2) {
			remain = max_nob - needed;
			if (remain < 0)
				remain = 0;

			va_start(ap, format2);
			needed += vsnprintf(string_buf + needed, remain,
					    format2, ap);
			va_end(ap);
		}

		if (needed < max_nob) /* well. printing ok.. */
			break;
	}

	if (*(string_buf + needed - 1) != '\n')
		printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
		       file, msgdata->msg_line, msgdata->msg_fn);

	header.ph_len = known_size + needed;
	debug_buf = (char *)page_address(tage->page) + tage->used;

	if (libcfs_debug_binary) {
		memcpy(debug_buf, &header, sizeof(header));
		tage->used += sizeof(header);
		debug_buf += sizeof(header);
	}

	/* indent message according to the nesting level */
	while (depth-- > 0) {
		*(debug_buf++) = '.';
		++tage->used;
	}

	strcpy(debug_buf, file);
	tage->used += strlen(file) + 1;
	debug_buf += strlen(file) + 1;

	if (msgdata->msg_fn) {
		strcpy(debug_buf, msgdata->msg_fn);
		tage->used += strlen(msgdata->msg_fn) + 1;
		debug_buf += strlen(msgdata->msg_fn) + 1;
	}

	__LASSERT(debug_buf == string_buf);

	tage->used += needed;
	__LASSERT(tage->used <= PAGE_CACHE_SIZE);

console:
	if ((mask & libcfs_printk) == 0) {
		/* no console output requested */
		if (tcd != NULL)
			cfs_trace_put_tcd(tcd);
		return 1;
	}

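	/*
	 * Console ratelimiting: each message site keeps its own
	 * cfs_debug_limit_state.  While messages keep arriving before
	 * cdls_next, they are counted and dropped; the delay then grows
	 * by libcfs_console_backoff each time (and shrinks again once the
	 * site has been quiet for longer than the max delay plus ten
	 * seconds), clamped to the [libcfs_console_min_delay,
	 * libcfs_console_max_delay] range.
	 */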
	if (cdls != NULL) {
		if (libcfs_console_ratelimit &&
		    cdls->cdls_next != 0 &&     /* not first time ever */
		    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
			/* skipping a console message */
			cdls->cdls_count++;
			if (tcd != NULL)
				cfs_trace_put_tcd(tcd);
			return 1;
		}

		if (cfs_time_after(cfs_time_current(), cdls->cdls_next +
						       libcfs_console_max_delay
						       + cfs_time_seconds(10))) {
			/* last timeout was a long time ago */
			cdls->cdls_delay /= libcfs_console_backoff * 4;
		} else {
			cdls->cdls_delay *= libcfs_console_backoff;
		}

		if (cdls->cdls_delay < libcfs_console_min_delay)
			cdls->cdls_delay = libcfs_console_min_delay;
		else if (cdls->cdls_delay > libcfs_console_max_delay)
			cdls->cdls_delay = libcfs_console_max_delay;

		/* ensure cdls_next is never zero after it's been seen */
		cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
	}

	if (tcd != NULL) {
		cfs_print_to_console(&header, mask, string_buf, needed, file,
				     msgdata->msg_fn);
		cfs_trace_put_tcd(tcd);
	} else {
		string_buf = cfs_trace_get_console_buffer();

		needed = 0;
		if (format1 != NULL) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf,
					   CFS_TRACE_CONSOLE_BUFFER_SIZE,
					   format1, ap);
			va_end(ap);
		}
		if (format2 != NULL) {
			remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
			if (remain > 0) {
				va_start(ap, format2);
				needed += vsnprintf(string_buf + needed, remain,
						    format2, ap);
				va_end(ap);
			}
		}
		cfs_print_to_console(&header, mask,
				     string_buf, needed, file, msgdata->msg_fn);

		cfs_trace_put_console_buffer(string_buf);
	}

	if (cdls != NULL && cdls->cdls_count != 0) {
		string_buf = cfs_trace_get_console_buffer();

		needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
				  "Skipped %d previous similar message%s\n",
				  cdls->cdls_count,
				  (cdls->cdls_count > 1) ? "s" : "");

		cfs_print_to_console(&header, mask,
				     string_buf, needed, file, msgdata->msg_fn);

		cfs_trace_put_console_buffer(string_buf);
		cdls->cdls_count = 0;
	}

	return 0;
}
EXPORT_SYMBOL(libcfs_debug_vmsg2);

void
cfs_trace_assertion_failed(const char *str,
			   struct libcfs_debug_msg_data *msgdata)
{
	struct ptldebug_header hdr;

	libcfs_panic_in_progress = 1;
	libcfs_catastrophe = 1;
	mb();

	cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

	cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
			     msgdata->msg_file, msgdata->msg_fn);

	panic("Lustre debug assertion failure\n");

	/* not reached */
}

static void
panic_collect_pages(struct page_collection *pc)
{
	/* Do the collect_pages job on a single CPU: assumes that all other
	 * CPUs have been stopped during a panic.  If this isn't true for some
	 * arch, this will have to be implemented separately in each arch.  */
	int			   i;
	int			   j;
	struct cfs_trace_cpu_data *tcd;

	INIT_LIST_HEAD(&pc->pc_pages);

	cfs_tcd_for_each(tcd, i, j) {
		list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
		tcd->tcd_cur_pages = 0;

		if (pc->pc_want_daemon_pages) {
			list_splice_init(&tcd->tcd_daemon_pages,
					 &pc->pc_pages);
			tcd->tcd_cur_daemon_pages = 0;
		}
	}
}

static void collect_pages_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;

	spin_lock(&pc->pc_lock);
	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
			tcd->tcd_cur_pages = 0;
			if (pc->pc_want_daemon_pages) {
				list_splice_init(&tcd->tcd_daemon_pages,
						 &pc->pc_pages);
				tcd->tcd_cur_daemon_pages = 0;
			}
		}
	}
	spin_unlock(&pc->pc_lock);
}

static void collect_pages(struct page_collection *pc)
{
	INIT_LIST_HEAD(&pc->pc_pages);

	if (libcfs_panic_in_progress)
		panic_collect_pages(pc);
	else
		collect_pages_on_all_cpus(pc);
}

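/*
 * Undo a collect_pages(): hand every page still in the collection back
 * to the per-CPU list it came from, matching on the page's recorded CPU
 * and buffer type.  Used when writing the collected pages out failed.
 */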
static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	struct list_head *cur_head;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;

	spin_lock(&pc->pc_lock);
	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			cur_head = tcd->tcd_pages.next;

			list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
						 linkage) {

				__LASSERT_TAGE_INVARIANT(tage);

				if (tage->cpu != cpu || tage->type != i)
					continue;

				cfs_tage_to_tail(tage, cur_head);
				tcd->tcd_cur_pages++;
			}
		}
	}
	spin_unlock(&pc->pc_lock);
}

static void put_pages_back(struct page_collection *pc)
{
	if (!libcfs_panic_in_progress)
		put_pages_back_on_all_cpus(pc);
}

/* Add pages to a per-cpu debug daemon ringbuffer.  This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd)
{
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	spin_lock(&pc->pc_lock);
	list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {

		__LASSERT_TAGE_INVARIANT(tage);

		if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
			continue;

		cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
		tcd->tcd_cur_daemon_pages++;

		if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
			struct cfs_trace_page *victim;

			__LASSERT(!list_empty(&tcd->tcd_daemon_pages));
			victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

			__LASSERT_TAGE_INVARIANT(victim);

			list_del(&victim->linkage);
			cfs_tage_free(victim);
			tcd->tcd_cur_daemon_pages--;
		}
	}
	spin_unlock(&pc->pc_lock);
}

static void put_pages_on_daemon_list(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu)
			put_pages_on_tcd_daemon_list(pc, tcd);
	}
}

void cfs_trace_debug_print(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	spin_lock_init(&pc.pc_lock);

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		char *p, *file, *fn;
		struct page *page;

		__LASSERT_TAGE_INVARIANT(tage);

		page = tage->page;
		p = page_address(page);
		while (p < ((char *)page_address(page) + tage->used)) {
			struct ptldebug_header *hdr;
			int len;

			hdr = (void *)p;
			p += sizeof(*hdr);
			file = p;
			p += strlen(file) + 1;
			fn = p;
			p += strlen(fn) + 1;
			len = hdr->ph_len - (int)(p - (char *)hdr);

			cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

			p += len;
		}

		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}

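/*
 * Drain the trace buffers (daemon pages included) into @filename, which
 * is created O_EXCL so an existing dump is never overwritten.  Returns 0
 * on success (or when there was nothing to write) and a negative errno
 * on failure; pages that could not be written are put back on their
 * per-CPU lists.
 */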
int cfs_tracefile_dump_all_pages(char *filename)
{
	struct page_collection	pc;
	struct file		*filp;
	struct cfs_trace_page	*tage;
	struct cfs_trace_page	*tmp;
	char			*buf;
	int rc;

	DECL_MMSPACE;

	cfs_tracefile_write_lock();

	filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
	if (IS_ERR(filp)) {
		rc = PTR_ERR(filp);
		filp = NULL;
		pr_err("LustreError: can't open %s for dump: rc %d\n",
		       filename, rc);
		goto out;
	}

	spin_lock_init(&pc.pc_lock);
	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	if (list_empty(&pc.pc_pages)) {
		rc = 0;
		goto close;
	}

	/* ok, for now, just write the pages.  in the future we'll be building
	 * iobufs with the pages and calling generic_direct_IO */
	MMSPACE_OPEN;
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

		__LASSERT_TAGE_INVARIANT(tage);

		buf = kmap(tage->page);
		rc = vfs_write(filp, (__force const char __user *)buf,
			       tage->used, &filp->f_pos);
		kunmap(tage->page);

		if (rc != (int)tage->used) {
			printk(KERN_WARNING "wanted to write %u but wrote %d\n",
			       tage->used, rc);
			put_pages_back(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			break;
		}
		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
	MMSPACE_CLOSE;
	rc = vfs_fsync(filp, 1);
	if (rc)
		pr_err("sync returns %d\n", rc);
close:
	filp_close(filp, NULL);
out:
	cfs_tracefile_write_unlock();
	return rc;
}

void cfs_trace_flush_pages(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	spin_lock_init(&pc.pc_lock);

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

		__LASSERT_TAGE_INVARIANT(tage);

		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}

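/*
 * Copy a string from userspace into @knl_buffer, stripping any trailing
 * whitespace and guaranteeing NUL termination.  Returns 0 on success,
 * -EOVERFLOW if the user buffer (or the terminated result) won't fit,
 * -EFAULT on a bad user pointer, and -EINVAL for an empty string.
 */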
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
			    const char __user *usr_buffer, int usr_buffer_nob)
{
	int    nob;

	if (usr_buffer_nob > knl_buffer_nob)
		return -EOVERFLOW;

	if (copy_from_user((void *)knl_buffer,
			   usr_buffer, usr_buffer_nob))
		return -EFAULT;

	nob = strnlen(knl_buffer, usr_buffer_nob);
	while (--nob >= 0)		      /* strip trailing whitespace */
		if (!isspace(knl_buffer[nob]))
			break;

	if (nob < 0)			      /* empty string */
		return -EINVAL;

	if (nob == knl_buffer_nob)	      /* no space to terminate */
		return -EOVERFLOW;

	knl_buffer[nob + 1] = 0;	      /* terminate */
	return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);

int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
			     const char *knl_buffer, char *append)
{
	/* NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n", for /proc entries and "" (i.e. a
	 * terminating zero byte) for sysctl entries */
	int   nob = strlen(knl_buffer);

	if (nob > usr_buffer_nob)
		nob = usr_buffer_nob;

	if (copy_to_user(usr_buffer, knl_buffer, nob))
		return -EFAULT;

	if (append != NULL && nob < usr_buffer_nob) {
		if (copy_to_user(usr_buffer + nob, append, 1))
			return -EFAULT;

		nob++;
	}

	return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);

int cfs_trace_allocate_string_buffer(char **str, int nob)
{
	if (nob > 2 * PAGE_CACHE_SIZE)	    /* string must be "sensible" */
		return -EINVAL;

	*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
	if (*str == NULL)
		return -ENOMEM;

	return 0;
}

void cfs_trace_free_string_buffer(char *str, int nob)
{
	kfree(str);
}

int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
	char	 *str;
	int	  rc;

	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;

	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc != 0)
		goto out;

	if (str[0] != '/') {
		rc = -EINVAL;
		goto out;
	}
	rc = cfs_tracefile_dump_all_pages(str);
out:
	cfs_trace_free_string_buffer(str, usr_str_nob + 1);
	return rc;
}

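/*
 * Process a debug-daemon control command: "stop" shuts the daemon down,
 * "size=<MB>" sets the maximum tracefile size in MB (values outside the
 * 10..20480 range fall back to CFS_TRACEFILE_SIZE), and an absolute path
 * names the file the daemon writes to and starts the daemon thread.
 */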
int cfs_trace_daemon_command(char *str)
{
	int       rc = 0;

	cfs_tracefile_write_lock();

	if (strcmp(str, "stop") == 0) {
		cfs_tracefile_write_unlock();
		cfs_trace_stop_thread();
		cfs_tracefile_write_lock();
		memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

	} else if (strncmp(str, "size=", 5) == 0) {
		cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
		if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
			cfs_tracefile_size = CFS_TRACEFILE_SIZE;
		else
			cfs_tracefile_size <<= 20;

	} else if (strlen(str) >= sizeof(cfs_tracefile)) {
		rc = -ENAMETOOLONG;
	} else if (str[0] != '/') {
		rc = -EINVAL;
	} else {
		strcpy(cfs_tracefile, str);

		printk(KERN_INFO
		       "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
		       cfs_tracefile,
		       (long)(cfs_tracefile_size >> 10));

		cfs_trace_start_thread();
	}

	cfs_tracefile_write_unlock();
	return rc;
}

int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
	char *str;
	int   rc;

	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;

	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc == 0)
		rc = cfs_trace_daemon_command(str);

	cfs_trace_free_string_buffer(str, usr_str_nob + 1);
	return rc;
}

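/*
 * Resize the trace buffers to @mb megabytes in total.  The budget is
 * bounded below by one MB per possible CPU and above by
 * cfs_trace_max_debug_mb(), then divided evenly across CPUs and split
 * between buffer types according to each tcd_pages_factor weight.
 */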
int cfs_trace_set_debug_mb(int mb)
{
	int i;
	int j;
	int pages;
	int limit = cfs_trace_max_debug_mb();
	struct cfs_trace_cpu_data *tcd;

	if (mb < num_possible_cpus()) {
		printk(KERN_WARNING
		       "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
		       mb, num_possible_cpus());
		mb = num_possible_cpus();
	}

	if (mb > limit) {
		printk(KERN_WARNING
		       "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n",
		       mb, limit);
		mb = limit;
	}

	mb /= num_possible_cpus();
	pages = mb << (20 - PAGE_CACHE_SHIFT);

	cfs_tracefile_write_lock();

	cfs_tcd_for_each(tcd, i, j)
		tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

	cfs_tracefile_write_unlock();

	return 0;
}

int cfs_trace_get_debug_mb(void)
{
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;
	int total_pages = 0;

	cfs_tracefile_read_lock();

	cfs_tcd_for_each(tcd, i, j)
		total_pages += tcd->tcd_max_pages;

	cfs_tracefile_read_unlock();

	return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
}

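/*
 * Body of the "ktracefiled" debug daemon: once a second (or sooner, when
 * woken by cfs_trace_get_tage_try()), collect the filled trace pages and
 * append them to cfs_tracefile, wrapping to offset zero once the file
 * reaches cfs_tracefile_size.  Written pages are then parked on the
 * per-CPU daemon ringbuffers for post-mortem dumps; on a failed write
 * the remaining pages are put back on the trace lists instead.
 */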
static int tracefiled(void *arg)
{
	struct page_collection pc;
	struct tracefiled_ctl *tctl = arg;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	struct file *filp;
	char *buf;
	int last_loop = 0;
	int rc;

	DECL_MMSPACE;

	/* we're started late enough that we pick up init's fs context */
	/* this is so broken in uml?  what on earth is going on? */

	spin_lock_init(&pc.pc_lock);
	complete(&tctl->tctl_start);

	while (1) {
		wait_queue_t __wait;

		pc.pc_want_daemon_pages = 0;
		collect_pages(&pc);
		if (list_empty(&pc.pc_pages))
			goto end_loop;

		filp = NULL;
		cfs_tracefile_read_lock();
		if (cfs_tracefile[0] != 0) {
			filp = filp_open(cfs_tracefile,
					 O_CREAT | O_RDWR | O_LARGEFILE,
					 0600);
			if (IS_ERR(filp)) {
				rc = PTR_ERR(filp);
				filp = NULL;
				printk(KERN_WARNING "couldn't open %s: %d\n",
				       cfs_tracefile, rc);
			}
		}
		cfs_tracefile_read_unlock();
		if (filp == NULL) {
			put_pages_on_daemon_list(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			goto end_loop;
		}

		MMSPACE_OPEN;

		list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
					 linkage) {
			static loff_t f_pos;

			__LASSERT_TAGE_INVARIANT(tage);

			if (f_pos >= (off_t)cfs_tracefile_size)
				f_pos = 0;
			else if (f_pos > i_size_read(file_inode(filp)))
				f_pos = i_size_read(file_inode(filp));

			buf = kmap(tage->page);
			rc = vfs_write(filp, (__force const char __user *)buf,
				       tage->used, &f_pos);
			kunmap(tage->page);

			if (rc != (int)tage->used) {
				printk(KERN_WARNING "wanted to write %u but wrote %d\n",
				       tage->used, rc);
				put_pages_back(&pc);
				__LASSERT(list_empty(&pc.pc_pages));
				break;
			}
		}
		MMSPACE_CLOSE;

		filp_close(filp, NULL);
		put_pages_on_daemon_list(&pc);
		if (!list_empty(&pc.pc_pages)) {
			int i;

			printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
			pr_err("total cpus(%d): ",
			       num_possible_cpus());
			for (i = 0; i < num_possible_cpus(); i++)
				if (cpu_online(i))
					pr_cont("%d(on) ", i);
				else
					pr_cont("%d(off) ", i);
			pr_cont("\n");

			i = 0;
			list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
						 linkage)
				pr_err("page %d belongs to cpu %d\n",
				       ++i, tage->cpu);
			pr_err("There are %d pages unwritten\n", i);
		}
		__LASSERT(list_empty(&pc.pc_pages));
end_loop:
		if (atomic_read(&tctl->tctl_shutdown)) {
			if (last_loop == 0) {
				last_loop = 1;
				continue;
			} else {
				break;
			}
		}
		init_waitqueue_entry(&__wait, current);
		add_wait_queue(&tctl->tctl_waitq, &__wait);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
		remove_wait_queue(&tctl->tctl_waitq, &__wait);
	}
	complete(&tctl->tctl_stop);
	return 0;
}

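/*
 * Start the debug daemon thread if it is not already running; serialized
 * by cfs_trace_thread_mutex and paired with cfs_trace_stop_thread().
 * Returns 0 if the thread is running on return, -ECHILD if it could not
 * be spawned.
 */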
int cfs_trace_start_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;
	int rc = 0;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running)
		goto out;

	init_completion(&tctl->tctl_start);
	init_completion(&tctl->tctl_stop);
	init_waitqueue_head(&tctl->tctl_waitq);
	atomic_set(&tctl->tctl_shutdown, 0);

	if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
		rc = -ECHILD;
		goto out;
	}

	wait_for_completion(&tctl->tctl_start);
	thread_running = 1;
out:
	mutex_unlock(&cfs_trace_thread_mutex);
	return rc;
}

void cfs_trace_stop_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running) {
		printk(KERN_INFO
		       "Lustre: shutting down debug daemon thread...\n");
		atomic_set(&tctl->tctl_shutdown, 1);
		wait_for_completion(&tctl->tctl_stop);
		thread_running = 0;
	}
	mutex_unlock(&cfs_trace_thread_mutex);
}

int cfs_tracefile_init(int max_pages)
{
	struct cfs_trace_cpu_data *tcd;
	int		    i;
	int		    j;
	int		    rc;
	int		    factor;

	rc = cfs_tracefile_init_arch();
	if (rc != 0)
		return rc;

	cfs_tcd_for_each(tcd, i, j) {
		/* tcd_pages_factor is initialized in cfs_tracefile_init_arch(). */
		factor = tcd->tcd_pages_factor;
		INIT_LIST_HEAD(&tcd->tcd_pages);
		INIT_LIST_HEAD(&tcd->tcd_stock_pages);
		INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
		tcd->tcd_cur_pages = 0;
		tcd->tcd_cur_stock_pages = 0;
		tcd->tcd_cur_daemon_pages = 0;
		tcd->tcd_max_pages = (max_pages * factor) / 100;
		LASSERT(tcd->tcd_max_pages > 0);
		tcd->tcd_shutting_down = 0;
	}

	return 0;
}

static void trace_cleanup_on_all_cpus(void)
{
	struct cfs_trace_cpu_data *tcd;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			tcd->tcd_shutting_down = 1;

			list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
						 linkage) {
				__LASSERT_TAGE_INVARIANT(tage);

				list_del(&tage->linkage);
				cfs_tage_free(tage);
			}

			tcd->tcd_cur_pages = 0;
		}
	}
}

static void cfs_trace_cleanup(void)
{
	struct page_collection pc;

	INIT_LIST_HEAD(&pc.pc_pages);
	spin_lock_init(&pc.pc_lock);

	trace_cleanup_on_all_cpus();

	cfs_tracefile_fini_arch();
}

void cfs_tracefile_exit(void)
{
	cfs_trace_stop_thread();
	cfs_trace_cleanup();
}