/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lprocfs_status.h
 *
 * Top level header file for LProc SNMP
 *
 * Author: Hariharan Thantry thantry@users.sourceforge.net
 */
#ifndef _LPROCFS_SNMP_H
#define _LPROCFS_SNMP_H

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "lustre/lustre_idl.h"

struct lprocfs_vars {
        const char              *name;
        struct file_operations  *fops;
        void                    *data;
        /**
         * /proc file mode.
         */
        umode_t                  proc_mode;
};

struct lprocfs_static_vars {
        struct lprocfs_vars     *obd_vars;
        struct attribute_group  *sysfs_vars;
};

/* if we find more consumers this could be generalized */
#define OBD_HIST_MAX 32
struct obd_histogram {
        spinlock_t      oh_lock;
        unsigned long   oh_buckets[OBD_HIST_MAX];
};

enum {
        BRW_R_PAGES = 0,
        BRW_W_PAGES,
        BRW_R_RPC_HIST,
        BRW_W_RPC_HIST,
        BRW_R_IO_TIME,
        BRW_W_IO_TIME,
        BRW_R_DISCONT_PAGES,
        BRW_W_DISCONT_PAGES,
        BRW_R_DISCONT_BLOCKS,
        BRW_W_DISCONT_BLOCKS,
        BRW_R_DISK_IOSIZE,
        BRW_W_DISK_IOSIZE,
        BRW_R_DIO_FRAGS,
        BRW_W_DIO_FRAGS,
        BRW_LAST,
};

struct brw_stats {
        struct obd_histogram hist[BRW_LAST];
};

enum {
        RENAME_SAMEDIR_SIZE = 0,
        RENAME_CROSSDIR_SRC_SIZE,
        RENAME_CROSSDIR_TGT_SIZE,
        RENAME_LAST,
};

struct rename_stats {
        struct obd_histogram hist[RENAME_LAST];
};

/* An lprocfs counter can be configured using the enum bit masks below;
 * see the illustrative example after the enum.
 *
 * LPROCFS_CNTR_EXTERNALLOCK indicates that an external lock already
 * protects this counter from concurrent updates. If not specified,
 * lprocfs uses an internal per-counter lock. External locks are
 * not used to protect counter increments, but are used to protect
 * counter readout and resets.
 *
 * LPROCFS_CNTR_AVGMINMAX indicates a counter that takes multi-valued
 * samples (i.e. it can be incremented by more than "1"). When specified,
 * the counter maintains min, max and sum in addition to a simple
 * invocation count, so averages can be computed. If not specified,
 * the counter is an increment-by-1 counter and min, max, sum, etc.
 * are not maintained.
 *
 * LPROCFS_CNTR_STDDEV indicates that the counter should track the sum
 * of squares (for multi-valued counter samples only). This allows
 * external computation of the standard deviation, but costs a 64-bit
 * multiply per counter increment.
 */

enum {
        LPROCFS_CNTR_EXTERNALLOCK       = 0x0001,
        LPROCFS_CNTR_AVGMINMAX          = 0x0002,
        LPROCFS_CNTR_STDDEV             = 0x0004,

        /* counter data type */
        LPROCFS_TYPE_REGS               = 0x0100,
        LPROCFS_TYPE_BYTES              = 0x0200,
        LPROCFS_TYPE_PAGES              = 0x0400,
        LPROCFS_TYPE_CYCLE              = 0x0800,
};
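
/*
 * Illustrative sketch (not part of this header) of how the config flags
 * above are typically combined when a stats block is set up.  The counter
 * index MY_STATS_READ_BYTES and the counter count MY_STATS_LAST are
 * hypothetical; lprocfs_alloc_stats(), lprocfs_counter_init() and
 * lprocfs_counter_add() are declared later in this file.
 *
 *	struct lprocfs_stats *stats;
 *
 *	stats = lprocfs_alloc_stats(MY_STATS_LAST, LPROCFS_STATS_FLAG_NONE);
 *	if (!stats)
 *		return -ENOMEM;
 *
 *	// Multi-valued byte counter: keeps count/min/max/sum so an
 *	// average can be derived from the readout.
 *	lprocfs_counter_init(stats, MY_STATS_READ_BYTES,
 *			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
 *			     "read_bytes", "bytes");
 *
 *	// Later, on each read of "count" bytes:
 *	lprocfs_counter_add(stats, MY_STATS_READ_BYTES, count);
 */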

#define LC_MIN_INIT ((~(__u64)0) >> 1)

struct lprocfs_counter_header {
        unsigned int    lc_config;
        const char      *lc_name;       /* must be static */
        const char      *lc_units;      /* must be static */
};

struct lprocfs_counter {
        __s64   lc_count;
        __s64   lc_min;
        __s64   lc_max;
        __s64   lc_sumsquare;
        /*
         * Every counter has lc_array_sum[0]; lc_array_sum[1] exists only
         * for counters that can be updated from IRQ context, i.e. counters
         * in stats allocated with the LPROCFS_STATS_FLAG_IRQ_SAFE flag.
         */
        __s64   lc_array_sum[1];
};

#define lc_sum          lc_array_sum[0]
#define lc_sum_irq      lc_array_sum[1]

struct lprocfs_percpu {
#ifndef __GNUC__
        __s64                   pad;
#endif
        struct lprocfs_counter  lp_cntr[0];
};

#define LPROCFS_GET_NUM_CPU     0x0001
#define LPROCFS_GET_SMP_ID      0x0002

enum lprocfs_stats_flags {
        LPROCFS_STATS_FLAG_NONE     = 0x0000, /* per cpu counter */
        LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu
                                               * area and need locking */
        LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* allocations must be
                                               * IRQ-safe */
};

enum lprocfs_fields_flags {
        LPROCFS_FIELDS_FLAGS_CONFIG     = 0x0001,
        LPROCFS_FIELDS_FLAGS_SUM        = 0x0002,
        LPROCFS_FIELDS_FLAGS_MIN        = 0x0003,
        LPROCFS_FIELDS_FLAGS_MAX        = 0x0004,
        LPROCFS_FIELDS_FLAGS_AVG        = 0x0005,
        LPROCFS_FIELDS_FLAGS_SUMSQUARE  = 0x0006,
        LPROCFS_FIELDS_FLAGS_COUNT      = 0x0007,
};

struct lprocfs_stats {
        /* # of counters */
        unsigned short                  ls_num;
        /* 1 + the biggest cpu # whose ls_percpu slot has been allocated */
        unsigned short                  ls_biggest_alloc_num;
        enum lprocfs_stats_flags        ls_flags;
        /* Lock used when there are no percpu stats areas; for percpu stats,
         * it protects changes to ls_biggest_alloc_num */
        spinlock_t                      ls_lock;

        /* has ls_num of counter headers */
        struct lprocfs_counter_header   *ls_cnt_header;
        struct lprocfs_percpu           *ls_percpu[0];
};

#define OPC_RANGE(seg) (seg ## _LAST_OPC - seg ## _FIRST_OPC)

/* Pack all opcodes down into a single monotonically increasing index */
static inline int opcode_offset(__u32 opc)
{
        if (opc < OST_LAST_OPC) {
                /* OST opcode */
                return (opc - OST_FIRST_OPC);
        } else if (opc < MDS_LAST_OPC) {
                /* MDS opcode */
                return (opc - MDS_FIRST_OPC +
                        OPC_RANGE(OST));
        } else if (opc < LDLM_LAST_OPC) {
                /* LDLM Opcode */
                return (opc - LDLM_FIRST_OPC +
                        OPC_RANGE(MDS) +
                        OPC_RANGE(OST));
        } else if (opc < MGS_LAST_OPC) {
                /* MGS Opcode */
                return (opc - MGS_FIRST_OPC +
                        OPC_RANGE(LDLM) +
                        OPC_RANGE(MDS) +
                        OPC_RANGE(OST));
        } else if (opc < OBD_LAST_OPC) {
                /* OBD Ping */
                return (opc - OBD_FIRST_OPC +
                        OPC_RANGE(MGS) +
                        OPC_RANGE(LDLM) +
                        OPC_RANGE(MDS) +
                        OPC_RANGE(OST));
        } else if (opc < LLOG_LAST_OPC) {
                /* LLOG Opcode */
                return (opc - LLOG_FIRST_OPC +
                        OPC_RANGE(OBD) +
                        OPC_RANGE(MGS) +
                        OPC_RANGE(LDLM) +
                        OPC_RANGE(MDS) +
                        OPC_RANGE(OST));
        } else if (opc < QUOTA_LAST_OPC) {
                /* LQUOTA Opcode */
                return (opc - QUOTA_FIRST_OPC +
                        OPC_RANGE(LLOG) +
                        OPC_RANGE(OBD) +
                        OPC_RANGE(MGS) +
                        OPC_RANGE(LDLM) +
                        OPC_RANGE(MDS) +
                        OPC_RANGE(OST));
        } else if (opc < SEQ_LAST_OPC) {
                /* SEQ opcode */
                return (opc - SEQ_FIRST_OPC +
                        OPC_RANGE(QUOTA) +
                        OPC_RANGE(LLOG) +
                        OPC_RANGE(OBD) +
                        OPC_RANGE(MGS) +
                        OPC_RANGE(LDLM) +
                        OPC_RANGE(MDS) +
                        OPC_RANGE(OST));
        } else if (opc < SEC_LAST_OPC) {
                /* SEC opcode */
                return (opc - SEC_FIRST_OPC +
                        OPC_RANGE(SEQ) +
                        OPC_RANGE(QUOTA) +
                        OPC_RANGE(LLOG) +
                        OPC_RANGE(OBD) +
                        OPC_RANGE(MGS) +
                        OPC_RANGE(LDLM) +
                        OPC_RANGE(MDS) +
                        OPC_RANGE(OST));
        } else if (opc < FLD_LAST_OPC) {
                /* FLD opcode */
                return (opc - FLD_FIRST_OPC +
                        OPC_RANGE(SEC) +
                        OPC_RANGE(SEQ) +
                        OPC_RANGE(QUOTA) +
                        OPC_RANGE(LLOG) +
                        OPC_RANGE(OBD) +
                        OPC_RANGE(MGS) +
                        OPC_RANGE(LDLM) +
                        OPC_RANGE(MDS) +
                        OPC_RANGE(OST));
        } else if (opc < UPDATE_LAST_OPC) {
                /* update opcode */
                return (opc - UPDATE_FIRST_OPC +
                        OPC_RANGE(FLD) +
                        OPC_RANGE(SEC) +
                        OPC_RANGE(SEQ) +
                        OPC_RANGE(QUOTA) +
                        OPC_RANGE(LLOG) +
                        OPC_RANGE(OBD) +
                        OPC_RANGE(MGS) +
                        OPC_RANGE(LDLM) +
                        OPC_RANGE(MDS) +
                        OPC_RANGE(OST));
        } else {
                /* Unknown Opcode */
                return -1;
        }
}

#define LUSTRE_MAX_OPCODES (OPC_RANGE(OST) + \
                            OPC_RANGE(MDS) + \
                            OPC_RANGE(LDLM) + \
                            OPC_RANGE(MGS) + \
                            OPC_RANGE(OBD) + \
                            OPC_RANGE(LLOG) + \
                            OPC_RANGE(QUOTA) + \
                            OPC_RANGE(SEQ) + \
                            OPC_RANGE(SEC) + \
                            OPC_RANGE(FLD) + \
                            OPC_RANGE(UPDATE))
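
/*
 * Illustrative sketch: opcode_offset() packs every known RPC opcode into a
 * dense index in [0, LUSTRE_MAX_OPCODES), so one stats block can hold a
 * counter per opcode.  The svc_stats variable below is hypothetical.
 *
 *	int idx = opcode_offset(opc);
 *
 *	if (idx >= 0)
 *		lprocfs_counter_incr(svc_stats, idx);
 */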

#define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \
                           OPC_RANGE(EXTRA))

enum {
        PTLRPC_REQWAIT_CNTR = 0,
        PTLRPC_REQQDEPTH_CNTR,
        PTLRPC_REQACTIVE_CNTR,
        PTLRPC_TIMEOUT,
        PTLRPC_REQBUF_AVAIL_CNTR,
        PTLRPC_LAST_CNTR
};

#define PTLRPC_FIRST_CNTR PTLRPC_REQWAIT_CNTR

enum {
        LDLM_GLIMPSE_ENQUEUE = 0,
        LDLM_PLAIN_ENQUEUE,
        LDLM_EXTENT_ENQUEUE,
        LDLM_FLOCK_ENQUEUE,
        LDLM_IBITS_ENQUEUE,
        MDS_REINT_SETATTR,
        MDS_REINT_CREATE,
        MDS_REINT_LINK,
        MDS_REINT_UNLINK,
        MDS_REINT_RENAME,
        MDS_REINT_OPEN,
        MDS_REINT_SETXATTR,
        BRW_READ_BYTES,
        BRW_WRITE_BYTES,
        EXTRA_LAST_OPC
};

#define EXTRA_FIRST_OPC LDLM_GLIMPSE_ENQUEUE
/* class_obd.c */
extern struct dentry *debugfs_lustre_root;
extern struct kobject *lustre_kobj;

struct obd_device;
struct obd_histogram;

/* Days / hours / mins / seconds format */
struct dhms {
        int d, h, m, s;
};

static inline void s2dhms(struct dhms *ts, time64_t secs64)
{
        unsigned int secs;

        ts->d = div_u64_rem(secs64, 86400, &secs);
        ts->h = secs / 3600;
        secs = secs % 3600;
        ts->m = secs / 60;
        ts->s = secs % 60;
}

#define DHMS_FMT "%dd%dh%02dm%02ds"
#define DHMS_VARS(x) (x)->d, (x)->h, (x)->m, (x)->s
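
/*
 * Illustrative sketch: formatting an age in seconds with the helpers above.
 * The seq_file "m" and the "age" value are hypothetical.
 *
 *	struct dhms ts;
 *
 *	s2dhms(&ts, age);
 *	seq_printf(m, "age: " DHMS_FMT "\n", DHMS_VARS(&ts));
 */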

#define JOBSTATS_JOBID_VAR_MAX_LEN      20
#define JOBSTATS_DISABLE                "disable"
#define JOBSTATS_PROCNAME_UID           "procname_uid"
#define JOBSTATS_NODELOCAL              "nodelocal"

int lprocfs_write_frac_helper(const char __user *buffer,
                              unsigned long count, int *val, int mult);
int lprocfs_read_frac_helper(char *buffer, unsigned long count,
                             long val, int mult);
int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid);
/*
 * \retval < 0 on error (only possible when opc is LPROCFS_GET_SMP_ID)
 */
static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc,
                                     unsigned long *flags)
{
        int rc = 0;

        switch (opc) {
        default:
                LBUG();

        case LPROCFS_GET_SMP_ID:
                if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
                        if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
                                spin_lock_irqsave(&stats->ls_lock, *flags);
                        else
                                spin_lock(&stats->ls_lock);
                        return 0;
                } else {
                        unsigned int cpuid = get_cpu();

                        if (unlikely(stats->ls_percpu[cpuid] == NULL)) {
                                rc = lprocfs_stats_alloc_one(stats, cpuid);
                                if (rc < 0) {
                                        put_cpu();
                                        return rc;
                                }
                        }
                        return cpuid;
                }

        case LPROCFS_GET_NUM_CPU:
                if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
                        if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
                                spin_lock_irqsave(&stats->ls_lock, *flags);
                        else
                                spin_lock(&stats->ls_lock);
                        return 1;
                }
                return stats->ls_biggest_alloc_num;
        }
}

static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc,
                                        unsigned long *flags)
{
        switch (opc) {
        default:
                LBUG();

        case LPROCFS_GET_SMP_ID:
                if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
                        if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
                                spin_unlock_irqrestore(&stats->ls_lock,
                                                       *flags);
                        } else {
                                spin_unlock(&stats->ls_lock);
                        }
                } else {
                        put_cpu();
                }
                return;

        case LPROCFS_GET_NUM_CPU:
                if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
                        if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
                                spin_unlock_irqrestore(&stats->ls_lock,
                                                       *flags);
                        } else {
                                spin_unlock(&stats->ls_lock);
                        }
                }
                return;
        }
}

static inline unsigned int
lprocfs_stats_counter_size(struct lprocfs_stats *stats)
{
        unsigned int percpusize;

        percpusize = offsetof(struct lprocfs_percpu, lp_cntr[stats->ls_num]);

        /* irq safe stats need lc_array_sum[1] */
        if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
                percpusize += stats->ls_num * sizeof(__s64);

        if ((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0)
                percpusize = L1_CACHE_ALIGN(percpusize);

        return percpusize;
}

static inline struct lprocfs_counter *
lprocfs_stats_counter_get(struct lprocfs_stats *stats, unsigned int cpuid,
                          int index)
{
        struct lprocfs_counter *cntr;

        cntr = &stats->ls_percpu[cpuid]->lp_cntr[index];

        if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
                cntr = (void *)cntr + index * sizeof(__s64);

        return cntr;
}
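
/*
 * Per-CPU layout sketch: lprocfs_stats_counter_size() and
 * lprocfs_stats_counter_get() above agree on the following packing.
 * Without LPROCFS_STATS_FLAG_IRQ_SAFE each slot is one
 * struct lprocfs_counter; with it, every counter is followed by one extra
 * __s64 (lc_sum_irq), so counter "index" sits index * sizeof(__s64) bytes
 * later than it would in the plain array.
 *
 *	[ cntr0 ][ cntr1 ] ...                          (default)
 *	[ cntr0 | sum_irq0 ][ cntr1 | sum_irq1 ] ...    (IRQ_SAFE)
 */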

/* Two optimized LPROCFS counter increment helpers are provided:
 * lprocfs_counter_incr(stats, idx)        - optimized for by-one counters
 * lprocfs_counter_add(stats, idx, amount) - use for multi-valued counters
 * The counter data layout allows the config flag, counter lock and the
 * count itself to reside within a single cache line.
 */

void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount);
void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount);

#define lprocfs_counter_incr(stats, idx) \
        lprocfs_counter_add(stats, idx, 1)
#define lprocfs_counter_decr(stats, idx) \
        lprocfs_counter_sub(stats, idx, 1)
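
/*
 * Illustrative sketch: a by-one counter used as an in-flight gauge.  The
 * MY_STATS_REQ_ACTIVE index is hypothetical.
 *
 *	lprocfs_counter_incr(stats, MY_STATS_REQ_ACTIVE);
 *	... handle the request ...
 *	lprocfs_counter_decr(stats, MY_STATS_REQ_ACTIVE);
 */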

__s64 lprocfs_read_helper(struct lprocfs_counter *lc,
                          struct lprocfs_counter_header *header,
                          enum lprocfs_stats_flags flags,
                          enum lprocfs_fields_flags field);
static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
                                            int idx,
                                            enum lprocfs_fields_flags field)
{
        int i;
        unsigned int num_cpu;
        unsigned long flags = 0;
        __u64 ret = 0;

        LASSERT(stats != NULL);

        num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
        for (i = 0; i < num_cpu; i++) {
                if (stats->ls_percpu[i] == NULL)
                        continue;
                ret += lprocfs_read_helper(
                                lprocfs_stats_counter_get(stats, i, idx),
                                &stats->ls_cnt_header[idx], stats->ls_flags,
                                field);
        }
        lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
        return ret;
}

extern struct lprocfs_stats *
lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags);
void lprocfs_clear_stats(struct lprocfs_stats *stats);
void lprocfs_free_stats(struct lprocfs_stats **stats);
void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
                          unsigned conf, const char *name, const char *units);
struct obd_export;
int lprocfs_exp_cleanup(struct obd_export *exp);
struct dentry *ldebugfs_add_simple(struct dentry *root,
                                   char *name,
                                   void *data,
                                   struct file_operations *fops);

int ldebugfs_register_stats(struct dentry *parent,
                            const char *name,
                            struct lprocfs_stats *stats);

/* lprocfs_status.c */
int ldebugfs_add_vars(struct dentry *parent,
                      struct lprocfs_vars *var,
                      void *data);

struct dentry *ldebugfs_register(const char *name,
                                 struct dentry *parent,
                                 struct lprocfs_vars *list,
                                 void *data);

void ldebugfs_remove(struct dentry **entryp);

int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list,
                      struct attribute_group *attrs);
int lprocfs_obd_cleanup(struct obd_device *obd);

int ldebugfs_seq_create(struct dentry *parent,
                        const char *name,
                        umode_t mode,
                        const struct file_operations *seq_fops,
                        void *data);
int ldebugfs_obd_seq_create(struct obd_device *dev,
                            const char *name,
                            umode_t mode,
                            const struct file_operations *seq_fops,
                            void *data);

/* Generic callbacks */

int lprocfs_rd_uint(struct seq_file *m, void *data);
int lprocfs_wr_uint(struct file *file, const char __user *buffer,
                    unsigned long count, void *data);
int lprocfs_rd_server_uuid(struct seq_file *m, void *data);
int lprocfs_rd_conn_uuid(struct seq_file *m, void *data);
int lprocfs_rd_import(struct seq_file *m, void *data);
int lprocfs_rd_state(struct seq_file *m, void *data);
int lprocfs_rd_connect_flags(struct seq_file *m, void *data);

struct adaptive_timeout;
int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at);
int lprocfs_rd_timeouts(struct seq_file *m, void *data);
int lprocfs_wr_ping(struct file *file, const char __user *buffer,
                    size_t count, loff_t *off);
int lprocfs_wr_import(struct file *file, const char __user *buffer,
                      size_t count, loff_t *off);
int lprocfs_rd_pinger_recov(struct seq_file *m, void *n);
int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
                            size_t count, loff_t *off);

/* Statfs helpers */

int lprocfs_write_helper(const char __user *buffer, unsigned long count,
                         int *val);
int lprocfs_write_u64_helper(const char __user *buffer,
                             unsigned long count, __u64 *val);
int lprocfs_write_frac_u64_helper(const char *buffer,
                                  unsigned long count,
                                  __u64 *val, int mult);
char *lprocfs_find_named_value(const char *buffer, const char *name,
                               size_t *count);
void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value);
void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value);
void lprocfs_oh_clear(struct obd_histogram *oh);
unsigned long lprocfs_oh_sum(struct obd_histogram *oh);

void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
                           struct lprocfs_counter *cnt);

int lprocfs_single_release(struct inode *, struct file *);
int lprocfs_seq_release(struct inode *, struct file *);

#define LPROCFS_CLIMP_EXIT(obd) \
        up_read(&(obd)->u.cli.cl_sem)

/* Write the name##_seq_show function and call LPROC_SEQ_FOPS_RO for a
 * read-only proc entry; otherwise also define a name##_seq_write function
 * for a read-write proc entry and call LPROC_SEQ_FOPS instead. Finally,
 * call ldebugfs_obd_seq_create(obd, filename, 0444, &name##_fops, data);
 * see the illustrative example after the macros below.
 */
#define __LPROC_SEQ_FOPS(name, custom_seq_write)                        \
static int name##_single_open(struct inode *inode, struct file *file)  \
{                                                                       \
        return single_open(file, name##_seq_show, inode->i_private);   \
}                                                                       \
static struct file_operations name##_fops = {                          \
        .owner   = THIS_MODULE,                                         \
        .open    = name##_single_open,                                  \
        .read    = seq_read,                                            \
        .write   = custom_seq_write,                                    \
        .llseek  = seq_lseek,                                           \
        .release = lprocfs_single_release,                              \
}

#define LPROC_SEQ_FOPS_RO(name) __LPROC_SEQ_FOPS(name, NULL)
#define LPROC_SEQ_FOPS(name)    __LPROC_SEQ_FOPS(name, name##_seq_write)
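
/*
 * Illustrative sketch of the pattern described above; the name "foo" and
 * the obd device variable are hypothetical.
 *
 *	static int foo_seq_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%d\n", 42);
 *		return 0;
 *	}
 *	LPROC_SEQ_FOPS_RO(foo);
 *
 *	// later, during setup:
 *	ldebugfs_obd_seq_create(obd, "foo", 0444, &foo_fops, obd);
 */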

#define LPROC_SEQ_FOPS_RO_TYPE(name, type)                              \
static int name##_##type##_seq_show(struct seq_file *m, void *v)       \
{                                                                       \
        return lprocfs_rd_##type(m, m->private);                        \
}                                                                       \
LPROC_SEQ_FOPS_RO(name##_##type)

#define LPROC_SEQ_FOPS_RW_TYPE(name, type)                              \
static int name##_##type##_seq_show(struct seq_file *m, void *v)       \
{                                                                       \
        return lprocfs_rd_##type(m, m->private);                        \
}                                                                       \
static ssize_t name##_##type##_seq_write(struct file *file,            \
                const char __user *buffer, size_t count,               \
                loff_t *off)                                            \
{                                                                       \
        struct seq_file *seq = file->private_data;                      \
        return lprocfs_wr_##type(file, buffer,                          \
                                 count, seq->private);                  \
}                                                                       \
LPROC_SEQ_FOPS(name##_##type)

#define LPROC_SEQ_FOPS_WR_ONLY(name, type)                              \
static ssize_t name##_##type##_write(struct file *file,                \
                const char __user *buffer, size_t count,               \
                loff_t *off)                                            \
{                                                                       \
        return lprocfs_wr_##type(file, buffer, count, off);             \
}                                                                       \
static int name##_##type##_open(struct inode *inode, struct file *file) \
{                                                                       \
        return single_open(file, NULL, inode->i_private);               \
}                                                                       \
static struct file_operations name##_##type##_fops = {                  \
        .open    = name##_##type##_open,                                 \
        .write   = name##_##type##_write,                                \
        .release = lprocfs_single_release,                               \
}

struct lustre_attr {
        struct attribute attr;
        ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
                        char *buf);
        ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
                         const char *buf, size_t len);
};

#define LUSTRE_ATTR(name, mode, show, store) \
static struct lustre_attr lustre_attr_##name = __ATTR(name, mode, show, store)

#define LUSTRE_RO_ATTR(name) LUSTRE_ATTR(name, 0444, name##_show, NULL)
#define LUSTRE_RW_ATTR(name) LUSTRE_ATTR(name, 0644, name##_show, name##_store)

extern const struct sysfs_ops lustre_sysfs_ops;
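
/*
 * Illustrative sketch: a sysfs attribute built with the helpers above.  The
 * attribute name "max_pages" and its backing value are hypothetical; the
 * show/store prototypes match struct lustre_attr.
 *
 *	static ssize_t max_pages_show(struct kobject *kobj,
 *				      struct attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", max_pages);
 *	}
 *
 *	static ssize_t max_pages_store(struct kobject *kobj,
 *				       struct attribute *attr,
 *				       const char *buf, size_t count)
 *	{
 *		... parse buf, update max_pages ...
 *		return count;
 *	}
 *	LUSTRE_RW_ATTR(max_pages);
 */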

/* all quota proc functions */
int lprocfs_quota_rd_bunit(char *page, char **start,
                           loff_t off, int count,
                           int *eof, void *data);
int lprocfs_quota_wr_bunit(struct file *file, const char *buffer,
                           unsigned long count, void *data);
int lprocfs_quota_rd_btune(char *page, char **start,
                           loff_t off, int count,
                           int *eof, void *data);
int lprocfs_quota_wr_btune(struct file *file, const char *buffer,
                           unsigned long count, void *data);
int lprocfs_quota_rd_iunit(char *page, char **start,
                           loff_t off, int count,
                           int *eof, void *data);
int lprocfs_quota_wr_iunit(struct file *file, const char *buffer,
                           unsigned long count, void *data);
int lprocfs_quota_rd_itune(char *page, char **start,
                           loff_t off, int count,
                           int *eof, void *data);
int lprocfs_quota_wr_itune(struct file *file, const char *buffer,
                           unsigned long count, void *data);
int lprocfs_quota_rd_type(char *page, char **start, loff_t off, int count,
                          int *eof, void *data);
int lprocfs_quota_wr_type(struct file *file, const char *buffer,
                          unsigned long count, void *data);
int lprocfs_quota_rd_switch_seconds(char *page, char **start, loff_t off,
                                    int count, int *eof, void *data);
int lprocfs_quota_wr_switch_seconds(struct file *file,
                                    const char *buffer,
                                    unsigned long count, void *data);
int lprocfs_quota_rd_sync_blk(char *page, char **start, loff_t off,
                              int count, int *eof, void *data);
int lprocfs_quota_wr_sync_blk(struct file *file, const char *buffer,
                              unsigned long count, void *data);
int lprocfs_quota_rd_switch_qs(char *page, char **start, loff_t off,
                               int count, int *eof, void *data);
int lprocfs_quota_wr_switch_qs(struct file *file,
                               const char *buffer, unsigned long count,
                               void *data);
int lprocfs_quota_rd_boundary_factor(char *page, char **start, loff_t off,
                                     int count, int *eof, void *data);
int lprocfs_quota_wr_boundary_factor(struct file *file,
                                     const char *buffer, unsigned long count,
                                     void *data);
int lprocfs_quota_rd_least_bunit(char *page, char **start, loff_t off,
                                 int count, int *eof, void *data);
int lprocfs_quota_wr_least_bunit(struct file *file,
                                 const char *buffer, unsigned long count,
                                 void *data);
int lprocfs_quota_rd_least_iunit(char *page, char **start, loff_t off,
                                 int count, int *eof, void *data);
int lprocfs_quota_wr_least_iunit(struct file *file,
                                 const char *buffer, unsigned long count,
                                 void *data);
int lprocfs_quota_rd_qs_factor(char *page, char **start, loff_t off,
                               int count, int *eof, void *data);
int lprocfs_quota_wr_qs_factor(struct file *file,
                               const char *buffer, unsigned long count,
                               void *data);
#endif /* LPROCFS_SNMP_H */