1 #ifndef _LINUX_KERNEL_STAT_H
2 #define _LINUX_KERNEL_STAT_H
3
4 #include <linux/smp.h>
5 #include <linux/threads.h>
6 #include <linux/percpu.h>
7 #include <linux/cpumask.h>
8 #include <linux/interrupt.h>
9 #include <linux/sched.h>
10 #include <linux/vtime.h>
11 #include <asm/irq.h>
12 #include <asm/cputime.h>
13
14 /*
15 * 'kernel_stat.h' contains the definitions needed for doing
16 * some kernel statistics (CPU usage, context switches ...),
17 * used by rstatd/perfmeter
18 */
19
/*
 * Indices into kernel_cpustat.cpustat[].  Entry order is part of the
 * interface (array indices) — append only, before NR_STATS.
 */
enum cpu_usage_stat {
	CPUTIME_USER,		/* time in user mode */
	CPUTIME_NICE,		/* user mode at reduced (nice) priority */
	CPUTIME_SYSTEM,		/* time in kernel mode */
	CPUTIME_SOFTIRQ,	/* time servicing softirqs */
	CPUTIME_IRQ,		/* time servicing hard interrupts */
	CPUTIME_IDLE,		/* idle time */
	CPUTIME_IOWAIT,		/* idle while waiting for I/O completion */
	CPUTIME_STEAL,		/* time stolen by the hypervisor */
	CPUTIME_GUEST,		/* time running a guest */
	CPUTIME_GUEST_NICE,	/* time running a niced guest */
	NR_STATS,		/* array size marker, not a real stat */
};
33
/* Per-cpu cputime accounting, indexed by enum cpu_usage_stat. */
struct kernel_cpustat {
	u64 cpustat[NR_STATS];
};
37
/* Per-cpu interrupt statistics. */
struct kernel_stat {
#ifndef CONFIG_GENERIC_HARDIRQS
	/* per-IRQ counts; with GENERIC_HARDIRQS these live in the irq
	 * descriptor's per-cpu kstat_irqs instead (see macro below) */
	unsigned int irqs[NR_IRQS];
#endif
	unsigned long irqs_sum;			/* total hard irqs on this cpu */
	unsigned int softirqs[NR_SOFTIRQS];	/* per-softirq counts */
};
45
46 DECLARE_PER_CPU(struct kernel_stat, kstat);
47 DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
48
/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu (&__get_cpu_var(kstat))
#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
/* Variants taking an explicit cpu number. */
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
54
55 extern unsigned long long nr_context_switches(void);
56
57 #ifndef CONFIG_GENERIC_HARDIRQS
58
59 struct irq_desc;
60
kstat_incr_irqs_this_cpu(unsigned int irq,struct irq_desc * desc)61 static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
62 struct irq_desc *desc)
63 {
64 __this_cpu_inc(kstat.irqs[irq]);
65 __this_cpu_inc(kstat.irqs_sum);
66 }
67
kstat_irqs_cpu(unsigned int irq,int cpu)68 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
69 {
70 return kstat_cpu(cpu).irqs[irq];
71 }
72 #else
73 #include <linux/irq.h>
74 extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
75
/*
 * With CONFIG_GENERIC_HARDIRQS the per-IRQ count lives in the irq
 * descriptor's per-cpu kstat_irqs; only the per-cpu running total
 * remains in struct kernel_stat.
 */
#define kstat_incr_irqs_this_cpu(irqno, DESC) \
do { \
__this_cpu_inc(*(DESC)->kstat_irqs); \
__this_cpu_inc(kstat.irqs_sum); \
} while (0)
81
82 #endif
83
/*
 * Bump this cpu's count for softirq number @irq.  Uses __this_cpu_inc,
 * so the caller is expected to run with preemption disabled.
 */
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}
88
kstat_softirqs_cpu(unsigned int irq,int cpu)89 static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
90 {
91 return kstat_cpu(cpu).softirqs[irq];
92 }
93
94 /*
95 * Number of interrupts per specific IRQ source, since bootup
96 */
97 #ifndef CONFIG_GENERIC_HARDIRQS
kstat_irqs(unsigned int irq)98 static inline unsigned int kstat_irqs(unsigned int irq)
99 {
100 unsigned int sum = 0;
101 int cpu;
102
103 for_each_possible_cpu(cpu)
104 sum += kstat_irqs_cpu(irq, cpu);
105
106 return sum;
107 }
108 #else
109 extern unsigned int kstat_irqs(unsigned int irq);
110 #endif
111
112 /*
113 * Number of interrupts per cpu, since bootup
114 */
kstat_cpu_irqs_sum(unsigned int cpu)115 static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
116 {
117 return kstat_cpu(cpu).irqs_sum;
118 }
119
120 /*
121 * Lock/unlock the current runqueue - to extract task statistics:
122 */
123 extern unsigned long long task_delta_exec(struct task_struct *);
124
125 extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
126 extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
127 extern void account_steal_time(cputime_t);
128 extern void account_idle_time(cputime_t);
129
130 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * With CONFIG_VIRT_CPU_ACCOUNTING_NATIVE a timer tick only needs to
 * flush the vtime accumulated for @tsk.  @user is unused in this
 * variant; the extern (non-native) implementation consumes it.
 */
static inline void account_process_tick(struct task_struct *tsk, int user)
{
	vtime_account_user(tsk);
}
135 #else
136 extern void account_process_tick(struct task_struct *, int user);
137 #endif
138
139 extern void account_steal_ticks(unsigned long ticks);
140 extern void account_idle_ticks(unsigned long ticks);
141
142 #endif /* _LINUX_KERNEL_STAT_H */
143