1#include <linux/cpumask.h>
2#include <linux/fs.h>
3#include <linux/init.h>
4#include <linux/interrupt.h>
5#include <linux/kernel_stat.h>
6#include <linux/proc_fs.h>
7#include <linux/sched.h>
8#include <linux/seq_file.h>
9#include <linux/slab.h>
10#include <linux/time.h>
11#include <linux/irqnr.h>
12#include <asm/cputime.h>
13
/*
 * Architectures may supply their own extra accounting for interrupt and
 * idle time.  When they don't, fall back to zero-valued stubs so the
 * code below can call them unconditionally.
 */
#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0	/* arch-specific per-CPU IRQ count */
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0		/* arch-specific system-wide IRQ count */
#endif
#ifndef arch_idle_time
#define arch_idle_time(cpu) 0		/* arch-accounted extra idle time */
#endif
23
24static int show_stat(struct seq_file *p, void *v)
25{
26	int i, j;
27	unsigned long jif;
28	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
29	cputime64_t guest, guest_nice;
30	u64 sum = 0;
31	u64 sum_softirq = 0;
32	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
33	struct timespec boottime;
34	unsigned int per_irq_sum;
35
36	user = nice = system = idle = iowait =
37		irq = softirq = steal = cputime64_zero;
38	guest = guest_nice = cputime64_zero;
39	getboottime(&boottime);
40	jif = boottime.tv_sec;
41
42	for_each_possible_cpu(i) {
43		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
44		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
45		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
46		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
47		idle = cputime64_add(idle, arch_idle_time(i));
48		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
49		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
50		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
51		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
52		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
53		guest_nice = cputime64_add(guest_nice,
54			kstat_cpu(i).cpustat.guest_nice);
55		for_each_irq_nr(j) {
56			sum += kstat_irqs_cpu(j, i);
57		}
58		sum += arch_irq_stat_cpu(i);
59
60		for (j = 0; j < NR_SOFTIRQS; j++) {
61			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
62
63			per_softirq_sums[j] += softirq_stat;
64			sum_softirq += softirq_stat;
65		}
66	}
67	sum += arch_irq_stat();
68
69	seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu %llu "
70		"%llu\n",
71		(unsigned long long)cputime64_to_clock_t(user),
72		(unsigned long long)cputime64_to_clock_t(nice),
73		(unsigned long long)cputime64_to_clock_t(system),
74		(unsigned long long)cputime64_to_clock_t(idle),
75		(unsigned long long)cputime64_to_clock_t(iowait),
76		(unsigned long long)cputime64_to_clock_t(irq),
77		(unsigned long long)cputime64_to_clock_t(softirq),
78		(unsigned long long)cputime64_to_clock_t(steal),
79		(unsigned long long)cputime64_to_clock_t(guest),
80		(unsigned long long)cputime64_to_clock_t(guest_nice));
81	for_each_online_cpu(i) {
82
83		user = kstat_cpu(i).cpustat.user;
84		nice = kstat_cpu(i).cpustat.nice;
85		system = kstat_cpu(i).cpustat.system;
86		idle = kstat_cpu(i).cpustat.idle;
87		idle = cputime64_add(idle, arch_idle_time(i));
88		iowait = kstat_cpu(i).cpustat.iowait;
89		irq = kstat_cpu(i).cpustat.irq;
90		softirq = kstat_cpu(i).cpustat.softirq;
91		steal = kstat_cpu(i).cpustat.steal;
92		guest = kstat_cpu(i).cpustat.guest;
93		guest_nice = kstat_cpu(i).cpustat.guest_nice;
94		seq_printf(p,
95			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
96			"%llu\n",
97			i,
98			(unsigned long long)cputime64_to_clock_t(user),
99			(unsigned long long)cputime64_to_clock_t(nice),
100			(unsigned long long)cputime64_to_clock_t(system),
101			(unsigned long long)cputime64_to_clock_t(idle),
102			(unsigned long long)cputime64_to_clock_t(iowait),
103			(unsigned long long)cputime64_to_clock_t(irq),
104			(unsigned long long)cputime64_to_clock_t(softirq),
105			(unsigned long long)cputime64_to_clock_t(steal),
106			(unsigned long long)cputime64_to_clock_t(guest),
107			(unsigned long long)cputime64_to_clock_t(guest_nice));
108	}
109	seq_printf(p, "intr %llu", (unsigned long long)sum);
110
111	/* sum again ? it could be updated? */
112	for_each_irq_nr(j) {
113		per_irq_sum = 0;
114		for_each_possible_cpu(i)
115			per_irq_sum += kstat_irqs_cpu(j, i);
116
117		seq_printf(p, " %u", per_irq_sum);
118	}
119
120	seq_printf(p,
121		"\nctxt %llu\n"
122		"btime %lu\n"
123		"processes %lu\n"
124		"procs_running %lu\n"
125		"procs_blocked %lu\n",
126		nr_context_switches(),
127		(unsigned long)jif,
128		total_forks,
129		nr_running(),
130		nr_iowait());
131
132	seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
133
134	for (i = 0; i < NR_SOFTIRQS; i++)
135		seq_printf(p, " %u", per_softirq_sums[i]);
136	seq_printf(p, "\n");
137
138	return 0;
139}
140
141static int stat_open(struct inode *inode, struct file *file)
142{
143	unsigned size = 4096 * (1 + num_possible_cpus() / 32);
144	char *buf;
145	struct seq_file *m;
146	int res;
147
148	/* don't ask for more than the kmalloc() max size, currently 128 KB */
149	if (size > 128 * 1024)
150		size = 128 * 1024;
151	buf = kmalloc(size, GFP_KERNEL);
152	if (!buf)
153		return -ENOMEM;
154
155	res = single_open(file, show_stat, NULL);
156	if (!res) {
157		m = file->private_data;
158		m->buf = buf;
159		m->size = size;
160	} else
161		kfree(buf);
162	return res;
163}
164
/* File operations for /proc/stat; reading goes through the seq_file core. */
static const struct file_operations proc_stat_operations = {
	.open		= stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
171
/*
 * Register /proc/stat at init time.  Mode 0 lets procfs apply its
 * default permissions; the return value of proc_create() is not
 * checked, matching the rest of the procfs boot registrations.
 */
static int __init proc_stat_init(void)
{
	proc_create("stat", 0, NULL, &proc_stat_operations);
	return 0;
}
module_init(proc_stat_init);
178