/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/irq.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

enum cpu_usage_stat {
	CPUTIME_USER,
	CPUTIME_NICE,
	CPUTIME_SYSTEM,
	CPUTIME_SOFTIRQ,
	CPUTIME_IRQ,
	CPUTIME_IDLE,
	CPUTIME_IOWAIT,
	CPUTIME_STEAL,
	CPUTIME_GUEST,
	CPUTIME_GUEST_NICE,
#ifdef CONFIG_SCHED_CORE
	CPUTIME_FORCEIDLE,
#endif
	NR_STATS,
};

struct kernel_cpustat {
	u64 cpustat[NR_STATS];
};

struct kernel_stat {
	unsigned long irqs_sum;
	unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu this_cpu_ptr(&kstat)
#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
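
/*
 * Illustrative sketch, not part of this header: reading the this-CPU and
 * remote-CPU statistics through the accessors above. Pinning via
 * preempt_disable() reflects the comment above; the variable names are
 * hypothetical.
 *
 *	u64 utime, idle3;
 *
 *	preempt_disable();
 *	utime = kcpustat_this_cpu->cpustat[CPUTIME_USER];
 *	preempt_enable();
 *
 *	idle3 = kcpustat_cpu(3).cpustat[CPUTIME_IDLE];
 */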

extern unsigned long long nr_context_switches_cpu(int cpu);
extern unsigned long long nr_context_switches(void);

extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
extern void kstat_incr_irq_this_cpu(unsigned int irq);
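
/*
 * Illustrative sketch, an assumption about typical callers rather than
 * part of this header: totalling one interrupt line over all possible
 * CPUs with kstat_irqs_cpu(). From plain task context, kstat_irqs_usr()
 * below is the intended accessor.
 *
 *	unsigned int cpu, total = 0;
 *
 *	for_each_possible_cpu(cpu)
 *		total += kstat_irqs_cpu(irq, cpu);
 */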

static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}

static inline unsigned int kstat_cpu_softirqs_sum(int cpu)
{
	int i;
	unsigned int sum = 0;

	for (i = 0; i < NR_SOFTIRQS; i++)
		sum += kstat_softirqs_cpu(i, cpu);

	return sum;
}
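
/*
 * Illustrative sketch, not part of this header: dumping the per-CPU
 * softirq counters the way a /proc/softirqs style reader might. The
 * seq_file handle 'm' is a hypothetical parameter used for illustration.
 *
 *	unsigned int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		seq_printf(m, " %10u", kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
 */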

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
extern unsigned int kstat_irqs_usr(unsigned int irq);

/*
 * Number of interrupts per cpu, since bootup
 */
static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu)
{
	return kstat_cpu(cpu).irqs_sum;
}
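
/*
 * Illustrative sketch, not part of this header: deriving a system-wide
 * interrupt total from the per-CPU sums above, roughly as a /proc/stat
 * style consumer might. The variable names are hypothetical.
 *
 *	unsigned int cpu;
 *	u64 sum = 0;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += kstat_cpu_irqs_sum(cpu) + kstat_cpu_softirqs_sum(cpu);
 */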

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern u64 kcpustat_field(struct kernel_cpustat *kcpustat,
			  enum cpu_usage_stat usage, int cpu);
extern void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu);
#else
static inline u64 kcpustat_field(struct kernel_cpustat *kcpustat,
				 enum cpu_usage_stat usage, int cpu)
{
	return kcpustat->cpustat[usage];
}

static inline void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
	*dst = kcpustat_cpu(cpu);
}

#endif
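
/*
 * Illustrative sketch, not part of this header: taking a snapshot of one
 * CPU's cpustat and reading a single field. Under
 * CONFIG_VIRT_CPU_ACCOUNTING_GEN the extern variants are expected to
 * account for vtime that has not been flushed yet; otherwise they reduce
 * to the plain reads above. The variable names are hypothetical.
 *
 *	struct kernel_cpustat snap;
 *	u64 user, idle;
 *
 *	kcpustat_cpu_fetch(&snap, cpu);
 *	user = snap.cpustat[CPUTIME_USER];
 *	idle = kcpustat_field(&kcpustat_cpu(cpu), CPUTIME_IDLE, cpu);
 */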

extern void account_user_time(struct task_struct *, u64);
extern void account_guest_time(struct task_struct *, u64);
extern void account_system_time(struct task_struct *, int, u64);
extern void account_system_index_time(struct task_struct *, u64,
				      enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);
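
/*
 * Illustrative sketch, an assumption about typical callers rather than
 * part of this header: charging a slice of system time to a specific
 * bucket of enum cpu_usage_stat. The delta is assumed to be in
 * nanoseconds, in line with the other accounting helpers here.
 *
 *	account_system_index_time(current, 10 * NSEC_PER_USEC, CPUTIME_IRQ);
 */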

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void account_process_tick(struct task_struct *tsk, int user)
{
	vtime_flush(tsk);
}
#else
extern void account_process_tick(struct task_struct *, int user);
#endif

extern void account_idle_ticks(unsigned long ticks);

#ifdef CONFIG_SCHED_CORE
extern void __account_forceidle_time(struct task_struct *tsk, u64 delta);
#endif

#endif /* _LINUX_KERNEL_STAT_H */