/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 by Ralf Baechle
 */
#include <linux/clocksource.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/sched_clock.h>

#include <asm/time.h>

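/* Read the raw CP0 Count register on behalf of the clocksource core. */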
static u64 c0_hpt_read(struct clocksource *cs)
{
	return read_c0_count();
}

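/*
 * The Count register is 32 bits wide and free-running, hence the 32-bit
 * mask and the CONTINUOUS flag; the rating is filled in by
 * init_r4k_clocksource() once the counter frequency is known.
 */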
static struct clocksource clocksource_mips = {
	.name		= "MIPS",
	.read		= c0_hpt_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static u64 __maybe_unused notrace r4k_read_sched_clock(void)
{
	return read_c0_count();
}

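/*
 * RDHWR $2 reads hardware register 2 (CC), which reflects the CP0 Count
 * register. It is available from MIPS R2 onwards and is what the VDSO uses
 * to read the counter from user mode once HWREna permits it.
 */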
static inline unsigned int rdhwr_count(void)
{
	unsigned int count;

	__asm__ __volatile__(
	"	.set push\n"
	"	.set mips32r2\n"
	"	rdhwr	%0, $2\n"
	"	.set pop\n"
	: "=r" (count));

	return count;
}

static bool rdhwr_count_usable(void)
{
	unsigned int prev, curr, i;

	/*
	 * Older QEMUs have a broken implementation of RDHWR for the CP0 count
	 * which always returns a constant value. Try to identify this and don't
	 * use it in the VDSO if it is broken. This workaround can be removed
	 * once the fix has been in QEMU stable for a reasonable amount of time.
	 */
	for (i = 0, prev = rdhwr_count(); i < 100; i++) {
		curr = rdhwr_count();

		if (curr != prev)
			return true;

		prev = curr;
	}

	pr_warn("Not using R4K clocksource in VDSO due to broken RDHWR\n");
	return false;
}

#ifdef CONFIG_CPU_FREQ

static bool __read_mostly r4k_clock_unstable;

static void r4k_clocksource_unstable(char *reason)
{
	if (r4k_clock_unstable)
		return;

	r4k_clock_unstable = true;

	pr_info("R4K timer is unstable due to %s\n", reason);

	clocksource_mark_unstable(&clocksource_mips);
}

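/*
 * The Count register typically ticks at a rate derived from the CPU clock,
 * so once a frequency transition has completed the counter no longer runs
 * at the rate the clocksource was registered with and must be marked
 * unstable.
 */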
static int r4k_cpufreq_callback(struct notifier_block *nb,
				unsigned long val, void *data)
{
	if (val == CPUFREQ_POSTCHANGE)
		r4k_clocksource_unstable("CPU frequency change");

	return 0;
}

static struct notifier_block r4k_cpufreq_notifier = {
	.notifier_call  = r4k_cpufreq_callback,
};

static int __init r4k_register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&r4k_cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(r4k_register_cpufreq_notifier);

#endif /* CONFIG_CPU_FREQ */

int __init init_r4k_clocksource(void)
{
	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	/* Rate the clocksource: a base of 200 plus one point per 10 MHz */
	clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;

	/*
	 * R2 onwards makes the count accessible to user mode so it can be used
	 * by the VDSO (HWREna is configured by configure_hwrena()).
	 */
	if (cpu_has_mips_r2_r6 && rdhwr_count_usable())
		clocksource_mips.vdso_clock_mode = VDSO_CLOCKMODE_R4K;

	clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);

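	/*
	 * The generic sched_clock code cannot cope with the counter rate
	 * changing underneath it, so only use the counter as the scheduler
	 * clock when cpufreq is not able to re-clock it.
	 */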
#ifndef CONFIG_CPU_FREQ
	sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
#endif

	return 0;
}