1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASMARM_ARCH_TIMER_H
3#define __ASMARM_ARCH_TIMER_H
4
5#include <asm/barrier.h>
6#include <asm/errno.h>
7#include <asm/hwcap.h>
8#include <linux/clocksource.h>
9#include <linux/init.h>
10#include <linux/io-64-nonatomic-lo-hi.h>
11#include <linux/types.h>
12
13#include <clocksource/arm_arch_timer.h>
14
#ifdef CONFIG_ARM_ARCH_TIMER
/* 32bit ARM doesn't know anything about timer errata... */
#define has_erratum_handler(h)		(false)
/* ...so the handler is always the plain arch_timer_<h> accessor. */
#define erratum_handler(h)		(arch_timer_##h)

/* Arch hook called by the arm_arch_timer clocksource driver at init. */
int arch_timer_arch_init(void);
21
22/*
23 * These register accessors are marked inline so the compiler can
24 * nicely work out which register we want, and chuck away the rest of
25 * the code. At least it does so with a recent GCC (4.6.3).
26 */
/*
 * Write an architected-timer CP15 register.
 *
 * @access selects the physical (CNTP_*) or virtual (CNTV_*) timer bank,
 * @reg selects CTRL (32-bit, written with MCR; only the low 32 bits of
 * @val are used) or CVAL (64-bit compare value, written with MCRR).
 *
 * Both selectors must be compile-time constants: any other combination
 * hits BUILD_BUG(), and constant folding reduces each call site to a
 * single co-processor instruction.
 *
 * The ISB after a CTRL write ensures the enable/mask change has taken
 * effect before any subsequent instruction executes.
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTP_CTL */
			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" ((u32)val));
			isb();
			break;
		case ARCH_TIMER_REG_CVAL:
			/* CNTP_CVAL (64-bit) */
			asm volatile("mcrr p15, 2, %Q0, %R0, c14" : : "r" (val));
			break;
		default:
			BUILD_BUG();
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTV_CTL */
			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" ((u32)val));
			isb();
			break;
		case ARCH_TIMER_REG_CVAL:
			/* CNTV_CVAL (64-bit) */
			asm volatile("mcrr p15, 3, %Q0, %R0, c14" : : "r" (val));
			break;
		default:
			BUILD_BUG();
		}
	} else {
		BUILD_BUG();
	}
}
58
/*
 * Read an architected-timer CP15 register.
 *
 * Only the 32-bit CTRL register of either timer bank is readable through
 * this helper; the counter/compare values have dedicated accessors below.
 * As with the write side, @access and @reg must be compile-time constants
 * or the build fails via BUILD_BUG().
 */
static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
	u32 val = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTP_CTL */
			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
			break;
		default:
			BUILD_BUG();
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTV_CTL */
			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
			break;
		default:
			BUILD_BUG();
		}
	} else {
		BUILD_BUG();
	}

	return val;
}
86
87static inline u32 arch_timer_get_cntfrq(void)
88{
89	u32 val;
90	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
91	return val;
92}
93
/*
 * Read CNTPCT, the 64-bit physical counter.
 *
 * The ISB keeps the counter read from being reordered ahead of earlier
 * instructions, so the value reflects program order at the call site.
 */
static inline u64 __arch_counter_get_cntpct(void)
{
	u64 cval;

	isb();
	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}
102
/*
 * "Stable" physical counter read: 32-bit ARM has no timer erratum
 * workarounds, so this is simply the raw read.
 */
static inline u64 __arch_counter_get_cntpct_stable(void)
{
	return __arch_counter_get_cntpct();
}
107
/*
 * Read CNTVCT, the 64-bit virtual counter.
 *
 * The ISB keeps the counter read from being reordered ahead of earlier
 * instructions, so the value reflects program order at the call site.
 */
static inline u64 __arch_counter_get_cntvct(void)
{
	u64 cval;

	isb();
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}
116
/*
 * "Stable" virtual counter read: 32-bit ARM has no timer erratum
 * workarounds, so this is simply the raw read.
 */
static inline u64 __arch_counter_get_cntvct_stable(void)
{
	return __arch_counter_get_cntvct();
}
121
122static inline u32 arch_timer_get_cntkctl(void)
123{
124	u32 cntkctl;
125	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
126	return cntkctl;
127}
128
/*
 * Write CNTKCTL, which controls user-space access to the timer/counters.
 * The ISB ensures the new access permissions are in force before any
 * subsequent instruction executes.
 */
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
	isb();
}
134
135static inline void arch_timer_set_evtstrm_feature(void)
136{
137	elf_hwcap |= HWCAP_EVTSTRM;
138}
139
140static inline bool arch_timer_have_evtstrm_feature(void)
141{
142	return elf_hwcap & HWCAP_EVTSTRM;
143}
144#endif
145
146#endif
147