/*  paravirtual clock -- common code used by kvm/xen

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
*/

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <asm/pvclock.h>

/*
 * These are periodically updated
 *    xen: magic shared_info page
 *    kvm: gpa registered via msr
 * and then copied here.
 */
struct pvclock_shadow_time {
	u64 tsc_timestamp;     /* TSC at last update of time vals.  */
	u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
	u32 tsc_to_nsec_mul;
	int tsc_shift;
	u32 version;
	u8  flags;
};

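/*
 * Flags that the platform code (kvm/xen) has declared usable via
 * pvclock_set_flags(); a flag coming from the hypervisor's time info is
 * only honoured if it is also set here.
 */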
static u8 valid_flags __read_mostly = 0;

void pvclock_set_flags(u8 flags)
{
	valid_flags = flags;
}

/*
 * Scale a 64-bit delta by shifting and multiplying by a 32-bit fraction,
 * yielding a 64-bit result.
 */
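/*
 * Illustrative sketch (not part of the original source): with a compiler
 * that provides a 128-bit integer type, the same computation could be
 * written as
 *
 *	static inline u64 scale_delta_ref(u64 delta, u32 mul_frac, int shift)
 *	{
 *		if (shift < 0)
 *			delta >>= -shift;
 *		else
 *			delta <<= shift;
 *		return (u64)(((unsigned __int128)delta * mul_frac) >> 32);
 *	}
 *
 * i.e. mul_frac acts as a 32.32 fixed-point factor and only bits 32..95 of
 * the 96-bit product are kept.  The inline assembly below performs the same
 * 64x32-bit multiply without relying on 128-bit arithmetic.
 */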
static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
{
	u64 product;
#ifdef __i386__
	u32 tmp1, tmp2;
#endif

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#ifdef __i386__
	__asm__ (
		"mul  %5       ; "
		"mov  %4,%%eax ; "
		"mov  %%edx,%4 ; "
		"mul  %5       ; "
		"xor  %5,%5    ; "
		"add  %4,%%eax ; "
		"adc  %5,%%edx ; "
		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
#elif defined(__x86_64__)
	__asm__ (
		"mul %%rdx ; shrd $32,%%rdx,%%rax"
		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
#else
#error implement me!
#endif

	return product;
}

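/*
 * Nanoseconds elapsed since the hypervisor last updated the shadow copy,
 * derived from the current TSC reading.
 */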
static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
{
	u64 delta = native_read_tsc() - shadow->tsc_timestamp;
	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
}

/*
 * Reads a consistent set of time-base values from the hypervisor
 * into a shadow data area.
 */
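/*
 * The hypervisor increments src->version before and after each update, so an
 * odd version means an update is in flight.  The copy is retried until the
 * same even version is observed before and after reading (a seqlock-style
 * protocol).
 */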
static unsigned pvclock_get_time_values(struct pvclock_shadow_time *dst,
					struct pvclock_vcpu_time_info *src)
{
	do {
		dst->version = src->version;
		rmb();		/* fetch version before data */
		dst->tsc_timestamp     = src->tsc_timestamp;
		dst->system_timestamp  = src->system_time;
		dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
		dst->tsc_shift         = src->tsc_shift;
		dst->flags             = src->flags;
		rmb();		/* test version after fetching data */
	} while ((src->version & 1) || (dst->version != src->version));

	return dst->version;
}

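/*
 * Derive the guest-visible TSC frequency from the conversion parameters.
 * scale_delta() computes ns = ((ticks << tsc_shift) * tsc_to_system_mul) >> 32,
 * so tsc_khz = (10^6 << 32) / tsc_to_system_mul, shifted in the direction
 * opposite to tsc_shift.
 */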
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
{
	u64 pv_tsc_khz = 1000000ULL << 32;

	do_div(pv_tsc_khz, src->tsc_to_system_mul);
	if (src->tsc_shift < 0)
		pv_tsc_khz <<= -src->tsc_shift;
	else
		pv_tsc_khz >>= src->tsc_shift;
	return pv_tsc_khz;
}

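/*
 * Highest clock value handed out so far; used to keep reads monotonic across
 * vcpus when the TSC-stable flag cannot be relied upon.
 */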
static atomic64_t last_value = ATOMIC64_INIT(0);

cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
	struct pvclock_shadow_time shadow;
	unsigned version;
	cycle_t ret, offset;
	u64 last;

	do {
		version = pvclock_get_time_values(&shadow, src);
		barrier();
		offset = pvclock_get_nsec_offset(&shadow);
		ret = shadow.system_timestamp + offset;
		barrier();
	} while (version != src->version);

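	/*
	 * If both the platform code and the hypervisor report the TSC as
	 * stable across vcpus, the value computed above is already monotonic
	 * and the last_value fixup below can be skipped.
	 */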
	if ((valid_flags & PVCLOCK_TSC_STABLE_BIT) &&
		(shadow.flags & PVCLOCK_TSC_STABLE_BIT))
		return ret;

	/*
	 * Assumption here is that last_value, a global accumulator, always goes
	 * forward. If we are less than that, we should not be much smaller.
	 * We assume there is an error margin we're inside, and then the
	 * correction does not sacrifice accuracy.
	 *
	 * For reads: global may have changed between test and return,
	 * but this means someone else updated the clock at a later time.
	 * We just need to make sure we are not seeing a backwards event.
	 *
	 * For updates: last_value = ret is not enough, since two vcpus could be
	 * updating at the same time, and one of them could be slightly behind,
	 * making the assumption that last_value always goes forward fail to hold.
	 */
	last = atomic64_read(&last_value);
	do {
		if (ret < last)
			return last;
		last = atomic64_cmpxchg(&last_value, last, ret);
	} while (unlikely(last != ret));

	return ret;
}

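/*
 * Current wall-clock time: the hypervisor publishes the wall clock as of
 * system boot in *wall_clock; adding the nanoseconds elapsed since boot
 * (from pvclock_clocksource_read()) gives the present time of day.
 */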
void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
			    struct pvclock_vcpu_time_info *vcpu_time,
			    struct timespec *ts)
{
	u32 version;
	u64 delta;
	struct timespec now;

	/* get wallclock at system boot */
	do {
		version = wall_clock->version;
		rmb();		/* fetch version before time */
		now.tv_sec  = wall_clock->sec;
		now.tv_nsec = wall_clock->nsec;
		rmb();		/* fetch time before checking version */
	} while ((wall_clock->version & 1) || (version != wall_clock->version));

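	/*
	 * now holds the wallclock at boot; add the nanoseconds since boot and
	 * split the sum back into seconds/nanoseconds (do_div() divides delta
	 * in place and returns the remainder).
	 */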
	delta = pvclock_clocksource_read(vcpu_time);	/* time since system boot */
	delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;

	now.tv_nsec = do_div(delta, NSEC_PER_SEC);
	now.tv_sec = delta;

	set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}