/*-
 * Copyright (c) 2009 Adrian Chadd
 * Copyright (c) 2012 Spectra Logic Corporation
 * Copyright (c) 2014 Bryan Venteicher
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/x86/x86/pvclock.c 278183 2015-02-04 08:26:43Z bryanv $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <machine/cpufunc.h>
#include <machine/cpu.h>
#include <machine/atomic.h>
#include <machine/pvclock.h>

/*
 * Last system time returned; this guarantees a monotonically increasing
 * clock when the hypervisor does not provide a stable TSC.
 */
static volatile uint64_t pvclock_last_cycles;

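/*
 * Forget the last-seen value across a suspend/resume: the hypervisor's
 * system time may restart at a lower value, and a stale pvclock_last_cycles
 * would otherwise pin the clock until the new time caught up.
 */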
void
pvclock_resume(void)
{

	atomic_store_rel_64(&pvclock_last_cycles, 0);
}

uint64_t
pvclock_get_last_cycles(void)
{

	return (atomic_load_acq_64(&pvclock_last_cycles));
}

/*
 * Scale a 64-bit delta by shifting and then multiplying by a 32-bit
 * fraction, yielding a 64-bit result.
 */
static inline uint64_t
pvclock_scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	uint64_t product;

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

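	/*
	 * The multiply below computes (delta * mul_frac) >> 32, i.e. it
	 * scales the shifted delta by the 32.32 fixed-point fraction
	 * mul_frac / 2^32, keeping a 64-bit result.
	 */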
#if defined(__i386__)
	{
		uint32_t tmp1, tmp2;

		/**
		 * For i386, the formula looks like:
		 *
		 *   lower = (mul_frac * (delta & UINT_MAX)) >> 32
		 *   upper = mul_frac * (delta >> 32)
		 *   product = lower + upper
		 */
		__asm__ (
			"mul  %5       ; "
			"mov  %4,%%eax ; "
			"mov  %%edx,%4 ; "
			"mul  %5       ; "
			"xor  %5,%5    ; "
			"add  %4,%%eax ; "
			"adc  %5,%%edx ; "
			: "=A" (product), "=r" (tmp1), "=r" (tmp2)
			: "a" ((uint32_t)delta), "1" ((uint32_t)(delta >> 32)),
			  "2" (mul_frac) );
	}
#elif defined(__amd64__)
	{
		unsigned long tmp;

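		/*
		 * mulq leaves the 128-bit product in %rdx:%rax; shrd then
		 * shifts the low 32 bits of %rdx into the top of %rax,
		 * yielding (delta * mul_frac) >> 32.
		 */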
		__asm__ (
			"mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
			: [lo]"=a" (product), [hi]"=d" (tmp)
			: "0" (delta), [mul_frac]"rm"((uint64_t)mul_frac));
	}
#else
#error "pvclock: unsupported x86 architecture?"
#endif

	return (product);
}

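/*
 * Convert the elapsed TSC cycles since the hypervisor last updated the time
 * info (ti->tsc_timestamp) into nanoseconds, using the hypervisor-supplied
 * multiplier and shift.
 */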
static uint64_t
pvclock_get_nsec_offset(struct pvclock_vcpu_time_info *ti)
{
	uint64_t delta;

	delta = rdtsc() - ti->tsc_timestamp;

	return (pvclock_scale_delta(delta, ti->tsc_to_system_mul,
	    ti->tsc_shift));
}

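/*
 * Snapshot the per-VCPU time info.  The hypervisor bumps ti->version to an
 * odd value before updating the structure and back to an even value
 * afterwards, so retry whenever the version is odd or changed across the
 * reads.
 */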
static void
pvclock_read_time_info(struct pvclock_vcpu_time_info *ti,
    uint64_t *cycles, uint8_t *flags)
{
	uint32_t version;

	do {
		version = ti->version;
		rmb();
		*cycles = ti->system_time + pvclock_get_nsec_offset(ti);
		*flags = ti->flags;
		rmb();
	} while ((ti->version & 1) != 0 || ti->version != version);
}

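/*
 * Read the wall-clock structure under the same even/odd version protocol
 * as the per-VCPU time info above.
 */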
static void
pvclock_read_wall_clock(struct pvclock_wall_clock *wc, uint32_t *sec,
    uint32_t *nsec)
{
	uint32_t version;

	do {
		version = wc->version;
		rmb();
		*sec = wc->sec;
		*nsec = wc->nsec;
		rmb();
	} while ((wc->version & 1) != 0 || wc->version != version);
}

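/*
 * Return the current system time in nanoseconds.  When the hypervisor does
 * not flag the TSC as stable, clamp against the last value returned on any
 * VCPU so the clock never goes backwards.
 */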
uint64_t
pvclock_get_timecount(struct pvclock_vcpu_time_info *ti)
{
	uint64_t now, last;
	uint8_t flags;

	pvclock_read_time_info(ti, &now, &flags);

	if (flags & PVCLOCK_FLAG_TSC_STABLE)
		return (now);

	/*
	 * Enforce a monotonically increasing clock time across all VCPUs.
	 * If our time is too old, use the last time and return. Otherwise,
	 * try to update the last time.
	 */
	do {
		last = atomic_load_acq_64(&pvclock_last_cycles);
		if (last > now)
			return (last);
	} while (!atomic_cmpset_64(&pvclock_last_cycles, last, now));

	return (now);
}

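/*
 * Translate the hypervisor's wall-clock structure into a timespec.
 */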
void
pvclock_get_wallclock(struct pvclock_wall_clock *wc, struct timespec *ts)
{
	uint32_t sec, nsec;

	pvclock_read_wall_clock(wc, &sec, &nsec);
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}