1/*- 2 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
|
3 * Copyright (c) 2016 The FreeBSD Foundation
|
3 * Copyright (c) 2016, 2017 The FreeBSD Foundation |
4 * All rights reserved. 5 * 6 * Portions of this software were developed by Konstantin Belousov 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31#include <sys/cdefs.h>
|
32__FBSDID("$FreeBSD: stable/11/lib/libc/x86/sys/__vdso_gettc.c 311376 2017-01-05 07:42:08Z sephe $");
|
32__FBSDID("$FreeBSD: stable/11/lib/libc/x86/sys/__vdso_gettc.c 311927 2017-01-11 11:25:18Z kib $"); |
33 34#include <sys/param.h> 35#include "namespace.h" 36#include <sys/elf.h> 37#include <sys/fcntl.h> 38#include <sys/mman.h> 39#include <sys/time.h> 40#include <sys/vdso.h> 41#include <errno.h> 42#include <string.h> 43#include <unistd.h> 44#include "un-namespace.h"
|
45#include <machine/atomic.h> |
46#include <machine/cpufunc.h> 47#include <machine/specialreg.h> 48#include <dev/acpica/acpi_hpet.h> 49#ifdef __amd64__
|
49#include <machine/atomic.h>
|
50#include <dev/hyperv/hyperv.h> 51#endif 52#include "libc_private.h" 53 54static void 55lfence_mb(void) 56{ 57#if defined(__i386__) 58 static int lfence_works = -1; 59 u_int cpuid_supported, p[4]; 60 61 if (lfence_works == -1) { 62 __asm __volatile( 63 " pushfl\n" 64 " popl %%eax\n" 65 " movl %%eax,%%ecx\n" 66 " xorl $0x200000,%%eax\n" 67 " pushl %%eax\n" 68 " popfl\n" 69 " pushfl\n" 70 " popl %%eax\n" 71 " xorl %%eax,%%ecx\n" 72 " je 1f\n" 73 " movl $1,%0\n" 74 " jmp 2f\n" 75 "1: movl $0,%0\n" 76 "2:\n" 77 : "=r" (cpuid_supported) : : "eax", "ecx", "cc"); 78 if (cpuid_supported) { 79 __asm __volatile( 80 " pushl %%ebx\n" 81 " cpuid\n" 82 " movl %%ebx,%1\n" 83 " popl %%ebx\n" 84 : "=a" (p[0]), "=r" (p[1]), "=c" (p[2]), "=d" (p[3]) 85 : "0" (0x1)); 86 lfence_works = (p[3] & CPUID_SSE2) != 0; 87 } else 88 lfence_works = 0; 89 } 90 if (lfence_works == 1) 91 lfence(); 92#elif defined(__amd64__) 93 lfence(); 94#else 95#error "arch" 96#endif 97} 98 99static u_int 100__vdso_gettc_rdtsc_low(const struct vdso_timehands *th) 101{ 102 u_int rv; 103 104 lfence_mb(); 105 __asm __volatile("rdtsc; shrd %%cl, %%edx, %0" 106 : "=a" (rv) : "c" (th->th_x86_shift) : "edx"); 107 return (rv); 108} 109 110static u_int 111__vdso_rdtsc32(void) 112{ 113 114 lfence_mb(); 115 return (rdtsc32()); 116} 117
/*
 * Per-unit mappings of the /dev/hpetN register pages, indexed by HPET
 * unit number.  A slot is NULL until initialization is first attempted,
 * MAP_FAILED when the device could not be opened or mapped (so callers
 * do not retry), and otherwise points at the read-only mapped page.
 */
#define	HPET_DEV_MAP_MAX	10
static volatile char *hpet_dev_map[HPET_DEV_MAP_MAX];
120 121static void 122__vdso_init_hpet(uint32_t u) 123{ 124 static const char devprefix[] = "/dev/hpet"; 125 char devname[64], *c, *c1, t;
|
126 volatile char *new_map, *old_map; 127 uint32_t u1; |
128 int fd; 129 130 c1 = c = stpcpy(devname, devprefix);
|
129 u = hpet_idx;
|
131 u1 = u; |
132 do {
|
131 *c++ = u % 10 + '0';
132 u /= 10;
133 } while (u != 0);
|
133 *c++ = u1 % 10 + '0'; 134 u1 /= 10; 135 } while (u1 != 0); |
136 *c = '\0'; 137 for (c--; c1 != c; c1++, c--) { 138 t = *c1; 139 *c1 = *c; 140 *c = t; 141 }
|
142 143 old_map = hpet_dev_map[u]; 144 if (old_map != NULL) 145 return; 146 |
147 fd = _open(devname, O_RDONLY); 148 if (fd == -1) {
|
142 hpet_dev_map = MAP_FAILED;
|
149 atomic_cmpset_rel_ptr((volatile uintptr_t *)&hpet_dev_map[u], 150 (uintptr_t)old_map, (uintptr_t)MAP_FAILED); |
151 return; 152 }
|
145 if (hpet_dev_map != NULL && hpet_dev_map != MAP_FAILED)
146 munmap(hpet_dev_map, PAGE_SIZE);
147 hpet_dev_map = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
|
153 new_map = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0); |
154 _close(fd);
|
155 if (atomic_cmpset_rel_ptr((volatile uintptr_t *)&hpet_dev_map[u], 156 (uintptr_t)old_map, (uintptr_t)new_map) == 0 && 157 new_map != MAP_FAILED) 158 munmap((void *)new_map, PAGE_SIZE); |
159} 160 161#ifdef __amd64__ 162 163#define HYPERV_REFTSC_DEVPATH "/dev/" HYPERV_REFTSC_DEVNAME 164 165/* 166 * NOTE: 167 * We use 'NULL' for this variable to indicate that initialization 168 * is required. And if this variable is 'MAP_FAILED', then Hyper-V 169 * reference TSC can not be used, e.g. in misconfigured jail. 170 */ 171static struct hyperv_reftsc *hyperv_ref_tsc; 172 173static void 174__vdso_init_hyperv_tsc(void) 175{ 176 int fd; 177 178 fd = _open(HYPERV_REFTSC_DEVPATH, O_RDONLY); 179 if (fd < 0) { 180 /* Prevent the caller from re-entering. */ 181 hyperv_ref_tsc = MAP_FAILED; 182 return; 183 } 184 hyperv_ref_tsc = mmap(NULL, sizeof(*hyperv_ref_tsc), PROT_READ, 185 MAP_SHARED, fd, 0); 186 _close(fd); 187} 188 189static int 190__vdso_hyperv_tsc(struct hyperv_reftsc *tsc_ref, u_int *tc) 191{ 192 uint64_t disc, ret, tsc, scale; 193 uint32_t seq; 194 int64_t ofs; 195 196 while ((seq = atomic_load_acq_int(&tsc_ref->tsc_seq)) != 0) { 197 scale = tsc_ref->tsc_scale; 198 ofs = tsc_ref->tsc_ofs; 199 200 lfence_mb(); 201 tsc = rdtsc(); 202 203 /* ret = ((tsc * scale) >> 64) + ofs */ 204 __asm__ __volatile__ ("mulq %3" : 205 "=d" (ret), "=a" (disc) : 206 "a" (tsc), "r" (scale)); 207 ret += ofs; 208 209 atomic_thread_fence_acq(); 210 if (tsc_ref->tsc_seq == seq) { 211 *tc = ret; 212 return (0); 213 } 214 215 /* Sequence changed; re-sync. */ 216 } 217 return (ENOSYS); 218} 219 220#endif /* __amd64__ */ 221 222#pragma weak __vdso_gettc 223int 224__vdso_gettc(const struct vdso_timehands *th, u_int *tc) 225{
|
216 uint32_t tmp;
|
226 volatile char *map; 227 uint32_t idx; |
228 229 switch (th->th_algo) { 230 case VDSO_TH_ALGO_X86_TSC: 231 *tc = th->th_x86_shift > 0 ? __vdso_gettc_rdtsc_low(th) : 232 __vdso_rdtsc32(); 233 return (0); 234 case VDSO_TH_ALGO_X86_HPET:
|
224 tmp = th->th_x86_hpet_idx;
225 if (hpet_dev_map == NULL || tmp != hpet_idx) {
226 hpet_idx = tmp;
227 __vdso_init_hpet(hpet_idx);
|
235 idx = th->th_x86_hpet_idx; 236 if (idx >= HPET_DEV_MAP_MAX) 237 return (ENOSYS); 238 map = (volatile char *)atomic_load_acq_ptr( 239 (volatile uintptr_t *)&hpet_dev_map[idx]); 240 if (map == NULL) { 241 __vdso_init_hpet(idx); 242 map = (volatile char *)atomic_load_acq_ptr( 243 (volatile uintptr_t *)&hpet_dev_map[idx]); |
244 }
|
229 if (hpet_dev_map == MAP_FAILED)
|
245 if (map == MAP_FAILED) |
246 return (ENOSYS);
|
231 *tc = *(volatile uint32_t *)(hpet_dev_map + HPET_MAIN_COUNTER);
|
247 *tc = *(volatile uint32_t *)(map + HPET_MAIN_COUNTER); |
248 return (0); 249#ifdef __amd64__ 250 case VDSO_TH_ALGO_X86_HVTSC: 251 if (hyperv_ref_tsc == NULL) 252 __vdso_init_hyperv_tsc(); 253 if (hyperv_ref_tsc == MAP_FAILED) 254 return (ENOSYS); 255 return (__vdso_hyperv_tsc(hyperv_ref_tsc, tc)); 256#endif 257 default: 258 return (ENOSYS); 259 } 260} 261 262#pragma weak __vdso_gettimekeep 263int 264__vdso_gettimekeep(struct vdso_timekeep **tk) 265{ 266 267 return (_elf_aux_info(AT_TIMEKEEP, tk, sizeof(*tk))); 268}