/**
 * \file
 * \brief Architecture-specific assembly inlines for x86.
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#ifndef ARCH_X86_BARRELFISH_KPI_X86_H
#define ARCH_X86_BARRELFISH_KPI_X86_H

#include <machine/param.h>

#ifndef __ASSEMBLER__

/** \brief Reads the 64-bit time-stamp (cycle) counter. */
static inline uint64_t rdtsc(void)
{
    uint32_t eax, edx;
    __asm volatile ("rdtsc" : "=a" (eax), "=d" (edx) :: "memory");
    return ((uint64_t)edx << 32) | eax;
}

#ifndef __k1om__
/** \brief Reads the cycle counter, after waiting for all previous
    instructions to execute (rdtscp is not fully serializing). Discards the
    processor ID that rdtscp also returns. */
static inline uint64_t rdtscp(void)
{
    uint32_t eax, edx;
    // rdtscp also writes IA32_TSC_AUX (the processor ID) into ecx, which we
    // discard; hence the "ecx" clobber.
    __asm volatile ("rdtscp" : "=a" (eax), "=d" (edx) :: "ecx", "memory");
    return ((uint64_t)edx << 32) | eax;
}
#else
static inline uint64_t rdtscp(void)
{
    /* The Xeon Phi (K1OM) does not implement rdtscp; fall back to rdtsc. */
    return rdtsc();
}
#endif // __k1om__

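/*
 * Usage sketch (illustrative): measuring the cycles spent in a code region.
 * A common pattern is to read the TSC with rdtsc() at the start and with
 * rdtscp() at the end, so that the second read waits for the measured code
 * to complete; exact serialization needs depend on what is being measured.
 *
 *   uint64_t start = rdtsc();
 *   do_work();                      // hypothetical code under measurement
 *   uint64_t end   = rdtscp();
 *   uint64_t cycles = end - start;
 */
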
/** \brief Reads the performance-monitoring counter selected by \p counter. */
static inline uint64_t rdpmc(uint32_t counter)
{
    uint32_t eax, edx;

    __asm volatile("rdpmc"
                   : "=a" (eax), "=d" (edx)
                   : "c" (counter)
                   );

    return ((uint64_t)edx << 32) | eax;
}
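
/*
 * Usage sketch (illustrative): the counter must have been programmed
 * beforehand, and outside ring 0 RDPMC is only permitted when CR4.PCE is
 * set (otherwise it raises #GP). On Intel CPUs, setting bit 30 of the index
 * selects the fixed-function counters.
 *
 *   uint64_t pmc0   = rdpmc(0);          // programmable counter 0
 *   uint64_t fixed0 = rdpmc(1U << 30);   // fixed-function counter 0 (instructions retired)
 */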

/** \brief Full memory barrier (MFENCE); the "memory" clobber also makes it
    act as a compiler barrier. */
static inline void mfence(void)
{
    __asm volatile("mfence" ::: "memory");
}

/** \brief Store barrier (SFENCE); also acts as a compiler barrier. */
static inline void sfence(void)
{
    __asm volatile("sfence" ::: "memory");
}

/** \brief Load/dispatch barrier (LFENCE); also acts as a compiler barrier. */
static inline void lfence(void)
{
    __asm volatile("lfence" ::: "memory");
}
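
/*
 * Usage notes: ordinary x86 stores are already ordered with respect to each
 * other, so sfence() matters mainly for non-temporal (movnt*) stores;
 * clflush() below is only guaranteed to be ordered by mfence(); lfence() is
 * commonly used as a speculation barrier or to serialize rdtsc measurements.
 */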

/** \brief Flushes the cache line containing the given address. */
static inline void clflush(void *line)
{
    // Flush the line that 'line' points to, not the pointer variable itself.
    __asm volatile("clflush %0" :: "m" (*(uint8_t *)line));
}

#ifndef __cplusplus
/** \brief Flushes every cache line covering the range [base, base + len).
 *
 * \param base Start of the range (need not be cache-line aligned).
 * \param len  Length of the range in bytes; must be non-zero.
 */
static inline void cache_flush_range(void *base, size_t len)
{
    // Note: the flushes are not ordered against other memory accesses;
    // callers that need such ordering must issue mfence() themselves.

    uint8_t *line = (uint8_t *)((uintptr_t)base & ~(CACHE_LINE_SIZE-1UL));
    do {
        clflush(line);
        line += CACHE_LINE_SIZE;
    } while (line < (uint8_t *)base + len);
}
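
/*
 * Usage sketch (illustrative; the descriptor and helper names are
 * hypothetical): flushing freshly written data before handing it to a
 * non-coherent observer such as a device. The mfence() orders the flushes
 * before the subsequent doorbell write.
 *
 *   fill_descriptor(&ring[head]);                        // hypothetical helper
 *   cache_flush_range(&ring[head], sizeof(ring[head]));
 *   mfence();
 *   write_doorbell(head);                                // hypothetical MMIO write
 */
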
#endif // __cplusplus

#endif // __ASSEMBLER__

#endif // ARCH_X86_BARRELFISH_KPI_X86_H