/*****************************************************************************
 * i386/xen/xen-os.h
 *
 * Random collection of macros and definitions
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_XEN_XEN_OS_H_
#define _MACHINE_XEN_XEN_OS_H_

#ifdef PAE
#define CONFIG_X86_PAE
#endif

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()
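
/*
 * Illustrative usage (not part of the interface): cpu_relax() is meant
 * for busy-wait loops such as spinning on a flag another CPU will set,
 * where `ready' is assumed to be a volatile shared variable:
 *
 *	while (!ready)
 *		cpu_relax();
 */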

#ifndef XENHVM
void xc_printf(const char *fmt, ...);

#ifdef SMP
extern int gdtset;

#include <sys/time.h> /* XXX for pcpu.h */
#include <sys/pcpu.h> /* XXX for PCPU_GET */
static inline int
smp_processor_id(void)
{
    if (__predict_true(gdtset))
	return PCPU_GET(cpuid);
    return 0;
}

#else
#define smp_processor_id() 0
#endif

#ifndef PANIC_IF
#define PANIC_IF(exp) if (__predict_false(exp)) {printf("panic - %s: %s:%d\n",#exp, __FILE__, __LINE__); panic("%s: %s:%d", #exp, __FILE__, __LINE__);}
#endif
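
/*
 * Illustrative usage: PANIC_IF() logs the failing expression along with
 * its file and line, then panics, e.g.:
 *
 *	PANIC_IF(error != 0);
 */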

/*
 * Crude memory allocator for memory allocation early in boot.
 */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
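
/*
 * Illustrative usage (hypothetical caller): grab a page-sized scratch
 * buffer before the regular kernel allocators are up, and hand it back
 * when done:
 *
 *	void *scratch = bootmem_alloc(PAGE_SIZE);
 *	...
 *	bootmem_free(scratch, PAGE_SIZE);
 */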

/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * interrupt enable flag (the vcpu's evtchn_upcall_mask) in the
 * shared_info structure. Note that when the enable bit is set, there
 * may be pending events to be handled. We may therefore call into
 * do_hypervisor_callback() directly.
 */

#define __cli()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)

#define __sti()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if (__predict_false(_vcpu->evtchn_upcall_pending))              \
                force_evtchn_callback();                                \
} while (0)

#define __restore_flags(x)                                              \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if (__predict_false(_vcpu->evtchn_upcall_pending))      \
                        force_evtchn_callback();                        \
        }                                                               \
} while (0)

/*
 * Add critical_{enter, exit}?
 */
#define __save_and_cli(x)                                               \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)

#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()

#define mtx_lock_irqsave(lock, x) {local_irq_save((x)); mtx_lock_spin((lock));}
#define mtx_unlock_irqrestore(lock, x) {mtx_unlock_spin((lock)); local_irq_restore((x)); }
#define spin_lock_irqsave mtx_lock_irqsave
#define spin_unlock_irqrestore mtx_unlock_irqrestore
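
/*
 * Illustrative usage (hypothetical lock and flags variable): mask event
 * delivery, take a spin mutex, and restore the previous mask on exit:
 *
 *	unsigned long flags;
 *
 *	mtx_lock_irqsave(&some_lock, flags);
 *	... critical section with events masked ...
 *	mtx_unlock_irqrestore(&some_lock, flags);
 */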

#endif /* !XENHVM */

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define xen_xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                   int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
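
/*
 * Illustrative usage: atomically swap a new value into a shared word
 * and observe the previous one (assuming `state' is a shared int):
 *
 *	int old = xen_xchg(&state, 1);
 */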

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
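
/*
 * Illustrative usage (hypothetical bitmap): a constant bit number such
 * as test_bit(0, pending) resolves to constant_test_bit(), while a
 * variable one takes the variable_test_bit() (BT instruction) path:
 *
 *	unsigned long pending[4];
 *
 *	if (test_bit(port, pending))
 *		handle_port(port);
 */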


/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
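
/*
 * Illustrative usage: set and later clear the same bit in a
 * hypothetical bitmap:
 *
 *	set_bit(port, pending);
 *	...
 *	clear_bit(port, pending);
 */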

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}
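
/*
 * Illustrative usage: bump a shared counter without taking a lock:
 *
 *	static atomic_t events = { 0 };
 *	...
 *	atomic_inc(&events);
 */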


#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))
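
/*
 * Illustrative usage: read the 64-bit time stamp counter (EDX:EAX)
 * into a single 64-bit variable:
 *
 *	uint64_t tsc;
 *
 *	rdtscll(tsc);
 */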

#endif /* !__ASSEMBLY__ */

#endif /* _MACHINE_XEN_XEN_OS_H_ */