/*-
 * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libkse/arch/amd64/include/pthread_md.h 133756 2004-08-15 16:28:05Z dfr $
 */
/*
 * Machine-dependent thread prototypes/definitions for the thread kernel.
 */
#ifndef _PTHREAD_MD_H_
#define _PTHREAD_MD_H_

#include <sys/types.h>
#include <sys/kse.h>
#include <machine/sysarch.h>
#include <ucontext.h>

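/* Stack size used for each KSE's upcall (UTS) stack. */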
#define KSE_STACKSIZE           16384

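/*
 * Save or restore only the machine context (mcontext) portion of a
 * ucontext, using the amd64 helpers declared later in this file.
 */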
#define THR_GETCONTEXT(ucp)     \
        (void)_amd64_save_context(&(ucp)->uc_mcontext)
#define THR_SETCONTEXT(ucp)     \
        (void)_amd64_restore_context(&(ucp)->uc_mcontext, 0, NULL)

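/* The library keeps its scheduler state per KSE rather than per thread. */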
#define PER_KSE
#undef  PER_THREAD

struct kse;
struct pthread;
struct tdv;

/*
 * %fs points to a struct kcb.
 */
struct kcb {
        struct tcb              *kcb_curtcb;
        struct kcb              *kcb_self;      /* self reference */
        struct kse              *kcb_kse;
        struct kse_mailbox      kcb_kmbx;
};

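/*
 * Per-thread control block.  The first two members must keep this
 * order and position because rtld relies on that layout (see the
 * member comments).
 */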
struct tcb {
        struct tcb              *tcb_self;      /* required by rtld */
        void                    *tcb_dtv;       /* required by rtld */
        struct pthread          *tcb_thread;
        void                    *tcb_spare[1];  /* align tcb_tmbx to 16 bytes */
        struct kse_thr_mailbox  tcb_tmbx;
};

/*
 * Evaluates to the byte offset of the per-kse variable name.
 */
#define __kcb_offset(name)      __offsetof(struct kcb, name)

/*
 * Evaluates to the type of the per-kse variable name.
 */
#define __kcb_type(name)        __typeof(((struct kcb *)0)->name)

/*
 * Evaluates to the value of the per-kse variable name.
 */
#define KCB_GET64(name) ({                                      \
        __kcb_type(name) __result;                              \
                                                                \
        u_long __i;                                             \
        __asm __volatile("movq %%fs:%1, %0"                     \
            : "=r" (__i)                                        \
            : "m" (*(u_long *)(__kcb_offset(name))));           \
        __result = *(__kcb_type(name) *)&__i;                   \
                                                                \
        __result;                                               \
})
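/*
 * Example: KCB_GET64(kcb_curtcb) performs a single %fs-relative load
 * of the current tcb pointer.  Because the value is moved through a
 * u_long, these accessors (and KCB_SET64 below) only work for fields
 * that are exactly 8 bytes wide.
 */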

/*
 * Sets the value of the per-kse variable name to value val.
 */
#define KCB_SET64(name, val) ({                                 \
        __kcb_type(name) __val = (val);                         \
                                                                \
        u_long __i;                                             \
        __i = *(u_long *)&__val;                                \
        __asm __volatile("movq %1,%%fs:%0"                      \
            : "=m" (*(u_long *)(__kcb_offset(name)))            \
            : "r" (__i));                                       \
})

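/*
 * Atomically read and zero a 64-bit %fs-relative word.  xchg with a
 * memory operand is implicitly locked on x86, so the read and clear
 * happen as one step; _kcb_critical_enter() relies on this to grab
 * and clear km_curthread safely.
 */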
static __inline u_long
__kcb_readandclear64(volatile u_long *addr)
{
        u_long result;

        __asm __volatile (
        "       xorq    %0, %0;"
        "       xchgq   %%fs:%1, %0;"
        "# __kcb_readandclear64"
            : "=&r" (result)
            : "m" (*addr));
        return (result);
}

#define KCB_READANDCLEAR64(name) ({                             \
        __kcb_type(name) __result;                              \
                                                                \
        __result = (__kcb_type(name))                           \
            __kcb_readandclear64((u_long *)__kcb_offset(name)); \
        __result;                                               \
})

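/* Convenience accessors for the per-KSE fields; each expands to a %fs-relative access. */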
#define _kcb_curkcb()           KCB_GET64(kcb_self)
#define _kcb_curtcb()           KCB_GET64(kcb_curtcb)
#define _kcb_curkse()           ((struct kse *)KCB_GET64(kcb_kmbx.km_udata))
#define _kcb_get_tmbx()         KCB_GET64(kcb_kmbx.km_curthread)
#define _kcb_set_tmbx(value)    KCB_SET64(kcb_kmbx.km_curthread, (void *)value)
#define _kcb_readandclear_tmbx() KCB_READANDCLEAR64(kcb_kmbx.km_curthread)

/*
 * The constructors.
 */
struct tcb      *_tcb_ctor(struct pthread *, int);
void            _tcb_dtor(struct tcb *tcb);
struct kcb      *_kcb_ctor(struct kse *);
void            _kcb_dtor(struct kcb *);

/* Called from the KSE to set its private data. */
static __inline void
_kcb_set(struct kcb *kcb)
{
        amd64_set_fsbase(kcb);
}

/* Get the current kcb. */
static __inline struct kcb *
_kcb_get(void)
{
        return (_kcb_curkcb());
}

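/*
 * Critical sections: a NULL km_curthread marks this KSE as being in a
 * critical region (see _kcb_in_critical()).  _kcb_critical_enter()
 * saves and atomically clears the pointer; _kcb_critical_leave()
 * restores it.
 */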
static __inline struct kse_thr_mailbox *
_kcb_critical_enter(void)
{
        struct kse_thr_mailbox *crit;

        crit = _kcb_readandclear_tmbx();
        return (crit);
}

static __inline void
_kcb_critical_leave(struct kse_thr_mailbox *crit)
{
        _kcb_set_tmbx(crit);
}

static __inline int
_kcb_in_critical(void)
{
        return (_kcb_get_tmbx() == NULL);
}

static __inline void
_tcb_set(struct kcb *kcb, struct tcb *tcb)
{
        kcb->kcb_curtcb = tcb;
}

static __inline struct tcb *
_tcb_get(void)
{
        return (_kcb_curtcb());
}

static __inline struct pthread *
_get_curthread(void)
{
        struct tcb *tcb;

        tcb = _kcb_curtcb();
        if (tcb != NULL)
                return (tcb->tcb_thread);
        else
                return (NULL);
}

static __inline struct kse *
_get_curkse(void)
{
        return ((struct kse *)_kcb_curkse());
}

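/*
 * Low-level context and UTS-entry primitives.  They are only declared
 * here; the definitions live in the machine-dependent support code for
 * this architecture.
 */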
void    _amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
            size_t stacksz);
int     _amd64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
int     _amd64_save_context(mcontext_t *mc);

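/*
 * Save the current thread's context and enter the UTS on the KSE's
 * upcall stack.  _amd64_save_context() returns 0 on the initial save,
 * so the UTS is entered exactly once; when the saved context is later
 * resumed the call returns non-zero and this function returns 0.
 */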
static __inline int
_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
{
        int ret;

        ret = _amd64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext);
        if (ret == 0) {
                _amd64_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
                    kcb->kcb_kmbx.km_stack.ss_sp,
                    kcb->kcb_kmbx.km_stack.ss_size);
                /* We should not reach here. */
                return (-1);
        } else if (ret < 0)
                return (-1);
        return (0);
}

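/*
 * Switch this KSE to the thread described by tcb.  When _libkse_debug
 * is zero the context is restored directly in userland; with setmbox
 * set, the thread's mailbox address is written to km_curthread as part
 * of the restore.  When _libkse_debug is non-zero the kse_switchin()
 * syscall performs the switch instead.  A successful switch never
 * returns, so reaching the final return indicates failure.
 */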
static __inline int
_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
{
        extern int _libkse_debug;

        if ((kcb == NULL) || (tcb == NULL))
                return (-1);
        kcb->kcb_curtcb = tcb;

        if (_libkse_debug == 0) {
                tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
                if (setmbox != 0)
                        _amd64_restore_context(
                            &tcb->tcb_tmbx.tm_context.uc_mcontext,
                            (intptr_t)&tcb->tcb_tmbx,
                            (intptr_t *)&kcb->kcb_kmbx.km_curthread);
                else
                        _amd64_restore_context(
                            &tcb->tcb_tmbx.tm_context.uc_mcontext,
                            0, NULL);
                /* We should not reach here. */
        } else {
                if (setmbox)
                        kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
                else
                        kse_switchin(&tcb->tcb_tmbx, 0);
        }

        return (-1);
}
#endif