1/*-
2 * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
3 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Neither the name of the author nor the names of its contributors
12 *    may be used to endorse or promote products derived from this software
13 *    without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: releng/10.2/lib/libkse/arch/amd64/include/pthread_md.h 174112 2007-11-30 17:20:29Z deischen $
28 */
29/*
30 * Machine-dependent thread prototypes/definitions for the thread kernel.
31 */
32#ifndef _PTHREAD_MD_H_
33#define	_PTHREAD_MD_H_
34
35#include <stddef.h>
36#include <sys/types.h>
37#include <sys/kse.h>
38#include <machine/sysarch.h>
39#include <ucontext.h>
40
/* Default stack size (in bytes) for a KSE's UTS stack. */
#define	KSE_STACKSIZE		16384
/* Byte offset of the DTV pointer inside the tcb; layout required by rtld. */
#define	DTV_OFFSET		offsetof(struct tcb, tcb_dtv)

/*
 * Save/restore the machine context of a ucontext_t.  THR_GETCONTEXT
 * behaves like getcontext(2): control returns here again when the
 * saved context is later resumed.  Return values are intentionally
 * discarded by the callers of these macros.
 */
#define	THR_GETCONTEXT(ucp)	\
	(void)_amd64_save_context(&(ucp)->uc_mcontext)
#define	THR_SETCONTEXT(ucp)	\
	(void)_amd64_restore_context(&(ucp)->uc_mcontext, 0, NULL)

/* This platform keeps its private data per-KSE, not per-thread. */
#define	PER_KSE
#undef	PER_THREAD

/* Opaque types defined elsewhere in the thread library. */
struct kse;
struct pthread;
struct tdv;
55
/*
 * Per-KSE control block.  %fs points to this structure (see
 * _kcb_set()), so every KCB_GET64/KCB_SET64 access below is a
 * %fs-relative load or store of one of these fields.
 */
struct kcb {
	struct tcb		*kcb_curtcb;	/* currently running tcb, or NULL */
	struct kcb		*kcb_self;	/* self reference */
	struct kse		*kcb_kse;	/* owning KSE */
	struct kse_mailbox	kcb_kmbx;	/* kernel<->UTS mailbox */
};
65
/*
 * Per-thread control block.  The first two members' order is part of
 * the TLS ABI (rtld dereferences them directly) — do not reorder.
 */
struct tcb {
	struct tcb		*tcb_self;	/* required by rtld */
	void			*tcb_dtv;	/* required by rtld */
	struct pthread		*tcb_thread;	/* back pointer to the pthread */
	void			*tcb_spare[1];	/* align tcb_tmbx to 16 bytes */
	struct kse_thr_mailbox	tcb_tmbx;	/* per-thread kernel mailbox */
};
73
/*
 * Evaluates to the byte offset of the per-kse variable name.
 */
#define	__kcb_offset(name)	__offsetof(struct kcb, name)

/*
 * Evaluates to the type of the per-kse variable name.
 */
#define	__kcb_type(name)	__typeof(((struct kcb *)0)->name)

/*
 * Evaluates to the value of the per-kse variable name.
 * This is a single %fs-relative movq: the "m" operand is used only
 * for its constant displacement (the field offset); the actual base
 * is whatever kcb the current KSE installed with _kcb_set().
 */
#define	KCB_GET64(name) ({					\
	__kcb_type(name) __result;				\
								\
	u_long __i;						\
	__asm __volatile("movq %%fs:%1, %0"			\
	    : "=r" (__i)					\
	    : "m" (*(u_long *)(__kcb_offset(name))));		\
	__result = (__kcb_type(name))__i;			\
								\
	__result;						\
})
98
/*
 * Sets the value of the per-kse variable name to value val.
 * Single %fs-relative movq store; as in KCB_GET64, the memory
 * operand supplies only the field's displacement from %fs.
 */
#define	KCB_SET64(name, val) ({					\
	__kcb_type(name) __val = (val);				\
								\
	u_long __i;						\
	__i = (u_long)__val;					\
	__asm __volatile("movq %1,%%fs:%0"			\
	    : "=m" (*(u_long *)(__kcb_offset(name)))		\
	    : "r" (__i));					\
})
111
/*
 * Atomically read the 64-bit word at offset 'addr' from %fs and
 * replace it with zero.  xchg with a memory operand is implicitly
 * locked on x86, so the read-and-clear is a single atomic step.
 */
static __inline u_long
__kcb_readandclear64(volatile u_long *addr)
{
	u_long result;

	__asm __volatile (
	    "	xorq	%0, %0;"
	    "	xchgq	%%fs:%1, %0;"
	    "# __kcb_readandclear64"
	    : "=&r" (result)
	    : "m" (*addr));
	return (result);
}
125
/* Atomically fetch the per-kse variable name and reset it to zero. */
#define	KCB_READANDCLEAR64(name) ({				\
	__kcb_type(name) __result;				\
								\
	__result = (__kcb_type(name))				\
	    __kcb_readandclear64((u_long *)__kcb_offset(name)); \
	__result;						\
})
133
134
/*
 * Convenience accessors for fields of the current KSE's kcb
 * (all resolve to %fs-relative loads/stores via the KCB_* macros).
 */
#define	_kcb_curkcb()		KCB_GET64(kcb_self)
#define	_kcb_curtcb()		KCB_GET64(kcb_curtcb)
#define	_kcb_curkse()		((struct kse *)KCB_GET64(kcb_kmbx.km_udata))
#define	_kcb_get_tmbx()		KCB_GET64(kcb_kmbx.km_curthread)
/* Parenthesize the argument: the cast must apply to the whole expression. */
#define	_kcb_set_tmbx(value)	KCB_SET64(kcb_kmbx.km_curthread, (void *)(value))
#define	_kcb_readandclear_tmbx() KCB_READANDCLEAR64(kcb_kmbx.km_curthread)
141
/*
 * The constructors/destructors for the per-thread (tcb) and per-KSE
 * (kcb) control blocks; implemented in the machine-dependent sources.
 */
struct tcb	*_tcb_ctor(struct pthread *, int);
void		_tcb_dtor(struct tcb *tcb);
struct kcb	*_kcb_ctor(struct kse *);
void		_kcb_dtor(struct kcb *);
149
/* Called from the KSE to set its private data. */
static __inline void
_kcb_set(struct kcb *kcb)
{
	/* Point %fs at the kcb so the KCB_* accessors can reach it. */
	amd64_set_fsbase(kcb);
}
156
/* Get the current kcb (via the %fs-relative kcb_self self-reference). */
static __inline struct kcb *
_kcb_get(void)
{
	return (_kcb_curkcb());
}
163
/*
 * Enter a critical section: atomically detach the current thread
 * mailbox pointer from the KSE mailbox and return it to the caller,
 * who restores it later with _kcb_critical_leave().
 */
static __inline struct kse_thr_mailbox *
_kcb_critical_enter(void)
{
	return (_kcb_readandclear_tmbx());
}
172
/* Leave a critical section: reinstall the saved thread mailbox pointer. */
static __inline void
_kcb_critical_leave(struct kse_thr_mailbox *crit)
{
	_kcb_set_tmbx(crit);
}
178
/*
 * A NULL km_curthread means the mailbox pointer was detached by
 * _kcb_critical_enter(), i.e. we are inside a critical section.
 */
static __inline int
_kcb_in_critical(void)
{
	return (_kcb_get_tmbx() == NULL);
}
184
/* Record tcb as the currently running thread's control block on kcb. */
static __inline void
_tcb_set(struct kcb *kcb, struct tcb *tcb)
{
	kcb->kcb_curtcb = tcb;
}
190
/* Get the current thread's tcb, or NULL if none is installed. */
static __inline struct tcb *
_tcb_get(void)
{
	return (_kcb_curtcb());
}
196
197static __inline struct pthread *
198_get_curthread(void)
199{
200	struct tcb *tcb;
201
202	tcb = _kcb_curtcb();
203	if (tcb != NULL)
204		return (tcb->tcb_thread);
205	else
206		return (NULL);
207}
208
/* Return the current KSE (stashed in the mailbox's km_udata field). */
static __inline struct kse *
_get_curkse(void)
{
	return ((struct kse *)_kcb_curkse());
}
214
/*
 * Assembly entry points (implemented elsewhere in this library):
 *  _amd64_enter_uts       - switch to the given stack and call 'uts'.
 *  _amd64_restore_context - resume 'mc'; when 'loc' is non-NULL, 'val'
 *                           is stored through it as part of the switch
 *                           (see _thread_switch() below).
 *  _amd64_save_context    - getcontext-style save: returns 0 after the
 *                           save, and again with a nonzero value when
 *                           the context is resumed (negative presumably
 *                           signals an error — see _thread_enter_uts()).
 */
void _amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
    size_t stacksz);
int _amd64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
int _amd64_save_context(mcontext_t *mc);
219
220static __inline int
221_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
222{
223	int ret;
224
225	ret = _amd64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext);
226	if (ret == 0) {
227		_amd64_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
228		    kcb->kcb_kmbx.km_stack.ss_sp,
229		    kcb->kcb_kmbx.km_stack.ss_size);
230		/* We should not reach here. */
231		return (-1);
232	}
233	else if (ret < 0)
234		return (-1);
235	return (0);
236}
237
/*
 * Switch to thread tcb on KSE kcb.  If setmbox is nonzero, the
 * thread's mailbox pointer is also published in km_curthread as part
 * of the switch.  On success control never returns here; -1 is
 * returned only on bad arguments or if the switch-in falls through.
 */
static __inline int
_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
{
	/* Debug flag defined elsewhere in the library; selects the path. */
	extern int _libkse_debug;

	if ((kcb == NULL) || (tcb == NULL))
		return (-1);
	kcb->kcb_curtcb = tcb;

	if (_libkse_debug == 0) {
		/* Normal path: restore the context directly in userland. */
		tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
		if (setmbox != 0)
			/* Store &tcb_tmbx into km_curthread atomically
			 * with the context switch (done by the asm). */
			_amd64_restore_context(
				&tcb->tcb_tmbx.tm_context.uc_mcontext,
				(intptr_t)&tcb->tcb_tmbx,
				(intptr_t *)(void *)&kcb->kcb_kmbx.km_curthread);
		else
			_amd64_restore_context(
				&tcb->tcb_tmbx.tm_context.uc_mcontext,
				0, NULL);
		/* We should not reach here. */
	} else {
		/* _libkse_debug set: let the kernel perform the switch. */
		if (setmbox)
			kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
		else
			kse_switchin(&tcb->tcb_tmbx, 0);
	}

	return (-1);
}
268#endif
269