/* pthread_md.h revision 174127 */
1/*
2 * Copyright (c) 2003-2006 Marcel Moolenaar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * $FreeBSD: head/lib/libkse/arch/ia64/include/pthread_md.h 174127 2007-12-01 14:23:29Z rwatson $
27 */
28
#ifndef _PTHREAD_MD_H_
#define	_PTHREAD_MD_H_

#include <sys/kse.h>
#include <stddef.h>
#include <ucontext.h>

/* Stack size handed to each KSE for running the UTS. */
#define	KSE_STACKSIZE		16384

/*
 * Offset of tcb_tp.tp_dtv within struct tcb; presumably the TLS DTV
 * slot the runtime linker locates relative to the thread pointer —
 * confirm against rtld's ia64 TLS layout.
 */
#define	DTV_OFFSET		offsetof(struct tcb, tcb_tp.tp_dtv)

/* Save the current machine context into ucp's mcontext. */
#define	THR_GETCONTEXT(ucp)	_ia64_save_context(&(ucp)->uc_mcontext)
/* Context restore goes through _thread_switch() on ia64; this must not be used. */
#define	THR_SETCONTEXT(ucp)	PANIC("THR_SETCONTEXT() now in use!\n")

#define	PER_THREAD

/* Forward declarations; full definitions below or in other libkse sources. */
struct kcb;
struct kse;
struct pthread;
struct tcb;
48
49/*
50 * tp points to one of these. We define the TLS structure as a union
51 * containing a long double to enforce 16-byte alignment. This makes
52 * sure that there will not be any padding in struct tcb after the
53 * TLS structure.
54 */
55union ia64_tp {
56	void			*tp_dtv;
57	long double		_align_;
58};
59
/*
 * Per-thread control block.  The thread pointer (r13) points at
 * tcb_tp, so tcb_tp must remain the last member (see ia64_get_tcb(),
 * which subtracts offsetof(struct tcb, tcb_tp) to recover the tcb).
 */
struct tcb {
	struct kse_thr_mailbox	tcb_tmbx;	/* kernel thread mailbox */
	struct pthread		*tcb_thread;	/* owning pthread */
	struct kcb		*tcb_curkcb;	/* kcb we are running on */
	long			tcb_isfake;	/* non-zero for kcb_faketcb */
	union ia64_tp		tcb_tp;		/* TLS pointer target; keep last */
};
67
/*
 * Per-KSE control block.  kcb_faketcb stands in as the current tcb
 * while the KSE runs the UTS and no user thread is active.
 */
struct kcb {
	struct kse_mailbox	kcb_kmbx;	/* kernel KSE mailbox */
	struct kse		*kcb_kse;	/* owning kse */
	struct tcb		*kcb_curtcb;	/* tcb currently running here */
	struct tcb		kcb_faketcb;	/* placeholder tcb (tcb_isfake) */
};
74
/*
 * Recover the current tcb from the ia64 thread pointer.  Register
 * r13 holds the address of the tcb's tcb_tp member, so the start of
 * the structure is found by subtracting that member's offset.
 */
static __inline struct tcb *
ia64_get_tcb(void)
{
	/* Bind directly to r13, the ia64 thread-pointer register. */
	register char *tp __asm("%r13");

	return ((struct tcb *)(tp - offsetof(struct tcb, tcb_tp)));
}
82
/*
 * Install tcb as the current thread: load the address of its tcb_tp
 * member into r13 (the thread-pointer register).
 */
static __inline void
ia64_set_tcb(struct tcb *tcb)
{
	register char *tp __asm("%r13");

	/* ";;" ends the instruction group so the new tp is visible at once. */
	__asm __volatile("mov %0 = %1;;" : "=r"(tp) : "r"(&tcb->tcb_tp));
}
90
91/*
92 * The kcb and tcb constructors.
93 */
94struct tcb	*_tcb_ctor(struct pthread *, int);
95void		_tcb_dtor(struct tcb *);
96struct kcb	*_kcb_ctor(struct kse *kse);
97void		_kcb_dtor(struct kcb *);
98
99/* Called from the KSE to set its private data. */
100static __inline void
101_kcb_set(struct kcb *kcb)
102{
103	/* There is no thread yet; use the fake tcb. */
104	ia64_set_tcb(&kcb->kcb_faketcb);
105}
106
107/*
108 * Get the current kcb.
109 *
110 * This can only be called while in a critical region; don't
111 * worry about having the kcb changed out from under us.
112 */
113static __inline struct kcb *
114_kcb_get(void)
115{
116	return (ia64_get_tcb()->tcb_curkcb);
117}
118
119/*
120 * Enter a critical region.
121 *
122 * Read and clear km_curthread in the kse mailbox.
123 */
124static __inline struct kse_thr_mailbox *
125_kcb_critical_enter(void)
126{
127	struct tcb *tcb;
128	struct kse_thr_mailbox *crit;
129	uint32_t flags;
130
131	tcb = ia64_get_tcb();
132	if (tcb->tcb_isfake != 0) {
133		/*
134		 * We already are in a critical region since
135		 * there is no current thread.
136		 */
137		crit = NULL;
138	} else {
139		flags = tcb->tcb_tmbx.tm_flags;
140		tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
141		crit = tcb->tcb_curkcb->kcb_kmbx.km_curthread;
142		tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;
143		tcb->tcb_tmbx.tm_flags = flags;
144	}
145	return (crit);
146}
147
148static __inline void
149_kcb_critical_leave(struct kse_thr_mailbox *crit)
150{
151	struct tcb *tcb;
152
153	tcb = ia64_get_tcb();
154	/* No need to do anything if this is a fake tcb. */
155	if (tcb->tcb_isfake == 0)
156		tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit;
157}
158
159static __inline int
160_kcb_in_critical(void)
161{
162	struct tcb *tcb;
163	uint32_t flags;
164	int ret;
165
166	tcb = ia64_get_tcb();
167	if (tcb->tcb_isfake != 0) {
168		/*
169		 * We are in a critical region since there is no
170		 * current thread.
171		 */
172		ret = 1;
173	} else {
174		flags = tcb->tcb_tmbx.tm_flags;
175		tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
176		ret = (tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
177		tcb->tcb_tmbx.tm_flags = flags;
178	}
179	return (ret);
180}
181
182static __inline void
183_tcb_set(struct kcb *kcb, struct tcb *tcb)
184{
185	if (tcb == NULL)
186		tcb = &kcb->kcb_faketcb;
187	kcb->kcb_curtcb = tcb;
188	tcb->tcb_curkcb = kcb;
189	ia64_set_tcb(tcb);
190}
191
/* Return the current tcb (derived from the thread pointer). */
static __inline struct tcb *
_tcb_get(void)
{
	struct tcb *cur;

	cur = ia64_get_tcb();
	return (cur);
}
197
198static __inline struct pthread *
199_get_curthread(void)
200{
201	return (ia64_get_tcb()->tcb_thread);
202}
203
204/*
205 * Get the current kse.
206 *
207 * Like _kcb_get(), this can only be called while in a critical region.
208 */
209static __inline struct kse *
210_get_curkse(void)
211{
212	return (ia64_get_tcb()->tcb_curkcb->kcb_kse);
213}
214
/*
 * ia64 context-switch primitives, implemented in assembly elsewhere
 * in libkse.  _ia64_save_context() returns 0 when saving and non-zero
 * when the context is later resumed (setjmp-style; see
 * _thread_enter_uts()).
 */
void _ia64_break_setcontext(mcontext_t *mc);
void _ia64_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack,
    size_t stacksz);
int _ia64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
int _ia64_save_context(mcontext_t *mc);
220
/*
 * Save the current thread's context into its mailbox and switch to
 * the UTS (user-level scheduler) running on kcb's stack.  Returns 0
 * when this context is eventually resumed; -1 only on the
 * should-not-happen path where _ia64_enter_uts() returns.
 */
static __inline int
_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
{
	/* _ia64_save_context() returns 0 on save, non-zero on resume. */
	if (_ia64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
		/* Make the fake tcb the current thread. */
		kcb->kcb_curtcb = &kcb->kcb_faketcb;
		ia64_set_tcb(&kcb->kcb_faketcb);
		_ia64_enter_uts(kcb->kcb_kmbx.km_func, &kcb->kcb_kmbx,
		    kcb->kcb_kmbx.km_stack.ss_sp,
		    kcb->kcb_kmbx.km_stack.ss_size);
		/* We should not reach here. */
		return (-1);
	}
	return (0);
}
236
/*
 * Switch to thread tcb on kcb, restoring its saved context.  When
 * setmbox is non-zero, km_curthread is additionally set to the
 * thread's mailbox as part of the switch.  Three restore paths exist,
 * selected by how the context was captured (async upcall, syscall,
 * or voluntary save); none of them return on success.
 */
static __inline int
_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
{
	mcontext_t *mc;

	_tcb_set(kcb, tcb);
	mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
	if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
		/*
		 * Async (upcall-interrupted) context: restored via a
		 * break instruction.  The mailbox-set request is
		 * smuggled through the ifa/isr fields — assumes the
		 * kernel break handler consumes them; see
		 * _ia64_break_setcontext().
		 */
		if (setmbox) {
			mc->mc_flags |= _MC_FLAGS_KSE_SET_MBOX;
			mc->mc_special.ifa =
			    (intptr_t)&kcb->kcb_kmbx.km_curthread;
			mc->mc_special.isr = (intptr_t)&tcb->tcb_tmbx;
		}
		_ia64_break_setcontext(mc);
	} else if (mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) {
		/* Context saved at syscall boundary: let the kernel switch. */
		if (setmbox)
			kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
		else
			kse_switchin(&tcb->tcb_tmbx, 0);
	} else {
		/* Voluntarily saved context: restore in userland. */
		if (setmbox)
			_ia64_restore_context(mc, (intptr_t)&tcb->tcb_tmbx,
			    (intptr_t *)&kcb->kcb_kmbx.km_curthread);
		else
			_ia64_restore_context(mc, 0, NULL);
	}
	/* We should not reach here. */
	return (-1);
}
267
268#endif /* _PTHREAD_MD_H_ */
269