/*
 * Copyright (c) 2003 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libkse/arch/ia64/include/pthread_md.h 118513 2003-08-06 00:17:15Z marcel $
 */

#ifndef _PTHREAD_MD_H_
#define	_PTHREAD_MD_H_

#include <sys/kse.h>
#include <ucontext.h>

#define	THR_GETCONTEXT(ucp)	_ia64_save_context(&(ucp)->uc_mcontext)
#define	THR_SETCONTEXT(ucp)	_ia64_restore_context(&(ucp)->uc_mcontext, \
				    0, NULL)
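
/*
 * Illustrative note (not part of the original header): these macros behave
 * much like getcontext(2)/setcontext(2). Judging from _thread_enter_uts()
 * below, _ia64_save_context() returns 0 when the context is saved and
 * nonzero when that saved context is resumed, so a hypothetical caller
 * would look roughly like:
 *
 *	ucontext_t uc;
 *
 *	if (THR_GETCONTEXT(&uc) == 0) {
 *		(... do work, then jump back ...)
 *		THR_SETCONTEXT(&uc);	(does not return)
 *	}
 *	(... execution resumes here after THR_SETCONTEXT ...)
 */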

#define	PER_THREAD

struct kcb;
struct kse;
struct pthread;
struct tcb;
struct tdv;	/* We don't know what this is yet? */

/*
 * tp points to one of these. We define the static TLS as an array
 * of long double to enforce 16-byte alignment of the TLS memory,
 * struct ia64_tp, struct tcb and also struct kcb. Both static and
 * dynamic allocation of any of these structures will result in a
 * valid, well-aligned thread pointer.
 */
struct ia64_tp {
	struct tdv		*tp_tdv;	/* dynamic TLS */
	struct tcb		*tp_self;
	long double		tp_tls[0];	/* static TLS */
};
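
/*
 * Illustrative note (not part of the original header): the thread pointer
 * register points at the ia64_tp embedded in the current tcb (tcb_tp
 * below), and tp_self points back at that enclosing tcb. The inline
 * accessors below rely on this, e.g.
 *
 *	struct tcb *tcb = _tp->tp_self;		(what _tcb_get() returns)
 */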

struct tcb {
	struct kse_thr_mailbox	tcb_tmbx;
	struct pthread		*tcb_thread;
	struct kcb		*tcb_curkcb;
	long			tcb_isfake;
	struct ia64_tp		tcb_tp;
};

struct kcb {
	struct kse_mailbox	kcb_kmbx;
	struct tcb		kcb_faketcb;
	struct tcb		*kcb_curtcb;
	struct kse		*kcb_kse;
};

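/* The ia64 software conventions reserve r13 as the thread pointer (tp). */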
register struct ia64_tp *_tp __asm("%r13");

/*
 * The kcb and tcb constructors.
 */
struct tcb	*_tcb_ctor(struct pthread *);
void		_tcb_dtor(struct tcb *);
struct kcb	*_kcb_ctor(struct kse *kse);
void		_kcb_dtor(struct kcb *);

/* Called from the KSE to set its private data. */
static __inline void
_kcb_set(struct kcb *kcb)
{
	/* There is no thread yet; use the fake tcb. */
	_tp = &kcb->kcb_faketcb.tcb_tp;
}

/*
 * Get the current kcb.
 *
 * This can only be called while in a critical region; don't
 * worry about having the kcb changed out from under us.
 */
static __inline struct kcb *
_kcb_get(void)
{
	return (_tp->tp_self->tcb_curkcb);
}

/*
 * Enter a critical region.
 *
 * Read and clear km_curthread in the kse mailbox.
 */
static __inline struct kse_thr_mailbox *
_kcb_critical_enter(void)
{
	struct kse_thr_mailbox *crit;
	struct tcb *tcb;
	uint32_t flags;

	tcb = _tp->tp_self;
	if (tcb->tcb_isfake != 0) {
		/*
		 * We already are in a critical region since
		 * there is no current thread.
		 */
		crit = NULL;
	} else {
		flags = tcb->tcb_tmbx.tm_flags;
		tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
		crit = tcb->tcb_curkcb->kcb_kmbx.km_curthread;
		tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;
		tcb->tcb_tmbx.tm_flags = flags;
	}
	return (crit);
}

static __inline void
_kcb_critical_leave(struct kse_thr_mailbox *crit)
{
	struct tcb *tcb;

	tcb = _tp->tp_self;
	/* No need to do anything if this is a fake tcb. */
	if (tcb->tcb_isfake == 0)
		tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit;
}
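
/*
 * Illustrative note (not part of the original header): callers bracket
 * critical code with the enter/leave pair, handing the token returned by
 * _kcb_critical_enter() back to _kcb_critical_leave(), e.g.
 *
 *	struct kse_thr_mailbox *crit;
 *
 *	crit = _kcb_critical_enter();
 *	(... upcalls are blocked here; the kcb cannot change ...)
 *	_kcb_critical_leave(crit);
 */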

static __inline int
_kcb_in_critical(void)
{
	struct tcb *tcb;
	uint32_t flags;
	int ret;

	tcb = _tp->tp_self;
	if (tcb->tcb_isfake != 0) {
		/*
		 * We are in a critical region since there is no
		 * current thread.
		 */
		ret = 1;
	} else {
		flags = tcb->tcb_tmbx.tm_flags;
		tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
		ret = (tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
		tcb->tcb_tmbx.tm_flags = flags;
	}
	return (ret);
}

static __inline void
_tcb_set(struct kcb *kcb, struct tcb *tcb)
{
	if (tcb == NULL) {
		kcb->kcb_curtcb = &kcb->kcb_faketcb;
		_tp = &kcb->kcb_faketcb.tcb_tp;
	}
	else {
		kcb->kcb_curtcb = tcb;
		tcb->tcb_curkcb = kcb;
		_tp = &tcb->tcb_tp;
	}
}

static __inline struct tcb *
_tcb_get(void)
{
	return (_tp->tp_self);
}

static __inline struct pthread *
_get_curthread(void)
{
	return (_tp->tp_self->tcb_thread);
}

/*
 * Get the current kse.
 *
 * Like _kcb_get(), this can only be called while in a critical region.
 */
static __inline struct kse *
_get_curkse(void)
{
	return (_tp->tp_self->tcb_curkcb->kcb_kse);
}

void _ia64_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack,
    size_t stacksz);
int _ia64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
int _ia64_save_context(mcontext_t *mc);
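
/*
 * Note (added; inferred from the callers below): _ia64_save_context()
 * returns 0 when it saves the context and nonzero when that context is
 * later resumed. _ia64_restore_context() does not return; when loc is
 * non-NULL it appears to store val at *loc as part of resuming, which is
 * how _thread_switch() publishes the new thread's mailbox in km_curthread
 * atomically with the switch.
 */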

static __inline int
_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
{
	if (_ia64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
		/* Make the fake tcb the current thread. */
		kcb->kcb_curtcb = &kcb->kcb_faketcb;
		_tp = &kcb->kcb_faketcb.tcb_tp;
		_ia64_enter_uts(kcb->kcb_kmbx.km_func, &kcb->kcb_kmbx,
		    kcb->kcb_kmbx.km_stack.ss_sp,
		    kcb->kcb_kmbx.km_stack.ss_size);
		/* We should not reach here. */
		return (-1);
	}
	return (0);
}

static __inline int
_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
{
	kcb->kcb_curtcb = tcb;
	tcb->tcb_curkcb = kcb;
	_tp = &tcb->tcb_tp;
	if (setmbox != 0)
		_ia64_restore_context(&tcb->tcb_tmbx.tm_context.uc_mcontext,
		    (intptr_t)&tcb->tcb_tmbx,
		    (intptr_t *)&kcb->kcb_kmbx.km_curthread);
	else
		_ia64_restore_context(&tcb->tcb_tmbx.tm_context.uc_mcontext,
		    0, NULL);
	/* We should not reach here. */
	return (-1);
}
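
/*
 * Illustrative note (not part of the original header): in the KSE model a
 * thread hands control to the UTS with _thread_enter_uts(), which saves
 * its context and runs the upcall handler on the KSE's own stack; the UTS
 * then picks the next thread and resumes it with _thread_switch(),
 * optionally publishing that thread's mailbox via setmbox.
 */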

#endif /* _PTHREAD_MD_H_ */