1/*-
2 * Copyright (c) 2003 Jake Burkholder <jake@freebsd.org>.
3 * Copyright (c) 2003 Marcel Moolenaar
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 * $FreeBSD: releng/10.3/lib/libkse/arch/arm/include/pthread_md.h 142571 2005-02-26 19:06:49Z cognet $
28 */
29
30/*
31 * Machine-dependent thread prototypes/definitions for the thread kernel.
32 */
33#ifndef _PTHREAD_MD_H_
34#define	_PTHREAD_MD_H_
35
36#include <sys/kse.h>
37#include <stddef.h>
38#include <ucontext.h>
39
40#define	KSE_STACKSIZE		16384
41#define	DTV_OFFSET              offsetof(struct tcb, tcb_tp.tp_tdv)
42
43int _thr_setcontext(mcontext_t *, intptr_t, intptr_t *);
44int _thr_getcontext(mcontext_t *);
45
46#define	THR_GETCONTEXT(ucp)	_thr_getcontext(&(ucp)->uc_mcontext)
47#define	THR_SETCONTEXT(ucp)	_thr_setcontext(&(ucp)->uc_mcontext, 0, NULL)
48
49#define	PER_THREAD
50
51struct kcb;
52struct kse;
53struct pthread;
54struct tcb;
struct tdv;	/* Dynamic TLS data; its precise layout is not known here. */
56
57
58/*
59 * %r6 points to one of these. We define the static TLS as an array
60 * of long double to enforce 16-byte alignment of the TLS memory.
61 *
62 * XXX - Both static and dynamic allocation of any of these structures
63 *       will result in a valid, well-aligned thread pointer???
64 */
/*
 * The object the thread pointer (_tp) addresses.  It is embedded at
 * the tail of struct tcb so the enclosing tcb can be recovered from
 * it via offsetof (see the _tcb macro below).
 */
struct arm_tp {
	struct tdv		*tp_tdv;	/* dynamic TLS */
};
68
/* Per-thread control block. */
struct tcb {
	struct pthread		*tcb_thread;	/* owning pthread */
	struct kcb		*tcb_curkcb;	/* kcb this thread runs on */
	uint32_t		tcb_isfake;	/* non-zero: this is a kcb's built-in fake tcb */
	struct kse_thr_mailbox	tcb_tmbx;	/* needs 32-byte alignment */
	struct arm_tp		tcb_tp;		/* _tp points here; _tcb maps back via offsetof */
};
76
/* Per-KSE control block. */
struct kcb {
	struct kse_mailbox	kcb_kmbx;	/* kernel KSE mailbox */
	struct tcb		kcb_faketcb;	/* used while no real thread is current */
	struct tcb		*kcb_curtcb;	/* currently installed tcb */
	struct kse		*kcb_kse;	/* back pointer to the owning kse */
};
83
84extern struct arm_tp **arm_tp;
85#define _tp (*arm_tp)
86
87#define	_tcb	((struct tcb*)((char*)(_tp) - offsetof(struct tcb, tcb_tp)))
88
89/*
90 * The kcb and tcb constructors.
91 */
92struct tcb	*_tcb_ctor(struct pthread *, int);
93void		_tcb_dtor(struct tcb *);
94struct kcb	*_kcb_ctor(struct kse *kse);
95void		_kcb_dtor(struct kcb *);
96
/*
 * Atomically swap the 32-bit value 'val' with the word at 'ptr' using
 * the ARM SWP instruction; returns the previous contents of *ptr.
 * The "memory" clobber keeps the compiler from caching memory
 * accesses across the swap.  NOTE(review): SWP is deprecated on
 * ARMv6+ — presumably acceptable for the ARM targets this port
 * supports; confirm.
 */
static __inline uint32_t
__kcb_swp(uint32_t val, void *ptr)
{

	__asm __volatile("swp   %0, %1, [%2]"
	    : "=r" (val) : "r" (val) , "r" (ptr) : "memory");
    	return (val);
}
105
106/* Called from the KSE to set its private data. */
107static __inline void
108_kcb_set(struct kcb *kcb)
109{
110	/* There is no thread yet; use the fake tcb. */
111	__kcb_swp((uint32_t)&kcb->kcb_faketcb.tcb_tp, &_tp);
112}
113
114/*
115 * Get the current kcb.
116 *
117 * This can only be called while in a critical region; don't
118 * worry about having the kcb changed out from under us.
119 */
120static __inline struct kcb *
121_kcb_get(void)
122{
123	return (_tcb->tcb_curkcb);
124}
125
126/*
127 * Enter a critical region.
128 *
129 * Read and clear km_curthread in the kse mailbox.
130 */
131static __inline struct kse_thr_mailbox *
132_kcb_critical_enter(void)
133{
134	struct kse_thr_mailbox *crit;
135
136	if (_tcb->tcb_isfake)
137		return (NULL);
138	crit = (struct kse_thr_mailbox *)__kcb_swp((uint32_t)NULL,
139	    &_tcb->tcb_curkcb->kcb_kmbx.km_curthread);
140	return (crit);
141}
142
143static __inline void
144_kcb_critical_leave(struct kse_thr_mailbox *crit)
145{
146
147	if (_tcb->tcb_isfake == 0)
148		__kcb_swp((uint32_t)crit,
149		    &_tcb->tcb_curkcb->kcb_kmbx.km_curthread);
150}
151
152static __inline int
153_kcb_in_critical(void)
154{
155	uint32_t flags;
156	int ret;
157
158	return (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
159	if (_tcb->tcb_isfake != 0) {
160		/*
161		 * We are in a critical region since there is no
162		 * current thread.
163		 */
164		ret = 1;
165	} else {
166		flags = _tcb->tcb_tmbx.tm_flags;
167		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
168		ret = (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
169		_tcb->tcb_tmbx.tm_flags = flags;
170	}
171	return (ret);
172}
173
174static __inline void
175_tcb_set(struct kcb *kcb, struct tcb *tcb)
176{
177	if (tcb == NULL)
178		tcb = &kcb->kcb_faketcb;
179	__kcb_swp((uint32_t)&tcb->tcb_tp, &_tp);
180	kcb->kcb_curtcb = tcb;
181	tcb->tcb_curkcb = kcb;
182}
183
184static __inline struct tcb *
185_tcb_get(void)
186{
187	return (_tcb);
188}
189
190static __inline struct pthread *
191_get_curthread(void)
192{
193	return (_tcb->tcb_thread);
194}
195
196/*
197 * Get the current kse.
198 *
199 * Like _kcb_get(), this can only be called while in a critical region.
200 */
201static __inline struct kse *
202_get_curkse(void)
203{
204	return (_tcb->tcb_curkcb->kcb_kse);
205}
206
207void _arm_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
208    size_t stacksz);
209
210static __inline int
211_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
212{
213	int ret;
214
215	if ((ret = _thr_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext))
216	    == 0) {
217		kcb->kcb_curtcb = &kcb->kcb_faketcb;
218		__kcb_swp((int)&kcb->kcb_faketcb.tcb_tp, &_tp);
219		_arm_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
220		    kcb->kcb_kmbx.km_stack.ss_sp,
221		    kcb->kcb_kmbx.km_stack.ss_size);
222		/* We should not reach here. */
223		return (-1);
224	} else if (ret < 0)
225		return (-1);
226	return (0);
227}
228
229static __inline int
230_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
231{
232	extern int _libkse_debug;
233	mcontext_t *mc;
234
235	if (!tcb || !kcb)
236		return (-1);
237	_tcb_set(kcb, tcb);
238	mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
239	if (_libkse_debug == 0) {
240		tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
241		if (setmbox)
242			_thr_setcontext(mc, (intptr_t)&tcb->tcb_tmbx,
243				(intptr_t *)&kcb->kcb_kmbx.km_curthread);
244		else
245			_thr_setcontext(mc, 0, NULL);
246	} else {
247		if (setmbox)
248			kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
249		else
250			kse_switchin(&tcb->tcb_tmbx, 0);
251	}
252
253	/* We should not reach here. */
254	return (-1);
255}
256
257#endif /* _PTHREAD_MD_H_ */
258