/*
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _THREAD_H
#define _THREAD_H


#include <OS.h>

#include <arch/atomic.h>
#include <arch/thread.h>
// For the thread blocking inline functions only.
#include <kscheduler.h>
#include <ksignal.h>
#include <thread_types.h>


struct arch_fork_arg;
struct kernel_args;
struct select_info;
struct thread_creation_attributes;


// thread notifications
#define THREAD_MONITOR		'_tm_'
#define THREAD_ADDED		0x01
#define THREAD_REMOVED		0x02
#define THREAD_NAME_CHANGED	0x04


namespace BKernel {


struct ThreadCreationAttributes : thread_creation_attributes {
	// when calling from kernel only
	team_id			team;
	Thread*			thread;
	sigset_t		signal_mask;
	size_t			additional_stack_size;	// additional space in the stack
											// area after the TLS region, not
											// used as thread stack
	thread_func		kernelEntry;
	void*			kernelArgument;
	arch_fork_arg*	forkArgs;				// If non-NULL, the userland thread
											// will be started with this
											// register context.

public:
								ThreadCreationAttributes() {}
									// no-init constructor
								ThreadCreationAttributes(
									thread_func function, const char* name,
									int32 priority, void* arg,
									team_id team = -1, Thread* thread = NULL);

			status_t			InitFromUserAttributes(
									const thread_creation_attributes*
										userAttributes,
									char* nameBuffer);
};
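
// A minimal usage sketch (illustration only, not kernel API): kernel code
// typically fills in the attributes via the convenience constructor above and
// passes them to thread_create_thread(). The entry function and argument
// names below are hypothetical.
//
//	static status_t my_worker(void* argument);	// hypothetical entry point
//
//	ThreadCreationAttributes attributes(&my_worker, "my worker",
//		B_NORMAL_PRIORITY, myArgument);
//	thread_id thread = thread_create_thread(attributes, true);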


}	// namespace BKernel

using BKernel::ThreadCreationAttributes;


extern spinlock gThreadCreationLock;


#ifdef __cplusplus
extern "C" {
#endif

void thread_at_kernel_entry(bigtime_t now);
	// called when the thread enters the kernel on behalf of userland
void thread_at_kernel_exit(void);
void thread_at_kernel_exit_no_signals(void);
void thread_reset_for_exec(void);

status_t thread_init(struct kernel_args *args);
status_t thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum);
void thread_yield(void);
void thread_exit(void);

void thread_map(void (*function)(Thread* thread, void* data), void* data);

int32 thread_max_threads(void);
int32 thread_used_threads(void);

const char* thread_state_to_text(Thread* thread, int32 state);

int32 thread_get_io_priority(thread_id id);
void thread_set_io_priority(int32 priority);

#define thread_get_current_thread arch_thread_get_current_thread

static thread_id thread_get_current_thread_id(void);
static inline thread_id
thread_get_current_thread_id(void)
{
	Thread *thread = thread_get_current_thread();
	return thread ? thread->id : 0;
}

static inline bool
thread_is_idle_thread(Thread *thread)
{
	return thread->priority == B_IDLE_PRIORITY;
}

thread_id allocate_thread_id();
thread_id peek_next_thread_id();

status_t thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
	void* argument1, void* argument2);
status_t thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
	size_t stackSize, size_t additionalSize);
thread_id thread_create_thread(const ThreadCreationAttributes& attributes,
	bool kernel);

thread_id spawn_kernel_thread_etc(thread_func, const char *name, int32 priority,
	void *args, team_id team);

status_t select_thread(int32 object, struct select_info *info, bool kernel);
status_t deselect_thread(int32 object, struct select_info *info, bool kernel);

#define syscall_64_bit_return_value() arch_syscall_64_bit_return_value()

status_t thread_block();
status_t thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout);
void thread_unblock(Thread* thread, status_t status);
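	// See the documentation of thread_prepare_to_block() below for how these
	// fit into the full blocking/unblocking protocol.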

// used in syscalls.c
status_t _user_set_thread_priority(thread_id thread, int32 newPriority);
status_t _user_rename_thread(thread_id thread, const char *name);
status_t _user_suspend_thread(thread_id thread);
status_t _user_resume_thread(thread_id thread);
thread_id _user_spawn_thread(struct thread_creation_attributes* attributes);
status_t _user_wait_for_thread(thread_id id, status_t *_returnCode);
status_t _user_wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
	status_t *_returnCode);
status_t _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
	bigtime_t* _remainingTime);
status_t _user_kill_thread(thread_id thread);
status_t _user_cancel_thread(thread_id threadID, void (*cancelFunction)(int));
void _user_thread_yield(void);
void _user_exit_thread(status_t return_value);
bool _user_has_data(thread_id thread);
status_t _user_send_data(thread_id thread, int32 code, const void *buffer, size_t buffer_size);
status_t _user_receive_data(thread_id *_sender, void *buffer, size_t buffer_size);
thread_id _user_find_thread(const char *name);
status_t _user_get_thread_info(thread_id id, thread_info *info);
status_t _user_get_next_thread_info(team_id team, int32 *cookie, thread_info *info);
int _user_get_cpu();

status_t _user_block_thread(uint32 flags, bigtime_t timeout);
status_t _user_unblock_thread(thread_id thread, status_t status);
status_t _user_unblock_threads(thread_id* threads, uint32 count,
	status_t status);

// ToDo: these don't belong here
struct rlimit;
int _user_getrlimit(int resource, struct rlimit * rlp);
int _user_setrlimit(int resource, const struct rlimit * rlp);

#ifdef __cplusplus
}
#endif


/*!	Checks whether the current thread would immediately be interrupted when
	blocking it with the given wait/interrupt flags.

	The caller must hold the scheduler lock.

	\param thread The current thread.
	\param flags Wait/interrupt flags to be considered. Relevant are:
		- \c B_CAN_INTERRUPT: The thread can be interrupted by any non-blocked
			signal. Implies \c B_KILL_CAN_INTERRUPT (specified or not).
		- \c B_KILL_CAN_INTERRUPT: The thread can be interrupted by a kill
			signal.
	\return \c true, if the thread would be interrupted, \c false otherwise.
*/
static inline bool
thread_is_interrupted(Thread* thread, uint32 flags)
{
	sigset_t pendingSignals = thread->AllPendingSignals();
	return ((flags & B_CAN_INTERRUPT) != 0
			&& (pendingSignals & ~thread->sig_block_mask) != 0)
		|| ((flags & B_KILL_CAN_INTERRUPT) != 0
			&& (pendingSignals & KILL_SIGNALS) != 0);
}

/*!	Checks whether the given thread is currently blocked (i.e. still waiting
	for something).

	If a stable answer is required, the caller must hold the scheduler lock.
	Alternatively, if the wait is neither interruptible nor subject to a
	timeout, holding the client lock that was held when calling
	thread_prepare_to_block() and the unblocking functions works as well.

	\param thread The thread in question.
	\return \c true, if the thread is blocked, \c false otherwise.
*/
static inline bool
thread_is_blocked(Thread* thread)
{
	return atomic_get(&thread->wait.status) == 1;
}


/*!	Prepares the current thread for waiting.

	This is the first of two steps necessary to block the current thread
	(IOW, to let it wait for someone else to unblock it or optionally time out
	after a specified delay). The process consists of two steps to avoid race
	conditions in case a lock other than the scheduler lock is involved.

	Usually the thread waits for some condition to change and this condition is
	something reflected in the caller's data structures which should be
	protected by a client lock the caller knows about. E.g. in the semaphore
	code that lock is a per-semaphore spinlock that protects the semaphore data,
	including the semaphore count and the queue of waiting threads. For certain
	low-level locking primitives (e.g. mutexes) that client lock is the
	scheduler lock itself, which simplifies things a bit.

	If a client lock other than the scheduler lock is used, this function must
	be called with that lock being held. Afterwards that lock should be dropped
	and the function that actually blocks the thread shall be invoked
	(thread_block[_locked]() or thread_block_with_timeout()). In between these
	two steps no functionality that uses the thread blocking API for this thread
	shall be used.

	When the caller determines that the condition for unblocking the thread
	occurred, it calls thread_unblock_locked() to unblock the thread. At that
	time one of the locks that were held when calling thread_prepare_to_block()
	must be held. Usually that would be the client lock. In two cases it
	generally isn't, however, since the unblocking code doesn't know about the
	client lock: 1. When thread_block_with_timeout() had been used and the
	timeout occurs. 2. When thread_prepare_to_block() had been called with one
	or both of the \c B_CAN_INTERRUPT or \c B_KILL_CAN_INTERRUPT flags specified
	and someone calls thread_interrupt() that is supposed to wake up the thread.
	In either of these two cases only the scheduler lock is held by the
	unblocking code. A timeout can only happen after
	thread_block_with_timeout() has been called, but an interruption is
	possible at any time. The client code must deal with those situations.

	Generally blocking and unblocking threads proceed in the following manner:

	Blocking thread:
	- Acquire client lock.
	- Check client condition and decide whether blocking is necessary.
	- Modify some client data structure to indicate that this thread is now
		waiting.
	- Release client lock (unless client lock is the scheduler lock).
	- Block.
	- Acquire client lock (unless client lock is the scheduler lock).
	- Check client condition and compare with block result. E.g. if the wait was
		interrupted or timed out, but the client condition indicates success, it
		may be considered a success after all, since usually that happens when
		another thread concurrently changed the client condition and also tried
		to unblock the waiting thread. It is even necessary when that other
		thread changed the client data structures in a way that associates some
		resource with the unblocked thread, since otherwise the unblocked thread
		would have to reverse that here.
	- If still necessary -- i.e. not already taken care of by an unblocking
		thread -- modify some client structure to indicate that the thread is no
		longer waiting, so it isn't erroneously unblocked later.

	Unblocking thread:
	- Acquire client lock.
	- Check client condition and decide whether a blocked thread can be woken
		up.
	- Check the client data structure that indicates whether one or more threads
		are waiting and which thread(s) need(s) to be woken up.
	- Unblock respective thread(s).
	- Possibly change some client structure, so that an unblocked thread can
		decide whether a concurrent timeout/interruption can be ignored, or
		simply so that it doesn't have to do any more cleanup.

	Note that in the blocking thread the steps after blocking are strictly
	required only if timeouts or interruptions are possible. If they are not,
	the blocking thread can only be woken up explicitly by an unblocking thread,
	which could already take care of all the necessary client data structure
	modifications, so that the blocking thread wouldn't have to do that.

	Note that the client lock can but does not have to be a spinlock.
	A mutex, a semaphore, or anything that doesn't try to use the thread
	blocking API for the calling thread when releasing the lock is fine.
	In particular that means in principle thread_prepare_to_block() can be
	called with interrupts enabled.

	Care must be taken when the wait can be interrupted or can time out,
	especially with a client lock that uses the thread blocking API. After a
	blocked thread has been interrupted or the timeout has occurred, it cannot
	acquire the client lock (or any other lock using the thread blocking API)
	without first making sure that the thread no longer appears to other client
	code to be waiting. Otherwise another thread could try to unblock it, which
	could erroneously wake it up while it is already waiting on the client lock.
	So usually when interruptions or timeouts are possible a spinlock needs to
	be involved.
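
	As an illustration only, a wait on a hypothetical condition protected by a
	client spinlock might look roughly like the sketch below. \c clientLock,
	\c condition_holds(), \c make_condition_hold(), \c enqueue_waiter(),
	\c remove_waiter(), and \c dequeue_waiter() are made-up names, not kernel
	API:

	\code
	// blocking side (interrupts must be disabled to use spinlocks)
	acquire_spinlock(&clientLock);
	while (!condition_holds()) {
		Thread* self = thread_get_current_thread();
		thread_prepare_to_block(self, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "example wait");
		enqueue_waiter(self);
		release_spinlock(&clientLock);

		status_t status = thread_block();

		acquire_spinlock(&clientLock);
		if (status != B_OK && !condition_holds()) {
			// Interrupted (or timed out) and the condition still doesn't
			// hold: stop appearing as a waiter, then give up.
			remove_waiter(self);
			break;
		}
	}
	release_spinlock(&clientLock);

	// unblocking side
	acquire_spinlock(&clientLock);
	make_condition_hold();
	if (Thread* waiter = dequeue_waiter())
		thread_unblock(waiter, B_OK);
	release_spinlock(&clientLock);
	\endcode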

	\param thread The current thread.
	\param flags The blocking flags. Relevant are:
		- \c B_CAN_INTERRUPT: The thread can be interrupted by any non-blocked
			signal. Implies \c B_KILL_CAN_INTERRUPT (specified or not).
		- \c B_KILL_CAN_INTERRUPT: The thread can be interrupted by a kill
			signal.
	\param type The type of object the thread will be blocked at. Informative/
		for debugging purposes. Must be one of the \c THREAD_BLOCK_TYPE_*
		constants. \c THREAD_BLOCK_TYPE_OTHER implies that \a object is a
		string.
	\param object The object the thread will be blocked at. Informative/for
		debugging purposes.
*/
static inline void
thread_prepare_to_block(Thread* thread, uint32 flags, uint32 type,
	const void* object)
{
	thread->wait.flags = flags;
	thread->wait.type = type;
	thread->wait.object = object;
	atomic_set(&thread->wait.status, 1);
		// Set status last to guarantee that the other fields are initialized
		// when a thread is waiting.
}


/*!	Unblocks the specified blocked thread.

	If the thread is no longer waiting (e.g. because thread_unblock_locked() has
	already been called in the meantime), this function does not have any
	effect.

	The caller must hold the scheduler lock and the client lock (might be the
	same).

	\param thread The thread to be unblocked.
	\param status The unblocking status. That's what the unblocked thread's
		call to thread_block_locked() will return.
*/
static inline void
thread_unblock_locked(Thread* thread, status_t status)
{
	if (atomic_test_and_set(&thread->wait.status, status, 1) != 1)
		return;

	// wake up the thread, if it is sleeping
	if (thread->state == B_THREAD_WAITING)
		scheduler_enqueue_in_run_queue(thread);
}


/*!	Interrupts the specified blocked thread, if possible.

	The function checks whether the thread can be interrupted and, if so, calls
	\code thread_unblock_locked(thread, B_INTERRUPTED) \endcode. Otherwise the
	function is a no-op.

	The caller must hold the scheduler lock. Normally thread_unblock_locked()
	also requires the client lock to be held, but in this case the caller
	usually doesn't know it. This implies that the client code needs to take
	special care, if waits are interruptible. See thread_prepare_to_block() for
	more information.

	\param thread The thread to be interrupted.
	\param kill If \c false, the blocked thread is only interrupted, when the
		flag \c B_CAN_INTERRUPT was specified for the blocked thread. If
		\c true, it is only interrupted, when at least one of the flags
		\c B_CAN_INTERRUPT or \c B_KILL_CAN_INTERRUPT was specified for the
		blocked thread.
	\return \c B_OK, if the thread is interruptible and thread_unblock_locked()
		was called, \c B_NOT_ALLOWED otherwise. \c B_OK doesn't imply that the
		thread actually has been interrupted -- it could have been unblocked
		before already.
*/
static inline status_t
thread_interrupt(Thread* thread, bool kill)
{
	if (thread_is_blocked(thread)) {
		if ((thread->wait.flags & B_CAN_INTERRUPT) != 0
			|| (kill && (thread->wait.flags & B_KILL_CAN_INTERRUPT) != 0)) {
			thread_unblock_locked(thread, B_INTERRUPTED);
			return B_OK;
		}
	}

	return B_NOT_ALLOWED;
}


static inline void
thread_pin_to_current_cpu(Thread* thread)
{
	thread->pinned_to_cpu++;
}


static inline void
thread_unpin_from_current_cpu(Thread* thread)
{
	thread->pinned_to_cpu--;
}


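/*!	Marks the current thread as intending to suspend.

	This is the first of the two steps of the suspension protocol, the second
	being thread_suspend(). In between, thread_continue() may cancel the
	pending suspension by clearing \c going_to_suspend, in which case the
	subsequent thread_suspend(\c true) call becomes a no-op.
*/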
static inline void
thread_prepare_suspend()
{
	Thread* thread = thread_get_current_thread();
	thread->going_to_suspend = true;
}


static inline void
thread_suspend(bool alreadyPrepared = false)
{
	Thread* thread = thread_get_current_thread();
	if (!alreadyPrepared)
		thread_prepare_suspend();

	cpu_status state = disable_interrupts();
	acquire_spinlock(&thread->scheduler_lock);

	if (thread->going_to_suspend)
		scheduler_reschedule(B_THREAD_SUSPENDED);

	release_spinlock(&thread->scheduler_lock);
	restore_interrupts(state);
}
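
// Typical two-step use (sketch only): commit to suspending while some client
// lock is still held, then actually suspend after dropping it, so that a
// concurrent thread_continue() in between is honored rather than lost:
//
//	thread_prepare_suspend();
//	// ... release client locks ...
//	thread_suspend(true);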


static inline void
thread_continue(Thread* thread)
{
	thread->going_to_suspend = false;

	cpu_status state = disable_interrupts();
	acquire_spinlock(&thread->scheduler_lock);

	if (thread->state == B_THREAD_SUSPENDED)
		scheduler_enqueue_in_run_queue(thread);

	release_spinlock(&thread->scheduler_lock);
	restore_interrupts(state);
}


#endif /* _THREAD_H */