/*
 * Copyright 2004-2011, Haiku, Inc.
 * Distributed under the terms of the MIT License.
 *
 * Thread definition and structures
 */
#ifndef _KERNEL_THREAD_TYPES_H
#define _KERNEL_THREAD_TYPES_H


#ifndef _ASSEMBLER

#include <pthread.h>

#include <arch/thread_types.h>
#include <condition_variable.h>
#include <heap.h>
#include <ksignal.h>
#include <lock.h>
#include <smp.h>
#include <thread_defs.h>
#include <timer.h>
#include <UserTimer.h>
#include <user_debugger.h>
#include <util/DoublyLinkedList.h>
#include <util/KernelReferenceable.h>
#include <util/list.h>


enum additional_thread_state {
	THREAD_STATE_FREE_ON_RESCHED = 7, // free the thread structure upon reschedule
//	THREAD_STATE_BIRTH	// thread is being created
};

#define THREAD_MIN_SET_PRIORITY				B_LOWEST_ACTIVE_PRIORITY
#define THREAD_MAX_SET_PRIORITY				B_REAL_TIME_PRIORITY

enum team_state {
	TEAM_STATE_NORMAL,		// normal state
	TEAM_STATE_BIRTH,		// being constructed
	TEAM_STATE_SHUTDOWN,	// still lives, but is going down
	TEAM_STATE_DEATH		// only the Team object still exists, threads are
							// gone
};

#define	TEAM_FLAG_EXEC_DONE	0x01

typedef enum job_control_state {
	JOB_CONTROL_STATE_NONE,
	JOB_CONTROL_STATE_STOPPED,
	JOB_CONTROL_STATE_CONTINUED,
	JOB_CONTROL_STATE_DEAD
} job_control_state;


struct cpu_ent;
struct image;					// defined in image.c
struct io_context;
struct realtime_sem_context;	// defined in realtime_sem.cpp
struct scheduler_thread_data;
struct select_info;
struct user_thread;				// defined in libroot/user_thread.h
struct VMAddressSpace;
struct xsi_sem_context;			// defined in xsi_semaphore.cpp

namespace BKernel {
	struct Team;
	struct Thread;
	struct ProcessGroup;
}


struct thread_death_entry {
	struct list_link	link;
	thread_id			thread;
	status_t			status;
};

struct team_loading_info {
	Thread*				thread;	// the waiting thread
	status_t			result;		// the result of the loading
	bool				done;		// set when loading is done/aborted
};

struct team_watcher {
	struct list_link	link;
	void				(*hook)(team_id team, void *data);
	void				*data;
};


#define MAX_DEAD_CHILDREN	32
	// this is a soft limit for the number of child death entries in a team
#define MAX_DEAD_THREADS	32
	// this is a soft limit for the number of thread death entries in a team


struct job_control_entry : DoublyLinkedListLinkImpl<job_control_entry> {
	job_control_state	state;		// current team job control state
	thread_id			thread;		// main thread ID == team ID
	uint16				signal;		// signal causing the current state
	bool				has_group_ref;
	uid_t				signaling_user;

	// valid while state != JOB_CONTROL_STATE_DEAD
	BKernel::Team*		team;

	// valid when state == JOB_CONTROL_STATE_DEAD
	pid_t				group_id;
	status_t			status;
	uint16				reason;		// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>

	job_control_entry();
	~job_control_entry();

	void InitDeadState();

	job_control_entry& operator=(const job_control_entry& other);
};

typedef DoublyLinkedList<job_control_entry> JobControlEntryList;

struct team_job_control_children {
	JobControlEntryList		entries;
};

struct team_dead_children : team_job_control_children {
	ConditionVariable	condition_variable;
	uint32				count;
	bigtime_t			kernel_time;
	bigtime_t			user_time;
};
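
/* Usage sketch (illustrative only): a wait_for_child()-style caller can block
 * on dead_children.condition_variable until a death entry arrives. The exact
 * locking protocol is an assumption here; see <condition_variable.h> for the
 * entry API.
 *
 *	ConditionVariableEntry entry;
 *	team->dead_children.condition_variable.Add(&entry);
 *	team->Unlock();
 *	status_t error = entry.Wait(B_CAN_INTERRUPT);
 */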


struct team_death_entry {
	int32				remaining_threads;
	ConditionVariable	condition;
};


struct free_user_thread {
	struct free_user_thread*	next;
	struct user_thread*			thread;
};
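
/* Usage sketch (illustrative only; reuse_user_thread() is hypothetical):
 * free_user_thread forms a singly linked free list through which a team
 * recycles user_thread structures allocated from its user_data area.
 *
 *	static user_thread*
 *	reuse_user_thread(Team* team)	// caller holds the team's lock
 *	{
 *		free_user_thread* entry = team->free_user_threads;
 *		if (entry == NULL)
 *			return NULL;
 *		team->free_user_threads = entry->next;
 *		user_thread* userThread = entry->thread;
 *		free(entry);
 *		return userThread;
 *	}
 */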


class AssociatedDataOwner;

class AssociatedData : public BReferenceable,
	public DoublyLinkedListLinkImpl<AssociatedData> {
public:
								AssociatedData();
	virtual						~AssociatedData();

			AssociatedDataOwner* Owner() const
									{ return fOwner; }
			void				SetOwner(AssociatedDataOwner* owner)
									{ fOwner = owner; }

	virtual	void				OwnerDeleted(AssociatedDataOwner* owner);

private:
			AssociatedDataOwner* fOwner;
};


class AssociatedDataOwner {
public:
								AssociatedDataOwner();
								~AssociatedDataOwner();

			bool				AddData(AssociatedData* data);
			bool				RemoveData(AssociatedData* data);

			void				PrepareForDeletion();

private:
			typedef DoublyLinkedList<AssociatedData> DataList;

private:

			mutex				fLock;
			DataList			fList;
};
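
/* Usage sketch (illustrative only; MyData is a hypothetical AssociatedData
 * subclass): an owner adopts reference-counted data via AddData(), which is
 * assumed to fail once PrepareForDeletion() has run.
 *
 *	MyData* data = new(std::nothrow) MyData;
 *	if (data != NULL && !team->AddData(data))
 *		data->ReleaseReference();	// owner already going away
 */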


typedef int32 (*thread_entry_func)(thread_func, void *);
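
/* A function matching thread_entry_func receives the thread's actual entry
 * point plus its argument, for instance a kernel-to-thread trampoline
 * (common_thread_entry() below is a made-up name):
 *
 *	static int32
 *	common_thread_entry(thread_func function, void* argument)
 *	{
 *		return function(argument);
 *	}
 */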


namespace BKernel {


template<typename IDType>
struct TeamThreadIteratorEntry
	: DoublyLinkedListLinkImpl<TeamThreadIteratorEntry<IDType> > {
	typedef IDType	id_type;
	typedef TeamThreadIteratorEntry<id_type> iterator_type;

	id_type	id;			// -1 for iterator entries, >= 0 for actual elements
	bool	visible;	// the entry is publicly visible
};
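
/* Iterator entries are linked into the same list as the real Team/Thread
 * objects; id == -1 marks them as sentinels that traversals skip. A sketch
 * (list type and helper names are assumptions):
 *
 *	while ((entry = list.GetNext(entry)) != NULL) {
 *		if (entry->id >= 0 && entry->visible)
 *			return entry;	// a real, visible element
 *	}
 */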


struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
		AssociatedDataOwner {
	DoublyLinkedListLink<Team>	global_list_link;
	Team			*hash_next;		// next in hash
	Team			*siblings_next;	// next in parent's list; protected by
									// parent's fLock
	Team			*parent;		// write-protected by both parent (if any)
									// and this team's fLock
	Team			*children;		// protected by this team's fLock;
									// adding/removing a child also requires the
									// child's fLock
	Team			*group_next;	// protected by the group's lock

	int64			serial_number;	// immutable after adding team to hash

	// process group info -- write-protected by the group's lock, the team's
	// lock, and the team's parent's lock
	pid_t			group_id;
	pid_t			session_id;
	ProcessGroup	*group;

	int				num_threads;	// number of threads in this team
	int				state;			// current team state, see above
	int32			flags;
	struct io_context *io_context;
	struct realtime_sem_context	*realtime_sem_context;
	struct xsi_sem_context *xsi_sem_context;
	struct team_death_entry *death_entry;	// protected by fLock
	struct list		dead_threads;
	int				dead_threads_count;

	// protected by the team's fLock
	team_dead_children dead_children;
	team_job_control_children stopped_children;
	team_job_control_children continued_children;

	// protected by the parent team's fLock
	struct job_control_entry* job_control_entry;

	VMAddressSpace	*address_space;
	Thread			*main_thread;	// protected by fLock and the scheduler
									// lock (and the thread's lock), immutable
									// after first set
	Thread			*thread_list;	// protected by fLock and the scheduler lock
	struct team_loading_info *loading_info;	// protected by fLock
	struct list		image_list;		// protected by sImageMutex
	struct list		watcher_list;
	struct list		sem_list;		// protected by sSemsSpinlock
	struct list		port_list;		// protected by sPortsLock
	struct arch_team arch_info;

	addr_t			user_data;
	area_id			user_data_area;
	size_t			user_data_size;
	size_t			used_user_data;
	struct free_user_thread* free_user_threads;

	struct team_debug_info debug_info;

	// protected by scheduler lock
	bigtime_t		dead_threads_kernel_time;
	bigtime_t		dead_threads_user_time;
	bigtime_t		cpu_clock_offset;

	// user group information; protected by fLock, the *_uid/*_gid fields also
	// by the scheduler lock
	uid_t			saved_set_uid;
	uid_t			real_uid;
	uid_t			effective_uid;
	gid_t			saved_set_gid;
	gid_t			real_gid;
	gid_t			effective_gid;
	gid_t*			supplementary_groups;
	int				supplementary_group_count;

	// Exit status information. Set when the first terminal event occurs,
	// immutable afterwards. Protected by fLock.
	struct {
		uint16		reason;			// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
		uint16		signal;			// signal killing the team
		uid_t		signaling_user;	// real UID of the signal sender
		status_t	status;			// exit status, if normal team exit
		bool		initialized;	// true when the state has been initialized
	} exit;

public:
								~Team();

	static	Team*				Create(team_id id, const char* name,
									bool kernel);
	static	Team*				Get(team_id id);
	static	Team*				GetAndLock(team_id id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			void				LockTeamAndParent(bool dontLockParentIfKernel);
			void				UnlockTeamAndParent();
			void				LockTeamAndProcessGroup();
			void				UnlockTeamAndProcessGroup();
			void				LockTeamParentAndProcessGroup();
			void				UnlockTeamParentAndProcessGroup();
			void				LockProcessGroup()
									{ LockTeamAndProcessGroup(); Unlock(); }

			const char*			Name() const	{ return fName; }
			void				SetName(const char* name);

			const char*			Args() const	{ return fArgs; }
			void				SetArgs(const char* args);
			void				SetArgs(const char* path,
									const char* const* otherArgs,
									int otherArgCount);

			BKernel::QueuedSignalsCounter* QueuedSignalsCounter() const
									{ return fQueuedSignalsCounter; }
			sigset_t			PendingSignals() const
									{ return fPendingSignals.AllSignals(); }

			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			struct sigaction&	SignalActionFor(int32 signal)
									{ return fSignalActions[signal - 1]; }
			void				InheritSignalActions(Team* parent);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			bool				CheckAddUserDefinedTimer();
			void				UserDefinedTimersRemoved(int32 count);

			void				UserTimerActivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerActivated(TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				UserTimerDeactivated(
									TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
									// both total and user CPU timers
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			bool				HasActiveUserTimeUserTimers() const
									{ return !fUserTimeUserTimers.IsEmpty(); }
			TeamTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }
	inline	TeamUserTimeUserTimerList::ConstIterator
									UserTimeUserTimerIterator() const;

			bigtime_t			CPUTime(bool ignoreCurrentRun) const;
			bigtime_t			UserCPUTime() const;

private:
								Team(team_id id, bool kernel);

private:
			mutex				fLock;
			char				fName[B_OS_NAME_LENGTH];
			char				fArgs[64];
									// contents for the team_info::args field

			BKernel::QueuedSignalsCounter* fQueuedSignalsCounter;
			BKernel::PendingSignals	fPendingSignals;
									// protected by scheduler lock
			struct sigaction 	fSignalActions[MAX_SIGNAL_NUMBER];
									// indexed signal - 1, protected by fLock

			UserTimerList		fUserTimers;			// protected by fLock
			TeamTimeUserTimerList fCPUTimeUserTimers;
									// protected by scheduler lock
			TeamUserTimeUserTimerList fUserTimeUserTimers;
			vint32				fUserDefinedTimerCount;	// accessed atomically
};
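
/* Usage sketch (illustrative only): the static getters return a referenced
 * Team; GetAndLock() additionally acquires fLock, so a typical caller is:
 *
 *	Team* team = Team::GetAndLock(id);	// NULL if the team is gone
 *	if (team != NULL) {
 *		// ... access fields protected by fLock ...
 *		team->UnlockAndReleaseReference();
 *	}
 */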


struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
	int32			flags;			// summary of events relevant in interrupt
									// handlers (signals pending, user debugging
									// enabled, etc.)
	int64			serial_number;	// immutable after adding thread to hash
	Thread			*hash_next;		// protected by thread hash lock
	Thread			*team_next;		// protected by team lock and fLock
	Thread			*queue_next;	// protected by scheduler lock
	timer			alarm;			// protected by scheduler lock
	char			name[B_OS_NAME_LENGTH];	// protected by fLock
	int32			priority;		// protected by scheduler lock
	int32			next_priority;	// protected by scheduler lock
	int32			io_priority;	// protected by fLock
	int32			state;			// protected by scheduler lock
	int32			next_state;		// protected by scheduler lock
	struct cpu_ent	*cpu;			// protected by scheduler lock
	struct cpu_ent	*previous_cpu;	// protected by scheduler lock
	int32			pinned_to_cpu;	// only accessed by this thread or in the
									// scheduler, when thread is not running

	sigset_t		sig_block_mask;	// protected by scheduler lock,
									// only modified by the thread itself
	sigset_t		sigsuspend_original_unblocked_mask;
		// non-0 after a return from _user_sigsuspend(), containing the inverted
		// original signal mask, reset in handle_signals(); only accessed by
		// this thread
	ucontext_t*		user_signal_context;	// only accessed by this thread
	addr_t			signal_stack_base;		// only accessed by this thread
	size_t			signal_stack_size;		// only accessed by this thread
	bool			signal_stack_enabled;	// only accessed by this thread

	bool			in_kernel;		// protected by time_lock, only written by
									// this thread
	bool			was_yielded;	// protected by scheduler lock
	struct scheduler_thread_data* scheduler_data; // protected by scheduler lock

	struct user_thread*	user_thread;	// write-protected by fLock, only
										// modified by the thread itself and
										// thus freely readable by it

	void 			(*cancel_function)(int);

	struct {
		uint8		parameters[SYSCALL_RESTART_PARAMETER_SIZE];
	} syscall_restart;

	struct {
		status_t	status;				// current wait status
		uint32		flags;				// interruptible flags
		uint32		type;				// type of the object waited on
		const void*	object;				// pointer to the object waited on
		timer		unblock_timer;		// timer for block with timeout
	} wait;

	struct PrivateConditionVariableEntry *condition_variable_entry;

	struct {
		sem_id		write_sem;	// acquired by writers before writing
		sem_id		read_sem;	// released by writers after writing, acquired
								// by this thread when reading
		thread_id	sender;
		int32		code;
		size_t		size;
		void*		buffer;
	} msg;	// write_sem/read_sem are protected by fLock when accessed by
			// others; the other fields are protected by write_sem/read_sem

	addr_t			fault_handler;
	int32			page_faults_allowed;
		/* this field may only stay in debug builds in the future */

	BKernel::Team	*team;	// protected by team lock, thread lock, scheduler
							// lock

	struct {
		sem_id		sem;		// immutable after thread creation
		status_t	status;		// accessed only by this thread
		struct list	waiters;	// protected by fLock
	} exit;

	struct select_info *select_infos;	// protected by fLock

	struct thread_debug_info debug_info;

	// stack
	area_id			kernel_stack_area;	// immutable after thread creation
	addr_t			kernel_stack_base;	// immutable after thread creation
	addr_t			kernel_stack_top;	// immutable after thread creation
	area_id			user_stack_area;	// protected by thread lock
	addr_t			user_stack_base;	// protected by thread lock
	size_t			user_stack_size;	// protected by thread lock

	addr_t			user_local_storage;
		// usually allocated at the safe side of the stack
	int				kernel_errno;
		// kernel "errno" differs from its userspace alter ego

	// user_time, kernel_time, and last_time are only written by the thread
	// itself, so they can be read by the thread without lock. Holding the
	// scheduler lock and checking that the thread does not run also guarantees
	// that the times will not change.
	spinlock		time_lock;
	bigtime_t		user_time;			// protected by time_lock
	bigtime_t		kernel_time;		// protected by time_lock
	bigtime_t		last_time;			// protected by time_lock
	bigtime_t		cpu_clock_offset;	// protected by scheduler lock

	void			(*post_interrupt_callback)(void*);
	void*			post_interrupt_data;

	// architecture dependent section
	struct arch_thread arch_info;

public:
								Thread() {}
									// dummy for the idle threads
								Thread(const char *name, thread_id threadID,
									struct cpu_ent *cpu);
								~Thread();

	static	status_t			Create(const char* name, Thread*& _thread);

	static	Thread*				Get(thread_id id);
	static	Thread*				GetAndLock(thread_id id);
	static	Thread*				GetDebug(thread_id id);
									// in kernel debugger only

	static	bool				IsAlive(thread_id id);

			void*				operator new(size_t size);
			void*				operator new(size_t, void* pointer);
			void				operator delete(void* pointer, size_t size);

			status_t			Init(bool idleThread);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			bool				IsAlive() const;

			bool				IsRunning() const
									{ return cpu != NULL; }
									// scheduler lock must be held

			sigset_t			ThreadPendingSignals() const
									{ return fPendingSignals.AllSignals(); }
	inline	sigset_t			AllPendingSignals() const;
			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			void				UserTimerActivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			ThreadTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }

	inline	bigtime_t			CPUTime(bool ignoreCurrentRun) const;

private:
			mutex				fLock;

			BKernel::PendingSignals	fPendingSignals;
									// protected by scheduler lock

			UserTimerList		fUserTimers;			// protected by fLock
			ThreadTimeUserTimerList fCPUTimeUserTimers;
									// protected by scheduler lock
};
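
/* Usage sketch (illustrative only): mirrors the Team pattern above -- Get()
 * returns a referenced Thread, and fLock guards fields such as name and
 * io_priority:
 *
 *	Thread* thread = Thread::Get(id);
 *	if (thread != NULL) {
 *		thread->Lock();
 *		// ... access fields protected by fLock ...
 *		thread->UnlockAndReleaseReference();
 *	}
 */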


struct ProcessSession : BReferenceable {
	pid_t				id;
	int32				controlling_tty;	// index of the controlling tty,
											// -1 if none
	pid_t				foreground_group;

public:
								ProcessSession(pid_t id);
								~ProcessSession();

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

private:
			mutex				fLock;
};


struct ProcessGroup : KernelReferenceable {
	struct ProcessGroup *next;		// next in hash
	pid_t				id;
	BKernel::Team		*teams;

public:
								ProcessGroup(pid_t id);
								~ProcessGroup();

	static	ProcessGroup*		Get(pid_t id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			ProcessSession*		Session() const
									{ return fSession; }
			void				Publish(ProcessSession* session);
			void				PublishLocked(ProcessSession* session);

			bool				IsOrphaned() const;

			void				ScheduleOrphanedCheck();
			void				UnsetOrphanedCheck();

public:
			SinglyLinkedListLink<ProcessGroup> fOrphanedCheckListLink;

private:
			mutex				fLock;
			ProcessSession*		fSession;
			bool				fInOrphanedCheckList;	// protected by
														// sOrphanedCheckLock
};

typedef SinglyLinkedList<ProcessGroup,
	SinglyLinkedListMemberGetLink<ProcessGroup,
		&ProcessGroup::fOrphanedCheckListLink> > ProcessGroupList;


/*!	\brief Allows iterating through all teams.
*/
struct TeamListIterator {
								TeamListIterator();
								~TeamListIterator();

			Team*				Next();

private:
			TeamThreadIteratorEntry<team_id> fEntry;
};


/*!	\brief Allows iterating through all threads.
*/
struct ThreadListIterator {
								ThreadListIterator();
								~ThreadListIterator();

			Thread*				Next();

private:
			TeamThreadIteratorEntry<thread_id> fEntry;
};
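
/* Usage sketch (illustrative only): both iterators are driven the same way.
 * That Next() returns a referenced object is an assumption based on the usual
 * kernel convention, hence the ReleaseReference():
 *
 *	ThreadListIterator iterator;
 *	while (Thread* thread = iterator.Next()) {
 *		// ... inspect the thread ...
 *		thread->ReleaseReference();
 *	}
 */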


inline int32
Team::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Team::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


inline TeamUserTimeUserTimerList::ConstIterator
Team::UserTimeUserTimerIterator() const
{
	return fUserTimeUserTimers.GetIterator();
}


inline sigset_t
Thread::AllPendingSignals() const
{
	return fPendingSignals.AllSignals() | team->PendingSignals();
}


inline int32
Thread::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Thread::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


/*!	Returns the thread's current total CPU time (kernel + user + offset).

	The caller must hold the scheduler lock.

	\param ignoreCurrentRun If \c true and the thread is currently running,
		don't add the time since the last time \c last_time was updated. Should
		be used in "thread unscheduled" scheduler callbacks, since although the
		thread is still running at that time, its time has already been stopped.
	\return The thread's current total CPU time.
*/
inline bigtime_t
Thread::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t time = user_time + kernel_time + cpu_clock_offset;

	// If currently running, also add the time since the last check, unless
	// requested otherwise.
	if (!ignoreCurrentRun && cpu != NULL)
		time += system_time() - last_time;

	return time;
}


}	// namespace BKernel

using BKernel::Team;
using BKernel::TeamListIterator;
using BKernel::Thread;
using BKernel::ThreadListIterator;
using BKernel::ProcessSession;
using BKernel::ProcessGroup;
using BKernel::ProcessGroupList;


struct thread_queue {
	Thread*	head;
	Thread*	tail;
};
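
/* Illustrative only (thread_queue_enqueue() is hypothetical): thread_queue is
 * a plain FIFO whose elements chain through Thread::queue_next; the caller is
 * assumed to hold whatever lock protects the queue (e.g. the scheduler lock).
 *
 *	static void
 *	thread_queue_enqueue(thread_queue* queue, Thread* thread)
 *	{
 *		thread->queue_next = NULL;
 *		if (queue->tail != NULL)
 *			queue->tail->queue_next = thread;
 *		else
 *			queue->head = thread;
 *		queue->tail = thread;
 *	}
 */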


#endif	// !_ASSEMBLER


// bits for the thread::flags field
#define	THREAD_FLAGS_SIGNALS_PENDING		0x0001
	// unblocked signals are pending (computed flag for optimization purposes)
#define	THREAD_FLAGS_DEBUG_THREAD			0x0002
	// forces the thread into the debugger as soon as possible (set by
	// debug_thread())
#define	THREAD_FLAGS_SINGLE_STEP			0x0004
	// indicates that the thread is in single-step mode (in userland)
#define	THREAD_FLAGS_DEBUGGER_INSTALLED		0x0008
	// a debugger is installed for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_DEFINED	0x0010
	// hardware breakpoints are defined for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_INSTALLED	0x0020
	// breakpoints are currently installed for the thread (i.e. the hardware is
	// actually set up to trigger debug events for them)
#define	THREAD_FLAGS_64_BIT_SYSCALL_RETURN	0x0040
	// set by 64 bit return value syscalls
#define	THREAD_FLAGS_RESTART_SYSCALL		0x0080
	// set by handle_signals(), if the current syscall shall be restarted
#define	THREAD_FLAGS_DONT_RESTART_SYSCALL	0x0100
	// explicitly disables automatic syscall restarts (e.g. resume_thread())
#define	THREAD_FLAGS_ALWAYS_RESTART_SYSCALL	0x0200
	// force syscall restart, even if a signal handler without SA_RESTART was
	// invoked (e.g. sigwait())
#define	THREAD_FLAGS_SYSCALL_RESTARTED		0x0400
	// the current syscall has been restarted
#define	THREAD_FLAGS_SYSCALL				0x0800
	// the thread is currently in a syscall; set/reset only for certain
	// functions (e.g. ioctl()) to allow inner functions to discriminate
	// whether e.g. parameters were passed from userland or kernel
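
/* Usage sketch (illustrative only): Thread::flags is shared with interrupt
 * handlers, so writers use the atomic ops from <SupportDefs.h>, e.g. set a
 * bit with atomic_or() and test-and-clear one with atomic_and():
 *
 *	atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
 *
 *	if ((atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL)
 *			& THREAD_FLAGS_RESTART_SYSCALL) != 0) {
 *		// the flag was set -- restart the syscall
 *	}
 */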


#endif	/* _KERNEL_THREAD_TYPES_H */
