#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <asm/param.h>	/* for HZ */

extern unsigned long event;

#include <linux/config.h>
#include <linux/binfmts.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/timex.h>
#include <linux/rbtree.h>

#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>

#include <linux/smp.h>
#include <linux/tty.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>

struct exec_domain;

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PID	0x00001000	/* set if pid shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */

#define CLONE_SIGNAL	(CLONE_SIGHAND | CLONE_THREAD)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ)		/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
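/*
 * Illustrative sketch (not part of the original header): how timer code is
 * expected to use CALC_LOAD.  With FSHIFT == 11, FIXED_1 == 2048 stands for
 * 1.0, so the count of runnable tasks must be scaled by FIXED_1 before it
 * is fed in.  The helper name and its parameter below are hypothetical.
 */
#if 0	/* example only -- never compiled */
static void example_update_avenrun(unsigned long active_tasks)
{
	/* active_tasks is nr_running * FIXED_1, sampled once per LOAD_FREQ */
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);	/* 1-minute average */
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);	/* 5-minute average */
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);	/* 15-minute average */
}
#endif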
#define CT_TO_SECS(x)	((x) / HZ)
#define CT_TO_USECS(x)	(((x) % HZ) * 1000000/HZ)

extern int nr_running, nr_threads;
extern int last_pid;

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#ifdef __KERNEL__
#include <linux/timer.h>
#endif

#include <asm/processor.h>

#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_ZOMBIE		4
#define TASK_STOPPED		8

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#ifdef CONFIG_SMP
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))
#else
#define set_task_state(tsk, state_value)		\
	__set_task_state((tsk), (state_value))
#endif

#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#ifdef CONFIG_SMP
#define set_current_state(state_value)			\
	set_mb(current->state, (state_value))
#else
#define set_current_state(state_value)			\
	__set_current_state(state_value)
#endif

/*
 * Scheduling policies
 */
#define SCHED_OTHER		0
#define SCHED_FIFO		1
#define SCHED_RR		2

/*
 * This is an additional bit set when we want to
 * yield the CPU for one re-schedule..
 */
#define SCHED_YIELD		0x10

struct sched_param {
	int sched_priority;
};

struct completion;

#ifdef __KERNEL__

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t runqueue_lock;
extern spinlock_t mmlist_lock;

extern void sched_init(void);
extern void init_idle(void);
extern void show_state(void);
extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void update_one_process(struct task_struct *p, unsigned long user,
			       unsigned long system, int cpu);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
asmlinkage void schedule(void);

extern int schedule_task(struct tq_struct *task);
extern void flush_scheduled_tasks(void);
extern int start_context_thread(void);
extern int current_is_keventd(void);

/*
 * The default fd array needs to be at least BITS_PER_LONG,
 * as this is the granularity returned by copy_fdset().
 */
#define NR_OPEN_DEFAULT BITS_PER_LONG

struct namespace;
/*
 * Open file table structure
 */
struct files_struct {
	atomic_t count;
	rwlock_t file_lock;	/* Protects all the below members.
				 * Nests inside tsk->alloc_lock */
	int max_fds;
	int max_fdset;
	int next_fd;
	struct file ** fd;	/* current fd array */
	fd_set *close_on_exec;
	fd_set *open_fds;
	fd_set close_on_exec_init;
	fd_set open_fds_init;
	struct file * fd_array[NR_OPEN_DEFAULT];
};
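/*
 * Illustrative sketch (not part of the original header): looking up a
 * struct file from a descriptor while honouring file_lock.  In-tree code
 * uses the fcheck()/fget() helpers from linux/file.h for this; the helper
 * name below is hypothetical.
 */
#if 0	/* example only -- never compiled */
static struct file *example_lookup_fd(struct files_struct *files, int fd)
{
	struct file *file = NULL;

	read_lock(&files->file_lock);
	if (fd < files->max_fds)
		file = files->fd[fd];
	read_unlock(&files->file_lock);
	return file;
}
#endif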
#define INIT_FILES \
{							\
	count:		ATOMIC_INIT(1),			\
	file_lock:	RW_LOCK_UNLOCKED,		\
	max_fds:	NR_OPEN_DEFAULT,		\
	max_fdset:	__FD_SETSIZE,			\
	next_fd:	0,				\
	fd:		&init_files.fd_array[0],	\
	close_on_exec:	&init_files.close_on_exec_init,	\
	open_fds:	&init_files.open_fds_init,	\
	close_on_exec_init: { { 0, } },			\
	open_fds_init:	{ { 0, } },			\
	fd_array:	{ NULL, }			\
}

/* Maximum number of active map areas.. This is a random (large) number */
#define DEFAULT_MAX_MAP_COUNT	(65536)

extern int max_map_count;

struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	rb_root_t mm_rb;
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */
	struct rw_semaphore mmap_sem;
	spinlock_t page_table_lock;		/* Protects task page tables and mm->rss */

	struct list_head mmlist;		/* List of all active mm's.  These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;
	unsigned long rss, total_vm, locked_vm;
	unsigned long def_flags;
	unsigned long cpu_vm_mask;
	unsigned long swap_address;

	unsigned dumpable:1;

	/* Architecture-specific MM context */
	mm_context_t context;
};

extern int mmlist_nr;

#define INIT_MM(name) \
{						\
	mm_rb:		RB_ROOT,		\
	pgd:		swapper_pg_dir,		\
	mm_users:	ATOMIC_INIT(2),		\
	mm_count:	ATOMIC_INIT(1),		\
	mmap_sem:	__RWSEM_INITIALIZER(name.mmap_sem), \
	page_table_lock: SPIN_LOCK_UNLOCKED,	\
	mmlist:		LIST_HEAD_INIT(name.mmlist), \
}
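/*
 * Illustrative sketch (not part of the original header): the two mm
 * reference counts serve different lifetimes.  mm_users counts tasks that
 * have the user mappings; mm_count counts "lazy" kernel references and
 * keeps the struct itself alive.  mmput() drops a user, mmdrop() drops a
 * struct reference (both are declared further down).  The helper name is
 * hypothetical.
 */
#if 0	/* example only -- never compiled */
static void example_borrow_mm(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);	/* pin the struct, not the mappings */
	/* ... e.g. use mm as a lazy-TLB active_mm ... */
	mmdrop(mm);			/* frees mm when mm_count hits zero */
}
#endif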
struct signal_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
};


#define INIT_SIGNALS {	\
	count:		ATOMIC_INIT(1),		\
	action:		{ {{0,}}, },		\
	siglock:	SPIN_LOCK_UNLOCKED	\
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */

	/* Hash table maintenance information */
	struct user_struct *next, **pprev;
	uid_t uid;
};

#define get_current_user() ({				\
	struct user_struct *__user = current->user;	\
	atomic_inc(&__user->__count);			\
	__user; })

extern struct user_struct root_user;
#define INIT_USER (&root_user)
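/*
 * Illustrative sketch (not part of the original header): get_current_user()
 * returns current->user with its reference count raised, so every call must
 * eventually be balanced by free_uid() (declared further down).  The helper
 * name is hypothetical.
 */
#if 0	/* example only -- never compiled */
static void example_charge_user(void)
{
	struct user_struct *user = get_current_user();

	/* ... account something against this uid ... */
	free_uid(user);		/* drop the reference taken above */
}
#endif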
struct task_struct {
	/*
	 * offsets of these are hardcoded elsewhere - touch with care
	 */
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	unsigned long flags;	/* per process flags, defined below */
	int sigpending;
	mm_segment_t addr_limit;	/* thread address space:
						0-0xBFFFFFFF for user-thread
						0-0xFFFFFFFF for kernel-thread
					 */
	struct exec_domain *exec_domain;
	volatile long need_resched;
	unsigned long ptrace;

	int lock_depth;		/* Lock depth */

/*
 * offset 32 begins here on 32-bit platforms. We keep
 * all fields in a single cacheline that are needed for
 * the goodness() loop in schedule().
 */
	long counter;
	long nice;
	unsigned long policy;
	struct mm_struct *mm;
	int processor;
	/*
	 * cpus_runnable is ~0 if the process is not running on any
	 * CPU. It's (1 << cpu) if it's running on a CPU. This mask
	 * is updated under the runqueue lock.
	 *
	 * To determine whether a process might run on a CPU, this
	 * mask is AND-ed with cpus_allowed.
	 */
	unsigned long cpus_runnable, cpus_allowed;
	/*
	 * (only the 'next' pointer fits into the cacheline, but
	 * that's just fine.)
	 */
	struct list_head run_list;
	unsigned long sleep_time;
#ifdef CONFIG_KERNPROF
	unsigned long stop_time, wakeup_time;
#endif
	struct task_struct *next_task, *prev_task;
	struct mm_struct *active_mm;
	struct list_head local_pages;
	unsigned int allocation_order, nr_local_pages;

/* task state */
	struct linux_binfmt *binfmt;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	/* ??? */
	unsigned long personality;
	int did_exec:1;
	pid_t pid;
	pid_t pgrp;
	pid_t tty_old_pgrp;
	pid_t session;
	pid_t tgid;
	/* boolean value for session group leader */
	int leader;
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->p_pptr->pid)
	 */
	struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
	struct list_head thread_group;

	/* PID hash table linkage. */
	struct task_struct *pidhash_next;
	struct task_struct **pidhash_pprev;

	wait_queue_head_t wait_chldexit;	/* for wait4() */
	struct completion *vfork_done;		/* for vfork() */
	unsigned long rt_priority;
	unsigned long it_real_value, it_prof_value, it_virt_value;
	unsigned long it_real_incr, it_prof_incr, it_virt_incr;
	struct timer_list real_timer;
	struct tms times;
	unsigned long start_time;
	long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS];
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
	int swappable:1;
/* process credentials */
	uid_t uid,euid,suid,fsuid;
	gid_t gid,egid,sgid,fsgid;
	int ngroups;
	gid_t	groups[NGROUPS];
	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted;
	int keep_capabilities:1;
	struct user_struct *user;
/* limits */
	struct rlimit rlim[RLIM_NLIMITS];
	unsigned short used_math;
	char comm[16];
/* file system info */
	int link_count, total_link_count;
	struct tty_struct *tty;	/* NULL if no tty */
	unsigned int locks;	/* How many file locks are being held */
/* ipc stuff */
	struct sem_undo *semundo;
	struct sem_queue *semsleeping;
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespace */
	struct namespace *namespace;
/* signal handlers */
	spinlock_t sigmask_lock;	/* Protects signal and blocked */
	struct signal_struct *sig;

	sigset_t blocked;
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty */
	spinlock_t alloc_lock;

/* journalling filesystem info */
	void *journal_info;
};

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486 */
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_MEMDIE	0x00001000	/* Killed for out-of-memory */
#define PF_FREE_PAGES	0x00002000	/* per process page freeing */
#define PF_NOIO		0x00004000	/* avoid generating further I/O */

#define PF_USEDFPU	0x00100000	/* task used FPU this quantum (SMP) */

/*
 * Ptrace flags
 */

#define PT_PTRACED	0x00000001
#define PT_TRACESYS	0x00000002
#define PT_DTRACE	0x00000004	/* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD	0x00000008
#define PT_PTRACE_CAP	0x00000010	/* ptracer can follow suid-exec */
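/*
 * Illustrative sketch (not part of the original header): per-process flags
 * are plain bits in current->flags, set and cleared by the task itself.
 * This mirrors how memory-reclaim code marks itself with PF_MEMALLOC so
 * recursive allocation is recognised; the helper name is hypothetical.
 */
#if 0	/* example only -- never compiled */
static void example_enter_memalloc(void)
{
	current->flags |= PF_MEMALLOC;	/* we are the allocator: don't recurse */
	/* ... reclaim or allocate memory ... */
	current->flags &= ~PF_MEMALLOC;
}
#endif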
/*
 * Limit the stack to some sane default: root can always
 * increase this limit if needed..  8MB seems reasonable.
 */
#define _STK_LIM	(8*1024*1024)

#define DEF_COUNTER	(10*HZ/100)	/* 100 ms time slice */
#define MAX_COUNTER	(20*HZ/100)
#define DEF_NICE	(0)

extern void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;

/*
 * INIT_TASK is used to set up the first task table, touch at
 * your own risk! Base=0, limit=0x1fffff (=2MB)
 */
#define INIT_TASK(tsk)	\
{									\
    state:		0,						\
    flags:		0,						\
    sigpending:		0,						\
    addr_limit:		KERNEL_DS,					\
    exec_domain:	&default_exec_domain,				\
    lock_depth:		-1,						\
    counter:		DEF_COUNTER,					\
    nice:		DEF_NICE,					\
    policy:		SCHED_OTHER,					\
    mm:			NULL,						\
    active_mm:		&init_mm,					\
    cpus_runnable:	-1,						\
    cpus_allowed:	-1,						\
    run_list:		LIST_HEAD_INIT(tsk.run_list),			\
    next_task:		&tsk,						\
    prev_task:		&tsk,						\
    p_opptr:		&tsk,						\
    p_pptr:		&tsk,						\
    thread_group:	LIST_HEAD_INIT(tsk.thread_group),		\
    wait_chldexit:	__WAIT_QUEUE_HEAD_INITIALIZER(tsk.wait_chldexit),\
    real_timer:		{						\
	function:		it_real_fn				\
    },									\
    cap_effective:	CAP_INIT_EFF_SET,				\
    cap_inheritable:	CAP_INIT_INH_SET,				\
    cap_permitted:	CAP_FULL_SET,					\
    keep_capabilities:	0,						\
    rlim:		INIT_RLIMITS,					\
    user:		INIT_USER,					\
    comm:		"swapper",					\
    thread:		INIT_THREAD,					\
    fs:			&init_fs,					\
    files:		&init_files,					\
    sigmask_lock:	SPIN_LOCK_UNLOCKED,				\
    sig:		&init_signals,					\
    pending:		{ NULL, &tsk.pending.head, {{0}}},		\
    blocked:		{{0}},						\
    alloc_lock:		SPIN_LOCK_UNLOCKED,				\
    journal_info:	NULL,						\
}


#ifndef INIT_TASK_SIZE
# define INIT_TASK_SIZE	2048*sizeof(long)
#endif

union task_union {
	struct task_struct task;
	unsigned long stack[INIT_TASK_SIZE/sizeof(long)];
};

extern union task_union init_task_union;

extern struct mm_struct init_mm;
extern struct task_struct *init_tasks[NR_CPUS];

/* PID hashing. (shouldn't this be dynamic?) */
#define PIDHASH_SZ (4096 >> 2)
extern struct task_struct *pidhash[PIDHASH_SZ];

#define pid_hashfn(x)	((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))

static inline void hash_pid(struct task_struct *p)
{
	struct task_struct **htable = &pidhash[pid_hashfn(p->pid)];

	if((p->pidhash_next = *htable) != NULL)
		(*htable)->pidhash_pprev = &p->pidhash_next;
	*htable = p;
	p->pidhash_pprev = htable;
}

static inline void unhash_pid(struct task_struct *p)
{
	if(p->pidhash_next)
		p->pidhash_next->pidhash_pprev = p->pidhash_pprev;
	*p->pidhash_pprev = p->pidhash_next;
}

static inline struct task_struct *find_task_by_pid(int pid)
{
	struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)];

	for(p = *htable; p && p->pid != pid; p = p->pidhash_next)
		;

	return p;
}

#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL)

static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu)
{
	tsk->processor = cpu;
	tsk->cpus_runnable = 1UL << cpu;
}

static inline void task_release_cpu(struct task_struct *tsk)
{
	tsk->cpus_runnable = ~0UL;
}
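/*
 * Illustrative sketch (not part of the original header): find_task_by_pid()
 * walks pidhash without taking any lock itself, so callers are expected to
 * hold tasklist_lock for reading across the lookup and any use of the
 * result.  The helper name is hypothetical.
 */
#if 0	/* example only -- never compiled */
static int example_task_exists(pid_t pid)
{
	struct task_struct *p;
	int found;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	found = (p != NULL);
	read_unlock(&tasklist_lock);
	return found;
}
#endif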
/* per-UID process charging. */
extern struct user_struct * alloc_uid(uid_t);
extern void free_uid(struct user_struct *);

#include <asm/current.h>

extern unsigned long volatile jiffies;
extern unsigned long itimer_ticks;
extern unsigned long itimer_next;
extern struct timeval xtime;
extern void do_timer(struct pt_regs *);

#define CURRENT_TIME (xtime.tv_sec)

extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout));
extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
						    signed long timeout));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));

#define wake_up(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_nr(x, nr)		__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
#define wake_up_all(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
#define wake_up_sync(x)			__wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_sync_nr(x, nr)		__wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
#define wake_up_interruptible(x)	__wake_up((x),TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_nr(x, nr)	__wake_up((x),TASK_INTERRUPTIBLE, nr)
#define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_sync_nr(x, nr) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr)
asmlinkage long sys_wait4(pid_t pid, unsigned int * stat_addr, int options, struct rusage * ru);

extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);

extern ATTRIB_NORET void cpu_idle(void);

extern void release_task(struct task_struct * p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *);
extern void sig_exit(int, int, struct siginfo *);
extern int dequeue_signal(sigset_t *, siginfo_t *);
extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int kill_pg_info(int, struct siginfo *, pid_t);
extern int kill_sl_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void notify_parent(struct task_struct *, int);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int kill_pg(pid_t, int, int);
extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);

static inline int signal_pending(struct task_struct *p)
{
	return (p->sigpending != 0);
}
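/*
 * Illustrative sketch (not part of the original header): the canonical
 * interruptible sleep loop built from set_current_state(), signal_pending()
 * and schedule().  wait_event_interruptible() further down wraps the same
 * pattern; the helper and its "done" flag below are hypothetical.
 */
#if 0	/* example only -- never compiled */
static int example_wait_for(wait_queue_head_t *wq, volatile int *done)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (*done)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;	/* interrupted by a signal */
			break;
		}
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
	return ret;
}
#endif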
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

/* Reevaluate whether the task has signals pending delivery.
   This is required every time the blocked sigset_t changes.
   All callers should hold t->sigmask_lock.  */

static inline void recalc_sigpending(struct task_struct *t)
{
	t->sigpending = has_pending_signals(&t->pending.signal, &t->blocked);
}

/* True if we are on the alternate signal stack.  */

static inline int on_sig_stack(unsigned long sp)
{
	return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

extern int request_irq(unsigned int,
		       void (*handler)(int, void *, struct pt_regs *),
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);

/*
 * This has now become a routine instead of a macro, it sets a flag if
 * it returns true (to do BSD-style accounting where the process is flagged
 * if it uses root privs). The implication of this is that you should do
 * normal permissions checks first, and check suser() last.
 *
 * [Dec 1997 -- Chris Evans]
 * For correctness, the above considerations need to be extended to
 * fsuser(). This is done, along with moving fsuser() checks to be
 * last.
 *
 * These will be removed, but in the mean time, when the SECURE_NOROOT
 * flag is set, uids don't grant privilege.
 */
static inline int suser(void)
{
	if (!issecure(SECURE_NOROOT) && current->euid == 0) {
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}

static inline int fsuser(void)
{
	if (!issecure(SECURE_NOROOT) && current->fsuid == 0) {
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}

/*
 * capable() checks for a particular capability.
 * New privilege checks should use this interface, rather than suser() or
 * fsuser(). See include/linux/capability.h for defined capabilities.
 */

static inline int capable(int cap)
{
	if (cap_raised(current->cap_effective, cap))
	{
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}
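/*
 * Illustrative sketch (not part of the original header): a new-style
 * privilege check with capable() rather than a raw euid test.  Per the
 * comment above, do the cheap permission checks first and the privilege
 * escape last, so PF_SUPERPRIV is only set when root privilege was actually
 * decisive.  The helper and its parameter are hypothetical.
 */
#if 0	/* example only -- never compiled */
static int example_may_lower_nice(long nice)
{
	if (nice >= current->nice)	/* lowering priority is always fine */
		return 1;
	return capable(CAP_SYS_NICE);	/* raising priority needs privilege */
}
#endif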
/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

extern struct mm_struct * start_lazy_tlb(void);
extern void end_lazy_tlb(struct mm_struct *mm);

/* mmdrop drops the mm and the page tables */
extern inline void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct * mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(void);

/*
 * Routines for handling the fd arrays
 */
extern struct file ** alloc_fd_array(int);
extern int expand_fd_array(struct files_struct *, int nr);
extern void free_fd_array(struct file **, int);

extern fd_set *alloc_fdset(int);
extern int expand_fdset(struct files_struct *, int nr);
extern void free_fdset(fd_set *, int);

extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_mm(struct task_struct *);
extern void exit_files(struct task_struct *);
extern void exit_sighand(struct task_struct *);

extern void reparent_to_init(void);
extern void daemonize(void);

extern int do_execve(char *, char **, char **, struct pt_regs *);
extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long);

extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));

#define __wait_event(wq, condition)					\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})
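/*
 * Illustrative sketch (not part of the original header): using
 * wait_event_interruptible() from process context, paired with a
 * wake_up_interruptible() from whoever makes the condition true.  The
 * queue, the flag and the helper names are hypothetical.
 */
#if 0	/* example only -- never compiled */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static volatile int example_ready;

static int example_consumer(void)
{
	/* sleeps until example_ready != 0, or returns -ERESTARTSYS on a signal */
	return wait_event_interruptible(example_wq, example_ready);
}

static void example_producer(void)
{
	example_ready = 1;
	wake_up_interruptible(&example_wq);
}
#endif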
#define REMOVE_LINKS(p) do {					\
	(p)->next_task->prev_task = (p)->prev_task;		\
	(p)->prev_task->next_task = (p)->next_task;		\
	if ((p)->p_osptr)					\
		(p)->p_osptr->p_ysptr = (p)->p_ysptr;		\
	if ((p)->p_ysptr)					\
		(p)->p_ysptr->p_osptr = (p)->p_osptr;		\
	else							\
		(p)->p_pptr->p_cptr = (p)->p_osptr;		\
	} while (0)

#define SET_LINKS(p) do {					\
	(p)->next_task = &init_task;				\
	(p)->prev_task = init_task.prev_task;			\
	init_task.prev_task->next_task = (p);			\
	init_task.prev_task = (p);				\
	(p)->p_ysptr = NULL;					\
	if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL)	\
		(p)->p_osptr->p_ysptr = p;			\
	(p)->p_pptr->p_cptr = p;				\
	} while (0)

#define for_each_task(p) \
	for (p = &init_task ; (p = p->next_task) != &init_task ; )

#define for_each_thread(task) \
	for (task = next_thread(current) ; task != current ; task = next_thread(task))

#define next_thread(p) \
	list_entry((p)->thread_group.next, struct task_struct, thread_group)

#define thread_group_leader(p)	(p->pid == p->tgid)

static inline void del_from_runqueue(struct task_struct * p)
{
	nr_running--;
	p->sleep_time = jiffies;
	list_del(&p->run_list);
	p->run_list.next = NULL;
}

static inline int task_on_runqueue(struct task_struct *p)
{
	return (p->run_list.next != NULL);
}

static inline void unhash_process(struct task_struct *p)
{
	if (task_on_runqueue(p))
		out_of_line_bug();
	write_lock_irq(&tasklist_lock);
	nr_threads--;
	unhash_pid(p);
	REMOVE_LINKS(p);
	list_del(&p->thread_group);
	write_unlock_irq(&tasklist_lock);
}

/* Protects ->fs, ->files, ->mm, and synchronises with wait4().
 * Nests inside tasklist_lock */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

/* write full pathname into buffer and return start of pathname */
static inline char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
			    char *buf, int buflen)
{
	char *res;
	struct vfsmount *rootmnt;
	struct dentry *root;

	read_lock(&current->fs->lock);
	rootmnt = mntget(current->fs->rootmnt);
	root = dget(current->fs->root);
	read_unlock(&current->fs->lock);
	spin_lock(&dcache_lock);
	res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
	spin_unlock(&dcache_lock);
	dput(root);
	mntput(rootmnt);
	return res;
}

static inline int need_resched(void)
{
	return (unlikely(current->need_resched));
}

extern void __cond_resched(void);
static inline void cond_resched(void)
{
	if (need_resched())
		__cond_resched();
}

#endif /* __KERNEL__ */
#endif