#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/desc.h>

static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);

EXPORT_SYMBOL(init_mm);

/*
 * Initial task structure.
 *
 * We need to make sure that this is 8192-byte aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry.
 */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
		{ INIT_THREAD_INFO(init_task) };

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);

EXPORT_SYMBOL(init_task);

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data.cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
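
/*
 * Editor's sketch (not part of the original file): the 8192-byte
 * alignment of init_thread_union above is what lets the kernel recover
 * a task's thread_info from any kernel-stack address simply by masking
 * the stack pointer, in the style of current_thread_info(). A minimal
 * illustration, assuming THREAD_SIZE == 8192 on this architecture;
 * ti_from_sp() is a hypothetical helper name used only for this note:
 *
 *	static inline struct thread_info *ti_from_sp(unsigned long sp)
 *	{
 *		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
 *	}
 *
 * thread_info sits at the bottom of the same THREAD_SIZE-aligned block
 * that holds the stack, so rounding any in-stack address down to the
 * block base lands on it with no per-task pointer lookup.
 */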