#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/desc.h>

static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);

/*
 * Initial thread structure.
 *
 * We need to make sure that this is THREAD_SIZE aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry.
 */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
		{ INIT_THREAD_INFO(init_task) };
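
/*
 * The alignment itself comes from the architecture linker script
 * (vmlinux.lds.S). A minimal sketch of the relevant fragment, whose
 * exact form varies by kernel version, looks like:
 *
 *	. = ALIGN(THREAD_SIZE);
 *	.data.init_task : { *(.data.init_task) }
 *
 * so everything placed in the .data.init_task section, including the
 * union above, starts on a THREAD_SIZE boundary.
 */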
/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);

EXPORT_SYMBOL(init_task);
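
/*
 * For contrast with the static init_task above, a minimal sketch of
 * the slab path used for every other task (names follow 2.6-era
 * kernel/fork.c and are illustrative only):
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
 *	if (!tsk)
 *		return NULL;
 *
 * init_task cannot take this path: it must exist before the slab
 * allocator is up, so it is built statically here instead.
 */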
/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data.cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
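
/*
 * Illustrative usage sketch (not part of this file): code elsewhere,
 * e.g. the context-switch path, reaches a CPU's TSS through the
 * standard per-CPU accessors:
 *
 *	struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
 *	...
 *	put_cpu();
 *
 * DEFINE_PER_CPU_SHARED_ALIGNED is what places init_tss in the
 * cacheline-aligned per-CPU section described above.
 */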