From f2c3e3910f5afff6d1d5e8f18f389df711992acb Mon Sep 17 00:00:00 2001
From: Insigma Research Institute <linux@insigma.com.cn>
Date: Mon, 31 Aug 2009 00:00:00 +0800
Subject: [PATCH 1/3] applied original unifiedkernel-0.2.4.1-linux patch from
 original unifiedkernel-0.2.4.1-linux-2.6.30.diff

---
 arch/x86/include/asm/irq_vectors.h |   28 ++
 arch/x86/include/asm/thread_info.h |    5 +
 arch/x86/kernel/ldt.c              |   18 +
 arch/x86/kernel/process.c          |   28 ++
 arch/x86/kernel/process_32.c       |    3 +
 arch/x86/kernel/ptrace.c           |   10 +
 arch/x86/kernel/signal.c           |    7 +
 arch/x86/kernel/traps.c            |   43 +++
 arch/x86/kernel/vm86_32.c          |    7 +
 arch/x86/mm/mmap.c                 |    6 +
 arch/x86/vdso/vdso32-setup.c       |    3 +
 fs/exec.c                          |   18 +
 fs/ext3/namei.c                    |   12 +
 fs/fcntl.c                         |    6 +
 fs/namei.c                         |    9 +
 fs/open.c                          |   15 +
 fs/read_write.c                    |   14 +
 fs/select.c                        |    3 +
 fs/stat.c                          |   12 +
 fs/sync.c                          |   10 +
 include/linux/init_task.h          |   67 ++++
 include/linux/sched.h              |   20 +
 include/linux/win32_thread.h       |  348 ++++++++++++++++++
 include/linux/winternl.h           |  579 ++++++++++++++++++++++++++++++
 init/Kconfig                       |    4 +
 kernel/Makefile                    |    1 +
 kernel/auditsc.c                   |   27 ++
 kernel/exit.c                      |   82 +++++
 kernel/fork.c                      |  685 ++++++++++++++++++++++++++++++++++++
 kernel/sched.c                     |    9 +
 kernel/signal.c                    |   23 ++
 kernel/win32_thread.c              |  149 ++++++++
 mm/mmap.c                          |   56 +++
 mm/mprotect.c                      |    7 +
 mm/msync.c                         |    7 +
 mm/thrash.c                        |    7 +
 net/socket.c                       |   21 ++
 security/security.c                |    6 +
 38 files changed, 2355 insertions(+), 0 deletions(-)
 create mode 100644 include/linux/win32_thread.h
 create mode 100644 include/linux/winternl.h
 create mode 100644 kernel/win32_thread.c

diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 3cbd79b..e1676a6 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -26,11 +26,24 @@
 #define NMI_VECTOR			0x02
 
+#ifndef CONFIG_UNIFIED_KERNEL
 /*
  * IDT vectors usable for external interrupt sources start
  * at 0x20:
  */
 #define FIRST_EXTERNAL_VECTOR		0x20
+#else
+/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x30, as 0x20-0x2f are used by the Win32 system call implementation:
+ */
+#define FIRST_EXTERNAL_VECTOR		0x30
+/*
+ * For the Unified Kernel, 16 more IRQs are reserved for the win32 system
+ * call implementation, and thus the number of potential APIC
+ * interrupt sources is reduced by 16.
+ */
+#endif
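+/*
+ * Background note (not part of the original comment): on i386 the
+ * Windows NT system-service trap is "int $0x2e", which falls inside
+ * this reserved 0x20-0x2f window; see set_w32system_gate() in
+ * arch/x86/kernel/traps.c below, which only accepts vectors there.
+ */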
 
 #ifdef CONFIG_X86_32
 # define SYSCALL_VECTOR			0x80
@@ -155,15 +168,30 @@ static inline int invalid_vm86_irq(int irq)
 
 #ifdef CONFIG_X86_IO_APIC
 # ifdef CONFIG_SPARSE_IRQ
+#  ifndef CONFIG_UNIFIED_KERNEL
 #  define NR_IRQS					\
	(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?	\
		(NR_VECTORS + CPU_VECTOR_LIMIT) :	\
		(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
+#  else
+#  define NR_IRQS					\
+	(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?	\
+		(NR_VECTORS + CPU_VECTOR_LIMIT - 16) :	\
+		(NR_VECTORS + IO_APIC_VECTOR_LIMIT - 16))
+#  endif
 # else
 #  if NR_CPUS < MAX_IO_APICS
+#   ifndef CONFIG_UNIFIED_KERNEL
 #   define NR_IRQS			(NR_VECTORS + 4*CPU_VECTOR_LIMIT)
+#   else
+#   define NR_IRQS			(NR_VECTORS + 4*CPU_VECTOR_LIMIT - 16)
+#   endif
 #  else
+#   ifndef CONFIG_UNIFIED_KERNEL
 #   define NR_IRQS			(NR_VECTORS + IO_APIC_VECTOR_LIMIT)
+#   else
+#   define NR_IRQS			(NR_VECTORS + IO_APIC_VECTOR_LIMIT - 16)
+#   endif
 #  endif
 # endif
 #else /* !CONFIG_X86_IO_APIC: */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 8820a73..38b98f7 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -118,6 +118,11 @@ struct thread_info {
 #define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
 #define _TIF_SYSCALL_FTRACE	(1 << TIF_SYSCALL_FTRACE)
 
+#ifdef CONFIG_UNIFIED_KERNEL
+#define TIF_APC			13
+#define _TIF_APC		(1<<TIF_APC)
+#endif
+
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
+#ifdef CONFIG_UNIFIED_KERNEL
+int init_new_context_from_task(struct task_struct *ptsk, struct task_struct *tsk, struct mm_struct *mm)
+{
+	struct mm_struct *old_mm;
+	int retval = 0;
+
+	mutex_init(&mm->context.lock);
+	mm->context.size = 0;
+	old_mm = ptsk->mm;
+	if (old_mm && old_mm->context.size > 0) {
+		mutex_lock(&old_mm->context.lock);
+		retval = copy_ldt(&mm->context, &old_mm->context);
+		mutex_unlock(&old_mm->context.lock);
+	}
+	return retval;
+}
+#endif
+
 /*
  * No need to lock the MM as we are the last user
  *
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ca98915..971a9ec 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -87,6 +87,34 @@ void exit_thread(void)
 	ds_exit_thread(current);
 }
 
+#ifdef CONFIG_UNIFIED_KERNEL
+/*
+ * Free thread data structures etc..
+ */
+void exit_thread_for_task(struct task_struct *tsk)
+{
+	struct thread_struct *t = &tsk->thread;
+	unsigned long *bp = t->io_bitmap_ptr;
+
+	if (bp) {
+		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+
+		t->io_bitmap_ptr = NULL;
+		clear_thread_flag(TIF_IO_BITMAP);
+		/*
+		 * Careful, clear this in the TSS too:
+		 */
+		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
+		t->io_bitmap_max = 0;
+		put_cpu();
+		kfree(bp);
+	}
+
+	ds_exit_thread(tsk);
+}
+EXPORT_SYMBOL(exit_thread_for_task);
+#endif
+
 void flush_thread(void)
 {
 	struct task_struct *tsk = current;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 76f8f84..1c1f723 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -503,6 +503,9 @@ unsigned long arch_align_stack(unsigned long sp)
 		sp -= get_random_int() % 8192;
 	return sp & ~0xf;
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(arch_align_stack);
+#endif
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 23b7c8f..2bdc042 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -38,6 +38,10 @@
 
 #include "tls.h"
 
+#ifdef CONFIG_UNIFIED_KERNEL
+#include <linux/win32_thread.h>
+#endif
+
 enum x86_regset {
 	REGSET_GENERAL,
 	REGSET_FP,
@@ -1437,6 +1441,9 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
 
 	return ret ?: regs->orig_ax;
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(syscall_trace_enter);
+#endif
 
 asmregparm void syscall_trace_leave(struct pt_regs *regs)
 {
@@ -1466,3 +1473,6 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
 	    tracehook_consider_fatal_signal(current, SIGTRAP))
 		send_sigtrap(current, regs, 0, TRAP_BRKPT);
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(syscall_trace_leave);
+#endif
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 1442516..3236800 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -37,6 +37,10 @@
 
 #include <asm/sigframe.h>
 
+#ifdef CONFIG_UNIFIED_KERNEL
+#include <linux/win32_thread.h>
+#endif
+
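+/*
+ * A minimal sketch (not part of this patch) of how the Win32 module is
+ * expected to consume _TIF_APC on the return-to-user path, next to the
+ * signal work done in do_notify_resume() below; the helper name is
+ * hypothetical:
+ *
+ *	if (thread_info_flags & _TIF_APC)
+ *		deliver_pending_apcs(current);
+ */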
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
 #define __FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
@@ -876,6 +880,9 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 	clear_thread_flag(TIF_IRET);
 #endif /* CONFIG_X86_32 */
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(do_notify_resume);
+#endif
 
 void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
 {
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a1d2883..4463227 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -798,6 +798,9 @@ unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
 
 	return new_kesp;
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(patch_espfix_desc);
+#endif
 #else
 asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
 {
@@ -904,6 +907,46 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 }
 #endif
 
+#ifdef CONFIG_UNIFIED_KERNEL
+int set_w32system_gate(unsigned int n, void *addr)
+{
+	/* only vectors 0x20-0x2f may be set */
+	if ((n & 0xfffffff0) != 0x20)
+		return -1;
+	_set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
+	return 0;
+}
+EXPORT_SYMBOL(set_w32system_gate);
+
+int backup_idt_entry(unsigned int n, unsigned long *a, unsigned long *b)
+{
+	unsigned long *gate_addr;
+
+	/* only vectors 0x20-0x2f may be backed up */
+	if ((n & 0xfffffff0) != 0x20)
+		return -1;
+	gate_addr = (unsigned long *)(idt_table + n);
+	*a = *gate_addr;
+	*b = *(gate_addr + 1);
+	return 0;
+}
+EXPORT_SYMBOL(backup_idt_entry);
+
+int restore_idt_entry(unsigned int n, unsigned long a, unsigned long b)
+{
+	unsigned long *gate_addr;
+
+	/* only vectors 0x20-0x2f may be restored */
+	if ((n & 0xfffffff0) != 0x20)
+		return -1;
+	gate_addr = (unsigned long *)(idt_table + n);
+	*gate_addr = a;
+	*(gate_addr + 1) = b;
+	return 0;
+}
+EXPORT_SYMBOL(restore_idt_entry);
+#endif
+
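+/*
+ * Expected usage from the out-of-tree module (sketch; the entry-point
+ * name is hypothetical):
+ *
+ *	unsigned long a, b;
+ *
+ *	backup_idt_entry(0x2e, &a, &b);
+ *	set_w32system_gate(0x2e, &w32syscall_entry);
+ *	...
+ *	restore_idt_entry(0x2e, a, b);		// on module unload
+ */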
 void __init trap_init(void)
 {
 	int i;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index d7ac84e..e8088da 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -48,6 +48,10 @@
 #include <asm/tlbflush.h>
 #include <asm/irq.h>
 
+#ifdef CONFIG_UNIFIED_KERNEL
+#include <linux/win32_thread.h>
+#endif
+
 /*
  * Known problems:
  *
@@ -162,6 +166,9 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
 
 	return ret;
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(save_v86_state);
+#endif
 
 static void mark_screen_rdonly(struct mm_struct *mm)
 {
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 1658296..74bc780 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -29,6 +29,9 @@
 #include <linux/random.h>
 #include <linux/limits.h>
 #include <linux/sched.h>
+#ifdef CONFIG_UNIFIED_KERNEL
+#include <linux/win32_thread.h>
+#endif
 
 /*
  * Top of mmap area (just below the process stack).
@@ -121,3 +124,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(arch_pick_mmap_layout);
+#endif
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 1241f11..b6c648a 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -367,6 +367,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 
 	return ret;
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(arch_setup_additional_pages);
+#endif
 
 #ifdef CONFIG_X86_64
 
diff --git a/fs/exec.c b/fs/exec.c
index 895823d..9724bec 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -60,6 +60,10 @@
 #include <asm/tlb.h>
 #include "internal.h"
 
+#ifdef CONFIG_UNIFIED_KERNEL
+#include <linux/win32_thread.h>
+#endif
+
 int core_uses_pid;
 char core_pattern[CORENAME_MAX_SIZE] = "core";
 int suid_dumpable = 0;
@@ -741,7 +745,11 @@ static int exec_mmap(struct mm_struct *mm)
  * disturbing other processes.  (Other processes might share the signal
  * table via the CLONE_SIGHAND option to clone().)
  */
+#ifdef CONFIG_UNIFIED_KERNEL
+int de_thread(struct task_struct *tsk)
+#else
 static int de_thread(struct task_struct *tsk)
+#endif
 {
 	struct signal_struct *sig = tsk->signal;
 	struct sighand_struct *oldsighand = tsk->sighand;
@@ -873,6 +881,9 @@ no_thread_group:
 	BUG_ON(!thread_group_leader(tsk));
 	return 0;
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(de_thread);
+#endif
 
 /*
  * These functions flushes out all traces of the currently running executable
@@ -947,6 +958,10 @@ int flush_old_exec(struct linux_binprm * bprm)
 	if (retval)
 		goto out;
 
+#ifdef CONFIG_UNIFIED_KERNEL
+	ethread_notify_execve(current);
+#endif
+
 	bprm->mm = NULL;		/* We're using it now */
 
 	/* This is the point of no return */
@@ -1692,6 +1707,9 @@ void set_dumpable(struct mm_struct *mm, int value)
 		break;
 	}
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(set_dumpable);
+#endif
 
 int get_dumpable(struct mm_struct *mm)
 {
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 6ff7b97..9b00d05 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -40,6 +40,9 @@
 #include "namei.h"
 #include "xattr.h"
 #include "acl.h"
+#ifdef CONFIG_UNIFIED_KERNEL
+#include <linux/ctype.h>
+#endif
 
 /*
  * define how far ahead to read directories while searching them.
@@ -795,6 +798,15 @@ static inline int ext3_match (int len, const char * const name, return 0; if (!de->inode) return 0; +#ifdef CONFIG_UNIFIED_KERNEL + if (current->ethread) { + int i; + for (i = 0; i < len; i++) + if (tolower(name[i]) != tolower(de->name[i])) + return 0; + return 1; + } +#endif return !memcmp(name, de->name, len); } diff --git a/fs/fcntl.c b/fs/fcntl.c index 1ad7031..7ad2906 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -142,6 +142,9 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes) } return ret; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_dup); +#endif #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME) @@ -358,6 +361,9 @@ SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) out: return err; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_fcntl); +#endif #if BITS_PER_LONG == 32 SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, diff --git a/fs/namei.c b/fs/namei.c index 967c3db..82e80d1 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2095,6 +2095,9 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode) { return sys_mkdirat(AT_FDCWD, pathname, mode); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_mkdir); +#endif /* * We try to drop the dentry early: we should have @@ -2316,6 +2319,9 @@ SYSCALL_DEFINE1(unlink, const char __user *, pathname) { return do_unlinkat(AT_FDCWD, pathname); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_unlink); +#endif int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) { @@ -2731,6 +2737,9 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna { return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_rename); +#endif int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link) { diff --git a/fs/open.c b/fs/open.c index bdfbf03..5d401b4 100644 --- a/fs/open.c +++ b/fs/open.c @@ -174,6 +174,9 @@ SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct statfs __user *, buf) out: return error; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_fstatfs); +#endif SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, size_t, sz, struct statfs64 __user *, buf) { @@ -221,6 +224,9 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs, mutex_unlock(&dentry->d_inode->i_mutex); return err; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(do_truncate); +#endif static long do_sys_truncate(const char __user *pathname, loff_t length) { @@ -347,6 +353,9 @@ SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length) asmlinkage_protect(2, ret, fd, length); return ret; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_ftruncate); +#endif /* LFS versions of truncate are only needed on 32 bit machines */ #if BITS_PER_LONG == 32 @@ -543,6 +552,9 @@ dput_and_out: out: return error; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_chdir); +#endif SYSCALL_DEFINE1(fchdir, unsigned int, fd) { @@ -1059,6 +1071,9 @@ SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, int, mode) asmlinkage_protect(3, ret, filename, flags, mode); return ret; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_open); +#endif SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags, int, mode) diff --git a/fs/read_write.c b/fs/read_write.c index 9d1e76b..48ed2a1 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -385,6 +385,9 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) return ret; } 
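+/*
+ * With the native syscalls exported below, the Win32 layer can forward
+ * NT file I/O to them.  A minimal sketch, assuming the fd has already
+ * been resolved from the NT handle table and that errno_to_ntstatus()
+ * is a module-side helper (both assumptions, not part of this patch):
+ *
+ *	ssize_t n = sys_read(fd, buf, count);
+ *	status = (n < 0) ? errno_to_ntstatus(n) : STATUS_SUCCESS;
+ */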
+#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_read); +#endif SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, size_t, count) @@ -403,6 +406,9 @@ SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, return ret; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_write); +#endif SYSCALL_DEFINE(pread64)(unsigned int fd, char __user *buf, size_t count, loff_t pos) @@ -424,6 +430,10 @@ SYSCALL_DEFINE(pread64)(unsigned int fd, char __user *buf, return ret; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_pread64); +#endif + #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_pread64(long fd, long buf, long count, loff_t pos) { @@ -453,6 +463,10 @@ SYSCALL_DEFINE(pwrite64)(unsigned int fd, const char __user *buf, return ret; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_pwrite64); +#endif + #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_pwrite64(long fd, long buf, long count, loff_t pos) { diff --git a/fs/select.c b/fs/select.c index 0fe0e14..a6df4c0 100644 --- a/fs/select.c +++ b/fs/select.c @@ -888,6 +888,9 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, } return ret; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_poll); +#endif #ifdef HAVE_SET_RESTORE_SIGMASK SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds, diff --git a/fs/stat.c b/fs/stat.c index 075694e..dd2e2d0 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -243,6 +243,9 @@ SYSCALL_DEFINE2(newstat, char __user *, filename, struct stat __user *, statbuf) return error; return cp_new_stat(&stat, statbuf); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_newstat); +#endif SYSCALL_DEFINE2(newlstat, char __user *, filename, struct stat __user *, statbuf) { @@ -255,6 +258,9 @@ SYSCALL_DEFINE2(newlstat, char __user *, filename, struct stat __user *, statbuf return cp_new_stat(&stat, statbuf); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_newlstat); +#endif #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT) SYSCALL_DEFINE4(newfstatat, int, dfd, char __user *, filename, @@ -280,6 +286,9 @@ SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf) return error; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_newfstat); +#endif SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname, char __user *, buf, int, bufsiz) @@ -313,6 +322,9 @@ SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf, { return sys_readlinkat(AT_FDCWD, path, buf, bufsiz); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_readlink); +#endif /* ---------- LFS-64 ----------- */ diff --git a/fs/sync.c b/fs/sync.c index 7abc65f..10f87af 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -143,7 +143,11 @@ out: } EXPORT_SYMBOL(vfs_fsync); +#ifdef CONFIG_UNIFIED_KERNEL +int do_fsync(unsigned int fd, int datasync) +#else static int do_fsync(unsigned int fd, int datasync) +#endif { struct file *file; int ret = -EBADF; @@ -155,11 +159,17 @@ static int do_fsync(unsigned int fd, int datasync) } return ret; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(do_fsync); +#endif SYSCALL_DEFINE1(fsync, unsigned int, fd) { return do_fsync(fd, 0); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_fsync); +#endif SYSCALL_DEFINE1(fdatasync, unsigned int, fd) { diff --git a/include/linux/init_task.h b/include/linux/init_task.h index d87247d..05c7036 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -112,6 +112,7 @@ extern struct cred init_cred; * INIT_TASK is used to set up the first task table, touch at * your 
own risk!. Base=0, limit=0x1fffff (=2MB) */ +#ifndef CONFIG_UNIFIED_KERNEL #define INIT_TASK(tsk) \ { \ .state = 0, \ @@ -175,6 +176,72 @@ extern struct cred init_cred; INIT_LOCKDEP \ INIT_FTRACE_GRAPH \ } +#else +#define INIT_TASK(tsk) \ +{ \ + .state = 0, \ + .stack = &init_thread_info, \ + .usage = ATOMIC_INIT(2), \ + .flags = PF_KTHREAD, \ + .lock_depth = -1, \ + .prio = MAX_PRIO-20, \ + .static_prio = MAX_PRIO-20, \ + .normal_prio = MAX_PRIO-20, \ + .policy = SCHED_NORMAL, \ + .cpus_allowed = CPU_MASK_ALL, \ + .mm = NULL, \ + .active_mm = &init_mm, \ + .se = { \ + .group_node = LIST_HEAD_INIT(tsk.se.group_node), \ + }, \ + .rt = { \ + .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ + .time_slice = HZ, \ + .nr_cpus_allowed = NR_CPUS, \ + }, \ + .tasks = LIST_HEAD_INIT(tsk.tasks), \ + .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \ + .ptraced = LIST_HEAD_INIT(tsk.ptraced), \ + .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \ + .real_parent = &tsk, \ + .parent = &tsk, \ + .children = LIST_HEAD_INIT(tsk.children), \ + .sibling = LIST_HEAD_INIT(tsk.sibling), \ + .group_leader = &tsk, \ + .real_cred = &init_cred, \ + .cred = &init_cred, \ + .cred_exec_mutex = \ + __MUTEX_INITIALIZER(tsk.cred_exec_mutex), \ + .comm = "swapper", \ + .thread = INIT_THREAD, \ + .fs = &init_fs, \ + .files = &init_files, \ + .signal = &init_signals, \ + .sighand = &init_sighand, \ + .nsproxy = &init_nsproxy, \ + .pending = { \ + .list = LIST_HEAD_INIT(tsk.pending.list), \ + .signal = {{0}}}, \ + .blocked = {{0}}, \ + .alloc_lock = __RW_LOCK_UNLOCKED(tsk.alloc_lock), \ + .journal_info = NULL, \ + .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ + .fs_excl = ATOMIC_INIT(0), \ + .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .timer_slack_ns = 50000, /* 50 usec default slack */ \ + .pids = { \ + [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ + [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ + [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ + }, \ + .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ + INIT_IDS \ + INIT_TRACE_IRQFLAGS \ + INIT_LOCKDEP \ + INIT_FTRACE_GRAPH \ + .ethread = NULL \ +} +#endif #define INIT_CPU_TIMERS(cpu_timers) \ diff --git a/include/linux/sched.h b/include/linux/sched.h index b4c38bc..bbac81b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -105,6 +105,11 @@ struct fs_struct; */ #define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND) +#ifdef CONFIG_UNIFIED_KERNEL +#define CREATE_PROCESS 1 +#define CREATE_THREAD 2 +#endif + /* * These are the constant used to fake the fixed-point load-average * counting. Some notes: @@ -1295,7 +1300,11 @@ struct task_struct { u32 parent_exec_id; u32 self_exec_id; /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ +#ifndef CONFIG_UNIFIED_KERNEL spinlock_t alloc_lock; +#else + rwlock_t alloc_lock; +#endif #ifdef CONFIG_GENERIC_HARDIRQS /* IRQ handler threads */ @@ -1429,6 +1438,9 @@ struct task_struct { /* state flags for use by tracers */ unsigned long trace; #endif +#ifdef CONFIG_UNIFIED_KERNEL + struct ethread *ethread; +#endif }; /* Future-safe accessor for struct task_struct's cpus_allowed. 
 */
@@ -2078,12 +2090,20 @@ static inline int task_detached(struct task_struct *p)
  */
 static inline void task_lock(struct task_struct *p)
 {
+#ifndef CONFIG_UNIFIED_KERNEL
 	spin_lock(&p->alloc_lock);
+#else
+	write_lock(&p->alloc_lock);
+#endif
 }
 
 static inline void task_unlock(struct task_struct *p)
 {
+#ifndef CONFIG_UNIFIED_KERNEL
 	spin_unlock(&p->alloc_lock);
+#else
+	write_unlock(&p->alloc_lock);
+#endif
 }
 
 extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
diff --git a/include/linux/win32_thread.h b/include/linux/win32_thread.h
new file mode 100644
index 0000000..0119703
--- /dev/null
+++ b/include/linux/win32_thread.h
@@ -0,0 +1,348 @@
+/*
+ * win32_thread.h
+ *
+ * Copyright (C) 2006 Insigma Co., Ltd
+ *
+ * This software has been developed while working on the Linux Unified Kernel
+ * project (http://linux.insigma.com.cn) in the Insigma Research Institute,
+ * which is a subdivision of Insigma Co., Ltd (http://www.insigma.com.cn).
+ *
+ * The project is sponsored by Insigma Co., Ltd.
+ *
+ * The authors can be reached at linux@insigma.com.cn.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Revision History:
+ *   Jan 2006 - Created.
+ */
+
+#ifndef _WIN32_THREAD_H_
+#define _WIN32_THREAD_H_
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/module.h>
+#include <linux/winternl.h>
+
+#define WIN32_THREAD_SIGNAL_OKAY	0
+#define WIN32_THREAD_CANCEL_SIGNAL	1
+
+struct task_struct;
+
+enum kthread_state {
+	Initialized,
+	Ready,
+	Running,
+	Standby,
+	Terminated,
+	Waiting,
+	Transition,
+	DeferredReady,
+};
+
+struct ethread_operations {
+	const char *name;		/* ethread name */
+	struct module *owner;		/* responsible module */
+
+	void (*close)(struct ethread *thread);			/* request to destruct */
+	void (*exit)(struct ethread *thread, int status);	/* notification of exit */
+	int (*signal)(struct ethread *thread, int signal);	/* notification of signal (can cancel) */
+	void (*execve)(struct ethread *thread);			/* notification of execve */
+	/* notification that fork/clone has set up the new process and thread */
+	void (*fork)(struct ethread *thread, struct task_struct *parent,
+		     struct task_struct *child, unsigned long clone_flags);
+};
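+/*
+ * Sketch of a hook table as the Win32 module might define it (names
+ * hypothetical; the table is attached to a task via add_ethread()
+ * below):
+ *
+ *	static struct ethread_operations w32_ethread_ops = {
+ *		.name	= "win32",
+ *		.owner	= THIS_MODULE,
+ *		.close	= w32_thread_close,
+ *		.exit	= w32_thread_exit,
+ *		.signal	= w32_thread_signal,
+ *	};
+ */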
+
+struct w32thread;
+#if 0
+struct w32thread
+{
+	void* message_queue;
+	struct semaphore window_list_lock;
+	struct list_head window_list_head;
+	struct list_head w32_callback_list_head;
+	struct kbdtables* keyboard_layout;
+	struct desktop_object* desktop;
+	void* desktop_handle;
+	unsigned long message_pump_hook_value;
+	unsigned char is_exiting;
+};
+#endif
+
+struct kthread
+{
+	struct dispatcher_header header;
+	struct list_head mutant_list_head;
+	void* initial_stack;
+	unsigned long stack_limit;
+	void* teb;
+	void* tls_array;
+	void* kernel_stack;
+	unsigned char debug_active;
+	unsigned char state;
+	unsigned char alerted[2];
+	unsigned char iopl;
+	unsigned char npx_state;
+	char saturation;
+	char priority;
+	unsigned long context_switches;
+	long wait_status;
+	kirql_t wait_irql;
+	kprocessor_mode_t wait_mode;
+	unsigned char wait_next;
+	unsigned char wait_reason;
+	union
+	{
+		struct kwait_block* wait_block_list;
+		struct kgate* gate_object;
+	};
+	struct list_head wait_list_entry;
+	unsigned long wait_time;
+	char base_priority;
+	unsigned char decrement_count;
+	unsigned char priority_decrement;
+	char quantum;
+	struct kwait_block wait_block[4];
+	void* lego_data;
+
+	kaffinity_t user_affinity;
+	kaffinity_t affinity;
+	unsigned char system_affinity_active;
+
+	unsigned char power_state;
+	unsigned char npx_irql;
+	unsigned char pad[1];
+
+	struct kqueue* queue;
+	struct ktimer timer;
+	struct list_head queue_list_entry;
+
+	unsigned char preempted;
+	unsigned char process_ready_queue;
+	unsigned char next_processor;
+	unsigned char kstack_resident;
+
+	void* callback_stack;
+
+	struct w32thread* win32thread;
+	struct ktrap_frame* trap_frame;
+
+	/* APC */
+	union
+	{
+		struct
+		{
+			unsigned short kernel_apc_disable;
+			unsigned short special_apc_disable;
+		};
+		unsigned long combined_apc_disable;
+	};
+	spinlock_t apc_queue_lock;
+	struct kapc_state* apc_state_pointer[2];
+	struct kapc_state apc_state;
+	struct kapc_state saved_apc_state;
+	struct kapc suspend_apc;
+	unsigned char apc_queueable;
+	unsigned char apc_state_index;
+
+	unsigned char enable_stack_swap;
+	unsigned char large_stack;
+	unsigned char resource_index;
+	unsigned char previous_mode;
+	unsigned char alertable;
+	unsigned char auto_alignment;
+	void* stack_base;
+	struct ksemaphore suspend_semaphore;
+	struct list_head thread_list_entry;
+	char freeze_count;
+	unsigned char suspend_count;
+	unsigned char ideal_processor;
+	unsigned char disable_boost;
+	unsigned char quantum_reset;
+};
+
+struct ethread
+{
+	struct kthread tcb;
+	union
+	{
+		ntstatus_t exit_status;
+		void* ofs_chain;
+	};
+	struct list_head post_block_list;
+	union
+	{
+		struct termination_port* termination_port;
+		struct ethread* reaper_link;
+		void* keyed_wait_value;
+	};
+	spinlock_t active_timer_list_lock;
+	struct list_head active_timer_list_head;
+	struct client_id cid;
+
+	union
+	{
+		struct semaphore lpc_reply_semaphore;
+		struct semaphore keyed_reply_semaphore;
+	};
+	union
+	{
+		void* lpc_reply_message;
+		void* lpc_waiting_on_port;
+	};
+	union
+	{
+		struct list_head lpc_reply_chain;
+		struct list_head keyed_wait_chain;
+	};
+	unsigned long lpc_reply_messageid;
+
+	struct ps_impersonation_information* impersonation_info;
+
+	struct list_head irp_list;
+	unsigned long top_level_irp;
+	struct device_object* device_to_verify;
+	struct eprocess* threads_process;
+	void* start_address;
+	union
+	{
+		void* win32_start_address;
+		unsigned long lpc_received_messageid;
+	};
+	struct list_head thread_list_entry;
+	ex_rundown_ref_t rundown_protect;
+	spinlock_t thread_lock;
+	unsigned long read_cluster_size;
+	access_mask_t granted_access;
+	union
+	{
+		struct
+		{
+			unsigned long terminated:1;
+			unsigned long dead_thread:1;
+			unsigned long hide_from_debugger:1;
+			unsigned long active_impersonation_info:1;
+			unsigned long system_thread:1;
+			unsigned long hard_errors_are_disabled:1;
+			unsigned long break_on_termination:1;
+			unsigned long skip_creation_msg:1;
+			unsigned long skip_termination_msg:1;
+		};
+		unsigned long cross_thread_flags;
+	};
+	union
+	{
+		struct
+		{
+			unsigned long active_exworker:1;
+			unsigned long exworker_can_wait_user:1;
+			unsigned long memory_maker:1;
+			unsigned long keyed_event_inuse:1;
+		};
+		unsigned long same_thread_passive_flags;
+	};
+	union
+	{
+		struct
+		{
+			unsigned long lpc_received_msgid_valid:1;
+			unsigned long lpc_exit_thread_called:1;
+			unsigned long address_space_owner:1;
+			unsigned long owns_process_workingset_exclusive:1;
+			unsigned long owns_process_workingset_shared:1;
+			unsigned long owns_system_workingset_exclusive:1;
+			unsigned long owns_system_workingset_shared:1;
+			unsigned long owns_session_workingset_exclusive:1;
+			unsigned long owns_session_workingset_shared:1;
+			unsigned long apc_needed:1;
+		};
+		unsigned long same_thread_apc_flags;
+	};
+	unsigned char forward_cluster_only;
+	unsigned char disable_page_fault_clustering;
+	unsigned char active_fault_count;
+
+	/* for unified kernel */
+	atomic_t et_count;			/* ref count */
+	int et_exit_called;			/* exit is called? */
+	struct task_struct* et_task;		/* Linux task */
+	struct ethread_operations* et_ops;
+	void* tsb;
+#if 0
+	void* et_extend;
+#endif
+};
+
+typedef struct ethread ETHREAD, *PETHREAD;
+
+static __inline__ void etget(struct ethread *thread)
+{
+	atomic_inc(&thread->et_count);
+} /* end etget() */
+
+static __inline__ void etput(struct ethread *thread)
+{
+	if (atomic_dec_and_test(&thread->et_count)) {
+		struct module *owner = thread->et_ops->owner;
+		thread->et_ops->close(thread);	/* will destroy this ethread */
+		if (owner)
+			module_put(owner);
+	}
+} /* end etput() */
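+/*
+ * Reference-count discipline (sketch): whoever publishes or looks up an
+ * ethread holds a reference across the use:
+ *
+ *	etget(thread);
+ *	add_ethread(tsk, thread);
+ *	...
+ *	etput(thread);		// may destroy it via et_ops->close()
+ */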
+
+/* add a win32 thread to a task */
+extern void add_ethread(struct task_struct *tsk, struct ethread *thread);
+
+/* remove a win32 thread from a task */
+extern void remove_ethread(struct task_struct *tsk, struct ethread *thread);
+
+/* a win32 thread exit */
+extern void exit_ethread(struct task_struct *tsk);
+
+
+/* notification of exit/fatal signal */
+extern void __ethread_notify_exit(struct task_struct *tsk, int exit_code);
+
+static __inline__ void ethread_notify_exit(struct task_struct *tsk, int exit_code)
+{
+	if (tsk->ethread)
+		__ethread_notify_exit(tsk, exit_code);
+} /* end ethread_notify_exit() */
+
+/* notification of signal */
+extern int __ethread_notify_signal(struct task_struct *tsk, int signal);
+
+static __inline__ int ethread_notify_signal(struct task_struct *tsk, int signal)
+{
+	return tsk->ethread
+		? __ethread_notify_signal(tsk, signal)
+		: WIN32_THREAD_SIGNAL_OKAY;
+} /* end ethread_notify_signal() */
+
+/* notification of execve */
+extern void __ethread_notify_execve(struct task_struct *tsk);
+
+static __inline__ void ethread_notify_execve(struct task_struct *tsk)
+{
+	if (tsk->ethread)
+		__ethread_notify_execve(tsk);
+} /* end ethread_notify_execve() */
+
+/* notification of fork */
+extern void __ethread_notify_fork(struct task_struct *tsk,
+				  struct task_struct *child,
+				  unsigned long clone_flags);
+
+static __inline__ void ethread_notify_fork(struct task_struct *tsk,
+					   struct task_struct *child,
+					   unsigned long clone_flags)
+{
+	if (tsk->ethread)
+		__ethread_notify_fork(tsk, child, clone_flags);
+} /* end ethread_notify_fork() */
+
+#endif /* _WIN32_THREAD_H_ */
diff --git a/include/linux/winternl.h b/include/linux/winternl.h
new file mode 100644
index 0000000..c3be1d3
--- /dev/null
+++ b/include/linux/winternl.h
@@ -0,0 +1,579 @@
+/*
+ * winternl.h
+ *
+ * Copyright (C) 2006 Insigma Co., Ltd
+ *
+ * This software has been developed while working on the Linux Unified Kernel
+ * project (http://linux.insigma.com.cn) in the Insigma Research Institute,
+ * which is a subdivision of Insigma Co., Ltd (http://www.insigma.com.cn).
+ *
+ * The project is sponsored by Insigma Co., Ltd.
+ *
+ * The authors can be reached at linux@insigma.com.cn.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Revision History:
+ *   Jan 2006 - Created.
+ */
+
+#ifndef _WINTERNL_H_
+#define _WINTERNL_H_
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/nls.h>
+
+struct eprocess;
+struct ethread;
+struct kthread;
+
+typedef unsigned char kirql_t;
+typedef long ntstatus_t;
+typedef void* ex_rundown_ref_t;
+typedef unsigned long access_mask_t;
+
+#define INIT_DISP_HEADER(header, tp, sz, state)			\
+{								\
+	(header)->type = (unsigned char)tp;			\
+	(header)->absolute = 0;					\
+	(header)->inserted = 0;					\
+	(header)->size = (unsigned char)sz;			\
+	(header)->signal_state = state;				\
+	INIT_LIST_HEAD(&((header)->wait_list_head));		\
+}
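+/*
+ * Usage sketch: objects that embed a dispatcher_header initialize it
+ * before first use, e.g. for an event (size is given in unsigned-long
+ * units; an initial signal_state of 0 means non-signaled):
+ *
+ *	INIT_DISP_HEADER(&event->header, notification_event,
+ *			 sizeof(struct kevent) / sizeof(unsigned long), 0);
+ */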
+
+typedef union _large_integer_t {
+	struct {
+		long low;
+		long high;
+	} u;
+	long long quad;
+} large_integer_t;
+
+typedef large_integer_t physical_address_t;
+
+enum security_impersonation_level {
+	sec_anonymous,
+	sec_identification,
+	sec_impersonation,
+	sec_delegation
+};
+
+enum event_type
+{
+	notification_event,
+	synchronization_event
+};
+
+struct client_id
+{
+	void *unique_process;
+	void *unique_thread;
+};
+
+struct dispatcher_header {
+	unsigned char type;
+	unsigned char absolute;
+	unsigned char size;
+	unsigned char inserted;
+	long signal_state;
+	struct list_head wait_list_head;
+};
+
+struct kapc_state {
+	struct list_head apc_list_head[2];
+	struct kprocess* process;
+	unsigned char kapc_inprogress;
+	unsigned char kapc_pending;
+	unsigned char uapc_pending;
+};
+
+struct kwait_block {
+	struct list_head wait_list_entry;
+	struct kthread* thread;
+	void* object;
+	struct kwait_block* next_wait_block;
+	unsigned short wait_key;
+	unsigned short wait_type;
+};
+
+typedef unsigned long kaffinity_t;
+
+struct kqueue {
+	struct dispatcher_header header;
+	struct list_head entry_list_head;
+	unsigned long current_count;
+	unsigned long maximum_count;
+	struct list_head thread_list_head;
+};
+
+struct ktimer {
+	struct dispatcher_header header;
+	large_integer_t due_time;
+	struct list_head timer_list_entry;
+	struct kdpc* dpc;
+	long period;
+};
+
+struct ktrap_frame
+{
+	void* debug_ebp;
+	void* debug_eip;
+	void* debug_arg_mark;
+	void* debug_pointer;
+	void* temp_cs;
+	void* temp_eip;
+	unsigned long dr0;
+	unsigned long dr1;
+	unsigned long dr2;
+	unsigned long dr3;
+	unsigned long dr6;
+	unsigned long dr7;
+	unsigned short gs;
+	unsigned short reserved1;
+	unsigned short es;
+	unsigned short reserved2;
+	unsigned short ds;
+	unsigned short reserved3;
+	unsigned long edx;
+	unsigned long ecx;
+	unsigned long eax;
+	unsigned long previous_mode;
+	void* exception_list;
+	unsigned short fs;
+	unsigned short reserved4;
+	unsigned long edi;
+	unsigned long esi;
+	unsigned long ebx;
+	unsigned long ebp;
+	unsigned long errorcode;
+	unsigned long eip;
+	unsigned long cs;
+	unsigned long eflags;
+	unsigned long esp;
+	unsigned short ss;
+	unsigned short reserved5;
+	unsigned short v86_es;
+	unsigned short reserved6;
+	unsigned short v86_ds;
+	unsigned short reserved7;
+	unsigned short v86_fs;
+	unsigned short reserved8;
+	unsigned short v86_gs;
+	unsigned short reserved9;
+};
+
+#ifndef __stdcall
+#define __stdcall __attribute__((stdcall))
+#endif
+
+struct kapc;
+typedef void (__stdcall *normal_routine_t)(void *context, void *arg1, void *arg2);
+
+typedef void (__stdcall *kernel_routine_t)(struct kapc *apc,
+		normal_routine_t *normal_routine, void **context, void **arg1, void **arg2);
+
+typedef void (__stdcall *rundown_routine_t)(struct kapc *apc);
+
+typedef char kprocessor_mode_t;
+
+struct kapc {
+	short type;
+	short size;
+	unsigned long spare0;
+	struct kthread* thread;
+	struct list_head apc_list_entry;
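+	/*
+	 * Delivery order (descriptive note): kernel_routine always runs
+	 * first, in kernel mode, and may clear *normal_routine to cancel
+	 * the user-mode part; rundown_routine, if set, runs instead when
+	 * the APC is discarded at thread teardown; normal_routine finally
+	 * runs in the target thread at the requested apc_mode.
+	 */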
+	kernel_routine_t kernel_routine;
+	rundown_routine_t rundown_routine;
+	normal_routine_t normal_routine;
+	void* normal_context;
+	void* system_argument1;
+	void* system_argument2;
+	char apc_state_index;
+	kprocessor_mode_t apc_mode;
+	unsigned char inserted;
+};
+
+#define USER_SHARED_DATA	(0x7FFE0000)
+
+/* Global Flags */
+#define FLG_STOP_ON_EXCEPTION		0x00000001
+#define FLG_SHOW_LDR_SNAPS		0x00000002
+#define FLG_DEBUG_INITIAL_COMMAND	0x00000004
+#define FLG_STOP_ON_HUNG_GUI		0x00000008
+#define FLG_HEAP_ENABLE_TAIL_CHECK	0x00000010
+#define FLG_HEAP_ENABLE_FREE_CHECK	0x00000020
+#define FLG_HEAP_VALIDATE_PARAMETERS	0x00000040
+#define FLG_HEAP_VALIDATE_ALL		0x00000080
+#define FLG_POOL_ENABLE_TAIL_CHECK	0x00000100
+#define FLG_POOL_ENABLE_FREE_CHECK	0x00000200
+#define FLG_POOL_ENABLE_TAGGING		0x00000400
+#define FLG_HEAP_ENABLE_TAGGING		0x00000800
+#define FLG_USER_STACK_TRACE_DB		0x00001000
+#define FLG_KERNEL_STACK_TRACE_DB	0x00002000
+#define FLG_MAINTAIN_OBJECT_TYPELIST	0x00004000
+#define FLG_HEAP_ENABLE_TAG_BY_DLL	0x00008000
+#define FLG_IGNORE_DEBUG_PRIV		0x00010000
+#define FLG_ENABLE_CSRDEBUG		0x00020000
+#define FLG_ENABLE_KDEBUG_SYMBOL_LOAD	0x00040000
+#define FLG_DISABLE_PAGE_KERNEL_STACKS	0x00080000
+#define FLG_HEAP_ENABLE_CALL_TRACING	0x00100000
+#define FLG_HEAP_DISABLE_COALESCING	0x00200000
+#define FLG_ENABLE_CLOSE_EXCEPTIONS	0x00400000
+#define FLG_ENABLE_EXCEPTION_LOGGING	0x00800000
+#define FLG_ENABLE_HANDLE_TYPE_TAGGING	0x01000000
+#define FLG_HEAP_PAGE_ALLOCS		0x02000000
+#define FLG_DEBUG_INITIAL_COMMAND_EX	0x04000000
+
+struct kgd_entry {
+	unsigned short limit_low;
+	unsigned short base_low;
+	union {
+		struct {
+			unsigned char base_mid;
+			unsigned char flags1;
+			unsigned char flags2;
+			unsigned char base_hi;
+		} bytes;
+		struct {
+			unsigned long base_mid : 8;
+			unsigned long type : 5;
+			unsigned long dpl : 2;
+			unsigned long pres : 1;
+			unsigned long limit_h : 4;
+			unsigned long sys : 1;
+			unsigned long reserved_0 : 1;
+			unsigned long default_big : 1;
+			unsigned long granularity : 1;
+			unsigned long base_hi : 8;
+		} bits;
+	} high_word;
+};
+
+struct kidt_entry
+{
+	unsigned short offset;
+	unsigned short selector;
+	unsigned short access;
+	unsigned short extended_offset;
+};
+
+struct kexecute_options
+{
+	unsigned char execute_disable:1;
+	unsigned char execute_enable:1;
+	unsigned char disable_thunk_emulation:1;
+	unsigned char permanent:1;
+	unsigned char execute_dispatch_enable:1;
+	unsigned char image_dispatch_enable:1;
+	unsigned char spare:2;
+};
+
+struct kevent {
+	struct dispatcher_header header;
+};
+
+struct mmsupport_flags {
+	unsigned long session_space:1;
+	unsigned long being_trimmed:1;
+	unsigned long session_leader:1;
+	unsigned long trim_hard:1;
+	unsigned long working_set_hard:1;
+	unsigned long address_space_being_deleted:1;
+	unsigned long available:10;
+	unsigned long allow_working_set_adjustment:8;
+	unsigned long memory_priority:8;
+};
+
+struct mmwslentry {
+	unsigned long valid:1;
+	unsigned long locked_in_ws:1;
+	unsigned long locked_in_memory:1;
+	unsigned long protection:5;
+	unsigned long hashed:1;
+	unsigned long direct:1;
+	unsigned long age:2;
+	unsigned long virtual_page_number:14;
+};
+
+struct mmwsle {
+	union
+	{
+		void* virtual_address;
+		unsigned long long_v;
+		struct mmwslentry e1;
+	};
+};
+
+struct mmwsle_hash {
+	void* key;
+	unsigned long index;
+};
+
+struct mmwsl
+{
+	unsigned long first_free;
+	unsigned long first_dynamic;
+	unsigned long last_entry;
+	unsigned long next_slot;
+	struct mmwsle* wsle;
+	unsigned long last_initialized_wsle;
+	unsigned long non_direct_count;
+	struct mmwsle_hash* hash_table;
+	unsigned long hash_table_size;
+	unsigned long number_of_committed_page_tables;
+	void* hash_table_start;
+	void* highest_permitted_hash_address;
+	unsigned long number_of_image_waiters;
+	unsigned long vad_bitmap_hint;
+	unsigned short used_page_table_entries[768];
+	unsigned long committed_page_tables[24];
+};
+
+struct mmsupport {
+	large_integer_t last_trim_time;
+	struct mmsupport_flags flags;
+	unsigned long page_fault_count;
+	unsigned long peak_working_set_size;
+	unsigned long working_set_size;
+	unsigned long minimum_working_set_size;
+	unsigned long maximum_working_set_size;
+	struct mmwsl* mm_working_set_list;
+	struct list_head working_set_expansion_links;
+	unsigned long claim;
+	unsigned long next_estimation_slot;
+	unsigned long next_aging_slot;
+	unsigned long estimated_available;
+	unsigned long growth_since_last_estimate;
+};
+
+struct handle_table_entry_info
+{
+	unsigned long audit_mask;
+};
+
+struct handle_table_entry
+{
+	union
+	{
+		void* object;
+		unsigned long obattributes;
+		struct handle_table_entry_info* info_table;
+		unsigned long value;
+	} u1;
+	union
+	{
+		unsigned long granted_access;
+		unsigned short granted_access_index;
+		long next_free_table_entry;
+	} u2;
+};
+
+typedef unsigned long eresource_thread_t;
+
+struct owner_entry {
+	eresource_thread_t owner_thread;
+	union {
+		long owner_count;
+		unsigned long table_size;
+	} u;
+};
+
+struct ksemaphore {
+	struct dispatcher_header header;
+	long limit;
+};
+
+struct eresource {
+	struct list_head system_resources_list;
+	struct owner_entry* owner_table;
+	short active_count;
+	unsigned short flag;
+	struct ksemaphore* shared_waiters;
+	struct kevent* exclusive_waiters;
+	struct owner_entry owner_threads[2];
+	unsigned long contention_count;
+	unsigned short number_of_shared_waiters;
+	unsigned short number_of_exclusive_waiters;
+	union {
+		void* address;
+		unsigned long long creator_back_trace_index;
+	} u;
+	spinlock_t spinlock;
+};
+
+struct handle_table
+{
+	unsigned long flags;
+	long handle_count;
+	struct handle_table_entry*** table;
+	struct eprocess* quota_process;
+	void* unique_processid;
+	long first_free_table_entry;
+	long next_index_needing_pool;
+	struct eresource handle_table_lock;
+	struct list_head handle_table_list;
+	struct kevent handle_contention_event;
+};
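+/*
+ * Layout note: "table" is a three-level structure as in NT's handle
+ * tables (top level -> mid level -> arrays of handle_table_entry), so a
+ * handle value decomposes into three indices; first_free_table_entry
+ * threads a freelist through unused entries.
+ */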
+
+struct ex_fast_ref
+{
+	union
+	{
+		void* object;
+		unsigned long ref_cnt:3;
+		unsigned long value;
+	};
+};
+
+
+struct kgate
+{
+	struct dispatcher_header header;
+};
+
+struct kguarded_mutex
+{
+	long count;
+	struct kthread* owner;
+	unsigned long contention;
+	struct kgate gate;
+	union {
+		struct {
+			short kernel_apc_disable;
+			short special_apc_disable;
+		};
+		unsigned long combined_apc_disable;
+	};
+};
+
+struct mmaddress_node
+{
+	union
+	{
+		unsigned long balance:2;
+		struct mmaddress_node* parent;
+	} u1;
+	struct mmaddress_node* left_child;
+	struct mmaddress_node* right_child;
+	unsigned long starting_vpn;
+	unsigned long ending_vpn;
+};
+
+struct mm_avl_table
+{
+	struct mmaddress_node balanced_root;
+	unsigned long depth_of_tree:5;
+	unsigned long unused:3;
+	unsigned long number_generic_table_elements:24;
+	void* node_hint;
+	void* node_free_hint;
+};
+
+struct eprocess_quota_entry
+{
+	unsigned long usage;
+	unsigned long limit;
+	unsigned long peak;
+	unsigned long return_val;
+};
+
+struct eprocess_quota_block
+{
+	struct eprocess_quota_entry quota_entry[3];
+	struct list_head quota_list;
+	unsigned long reference_count;
+	unsigned long process_count;
+};
+
+struct proc_ws_watch_info
+{
+	void* faulting_pc;
+	void* faulting_va;
+};
+
+struct pagefault_history
+{
+	unsigned long current_index;
+	unsigned long map_index;
+	spinlock_t spinlock;
+	void* reserved;
+	struct proc_ws_watch_info watch_info[1];
+};
+
+struct hardware_pte_x86
+{
+	unsigned long valid : 1;
+	unsigned long write : 1;
+	unsigned long owner : 1;
+	unsigned long write_through : 1;
+	unsigned long cache_disable : 1;
+	unsigned long accessed : 1;
+	unsigned long dirty : 1;
+	unsigned long large_page : 1;
+	unsigned long global : 1;
+	unsigned long copy_on_write : 1;
+	unsigned long prototype : 1;
+	unsigned long reserved : 1;
+	unsigned long page_frame_number : 20;
+};
+
+struct unicode_string
+{
+	unsigned short length;
+	unsigned short max_length;
+	wchar_t* buffer;
+};
+
+struct object_name_info
+{
+	struct unicode_string name;
+};
+
+struct se_audit_proc_creation_info
+{
+	struct object_name_info* image_filename;
+};
+
+typedef void* fast_mutex_t;
+
+struct maddress_space
+{
+	void* memory_area_root;
+	fast_mutex_t lock;
+	void* lowest_address;
+	struct eprocess* process;
+	unsigned short* pt_ref_count_table;
+	unsigned long pt_ref_count_table_size;
+};
+
+struct kdpc;
+typedef void
+(__stdcall *kdeferred_routine_t)(struct kdpc *dpc, void* deferred_context,
+				 void* system_argument1, void* system_argument2);
+
+struct kdpc {
+	short type;
+	unsigned char number;
+	unsigned char importance;
+	struct list_head dpc_list_entry;
+	kdeferred_routine_t deferred_routine;
+	void* deferred_context;
+	void* system_argument1;
+	void* system_argument2;
+	void* dpc_data;
+};
+
+struct kmutant {
+	struct dispatcher_header header;
+	struct list_head mutant_list_entry;
+	struct kthread* owner_thread;
+	unsigned char abandoned;
+	unsigned char apc_disable;
+};
+
+#endif /* _WINTERNL_H_ */
diff --git a/init/Kconfig b/init/Kconfig
index 7be4d38..ea6266a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1042,6 +1042,10 @@ config SLOW_WORK
 
 	  See Documentation/slow-work.txt.
 
+config UNIFIED_KERNEL
+	bool "Unified kernel support"
+	default n
+
 endmenu		# General setup
 
 config HAVE_GENERIC_DMA_COHERENT
diff --git a/kernel/Makefile b/kernel/Makefile
index 4242366..7e04ef8 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -95,6 +95,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
+obj-$(CONFIG_UNIFIED_KERNEL) += win32_thread.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 7d6ac7c..d3d0046 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -890,6 +890,33 @@ int audit_alloc(struct task_struct *tsk)
 	return 0;
 }
 
+#ifdef CONFIG_UNIFIED_KERNEL
+int audit_alloc_from_task(struct task_struct *ptsk, struct task_struct *tsk)
+{
+	struct audit_context *context;
+	enum audit_state state;
+	char *key = NULL;
+
+	if (likely(!audit_ever_enabled))
+		return 0;	/* Return if not auditing. */
+
+	state = audit_filter_task(tsk, &key);
+	if (likely(state == AUDIT_DISABLED))
+		return 0;
+
+	if (!(context = audit_alloc_context(state))) {
+		kfree(key);
+		audit_log_lost("out of memory in audit_alloc");
+		return -ENOMEM;
+	}
+	context->filterkey = key;
+
+	tsk->audit_context = context;
+	set_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT);
+	return 0;
+}
+#endif
+
 static inline void audit_free_context(struct audit_context *context)
 {
 	struct audit_context *previous;
diff --git a/kernel/exit.c b/kernel/exit.c
index abf9cf3..b0040eb 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -56,6 +56,10 @@
 #include <asm/mmu_context.h>
 #include "cred-internals.h"
 
+#ifdef CONFIG_UNIFIED_KERNEL
+#include <linux/win32_thread.h>
+#endif
+
 DEFINE_TRACE(sched_process_free);
 DEFINE_TRACE(sched_process_exit);
 DEFINE_TRACE(sched_process_wait);
@@ -527,6 +531,9 @@ void put_files_struct(struct files_struct *files)
 		free_fdtable(fdt);
 	}
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(put_files_struct);
+#endif
 
 void reset_files_struct(struct files_struct *files)
 {
@@ -854,6 +861,11 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 
 	write_unlock_irq(&tasklist_lock);
 
+#ifdef CONFIG_UNIFIED_KERNEL
+	ethread_notify_exit(tsk, tsk->exit_code);
+	exit_ethread(tsk);
+#endif
+
 	tracehook_report_death(tsk, signal, cookie, group_dead);
 
 	/* If the process is dead, release it - nobody will wait for it */
@@ -1019,6 +1031,73 @@ NORET_TYPE void do_exit(long code)
 
 EXPORT_SYMBOL_GPL(do_exit);
 
+#ifdef CONFIG_UNIFIED_KERNEL
+extern void exit_thread_for_task(struct task_struct *tsk);
+
+void do_exit_task(struct task_struct *tsk, long code)
+{
+	int group_dead;
+
+	profile_task_exit(tsk);
+
+	WARN_ON(atomic_read(&tsk->fs_excl));
+
+	tracehook_report_exit(&code);
+
+	exit_irq_thread();
+
+	exit_signals(tsk);  /* sets PF_EXITING */
+	/*
+	 * tsk->flags are checked in the futex code to protect against
+	 * an exiting task cleaning up the robust pi futexes.
+	 */
+	smp_mb();
+	spin_unlock_wait(&tsk->pi_lock);
+
+	acct_update_integrals(tsk);
+
+	group_dead = atomic_dec_and_test(&tsk->signal->live);
+	if (group_dead) {
+		hrtimer_cancel(&tsk->signal->real_timer);
+		exit_itimers(tsk->signal);
+	}
+	acct_collect(code, group_dead);
+	if (group_dead)
+		tty_audit_exit();
+
+	tsk->exit_code = code;
+	taskstats_exit(tsk, group_dead);
+
+	exit_mm(tsk);
+
+	if (group_dead)
+		acct_process();
+	trace_sched_process_exit(tsk);
+
+	exit_sem(tsk);
+	exit_files(tsk);
+	exit_fs(tsk);
+	check_stack_usage();
+	exit_thread_for_task(tsk);
+	cgroup_exit(tsk, 1);
+
+	if (group_dead && tsk->signal->leader)
+		disassociate_ctty(1);
+
+	module_put(task_thread_info(tsk)->exec_domain->module);
+	if (tsk->binfmt)
+		module_put(tsk->binfmt->module);
+
+	proc_exit_connector(tsk);
+	exit_notify(tsk, group_dead);
+#ifdef CONFIG_NUMA
+	mpol_put(tsk->mempolicy);
+	tsk->mempolicy = NULL;
+#endif
+}
+EXPORT_SYMBOL(do_exit_task);
+#endif
+
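+/*
+ * Descriptive note: do_exit_task() mirrors do_exit() but takes the
+ * victim task explicitly (the Win32 side terminates threads by object,
+ * not only by self-exit) and, unlike do_exit(), does not end with a
+ * final schedule(); the caller apparently remains responsible for
+ * putting the task to rest.
+ */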
 NORET_TYPE void complete_and_exit(struct completion *comp, long code)
 {
 	if (comp)
@@ -1064,6 +1143,9 @@ do_group_exit(int exit_code)
 	do_exit(exit_code);
 	/* NOTREACHED */
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(do_group_exit);
+#endif
 
 /*
  * this kills every thread in the thread group. Note that any externally
diff --git a/kernel/fork.c b/kernel/fork.c
index 875ffbd..eb1b2d8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -71,6 +71,10 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_UNIFIED_KERNEL
+#include <linux/win32_thread.h>
+#endif
+
 /*
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
@@ -731,6 +735,9 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
 out:
 	return error;
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(copy_files);
+#endif
 
 static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 {
@@ -1025,7 +1032,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->rcu_flipctr_idx = 0;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 	p->vfork_done = NULL;
+#ifdef CONFIG_UNIFIED_KERNEL
+	rwlock_init(&p->alloc_lock);
+#else
 	spin_lock_init(&p->alloc_lock);
+#endif
 
 	clear_tsk_thread_flag(p, TIF_SIGPENDING);
 	init_sigpending(&p->pending);
@@ -1178,6 +1189,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->pdeath_signal = 0;
 	p->exit_state = 0;
 
+#ifdef CONFIG_UNIFIED_KERNEL
+	p->ethread = NULL;
+	ethread_notify_fork(current, p, clone_flags);
+#endif
+
 	/*
 	 * Ok, make it visible to the rest of the system.
 	 * We dont wake it up yet.
@@ -1576,7 +1592,11 @@ static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
 /*
  * Unshare file descriptor table if it is being shared
  */
+#ifdef CONFIG_UNIFIED_KERNEL
+int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
+#else
 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
+#endif
 {
 	struct files_struct *fd = current->files;
 	int error = 0;
@@ -1590,6 +1610,9 @@ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
 
 	return 0;
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(unshare_fd);
+#endif
 
 /*
  * unshare allows a process to 'unshare' part of the process
@@ -1731,3 +1754,665 @@ int unshare_files(struct files_struct **displaced)
 	task_unlock(task);
 	return 0;
 }
+#ifdef CONFIG_UNIFIED_KERNEL
+EXPORT_SYMBOL(unshare_files);
+#endif
+
+#ifdef CONFIG_UNIFIED_KERNEL
+/* FIXME: added for NtCreateProcess() and NtCreateThread() */
+
+#ifdef CONFIG_AUDITSYSCALL
+extern int audit_alloc_from_task(struct task_struct *ptsk, struct task_struct *tsk);
+#else
+#define audit_alloc_from_task(ptsk, tsk)	0
+#endif
+extern int init_new_context_from_task(struct task_struct *ptsk, struct task_struct *tsk, struct mm_struct *mm);
+
+static inline void clone_files(struct task_struct *tsk)
+{
+	if (tsk->files)
+		atomic_inc(&tsk->files->count);
+}
+
+static inline void clone_fs(struct task_struct *tsk)
+{
+	struct fs_struct *fs = tsk->fs;
+
+	write_lock(&fs->lock);
+	if (fs->in_exec) {
+		write_unlock(&fs->lock);
+		return;
+	}
+	fs->users++;
+	write_unlock(&fs->lock);
+}
+
+static inline void clone_sighand(struct task_struct *tsk)
+{
+	atomic_inc(&tsk->sighand->count);
+}
+
+static inline void clone_signal(struct task_struct *tsk)
+{
+	atomic_inc(&tsk->signal->count);
+	atomic_inc(&tsk->signal->live);
+}
+
+static inline int clone_mm(struct task_struct *parent, struct task_struct *child)
+{
+	struct mm_struct *mm, *oldmm;
+
+	child->min_flt = child->maj_flt = 0;
+	child->nvcsw = child->nivcsw = 0;
+#ifdef CONFIG_DETECT_HUNG_TASK
+	child->last_switch_count = child->nvcsw + child->nivcsw;
+#endif
+
+	child->mm = NULL;
+	child->active_mm = NULL;
+
+	/*
+	 * Are we cloning a kernel thread?
+	 *
+	 * We need to steal an active VM for that..
+ */ + oldmm = parent->mm; + if (!oldmm) + return 0; + + atomic_inc(&oldmm->mm_users); + mm = oldmm; + + /* + * There are cases where the PTL is held to ensure no + * new threads start up in user mode using an mm, which + * allows optimizing out ipis; the tlb_gather_mmu code + * is an example. + */ + spin_unlock_wait(&oldmm->page_table_lock); + + child->mm = mm; + child->active_mm = mm; + + return 0; +} + +static inline int dup_sighand(struct task_struct *parent, struct task_struct *child) +{ + struct sighand_struct *sig; + + sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); + rcu_assign_pointer(child->sighand, sig); + if (!sig) + return -ENOMEM; + atomic_set(&sig->count, 1); + memcpy(sig->action, current->sighand->action, sizeof(sig->action)); + return 0; +} + +static inline int create_mm(struct task_struct *parent, struct task_struct *child) +{ + struct mm_struct *mm; + + mm = allocate_mm(); + if (!mm) + return -ENOMEM; + + /* Copy the current MM stuff.. */ + memset(mm, 0, sizeof(*mm)); + if (!mm_init(mm, parent)) + return -ENOMEM; + + init_new_context_from_task(parent, child, mm); + + if (!mm->get_unmapped_area) + mm->get_unmapped_area = parent->mm->get_unmapped_area; + if (!mm->unmap_area) + mm->unmap_area = parent->mm->unmap_area; + + child->mm = mm; + child->active_mm = mm; + + return 0; +} + +static inline int dup_signal(struct task_struct *parent, struct task_struct *child) +{ + struct signal_struct *sig; + + sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); + child->signal = sig; + if (!sig) + return -ENOMEM; + + atomic_set(&sig->count, 1); + atomic_set(&sig->live, 1); + init_waitqueue_head(&sig->wait_chldexit); + sig->flags = 0; + sig->group_exit_code = 0; + sig->group_exit_task = NULL; + sig->group_stop_count = 0; + sig->curr_target = child; + init_sigpending(&sig->shared_pending); + INIT_LIST_HEAD(&sig->posix_timers); + + hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + sig->it_real_incr.tv64 = 0; + sig->real_timer.function = it_real_fn; + + sig->leader = 0; /* session leadership doesn't inherit */ + sig->tty_old_pgrp = NULL; + sig->tty = NULL; + + sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero; + sig->gtime = cputime_zero; + sig->cgtime = cputime_zero; + sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; + sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; + sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; + task_io_accounting_init(&sig->ioac); + sig->sum_sched_runtime = 0; + taskstats_tgid_init(sig); + + task_lock(parent->group_leader); + memcpy(sig->rlim, parent->signal->rlim, sizeof sig->rlim); + task_unlock(parent->group_leader); + + posix_cpu_timers_init_group(sig); + + acct_init_pacct(&sig->pacct); + + tty_audit_fork(sig); + +#ifdef CONFIG_AUDIT + /* tty_audit_fork */ + spin_lock_irq(&parent->sighand->siglock); + sig->audit_tty = parent->signal->audit_tty; + spin_unlock_irq(&parent->sighand->siglock); + sig->tty_audit_buf = NULL; +#endif + + return 0; +} + +static inline int dup_fs(struct task_struct *parent, struct task_struct *child) +{ + child->fs = copy_fs_struct(parent->fs); + return child->fs ? 
0 : -ENOMEM; +} + +extern int recalc_sigpending_tsk(struct task_struct *t); +static struct task_struct *copy_process_from_task(struct task_struct *ptsk, + unsigned long process_flags, + unsigned long clone_flags, + unsigned long stack_start, + struct pt_regs *regs, + unsigned long stack_size, + int __user *child_tidptr, + struct pid *pid, + int trace) +{ + int retval; + struct task_struct *p; + int cgroup_callbacks_done = 0; + + retval = security_task_create(clone_flags); + if (retval) + goto fork_out; + + retval = -ENOMEM; + p = dup_task_struct(ptsk); + if (!p) + goto fork_out; + + rt_mutex_init_task(p); + +#ifdef CONFIG_PROVE_LOCKING + DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); + DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); +#endif + retval = -EAGAIN; + if (atomic_read(&p->real_cred->user->processes) >= + p->signal->rlim[RLIMIT_NPROC].rlim_cur) { + if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && + p->real_cred->user != INIT_USER) + goto bad_fork_free; + } + + retval = copy_creds(p, clone_flags); + if (retval < 0) + goto bad_fork_free; + + /* + * If multiple threads are within copy_process(), then this check + * triggers too late. This doesn't hurt, the check is only there + * to stop root fork bombs. + */ + retval = -EAGAIN; + if (nr_threads >= max_threads) + goto bad_fork_cleanup_count; + + if (!try_module_get(task_thread_info(p)->exec_domain->module)) + goto bad_fork_cleanup_count; + + if (p->binfmt && !try_module_get(p->binfmt->module)) + goto bad_fork_cleanup_put_domain; + + p->did_exec = 0; + delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ + copy_flags(clone_flags, p); + INIT_LIST_HEAD(&p->children); + INIT_LIST_HEAD(&p->sibling); +#ifdef CONFIG_PREEMPT_RCU + p->rcu_read_lock_nesting = 0; + p->rcu_flipctr_idx = 0; +#endif /* #ifdef CONFIG_PREEMPT_RCU */ + p->vfork_done = NULL; + rwlock_init(&p->alloc_lock); + + clear_tsk_thread_flag(p, TIF_SIGPENDING); + init_sigpending(&p->pending); + + p->utime = cputime_zero; + p->stime = cputime_zero; + p->gtime = cputime_zero; + p->utimescaled = cputime_zero; + p->stimescaled = cputime_zero; + p->prev_utime = cputime_zero; + p->prev_stime = cputime_zero; + + p->default_timer_slack_ns = ptsk->timer_slack_ns; + + task_io_accounting_init(&p->ioac); + acct_clear_integrals(p); + + posix_cpu_timers_init(p); + + p->lock_depth = -1; /* -1 = no lock */ + do_posix_clock_monotonic_gettime(&p->start_time); + p->real_start_time = p->start_time; + monotonic_to_bootbased(&p->real_start_time); + p->io_context = NULL; + p->audit_context = NULL; + cgroup_fork(p); +#ifdef CONFIG_NUMA + p->mempolicy = mpol_dup(p->mempolicy); + if (IS_ERR(p->mempolicy)) { + retval = PTR_ERR(p->mempolicy); + p->mempolicy = NULL; + goto bad_fork_cleanup_cgroup; + } + mpol_fix_fork_child_flag(p); +#endif +#ifdef CONFIG_TRACE_IRQFLAGS + p->irq_events = 0; +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW + p->hardirqs_enabled = 1; +#else + p->hardirqs_enabled = 0; +#endif + p->hardirq_enable_ip = 0; + p->hardirq_enable_event = 0; + p->hardirq_disable_ip = _THIS_IP_; + p->hardirq_disable_event = 0; + p->softirqs_enabled = 1; + p->softirq_enable_ip = _THIS_IP_; + p->softirq_enable_event = 0; + p->softirq_disable_ip = 0; + p->softirq_disable_event = 0; + p->hardirq_context = 0; + p->softirq_context = 0; +#endif +#ifdef CONFIG_LOCKDEP + p->lockdep_depth = 0; /* no locks held yet */ + p->curr_chain_key = 0; + p->lockdep_recursion = 0; +#endif + +#ifdef CONFIG_DEBUG_MUTEXES + p->blocked_on = NULL; /* not blocked yet */ +#endif + if (unlikely(ptsk->ptrace)) + ptrace_fork(p, 
clone_flags); + + /* Perform scheduler related setup. Assign this task to a CPU. */ + sched_fork(p, clone_flags); + + if ((retval = audit_alloc_from_task(ptsk, p))) + goto bad_fork_cleanup_policy; + /* copy all the process information */ + p->sysvsem.undo_list = NULL; /* SysV semaphores are not used */ + + if (process_flags & CREATE_PROCESS) { + if ((retval = copy_files(0, p))) + goto bad_fork_cleanup_audit; + if ((retval = dup_fs(ptsk, p))) + goto bad_fork_cleanup_files; + if ((retval = dup_sighand(ptsk, p))) + goto bad_fork_cleanup_fs; + if ((retval = dup_signal(ptsk, p))) + goto bad_fork_cleanup_sighand; + if ((retval = create_mm(ptsk, p))) + goto bad_fork_cleanup_signal; + } + else { + clone_files(ptsk); + clone_fs(ptsk); + clone_sighand(ptsk); + clone_signal(ptsk); + clone_mm(ptsk, p); + } + + if ((retval = copy_namespaces(clone_flags, p))) + goto bad_fork_cleanup_mm; + if ((retval = copy_io(clone_flags, p))) + goto bad_fork_cleanup_namespaces; + retval = copy_thread(clone_flags, stack_start, stack_size, p, regs); + if (retval) + goto bad_fork_cleanup_io; + + /* p->thread.io_bitmap_ptr is copied from current->thread.io_bitmap_ptr */ + if (ptsk != current) { + if (ptsk->thread.io_bitmap_ptr) { + if (!current->thread.io_bitmap_ptr) { + /* copy_thread() found no bitmap on current, so allocate one for p here */ + p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); + if (!p->thread.io_bitmap_ptr) { + p->thread.io_bitmap_max = 0; + goto bad_fork_cleanup_namespaces; + } + } + memcpy(p->thread.io_bitmap_ptr, ptsk->thread.io_bitmap_ptr, IO_BITMAP_BYTES); + } + else { + if (current->thread.io_bitmap_ptr) { + kfree(p->thread.io_bitmap_ptr); + p->thread.io_bitmap_ptr = NULL; + } + } + } + + if (pid != &init_struct_pid) { + retval = -ENOMEM; + pid = alloc_pid(p->nsproxy->pid_ns); + if (!pid) + goto bad_fork_cleanup_io; + + if (clone_flags & CLONE_NEWPID) { + retval = pid_ns_prepare_proc(p->nsproxy->pid_ns); + if (retval < 0) + goto bad_fork_free_pid; + } + } + + ftrace_graph_init_task(p); + + p->pid = pid_nr(pid); + p->tgid = p->pid; + if (clone_flags & CLONE_THREAD) + p->tgid = ptsk->tgid; + + if (ptsk->nsproxy != p->nsproxy) { + retval = ns_cgroup_clone(p, pid); + if (retval) + goto bad_fork_free_graph; + } + + p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; + /* + * Clear TID on mm_release()? + */ + p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; +#ifdef CONFIG_FUTEX + p->robust_list = NULL; +#ifdef CONFIG_COMPAT + p->compat_robust_list = NULL; +#endif + INIT_LIST_HEAD(&p->pi_state_list); + p->pi_state_cache = NULL; +#endif + /* + * sigaltstack should be cleared when sharing the same VM + */ + if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) + p->sas_ss_sp = p->sas_ss_size = 0; + + /* + * Syscall tracing should be turned off in the child regardless + * of CLONE_PTRACE. + */ + clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); +#ifdef TIF_SYSCALL_EMU + clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); +#endif + clear_all_latency_tracing(p); + + /* ok, now we should be set up.. */ + p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL); + p->pdeath_signal = 0; + p->exit_state = 0; + + p->ethread = NULL; + ethread_notify_fork(ptsk, p, clone_flags); + + /* + * Ok, make it visible to the rest of the system. + * We don't wake it up yet. + */ + p->group_leader = p; + INIT_LIST_HEAD(&p->thread_group); + + /* Now that the task is set up, run cgroup callbacks if + * necessary. 
We need to run them before the task is visible + * on the tasklist. */ + cgroup_fork_callbacks(p); + cgroup_callbacks_done = 1; + + /* Need tasklist lock for parent etc. handling! */ + write_lock_irq(&tasklist_lock); + + /* + * The task hasn't been attached yet, so its cpus_allowed mask will + * not be changed, nor will its assigned CPU. + * + * The cpus_allowed mask of the parent may have changed after it was + * copied the first time - so re-copy it here, then check the child's CPU + * to ensure it is on a valid CPU (and if not, just force it back to + * parent's CPU). This avoids a lot of nasty races. + */ + p->cpus_allowed = ptsk->cpus_allowed; + p->rt.nr_cpus_allowed = ptsk->rt.nr_cpus_allowed; + if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) || + !cpu_online(task_cpu(p)))) + set_task_cpu(p, smp_processor_id()); + + /* CLONE_PARENT re-uses the old parent */ + if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { + p->real_parent = ptsk->real_parent; + p->parent_exec_id = ptsk->parent_exec_id; + } else { + p->real_parent = ptsk; + p->parent_exec_id = ptsk->self_exec_id; + } + + spin_lock(&ptsk->sighand->siglock); + + /* + * Process group and session signals need to be delivered to just the + * parent before the fork or both the parent and the child after the + * fork. Restart if a signal comes in before we add the new process to + * its process group. + * A fatal signal pending means that the parent task will exit, so the new + * thread can't slip out of an OOM kill (or normal SIGKILL). + */ + recalc_sigpending(); + if (signal_pending(ptsk)) { + spin_unlock(&ptsk->sighand->siglock); + write_unlock_irq(&tasklist_lock); + retval = -ERESTARTNOINTR; + goto bad_fork_free_graph; + } + + if (clone_flags & CLONE_THREAD) { + p->group_leader = ptsk->group_leader; + list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); + } + + if (likely(p->pid)) { + list_add_tail(&p->sibling, &p->real_parent->children); + tracehook_finish_clone(p, clone_flags, trace); + + if (thread_group_leader(p)) { + if (clone_flags & CLONE_NEWPID) + p->nsproxy->pid_ns->child_reaper = p; + + p->signal->leader_pid = pid; + tty_kref_put(p->signal->tty); + p->signal->tty = tty_kref_get(ptsk->signal->tty); + attach_pid(p, PIDTYPE_PGID, task_pgrp(ptsk)); + attach_pid(p, PIDTYPE_SID, task_session(ptsk)); + list_add_tail_rcu(&p->tasks, &init_task.tasks); + __get_cpu_var(process_counts)++; + } + attach_pid(p, PIDTYPE_PID, pid); + nr_threads++; + } + + total_forks++; + spin_unlock(&ptsk->sighand->siglock); + write_unlock_irq(&tasklist_lock); + proc_fork_connector(p); + cgroup_post_fork(p); + return p; + +bad_fork_free_graph: + ftrace_graph_exit_task(p); +bad_fork_free_pid: + if (pid != &init_struct_pid) + free_pid(pid); +bad_fork_cleanup_io: + put_io_context(p->io_context); +bad_fork_cleanup_namespaces: + exit_task_namespaces(p); +bad_fork_cleanup_mm: + if (p->mm) + mmput(p->mm); +bad_fork_cleanup_signal: + cleanup_signal(p); +bad_fork_cleanup_sighand: + __cleanup_sighand(p->sighand); +bad_fork_cleanup_fs: + exit_fs(p); /* blocking */ +bad_fork_cleanup_files: + exit_files(p); /* blocking */ +bad_fork_cleanup_audit: + audit_free(p); +bad_fork_cleanup_policy: +#ifdef CONFIG_NUMA + mpol_put(p->mempolicy); +bad_fork_cleanup_cgroup: +#endif + cgroup_exit(p, cgroup_callbacks_done); + delayacct_tsk_free(p); + if (p->binfmt) + module_put(p->binfmt->module); +bad_fork_cleanup_put_domain: + module_put(task_thread_info(p)->exec_domain->module); +bad_fork_cleanup_count: + atomic_dec(&p->cred->user->processes); + put_cred(p->real_cred); + 
put_cred(p->cred); +bad_fork_free: + free_task(p); +fork_out: + return ERR_PTR(retval); +} + +long do_fork_from_task(struct task_struct *ptsk, + unsigned long process_flags, + unsigned long clone_flags, + unsigned long stack_start, + struct pt_regs *regs, + unsigned long stack_size, + int __user *parent_tidptr, + int __user *child_tidptr) +{ + struct task_struct *p; + int trace = 0; + long nr; + + /* + * Do some preliminary argument and permissions checking before we + * actually start allocating stuff + */ + if (clone_flags & CLONE_NEWUSER) { + if (clone_flags & CLONE_THREAD) + return -EINVAL; + /* hopefully this check will go away when userns support is + * complete + */ + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || + !capable(CAP_SETGID)) + return -EPERM; + } + + /* + * We hope to recycle these flags after 2.6.26 + */ + if (unlikely(clone_flags & CLONE_STOPPED)) { + static int __read_mostly count = 100; + + if (count > 0 && printk_ratelimit()) { + char comm[TASK_COMM_LEN]; + + count--; + printk(KERN_INFO "fork(): process `%s' used deprecated " + "clone flags 0x%lx\n", + get_task_comm(comm, ptsk), + clone_flags & CLONE_STOPPED); + } + } + + /* + * When called from kernel_thread, don't do user tracing stuff. + */ + if (likely(user_mode(regs))) + trace = tracehook_prepare_clone(clone_flags); + + p = copy_process_from_task(ptsk, process_flags, clone_flags, stack_start, regs, stack_size, + child_tidptr, NULL, trace); + /* + * Do this prior to waking up the new thread - the thread pointer + * might become invalid after that point, if the thread exits quickly. + */ + if (!IS_ERR(p)) { + trace_sched_process_fork(ptsk, p); + + nr = task_pid_vnr(p); + + if (clone_flags & CLONE_PARENT_SETTID) + put_user(nr, parent_tidptr); + + audit_finish_fork(p); + tracehook_report_clone(regs, clone_flags, nr, p); + + /* + * We set PF_STARTING at creation in case tracing wants to + * use this to distinguish a fully live task from one that + * hasn't gotten to tracehook_report_clone() yet. Now we + * clear it; the child is left in TASK_UNINTERRUPTIBLE for + * the caller to wake up explicitly. + */ + p->flags &= ~PF_STARTING; + p->state = TASK_UNINTERRUPTIBLE; + + tracehook_report_clone_complete(trace, regs, + clone_flags, nr, p); + + } else { + nr = PTR_ERR(p); + } + return nr; +} +EXPORT_SYMBOL(do_fork_from_task); +#endif diff --git a/kernel/sched.c b/kernel/sched.c index 26efa47..7264fbd 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2550,6 +2550,9 @@ void sched_fork(struct task_struct *p, int clone_flags) put_cpu(); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sched_fork); +#endif /* * wake_up_new_task - wake up a newly created task for the first time. @@ -2587,6 +2590,9 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) #endif task_rq_unlock(rq, &flags); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(wake_up_new_task); +#endif #ifdef CONFIG_PREEMPT_NOTIFIERS @@ -2746,6 +2752,9 @@ asmlinkage void schedule_tail(struct task_struct *prev) if (current->set_child_tid) put_user(task_pid_vnr(current), current->set_child_tid); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(schedule_tail); +#endif /* * context_switch - switch to the new MM and the new diff --git a/kernel/signal.c b/kernel/signal.c index d803473..11b80c1 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -35,6 +35,9 @@ #include #include "audit.h" /* audit_signal_info() */ +#ifdef CONFIG_UNIFIED_KERNEL +#include "linux/win32_thread.h" +#endif /* * SLAB caches for signal bits. 
*/ @@ -120,7 +123,11 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) +#ifdef CONFIG_UNIFIED_KERNEL +int recalc_sigpending_tsk(struct task_struct *t) +#else static int recalc_sigpending_tsk(struct task_struct *t) +#endif { if (t->signal->group_stop_count > 0 || PENDING(&t->pending, &t->blocked) || @@ -466,6 +473,10 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) } } + +#ifdef CONFIG_UNIFIED_KERNEL + ethread_notify_signal(current, signr); +#endif + recalc_sigpending(); if (!signr) return 0; @@ -806,7 +817,11 @@ static void complete_signal(int sig, struct task_struct *p, int group) do { sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); +#ifdef CONFIG_UNIFIED_KERNEL + } while (((t = next_thread(t)) != p) && !t->ethread); +#else } while_each_thread(p, t); +#endif return; } } @@ -1664,6 +1679,9 @@ void ptrace_notify(int exit_code) ptrace_stop(exit_code, 1, &info); spin_unlock_irq(&current->sighand->siglock); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(ptrace_notify); +#endif static void finish_stop(int stop_count) @@ -1942,6 +1960,11 @@ relock: /* * Death signals, no core dump. */ +#ifdef CONFIG_UNIFIED_KERNEL + if (current->ethread && !(current->signal->flags & SIGNAL_GROUP_EXIT)) + do_exit((current->ethread->exit_status & 0xff) << 8); + else +#endif do_group_exit(info->si_signo); /* NOTREACHED */ } diff --git a/kernel/win32_thread.c b/kernel/win32_thread.c new file mode 100644 index 0000000..c314873 --- /dev/null +++ b/kernel/win32_thread.c @@ -0,0 +1,149 @@ +/* + * win32_thread.c + * + * Copyright (C) 2006 Insigma Co., Ltd + * + * This software has been developed while working on the Linux Unified Kernel + * project (http://linux.insigma.com.cn) in the Insigma Research Institute, + * which is a subdivision of Insigma Co., Ltd (http://www.insigma.com.cn). + * + * The project is sponsored by Insigma Co., Ltd. + * + * The authors can be reached at linux@insigma.com.cn. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Revision History: + * Jan 2006 - Created. 
+ */ + +#include <linux/module.h> +#include <linux/win32_thread.h> + +/* add an ethread to a task */ +void add_ethread(struct task_struct *tsk, struct ethread *thread) +{ + etget(thread); + + write_lock(&tsk->alloc_lock); + tsk->ethread = thread; + write_unlock(&tsk->alloc_lock); +} /* end add_ethread() */ +EXPORT_SYMBOL(add_ethread); + +/* remove an ethread from a task */ +void remove_ethread(struct task_struct *tsk, struct ethread *thread) +{ + write_lock(&tsk->alloc_lock); + if (tsk->ethread == thread) + tsk->ethread = NULL; + else + thread = NULL; + write_unlock(&tsk->alloc_lock); + + if (thread) + etput(thread); +} /* end remove_ethread() */ +EXPORT_SYMBOL(remove_ethread); + +/* clean up a task's ethread on exit() */ +void exit_ethread(struct task_struct *tsk) +{ + struct ethread *thread; + + write_lock(&tsk->alloc_lock); + thread = tsk->ethread; + tsk->ethread = NULL; + write_unlock(&tsk->alloc_lock); + + if (thread) + etput(thread); +} /* end exit_ethread() */ +EXPORT_SYMBOL(exit_ethread); + +/* + * notify an ethread of a process that is exiting + * - this'll be called from notify_parent() in kernel/signal.c + */ +void __ethread_notify_exit(struct task_struct *tsk, int exit_code) +{ + struct ethread *thread; + + read_lock(&tsk->alloc_lock); + thread = tsk->ethread; + read_unlock(&tsk->alloc_lock); + + etget(thread); + thread->et_exit_called = 1; + if (thread->et_ops->exit) + thread->et_ops->exit(thread, exit_code); /* call the operation */ + etput(thread); +} /* end __ethread_notify_exit() */ +EXPORT_SYMBOL(__ethread_notify_exit); + +/* + * notify an ethread of a signal being delivered to a process that would cause + * the parent process to get SIGCHLD + * - this'll be called from dequeue_signal() in kernel/signal.c + * - return WIN32_THREAD_SIGNAL_OKAY to keep the signal + * - return WIN32_THREAD_CANCEL_SIGNAL to cancel the signal immediately + */ +int __ethread_notify_signal(struct task_struct *tsk, int signal) +{ + struct ethread *thread; + + read_lock(&tsk->alloc_lock); + thread = tsk->ethread; + read_unlock(&tsk->alloc_lock); + + etget(thread); + if (thread->et_ops->signal) + thread->et_ops->signal(thread, signal); /* call the operation */ + etput(thread); + + return WIN32_THREAD_SIGNAL_OKAY; +} /* end __ethread_notify_signal() */ +EXPORT_SYMBOL(__ethread_notify_signal); + +/* + * notify an ethread of a process execve'ing itself + * - this'll be called from flush_old_exec() in fs/exec.c + */ +void __ethread_notify_execve(struct task_struct *tsk) +{ + struct ethread *thread; + + read_lock(&tsk->alloc_lock); + thread = tsk->ethread; + read_unlock(&tsk->alloc_lock); + + etget(thread); + if (thread->et_ops->execve) + thread->et_ops->execve(thread); /* call the operation */ + etput(thread); +} /* end __ethread_notify_execve() */ +EXPORT_SYMBOL(__ethread_notify_execve); + +/* + * notify an ethread of a process forking/cloning itself + * - this'll be called from do_fork() in kernel/fork.c + */ +void __ethread_notify_fork(struct task_struct *tsk, + struct task_struct *child, + unsigned long clone_flags) +{ + struct ethread *thread; + + read_lock(&tsk->alloc_lock); + thread = tsk->ethread; + read_unlock(&tsk->alloc_lock); + + etget(thread); + if (thread->et_ops->fork) + thread->et_ops->fork(thread, tsk, child, clone_flags); /* call the operation */ + etput(thread); +} /* end __ethread_notify_fork() */ +EXPORT_SYMBOL(__ethread_notify_fork); diff --git a/mm/mmap.c b/mm/mmap.c index 6b7b1a9..6ed988a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -44,6 +44,19 @@ #define arch_rebalance_pgtables(addr, len) (addr) #endif +#ifdef CONFIG_UNIFIED_KERNEL
+ +#define MAP_RESERVE 0x10000000 +#define MAP_TOP_DOWN 0x20000000 + +#define MMAP_TOP_DOWN_BASE 0x7fff0000 + +#define RESERVE_PAGE_SIZE (16 * PAGE_SIZE) /* 64KB, the Win32 allocation granularity */ +#define RESERVE_PAGE_SHIFT (PAGE_SHIFT + 4) +#define RESERVE_PAGE_MASK (~(RESERVE_PAGE_SIZE - 1)) + +#endif + static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end); @@ -1269,6 +1282,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long start_addr; +#ifdef CONFIG_UNIFIED_KERNEL + unsigned long reserved_len = (len + RESERVE_PAGE_SIZE - 1) & RESERVE_PAGE_MASK; +#endif if (len > TASK_SIZE) return -ENOMEM; @@ -1276,6 +1292,17 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (flags & MAP_FIXED) return addr; +#ifdef CONFIG_UNIFIED_KERNEL + if (current->ethread && (flags & MAP_TOP_DOWN)) { + unsigned long old_mmap_base = mm->mmap_base; + + mm->mmap_base = MMAP_TOP_DOWN_BASE; + addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); + mm->mmap_base = old_mmap_base; + return addr; + } +#endif + if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); @@ -1286,7 +1313,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (len > mm->cached_hole_size) { start_addr = addr = mm->free_area_cache; } else { +#ifdef CONFIG_UNIFIED_KERNEL + start_addr = addr = mm->mmap_base; +#else start_addr = addr = TASK_UNMAPPED_BASE; +#endif mm->cached_hole_size = 0; } @@ -1298,8 +1329,13 @@ full_search: * Start a new search - just in case we missed * some holes. */ +#ifdef CONFIG_UNIFIED_KERNEL + if (start_addr != mm->mmap_base) { + addr = mm->mmap_base; +#else if (start_addr != TASK_UNMAPPED_BASE) { addr = TASK_UNMAPPED_BASE; +#endif start_addr = addr; mm->cached_hole_size = 0; goto full_search; @@ -1307,6 +1343,19 @@ full_search: return -ENOMEM; } if (!vma || addr + len <= vma->vm_start) { +#ifdef CONFIG_UNIFIED_KERNEL + if (current->ethread && (flags & MAP_RESERVE)) { + addr = ((addr + RESERVE_PAGE_SIZE - 1) & RESERVE_PAGE_MASK); + if (vma && addr + reserved_len > vma->vm_start) { /* NULL vma = hole at top of address space */ + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = vma->vm_end; + continue; + } + mm->free_area_cache = addr + reserved_len; + } + else +#endif /* * Remember the place where we stopped the search: */ @@ -1325,7 +1374,11 @@ void arch_unmap_area(struct mm_struct *mm, unsigned long addr) /* * Is this a new hole at the lowest possible address? 
*/ +#ifdef CONFIG_UNIFIED_KERNEL + if (addr >= mm->mmap_base && addr < mm->free_area_cache) { +#else if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) { +#endif mm->free_area_cache = addr; mm->cached_hole_size = ~0UL; } @@ -1738,6 +1791,9 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr) return vma; } #endif +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(find_extend_vma); +#endif /* * Ok - we have the memory areas we should free on the vma list, diff --git a/mm/mprotect.c b/mm/mprotect.c index 258197b..34ad0b3 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -28,6 +28,10 @@ #include #include +#ifdef CONFIG_UNIFIED_KERNEL +#include <linux/module.h> +#endif + #ifndef pgprot_modify static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) { @@ -316,3 +320,6 @@ out: up_write(&current->mm->mmap_sem); return error; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_mprotect); +#endif diff --git a/mm/msync.c b/mm/msync.c index 4083209..6c3482b 100644 --- a/mm/msync.c +++ b/mm/msync.c @@ -14,6 +14,10 @@ #include #include +#ifdef CONFIG_UNIFIED_KERNEL +#include <linux/module.h> +#endif + /* * MS_SYNC syncs the entire file - including mappings. * @@ -101,3 +105,6 @@ out_unlock: out: return error ? : unmapped_error; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_msync); +#endif diff --git a/mm/thrash.c b/mm/thrash.c index c4c5205..4a5c5dd 100644 --- a/mm/thrash.c +++ b/mm/thrash.c @@ -22,6 +22,10 @@ #include #include +#ifdef CONFIG_UNIFIED_KERNEL +#include <linux/module.h> +#endif + static DEFINE_SPINLOCK(swap_token_lock); struct mm_struct *swap_token_mm; static unsigned int global_faults; @@ -68,6 +72,9 @@ out: spin_unlock(&swap_token_lock); return; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(grab_swap_token); +#endif /* Called on process exit. */ void __put_swap_token(struct mm_struct *mm) diff --git a/net/socket.c b/net/socket.c index 791d71a..b98668c 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1299,6 +1299,9 @@ out_release: sock_release(sock); return retval; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_socket); +#endif /* * Create a pair of connected sockets. @@ -1396,6 +1399,9 @@ out_fd1: put_unused_fd(fd2); goto out; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_socketpair); +#endif /* * Bind a name to a socket. Nothing much to do here since it's @@ -1554,6 +1560,9 @@ SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr, { return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_accept); +#endif /* * Attempt to connect to a socket with the server address. The address @@ -1757,6 +1766,9 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, out: return err; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_recvfrom); +#endif /* * Receive a datagram from a socket. @@ -1801,6 +1813,9 @@ out_put: } return err; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_setsockopt); +#endif /* * Get a socket option. Because we don't know the option lengths we have @@ -1832,6 +1847,9 @@ out_put: } return err; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_getsockopt); +#endif /* * Shutdown a socket. @@ -1851,6 +1869,9 @@ SYSCALL_DEFINE2(shutdown, int, fd, int, how) } return err; } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(sys_shutdown); +#endif /* A couple of helpful macros for getting the address of the 32/64 bit * fields which are the same type (int / unsigned) on our platforms. 
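Note on the socket-call exports above: they exist so that the out-of-tree Win32 subsystem module can service Winsock requests by invoking the native syscall bodies directly, in the context of the calling task. The sketch below illustrates one plausible caller; it assumes only the EXPORT_SYMBOL lines added in this patch, and the helper name, option handling, and error convention are illustrative, not part of the patch.

/*
 * Illustrative sketch only -- not part of the patch. A module-side
 * helper that creates a TCP socket on behalf of a Win32 emulation
 * request, relying solely on EXPORT_SYMBOL(sys_socket) and
 * EXPORT_SYMBOL(sys_setsockopt) added above.
 */
#include <linux/module.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/socket.h>
#include <linux/syscalls.h>
#include <asm/uaccess.h>

static long w32_socket_create(int reuse)
{
	long fd = sys_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);

	if (fd < 0)
		return fd;	/* negative errno; the caller maps it to a Win32 error */

	if (reuse) {
		int one = 1;
		mm_segment_t old_fs = get_fs();

		/*
		 * The syscall bodies expect user-space pointers, so widen
		 * the address limit around the call (the usual 2.6-era
		 * idiom for calling sys_* with a kernel buffer).
		 */
		set_fs(KERNEL_DS);
		sys_setsockopt((int)fd, SOL_SOCKET, SO_REUSEADDR,
			       (char __user *)&one, sizeof(one));
		set_fs(old_fs);
	}
	return fd;	/* an fd in current->files, just as for a native caller */
}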
diff --git a/security/security.c b/security/security.c index 5284255..f16ad78 100644 --- a/security/security.c +++ b/security/security.c @@ -255,6 +255,9 @@ int security_bprm_secureexec(struct linux_binprm *bprm) { return security_ops->bprm_secureexec(bprm); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(security_bprm_secureexec); +#endif int security_sb_alloc(struct super_block *sb) { @@ -655,6 +658,9 @@ int security_file_lock(struct file *file, unsigned int cmd) { return security_ops->file_lock(file, cmd); } +#ifdef CONFIG_UNIFIED_KERNEL +EXPORT_SYMBOL(security_file_lock); +#endif int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { -- 1.6.4.2
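For readers of the patch in isolation: the ethread notification hooks added in kernel/win32_thread.c are driven from the generic fork/exit/signal/exec paths, but their consumer is the separate Win32 subsystem module, which supplies the et_ops callback table. A minimal sketch of such a consumer follows; only add_ethread(), the et_ops field, and the exit/signal/execve/fork callback slots are taken from this patch, while the ops-structure tag, the callback return types, and every w32_* name are assumptions.

/*
 * Illustrative sketch only -- not part of the patch. Shows how a Win32
 * subsystem module might hook the ethread notifications exported from
 * kernel/win32_thread.c. Unset callback slots are fine: the notify
 * helpers above check each et_ops pointer before calling it.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/win32_thread.h>

static void w32_thread_exit(struct ethread *thread, int exit_code)
{
	/* tear down Win32-side thread state (handle table slot, TEB, ...) */
}

static void w32_thread_fork(struct ethread *thread,
			    struct task_struct *parent,
			    struct task_struct *child,
			    unsigned long clone_flags)
{
	/* mirror the new task into the Win32 process/thread bookkeeping */
}

static struct ethread_operations w32_ethread_ops = {	/* struct tag assumed */
	.exit = w32_thread_exit,
	.fork = w32_thread_fork,
};

/*
 * Attach a previously allocated ethread to a task. add_ethread()
 * takes its own reference on the ethread via etget(), so the caller
 * keeps (and eventually drops) its own reference.
 */
static void w32_attach(struct task_struct *tsk, struct ethread *thread)
{
	thread->et_ops = &w32_ethread_ops;
	add_ethread(tsk, thread);
}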