[PATCH 2] utrace: register sets

This provides a new uniform interface in <linux/tracehook.h> for accessing registers and similar per-thread machine resources. The old architecture ptrace code for accessing register state is rolled into new functions to flesh out the utrace_regset interface. Nothing yet uses this interface. The hope is that this interface can cover most of the machine-dependent issues for any higher-level tracing/debugging interface.

Signed-off-by: Roland McGrath

---
 include/asm-powerpc/tracehook.h     |   13 +
 include/asm-i386/i387.h             |   13 -
 include/asm-i386/tracehook.h        |    7
 include/asm-x86_64/fpu32.h          |    3
 include/asm-x86_64/tracehook.h      |   11 +
 include/linux/tracehook.h           |  221 ++++
 kernel/ptrace.c                     |   45 --
 arch/i386/kernel/i387.c             |  143 ++++---
 arch/i386/kernel/ptrace.c           |  741 ++++++++++++++++++++---------------
 arch/powerpc/kernel/ptrace-common.h |  145 -------
 arch/powerpc/kernel/Makefile        |    4
 arch/powerpc/kernel/ptrace32.c      |  443 ---------------------
 arch/powerpc/kernel/ptrace.c        |  711 +++++++++++++++++-----------------
 arch/x86_64/kernel/ptrace.c         |  577 ++++++++++++++++-----------
 arch/x86_64/ia32/fpu32.c            |   92 +++-
 arch/x86_64/ia32/ptrace32.c         |  688 +++++++++++++++++++++-----------
 16 files changed, 1958 insertions(+), 1899 deletions(-)
 delete arch/powerpc/kernel/ptrace-common.h
 delete arch/powerpc/kernel/ptrace32.c

--- linux-2.6/include/asm-powerpc/tracehook.h +++ linux-2.6/include/asm-powerpc/tracehook.h @@ -71,4 +71,17 @@ static inline void tracehook_abort_sysca } +extern const struct utrace_regset_view utrace_ppc_native_view; +static inline const struct utrace_regset_view * +utrace_native_view(struct task_struct *tsk) +{ +#ifdef CONFIG_PPC64 + extern const struct utrace_regset_view utrace_ppc32_view; + + if (test_tsk_thread_flag(tsk, TIF_32BIT)) + return &utrace_ppc32_view; +#endif + return &utrace_ppc_native_view; +} + #endif
--- linux-2.6/include/asm-i386/i387.h +++ linux-2.6/include/asm-i386/i387.h @@ -129,17 +129,12 @@ extern int save_i387( struct _fpstate __ extern int restore_i387( struct _fpstate __user *buf ); /* - * ptrace request handers... + * ptrace request handlers... */ -extern int get_fpregs( struct user_i387_struct __user *buf, - struct task_struct *tsk ); -extern int set_fpregs( struct task_struct *tsk, - struct user_i387_struct __user *buf ); +extern int get_fpregs(struct user_i387_struct *, struct task_struct *); +extern int set_fpregs(struct task_struct *, const struct user_i387_struct *); +extern void updated_fpxregs(struct task_struct *tsk); -extern int get_fpxregs( struct user_fxsr_struct __user *buf, - struct task_struct *tsk ); -extern int set_fpxregs( struct task_struct *tsk, - struct user_fxsr_struct __user *buf ); /* * FPU state for core dumps...
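
To give a feel for how an architecture plugs into this, here is a minimal sketch (not part of the patch) of describing one register set and view with the interface added to <linux/tracehook.h> below. The foo_* names and the thread.foo_regs field are purely illustrative; only struct utrace_regset, struct utrace_regset_view, and the utrace_regset_copyout/copyin helpers are names this patch actually introduces.

	#include <linux/sched.h>
	#include <linux/tracehook.h>

	/* Hypothetical per-thread register block; not a real thread_struct field. */
	struct foo_regs {
		long r[16];
	};

	static int
	foo_regs_get(struct task_struct *target,
		     const struct utrace_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
	{
		/* One contiguous block starting at byte 0; -1 means "to the end". */
		return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
					     &target->thread.foo_regs, 0, -1);
	}

	static int
	foo_regs_set(struct task_struct *target,
		     const struct utrace_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
	{
		return utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
					    &target->thread.foo_regs, 0, -1);
	}

	static const struct utrace_regset foo_regsets[] = {
		{
			.n = sizeof(struct foo_regs) / sizeof(long),
			.size = sizeof(long), .align = sizeof(long),
			.get = foo_regs_get, .set = foo_regs_set,
		},
	};

	/* A real view would also set .e_machine to the arch's EM_* value. */
	const struct utrace_regset_view utrace_foo_native_view = {
		.name = "foo",
		.regsets = foo_regsets, .n = ARRAY_SIZE(foo_regsets),
	};

A tracing engine would find this through the arch's utrace_native_view(task), pick a regset out of ->regsets[], and call ->get/->set on the current or a quiescent thread, exactly as the per-arch code added below does for the real GP/FP/TLS/debug register sets.
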
--- linux-2.6/include/asm-i386/tracehook.h +++ linux-2.6/include/asm-i386/tracehook.h @@ -46,5 +46,12 @@ static inline void tracehook_abort_sysca regs->orig_eax = -1; } +extern const struct utrace_regset_view utrace_i386_native; +static inline const struct utrace_regset_view * +utrace_native_view(struct task_struct *tsk) +{ + return &utrace_i386_native; +} + #endif --- linux-2.6/include/asm-x86_64/fpu32.h +++ linux-2.6/include/asm-x86_64/fpu32.h @@ -7,4 +7,7 @@ int restore_i387_ia32(struct task_struct int save_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, struct pt_regs *regs, int fsave); +int get_fpregs32(struct user_i387_ia32_struct *, struct task_struct *); +int set_fpregs32(struct task_struct *, const struct user_i387_ia32_struct *); + #endif --- linux-2.6/include/asm-x86_64/tracehook.h +++ linux-2.6/include/asm-x86_64/tracehook.h @@ -46,4 +46,15 @@ static inline void tracehook_abort_sysca regs->orig_rax = -1L; } +extern const struct utrace_regset_view utrace_x86_64_native, utrace_ia32_view; +static inline const struct utrace_regset_view * +utrace_native_view(struct task_struct *tsk) +{ +#ifdef CONFIG_IA32_EMULATION + if (test_tsk_thread_flag(tsk, TIF_IA32)) + return &utrace_ia32_view; +#endif + return &utrace_x86_64_native; +} + #endif --- linux-2.6/include/linux/tracehook.h +++ linux-2.6/include/linux/tracehook.h @@ -27,6 +27,7 @@ #define _LINUX_TRACEHOOK_H 1 #include +#include struct linux_binprm; struct pt_regs; @@ -80,7 +81,225 @@ struct pt_regs; * * void tracehook_abort_syscall(struct pt_regs *regs); * - */ + * Return the regset view (see below) that is native for the given process. + * For example, what it would access when it called ptrace. + * Throughout the life of the process, this only changes at exec. + * + * const struct utrace_regset_view *utrace_native_view(struct task_struct *); + * + ***/ + + +/* + * This data structure describes a machine resource we call a register set. + * This is part of the state of an individual thread, not necessarily + * actual CPU registers per se. A register set consists of a number of + * similar slots, given by ->n. Each slot is ->size bytes, and aligned to + * ->align bytes (which is at least ->size). + * + * As described above, these entry points can be called on the current + * thread or on a quiescent thread. The pos argument must be aligned + * according to ->align; the count argument must be a multiple of ->size. + * These functions are not responsible for checking for invalid arguments. + * + * When there is a natural value to use as an index, ->bias gives the + * difference between the natural index and the slot index for the + * register set. For example, x86 GDT segment descriptors form a regset; + * the segment selector produces a natural index, but only a subset of + * that index space is available as a regset (the TLS slots); subtracting + * ->bias from a segment selector index value computes the regset slot. + */ +struct utrace_regset { + unsigned int n; /* Number of slots (registers). */ + unsigned int size; /* Size in bytes of a slot (register). */ + unsigned int align; /* Required alignment, in bytes. */ + unsigned int bias; /* Bias from natural indexing. */ + + /* + * Return -ENODEV if not available on the hardware found. + * Return 0 if no interesting state in this thread. + * Return >0 number of ->size units of interesting state. 
+ * Any get call fetching state beyond that number will + * see the default initialization state for this data, + * so a caller that knows what the default state is need + * not copy it all out. + * This call is optional; the pointer is NULL if there + * is no inexpensive check to yield a value < .n. + */ + int (*active)(struct task_struct *, const struct utrace_regset *); + + /* + * Fetch and store register values. Return 0 on success; -EIO or + * -ENODEV are usual failure returns. The pos and count values are + * in bytes, but must be properly aligned. If kbuf is non-null, + * that buffer is used and ubuf is ignored. If kbuf is NULL, then + * ubuf gives a userland pointer to access directly, and an -EFAULT + * return value is possible. + */ + int (*get)(struct task_struct *, const struct utrace_regset *, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf); + int (*set)(struct task_struct *, const struct utrace_regset *, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf); + + /* + * This call is optional; usually the pointer is NULL. + * When provided, there is some user memory associated + * with this regset's hardware, such as memory backing + * cached register data on register window machines; the + * regset's data controls what user memory is used + * (e.g. via the stack pointer value). + * + * Write register data back to user memory. If the + * immediate flag is nonzero, it must be written to the + * user memory so uaccess/access_process_vm can see it + * when this call returns; if zero, then it must be + * written back by the time the task completes a context + * switch (as synchronized with wait_task_inactive). + * Return 0 on success or if there was nothing to do, + * -EFAULT for a memory problem (bad stack pointer or + * whatever), or -EIO for a hardware problem. + */ + int (*writeback)(struct task_struct *, const struct utrace_regset *, + int immediate); +}; + +/* + * A regset view is a collection of regsets (struct utrace_regset, above). + * This describes all the state of a thread that can be seen from a given + * architecture/ABI environment. More than one view might refer to the + * same utrace_regset, or more than one regset might refer to the same + * machine-specific state in the thread. For example, a 32-bit thread's + * state could be examined from the 32-bit view or from the 64-bit view. + * Either method reaches the same thread register state, doing appropriate + * widening or truncation. + */ +struct utrace_regset_view { + const char *name; /* Identifier, e.g. ELF_PLATFORM string. */ + + const struct utrace_regset *regsets; + unsigned int n; + + /* + * EM_* value for which this is the native view, if any. + */ + u16 e_machine; +}; + + +/* + * These two are helpers for writing regset get/set functions in arch code. + * Use one or more calls sequentially for each chunk of regset data stored + * contiguously in memory. Call with constants for start_pos and end_pos, + * giving the range of byte positions in the regset that data corresponds + * to; end_pos can be -1 if this chunk is at the end of the regset layout. + * Each call updates the arguments to point past its chunk. + */ + +static inline int +utrace_regset_copyout(unsigned int *pos, unsigned int *count, + void **kbuf, void __user **ubuf, + const void *data, int start_pos, int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ?
*count + : min(*count, end_pos - *pos)); + data += *pos - start_pos; + if (*kbuf) { + memcpy(*kbuf, data, copy); + *kbuf += copy; + } + else if (copy_to_user(*ubuf, data, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +static inline int +utrace_regset_copyin(unsigned int *pos, unsigned int *count, + const void **kbuf, const void __user **ubuf, + void *data, int start_pos, int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + data += *pos - start_pos; + if (*kbuf) { + memcpy(data, *kbuf, copy); + *kbuf += copy; + } + else if (copy_from_user(data, *ubuf, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +/* + * These two parallel the two above, but for portions of a regset layout + * that always read as all-zero or for which writes are ignored. + */ +static inline int +utrace_regset_copyout_zero(unsigned int *pos, unsigned int *count, + void **kbuf, void __user **ubuf, + int start_pos, int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + if (*kbuf) { + memset(*kbuf, 0, copy); + *kbuf += copy; + } + else if (clear_user(*ubuf, copy)) + return -EFAULT; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +static inline int +utrace_regset_copyin_ignore(unsigned int *pos, unsigned int *count, + const void **kbuf, const void __user **ubuf, + int start_pos, int end_pos) +{ + if (*count == 0) + return 0; + BUG_ON(*pos < start_pos); + if (end_pos < 0 || *pos < end_pos) { + unsigned int copy = (end_pos < 0 ? *count + : min(*count, end_pos - *pos)); + if (*kbuf) + *kbuf += copy; + else + *ubuf += copy; + *pos += copy; + *count -= copy; + } + return 0; +} + +/**/ /*** --- linux-2.6/kernel/ptrace.c +++ linux-2.6/kernel/ptrace.c @@ -97,12 +97,6 @@ int ptrace_detach(struct task_struct *ch if (!valid_signal(data)) return -EIO; - /* Architecture-specific hardware disable .. */ - ptrace_disable(child); - - /* .. re-parent .. */ - child->exit_code = data; - return -ENOSYS; } @@ -210,44 +204,7 @@ struct task_struct *ptrace_get_task_stru return child; } -#ifndef __ARCH_SYS_PTRACE asmlinkage long sys_ptrace(long request, long pid, long addr, long data) { - struct task_struct *child; - long ret; - - /* - * This lock_kernel fixes a subtle race with suid exec - */ - lock_kernel(); - if (request == PTRACE_TRACEME) { - ret = ptrace_traceme(); - goto out; - } - - child = ptrace_get_task_struct(pid); - if (IS_ERR(child)) { - ret = PTR_ERR(child); - goto out; - } - - if (request == PTRACE_ATTACH) { - ret = ptrace_attach(child); - goto out_put_task_struct; - } - - ret = ptrace_check_attach(child, request == PTRACE_KILL); - if (ret < 0) - goto out_put_task_struct; - - ret = arch_ptrace(child, request, addr, data); - if (ret < 0) - goto out_put_task_struct; - - out_put_task_struct: - put_task_struct(child); - out: - unlock_kernel(); - return ret; + return -ENOSYS; } -#endif /* __ARCH_SYS_PTRACE */ --- linux-2.6/arch/i386/kernel/i387.c +++ linux-2.6/arch/i386/kernel/i387.c @@ -222,14 +222,10 @@ void set_fpu_twd( struct task_struct *ts * FXSR floating point environment conversions. 
*/ -static int convert_fxsr_to_user( struct _fpstate __user *buf, - struct i387_fxsave_struct *fxsave ) +static inline void +convert_fxsr_env_to_i387(unsigned long env[7], + struct i387_fxsave_struct *fxsave) { - unsigned long env[7]; - struct _fpreg __user *to; - struct _fpxreg *from; - int i; - env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul; env[1] = (unsigned long)fxsave->swd | 0xffff0000ul; env[2] = twd_fxsr_to_i387(fxsave); @@ -237,7 +233,17 @@ static int convert_fxsr_to_user( struct env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16); env[5] = fxsave->foo; env[6] = fxsave->fos; +} + +static int convert_fxsr_to_user(struct _fpstate __user *buf, + struct i387_fxsave_struct *fxsave) +{ + unsigned long env[7]; + struct _fpreg __user *to; + struct _fpxreg *from; + int i; + convert_fxsr_env_to_i387(env, fxsave); if ( __copy_to_user( buf, env, 7 * sizeof(unsigned long) ) ) return 1; @@ -255,6 +261,20 @@ static int convert_fxsr_to_user( struct return 0; } +static inline void +convert_fxsr_env_from_i387(struct i387_fxsave_struct *fxsave, + const unsigned long env[7]) +{ + fxsave->cwd = (unsigned short)(env[0] & 0xffff); + fxsave->swd = (unsigned short)(env[1] & 0xffff); + fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff)); + fxsave->fip = env[3]; + fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16); + fxsave->fcs = (env[4] & 0xffff); + fxsave->foo = env[5]; + fxsave->fos = env[6]; +} + static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave, struct _fpstate __user *buf ) { @@ -266,14 +286,7 @@ static int convert_fxsr_from_user( struc if ( __copy_from_user( env, buf, 7 * sizeof(long) ) ) return 1; - fxsave->cwd = (unsigned short)(env[0] & 0xffff); - fxsave->swd = (unsigned short)(env[1] & 0xffff); - fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff)); - fxsave->fip = env[3]; - fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16); - fxsave->fcs = (env[4] & 0xffff); - fxsave->foo = env[5]; - fxsave->fos = env[6]; + convert_fxsr_env_from_i387(fxsave, env); to = (struct _fpxreg *) &fxsave->st_space[0]; from = &buf->_st[0]; @@ -388,88 +401,82 @@ int restore_i387( struct _fpstate __user * ptrace request handlers. 
*/ -static inline int get_fpregs_fsave( struct user_i387_struct __user *buf, - struct task_struct *tsk ) +static inline void get_fpregs_fsave(struct user_i387_struct *buf, + struct task_struct *tsk) { - return __copy_to_user( buf, &tsk->thread.i387.fsave, - sizeof(struct user_i387_struct) ); + memcpy(buf, &tsk->thread.i387.fsave, sizeof(struct user_i387_struct)); } -static inline int get_fpregs_fxsave( struct user_i387_struct __user *buf, - struct task_struct *tsk ) +static inline void get_fpregs_fxsave(struct user_i387_struct *buf, + struct task_struct *tsk) { - return convert_fxsr_to_user( (struct _fpstate __user *)buf, - &tsk->thread.i387.fxsave ); + struct _fpreg *to; + const struct _fpxreg *from; + unsigned int i; + + convert_fxsr_env_to_i387((unsigned long *) buf, + &tsk->thread.i387.fxsave); + + to = (struct _fpreg *) buf->st_space; + from = (const struct _fpxreg *) &tsk->thread.i387.fxsave.st_space[0]; + for (i = 0; i < 8; i++, to++, from++) + *to = *(const struct _fpreg *) from; } -int get_fpregs( struct user_i387_struct __user *buf, struct task_struct *tsk ) +int get_fpregs(struct user_i387_struct *buf, struct task_struct *tsk) { if ( HAVE_HWFP ) { - if ( cpu_has_fxsr ) { - return get_fpregs_fxsave( buf, tsk ); - } else { - return get_fpregs_fsave( buf, tsk ); - } + if (cpu_has_fxsr) + get_fpregs_fxsave(buf, tsk); + else + get_fpregs_fsave(buf, tsk); + return 0; } else { return save_i387_soft( &tsk->thread.i387.soft, (struct _fpstate __user *)buf ); } } -static inline int set_fpregs_fsave( struct task_struct *tsk, - struct user_i387_struct __user *buf ) +static inline void set_fpregs_fsave(struct task_struct *tsk, + const struct user_i387_struct *buf) { - return __copy_from_user( &tsk->thread.i387.fsave, buf, - sizeof(struct user_i387_struct) ); + memcpy(&tsk->thread.i387.fsave, buf, sizeof(struct user_i387_struct)); } -static inline int set_fpregs_fxsave( struct task_struct *tsk, - struct user_i387_struct __user *buf ) +static inline void set_fpregs_fxsave(struct task_struct *tsk, + const struct user_i387_struct *buf) { - return convert_fxsr_from_user( &tsk->thread.i387.fxsave, - (struct _fpstate __user *)buf ); + struct _fpxreg *to; + const struct _fpreg *from; + unsigned int i; + + convert_fxsr_env_from_i387(&tsk->thread.i387.fxsave, + (unsigned long *) buf); + + to = (struct _fpxreg *) &tsk->thread.i387.fxsave.st_space[0]; + from = (const struct _fpreg *) buf->st_space; + for (i = 0; i < 8; i++, to++, from++) + *(struct _fpreg *) to = *from; } -int set_fpregs( struct task_struct *tsk, struct user_i387_struct __user *buf ) +int set_fpregs(struct task_struct *tsk, const struct user_i387_struct *buf) { if ( HAVE_HWFP ) { - if ( cpu_has_fxsr ) { - return set_fpregs_fxsave( tsk, buf ); - } else { - return set_fpregs_fsave( tsk, buf ); - } + if (cpu_has_fxsr) + set_fpregs_fxsave(tsk, buf); + else + set_fpregs_fsave(tsk, buf); + return 0; } else { return restore_i387_soft( &tsk->thread.i387.soft, (struct _fpstate __user *)buf ); } } -int get_fpxregs( struct user_fxsr_struct __user *buf, struct task_struct *tsk ) +void updated_fpxregs(struct task_struct *tsk) { - if ( cpu_has_fxsr ) { - if (__copy_to_user( buf, &tsk->thread.i387.fxsave, - sizeof(struct user_fxsr_struct) )) - return -EFAULT; - return 0; - } else { - return -EIO; - } -} - -int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct __user *buf ) -{ - int ret = 0; - - if ( cpu_has_fxsr ) { - if (__copy_from_user( &tsk->thread.i387.fxsave, buf, - sizeof(struct user_fxsr_struct) )) - ret = -EFAULT; - /* mxcsr 
reserved bits must be masked to zero for security reasons */ - tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask; - } else { - ret = -EIO; - } - return ret; + /* mxcsr reserved bits must be masked to zero for security reasons */ + tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask; } /* --- linux-2.6/arch/i386/kernel/ptrace.c +++ linux-2.6/arch/i386/kernel/ptrace.c @@ -18,7 +18,9 @@ #include #include #include +#include +#include #include #include #include @@ -28,10 +30,6 @@ #include #include -/* - * does not yet catch signals sent when the child dies. - * in exit.c or in signal.c. - */ /* * Determines which flags the user has access to [1 = access, 0 = no access]. @@ -40,9 +38,6 @@ */ #define FLAG_MASK 0x00050dd5 -/* set's the trap flag. */ -#define TRAP_FLAG 0x100 - /* * Offset of eflags on child stack.. */ @@ -111,6 +106,7 @@ static int putreg(struct task_struct *ch case EFL: value &= FLAG_MASK; value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK; + clear_tsk_thread_flag(child, TIF_FORCED_TF); break; } if (regno > FS*4) @@ -128,6 +124,10 @@ static unsigned long getreg(struct task_ case GS: retval = child->thread.gs; break; + case EFL: + if (test_tsk_thread_flag(child, TIF_FORCED_TF)) + retval &= ~X86_EFLAGS_TF; + goto fetch; case DS: case ES: case FS: @@ -136,9 +136,11 @@ static unsigned long getreg(struct task_ retval = 0xffff; /* fall through */ default: +fetch: if (regno > FS*4) regno -= 1*4; retval &= get_stack_long(child, regno); + break; } return retval; } @@ -230,11 +232,11 @@ void tracehook_enable_single_step(struct /* * If TF was already set, don't do anything else */ - if (regs->eflags & TRAP_FLAG) + if (regs->eflags & X86_EFLAGS_TF) return; /* Set TF on the kernel stack.. */ - regs->eflags |= TRAP_FLAG; + regs->eflags |= X86_EFLAGS_TF; /* * ..but if TF is changed by the instruction we will trace, @@ -255,29 +257,312 @@ void tracehook_disable_single_step(struc /* But touch TF only if it was set by us.. */ if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) { struct pt_regs *regs = get_child_regs(child); - regs->eflags &= ~TRAP_FLAG; + regs->eflags &= ~X86_EFLAGS_TF; } } -/* - * Called by kernel/ptrace.c when detaching.. - * - * Make sure the single step bit is not set. - */ -void ptrace_disable(struct task_struct *child) + +static int +genregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + if (kbuf) { + unsigned long *kp = kbuf; + while (count > 0) { + *kp++ = getreg(target, pos); + pos += 4; + count -= 4; + } + } + else { + unsigned long __user *up = ubuf; + while (count > 0) { + if (__put_user(getreg(target, pos), up++)) + return -EFAULT; + pos += 4; + count -= 4; + } + } + + return 0; +} + +static int +genregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret = 0; + + if (kbuf) { + const unsigned long *kp = kbuf; + while (!ret && count > 0) { + ret = putreg(target, pos, *kp++); + pos += 4; + count -= 4; + } + } + else { + int ret = 0; + const unsigned long __user *up = ubuf; + while (!ret && count > 0) { + unsigned long val; + ret = __get_user(val, up++); + if (!ret) + ret = putreg(target, pos, val); + pos += 4; + count -= 4; + } + } + + return ret; +} + +static int +fpregs_active(struct task_struct *target, const struct utrace_regset *regset) +{ + return tsk_used_math(target) ? 
regset->n : 0; +} + +static int +fpregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct user_i387_struct fp; + int ret; + + if (tsk_used_math(target)) { + if (target == current) + unlazy_fpu(target); + } + else + init_fpu(target); + + ret = get_fpregs(&fp, target); + if (ret == 0) + ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + &fp, 0, -1); + + return ret; +} + +static int +fpregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct user_i387_struct fp; + int ret; + + if (tsk_used_math(target)) { + if (target == current) + unlazy_fpu(target); + } + else if (pos == 0 && count == sizeof(fp)) + set_stopped_child_used_math(target); + else + init_fpu(target); + + if (pos > 0 || count < sizeof(fp)) { + ret = get_fpregs(&fp, target); + if (ret == 0) + ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, + &fp, 0, -1); + if (ret) + return ret; + kbuf = &fp; + } + else if (kbuf == NULL) { + if (__copy_from_user(&fp, ubuf, sizeof(fp))) + return -EFAULT; + kbuf = &fp; + } + + return set_fpregs(target, kbuf); +} + +static int +fpxregs_active(struct task_struct *target, const struct utrace_regset *regset) +{ + return !cpu_has_fxsr ? -ENODEV : tsk_used_math(target) ? regset->n : 0; +} + +static int +fpxregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + if (!cpu_has_fxsr) + return -ENODEV; + + if (tsk_used_math(target)) + unlazy_fpu(target); + else + init_fpu(target); + + return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.i387.fxsave, 0, -1); +} + +static int +fpxregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + if (!cpu_has_fxsr) + return -ENODEV; + + if (tsk_used_math(target)) + unlazy_fpu(target); + else if (pos == 0 && count == sizeof(target->thread.i387.fxsave)) + set_stopped_child_used_math(target); + else + init_fpu(target); + + ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.i387.fxsave, 0, -1); + + updated_fpxregs(target); + + return ret; +} + + +static int +dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset) { - tracehook_disable_single_step(child); + if (tsk->thread.debugreg[DR_CONTROL] | tsk->thread.debugreg[DR_STATUS]) + return 8; + return 0; +} + +static int +dbregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + /* + * The hardware updates the status register on a debug trap, + * but do_debug (traps.c) save it for us when that happens. + * So whether the target is current or not, thread.debugreg is good. 
+ */ + + return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + target->thread.debugreg, 0, -1); +} + +static int +dbregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) { + unsigned long val; + unsigned int i; + + if (kbuf) { + val = *(const unsigned long *) kbuf; + kbuf += sizeof(unsigned long); + } + else { + if (__get_user(val, (unsigned long __user *) ubuf)) + return -EFAULT; + ubuf += sizeof(unsigned long); + } + + if (pos < 4) { + if (val >= TASK_SIZE - 3) + return -EIO; + goto set; + } + else if (pos < 6) { + if (val != 0) + return -EIO; + continue; + } + else if (pos < 7) + goto set; + + /* Sanity-check data. Take one half-byte at once with + * check = (val >> (16 + 4*i)) & 0xf. It contains the + * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits + * 2 and 3 are LENi. Given a list of invalid values, + * we do mask |= 1 << invalid_value, so that + * (mask >> check) & 1 is a correct test for invalid + * values. + * + * R/Wi contains the type of the breakpoint / + * watchpoint, LENi contains the length of the watched + * data in the watchpoint case. + * + * The invalid values are: + * - LENi == 0x10 (undefined), so mask |= 0x0f00. + * - R/Wi == 0x10 (break on I/O reads or writes), so + * mask |= 0x4444. + * - R/Wi == 0x00 && LENi != 0x00, so we have mask |= + * 0x1110. + * + * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54. + * + * See the Intel Manual "System Programming Guide", + * 15.2.4 + * + * Note that LENi == 0x10 is defined on x86_64 in long + * mode (i.e. even for 32-bit userspace software, but + * 64-bit kernel), so the x86_64 mask value is 0x5454. + * See the AMD manual no. 24593 (AMD64 System + * Programming)*/ + val &= ~DR_CONTROL_RESERVED; + for (i = 0; i < 4; i++) + if ((0x5f54 >> ((val >> (16 + 4*i)) & 0xf)) & 1) + return -EIO; + if (val) + set_tsk_thread_flag(target, TIF_DEBUG); + else + clear_tsk_thread_flag(target, TIF_DEBUG); + +set: + target->thread.debugreg[pos] = val; + if (target == current) + switch (pos) { +#define DBREG(n) case n: set_debugreg(target->thread.debugreg[n], n); break + DBREG(0); + DBREG(1); + DBREG(2); + DBREG(3); + DBREG(6); + DBREG(7); +#undef DBREG + } + } + + return 0; } + /* * Perform get_thread_area on behalf of the traced child. 
*/ static int -ptrace_get_thread_area(struct task_struct *child, - int idx, struct user_desc __user *user_desc) +tls_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) { - struct user_desc info; - struct desc_struct *desc; + struct user_desc info, *ip; + const struct desc_struct *desc; /* * Get the current Thread-Local Storage area: @@ -299,23 +584,29 @@ ptrace_get_thread_area(struct task_struc #define GET_PRESENT(desc) (((desc)->b >> 15) & 1) #define GET_USEABLE(desc) (((desc)->b >> 20) & 1) - if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) - return -EINVAL; - - desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; - - info.entry_number = idx; - info.base_addr = GET_BASE(desc); - info.limit = GET_LIMIT(desc); - info.seg_32bit = GET_32BIT(desc); - info.contents = GET_CONTENTS(desc); - info.read_exec_only = !GET_WRITABLE(desc); - info.limit_in_pages = GET_LIMIT_PAGES(desc); - info.seg_not_present = !GET_PRESENT(desc); - info.useable = GET_USEABLE(desc); - - if (copy_to_user(user_desc, &info, sizeof(info))) - return -EFAULT; + desc = &target->thread.tls_array[pos / sizeof(struct user_desc)]; + ip = kbuf ?: &info; + memset(ip, 0, sizeof *ip); + for (; count > 0; count -= sizeof(struct user_desc), ++desc) { + ip->entry_number = (desc - &target->thread.tls_array[0] + + GDT_ENTRY_TLS_MIN); + ip->base_addr = GET_BASE(desc); + ip->limit = GET_LIMIT(desc); + ip->seg_32bit = GET_32BIT(desc); + ip->contents = GET_CONTENTS(desc); + ip->read_exec_only = !GET_WRITABLE(desc); + ip->limit_in_pages = GET_LIMIT_PAGES(desc); + ip->seg_not_present = !GET_PRESENT(desc); + ip->useable = GET_USEABLE(desc); + + if (kbuf) + ++ip; + else { + if (__copy_to_user(ubuf, &info, sizeof(info))) + return -EFAULT; + ubuf += sizeof(info); + } + } return 0; } @@ -324,303 +615,115 @@ ptrace_get_thread_area(struct task_struc * Perform set_thread_area on behalf of the traced child. */ static int -ptrace_set_thread_area(struct task_struct *child, - int idx, struct user_desc __user *user_desc) +tls_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) { struct user_desc info; struct desc_struct *desc; - - if (copy_from_user(&info, user_desc, sizeof(info))) - return -EFAULT; - - if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) - return -EINVAL; - - desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; - if (LDT_empty(&info)) { - desc->a = 0; - desc->b = 0; - } else { - desc->a = LDT_entry_a(&info); - desc->b = LDT_entry_b(&info); + struct desc_struct newtls[GDT_ENTRY_TLS_ENTRIES]; + unsigned int i; + int cpu; + + pos /= sizeof(struct user_desc); + count /= sizeof(struct user_desc); + + desc = newtls; + for (i = 0; i < count; ++i, ++desc) { + const struct user_desc *ip; + if (kbuf) { + ip = kbuf; + kbuf += sizeof(struct user_desc); + } + else { + ip = &info; + if (__copy_from_user(&info, ubuf, sizeof(info))) + return -EFAULT; + ubuf += sizeof(struct user_desc); + } + + if (LDT_empty(ip)) { + desc->a = 0; + desc->b = 0; + } else { + desc->a = LDT_entry_a(ip); + desc->b = LDT_entry_b(ip); + } } + /* + * We must not get preempted while modifying the TLS. 
+ */ + cpu = get_cpu(); + memcpy(&target->thread.tls_array[pos], newtls, + count * sizeof(newtls[0])); + if (target == current) + load_TLS(&target->thread, cpu); + put_cpu(); + return 0; } -long arch_ptrace(struct task_struct *child, long request, long addr, long data) -{ - struct user * dummy = NULL; - int i, ret; - unsigned long __user *datap = (unsigned long __user *)data; - - switch (request) { - /* when I and D space are separate, these will need to be fixed. */ - case PTRACE_PEEKTEXT: /* read word at location addr. */ - case PTRACE_PEEKDATA: { - unsigned long tmp; - int copied; - - copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); - ret = -EIO; - if (copied != sizeof(tmp)) - break; - ret = put_user(tmp, datap); - break; - } - - /* read the word at location addr in the USER area. */ - case PTRACE_PEEKUSR: { - unsigned long tmp; - - ret = -EIO; - if ((addr & 3) || addr < 0 || - addr > sizeof(struct user) - 3) - break; - - tmp = 0; /* Default return condition */ - if(addr < FRAME_SIZE*sizeof(long)) - tmp = getreg(child, addr); - if(addr >= (long) &dummy->u_debugreg[0] && - addr <= (long) &dummy->u_debugreg[7]){ - addr -= (long) &dummy->u_debugreg[0]; - addr = addr >> 2; - tmp = child->thread.debugreg[addr]; - } - ret = put_user(tmp, datap); - break; - } - - /* when I and D space are separate, this will have to be fixed. */ - case PTRACE_POKETEXT: /* write the word at location addr. */ - case PTRACE_POKEDATA: - ret = 0; - if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) - break; - ret = -EIO; - break; - - case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ - ret = -EIO; - if ((addr & 3) || addr < 0 || - addr > sizeof(struct user) - 3) - break; - - if (addr < FRAME_SIZE*sizeof(long)) { - ret = putreg(child, addr, data); - break; - } - /* We need to be very careful here. We implicitly - want to modify a portion of the task_struct, and we - have to be selective about what portions we allow someone - to modify. */ - - ret = -EIO; - if(addr >= (long) &dummy->u_debugreg[0] && - addr <= (long) &dummy->u_debugreg[7]){ - - if(addr == (long) &dummy->u_debugreg[4]) break; - if(addr == (long) &dummy->u_debugreg[5]) break; - if(addr < (long) &dummy->u_debugreg[4] && - ((unsigned long) data) >= TASK_SIZE-3) break; - - /* Sanity-check data. Take one half-byte at once with - * check = (val >> (16 + 4*i)) & 0xf. It contains the - * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits - * 2 and 3 are LENi. Given a list of invalid values, - * we do mask |= 1 << invalid_value, so that - * (mask >> check) & 1 is a correct test for invalid - * values. - * - * R/Wi contains the type of the breakpoint / - * watchpoint, LENi contains the length of the watched - * data in the watchpoint case. - * - * The invalid values are: - * - LENi == 0x10 (undefined), so mask |= 0x0f00. - * - R/Wi == 0x10 (break on I/O reads or writes), so - * mask |= 0x4444. - * - R/Wi == 0x00 && LENi != 0x00, so we have mask |= - * 0x1110. - * - * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54. - * - * See the Intel Manual "System Programming Guide", - * 15.2.4 - * - * Note that LENi == 0x10 is defined on x86_64 in long - * mode (i.e. even for 32-bit userspace software, but - * 64-bit kernel), so the x86_64 mask value is 0x5454. - * See the AMD manual no. 
24593 (AMD64 System - * Programming)*/ - - if(addr == (long) &dummy->u_debugreg[7]) { - data &= ~DR_CONTROL_RESERVED; - for(i=0; i<4; i++) - if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1) - goto out_tsk; - if (data) - set_tsk_thread_flag(child, TIF_DEBUG); - else - clear_tsk_thread_flag(child, TIF_DEBUG); - } - addr -= (long) &dummy->u_debugreg; - addr = addr >> 2; - child->thread.debugreg[addr] = data; - ret = 0; - } - break; - - case PTRACE_SYSEMU: /* continue and stop at next syscall, which will not be executed */ - case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ - case PTRACE_CONT: /* restart after signal. */ - ret = -EIO; - if (!valid_signal(data)) - break; - if (request == PTRACE_SYSEMU) { - //set_tsk_thread_flag(child, TIF_SYSCALL_EMU); - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - } else if (request == PTRACE_SYSCALL) { - set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - //clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); - } else { - //clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - } - child->exit_code = data; - /* make sure the single step bit is not set. */ - tracehook_disable_single_step(child); - wake_up_process(child); - ret = 0; - break; /* - * make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to - * exit. + * Determine how many TLS slots are in use. */ - case PTRACE_KILL: - ret = 0; - if (child->exit_state == EXIT_ZOMBIE) /* already dead */ - break; - child->exit_code = SIGKILL; - /* make sure the single step bit is not set. */ - tracehook_disable_single_step(child); - wake_up_process(child); - break; - - case PTRACE_SYSEMU_SINGLESTEP: /* Same as SYSEMU, but singlestep if not syscall */ - case PTRACE_SINGLESTEP: /* set the trap flag. */ - ret = -EIO; - if (!valid_signal(data)) - break; - - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - tracehook_enable_single_step(child); - child->exit_code = data; - /* give it a chance to run. */ - wake_up_process(child); - ret = 0; - break; - - case PTRACE_DETACH: - /* detach a process that was attached. */ - ret = ptrace_detach(child, data); - break; - - case PTRACE_GETREGS: { /* Get all gp regs from the child. */ - if (!access_ok(VERIFY_WRITE, datap, FRAME_SIZE*sizeof(long))) { - ret = -EIO; - break; - } - for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) { - __put_user(getreg(child, i), datap); - datap++; - } - ret = 0; - break; - } - - case PTRACE_SETREGS: { /* Set all gp regs in the child. */ - unsigned long tmp; - if (!access_ok(VERIFY_READ, datap, FRAME_SIZE*sizeof(long))) { - ret = -EIO; - break; - } - for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) { - __get_user(tmp, datap); - putreg(child, i, tmp); - datap++; - } - ret = 0; - break; - } - - case PTRACE_GETFPREGS: { /* Get the child FPU state. */ - if (!access_ok(VERIFY_WRITE, datap, - sizeof(struct user_i387_struct))) { - ret = -EIO; +static int +tls_active(struct task_struct *target, const struct utrace_regset *regset) +{ + int i; + for (i = GDT_ENTRY_TLS_ENTRIES; i > 0; --i) { + struct desc_struct *desc = &target->thread.tls_array[i - 1]; + if ((desc->a | desc->b) != 0) break; - } - ret = 0; - if (!tsk_used_math(child)) - init_fpu(child); - get_fpregs((struct user_i387_struct __user *)data, child); - break; } + return i; +} - case PTRACE_SETFPREGS: { /* Set the child FPU state. 
*/ - if (!access_ok(VERIFY_READ, datap, - sizeof(struct user_i387_struct))) { - ret = -EIO; - break; - } - set_stopped_child_used_math(child); - set_fpregs(child, (struct user_i387_struct __user *)data); - ret = 0; - break; - } - case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */ - if (!access_ok(VERIFY_WRITE, datap, - sizeof(struct user_fxsr_struct))) { - ret = -EIO; - break; - } - if (!tsk_used_math(child)) - init_fpu(child); - ret = get_fpxregs((struct user_fxsr_struct __user *)data, child); - break; - } +/* + * These are our native regset flavors. + * XXX ioperm? vm86? + */ +static const struct utrace_regset native_regsets[] = { + { + .n = FRAME_SIZE, .size = sizeof(long), .align = sizeof(long), + .get = genregs_get, .set = genregs_set + }, + { + .n = sizeof(struct user_i387_struct) / sizeof(long), + .size = sizeof(long), .align = sizeof(long), + .active = fpregs_active, + .get = fpregs_get, .set = fpregs_set + }, + { + .n = sizeof(struct user_fxsr_struct) / sizeof(long), + .size = sizeof(long), .align = sizeof(long), + .active = fpxregs_active, + .get = fpxregs_get, .set = fpxregs_set + }, + { + .n = GDT_ENTRY_TLS_ENTRIES, + .bias = GDT_ENTRY_TLS_MIN, + .size = sizeof(struct user_desc), + .align = sizeof(struct user_desc), + .active = tls_active, .get = tls_get, .set = tls_set + }, + { + .n = 8, .size = sizeof(long), .align = sizeof(long), + .active = dbregs_active, + .get = dbregs_get, .set = dbregs_set + }, +}; + + +const struct utrace_regset_view utrace_i386_native = { + .name = "i386", .e_machine = EM_386, + .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) +}; +EXPORT_SYMBOL_GPL(utrace_i386_native); - case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */ - if (!access_ok(VERIFY_READ, datap, - sizeof(struct user_fxsr_struct))) { - ret = -EIO; - break; - } - set_stopped_child_used_math(child); - ret = set_fpxregs(child, (struct user_fxsr_struct __user *)data); - break; - } - - case PTRACE_GET_THREAD_AREA: - ret = ptrace_get_thread_area(child, addr, - (struct user_desc __user *) data); - break; - - case PTRACE_SET_THREAD_AREA: - ret = ptrace_set_thread_area(child, addr, - (struct user_desc __user *) data); - break; - - default: - ret = ptrace_request(child, request, addr, data); - break; - } - out_tsk: - return ret; -} void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) { --- linux-2.6/arch/powerpc/kernel/ptrace-common.h +++ linux-2.6/arch/powerpc/kernel/ptrace-common.h @@ -1,145 +0,0 @@ -/* - * Copyright (c) 2002 Stephen Rothwell, IBM Coproration - * Extracted from ptrace.c and ptrace32.c - * - * This file is subject to the terms and conditions of the GNU General - * Public License. See the file README.legal in the main directory of - * this archive for more details. - */ - -#ifndef _PPC64_PTRACE_COMMON_H -#define _PPC64_PTRACE_COMMON_H - -#include - -/* - * Set of msr bits that gdb can change on behalf of a process. - */ -#define MSR_DEBUGCHANGE (MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1) - -/* - * Get contents of register REGNO in task TASK. - */ -static inline unsigned long get_reg(struct task_struct *task, int regno) -{ - unsigned long tmp = 0; - - /* - * Put the correct FP bits in, they might be wrong as a result - * of our lazy FP restore. 
- */ - if (regno == PT_MSR) { - tmp = ((unsigned long *)task->thread.regs)[PT_MSR]; - tmp |= task->thread.fpexc_mode; - } else if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) { - tmp = ((unsigned long *)task->thread.regs)[regno]; - } - - return tmp; -} - -/* - * Write contents of register REGNO in task TASK. - */ -static inline int put_reg(struct task_struct *task, int regno, - unsigned long data) -{ - if (regno < PT_SOFTE) { - if (regno == PT_MSR) - data = (data & MSR_DEBUGCHANGE) - | (task->thread.regs->msr & ~MSR_DEBUGCHANGE); - ((unsigned long *)task->thread.regs)[regno] = data; - return 0; - } - return -EIO; -} - -#ifdef CONFIG_ALTIVEC -/* - * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go. - * The transfer totals 34 quadword. Quadwords 0-31 contain the - * corresponding vector registers. Quadword 32 contains the vscr as the - * last word (offset 12) within that quadword. Quadword 33 contains the - * vrsave as the first word (offset 0) within the quadword. - * - * This definition of the VMX state is compatible with the current PPC32 - * ptrace interface. This allows signal handling and ptrace to use the - * same structures. This also simplifies the implementation of a bi-arch - * (combined (32- and 64-bit) gdb. - */ - -/* - * Get contents of AltiVec register state in task TASK - */ -static inline int get_vrregs(unsigned long __user *data, - struct task_struct *task) -{ - unsigned long regsize; - - /* copy AltiVec registers VR[0] .. VR[31] */ - regsize = 32 * sizeof(vector128); - if (copy_to_user(data, task->thread.vr, regsize)) - return -EFAULT; - data += (regsize / sizeof(unsigned long)); - - /* copy VSCR */ - regsize = 1 * sizeof(vector128); - if (copy_to_user(data, &task->thread.vscr, regsize)) - return -EFAULT; - data += (regsize / sizeof(unsigned long)); - - /* copy VRSAVE */ - if (put_user(task->thread.vrsave, (u32 __user *)data)) - return -EFAULT; - - return 0; -} - -/* - * Write contents of AltiVec register state into task TASK. - */ -static inline int set_vrregs(struct task_struct *task, - unsigned long __user *data) -{ - unsigned long regsize; - - /* copy AltiVec registers VR[0] .. 
VR[31] */ - regsize = 32 * sizeof(vector128); - if (copy_from_user(task->thread.vr, data, regsize)) - return -EFAULT; - data += (regsize / sizeof(unsigned long)); - - /* copy VSCR */ - regsize = 1 * sizeof(vector128); - if (copy_from_user(&task->thread.vscr, data, regsize)) - return -EFAULT; - data += (regsize / sizeof(unsigned long)); - - /* copy VRSAVE */ - if (get_user(task->thread.vrsave, (u32 __user *)data)) - return -EFAULT; - - return 0; -} -#endif - -static inline int ptrace_set_debugreg(struct task_struct *task, - unsigned long addr, unsigned long data) -{ - /* We only support one DABR and no IABRS at the moment */ - if (addr > 0) - return -EINVAL; - - /* The bottom 3 bits are flags */ - if ((data & ~0x7UL) >= TASK_SIZE) - return -EIO; - - /* Ensure translation is on */ - if (data && !(data & DABR_TRANSLATION)) - return -EIO; - - task->thread.dabr = data; - return 0; -} - -#endif /* _PPC64_PTRACE_COMMON_H */ --- linux-2.6/arch/powerpc/kernel/Makefile +++ linux-2.6/arch/powerpc/kernel/Makefile @@ -10,12 +10,14 @@ CFLAGS_prom_init.o += -fPIC CFLAGS_btext.o += -fPIC endif +CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' + obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ irq.o align.o signal_32.o pmc.o vdso.o \ init_task.o process.o systbl.o idle.o obj-y += vdso32/ obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ - signal_64.o ptrace32.o \ + signal_64.o \ paca.o cpu_setup_ppc970.o \ cpu_setup_pa6t.o \ firmware.o sysfs.o nvram_64.o --- linux-2.6/arch/powerpc/kernel/ptrace32.c +++ linux-2.6/arch/powerpc/kernel/ptrace32.c @@ -1,443 +0,0 @@ -/* - * ptrace for 32-bit processes running on a 64-bit kernel. - * - * PowerPC version - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * - * Derived from "arch/m68k/kernel/ptrace.c" - * Copyright (C) 1994 by Hamish Macdonald - * Taken from linux/kernel/ptrace.c and modified for M680x0. - * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds - * - * Modified by Cort Dougan (cort@hq.fsmlabs.com) - * and Paul Mackerras (paulus@samba.org). - * - * This file is subject to the terms and conditions of the GNU General - * Public License. See the file COPYING in the main directory of - * this archive for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "ptrace-common.h" - -/* - * does not yet catch signals sent when the child dies. - * in exit.c or in signal.c. - */ - -long compat_sys_ptrace(int request, int pid, unsigned long addr, - unsigned long data) -{ - struct task_struct *child; - int ret; - - if (request == PTRACE_TRACEME) - return -ENOSYS; - - lock_kernel(); - if (request == PTRACE_TRACEME) { - ret = ptrace_traceme(); - goto out; - } - - child = ptrace_get_task_struct(pid); - if (IS_ERR(child)) { - ret = PTR_ERR(child); - goto out; - } - - if (request == PTRACE_ATTACH) { - ret = ptrace_attach(child); - goto out_tsk; - } - - ret = ptrace_check_attach(child, request == PTRACE_KILL); - if (ret < 0) - goto out_tsk; - - switch (request) { - /* when I and D space are separate, these will need to be fixed. */ - case PTRACE_PEEKTEXT: /* read word at location addr. 
*/ - case PTRACE_PEEKDATA: { - unsigned int tmp; - int copied; - - copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); - ret = -EIO; - if (copied != sizeof(tmp)) - break; - ret = put_user(tmp, (u32 __user *)data); - break; - } - - /* - * Read 4 bytes of the other process' storage - * data is a pointer specifying where the user wants the - * 4 bytes copied into - * addr is a pointer in the user's storage that contains an 8 byte - * address in the other process of the 4 bytes that is to be read - * (this is run in a 32-bit process looking at a 64-bit process) - * when I and D space are separate, these will need to be fixed. - */ - case PPC_PTRACE_PEEKTEXT_3264: - case PPC_PTRACE_PEEKDATA_3264: { - u32 tmp; - int copied; - u32 __user * addrOthers; - - ret = -EIO; - - /* Get the addr in the other process that we want to read */ - if (get_user(addrOthers, (u32 __user * __user *)addr) != 0) - break; - - copied = access_process_vm(child, (u64)addrOthers, &tmp, - sizeof(tmp), 0); - if (copied != sizeof(tmp)) - break; - ret = put_user(tmp, (u32 __user *)data); - break; - } - - /* Read a register (specified by ADDR) out of the "user area" */ - case PTRACE_PEEKUSR: { - int index; - unsigned long tmp; - - ret = -EIO; - /* convert to index and check */ - index = (unsigned long) addr >> 2; - if ((addr & 3) || (index > PT_FPSCR32)) - break; - - if (index < PT_FPR0) { - tmp = get_reg(child, index); - } else { - flush_fp_to_thread(child); - /* - * the user space code considers the floating point - * to be an array of unsigned int (32 bits) - the - * index passed in is based on this assumption. - */ - tmp = ((unsigned int *)child->thread.fpr)[index - PT_FPR0]; - } - ret = put_user((unsigned int)tmp, (u32 __user *)data); - break; - } - - /* - * Read 4 bytes out of the other process' pt_regs area - * data is a pointer specifying where the user wants the - * 4 bytes copied into - * addr is the offset into the other process' pt_regs structure - * that is to be read - * (this is run in a 32-bit process looking at a 64-bit process) - */ - case PPC_PTRACE_PEEKUSR_3264: { - u32 index; - u32 reg32bits; - u64 tmp; - u32 numReg; - u32 part; - - ret = -EIO; - /* Determine which register the user wants */ - index = (u64)addr >> 2; - numReg = index / 2; - /* Determine which part of the register the user wants */ - if (index % 2) - part = 1; /* want the 2nd half of the register (right-most). */ - else - part = 0; /* want the 1st half of the register (left-most). */ - - /* Validate the input - check to see if address is on the wrong boundary or beyond the end of the user area */ - if ((addr & 3) || numReg > PT_FPSCR) - break; - - if (numReg >= PT_FPR0) { - flush_fp_to_thread(child); - tmp = ((unsigned long int *)child->thread.fpr)[numReg - PT_FPR0]; - } else { /* register within PT_REGS struct */ - tmp = get_reg(child, numReg); - } - reg32bits = ((u32*)&tmp)[part]; - ret = put_user(reg32bits, (u32 __user *)data); - break; - } - - /* If I and D space are separate, this will have to be fixed. */ - case PTRACE_POKETEXT: /* write the word at location addr. 
*/ - case PTRACE_POKEDATA: { - unsigned int tmp; - tmp = data; - ret = 0; - if (access_process_vm(child, addr, &tmp, sizeof(tmp), 1) - == sizeof(tmp)) - break; - ret = -EIO; - break; - } - - /* - * Write 4 bytes into the other process' storage - * data is the 4 bytes that the user wants written - * addr is a pointer in the user's storage that contains an - * 8 byte address in the other process where the 4 bytes - * that is to be written - * (this is run in a 32-bit process looking at a 64-bit process) - * when I and D space are separate, these will need to be fixed. - */ - case PPC_PTRACE_POKETEXT_3264: - case PPC_PTRACE_POKEDATA_3264: { - u32 tmp = data; - u32 __user * addrOthers; - - /* Get the addr in the other process that we want to write into */ - ret = -EIO; - if (get_user(addrOthers, (u32 __user * __user *)addr) != 0) - break; - ret = 0; - if (access_process_vm(child, (u64)addrOthers, &tmp, - sizeof(tmp), 1) == sizeof(tmp)) - break; - ret = -EIO; - break; - } - - /* write the word at location addr in the USER area */ - case PTRACE_POKEUSR: { - unsigned long index; - - ret = -EIO; - /* convert to index and check */ - index = (unsigned long) addr >> 2; - if ((addr & 3) || (index > PT_FPSCR32)) - break; - - if (index == PT_ORIG_R3) - break; - if (index < PT_FPR0) { - ret = put_reg(child, index, data); - } else { - flush_fp_to_thread(child); - /* - * the user space code considers the floating point - * to be an array of unsigned int (32 bits) - the - * index passed in is based on this assumption. - */ - ((unsigned int *)child->thread.fpr)[index - PT_FPR0] = data; - ret = 0; - } - break; - } - - /* - * Write 4 bytes into the other process' pt_regs area - * data is the 4 bytes that the user wants written - * addr is the offset into the other process' pt_regs structure - * that is to be written into - * (this is run in a 32-bit process looking at a 64-bit process) - */ - case PPC_PTRACE_POKEUSR_3264: { - u32 index; - u32 numReg; - - ret = -EIO; - /* Determine which register the user wants */ - index = (u64)addr >> 2; - numReg = index / 2; - /* - * Validate the input - check to see if address is on the - * wrong boundary or beyond the end of the user area - */ - if ((addr & 3) || (numReg > PT_FPSCR)) - break; - /* Insure it is a register we let them change */ - if ((numReg == PT_ORIG_R3) - || ((numReg > PT_CCR) && (numReg < PT_FPR0))) - break; - if (numReg >= PT_FPR0) { - flush_fp_to_thread(child); - } - if (numReg == PT_MSR) - data = (data & MSR_DEBUGCHANGE) - | (child->thread.regs->msr & ~MSR_DEBUGCHANGE); - ((u32*)child->thread.regs)[index] = data; - ret = 0; - break; - } - - case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ - case PTRACE_CONT: { /* restart after signal. */ - ret = -EIO; - if (!valid_signal(data)) - break; - if (request == PTRACE_SYSCALL) - set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - else - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - child->exit_code = data; - /* make sure the single step bit is not set. */ - tracehook_disable_single_step(child); - wake_up_process(child); - ret = 0; - break; - } - - /* - * make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to - * exit. - */ - case PTRACE_KILL: { - ret = 0; - if (child->exit_state == EXIT_ZOMBIE) /* already dead */ - break; - child->exit_code = SIGKILL; - /* make sure the single step bit is not set. 
*/ - tracehook_disable_single_step(child); - wake_up_process(child); - break; - } - - case PTRACE_SINGLESTEP: { /* set the trap flag. */ - ret = -EIO; - if (!valid_signal(data)) - break; - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - tracehook_enable_single_step(child); - child->exit_code = data; - /* give it a chance to run. */ - wake_up_process(child); - ret = 0; - break; - } - - case PTRACE_GET_DEBUGREG: { - ret = -EINVAL; - /* We only support one DABR and no IABRS at the moment */ - if (addr > 0) - break; - ret = put_user(child->thread.dabr, (u32 __user *)data); - break; - } - - case PTRACE_SET_DEBUGREG: - ret = ptrace_set_debugreg(child, addr, data); - break; - - case PTRACE_DETACH: - ret = ptrace_detach(child, data); - break; - - case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */ - int i; - unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; - unsigned int __user *tmp = (unsigned int __user *)addr; - - for (i = 0; i < 32; i++) { - ret = put_user(*reg, tmp); - if (ret) - break; - reg++; - tmp++; - } - break; - } - - case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */ - int i; - unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; - unsigned int __user *tmp = (unsigned int __user *)addr; - - for (i = 0; i < 32; i++) { - ret = get_user(*reg, tmp); - if (ret) - break; - reg++; - tmp++; - } - break; - } - - case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */ - int i; - unsigned long *reg = &((unsigned long *)child->thread.fpr)[0]; - unsigned int __user *tmp = (unsigned int __user *)addr; - - flush_fp_to_thread(child); - - for (i = 0; i < 32; i++) { - ret = put_user(*reg, tmp); - if (ret) - break; - reg++; - tmp++; - } - break; - } - - case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */ - int i; - unsigned long *reg = &((unsigned long *)child->thread.fpr)[0]; - unsigned int __user *tmp = (unsigned int __user *)addr; - - flush_fp_to_thread(child); - - for (i = 0; i < 32; i++) { - ret = get_user(*reg, tmp); - if (ret) - break; - reg++; - tmp++; - } - break; - } - -#if 0 /* XXX */ - case PTRACE_GETEVENTMSG: - ret = put_user(child->ptrace_message, (unsigned int __user *) data); - break; - -#ifdef CONFIG_ALTIVEC - case PTRACE_GETVRREGS: - /* Get the child altivec register state. */ - flush_altivec_to_thread(child); - ret = get_vrregs((unsigned long __user *)data, child); - break; - - case PTRACE_SETVRREGS: - /* Set the child altivec register state. */ - flush_altivec_to_thread(child); - ret = set_vrregs(child, (unsigned long __user *)data); - break; -#endif -#endif - - default: - ret = ptrace_request(child, request, addr, data); - break; - } -out_tsk: - put_task_struct(child); -out: - unlock_kernel(); - return ret; -} --- linux-2.6/arch/powerpc/kernel/ptrace.c +++ linux-2.6/arch/powerpc/kernel/ptrace.c @@ -28,9 +28,8 @@ #include #include #include -#ifdef CONFIG_PPC32 +#include #include -#endif #include #include @@ -38,114 +37,224 @@ #include #include -#ifdef CONFIG_PPC64 -#include "ptrace-common.h" -#endif - -#ifdef CONFIG_PPC32 /* * Set of msr bits that gdb can change on behalf of a process. */ -#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) +#ifdef CONFIG_PPC64 +#define MSR_DEBUGCHANGE (MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1) +#elif defined(CONFIG_40x) || defined(CONFIG_BOOKE) #define MSR_DEBUGCHANGE 0 -#else +#else /* CONFIG_PPC32 */ #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) -#endif -#endif /* CONFIG_PPC32 */ +#endif /* CONFIG_PPC64 */ /* - * does not yet catch signals sent when the child dies. - * in exit.c or in signal.c. 
+ * Last register that can be changed via ptrace. */ +#ifdef CONFIG_PPC64 +#define PT_LAST PT_SOFTE +#else +#define PT_LAST PT_MQ +#endif + +static int +genregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + if (target->thread.regs == NULL) + return -EIO; #ifdef CONFIG_PPC32 -/* - * Get contents of register REGNO in task TASK. - */ -static inline unsigned long get_reg(struct task_struct *task, int regno) + CHECK_FULL_REGS(target->thread.regs); +#endif + + return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + target->thread.regs, 0, -1); +} + +static int +genregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) { - if (regno < sizeof(struct pt_regs) / sizeof(unsigned long) - && task->thread.regs != NULL) - return ((unsigned long *)task->thread.regs)[regno]; - return (0); + unsigned long msr_save; + int ret = 0; + + if (target->thread.regs == NULL) + return -EIO; + +#ifdef CONFIG_PPC32 + CHECK_FULL_REGS(target->thread.regs); +#endif + + /* + * Just ignore attempts to set the registers beyond PT_LAST. + * They are read-only. + */ + + msr_save = target->thread.regs->msr &~ MSR_DEBUGCHANGE; + + ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, + target->thread.regs, 0, + (PT_LAST + 1) * sizeof(long)); + + target->thread.regs->msr &= MSR_DEBUGCHANGE; + target->thread.regs->msr |= msr_save; + + return ret; } -/* - * Write contents of register REGNO in task TASK. - */ -static inline int put_reg(struct task_struct *task, int regno, - unsigned long data) +static int +fpregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) { - if (regno <= PT_MQ && task->thread.regs != NULL) { - if (regno == PT_MSR) - data = (data & MSR_DEBUGCHANGE) - | (task->thread.regs->msr & ~MSR_DEBUGCHANGE); - ((unsigned long *)task->thread.regs)[regno] = data; - return 0; - } - return -EIO; + BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) + != offsetof(struct thread_struct, fpr[32])); + + flush_fp_to_thread(target); + + return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.fpr, 0, -1); +} + +static int +fpregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpr, 0, -1); } #ifdef CONFIG_ALTIVEC /* - * Get contents of AltiVec register state in task TASK + * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go. + * The transfer totals 34 quadword. Quadwords 0-31 contain the + * corresponding vector registers. Quadword 32 contains the vscr as the + * last word (offset 12) within that quadword. Quadword 33 contains the + * vrsave as the first word (offset 0) within the quadword. + * + * This definition of the VMX state is compatible with the current PPC32 + * ptrace interface. This allows signal handling and ptrace to use the + * same structures. This also simplifies the implementation of a bi-arch + * (combined (32- and 64-bit) gdb. */ -static inline int get_vrregs(unsigned long __user *data, struct task_struct *task) + +static int +vrregs_active(struct task_struct *target, const struct utrace_regset *regset) { - int i, j; + flush_altivec_to_thread(target); + return target->thread.used_vr ? 
regset->n : 0; +} + +static int +vrregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + BUILD_BUG_ON(offsetof(struct thread_struct, vscr) + != offsetof(struct thread_struct, vr[32])); + BUILD_BUG_ON(offsetof(struct thread_struct, vscr) + sizeof(vector128) + != offsetof(struct thread_struct, vrsave)); - if (!access_ok(VERIFY_WRITE, data, 133 * sizeof(unsigned long))) - return -EFAULT; + flush_altivec_to_thread(target); - /* copy AltiVec registers VR[0] .. VR[31] */ - for (i = 0; i < 32; i++) - for (j = 0; j < 4; j++, data++) - if (__put_user(task->thread.vr[i].u[j], data)) - return -EFAULT; + return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.vr, 0, -1); +} - /* copy VSCR */ - for (i = 0; i < 4; i++, data++) - if (__put_user(task->thread.vscr.u[i], data)) - return -EFAULT; - - /* copy VRSAVE */ - if (__put_user(task->thread.vrsave, data)) - return -EFAULT; +static int +vrregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + flush_altivec_to_thread(target); + return utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.vr, 0, -1); +} +#endif /* CONFIG_ALTIVEC */ + +#ifdef CONFIG_PPC64 +/* We only support one DABR and no IABRS at the moment */ + +static int +set_thread_dabr(struct task_struct *tsk, unsigned long dabr) +{ + /* The bottom 3 bits are flags */ + if ((dabr & ~0x7UL) >= TASK_SIZE) + return -EIO; + + /* Ensure translation is on */ + if (dabr && !(dabr & DABR_TRANSLATION)) + return -EIO; + + tsk->thread.dabr = dabr; return 0; } -/* - * Write contents of AltiVec register state into task TASK. - */ -static inline int set_vrregs(struct task_struct *task, unsigned long __user *data) +static int +debugreg_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) { - int i, j; + return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.dabr, 0, -1); +} - if (!access_ok(VERIFY_READ, data, 133 * sizeof(unsigned long))) - return -EFAULT; +static int +debugreg_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + unsigned long dabr; + int ret; - /* copy AltiVec registers VR[0] .. 
VR[31] */ - for (i = 0; i < 32; i++) - for (j = 0; j < 4; j++, data++) - if (__get_user(task->thread.vr[i].u[j], data)) - return -EFAULT; + ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, &dabr, 0, -1); + if (ret == 0) + ret = set_thread_dabr(target, dabr); - /* copy VSCR */ - for (i = 0; i < 4; i++, data++) - if (__get_user(task->thread.vscr.u[i], data)) - return -EFAULT; - - /* copy VRSAVE */ - if (__get_user(task->thread.vrsave, data)) - return -EFAULT; + return ret; +} - return 0; +static int +ppc32_dabr_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + u32 dabr = target->thread.dabr; + return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, &dabr, 0, -1); } -#endif -#ifdef CONFIG_SPE +static int +ppc32_dabr_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + u32 dabr; + int ret; + + ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, &dabr, 0, -1); + if (ret == 0) + ret = set_thread_dabr(target, dabr); + + return ret; +} +#endif /* CONFIG_PPC64 */ +#ifdef CONFIG_SPE /* * For get_evrregs/set_evrregs functions 'data' has the following layout: * @@ -156,327 +265,207 @@ static inline int set_vrregs(struct task * } */ -/* - * Get contents of SPE register state in task TASK. - */ -static inline int get_evrregs(unsigned long *data, struct task_struct *task) +static int +evrregs_active(struct task_struct *target, const struct utrace_regset *regset) { - int i; - - if (!access_ok(VERIFY_WRITE, data, 35 * sizeof(unsigned long))) - return -EFAULT; - - /* copy SPEFSCR */ - if (__put_user(task->thread.spefscr, &data[34])) - return -EFAULT; + if (target->thread.regs->msr & MSR_SPE) + giveup_spe(target); + return target->thread.used_spe ? regset->n : 0; +} - /* copy SPE registers EVR[0] .. EVR[31] */ - for (i = 0; i < 32; i++, data++) - if (__put_user(task->thread.evr[i], data)) - return -EFAULT; +static int +evrregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + BUILD_BUG_ON(offsetof(struct thread_struct, acc) + != offsetof(struct thread_struct, evr[32])); + BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) + != offsetof(struct thread_struct, spefscr)); - /* copy ACC */ - if (__put_user64(task->thread.acc, (unsigned long long *)data)) - return -EFAULT; + if (target->thread.regs->msr & MSR_SPE) + giveup_spe(target); - return 0; + return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.evr, 0, -1); } -/* - * Write contents of SPE register state into task TASK. - */ -static inline int set_evrregs(struct task_struct *task, unsigned long *data) +static int +evrregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) { - int i; - - if (!access_ok(VERIFY_READ, data, 35 * sizeof(unsigned long))) - return -EFAULT; + /* this is to clear the MSR_SPE bit to force a reload + * of register state from memory */ + if (target->thread.regs->msr & MSR_SPE) + giveup_spe(target); - /* copy SPEFSCR */ - if (__get_user(task->thread.spefscr, &data[34])) - return -EFAULT; - - /* copy SPE registers EVR[0] .. 
EVR[31] */ - for (i = 0; i < 32; i++, data++) - if (__get_user(task->thread.evr[i], data)) - return -EFAULT; - /* copy ACC */ - if (__get_user64(task->thread.acc, (unsigned long long*)data)) - return -EFAULT; - - return 0; + return utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.evr, 0, -1); } #endif /* CONFIG_SPE */ -#endif /* CONFIG_PPC32 */ + /* - * Called by kernel/ptrace.c when detaching.. - * - * Make sure single step bits etc are not set. + * These are our native regset flavors. */ -void ptrace_disable(struct task_struct *child) -{ - /* make sure the single step bit is not set. */ - tracehook_disable_single_step(child); -} - -long arch_ptrace(struct task_struct *child, long request, long addr, long data) -{ - int ret = -EPERM; - - switch (request) { - /* when I and D space are separate, these will need to be fixed. */ - case PTRACE_PEEKTEXT: /* read word at location addr. */ - case PTRACE_PEEKDATA: { - unsigned long tmp; - int copied; - - copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); - ret = -EIO; - if (copied != sizeof(tmp)) - break; - ret = put_user(tmp,(unsigned long __user *) data); - break; - } - - /* read the word at location addr in the USER area. */ - case PTRACE_PEEKUSR: { - unsigned long index, tmp; - - ret = -EIO; - /* convert to index and check */ -#ifdef CONFIG_PPC32 - index = (unsigned long) addr >> 2; - if ((addr & 3) || (index > PT_FPSCR) - || (child->thread.regs == NULL)) -#else - index = (unsigned long) addr >> 3; - if ((addr & 7) || (index > PT_FPSCR)) +static const struct utrace_regset native_regsets[] = { + { + .n = ELF_NGREG, .size = sizeof(long), .align = sizeof(long), + .get = genregs_get, .set = genregs_set + }, + { + .n = ELF_NFPREG, + .size = sizeof(double), .align = sizeof(double), + .get = fpregs_get, .set = fpregs_set + }, +#ifdef CONFIG_ALTIVEC + { + .n = 33*4+1, .size = sizeof(u32), .align = sizeof(u32), + .active = vrregs_active, .get = vrregs_get, .set = vrregs_set + }, #endif - break; - -#ifdef CONFIG_PPC32 - CHECK_FULL_REGS(child->thread.regs); +#ifdef CONFIG_SPE + { + .n = 35, .size = sizeof(long), .align = sizeof(long), + .active = evrregs_active, + .get = evrregs_get, .set = evrregs_set + }, #endif - if (index < PT_FPR0) { - tmp = get_reg(child, (int) index); - } else { - flush_fp_to_thread(child); - tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0]; - } - ret = put_user(tmp,(unsigned long __user *) data); - break; - } +#ifdef CONFIG_PPC64 + { + .n = 1, .size = sizeof(long), .align = sizeof(long), + .get = debugreg_get, .set = debugreg_set + }, +#endif +}; + +const struct utrace_regset_view utrace_ppc_native_view = { + .name = UTS_MACHINE, .e_machine = ELF_ARCH, + .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) +}; +EXPORT_SYMBOL_GPL(utrace_ppc_native_view); - /* If I and D space are separate, this will have to be fixed. */ - case PTRACE_POKETEXT: /* write the word at location addr. 
*/ - case PTRACE_POKEDATA: - ret = 0; - if (access_process_vm(child, addr, &data, sizeof(data), 1) - == sizeof(data)) - break; - ret = -EIO; - break; - - /* write the word at location addr in the USER area */ - case PTRACE_POKEUSR: { - unsigned long index; - ret = -EIO; - /* convert to index and check */ -#ifdef CONFIG_PPC32 - index = (unsigned long) addr >> 2; - if ((addr & 3) || (index > PT_FPSCR) - || (child->thread.regs == NULL)) -#else - index = (unsigned long) addr >> 3; - if ((addr & 7) || (index > PT_FPSCR)) -#endif - break; +#ifdef CONFIG_PPC64 +#include -#ifdef CONFIG_PPC32 - CHECK_FULL_REGS(child->thread.regs); -#endif - if (index == PT_ORIG_R3) - break; - if (index < PT_FPR0) { - ret = put_reg(child, index, data); - } else { - flush_fp_to_thread(child); - ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data; - ret = 0; - } - break; - } +static int +ppc32_gpr_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + unsigned long *regs = (unsigned long *) target->thread.regs; - case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ - case PTRACE_CONT: { /* restart after signal. */ - ret = -EIO; - if (!valid_signal(data)) - break; - if (request == PTRACE_SYSCALL) - set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - else - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - child->exit_code = data; - /* make sure the single step bit is not set. */ - tracehook_disable_single_step(child); - wake_up_process(child); - ret = 0; - break; - } + if (regs == NULL) + return -EIO; -/* - * make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to - * exit. - */ - case PTRACE_KILL: { - ret = 0; - if (child->exit_state == EXIT_ZOMBIE) /* already dead */ - break; - child->exit_code = SIGKILL; - /* make sure the single step bit is not set. */ - tracehook_disable_single_step(child); - wake_up_process(child); - break; - } + regs += pos / sizeof(u32); - case PTRACE_SINGLESTEP: { /* set the trap flag. */ - ret = -EIO; - if (!valid_signal(data)) - break; - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - tracehook_enable_single_step(child); - child->exit_code = data; - /* give it a chance to run. */ - wake_up_process(child); - ret = 0; - break; + if (kbuf) { + u32 *out = kbuf; + for (; count > 0; count -= sizeof(u32)) + *out++ = *regs++; + } + else { + u32 __user *out = ubuf; + for (; count > 0; count -= sizeof(u32)) + if (put_user((u32) *regs++, out++)) + return -EFAULT; } -#ifdef CONFIG_PPC64 - case PTRACE_GET_DEBUGREG: { - ret = -EINVAL; - /* We only support one DABR and no IABRS at the moment */ - if (addr > 0) - break; - ret = put_user(child->thread.dabr, - (unsigned long __user *)data); - break; - } + return 0; +} - case PTRACE_SET_DEBUGREG: - ret = ptrace_set_debugreg(child, addr, data); - break; -#endif +static int +ppc32_gpr_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + unsigned long *regs = (unsigned long *) target->thread.regs; - case PTRACE_DETACH: - ret = ptrace_detach(child, data); - break; - - case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. 
*/ - int i; - unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; - unsigned long __user *tmp = (unsigned long __user *)addr; - - for (i = 0; i < 32; i++) { - ret = put_user(*reg, tmp); - if (ret) - break; - reg++; - tmp++; - } - break; - } + if (regs == NULL) + return -EIO; - case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */ - int i; - unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; - unsigned long __user *tmp = (unsigned long __user *)addr; - - for (i = 0; i < 32; i++) { - ret = get_user(*reg, tmp); - if (ret) - break; - reg++; - tmp++; + /* + * Just ignore attempts to set the registers beyond PT_LAST. + * They are read-only. + */ + if (count > (PT_LAST + 1) * sizeof(u32) - pos) + count = (PT_LAST + 1) * sizeof(u32) - pos; + + pos /= sizeof(u32); + + if (kbuf) { + const u32 *in = kbuf; + for (; count > 0; count -= sizeof(u32), ++pos, ++in) { + if (pos == PT_MSR) + regs[pos] = ((regs[pos] &~ MSR_DEBUGCHANGE) + | (*in & MSR_DEBUGCHANGE)); + else + regs[pos] = *in; } - break; } - - case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */ - int i; - unsigned long *reg = &((unsigned long *)child->thread.fpr)[0]; - unsigned long __user *tmp = (unsigned long __user *)addr; - - flush_fp_to_thread(child); - - for (i = 0; i < 32; i++) { - ret = put_user(*reg, tmp); - if (ret) - break; - reg++; - tmp++; + else { + const u32 __user *in = kbuf; + for (; count > 0; count -= sizeof(u32), ++pos) { + u32 val; + if (get_user(val, in++)) + return -EFAULT; + else if (pos == PT_MSR) + regs[pos] = ((regs[pos] &~ MSR_DEBUGCHANGE) + | (val & MSR_DEBUGCHANGE)); + else + regs[pos] = val; } - break; } - case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */ - int i; - unsigned long *reg = &((unsigned long *)child->thread.fpr)[0]; - unsigned long __user *tmp = (unsigned long __user *)addr; - - flush_fp_to_thread(child); - - for (i = 0; i < 32; i++) { - ret = get_user(*reg, tmp); - if (ret) - break; - reg++; - tmp++; - } - break; - } + return 0; +} +/* + * These are the regset flavors matching the CONFIG_PPC32 native set. + */ +static const struct utrace_regset ppc32_regsets[] = { + { + .n = ELF_NGREG, + .size = sizeof(compat_long_t), .align = sizeof(compat_long_t), + .get = ppc32_gpr_get, .set = ppc32_gpr_set + }, + { + .n = ELF_NFPREG, + .size = sizeof(double), .align = sizeof(double), + .get = fpregs_get, .set = fpregs_set + }, #ifdef CONFIG_ALTIVEC - case PTRACE_GETVRREGS: - /* Get the child altivec register state. */ - flush_altivec_to_thread(child); - ret = get_vrregs((unsigned long __user *)data, child); - break; - - case PTRACE_SETVRREGS: - /* Set the child altivec register state. */ - flush_altivec_to_thread(child); - ret = set_vrregs(child, (unsigned long __user *)data); - break; -#endif -#ifdef CONFIG_SPE - case PTRACE_GETEVRREGS: - /* Get the child spe register state. */ - if (child->thread.regs->msr & MSR_SPE) - giveup_spe(child); - ret = get_evrregs((unsigned long __user *)data, child); - break; - - case PTRACE_SETEVRREGS: - /* Set the child spe register state. 
*/ - /* this is to clear the MSR_SPE bit to force a reload - * of register state from memory */ - if (child->thread.regs->msr & MSR_SPE) - giveup_spe(child); - ret = set_evrregs(child, (unsigned long __user *)data); - break; -#endif - - default: - ret = ptrace_request(child, request, addr, data); - break; - } + { + .n = 33*4+1, .size = sizeof(u32), .align = sizeof(u32), + .active = vrregs_active, .get = vrregs_get, .set = vrregs_set + }, +#endif + { + .n = 1, + .size = sizeof(compat_long_t), .align = sizeof(compat_long_t), + .get = ppc32_dabr_get, .set = ppc32_dabr_set + }, +}; + +const struct utrace_regset_view utrace_ppc32_view = { + .name = "ppc", .e_machine = EM_PPC, + .regsets = ppc32_regsets, .n = ARRAY_SIZE(ppc32_regsets) +}; +EXPORT_SYMBOL_GPL(utrace_ppc32_view); - return ret; +long compat_sys_ptrace(int request, int pid, unsigned long addr, + unsigned long data) +{ + return -ENOSYS; } +#endif void do_syscall_trace_enter(struct pt_regs *regs) { --- linux-2.6/arch/x86_64/kernel/ptrace.c +++ linux-2.6/arch/x86_64/kernel/ptrace.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -31,6 +32,7 @@ #include #include #include +#include /* * does not yet catch signals sent when the child dies. @@ -269,6 +271,7 @@ static int putreg(struct task_struct *ch tmp = get_stack_long(child, EFL_OFFSET); tmp &= ~FLAG_MASK; value |= tmp; + clear_tsk_thread_flag(child, TIF_FORCED_TF); break; case offsetof(struct user_regs_struct,cs): if ((value & 3) != 3) @@ -301,280 +304,382 @@ static unsigned long getreg(struct task_ val = get_stack_long(child, regno); if (test_tsk_thread_flag(child, TIF_IA32)) val &= 0xffffffff; + if (regno == (offsetof(struct user_regs_struct, eflags) + - sizeof(struct pt_regs)) + && test_tsk_thread_flag(child, TIF_FORCED_TF)) + val &= ~X86_EFLAGS_TF; return val; } } -long arch_ptrace(struct task_struct *child, long request, long addr, long data) +static int +genregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + if (kbuf) { + unsigned long *kp = kbuf; + while (count > 0) { + *kp++ = getreg(target, pos); + pos += sizeof(long); + count -= sizeof(long); + } + } + else { + unsigned long __user *up = ubuf; + while (count > 0) { + if (__put_user(getreg(target, pos), up++)) + return -EFAULT; + pos += sizeof(long); + count -= sizeof(long); + } + } + + return 0; +} + +static int +genregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret = 0; + + if (kbuf) { + const unsigned long *kp = kbuf; + while (!ret && count > 0) { + ret = putreg(target, pos, *kp++); + pos += sizeof(long); + count -= sizeof(long); + } + } + else { + int ret = 0; + const unsigned long __user *up = ubuf; + while (!ret && count > 0) { + unsigned long val; + ret = __get_user(val, up++); + if (!ret) + ret = putreg(target, pos, val); + pos += sizeof(long); + count -= sizeof(long); + } + } + + return ret; +} + + +static int +dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset) { - long i, ret; - unsigned ui; + if (tsk->thread.debugreg6 | tsk->thread.debugreg7) + return 8; + return 0; +} - switch (request) { - /* when I and D space are separate, these will need to be fixed. */ - case PTRACE_PEEKTEXT: /* read word at location addr. 
*/ - case PTRACE_PEEKDATA: { - unsigned long tmp; - int copied; - - copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); - ret = -EIO; - if (copied != sizeof(tmp)) - break; - ret = put_user(tmp,(unsigned long __user *) data); - break; +static int +dbregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + for (pos >>= 3, count >>= 3; count > 0; --count, ++pos) { + unsigned long val; + + /* + * The hardware updates the status register on a debug trap, + * but do_debug (traps.c) saves it for us when that happens. + * So whether the target is current or not, debugregN is good. + */ + val = 0; + switch (pos) { + case 0: val = target->thread.debugreg0; break; + case 1: val = target->thread.debugreg1; break; + case 2: val = target->thread.debugreg2; break; + case 3: val = target->thread.debugreg3; break; + case 6: val = target->thread.debugreg6; break; + case 7: val = target->thread.debugreg7; break; + } + + if (kbuf) { + *(unsigned long *) kbuf = val; + kbuf += sizeof(unsigned long); + } + else { + if (__put_user(val, (unsigned long __user *) ubuf)) + return -EFAULT; + ubuf += sizeof(unsigned long); + } } - /* read the word at location addr in the USER area. */ - case PTRACE_PEEKUSR: { - unsigned long tmp; + return 0; +} - ret = -EIO; - if ((addr & 7) || - addr > sizeof(struct user) - 7) - break; +static int +dbregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + + unsigned long maxaddr = TASK_SIZE_OF(target); + maxaddr -= test_tsk_thread_flag(target, TIF_IA32) ? 3 : 7; + + for (pos >>= 3, count >>= 3; count > 0; --count, ++pos) { + unsigned long val; + unsigned int i; + + if (kbuf) { + val = *(const unsigned long *) kbuf; + kbuf += sizeof(unsigned long); + } + else { + if (__get_user(val, (unsigned long __user *) ubuf)) + return -EFAULT; + ubuf += sizeof(unsigned long); + } - switch (addr) { - case 0 ... sizeof(struct user_regs_struct) - sizeof(long): - tmp = getreg(child, addr); - break; - case offsetof(struct user, u_debugreg[0]): - tmp = child->thread.debugreg0; + switch (pos) { +#define SET_DBREG(n) \ + target->thread.debugreg##n = val; \ + if (target == current) \ + set_debugreg(target->thread.debugreg##n, n) + + case 0: + if (val >= maxaddr) + return -EIO; + SET_DBREG(0); break; - case offsetof(struct user, u_debugreg[1]): - tmp = child->thread.debugreg1; + case 1: + if (val >= maxaddr) + return -EIO; + SET_DBREG(1); break; - case offsetof(struct user, u_debugreg[2]): - tmp = child->thread.debugreg2; + case 2: + if (val >= maxaddr) + return -EIO; + SET_DBREG(2); break; - case offsetof(struct user, u_debugreg[3]): - tmp = child->thread.debugreg3; + case 3: + if (val >= maxaddr) + return -EIO; + SET_DBREG(3); break; - case offsetof(struct user, u_debugreg[6]): - tmp = child->thread.debugreg6; + case 4: + case 5: + if (val != 0) + return -EIO; break; - case offsetof(struct user, u_debugreg[7]): - tmp = child->thread.debugreg7; + case 6: + if (val >> 32) + return -EIO; + SET_DBREG(6); break; - default: - tmp = 0; + case 7: + /* + * See arch/i386/kernel/ptrace.c for an explanation + * of this awkward check. 
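+			 * Bits 16-31 of DR7 hold a 4-bit type/length field for
+			 * each of the four breakpoints; a set bit in the 0x5554
+			 * mask flags an encoding that is rejected here.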
+ */ + val &= ~DR_CONTROL_RESERVED; + for (i = 0; i < 4; i++) + if ((0x5554 >> ((val >> (16 + 4*i)) & 0xf)) + & 1) + return -EIO; + if (val) + set_tsk_thread_flag(target, TIF_DEBUG); + else + clear_tsk_thread_flag(target, TIF_DEBUG); + SET_DBREG(7); break; +#undef SET_DBREG } - ret = put_user(tmp,(unsigned long __user *) data); - break; } - /* when I and D space are separate, this will have to be fixed. */ - case PTRACE_POKETEXT: /* write the word at location addr. */ - case PTRACE_POKEDATA: - ret = 0; - if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) - break; - ret = -EIO; - break; + return 0; +} - case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ - { - int dsize = test_tsk_thread_flag(child, TIF_IA32) ? 3 : 7; - ret = -EIO; - if ((addr & 7) || - addr > sizeof(struct user) - 7) - break; - switch (addr) { - case 0 ... sizeof(struct user_regs_struct) - sizeof(long): - ret = putreg(child, addr, data); - break; - /* Disallows to set a breakpoint into the vsyscall */ - case offsetof(struct user, u_debugreg[0]): - if (data >= TASK_SIZE_OF(child) - dsize) break; - child->thread.debugreg0 = data; - ret = 0; - break; - case offsetof(struct user, u_debugreg[1]): - if (data >= TASK_SIZE_OF(child) - dsize) break; - child->thread.debugreg1 = data; - ret = 0; - break; - case offsetof(struct user, u_debugreg[2]): - if (data >= TASK_SIZE_OF(child) - dsize) break; - child->thread.debugreg2 = data; - ret = 0; - break; - case offsetof(struct user, u_debugreg[3]): - if (data >= TASK_SIZE_OF(child) - dsize) break; - child->thread.debugreg3 = data; - ret = 0; - break; - case offsetof(struct user, u_debugreg[6]): - if (data >> 32) - break; - child->thread.debugreg6 = data; - ret = 0; - break; - case offsetof(struct user, u_debugreg[7]): - /* See arch/i386/kernel/ptrace.c for an explanation of - * this awkward check.*/ - data &= ~DR_CONTROL_RESERVED; - for(i=0; i<4; i++) - if ((0x5554 >> ((data >> (16 + 4*i)) & 0xf)) & 1) - break; - if (i == 4) { - child->thread.debugreg7 = data; - if (data) - set_tsk_thread_flag(child, TIF_DEBUG); - else - clear_tsk_thread_flag(child, TIF_DEBUG); - ret = 0; - } - break; - } - break; +static int +fpregs_active(struct task_struct *target, const struct utrace_regset *regset) +{ + return tsk_used_math(target) ? regset->n : 0; +} + +static int +fpregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + if (tsk_used_math(target)) { + if (target == current) + unlazy_fpu(target); } - case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ - case PTRACE_CONT: /* restart after signal. */ + else + init_fpu(target); - ret = -EIO; - if (!valid_signal(data)) - break; - if (request == PTRACE_SYSCALL) - set_tsk_thread_flag(child,TIF_SYSCALL_TRACE); - else - clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE); - clear_tsk_thread_flag(child, TIF_SINGLESTEP); - child->exit_code = data; - /* make sure the single step bit is not set. */ - tracehook_disable_single_step(child); - wake_up_process(child); - ret = 0; - break; - -#ifdef CONFIG_IA32_EMULATION - /* This makes only sense with 32bit programs. Allow a - 64bit debugger to fully examine them too. Better - don't use it against 64bit processes, use - PTRACE_ARCH_PRCTL instead. 
*/ - case PTRACE_SET_THREAD_AREA: { - struct user_desc __user *p; - int old; - p = (struct user_desc __user *)data; - get_user(old, &p->entry_number); - put_user(addr, &p->entry_number); - ret = do_set_thread_area(&child->thread, p); - put_user(old, &p->entry_number); - break; - case PTRACE_GET_THREAD_AREA: - p = (struct user_desc __user *)data; - get_user(old, &p->entry_number); - put_user(addr, &p->entry_number); - ret = do_get_thread_area(&child->thread, p); - put_user(old, &p->entry_number); - break; - } -#endif - /* normal 64bit interface to access TLS data. - Works just like arch_prctl, except that the arguments - are reversed. */ - case PTRACE_ARCH_PRCTL: - ret = do_arch_prctl(child, data, addr); - break; + return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.i387.fxsave, 0, -1); +} -/* - * make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to - * exit. - */ - case PTRACE_KILL: - ret = 0; - if (child->exit_state == EXIT_ZOMBIE) /* already dead */ - break; - clear_tsk_thread_flag(child, TIF_SINGLESTEP); - child->exit_code = SIGKILL; - /* make sure the single step bit is not set. */ - tracehook_disable_single_step(child); - wake_up_process(child); - break; - - case PTRACE_SINGLESTEP: /* set the trap flag. */ - ret = -EIO; - if (!valid_signal(data)) - break; - clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE); - tracehook_enable_single_step(child); - child->exit_code = data; - /* give it a chance to run. */ - wake_up_process(child); - ret = 0; - break; - - case PTRACE_DETACH: - /* detach a process that was attached. */ - ret = ptrace_detach(child, data); - break; - - case PTRACE_GETREGS: { /* Get all gp regs from the child. */ - if (!access_ok(VERIFY_WRITE, (unsigned __user *)data, - sizeof(struct user_regs_struct))) { - ret = -EIO; - break; - } - ret = 0; - for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) { - ret |= __put_user(getreg(child, ui),(unsigned long __user *) data); - data += sizeof(long); - } - break; +static int +fpregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + if (tsk_used_math(target)) { + if (target == current) + unlazy_fpu(target); } + else if (pos == 0 && count == sizeof(struct user_i387_struct)) + set_stopped_child_used_math(target); + else + init_fpu(target); - case PTRACE_SETREGS: { /* Set all gp regs in the child. */ - unsigned long tmp; - if (!access_ok(VERIFY_READ, (unsigned __user *)data, - sizeof(struct user_regs_struct))) { - ret = -EIO; - break; - } - ret = 0; - for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) { - ret = __get_user(tmp, (unsigned long __user *) data); - if (ret) - break; - ret = putreg(child, ui, tmp); - if (ret) - break; - data += sizeof(long); - } - break; - } - - case PTRACE_GETFPREGS: { /* Get the child extended FPU state. 
*/ - if (!access_ok(VERIFY_WRITE, (unsigned __user *)data, - sizeof(struct user_i387_struct))) { - ret = -EIO; - break; + ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.i387.fxsave, 0, -1); + + target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask; + + return ret; +} + +static int +fsgs_active(struct task_struct *tsk, const struct utrace_regset *regset) +{ + if (tsk->thread.gsindex == GS_TLS_SEL || tsk->thread.gs) + return 2; + if (tsk->thread.fsindex == FS_TLS_SEL || tsk->thread.fs) + return 1; + return 0; +} + +static inline u32 read_32bit_tls(struct task_struct *t, int tls) +{ + struct desc_struct *desc = (void *)t->thread.tls_array; + desc += tls; + return desc->base0 | + (((u32)desc->base1) << 16) | + (((u32)desc->base2) << 24); +} + +static int +fsgs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + const unsigned long *kaddr = kbuf; + const unsigned long __user *uaddr = ubuf; + unsigned long addr; + + /* + * XXX why the MSR reads here? + * Can anything change the MSRs without changing thread.fs first? + */ + if (pos == 0) { /* FS */ + if (kaddr) + addr = *kaddr++; + else if (__get_user(addr, uaddr++)) + return -EFAULT; + if (target->thread.fsindex == FS_TLS_SEL) + addr = read_32bit_tls(target, FS_TLS); + else if (target == current) { + rdmsrl(MSR_FS_BASE, addr); } - ret = get_fpregs((struct user_i387_struct __user *)data, child); - break; + else + addr = target->thread.fs; } - case PTRACE_SETFPREGS: { /* Set the child extended FPU state. */ - if (!access_ok(VERIFY_READ, (unsigned __user *)data, - sizeof(struct user_i387_struct))) { - ret = -EIO; - break; + if (count > sizeof(unsigned long)) { /* GS */ + if (kaddr) + addr = *kaddr; + else if (__get_user(addr, uaddr)) + return -EFAULT; + if (target->thread.fsindex == GS_TLS_SEL) + addr = read_32bit_tls(target, GS_TLS); + else if (target == current) { + rdmsrl(MSR_GS_BASE, addr); } - set_stopped_child_used_math(child); - ret = set_fpregs(child, (struct user_i387_struct __user *)data); - break; + else + addr = target->thread.fs; } - default: - ret = ptrace_request(child, request, addr, data); - break; + return 0; +} + +static int +fsgs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + const unsigned long *kaddr = kbuf; + const unsigned long __user *uaddr = ubuf; + unsigned long addr; + int ret = 0; + + if (pos == 0) { /* FS */ + if (kaddr) + addr = *kaddr++; + else if (__get_user(addr, uaddr++)) + return -EFAULT; + ret = do_arch_prctl(target, ARCH_SET_FS, addr); + } + + if (!ret && count > sizeof(unsigned long)) { /* GS */ + if (kaddr) + addr = *kaddr; + else if (__get_user(addr, uaddr)) + return -EFAULT; + ret = do_arch_prctl(target, ARCH_SET_GS, addr); } + return ret; } + +/* + * These are our native regset flavors. + * XXX ioperm? vm86? 
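+ * In order: the general registers, the i387/fxsave state, the fs/gs
+ * base addresses, and the debug registers.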
+ */ +static const struct utrace_regset native_regsets[] = { + { + .n = sizeof(struct user_regs_struct)/8, .size = 8, .align = 8, + .get = genregs_get, .set = genregs_set + }, + { + .n = sizeof(struct user_i387_struct) / sizeof(long), + .size = sizeof(long), .align = sizeof(long), + .active = fpregs_active, + .get = fpregs_get, .set = fpregs_set + }, + { + .n = 2, .size = sizeof(long), .align = sizeof(long), + .active = fsgs_active, + .get = fsgs_get, .set = fsgs_set + }, + { + .n = 8, .size = sizeof(long), .align = sizeof(long), + .active = dbregs_active, + .get = dbregs_get, .set = dbregs_set + }, +}; + +const struct utrace_regset_view utrace_x86_64_native = { + .name = "x86-64", .e_machine = EM_X86_64, + .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) +}; +EXPORT_SYMBOL_GPL(utrace_x86_64_native); + + asmlinkage void syscall_trace_enter(struct pt_regs *regs) { /* do the secure computing check first */ --- linux-2.6/arch/x86_64/ia32/fpu32.c +++ linux-2.6/arch/x86_64/ia32/fpu32.c @@ -9,6 +9,7 @@ #include #include #include +#include static inline unsigned short twd_i387_to_fxsr(unsigned short twd) { @@ -24,7 +25,8 @@ static inline unsigned short twd_i387_to return tmp; } -static inline unsigned long twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave) +static inline unsigned long +twd_fxsr_to_i387(const struct i387_fxsave_struct *fxsave) { struct _fpxreg *st = NULL; unsigned long tos = (fxsave->swd >> 11) & 7; @@ -71,16 +73,11 @@ static inline unsigned long twd_fxsr_to_ } -static inline int convert_fxsr_from_user(struct i387_fxsave_struct *fxsave, - struct _fpstate_ia32 __user *buf) +static inline void +convert_fxsr_env_from_i387(struct i387_fxsave_struct *fxsave, const u32 env[7]) { - struct _fpxreg *to; - struct _fpreg __user *from; - int i; u32 v; - int err = 0; - -#define G(num,val) err |= __get_user(val, num + (u32 __user *)buf) +#define G(num,val) val = env[num] G(0, fxsave->cwd); G(1, fxsave->swd); G(2, fxsave->twd); @@ -91,9 +88,21 @@ static inline int convert_fxsr_from_user G(5, fxsave->rdp); /* 6: ds ignored */ #undef G - if (err) +} + +static inline int convert_fxsr_from_user(struct i387_fxsave_struct *fxsave, + struct _fpstate_ia32 __user *buf) +{ + u32 env[7]; + struct _fpxreg *to; + struct _fpreg __user *from; + int i; + + if (__copy_from_user(env, buf, sizeof(env))) return -1; + convert_fxsr_env_from_i387(fxsave, env); + to = (struct _fpxreg *)&fxsave->st_space[0]; from = &buf->_st[0]; for (i = 0 ; i < 8 ; i++, to++, from++) { @@ -104,16 +113,11 @@ static inline int convert_fxsr_from_user } -static inline int convert_fxsr_to_user(struct _fpstate_ia32 __user *buf, - struct i387_fxsave_struct *fxsave, - struct pt_regs *regs, - struct task_struct *tsk) +static inline void +convert_fxsr_env_to_i387(struct task_struct *tsk, struct pt_regs *regs, + u32 env[7], const struct i387_fxsave_struct *fxsave) { - struct _fpreg __user *to; - struct _fpxreg *from; - int i; u16 cs,ds; - int err = 0; if (tsk == current) { /* should be actually ds/cs at fpu exception time, @@ -125,7 +129,7 @@ static inline int convert_fxsr_to_user(s cs = regs->cs; } -#define P(num,val) err |= __put_user(val, num + (u32 __user *)buf) +#define P(num,val) env[num] = val P(0, (u32)fxsave->cwd | 0xffff0000); P(1, (u32)fxsave->swd | 0xffff0000); P(2, twd_fxsr_to_i387(fxsave)); @@ -134,8 +138,21 @@ static inline int convert_fxsr_to_user(s P(5, fxsave->rdp); P(6, 0xffff0000 | ds); #undef P +} + + +static inline int convert_fxsr_to_user(struct _fpstate_ia32 __user *buf, + struct i387_fxsave_struct *fxsave, + 
struct pt_regs *regs, + struct task_struct *tsk) +{ + struct _fpreg __user *to; + struct _fpxreg *from; + int i; + u32 env[7]; - if (err) + convert_fxsr_env_to_i387(tsk, regs, env, fxsave); + if (__copy_to_user(buf, env, sizeof(env))) return -1; to = &buf->_st[0]; @@ -181,3 +198,38 @@ int save_i387_ia32(struct task_struct *t sizeof(struct i387_fxsave_struct)); return err ? -1 : 1; } + +int get_fpregs32(struct user_i387_ia32_struct *buf, struct task_struct *tsk) +{ + struct pt_regs *regs = ((struct pt_regs *)tsk->thread.rsp0) - 1; + struct _fpreg *to; + const struct _fpxreg *from; + unsigned int i; + + convert_fxsr_env_to_i387(tsk, regs, + (u32 *) buf, &tsk->thread.i387.fxsave); + + to = (struct _fpreg *) buf->st_space; + from = (const struct _fpxreg *) &tsk->thread.i387.fxsave.st_space[0]; + for (i = 0; i < 8; i++, to++, from++) + *to = *(const struct _fpreg *) from; + + return 0; +} + +int +set_fpregs32(struct task_struct *tsk, const struct user_i387_ia32_struct *buf) +{ + struct _fpxreg *to; + const struct _fpreg *from; + unsigned int i; + + convert_fxsr_env_from_i387(&tsk->thread.i387.fxsave, (u32 *) buf); + + to = (struct _fpxreg *) &tsk->thread.i387.fxsave.st_space[0]; + from = (const struct _fpreg *) buf->st_space; + for (i = 0; i < 8; i++, to++, from++) + *(struct _fpreg *) to = *from; + + return 0; +} --- linux-2.6/arch/x86_64/ia32/ptrace32.c +++ linux-2.6/arch/x86_64/ia32/ptrace32.c @@ -16,7 +16,11 @@ #include #include #include +#include +#include +#include #include +#include #include #include #include @@ -25,7 +29,8 @@ #include #include #include -#include +#include +#include /* * Determines which flags the user has access to [1 = access, 0 = no access]. @@ -35,34 +40,33 @@ #define FLAG_MASK 0x54dd5UL #define R32(l,q) \ - case offsetof(struct user32, regs.l): stack[offsetof(struct pt_regs, q)/8] = val; break + case offsetof(struct user_regs_struct32, l): stack[offsetof(struct pt_regs, q)/8] = val; break static int putreg32(struct task_struct *child, unsigned regno, u32 val) { - int i; __u64 *stack = (__u64 *)task_pt_regs(child); switch (regno) { - case offsetof(struct user32, regs.fs): + case offsetof(struct user_regs_struct32, fs): if (val && (val & 3) != 3) return -EIO; child->thread.fsindex = val & 0xffff; break; - case offsetof(struct user32, regs.gs): + case offsetof(struct user_regs_struct32, gs): if (val && (val & 3) != 3) return -EIO; child->thread.gsindex = val & 0xffff; break; - case offsetof(struct user32, regs.ds): + case offsetof(struct user_regs_struct32, ds): if (val && (val & 3) != 3) return -EIO; child->thread.ds = val & 0xffff; break; - case offsetof(struct user32, regs.es): + case offsetof(struct user_regs_struct32, es): child->thread.es = val & 0xffff; break; - case offsetof(struct user32, regs.ss): + case offsetof(struct user_regs_struct32, ss): if ((val & 3) != 3) return -EIO; stack[offsetof(struct pt_regs, ss)/8] = val & 0xffff; break; - case offsetof(struct user32, regs.cs): + case offsetof(struct user_regs_struct32, cs): if ((val & 3) != 3) return -EIO; stack[offsetof(struct pt_regs, cs)/8] = val & 0xffff; break; @@ -78,57 +82,16 @@ static int putreg32(struct task_struct * R32(eip, rip); R32(esp, rsp); - case offsetof(struct user32, regs.eflags): { + case offsetof(struct user_regs_struct32, eflags): { __u64 *flags = &stack[offsetof(struct pt_regs, eflags)/8]; val &= FLAG_MASK; *flags = val | (*flags & ~FLAG_MASK); + clear_tsk_thread_flag(child, TIF_FORCED_TF); break; } - case offsetof(struct user32, u_debugreg[4]): - case offsetof(struct user32, 
u_debugreg[5]): - return -EIO; - - case offsetof(struct user32, u_debugreg[0]): - child->thread.debugreg0 = val; - break; - - case offsetof(struct user32, u_debugreg[1]): - child->thread.debugreg1 = val; - break; - - case offsetof(struct user32, u_debugreg[2]): - child->thread.debugreg2 = val; - break; - - case offsetof(struct user32, u_debugreg[3]): - child->thread.debugreg3 = val; - break; - - case offsetof(struct user32, u_debugreg[6]): - child->thread.debugreg6 = val; - break; - - case offsetof(struct user32, u_debugreg[7]): - val &= ~DR_CONTROL_RESERVED; - /* See arch/i386/kernel/ptrace.c for an explanation of - * this awkward check.*/ - for(i=0; i<4; i++) - if ((0x5454 >> ((val >> (16 + 4*i)) & 0xf)) & 1) - return -EIO; - child->thread.debugreg7 = val; - if (val) - set_tsk_thread_flag(child, TIF_DEBUG); - else - clear_tsk_thread_flag(child, TIF_DEBUG); - break; - default: - if (regno > sizeof(struct user32) || (regno & 3)) - return -EIO; - - /* Other dummy fields in the virtual user structure are ignored */ - break; + BUG(); } return 0; } @@ -136,24 +99,25 @@ static int putreg32(struct task_struct * #undef R32 #define R32(l,q) \ - case offsetof(struct user32, regs.l): *val = stack[offsetof(struct pt_regs, q)/8]; break + case offsetof(struct user_regs_struct32, l): val = stack[offsetof(struct pt_regs, q)/8]; break -static int getreg32(struct task_struct *child, unsigned regno, u32 *val) +static int getreg32(struct task_struct *child, unsigned regno) { __u64 *stack = (__u64 *)task_pt_regs(child); + u32 val; switch (regno) { - case offsetof(struct user32, regs.fs): - *val = child->thread.fsindex; + case offsetof(struct user_regs_struct32, fs): + val = child->thread.fsindex; break; - case offsetof(struct user32, regs.gs): - *val = child->thread.gsindex; + case offsetof(struct user_regs_struct32, gs): + val = child->thread.gsindex; break; - case offsetof(struct user32, regs.ds): - *val = child->thread.ds; + case offsetof(struct user_regs_struct32, ds): + val = child->thread.ds; break; - case offsetof(struct user32, regs.es): - *val = child->thread.es; + case offsetof(struct user_regs_struct32, es): + val = child->thread.es; break; R32(cs, cs); @@ -167,238 +131,458 @@ static int getreg32(struct task_struct * R32(eax, rax); R32(orig_eax, orig_rax); R32(eip, rip); - R32(eflags, eflags); R32(esp, rsp); - case offsetof(struct user32, u_debugreg[0]): - *val = child->thread.debugreg0; - break; - case offsetof(struct user32, u_debugreg[1]): - *val = child->thread.debugreg1; - break; - case offsetof(struct user32, u_debugreg[2]): - *val = child->thread.debugreg2; - break; - case offsetof(struct user32, u_debugreg[3]): - *val = child->thread.debugreg3; - break; - case offsetof(struct user32, u_debugreg[6]): - *val = child->thread.debugreg6; - break; - case offsetof(struct user32, u_debugreg[7]): - *val = child->thread.debugreg7; + case offsetof(struct user_regs_struct32, eflags): + val = stack[offsetof(struct pt_regs, eflags) / 8]; + if (test_tsk_thread_flag(child, TIF_FORCED_TF)) + val &= ~X86_EFLAGS_TF; break; default: - if (regno > sizeof(struct user32) || (regno & 3)) - return -EIO; - - /* Other dummy fields in the virtual user structure are ignored */ - *val = 0; + BUG(); + val = -1; break; } - return 0; + + return val; } #undef R32 -static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data) +asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) +{ + return -ENOSYS; +} + +static int +ia32_genregs_get(struct task_struct *target, + const struct utrace_regset 
*regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + if (kbuf) { + u32 *kp = kbuf; + while (count > 0) { + *kp++ = getreg32(target, pos); + pos += 4; + count -= 4; + } + } + else { + u32 __user *up = ubuf; + while (count > 0) { + if (__put_user(getreg32(target, pos), up++)) + return -EFAULT; + pos += 4; + count -= 4; + } + } + + return 0; +} + +static int +ia32_genregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) { + int ret = 0; + + if (kbuf) { + const u32 *kp = kbuf; + while (!ret && count > 0) { + ret = putreg32(target, pos, *kp++); + pos += 4; + count -= 4; + } + } + else { + int ret = 0; + const u32 __user *up = ubuf; + while (!ret && count > 0) { + u32 val; + ret = __get_user(val, up++); + if (!ret) + ret = putreg32(target, pos, val); + pos += 4; + count -= 4; + } + } + + return ret; +} + +static int +ia32_fpregs_active(struct task_struct *target, + const struct utrace_regset *regset) +{ + return tsk_used_math(target) ? regset->n : 0; +} + +static int +ia32_fpregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct user_i387_ia32_struct fp; int ret; - compat_siginfo_t __user *si32 = compat_ptr(data); - siginfo_t ssi; - siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t)); - if (request == PTRACE_SETSIGINFO) { - memset(&ssi, 0, sizeof(siginfo_t)); - ret = copy_siginfo_from_user32(&ssi, si32); + + if (tsk_used_math(target)) { + if (target == current) + unlazy_fpu(target); + } + else + init_fpu(target); + + ret = get_fpregs32(&fp, target); + if (ret == 0) + ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + &fp, 0, -1); + + return ret; +} + +static int +ia32_fpregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct user_i387_ia32_struct fp; + int ret; + + if (tsk_used_math(target)) { + if (target == current) + unlazy_fpu(target); + } + else if (pos == 0 && count == sizeof(fp)) + set_stopped_child_used_math(target); + else + init_fpu(target); + + if (pos > 0 || count < sizeof(fp)) { + ret = get_fpregs32(&fp, target); + if (ret == 0) + ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, + &fp, 0, -1); if (ret) return ret; - if (copy_to_user(si, &ssi, sizeof(siginfo_t))) - return -EFAULT; + kbuf = &fp; } - ret = sys_ptrace(request, pid, addr, (unsigned long)si); - if (ret) - return ret; - if (request == PTRACE_GETSIGINFO) { - if (copy_from_user(&ssi, si, sizeof(siginfo_t))) + else if (kbuf == NULL) { + if (__copy_from_user(&fp, ubuf, sizeof(fp))) return -EFAULT; - ret = copy_siginfo_to_user32(si32, &ssi); + kbuf = &fp; } - return ret; + + return set_fpregs32(target, kbuf); } -asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) +static int +ia32_fpxregs_active(struct task_struct *target, + const struct utrace_regset *regset) +{ + return tsk_used_math(target) ? 
regset->n : 0; +} + +static int +ia32_fpxregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + if (tsk_used_math(target)) { + if (target == current) + unlazy_fpu(target); + } + else + init_fpu(target); + + return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.i387.fxsave, 0, -1); +} + +static int +ia32_fpxregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) + { - struct task_struct *child; - struct pt_regs *childregs; - void __user *datap = compat_ptr(data); int ret; - __u32 val; - switch (request) { - case PTRACE_TRACEME: - case PTRACE_ATTACH: - case PTRACE_KILL: - case PTRACE_CONT: - case PTRACE_SINGLESTEP: - case PTRACE_DETACH: - case PTRACE_SYSCALL: - case PTRACE_SETOPTIONS: - case PTRACE_SET_THREAD_AREA: - case PTRACE_GET_THREAD_AREA: - return sys_ptrace(request, pid, addr, data); + if (tsk_used_math(target)) { + if (target == current) + unlazy_fpu(target); + } + else if (pos == 0 && count == sizeof(struct i387_fxsave_struct)) + set_stopped_child_used_math(target); + else + init_fpu(target); - default: - return -EINVAL; + ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.i387.fxsave, 0, -1); - case PTRACE_PEEKTEXT: - case PTRACE_PEEKDATA: - case PTRACE_POKEDATA: - case PTRACE_POKETEXT: - case PTRACE_POKEUSR: - case PTRACE_PEEKUSR: - case PTRACE_GETREGS: - case PTRACE_SETREGS: - case PTRACE_SETFPREGS: - case PTRACE_GETFPREGS: - case PTRACE_SETFPXREGS: - case PTRACE_GETFPXREGS: - case PTRACE_GETEVENTMSG: - break; - - case PTRACE_SETSIGINFO: - case PTRACE_GETSIGINFO: - return ptrace32_siginfo(request, pid, addr, data); - } - - child = ptrace_get_task_struct(pid); - if (IS_ERR(child)) - return PTR_ERR(child); - - ret = ptrace_check_attach(child, request == PTRACE_KILL); - if (ret < 0) - goto out; - - childregs = task_pt_regs(child); - - switch (request) { - case PTRACE_PEEKDATA: - case PTRACE_PEEKTEXT: - ret = 0; - if (access_process_vm(child, addr, &val, sizeof(u32), 0)!=sizeof(u32)) - ret = -EIO; - else - ret = put_user(val, (unsigned int __user *)datap); - break; + target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask; - case PTRACE_POKEDATA: - case PTRACE_POKETEXT: - ret = 0; - if (access_process_vm(child, addr, &data, sizeof(u32), 1)!=sizeof(u32)) - ret = -EIO; - break; + return ret; +} - case PTRACE_PEEKUSR: - ret = getreg32(child, addr, &val); - if (ret == 0) - ret = put_user(val, (__u32 __user *)datap); - break; +static int +ia32_dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset) +{ + if (tsk->thread.debugreg6 | tsk->thread.debugreg7) + return 8; + return 0; +} - case PTRACE_POKEUSR: - ret = putreg32(child, addr, data); - break; +static int +ia32_dbregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) { + u32 val; - case PTRACE_GETREGS: { /* Get all gp regs from the child. */ - int i; - if (!access_ok(VERIFY_WRITE, datap, 16*4)) { - ret = -EIO; - break; - } - ret = 0; - for ( i = 0; i <= 16*4 ; i += sizeof(__u32) ) { - getreg32(child, i, &val); - ret |= __put_user(val,(u32 __user *)datap); - datap += sizeof(u32); + /* + * The hardware updates the status register on a debug trap, + * but do_debug (traps.c) saves it for us when that happens. 
+ * So whether the target is current or not, debugregN is good. + */ + val = 0; + switch (pos) { + case 0: val = target->thread.debugreg0; break; + case 1: val = target->thread.debugreg1; break; + case 2: val = target->thread.debugreg2; break; + case 3: val = target->thread.debugreg3; break; + case 6: val = target->thread.debugreg6; break; + case 7: val = target->thread.debugreg7; break; } - break; - } - case PTRACE_SETREGS: { /* Set all gp regs in the child. */ - unsigned long tmp; - int i; - if (!access_ok(VERIFY_READ, datap, 16*4)) { - ret = -EIO; - break; + if (kbuf) { + *(u32 *) kbuf = val; + kbuf += sizeof(u32); } - ret = 0; - for ( i = 0; i <= 16*4; i += sizeof(u32) ) { - ret |= __get_user(tmp, (u32 __user *)datap); - putreg32(child, i, tmp); - datap += sizeof(u32); + else { + if (__put_user(val, (u32 __user *) ubuf)) + return -EFAULT; + ubuf += sizeof(u32); } - break; } - case PTRACE_GETFPREGS: - ret = -EIO; - if (!access_ok(VERIFY_READ, compat_ptr(data), - sizeof(struct user_i387_struct))) - break; - save_i387_ia32(child, datap, childregs, 1); - ret = 0; - break; + return 0; +} - case PTRACE_SETFPREGS: - ret = -EIO; - if (!access_ok(VERIFY_WRITE, datap, - sizeof(struct user_i387_struct))) - break; - ret = 0; - /* don't check EFAULT to be bug-to-bug compatible to i386 */ - restore_i387_ia32(child, datap, 1); - break; +static int +ia32_dbregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + /* + * We'll just hijack the native setter to do the real work for us. + */ + const struct utrace_regset *dbregset = &utrace_x86_64_native.regsets[2]; + + int ret = 0; + + for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) { + unsigned long val; + + if (kbuf) { + val = *(const u32 *) kbuf; + kbuf += sizeof(u32); + } + else { + if (__get_user(val, (u32 __user *) ubuf)) + return -EFAULT; + ubuf += sizeof(u32); + } - case PTRACE_GETFPXREGS: { - struct user32_fxsr_struct __user *u = datap; - init_fpu(child); - ret = -EIO; - if (!access_ok(VERIFY_WRITE, u, sizeof(*u))) - break; - ret = -EFAULT; - if (__copy_to_user(u, &child->thread.i387.fxsave, sizeof(*u))) + ret = (*dbregset->set)(target, dbregset, pos * sizeof(long), + sizeof(val), &val, NULL); + if (ret) break; - ret = __put_user(childregs->cs, &u->fcs); - ret |= __put_user(child->thread.ds, &u->fos); - break; } - case PTRACE_SETFPXREGS: { - struct user32_fxsr_struct __user *u = datap; - unlazy_fpu(child); - ret = -EIO; - if (!access_ok(VERIFY_READ, u, sizeof(*u))) - break; - /* no checking to be bug-to-bug compatible with i386. */ - /* but silence warning */ - if (__copy_from_user(&child->thread.i387.fxsave, u, sizeof(*u))) - ; - set_stopped_child_used_math(child); - child->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask; - ret = 0; - break; + + return ret; +} + + +/* + * Perform get_thread_area on behalf of the traced child. 
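+ * Each slot reads back as a struct user_desc whose entry_number is the
+ * slot index plus GDT_ENTRY_TLS_MIN.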
+ */ +static int +ia32_tls_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct user_desc info, *ip; + const struct n_desc_struct *desc; + const struct n_desc_struct *tls; + +/* + * Get the current Thread-Local Storage area: + */ + +#define GET_BASE(desc) ( \ + (((desc)->a >> 16) & 0x0000ffff) | \ + (((desc)->b << 16) & 0x00ff0000) | \ + ( (desc)->b & 0xff000000) ) + +#define GET_LIMIT(desc) ( \ + ((desc)->a & 0x0ffff) | \ + ((desc)->b & 0xf0000) ) + +#define GET_32BIT(desc) (((desc)->b >> 22) & 1) +#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3) +#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1) +#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1) +#define GET_PRESENT(desc) (((desc)->b >> 15) & 1) +#define GET_USEABLE(desc) (((desc)->b >> 20) & 1) + + tls = (struct n_desc_struct *) target->thread.tls_array; + desc = &tls[pos]; + ip = kbuf ?: &info; + memset(ip, 0, sizeof *ip); + for (; count > 0; count -= sizeof(struct user_desc), ++desc) { + ip->entry_number = desc - tls + GDT_ENTRY_TLS_MIN; + ip->base_addr = GET_BASE(desc); + ip->limit = GET_LIMIT(desc); + ip->seg_32bit = GET_32BIT(desc); + ip->contents = GET_CONTENTS(desc); + ip->read_exec_only = !GET_WRITABLE(desc); + ip->limit_in_pages = GET_LIMIT_PAGES(desc); + ip->seg_not_present = !GET_PRESENT(desc); + ip->useable = GET_USEABLE(desc); + + if (kbuf) + ++ip; + else { + if (__copy_to_user(ubuf, &info, sizeof(info))) + return -EFAULT; + ubuf += sizeof(info); + } } -#if 0 /* XXX */ - case PTRACE_GETEVENTMSG: - ret = put_user(child->ptrace_message,(unsigned int __user *)compat_ptr(data)); - break; -#endif + return 0; +} - default: - BUG(); +/* + * Perform set_thread_area on behalf of the traced child. + */ +static int +ia32_tls_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct user_desc info; + struct n_desc_struct *desc; + struct n_desc_struct newtls[GDT_ENTRY_TLS_ENTRIES]; + unsigned int i; + int cpu; + + pos /= sizeof(struct user_desc); + count /= sizeof(struct user_desc); + + desc = &newtls[pos]; + for (i = 0; i < count; ++i, ++desc) { + const struct user_desc *ip; + if (kbuf) { + ip = kbuf; + kbuf += sizeof(struct user_desc); + } + else { + ip = &info; + if (__copy_from_user(&info, ubuf, sizeof(info))) + return -EFAULT; + ubuf += sizeof(struct user_desc); + } + + if (LDT_empty(ip)) { + desc->a = 0; + desc->b = 0; + } else { + desc->a = LDT_entry_a(ip); + desc->b = LDT_entry_b(ip); + } } - out: - put_task_struct(child); - return ret; + /* + * We must not get preempted while modifying the TLS. + */ + cpu = get_cpu(); + memcpy(&target->thread.tls_array[pos], newtls, + count * sizeof(newtls[0])); + if (target == current) + load_TLS(&target->thread, cpu); + put_cpu(); + + return 0; +} + +/* + * Determine how many TLS slots are in use. + */ +static int +ia32_tls_active(struct task_struct *target, const struct utrace_regset *regset) +{ + int i; + for (i = GDT_ENTRY_TLS_ENTRIES; i > 0; --i) { + struct n_desc_struct *desc = (struct n_desc_struct *) + &target->thread.tls_array[i - 1]; + if ((desc->a | desc->b) != 0) + break; + } + return i; } + +/* + * This should match arch/i386/kernel/ptrace.c:native_regsets. + * XXX ioperm? vm86? 
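+ * In order: the 32-bit general registers, the classic i387 state, the
+ * fxsr state, the TLS descriptors, and the debug registers.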
+ */ +static const struct utrace_regset ia32_regsets[] = { + { + .n = sizeof(struct user_regs_struct32)/4, + .size = 4, .align = 4, + .get = ia32_genregs_get, .set = ia32_genregs_set + }, + { + .n = sizeof(struct user_i387_ia32_struct) / 4, + .size = 4, .align = 4, + .active = ia32_fpregs_active, + .get = ia32_fpregs_get, .set = ia32_fpregs_set + }, + { + .n = sizeof(struct user32_fxsr_struct) / 4, + .size = 4, .align = 4, + .active = ia32_fpxregs_active, + .get = ia32_fpxregs_get, .set = ia32_fpxregs_set + }, + { + .n = GDT_ENTRY_TLS_ENTRIES, + .bias = GDT_ENTRY_TLS_MIN, + .size = sizeof(struct user_desc), + .align = sizeof(struct user_desc), + .active = ia32_tls_active, + .get = ia32_tls_get, .set = ia32_tls_set + }, + { + .n = 8, .size = 4, .align = 4, + .active = ia32_dbregs_active, + .get = ia32_dbregs_get, .set = ia32_dbregs_set + }, +}; + +const struct utrace_regset_view utrace_ia32_view = { + .name = "i386", .e_machine = EM_386, + .regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets) +}; +EXPORT_SYMBOL_GPL(utrace_ia32_view);
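
For readers new to the regset interface, here is a minimal, illustrative
sketch (not part of the patch) of how a tracing engine might read the first
general-purpose register of a thread through one of the views exported
above.  The helper name example_read_gpr0 and the -ENODATA return value are
invented for the example; it assumes the target is current or already
quiescent, as the regset entry points require, that regset 0 of the view is
the general-register set (as in the native_regsets arrays above), and that
a general-register slot is sizeof(long), which holds for the native views
but not for the 4-byte slots of the 32-bit compatibility views.

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/tracehook.h>

static int example_read_gpr0(struct task_struct *target, unsigned long *value)
{
	const struct utrace_regset_view *view = utrace_native_view(target);
	const struct utrace_regset *regset = &view->regsets[0];

	/* Skip regsets reporting no interesting state for this thread. */
	if (regset->active && (*regset->active)(target, regset) <= 0)
		return -ENODATA;

	/*
	 * pos must be a multiple of ->align and count a multiple of
	 * ->size; here one slot is read into a kernel buffer, so kbuf
	 * is used and ubuf is left NULL.
	 */
	return (*regset->get)(target, regset, 0, regset->size, value, NULL);
}

Writes go through the matching ->set hook, which takes const kbuf/ubuf
pointers and obeys the same pos/count rules.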