Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal
Pull generic execve() changes from Al Viro:
 "This introduces the generic kernel_thread() and kernel_execve()
  functions, and switches x86, arm, alpha, um and s390 over to them."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal: (26 commits)
  s390: convert to generic kernel_execve()
  s390: switch to generic kernel_thread()
  s390: fold kernel_thread_helper() into ret_from_fork()
  s390: fold execve_tail() into start_thread(), convert to generic sys_execve()
  um: switch to generic kernel_thread()
  x86, um/x86: switch to generic sys_execve and kernel_execve
  x86: split ret_from_fork
  alpha: introduce ret_from_kernel_execve(), switch to generic kernel_execve()
  alpha: switch to generic kernel_thread()
  alpha: switch to generic sys_execve()
  arm: get rid of execve wrapper, switch to generic execve() implementation
  arm: optimized current_pt_regs()
  arm: introduce ret_from_kernel_execve(), switch to generic kernel_execve()
  arm: split ret_from_fork, simplify kernel_thread() [based on patch by rmk]
  generic sys_execve()
  generic kernel_execve()
  new helper: current_pt_regs()
  preparation for generic kernel_thread()
  um: kill thread->forking
  um: let signal_delivered() do SIGTRAP on singlestepping into handler
  ...
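A note for orientation before the ARM diff below (a hedged sketch, not the exact upstream code): with GENERIC_KERNEL_THREAD selected, kernel_thread() itself is supplied by the core kernel, and the architecture only has to (a) teach copy_thread() to handle the kernel-thread case, signalled by a NULL pt_regs, and (b) provide a ret_from_kernel_thread entry that calls the thread function. The ARM hunks further down implement exactly this; simplified, the arch-side contract looks roughly like:

/* Illustrative sketch only: simplified from the ARM copy_thread() hunk below. */
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *thread = task_thread_info(p);
        struct pt_regs *childregs = task_pt_regs(p);

        memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

        if (regs) {
                /* Ordinary fork/clone: child returns to userspace via ret_from_fork. */
                *childregs = *regs;
                childregs->ARM_r0 = 0;
                childregs->ARM_sp = stack_start;
                thread->cpu_context.pc = (unsigned long)ret_from_fork;
        } else {
                /* kernel_thread(): the generic helper passes fn in stack_start and
                 * its argument in stk_sz; stash them in callee-saved registers so
                 * ret_from_kernel_thread can call fn(arg) after schedule_tail(). */
                thread->cpu_context.r4 = stk_sz;        /* argument */
                thread->cpu_context.r5 = stack_start;   /* function */
                thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread;
                childregs->ARM_cpsr = SVC_MODE;
        }
        thread->cpu_context.sp = (unsigned long)childregs;
        return 0;
}

Similarly, __ARCH_WANT_SYS_EXECVE and __ARCH_WANT_KERNEL_EXECVE let the generic sys_execve()/kernel_execve() replace the per-arch copies, with the return to userspace handled by the arch's ret_from_kernel_execve().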
@@ -52,6 +52,7 @@ config ARM
    select GENERIC_STRNCPY_FROM_USER
    select GENERIC_STRNLEN_USER
    select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN
    select GENERIC_KERNEL_THREAD
    help
      The ARM series is a line of low-power-consumption RISC chip designs
      licensed by ARM Ltd and targeted at embedded applications and
@@ -85,11 +85,6 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
#endif

/*
 * Create a new kernel thread
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

#define task_pt_regs(p) \
    ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

@@ -254,6 +254,11 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
    return regs->ARM_sp;
}

#define current_pt_regs(void) ({ \
    register unsigned long sp asm ("sp"); \
    (struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1; \
})

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

@@ -2,7 +2,6 @@
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/exec.h>
#include <asm/switch_to.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>

@@ -478,6 +478,8 @@
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_SOCKETCALL
#endif
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_KERNEL_EXECVE

/*
 * "Conditional" syscalls
@@ -20,7 +20,7 @@
        CALL(sys_creat)
        CALL(sys_link)
/* 10 */    CALL(sys_unlink)
        CALL(sys_execve_wrapper)
        CALL(sys_execve)
        CALL(sys_chdir)
        CALL(OBSOLETE(sys_time))    /* used by libc4 */
        CALL(sys_mknod)
@@ -91,6 +91,30 @@ ENTRY(ret_from_fork)
    b   ret_slow_syscall
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
UNWIND(.fnstart)
UNWIND(.cantunwind)
    bl  schedule_tail
    mov r0, r4
    adr lr, BSYM(1f)        @ kernel threads should not exit
    mov pc, r5
1:  bl  do_exit
    nop
UNWIND(.fnend)
ENDPROC(ret_from_kernel_thread)

/*
 * turn a kernel thread into userland process
 * use: ret_from_kernel_execve(struct pt_regs *normal)
 */
ENTRY(ret_from_kernel_execve)
    mov why, #0             @ not a syscall
    str why, [r0, #S_R0]    @ ... and we want 0 in ->ARM_r0 as well
    get_thread_info tsk     @ thread structure
    mov sp, r0              @ stack pointer just under pt_regs
    b   ret_slow_syscall
ENDPROC(ret_from_kernel_execve)

    .equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
@@ -517,11 +541,6 @@ sys_vfork_wrapper:
    b   sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
    add r3, sp, #S_OFF
    b   sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
    add ip, sp, #S_OFF
    str ip, [sp, #4]

@@ -373,6 +373,7 @@ void release_thread(struct task_struct *dead_task)
}

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");

int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
@@ -381,13 +382,20 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
    struct thread_info *thread = task_thread_info(p);
    struct pt_regs *childregs = task_pt_regs(p);

    *childregs = *regs;
    childregs->ARM_r0 = 0;
    childregs->ARM_sp = stack_start;

    memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

    if (likely(regs)) {
        *childregs = *regs;
        childregs->ARM_r0 = 0;
        childregs->ARM_sp = stack_start;
        thread->cpu_context.pc = (unsigned long)ret_from_fork;
    } else {
        thread->cpu_context.r4 = stk_sz;
        thread->cpu_context.r5 = stack_start;
        thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread;
        childregs->ARM_cpsr = SVC_MODE;
    }
    thread->cpu_context.sp = (unsigned long)childregs;
    thread->cpu_context.pc = (unsigned long)ret_from_fork;

    clear_ptrace_hw_breakpoint(p);

@@ -423,63 +431,6 @@ int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
}
EXPORT_SYMBOL(dump_fpu);

/*
 * Shuffle the argument into the correct register before calling the
 * thread function. r4 is the thread argument, r5 is the pointer to
 * the thread function, and r6 points to the exit function.
 */
extern void kernel_thread_helper(void);
asm( ".pushsection .text\n"
     " .align\n"
     " .type kernel_thread_helper, #function\n"
     "kernel_thread_helper:\n"
#ifdef CONFIG_TRACE_IRQFLAGS
     " bl trace_hardirqs_on\n"
#endif
     " msr cpsr_c, r7\n"
     " mov r0, r4\n"
     " mov lr, r6\n"
     " mov pc, r5\n"
     " .size kernel_thread_helper, . - kernel_thread_helper\n"
     " .popsection");

#ifdef CONFIG_ARM_UNWIND
extern void kernel_thread_exit(long code);
asm( ".pushsection .text\n"
     " .align\n"
     " .type kernel_thread_exit, #function\n"
     "kernel_thread_exit:\n"
     " .fnstart\n"
     " .cantunwind\n"
     " bl do_exit\n"
     " nop\n"
     " .fnend\n"
     " .size kernel_thread_exit, . - kernel_thread_exit\n"
     " .popsection");
#else
#define kernel_thread_exit do_exit
#endif

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
    struct pt_regs regs;

    memset(&regs, 0, sizeof(regs));

    regs.ARM_r4 = (unsigned long)arg;
    regs.ARM_r5 = (unsigned long)fn;
    regs.ARM_r6 = (unsigned long)kernel_thread_exit;
    regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
    regs.ARM_pc = (unsigned long)kernel_thread_helper;
    regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;

    return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

unsigned long get_wchan(struct task_struct *p)
{
    struct stackframe frame;

@@ -59,69 +59,6 @@ asmlinkage int sys_vfork(struct pt_regs *regs)
    return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
}

/* sys_execve() executes a new program.
 * This is called indirectly via a small wrapper
 */
asmlinkage int sys_execve(const char __user *filenamei,
                          const char __user *const __user *argv,
                          const char __user *const __user *envp, struct pt_regs *regs)
{
    int error;
    char * filename;

    filename = getname(filenamei);
    error = PTR_ERR(filename);
    if (IS_ERR(filename))
        goto out;
    error = do_execve(filename, argv, envp, regs);
    putname(filename);
out:
    return error;
}

int kernel_execve(const char *filename,
                  const char *const argv[],
                  const char *const envp[])
{
    struct pt_regs regs;
    int ret;

    memset(&regs, 0, sizeof(struct pt_regs));
    ret = do_execve(filename,
                    (const char __user *const __user *)argv,
                    (const char __user *const __user *)envp, &regs);
    if (ret < 0)
        goto out;

    /*
     * Save argc to the register structure for userspace.
     */
    regs.ARM_r0 = ret;

    /*
     * We were successful. We won't be returning to our caller, but
     * instead to user space by manipulating the kernel stack.
     */
    asm( "add r0, %0, %1\n\t"
         "mov r1, %2\n\t"
         "mov r2, %3\n\t"
         "bl  memmove\n\t"      /* copy regs to top of stack */
         "mov r8, #0\n\t"       /* not a syscall */
         "mov r9, %0\n\t"       /* thread structure */
         "mov sp, r0\n\t"       /* reposition stack pointer */
         "b   ret_to_user"
         :
         : "r" (current_thread_info()),
           "Ir" (THREAD_START_SP - sizeof(regs)),
           "r" (&regs),
           "Ir" (sizeof(regs))
         : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");

out:
    return ret;
}
EXPORT_SYMBOL(kernel_execve);

/*
 * Since loff_t is a 64 bit type we avoid a lot of ABI hassle
 * with a different argument ordering.