mirror of
https://github.com/fail0verflow/switch-linux.git
synced 2025-05-04 02:34:21 -04:00
Historical prepare_to_copy() is mostly a no-op, duplicated for majority of the architectures and the rest following the x86 model of flushing the extended register state like fpu there. Remove it and use the arch_dup_task_struct() instead. Suggested-by: Oleg Nesterov <oleg@redhat.com> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Link: http://lkml.kernel.org/r/1336692811-30576-1-git-send-email-suresh.b.siddha@intel.com Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: David Howells <dhowells@redhat.com> Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Paul Mundt <lethal@linux-sh.org> Cc: Chris Zankel <chris@zankel.net> Cc: Richard Henderson <rth@twiddle.net> Cc: Russell King <linux@arm.linux.org.uk> Cc: Haavard Skinnemoen <hskinnemoen@gmail.com> Cc: Mike Frysinger <vapier@gentoo.org> Cc: Mark Salter <msalter@redhat.com> Cc: Aurelien Jacquiot <a-jacquiot@ti.com> Cc: Mikael Starvik <starvik@axis.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Cc: Richard Kuo <rkuo@codeaurora.org> Cc: Tony Luck <tony.luck@intel.com> Cc: Michal Simek <monstr@monstr.eu> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Jonas Bonn <jonas@southpole.se> Cc: James E.J. Bottomley <jejb@parisc-linux.org> Cc: Helge Deller <deller@gmx.de> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Chen Liqin <liqin.chen@sunplusct.com> Cc: Lennox Wu <lennox.wu@gmail.com> Cc: David S. Miller <davem@davemloft.net> Cc: Chris Metcalf <cmetcalf@tilera.com> Cc: Jeff Dike <jdike@addtoit.com> Cc: Richard Weinberger <richard@nod.at> Cc: Guan Xuetao <gxt@mprc.pku.edu.cn> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
128 lines
2.9 KiB
C
/*
|
|
* arch/arm/include/asm/processor.h
|
|
*
|
|
* Copyright (C) 1995-1999 Russell King
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
* published by the Free Software Foundation.
|
|
*/
|
|
|
|
#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 *
 * Uses the GCC local-label / labels-as-values extension: declare a
 * local label at the point of expansion and take its address, which
 * is (approximately) the current text address.
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
|
|
|
|
#ifdef __KERNEL__

#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>

/*
 * NOTE(review): this inner #ifdef __KERNEL__ is redundant — we are
 * already inside the identical guard opened above.  Harmless, but it
 * could be dropped.
 */
#ifdef __KERNEL__
/*
 * Top of the user stack: tasks running with the 32-bit address-limit
 * personality get the full TASK_SIZE; legacy 26-bit tasks are capped
 * at TASK_SIZE_26.
 */
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
	TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX TASK_SIZE
#endif
|
|
|
|
/*
 * Per-thread hardware-breakpoint state.  Empty unless the kernel is
 * built with hardware breakpoint support.
 */
struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* One perf_event slot per architectural hw breakpoint slot. */
	struct perf_event *hbp[ARM_MAX_HBP_SLOTS];
#endif
};
|
|
|
|
/*
 * Architecture-specific per-thread state.
 */
struct thread_struct {
	/* fault info — presumably recorded when the thread takes a
	 * fault, for signal delivery/ptrace; confirm against the ARM
	 * fault handlers. */
	unsigned long address;		/* faulting address */
	unsigned long trap_no;		/* trap number */
	unsigned long error_code;	/* fault status / error code */
	/* debugging */
	struct debug_info debug;	/* hw breakpoint state, see above */
};

/* New threads start with a fully zeroed thread_struct. */
#define INIT_THREAD { }
|
|
|
|
/*
 * Extra per-exec register setup needed only on !MMU kernels: r10 is
 * pointed at the start of the data segment (NOTE(review): presumably
 * the binfmt_flat convention — confirm against the flat loader).
 * With an MMU this is a no-op.
 */
#ifdef CONFIG_MMU
#define nommu_start_thread(regs) do { } while (0)
#else
#define nommu_start_thread(regs) regs->ARM_r10 = current->mm->start_data
#endif
|
|
|
|
/*
 * Set up the user-mode register state for a freshly exec'd task:
 * clear all registers, select the CPSR user mode (26- or 32-bit,
 * per personality), set the Thumb bit if the entry point is odd,
 * and load pc/sp plus argc/argv/envp from the new user stack.
 *
 * NOTE: statement-expression macro — `pc' and `sp' are evaluated
 * more than once, so callers must pass side-effect-free expressions.
 */
#define start_thread(regs,pc,sp)					\
({									\
	unsigned long *stack = (unsigned long *)sp;			\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	/* odd entry address + Thumb capability => start in Thumb */	\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
	regs->ARM_r2 = stack[2];	/* r2 (envp) */			\
	regs->ARM_r1 = stack[1];	/* r1 (argv) */			\
	regs->ARM_r0 = stack[0];	/* r0 (argc) */			\
	nommu_start_thread(regs);					\
})
|
|
|
|
/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Report the address a blocked task is waiting at (NOTE(review):
 * presumably consumed via /proc wchan — confirm callers). */
unsigned long get_wchan(struct task_struct *p);
|
|
|
|
/*
 * cpu_relax() — used inside busy-wait loops.  On ARMv6, or parts
 * affected by ARM erratum 754327, a full memory barrier is required;
 * everywhere else a plain compiler barrier suffices.
 */
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax() smp_mb()
#else
#define cpu_relax() barrier()
#endif

/* Wait until all CPUs have left the idle loop. */
void cpu_idle_wait(void);
|
|
|
|
/*
 * Create a new kernel thread
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/*
 * Saved user registers of a task: one struct pt_regs placed
 * immediately below THREAD_START_SP at the top of the task's
 * kernel stack page.
 */
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

/* User-mode pc/sp of a task, read from its saved register frame. */
#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp
|
|
|
|
/*
 * Prefetching support - only ARMv5.
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
/* Issue a PLD (preload data) hint for the given address. */
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:
		: "p" (ptr)
		: "cc");
}

#define ARCH_HAS_PREFETCHW
/* No separate write-prefetch here; reuse the read prefetch. */
#define prefetchw(ptr)	prefetch(ptr)

#define ARCH_HAS_SPINLOCK_PREFETCH
/* Spinlock prefetch is deliberately a no-op on ARM. */
#define spin_lock_prefetch(x) do { } while (0)

#endif
|
|
|
|
/* ARM supplies its own mmap layout policy (NOTE(review): presumably
 * arch_pick_mmap_layout() is defined in arch mm code — confirm). */
#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif /* __KERNEL__ */

#endif /* __ASM_ARM_PROCESSOR_H */
|