mirror of
https://github.com/fail0verflow/switch-linux.git
synced 2025-05-04 02:34:21 -04:00
This interface can be utilized to deactivate the hard and soft lockup detector temporarily. Callers are expected to minimize the duration of deactivation. Multiple deactivations are allowed to occur in parallel but should be rare in practice. [akpm@linux-foundation.org: remove unneeded static initialization] Signed-off-by: Ulrich Obergfell <uobergfe@redhat.com> Reviewed-by: Aaron Tomlin <atomlin@redhat.com> Cc: Guenter Roeck <linux@roeck-us.net> Cc: Don Zickus <dzickus@redhat.com> Cc: Ulrich Obergfell <uobergfe@redhat.com> Cc: Jiri Olsa <jolsa@kernel.org> Cc: Michal Hocko <mhocko@suse.cz> Cc: Stephane Eranian <eranian@google.com> Cc: Chris Metcalf <cmetcalf@ezchip.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Ingo Molnar <mingo@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
93 lines
2.5 KiB
C
/*
 * linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>
/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
/* No hard lockup detection available: pet the soft lockup watchdog instead. */
static inline void touch_nmi_watchdog(void)
{
	touch_softlockup_watchdog();
}
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
void watchdog_nmi_disable_all(void);
void watchdog_nmi_enable_all(void);
#else
/* Hard lockup detector not configured in: every operation is a no-op. */
static inline void hardlockup_detector_disable(void) {}
static inline void watchdog_nmi_disable_all(void) {}
static inline void watchdog_nmi_enable_all(void) {}
#endif
/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_all_cpu_backtrace
/* Arch support present: forward to the arch hook and report success. */
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(true);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(false);
	return true;
}
#else
/* No arch support: report failure so callers can fall back. */
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(int watchdog_thresh);
/* Watchdog state toggles and tunables, exposed via sysctl. */
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long *watchdog_cpumask_bits;
extern int sysctl_softlockup_all_cpu_backtrace;
struct ctl_table;
/* /proc/sys handlers for the watchdog sysctls above. */
extern int proc_watchdog(struct ctl_table *, int,
			 void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int,
			     void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int,
			      void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
/* Temporarily deactivate, then reactivate, the lockup detectors. */
extern int watchdog_suspend(void);
extern void watchdog_resume(void);
#endif
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif /* LINUX_NMI_H */