LoongArch: Add STACKTRACE support
1. Use the common arch_stack_walk() infrastructure to avoid duplicated code
   and avoid taking care of stack storage and filtering.
2. Add sched_ra (the __schedule return address) and sched_cfa (the
   __schedule call frame address) to struct thread_struct, and store
   them in switch_to().
3. Add a __get_wchan() implementation.

Now we can print a process's kernel stack and wait channel with
cat /proc/*/stack and cat /proc/*/wchan.

Signed-off-by: Qing Zhang <zhangqing@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent 49aef111e2
commit 93a4fa622e

8 changed files with 93 additions and 6 deletions
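For illustration, a minimal userspace sketch (not part of the patch; assumes PID 1 exists and procfs is mounted) of reading the wait channel that this commit makes available:

    #include <stdio.h>

    int main(void)
    {
        char buf[128];
        /* /proc/<pid>/wchan holds a symbol name, or "0" if the task is running */
        FILE *f = fopen("/proc/1/wchan", "r");
        if (!f)
            return 1;
        if (fgets(buf, sizeof(buf), f))
            printf("pid 1 waits in: %s\n", buf);
        fclose(f);
        return 0;
    }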
arch/loongarch/Kconfig
@@ -42,6 +42,7 @@ config LOONGARCH
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_STACKWALK
 	select ARCH_SUPPORTS_ACPI
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_HUGETLBFS
@@ -151,6 +152,10 @@ config LOCKDEP_SUPPORT
 	bool
 	default y
 
+config STACKTRACE_SUPPORT
+	bool
+	default y
+
 # MACH_LOONGSON32 and MACH_LOONGSON64 are deliberately carried over from the
 # MIPS Loongson code, to preserve Loongson-specific code paths in drivers that
 # are shared between architectures, and specifically expecting the symbols.
arch/loongarch/include/asm/processor.h
@@ -101,6 +101,10 @@ struct thread_struct {
 	unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
 	unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */
 
+	/* __schedule() return address / call frame address */
+	unsigned long sched_ra;
+	unsigned long sched_cfa;
+
 	/* CSR registers */
 	unsigned long csr_prmd;
 	unsigned long csr_crmd;
@@ -129,6 +133,9 @@ struct thread_struct {
 	struct loongarch_fpu fpu FPU_ALIGN;
 };
 
+#define thread_saved_ra(tsk)	(tsk->thread.sched_ra)
+#define thread_saved_fp(tsk)	(tsk->thread.sched_cfa)
+
 #define INIT_THREAD {					\
 	/*						\
 	 * Main processor registers			\
@@ -145,6 +152,8 @@ struct thread_struct {
 	.reg29 = 0,					\
 	.reg30 = 0,					\
 	.reg31 = 0,					\
+	.sched_ra = 0,					\
+	.sched_cfa = 0,					\
 	.csr_crmd = 0,					\
 	.csr_prmd = 0,					\
 	.csr_euen = 0,					\
arch/loongarch/include/asm/switch_to.h
@@ -15,12 +15,15 @@ struct task_struct;
  * @prev: The task previously executed.
  * @next: The task to begin executing.
  * @next_ti: task_thread_info(next).
+ * @sched_ra: __schedule return address.
+ * @sched_cfa: __schedule call frame address.
  *
  * This function is used whilst scheduling to save the context of prev & load
  * the context of next. Returns prev.
  */
 extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
-			struct task_struct *next, struct thread_info *next_ti);
+			struct task_struct *next, struct thread_info *next_ti,
+			void *sched_ra, void *sched_cfa);
 
 /*
  * For newly created kernel threads switch_to() will return to
@@ -28,10 +31,11 @@ extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
  * That is, everything following __switch_to() will be skipped for new threads.
  * So everything that matters to new threads should be placed before __switch_to().
  */
 #define switch_to(prev, next, last)					\
 do {									\
 	lose_fpu_inatomic(1, prev);					\
-	(last) = __switch_to(prev, next, task_thread_info(next));	\
+	(last) = __switch_to(prev, next, task_thread_info(next),	\
+		 __builtin_return_address(0), __builtin_frame_address(0)); \
 } while (0)
 
 #endif /* _ASM_SWITCH_TO_H */
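Because switch_to() is a macro, the two builtins above evaluate at the expansion site inside __schedule(), capturing the address the scheduler will resume at and its frame base; __switch_to() then stores them into sched_ra/sched_cfa. A minimal userspace sketch of what the builtins return (illustrative, not part of the patch; the builtins are GCC/Clang extensions):

    #include <stdio.h>

    static void capture(void)
    {
        /* Address capture() will return to, and capture()'s own frame base. */
        void *ra  = __builtin_return_address(0);
        void *cfa = __builtin_frame_address(0);
        printf("ra=%p cfa=%p\n", ra, cfa);
    }

    int main(void)
    {
        capture();
        return 0;
    }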
arch/loongarch/kernel/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_EFI)		+= efi.o
 obj-$(CONFIG_CPU_HAS_FPU)	+= fpu.o
 
 obj-$(CONFIG_MODULES)		+= module.o module-sections.o
+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 
 obj-$(CONFIG_PROC_FS)		+= proc.o
arch/loongarch/kernel/asm-offsets.c
@@ -103,6 +103,8 @@ void output_thread_defines(void)
 	OFFSET(THREAD_REG29, task_struct, thread.reg29);
 	OFFSET(THREAD_REG30, task_struct, thread.reg30);
 	OFFSET(THREAD_REG31, task_struct, thread.reg31);
+	OFFSET(THREAD_SCHED_RA, task_struct, thread.sched_ra);
+	OFFSET(THREAD_SCHED_CFA, task_struct, thread.sched_cfa);
 	OFFSET(THREAD_CSRCRMD, task_struct,
 	       thread.csr_crmd);
 	OFFSET(THREAD_CSRPRMD, task_struct,
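The OFFSET() entries turn offsetof(struct task_struct, thread.sched_ra) and its sched_cfa counterpart into assembler-visible constants, which is what lets the switch.S hunk at the end of this commit address the new fields by name with stptr.d.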
arch/loongarch/kernel/process.c
@@ -135,6 +135,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	childregs = (struct pt_regs *) childksp - 1;
 	/* Put the stack after the struct pt_regs. */
 	childksp = (unsigned long) childregs;
+	p->thread.sched_cfa = 0;
 	p->thread.csr_euen = 0;
 	p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
 	p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
@@ -145,6 +146,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		p->thread.reg23 = (unsigned long)args->fn;
 		p->thread.reg24 = (unsigned long)args->fn_arg;
 		p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
+		p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->csr_euen = p->thread.csr_euen;
 		childregs->csr_crmd = p->thread.csr_crmd;
@@ -161,6 +163,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 
 	p->thread.reg03 = (unsigned long) childregs;
 	p->thread.reg01 = (unsigned long) ret_from_fork;
+	p->thread.sched_ra = (unsigned long) ret_from_fork;
 
 	/*
 	 * New tasks lose permission to use the fpu. This accelerates context
@@ -181,7 +184,31 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 
 unsigned long __get_wchan(struct task_struct *task)
 {
-	return 0;
+	unsigned long pc;
+	struct unwind_state state;
+
+	if (!try_get_task_stack(task))
+		return 0;
+
+	unwind_start(&state, task, NULL);
+	state.sp = thread_saved_fp(task);
+	get_stack_info(state.sp, state.task, &state.stack_info);
+	state.pc = thread_saved_ra(task);
+#ifdef CONFIG_UNWINDER_PROLOGUE
+	state.type = UNWINDER_PROLOGUE;
+#endif
+	for (; !unwind_done(&state); unwind_next_frame(&state)) {
+		pc = unwind_get_return_address(&state);
+		if (!pc)
+			break;
+		if (in_sched_functions(pc))
+			continue;
+		break;
+	}
+
+	put_task_stack(task);
+
+	return pc;
 }
 
 bool in_irq_stack(unsigned long stack, struct stack_info *info)
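The walk is seeded from the callee-saved state recorded at the task's last context switch (thread_saved_ra()/thread_saved_fp()) and skips frames that lie in scheduler code (in_sched_functions()), so the first remaining return address is the function the task is actually blocked in, which is exactly the "wait channel" reported via /proc/*/wchan.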
arch/loongarch/kernel/stacktrace.c (new file, 37 lines)
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Stack trace management functions
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
+
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+		     struct task_struct *task, struct pt_regs *regs)
+{
+	unsigned long addr;
+	struct pt_regs dummyregs;
+	struct unwind_state state;
+
+	regs = &dummyregs;
+
+	if (task == current) {
+		regs->regs[3] = (unsigned long)__builtin_frame_address(0);
+		regs->csr_era = (unsigned long)__builtin_return_address(0);
+	} else {
+		regs->regs[3] = thread_saved_fp(task);
+		regs->csr_era = thread_saved_ra(task);
+	}
+
+	regs->regs[1] = 0;
+	for (unwind_start(&state, task, regs);
+	     !unwind_done(&state); unwind_next_frame(&state)) {
+		addr = unwind_get_return_address(&state);
+		if (!addr || !consume_entry(cookie, addr))
+			break;
+	}
+}
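With ARCH_STACKWALK selected, the generic kernel/stacktrace.c routes its helpers through this arch_stack_walk(). A hedged sketch of how kernel code might consume it indirectly (the module context is assumed for illustration; it is not part of this commit):

    #include <linux/kernel.h>
    #include <linux/sched.h>
    #include <linux/stacktrace.h>

    /* Save and print up to 16 return addresses of a task's kernel stack. */
    static void dump_task_stack(struct task_struct *tsk)
    {
        unsigned long entries[16];
        unsigned int i, nr;

        /* stack_trace_save_tsk() ends up in arch_stack_walk() above. */
        nr = stack_trace_save_tsk(tsk, entries, ARRAY_SIZE(entries), 0);
        for (i = 0; i < nr; i++)
            pr_info("%pS\n", (void *)entries[i]);
    }

This same path is what backs the /proc/<pid>/stack output mentioned in the commit message.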
arch/loongarch/kernel/switch.S
@@ -21,6 +21,8 @@ SYM_FUNC_START(__switch_to)
 
 	cpu_save_nonscratch a0
 	stptr.d	ra, a0, THREAD_REG01
+	stptr.d	a3, a0, THREAD_SCHED_RA
+	stptr.d	a4, a0, THREAD_SCHED_CFA
 	move	tp, a2
 	cpu_restore_nonscratch a1
 
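In the LoongArch calling convention, registers a0-a7 carry the first eight arguments, so a3 and a4 hold the sched_ra and sched_cfa values passed in by the switch_to() macro; they are stored into the outgoing task's thread_struct at the offsets generated by asm-offsets.c above.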