linux/lib/test_fprobe.c
Masami Hiramatsu (Google) 4346ba1604 fprobe: Rewrite fprobe on function-graph tracer
Rewrite the fprobe implementation on top of the function-graph tracer.
Major API changes are:
 -  The 'nr_maxactive' field is deprecated.
 -  This depends on CONFIG_DYNAMIC_FTRACE_WITH_ARGS or
    !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS, and
    CONFIG_HAVE_FUNCTION_GRAPH_FREGS, so it currently works only
    on x86_64.
 -  Currently the entry data size is limited to 15 * sizeof(long).
 -  If too many fprobe exit handlers are set on the same
    function, it will fail to probe.
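
A rough sketch of the updated usage, modeled on the selftest below (my_entry(),
my_exit() and the "function_name*" filter are placeholder names, not part of
this change):

	static int my_entry(struct fprobe *fp, unsigned long ip, unsigned long ret_ip,
			    struct ftrace_regs *fregs, void *data)
	{
		/* Return nonzero to skip the exit handler for this invocation. */
		return 0;
	}

	static void my_exit(struct fprobe *fp, unsigned long ip, unsigned long ret_ip,
			    struct ftrace_regs *fregs, void *data)
	{
		/* ftrace_regs_get_return_value(fregs) reads the target's return value. */
	}

	static struct fprobe fp = {
		.entry_handler	 = my_entry,
		.exit_handler	 = my_exit,
		/* nr_maxactive no longer needs to be set; entry data must fit in 15 * sizeof(long). */
		.entry_data_size = sizeof(u32),
	};

	/* In module init/exit (or wherever appropriate): */
	register_fprobe(&fp, "function_name*", NULL);
	...
	unregister_fprobe(&fp);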

Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Acked-by: Heiko Carstens <hca@linux.ibm.com> # s390
Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Florent Revest <revest@chromium.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: bpf <bpf@vger.kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Naveen N Rao <naveen@kernel.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: x86@kernel.org
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/173519003970.391279.14406792285453830996.stgit@devnote2
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
2024-12-26 10:50:05 -05:00

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * test_fprobe.c - simple sanity test for fprobe
 */

#include <linux/kernel.h>
#include <linux/fprobe.h>
#include <linux/random.h>
#include <kunit/test.h>

#define div_factor 3

static struct kunit *current_test;
static u32 rand1, entry_val, exit_val;

/* Use indirect calls to avoid inlining the target functions */
static u32 (*target)(u32 value);
static u32 (*target2)(u32 value);
static unsigned long target_ip;
static unsigned long target2_ip;
static int entry_return_value;

static noinline u32 fprobe_selftest_target(u32 value)
{
	return (value / div_factor);
}

static noinline u32 fprobe_selftest_target2(u32 value)
{
	return (value / div_factor) + 1;
}

static notrace int fp_entry_handler(struct fprobe *fp, unsigned long ip,
				    unsigned long ret_ip,
				    struct ftrace_regs *fregs, void *data)
{
	KUNIT_EXPECT_FALSE(current_test, preemptible());
	/* This can be called on the fprobe_selftest_target and the fprobe_selftest_target2 */
	if (ip != target_ip)
		KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
	entry_val = (rand1 / div_factor);
	if (fp->entry_data_size) {
		KUNIT_EXPECT_NOT_NULL(current_test, data);
		if (data)
			*(u32 *)data = entry_val;
	} else
		KUNIT_EXPECT_NULL(current_test, data);

	return entry_return_value;
}

static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip,
				    unsigned long ret_ip,
				    struct ftrace_regs *fregs, void *data)
{
	unsigned long ret = ftrace_regs_get_return_value(fregs);

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	if (ip != target_ip) {
		KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
		KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
	} else
		KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor));
	KUNIT_EXPECT_EQ(current_test, entry_val, (rand1 / div_factor));
	exit_val = entry_val + div_factor;
	if (fp->entry_data_size) {
		KUNIT_EXPECT_NOT_NULL(current_test, data);
		if (data)
			KUNIT_EXPECT_EQ(current_test, *(u32 *)data, entry_val);
	} else
		KUNIT_EXPECT_NULL(current_test, data);
}

/* Test entry only (no rethook) */
static void test_fprobe_entry(struct kunit *test)
{
	struct fprobe fp_entry = {
		.entry_handler = fp_entry_handler,
	};

	current_test = test;

	/* Before register, unregister should fail. */
	KUNIT_EXPECT_NE(test, 0, unregister_fprobe(&fp_entry));
	KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp_entry, "fprobe_selftest_target*", NULL));

	entry_val = 0;
	exit_val = 0;
	target(rand1);
	KUNIT_EXPECT_NE(test, 0, entry_val);
	KUNIT_EXPECT_EQ(test, 0, exit_val);

	entry_val = 0;
	exit_val = 0;
	target2(rand1);
	KUNIT_EXPECT_NE(test, 0, entry_val);
	KUNIT_EXPECT_EQ(test, 0, exit_val);

	KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp_entry));
}

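/* Test both entry and exit handlers */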
static void test_fprobe(struct kunit *test)
{
	struct fprobe fp = {
		.entry_handler = fp_entry_handler,
		.exit_handler = fp_exit_handler,
	};

	current_test = test;
	KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target*", NULL));

	entry_val = 0;
	exit_val = 0;
	target(rand1);
	KUNIT_EXPECT_NE(test, 0, entry_val);
	KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);

	entry_val = 0;
	exit_val = 0;
	target2(rand1);
	KUNIT_EXPECT_NE(test, 0, entry_val);
	KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);

	KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
}

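/* Test registration by an array of symbol names */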
static void test_fprobe_syms(struct kunit *test)
{
	static const char *syms[] = {"fprobe_selftest_target", "fprobe_selftest_target2"};
	struct fprobe fp = {
		.entry_handler = fp_entry_handler,
		.exit_handler = fp_exit_handler,
	};

	current_test = test;
	KUNIT_EXPECT_EQ(test, 0, register_fprobe_syms(&fp, syms, 2));

	entry_val = 0;
	exit_val = 0;
	target(rand1);
	KUNIT_EXPECT_NE(test, 0, entry_val);
	KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);

	entry_val = 0;
	exit_val = 0;
	target2(rand1);
	KUNIT_EXPECT_NE(test, 0, entry_val);
	KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);

	KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
}

/* Test private entry_data */
static void test_fprobe_data(struct kunit *test)
{
	struct fprobe fp = {
		.entry_handler = fp_entry_handler,
		.exit_handler = fp_exit_handler,
		.entry_data_size = sizeof(u32),
	};

	current_test = test;
	KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target", NULL));

	target(rand1);

	KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
}

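/* Test skipping the exit handler when the entry handler returns nonzero */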
static void test_fprobe_skip(struct kunit *test)
{
	struct fprobe fp = {
		.entry_handler = fp_entry_handler,
		.exit_handler = fp_exit_handler,
	};

	current_test = test;
	KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target", NULL));

	entry_return_value = 1;
	entry_val = 0;
	exit_val = 0;
	target(rand1);
	KUNIT_EXPECT_NE(test, 0, entry_val);
	KUNIT_EXPECT_EQ(test, 0, exit_val);
	KUNIT_EXPECT_EQ(test, 0, fp.nmissed);
	entry_return_value = 0;

	KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
}

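/* Find the ftrace-patched location inside the given function */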
static unsigned long get_ftrace_location(void *func)
{
	unsigned long size, addr = (unsigned long)func;

	if (!kallsyms_lookup_size_offset(addr, &size, NULL) || !size)
		return 0;

	return ftrace_location_range(addr, addr + size - 1);
}

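/* Per-test setup: pick a random input and resolve the target addresses */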
static int fprobe_test_init(struct kunit *test)
{
	rand1 = get_random_u32_above(div_factor);
	target = fprobe_selftest_target;
	target2 = fprobe_selftest_target2;
	target_ip = get_ftrace_location(target);
	target2_ip = get_ftrace_location(target2);

	return 0;
}

static struct kunit_case fprobe_testcases[] = {
	KUNIT_CASE(test_fprobe_entry),
	KUNIT_CASE(test_fprobe),
	KUNIT_CASE(test_fprobe_syms),
	KUNIT_CASE(test_fprobe_data),
	KUNIT_CASE(test_fprobe_skip),
	{}
};

static struct kunit_suite fprobe_test_suite = {
	.name = "fprobe_test",
	.init = fprobe_test_init,
	.test_cases = fprobe_testcases,
};

kunit_test_suites(&fprobe_test_suite);