kcsan: Avoid checking scoped accesses from nested contexts
Avoid checking scoped accesses from nested contexts (such as nested
interrupts or in scheduler code) which share the same kcsan_ctx.

This is to avoid detecting false positive races of accesses in the same
thread with currently scoped accesses: consider setting up a watchpoint
for a non-scoped (normal) access that also "conflicts" with a current
scoped access. In a nested interrupt (or in the scheduler), which shares
the same kcsan_ctx, we cannot check scoped accesses set up in the parent
context -- simply ignore them in this case.

With the introduction of kcsan_ctx::disable_scoped, we can also clean up
kcsan_check_scoped_accesses()'s recursion guard, and do not need to
modify the list's prev pointer.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 71f8de7092
commit 9756f64c8f
2 changed files with 16 additions and 3 deletions
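Before the diff itself, a minimal userspace sketch of the idea, under stated assumptions: the struct mirrors kcsan_ctx's two counters, but get_ctx(), the scoped-access list, and the watchpoint machinery from kernel/kcsan/core.c are elided or stubbed out here. It only models how a counter in the shared context makes a nested context skip the parent's scoped accesses.

/*
 * Userspace model of kcsan_ctx::disable_scoped -- a sketch, not the
 * kernel implementation. nested_interrupt() stands in for any nested
 * context (interrupt, scheduler code) sharing the same kcsan_ctx.
 */
#include <stdio.h>

struct kcsan_ctx {
	int disable_count;  /* disable counter */
	int disable_scoped; /* disable scoped access counter */
};

static struct kcsan_ctx ctx; /* stands in for the shared per-task context */

static void check_scoped_accesses(void)
{
	if (ctx.disable_scoped)
		return; /* guarded: do not check the parent's scoped accesses */

	ctx.disable_scoped++; /* also guards against recursing into ourselves */
	puts("checking scoped accesses");
	/* ... the kernel walks ctx->scoped_accesses here ... */
	ctx.disable_scoped--;
}

static void nested_interrupt(void)
{
	check_scoped_accesses(); /* returns immediately: disable_scoped != 0 */
}

int main(void)
{
	ctx.disable_scoped++;  /* as kcsan_setup_watchpoint() now does */
	nested_interrupt();    /* previously a source of false positives */
	ctx.disable_scoped--;

	check_scoped_accesses(); /* unguarded: prints once */
	return 0;
}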
include/linux/kcsan.h
@@ -21,6 +21,7 @@
  */
 struct kcsan_ctx {
 	int disable_count; /* disable counter */
+	int disable_scoped; /* disable scoped access counter */
 	int atomic_next; /* number of following atomic ops */
 
 	/*
kernel/kcsan/core.c
@@ -204,15 +204,17 @@ check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
 static noinline void kcsan_check_scoped_accesses(void)
 {
 	struct kcsan_ctx *ctx = get_ctx();
-	struct list_head *prev_save = ctx->scoped_accesses.prev;
 	struct kcsan_scoped_access *scoped_access;
 
-	ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */
+	if (ctx->disable_scoped)
+		return;
+
+	ctx->disable_scoped++;
 	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
 		check_access(scoped_access->ptr, scoped_access->size,
 			     scoped_access->type, scoped_access->ip);
 	}
-	ctx->scoped_accesses.prev = prev_save;
+	ctx->disable_scoped--;
 }
 
 /* Rules for generic atomic accesses. Called from fast-path. */
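The counter replaces the old trick of saving and NULLing scoped_accesses.prev, which only guarded against direct recursion within one context; a counter in the shared kcsan_ctx also covers nested interrupts and scheduler code. For context on what this list walk re-checks, here is an illustrative kernel-side user of scoped checking: ASSERT_EXCLUSIVE_ACCESS_SCOPED() is the real KCSAN annotation from include/linux/kcsan-checks.h, but writer() and struct foo are made-up examples.

#include <linux/kcsan-checks.h>

struct foo {
	unsigned long flags;
};

/* Hypothetical user of scoped checking, for illustration only. */
static void writer(struct foo *f)
{
	/* Re-checked via kcsan_check_scoped_accesses() on every memory
	 * access in this scope, until end of scope. */
	ASSERT_EXCLUSIVE_ACCESS_SCOPED(f->flags);
	f->flags |= 1UL;
}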
@@ -465,6 +467,15 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned
 		goto out;
 	}
 
+	/*
+	 * Avoid races of scoped accesses from nested interrupts (or scheduler).
+	 * Assume setting up a watchpoint for a non-scoped (normal) access that
+	 * also conflicts with a current scoped access. In a nested interrupt,
+	 * which shares the context, it would check a conflicting scoped access.
+	 * To avoid, disable scoped access checking.
+	 */
+	ctx->disable_scoped++;
+
 	/*
 	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
 	 * runtime is entered for every memory access, and potentially useful
@@ -578,6 +589,7 @@ out_unlock:
 	if (!kcsan_interrupt_watcher)
 		local_irq_restore(irq_flags);
 	kcsan_restore_irqtrace(current);
+	ctx->disable_scoped--;
 out:
 	user_access_restore(ua_flags);
 }
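Note the placement of the two halves of the guard: the increment happens only after the early exits that `goto out` directly, so the matching decrement sits on the out_unlock path just before the `out:` label rather than after it. Every path that increments disable_scoped therefore decrements it exactly once, and paths that bail out before arming the watchpoint never touch the counter at all.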