mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-23 16:53:58 -05:00
3f1e931d15
We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates openrisc to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Stafford Horne <shorne@gmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-26-mark.rutland@arm.com
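Note on the "common code wraps these" step above: the generic headers provide instrumented wrappers that annotate each access for the sanitizers and then defer to the architecture's arch_atomic_*() implementation. The following is a minimal sketch of that pattern, simplified for illustration and not part of this patch or of the header below:

/*
 * Sketch of an instrumented wrapper: the generic atomic_add() records the
 * access for KASAN/KCSAN via instrument_atomic_read_write() and then calls
 * the architecture's arch_atomic_add(). The real wrappers are generated by
 * script; this is only an illustration of the shape they take.
 */
static __always_inline void
atomic_add(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}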
136 lines
3.5 KiB
C
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>

/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	int tmp;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%1)	\n"			\
		"	l." #op " %0,%0,%2	\n"			\
		"	l.swa	0(%1),%0	\n"			\
		"	l.bnf	1b		\n"			\
		"	l.nop			\n"			\
		: "=&r"(tmp)						\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
}

/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	int tmp;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%1)	\n"			\
		"	l." #op " %0,%0,%2	\n"			\
		"	l.swa	0(%1),%0	\n"			\
		"	l.bnf	1b		\n"			\
		"	l.nop			\n"			\
		: "=&r"(tmp)						\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
									\
	return tmp;							\
}

/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int tmp, old;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%2)	\n"			\
		"	l." #op " %1,%0,%3	\n"			\
		"	l.swa	0(%2),%1	\n"			\
		"	l.bnf	1b		\n"			\
		"	l.nop			\n"			\
		: "=&r"(old), "=&r"(tmp)				\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
									\
	return old;							\
}

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub
#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor
#define arch_atomic_add		arch_atomic_add
#define arch_atomic_sub		arch_atomic_sub
#define arch_atomic_and		arch_atomic_and
#define arch_atomic_or		arch_atomic_or
#define arch_atomic_xor		arch_atomic_xor

/*
 * Atomically add a to v->counter as long as v is not already u.
 * Returns the original value at v->counter.
 *
 * This is often used through atomic_inc_not_zero()
 */
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int old, tmp;

	__asm__ __volatile__(
		"1:	l.lwa %0, 0(%2)		\n"
		"	l.sfeq %0, %4		\n"
		"	l.bf 2f			\n"
		"	 l.add %1, %0, %3	\n"
		"	l.swa 0(%2), %1		\n"
		"	l.bnf 1b		\n"
		"	l.nop			\n"
		"2:				\n"
		: "=&r"(old), "=&r" (tmp)
		: "r"(&v->counter), "r"(a), "r"(u)
		: "cc", "memory");

	return old;
}
#define arch_atomic_fetch_add_unless	arch_atomic_fetch_add_unless

#define arch_atomic_read(v)		READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))

#include <asm/cmpxchg.h>

#define arch_atomic_xchg(ptr, v)		(arch_xchg(&(ptr)->counter, (v)))
#define arch_atomic_cmpxchg(v, old, new)	(arch_cmpxchg(&((v)->counter), (old), (new)))

#endif /* __ASM_OPENRISC_ATOMIC_H */
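As the comment above arch_atomic_fetch_add_unless() notes, this primitive is usually reached through atomic_inc_not_zero(). A rough sketch of how the generic fallback layer builds that on top of the fetch_add_unless operation defined here; simplified and not part of this file:

/*
 * Sketch: atomic_inc_not_zero() in terms of fetch_add_unless.
 * fetch_add_unless(v, a, u) returns the old value and only performs the
 * add when that old value differs from u, so comparing the return value
 * against u tells the caller whether the add actually happened.
 */
static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
	return atomic_fetch_add_unless(v, a, u) != u;
}

static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
	return atomic_add_unless(v, 1, 0);	/* increment unless counter is 0 */
}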