path: root/arch/powerpc/include/asm/qspinlock.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_QSPINLOCK_H
#define _ASM_POWERPC_QSPINLOCK_H

#include <linux/compiler.h>
#include <asm/qspinlock_types.h>
#include <asm/paravirt.h>
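
/*
 * Queued spinlocks for powerpc. The lock word packs a locked/owner portion
 * in its low bits and a queue-tail CPU encoding under _Q_TAIL_CPU_MASK; the
 * exact field layout and masks live in asm/qspinlock_types.h.
 */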

static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	return READ_ONCE(lock->val);
}

static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !lock.val;
}

static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return !!(READ_ONCE(lock->val) & _Q_TAIL_CPU_MASK);
}

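/*
 * Value a successful locker stores: the locked bit plus its own CPU number.
 * Recording the owner presumably lets contended waiters (e.g. under
 * paravirt) work out which CPU to yield to if the holder is preempted.
 */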
static __always_inline u32 queued_spin_encode_locked_val(void)
{
	/* XXX: make this use lock value in paca like simple spinlocks? */
	return _Q_LOCKED_VAL | (smp_processor_id() << _Q_OWNER_CPU_OFFSET);
}

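/*
 * Uncontended trylock: a lwarx/stwcx. loop that succeeds only if the lock
 * word is currently zero, installing the locked+owner value and running an
 * acquire barrier on success. The final lwarx operand is the EH (exclusive
 * access) hint, which is only enabled on 64-bit builds.
 */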
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 new = queued_spin_encode_locked_val();
	u32 prev;

	asm volatile(
"1:	lwarx	%0,0,%1,%3	# queued_spin_trylock			\n"
"	cmpwi	0,%0,0							\n"
"	bne-	2f							\n"
"	stwcx.	%2,0,%1							\n"
"	bne-	1b							\n"
"\t"	PPC_ACQUIRE_BARRIER "						\n"
"2:									\n"
	: "=&r" (prev)
	: "r" (&lock->val), "r" (new),
	  "i" (IS_ENABLED(CONFIG_PPC64))
	: "cr0", "memory");

	return likely(prev == 0);
}

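/*
 * Trylock variant that may steal the lock ahead of queued waiters: it
 * succeeds whenever no CPU holds the lock, even if tail (queue) bits are
 * set. The tail bits are preserved and the locked+owner value is OR-ed in.
 */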
static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock)
{
	u32 new = queued_spin_encode_locked_val();
	u32 prev, tmp;

	/* Trylock may get ahead of queued nodes if it finds unlocked */
	asm volatile(
"1:	lwarx	%0,0,%2,%5	# __queued_spin_trylock_steal		\n"
"	andc.	%1,%0,%4						\n"
"	bne-	2f							\n"
"	and	%1,%0,%4						\n"
"	or	%1,%1,%3						\n"
"	stwcx.	%1,0,%2							\n"
"	bne-	1b							\n"
"\t"	PPC_ACQUIRE_BARRIER "						\n"
"2:									\n"
	: "=&r" (prev), "=&r" (tmp)
	: "r" (&lock->val), "r" (new), "r" (_Q_TAIL_CPU_MASK),
	  "i" (IS_ENABLED(CONFIG_PPC64))
	: "cr0", "memory");

	return likely(!(prev & ~_Q_TAIL_CPU_MASK));
}

void queued_spin_lock_slowpath(struct qspinlock *lock);

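/*
 * Lock fast path: one trylock attempt; contention is handled by the
 * out-of-line queueing slowpath.
 */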
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	if (!queued_spin_trylock(lock))
		queued_spin_lock_slowpath(lock);
}

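/*
 * Unlock is a plain store-release of 0 to the lock->locked sub-field, which
 * presumably covers only the locked/owner portion of the word, so any
 * queued tail bits set by waiters are left intact.
 */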
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}

#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void pv_spinlocks_init(void);
#else
static inline void pv_spinlocks_init(void) { }
#endif

#endif /* _ASM_POWERPC_QSPINLOCK_H */