path: root/tools/testing/selftests/bpf/progs/res_spin_lock.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

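/* Open-coded errno values: the usual errno.h cannot be included together
 * with vmlinux.h in a BPF object.
 */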
#define EDEADLK 35
#define ETIMEDOUT 110

struct arr_elem {
	struct bpf_res_spin_lock lock;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 64);
	__type(key, int);
	__type(value, struct arr_elem);
} arrmap SEC(".maps");

struct bpf_res_spin_lock lockA __hidden SEC(".data.A");
struct bpf_res_spin_lock lockB __hidden SEC(".data.B");

SEC("tc")
int res_spin_lock_test(struct __sk_buff *ctx)
{
	struct arr_elem *elem1, *elem2;
	int r;

	elem1 = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem1)
		return -1;
	elem2 = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem2)
		return -1;

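	/* Both lookups used key 0, so elem1 and elem2 alias the same element;
	 * the nested acquisition below is an AA deadlock and must fail.
	 */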
	r = bpf_res_spin_lock(&elem1->lock);
	if (r)
		return r;
	r = bpf_res_spin_lock(&elem2->lock);
	if (!r) {
		bpf_res_spin_unlock(&elem2->lock);
		bpf_res_spin_unlock(&elem1->lock);
		return -1;
	}
	bpf_res_spin_unlock(&elem1->lock);
	return r != -EDEADLK;
}
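
/* A minimal sketch (hypothetical, not part of the upstream test) of the
 * success path: unlike bpf_spin_lock(), bpf_res_spin_lock() can fail, so its
 * return value must be checked before entering the critical section.
 */
SEC("tc")
int res_spin_lock_sketch_success(struct __sk_buff *ctx)
{
	struct arr_elem *elem;
	int r;

	elem = bpf_map_lookup_elem(&arrmap, &(int){0});
	if (!elem)
		return -1;
	r = bpf_res_spin_lock(&elem->lock);
	if (r)	/* -EDEADLK or -ETIMEDOUT: the lock was not acquired */
		return r;
	/* Critical section would go here. */
	bpf_res_spin_unlock(&elem->lock);
	return 0;
}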

SEC("tc")
int res_spin_lock_test_AB(struct __sk_buff *ctx)
{
	int r;

	r = bpf_res_spin_lock(&lockA);
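	/* !r is 0 here: failing to take lockA is not an error for this test. */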
	if (r)
		return !r;
	/* Only unlock if we took the lock. */
	if (!bpf_res_spin_lock(&lockB))
		bpf_res_spin_unlock(&lockB);
	bpf_res_spin_unlock(&lockA);
	return 0;
}

int err;

SEC("tc")
int res_spin_lock_test_BA(struct __sk_buff *ctx)
{
	int r;

	r = bpf_res_spin_lock(&lockB);
	if (r)
		return !r;
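	/* If lockA cannot be taken while lockB is held, report the ABBA
	 * deadlock via the global err so the userspace runner can assert on it.
	 */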
	if (!bpf_res_spin_lock(&lockA))
		bpf_res_spin_unlock(&lockA);
	else
		err = -EDEADLK;
	bpf_res_spin_unlock(&lockB);
	return err ?: 0;
}

SEC("tc")
int res_spin_lock_test_held_lock_max(struct __sk_buff *ctx)
{
	struct bpf_res_spin_lock *locks[48] = {};
	struct arr_elem *e;
	u64 time_beg, time;
	int ret = 0, i;

	_Static_assert(ARRAY_SIZE(((struct rqspinlock_held){}).locks) == 31,
		       "RES_NR_HELD assumed to be 31");

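	/* Plan: acquire 34 locks, 3 more than the 31-entry held-lock table can
	 * track, then attempt locks[34], which aliases the already-held
	 * locks[32] (both resolve to key 32 below). That lock was acquired
	 * past the table's capacity, so AA detection cannot see it and only
	 * the timeout can fail the acquisition.
	 */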
	for (i = 0; i < 34; i++) {
		int key = i;

		/* We cannot pass in i, as it would get spilled/filled by the
		 * compiler and lose its bounds in the verifier state.
		 */
		e = bpf_map_lookup_elem(&arrmap, &key);
		if (!e)
			return 1;
		locks[i] = &e->lock;
	}

	for (; i < 48; i++) {
		int key = i - 2;

		/* We cannot pass in i, as it would get spilled/filled by the
		 * compiler and lose its bounds in the verifier state.
		 */
		e = bpf_map_lookup_elem(&arrmap, &key);
		if (!e)
			return 1;
		locks[i] = &e->lock;
	}

	time_beg = bpf_ktime_get_ns();
	for (i = 0; i < 34; i++) {
		if (bpf_res_spin_lock(locks[i]))
			goto end;
	}

	/* Trigger an AA deadlock after exhausting the entries in the held-lock
	 * table. This time only the timeout can save us, as AA detection
	 * cannot succeed.
	 */
	ret = bpf_res_spin_lock(locks[34]);
	if (!ret) {
		bpf_res_spin_unlock(locks[34]);
		ret = 1;
		goto end;
	}

	ret = ret != -ETIMEDOUT ? 2 : 0;

end:
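	/* i is one past the last lock successfully acquired on every path that
	 * reaches here; release in reverse order of acquisition.
	 */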
	for (i = i - 1; i >= 0; i--)
		bpf_res_spin_unlock(locks[i]);
	time = bpf_ktime_get_ns() - time_beg;
	/* The time spent should comfortably exceed our limit (1/4 s), since AA
	 * detection cannot expedite the failure without a held-lock entry for
	 * the aliased lock.
	 */
	return ret ?: (time > 1000000000 / 4 ? 0 : 1);
}

char _license[] SEC("license") = "GPL";
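
/*
 * Userspace side (a hedged sketch, not part of this file; the real runner
 * conventionally lives under prog_tests/). With a bpftool-generated skeleton,
 * driving one of the programs above would look roughly like:
 *
 *	struct res_spin_lock *skel = res_spin_lock__open_and_load();
 *	char pkt[64] = {};
 *	LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		    .data_in = pkt, .data_size_in = sizeof(pkt), .repeat = 1);
 *	int fd = bpf_program__fd(skel->progs.res_spin_lock_test);
 *
 *	if (!bpf_prog_test_run_opts(fd, &topts))
 *		assert(topts.retval == 0);	// 0 means -EDEADLK was observed
 *	res_spin_lock__destroy(skel);
 */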