path: root/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Permit pretty deep stack traces */
#define MAX_STACK_RAWTP 100
struct stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

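/* Perf event array used to push both samples collected below to user space. */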
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(__u32));
} perfmap SEC(".maps");

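/* Per-cpu scratch slot holding one fixed-layout stack_trace_t sample. */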
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct stack_trace_t);
} stackdata_map SEC(".maps");

/* Allocate per-cpu space twice the size actually needed. For the code
 * below,
 *   usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
 *   if (usize < 0)
 *     return 0;
 *   ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
 *
 * if we had value_size = MAX_STACK_RAWTP * sizeof(__u64), the
 * verifier would complain that the access "raw_data + usize"
 * with size "max_len - usize" may be out of bounds.
 * The maximum of "raw_data + usize" is "raw_data + max_len",
 * and the maximum of "max_len - usize" is "max_len", so the
 * verifier concludes that the maximum buffer access range is
 * "raw_data[0...max_len * 2 - 1]" and hence rejects the program.
 *
 * Doubling the to-be-used maximum buffer size avoids this verifier
 * issue without complicated massaging of the C code.
 * This is an acceptable workaround since the map has only one
 * entry. (A worked instance of the arithmetic follows the map
 * definition below.)
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64[2 * MAX_STACK_RAWTP]);
} rawdata_map SEC(".maps");
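
/* A worked instance of the bound arithmetic above, as a sanity check:
 * with MAX_STACK_RAWTP = 100, max_len = 100 * sizeof(__u64) = 800 bytes.
 * The verifier only tracks usize as being in [0, max_len], so as far as
 * it knows the second bpf_get_stack() may write at offset 800 with size
 * 800, i.e. touch raw_data[0...1599]. The doubled value size,
 * 2 * 100 * 8 = 1600 bytes, makes exactly that worst case in-bounds.
 */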

SEC("raw_tracepoint/sys_enter")
int bpf_prog1(void *ctx)
{
	int max_len, max_buildid_len, total_size;
	struct stack_trace_t *data;
	long usize, ksize;
	void *raw_data;
	__u32 key = 0;

	data = bpf_map_lookup_elem(&stackdata_map, &key);
	if (!data)
		return 0;

	max_len = MAX_STACK_RAWTP * sizeof(__u64);
	max_buildid_len = MAX_STACK_RAWTP * sizeof(struct bpf_stack_build_id);
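
	/* First pass: record the pid and capture the kernel stack, the
	 * user stack, and the build-id form of the user stack into the
	 * fixed-layout struct, then push the whole struct to user space.
	 */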
	data->pid = bpf_get_current_pid_tgid();
	data->kern_stack_size = bpf_get_stack(ctx, data->kern_stack,
					      max_len, 0);
	data->user_stack_size = bpf_get_stack(ctx, data->user_stack, max_len,
					      BPF_F_USER_STACK);
	data->user_stack_buildid_size = bpf_get_stack(
		ctx, data->user_stack_buildid, max_buildid_len,
		BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
	bpf_perf_event_output(ctx, &perfmap, 0, data, sizeof(*data));

	/* write both kernel and user stacks to the same buffer */
	raw_data = bpf_map_lookup_elem(&rawdata_map, &key);
	if (!raw_data)
		return 0;

	usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
	if (usize < 0)
		return 0;

	ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
	if (ksize < 0)
		return 0;

	total_size = usize + ksize;
	if (total_size > 0 && total_size <= max_len)
		bpf_perf_event_output(ctx, &perfmap, 0, raw_data, total_size);

	return 0;
}

char _license[] SEC("license") = "GPL";
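
/* For reference, a minimal sketch of a user-space consumer for perfmap,
 * using libbpf's perf_buffer API. This is not part of the selftest (the
 * real harness in selftests/bpf drives this program differently); the
 * object file name and handle_sample() are assumptions for illustration,
 * and error reporting is elided.
 *
 *   #include <bpf/libbpf.h>
 *
 *   static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 *   {
 *           // One event carries a struct stack_trace_t, the other the
 *           // combined user+kernel raw stack; dispatch on size if needed.
 *   }
 *
 *   int main(void)
 *   {
 *           struct bpf_object *obj;
 *           struct bpf_program *prog;
 *           struct perf_buffer *pb;
 *           int map_fd;
 *
 *           obj = bpf_object__open_file("test_get_stack_rawtp.bpf.o", NULL);
 *           if (!obj || bpf_object__load(obj))
 *                   return 1;
 *           prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
 *           if (!prog || !bpf_program__attach(prog))
 *                   return 1;
 *           map_fd = bpf_object__find_map_fd_by_name(obj, "perfmap");
 *           pb = perf_buffer__new(map_fd, 8, handle_sample, NULL, NULL, NULL);
 *           if (!pb)
 *                   return 1;
 *           while (perf_buffer__poll(pb, 100) >= 0)
 *                   ;
 *           return 0;
 *   }
 */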