// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests relating directly to Control Flow Integrity.
 */
#include "lkdtm.h"
#include <asm/page.h>
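
/* Shared counter used as the argument for the indirect-call tests below. */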
static int called_count;

/* Function taking one argument, without a return value. */
static noinline void lkdtm_increment_void(int *counter)
{
	(*counter)++;
}

/* Function taking one argument, returning int. */
static noinline int lkdtm_increment_int(int *counter)
{
	(*counter)++;

	return *counter;
}

/*
 * This tries to call an indirect function with a mismatched prototype.
 */
static void lkdtm_CFI_FORWARD_PROTO(void)
{
	/*
	 * Matches lkdtm_increment_void()'s prototype, but not
	 * lkdtm_increment_int()'s prototype.
	 */
	void (*func)(int *);

	pr_info("Calling matched prototype ...\n");
	func = lkdtm_increment_void;
	func(&called_count);

	pr_info("Calling mismatched prototype ...\n");
	func = (void *)lkdtm_increment_int;
	func(&called_count);

	pr_err("FAIL: survived mismatched prototype function call!\n");
	pr_expected_config(CONFIG_CFI_CLANG);
}

/*
 * This can stay local to LKDTM, as there should not be a production reason
 * to disable PAC && SCS.
 */
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
# ifdef CONFIG_ARM64_BTI_KERNEL
#  define __no_pac		"branch-protection=bti"
# else
#  define __no_pac		"branch-protection=none"
# endif
# define __no_ret_protection	__noscs __attribute__((__target__(__no_pac)))
#else
# define __no_ret_protection	__noscs
#endif
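
/*
 * Mask any PAC signature bits out of a return address read from the stack
 * by forcing the upper bits back into the kernel address range, so it can
 * be compared against a plain label address.
 */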
#define no_pac_addr(addr)		\
	((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))

/* The ultimate ROP gadget. */
static noinline __no_ret_protection
void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
{
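	/*
	 * Overwrite this function's own saved return address on the stack:
	 * __builtin_frame_address(0) + 1 is assumed to be the stack slot
	 * holding the return address (the word above the frame pointer,
	 * e.g. the LR slot of an arm64 frame record). Built with
	 * __no_ret_protection so neither the shadow call stack nor pointer
	 * authentication interferes with the overwrite.
	 */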
	/* Use of volatile is to make sure final write isn't seen as a dead store. */
	unsigned long * volatile *ret_addr =
		(unsigned long **)__builtin_frame_address(0) + 1;

	/* Make sure we've found the right place on the stack before writing it. */
	if (no_pac_addr(*ret_addr) == expected)
		*ret_addr = (addr);
	else
		/* Check architecture, stack layout, or compiler behavior... */
		pr_warn("Eek: return address mismatch! %px != %px\n",
			*ret_addr, addr);
}

static noinline
void set_return_addr(unsigned long *expected, unsigned long *addr)
{
	/* Use of volatile is to make sure final write isn't seen as a dead store. */
	unsigned long * volatile *ret_addr =
		(unsigned long **)__builtin_frame_address(0) + 1;

	/* Make sure we've found the right place on the stack before writing it. */
	if (no_pac_addr(*ret_addr) == expected)
		*ret_addr = (addr);
	else
		/* Check architecture, stack layout, or compiler behavior... */
		pr_warn("Eek: return address mismatch! %px != %px\n",
			*ret_addr, addr);
}
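
/*
 * force_check is always zero at runtime, but being volatile keeps the
 * compiler from proving that, so both sides of each branch below (and the
 * otherwise-unreachable labels) stay in the generated code.
 */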
static volatile int force_check;
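
/*
 * First redirect the return address using the unprotected helper, to prove
 * the overwrite technique itself works on this build. Then repeat it with
 * the normally built helper, where a shadow call stack or pointer
 * authentication is expected to keep control flow intact (or fault).
 */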
static void lkdtm_CFI_BACKWARD(void)
{
	/* Use calculated gotos to keep labels addressable. */
	void *labels[] = { 0, &&normal, &&redirected, &&check_normal, &&check_redirected };

	pr_info("Attempting unchecked stack return address redirection ...\n");

	/* Always false */
	if (force_check) {
		/*
		 * Prepare to call with NULLs to avoid parameters being treated as
		 * constants in -O2.
		 */
		set_return_addr_unchecked(NULL, NULL);
		set_return_addr(NULL, NULL);
		if (force_check)
			goto *labels[1];
		if (force_check)
			goto *labels[2];
		if (force_check)
			goto *labels[3];
		if (force_check)
			goto *labels[4];
		return;
	}

	/*
	 * Use fallthrough switch case to keep basic block ordering between
	 * set_return_addr*() and the label after it.
	 */
	switch (force_check) {
	case 0:
		set_return_addr_unchecked(&&normal, &&redirected);
		fallthrough;
	case 1:
normal:
		/* Always true */
		if (!force_check) {
			pr_err("FAIL: stack return address manipulation failed!\n");
			/* If we can't redirect "normally", we can't test mitigations. */
			return;
		}
		break;
	default:
redirected:
		pr_info("ok: redirected stack return address.\n");
		break;
	}
pr_info("Attempting checked stack return address redirection ...\n");
switch (force_check) {
case 0:
set_return_addr(&&check_normal, &&check_redirected);
fallthrough;
case 1:
check_normal:
/* Always true */
if (!force_check) {
pr_info("ok: control flow unchanged.\n");
return;
}
check_redirected:
pr_err("FAIL: stack return address was redirected!\n");
break;
}
if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
pr_expected_config(CONFIG_ARM64_PTR_AUTH_KERNEL);
return;
}
if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) {
pr_expected_config(CONFIG_SHADOW_CALL_STACK);
return;
}
pr_warn("This is probably expected, since this %s was built *without* %s=y nor %s=y\n",
lkdtm_kernel_info,
"CONFIG_ARM64_PTR_AUTH_KERNEL", "CONFIG_SHADOW_CALL_STACK");
}
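
/*
 * As with all LKDTM tests, these can be triggered at runtime through the
 * debugfs interface, e.g.:
 *   echo CFI_FORWARD_PROTO > /sys/kernel/debug/provoke-crash/DIRECT
 */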
static struct crashtype crashtypes[] = {
	CRASHTYPE(CFI_FORWARD_PROTO),
	CRASHTYPE(CFI_BACKWARD),
};

struct crashtype_category cfi_crashtypes = {
	.crashtypes = crashtypes,
	.len = ARRAY_SIZE(crashtypes),
};