#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <linux/compiler.h>
#include "../tests.h"
/*
 * Workload data layout for the perf data-symbol test: data1 and data2 sit at
 * fixed, well-separated offsets (0 and 56) inside one 64-byte-aligned object,
 * so samples on the loads/stores below resolve to distinct offsets within the
 * workload_datasym_buf1 symbol.  NOTE(review): presumably the aligned(64) is
 * meant to pin the buffer to a cache-line boundary — confirm against the
 * datasym perf test that consumes this workload.
 */
typedef struct _buf {
	char data1;
	char reserved[55];
	char data2;
} buf __attribute__((aligned(64)));

/* volatile to try to avoid the compiler seeing reserved as unused. */
static volatile buf workload_datasym_buf1 = {
	/* to have this in the data section */
	.reserved[0] = 1,
};

/* Set by sighandler(); async-signal-safe loop-exit flag for the main loop. */
static volatile sig_atomic_t done;
/*
 * Shared SIGINT/SIGALRM handler: only sets the volatile sig_atomic_t flag,
 * which is the one store guaranteed safe inside a signal handler; the main
 * loop in datasym() polls it and exits.
 */
static void sighandler(int sig __maybe_unused)
{
	done = 1;
}
/*
 * Workload entry point: spin for 'sec' seconds (argv[0], default 1) doing
 * loads and stores on workload_datasym_buf1.data1/data2 so a perf session can
 * sample memory accesses against a known data symbol.  Always returns 0.
 *
 * NOTE(review): atoi() is unchecked — a non-numeric or "0" argv[0] yields
 * alarm(0), i.e. no alarm, so the loop runs until SIGINT.  Confirm whether
 * that is intended before "fixing" it.
 */
static int datasym(int argc, const char **argv)
{
	int sec = 1;

	if (argc > 0)
		sec = atoi(argv[0]);

	/* Exit the loop on Ctrl-C or when the alarm below fires. */
	signal(SIGINT, sighandler);
	signal(SIGALRM, sighandler);
	alarm(sec);

	while (!done) {
		workload_datasym_buf1.data1++;
		if (workload_datasym_buf1.data1 == 123) {
			/*
			 * Add some 'noise' in the loop to work around errata
			 * 1694299 on Arm N1.
			 *
			 * Bias exists in SPE sampling which can cause the load
			 * and store instructions to be skipped entirely. This
			 * comes and goes randomly depending on the offset the
			 * linker places the datasym loop at in the Perf binary.
			 * With an extra branch in the middle of the loop that
			 * isn't always taken, the instruction stream is no
			 * longer a continuous repeating pattern that interacts
			 * badly with the bias.
			 */
			workload_datasym_buf1.data1++;
		}
		workload_datasym_buf1.data2 += workload_datasym_buf1.data1;
	}
	return 0;
}

/* Registers this function as the "datasym" perf test workload (../tests.h). */
DEFINE_WORKLOAD(datasym);