/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#ifndef __ASM_SIMD_H
#define __ASM_SIMD_H

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>

DECLARE_PER_CPU(bool, fpsimd_context_busy);
#ifdef CONFIG_KERNEL_MODE_NEON

/*
 * may_use_simd - whether it is allowable at this time to issue SIMD
 *                instructions or access the SIMD register file
 *
 * Callers must not assume that the result remains true beyond the next
 * preempt_enable() or return from softirq context.
 */
static __must_check inline bool may_use_simd(void)
{
	/*
	 * We must make sure that SVE has been initialized properly
	 * before using SIMD in the kernel.
	 * fpsimd_context_busy is only set while preemption is disabled,
	 * and is clear whenever preemption is enabled. Since
	 * this_cpu_read() is atomic w.r.t. preemption, fpsimd_context_busy
	 * cannot change under our feet -- if it's set we cannot be
	 * migrated, and if it's clear we cannot be migrated to a CPU
	 * where it is set.
	 */
	return !WARN_ON(!system_capabilities_finalized()) &&
	       system_supports_fpsimd() &&
	       !in_irq() && !irqs_disabled() && !in_nmi() &&
	       !this_cpu_read(fpsimd_context_busy);
}

#else /* ! CONFIG_KERNEL_MODE_NEON */

static __must_check inline bool may_use_simd(void)
{
	return false;
}

#endif /* ! CONFIG_KERNEL_MODE_NEON */
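
/*
 * Typical usage (a minimal illustrative sketch): test may_use_simd() and
 * bracket the SIMD code with kernel_neon_begin()/kernel_neon_end() from
 * <asm/neon.h>, falling back to a scalar path otherwise.
 * do_thing_simd() and do_thing_scalar() are hypothetical helpers used
 * only for illustration.
 *
 *	static void do_thing(u8 *dst, const u8 *src, size_t len)
 *	{
 *		if (may_use_simd()) {
 *			kernel_neon_begin();
 *			do_thing_simd(dst, src, len);
 *			kernel_neon_end();
 *		} else {
 *			do_thing_scalar(dst, src, len);
 *		}
 *	}
 *
 * Because the result of may_use_simd() is only valid until the next
 * preempt_enable() or return from softirq context, the check and the
 * kernel_neon_begin()/kernel_neon_end() region must stay together.
 */
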
#endif