summaryrefslogtreecommitdiffstats
path: root/kernel/scs.c
diff options
context:
space:
mode:
authorWill Deacon <will@kernel.org>2020-05-15 14:43:11 +0100
committerWill Deacon <will@kernel.org>2020-05-18 17:47:33 +0100
commitbee348fab099b0f551caa874663e82a7f3bb64b3 (patch)
treedb1a964bc311bc58ad1f6ad54831825977ce80c8 /kernel/scs.c
parent51189c7a7ed1b4ed4493e27275d466ff60406d3a (diff)
downloadlinux-bee348fab099b0f551caa874663e82a7f3bb64b3.tar.gz
linux-bee348fab099b0f551caa874663e82a7f3bb64b3.tar.bz2
linux-bee348fab099b0f551caa874663e82a7f3bb64b3.zip
scs: Move accounting into alloc/free functions
There's no need to perform the shadow stack page accounting independently of the lifetime of the underlying allocation, so call the accounting code from the {alloc,free}() functions and simplify the code in the process.

Tested-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'kernel/scs.c')
-rw-r--r--kernel/scs.c45
1 file changed, 21 insertions(+), 24 deletions(-)
diff --git a/kernel/scs.c b/kernel/scs.c
index 5ff8663e4a67..aea841cd7586 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -14,25 +14,35 @@
static struct kmem_cache *scs_cache;
+static void __scs_account(void *s, int account)
+{
+ struct page *scs_page = virt_to_page(s);
+
+ mod_zone_page_state(page_zone(scs_page), NR_KERNEL_SCS_KB,
+ account * (SCS_SIZE / SZ_1K));
+}
+
static void *scs_alloc(int node)
{
- void *s;
-
- s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
- if (s) {
- *__scs_magic(s) = SCS_END_MAGIC;
- /*
- * Poison the allocation to catch unintentional accesses to
- * the shadow stack when KASAN is enabled.
- */
- kasan_poison_object_data(scs_cache, s);
- }
+ void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
+
+ if (!s)
+ return NULL;
+ *__scs_magic(s) = SCS_END_MAGIC;
+
+ /*
+ * Poison the allocation to catch unintentional accesses to
+ * the shadow stack when KASAN is enabled.
+ */
+ kasan_poison_object_data(scs_cache, s);
+ __scs_account(s, 1);
return s;
}
static void scs_free(void *s)
{
+ __scs_account(s, -1);
kasan_unpoison_object_data(scs_cache, s);
kmem_cache_free(scs_cache, s);
}
@@ -42,17 +52,6 @@ void __init scs_init(void)
scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
}
-static struct page *__scs_page(struct task_struct *tsk)
-{
- return virt_to_page(task_scs(tsk));
-}
-
-static void scs_account(struct task_struct *tsk, int account)
-{
- mod_zone_page_state(page_zone(__scs_page(tsk)), NR_KERNEL_SCS_KB,
- account * (SCS_SIZE / 1024));
-}
-
int scs_prepare(struct task_struct *tsk, int node)
{
void *s = scs_alloc(node);
@@ -61,7 +60,6 @@ int scs_prepare(struct task_struct *tsk, int node)
return -ENOMEM;
task_scs(tsk) = task_scs_sp(tsk) = s;
- scs_account(tsk, 1);
return 0;
}
@@ -102,6 +100,5 @@ void scs_release(struct task_struct *tsk)
WARN(scs_corrupted(tsk), "corrupted shadow stack detected when freeing task\n");
scs_check_usage(tsk);
- scs_account(tsk, -1);
scs_free(s);
}