author     Martin KaFai Lau <martin.lau@kernel.org>  2022-10-25 11:45:19 -0700
committer  Alexei Starovoitov <ast@kernel.org>       2022-10-25 23:11:46 -0700
commit     e8b02296a6b8d07de752d6157d863a642117bcd3
tree       4c91db052a526edd1deba8b1c3509bc117345dec
parent     6d65500c34d897329ed1be0fd3c4014ec52cd473
bpf: Avoid taking spinlock in bpf_task_storage_get if potential deadlock is detected
bpf_task_storage_get() does a lookup and optionally inserts new data if
BPF_LOCAL_STORAGE_GET_F_CREATE is present.

During lookup, it will cache the lookup result, and caching requires
acquiring a spinlock. When a potential deadlock is detected (by the
bpf_task_storage_busy pcpu-counter added in commit bc235cdb423a ("bpf:
Prevent deadlock from recursive bpf_task_storage_[get|delete]")), the
current behavior is to return NULL immediately to avoid the deadlock.
This is too pessimistic. This patch instead goes ahead with the lookup
(which is a lockless operation) but skips caching the result, so the
spinlock is never taken.

When the lookup fails to find the data and BPF_LOCAL_STORAGE_GET_F_CREATE
is set, an insertion is needed, and that does require acquiring the
spinlock. For this case, the patch still returns NULL when a potential
deadlock is detected.

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20221025184524.3526117-5-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
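[Editor's note] For reference, the potential-deadlock detection mentioned
above is a per-CPU busy counter. A minimal sketch of that trylock pattern,
modeled on (but not copied verbatim from) commit bc235cdb423a:

	static DEFINE_PER_CPU(int, bpf_task_storage_busy);

	static bool bpf_task_storage_trylock(void)
	{
		migrate_disable();
		if (unlikely(this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
			/* A storage operation is already in flight on this
			 * CPU (i.e. a recursive BPF program invocation);
			 * taking the spinlock here could deadlock, so back off.
			 */
			this_cpu_dec(bpf_task_storage_busy);
			migrate_enable();
			return false;
		}
		return true;
	}

	static void bpf_task_storage_unlock(void)
	{
		this_cpu_dec(bpf_task_storage_busy);
		migrate_enable();
	}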
-rw-r--r--	kernel/bpf/bpf_local_storage.c	1
-rw-r--r--	kernel/bpf/bpf_task_storage.c	15
2 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 9dc6de1cf185..781d14167140 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -242,6 +242,7 @@ void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
 	__bpf_selem_unlink_storage(selem, use_trace_rcu);
 }
 
+/* If cacheit_lockit is false, this lookup function is lockless */
 struct bpf_local_storage_data *
 bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
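[Editor's note] The comment added above holds because the list walk in
this function is RCU-protected; the spinlock is only needed to publish a
hit into the cache slot. A simplified sketch (details elided, not the
verbatim kernel function):

	struct bpf_local_storage_data *
	bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
				 struct bpf_local_storage_map *smap,
				 bool cacheit_lockit)
	{
		struct bpf_local_storage_elem *selem;
		unsigned long flags;

		/* Lockless part: walk the storage list under RCU. */
		hlist_for_each_entry_rcu(selem, &local_storage->list, snode)
			if (rcu_access_pointer(SDATA(selem)->smap) == smap)
				break;
		if (!selem)
			return NULL;

		if (cacheit_lockit) {
			/* Only caching the hit needs the spinlock;
			 * skipping this step keeps the lookup lockless.
			 */
			raw_spin_lock_irqsave(&local_storage->lock, flags);
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   SDATA(selem));
			raw_spin_unlock_irqrestore(&local_storage->lock, flags);
		}
		return SDATA(selem);
	}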
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index 2726435e3eda..bc52bc8b59f7 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -230,17 +230,17 @@ out:
 /* Called by bpf_task_storage_get*() helpers */
 static void *__bpf_task_storage_get(struct bpf_map *map,
				    struct task_struct *task, void *value,
-				    u64 flags, gfp_t gfp_flags)
+				    u64 flags, gfp_t gfp_flags, bool nobusy)
 {
	struct bpf_local_storage_data *sdata;
 
-	sdata = task_storage_lookup(task, map, true);
+	sdata = task_storage_lookup(task, map, nobusy);
	if (sdata)
		return sdata->data;
 
	/* only allocate new storage, when the task is refcounted */
	if (refcount_read(&task->usage) &&
-	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE)) {
+	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) {
		sdata = bpf_local_storage_update(
			task, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, gfp_flags);
@@ -254,17 +254,18 @@ static void *__bpf_task_storage_get(struct bpf_map *map,
 BPF_CALL_5(bpf_task_storage_get_recur, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags, gfp_t, gfp_flags)
 {
+	bool nobusy;
	void *data;
 
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
		return (unsigned long)NULL;
 
-	if (!bpf_task_storage_trylock())
-		return (unsigned long)NULL;
+	nobusy = bpf_task_storage_trylock();
	data = __bpf_task_storage_get(map, task, value, flags,
-				      gfp_flags);
-	bpf_task_storage_unlock();
+				      gfp_flags, nobusy);
+	if (nobusy)
+		bpf_task_storage_unlock();
	return (unsigned long)data;
 }
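[Editor's note] For completeness, a hypothetical BPF program that exercises
this helper path; the map name, attach point, and counter semantics are
illustrative and not part of this patch:

	// SPDX-License-Identifier: GPL-2.0
	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	/* Task storage maps require an int key and BPF_F_NO_PREALLOC. */
	struct {
		__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC);
		__type(key, int);
		__type(value, long);
	} task_store SEC(".maps");

	SEC("fentry/security_task_alloc")
	int BPF_PROG(count_alloc, struct task_struct *task)
	{
		long *cnt;

		/* With this patch, a recursive invocation still gets a
		 * lockless lookup; only the F_CREATE insertion path
		 * returns NULL when the busy counter detects a potential
		 * deadlock.
		 */
		cnt = bpf_task_storage_get(&task_store, task, 0,
					   BPF_LOCAL_STORAGE_GET_F_CREATE);
		if (cnt)
			__sync_fetch_and_add(cnt, 1);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";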