author     Thomas Zimmermann <tzimmermann@suse.de>  2021-12-16 14:47:38 +0100
committer  Thomas Zimmermann <tzimmermann@suse.de>  2021-12-16 14:48:27 +0100
commit     9758ff2fa240173e9a45613b07774b7a78b7653e
tree       44c1951f9fe4ded7f18d26ca677d006c0e191569 /include/linux/bpf.h
parent     0b665d4af35837f0a0ae63135b84a3c187c1db3b
parent     244a36e50da05c33b860d20638ee4628017a5334
Merge drm/drm-next into drm-misc-next
Backmerging for v5.16-rc5. Resolves a conflict between drm-misc-next
and drm-misc-fixes in the vc4 driver.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Diffstat (limited to 'include/linux/bpf.h')
-rw-r--r--  include/linux/bpf.h  20
1 file changed, 5 insertions, 15 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f715e8863f4d..755f38e893be 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -193,7 +193,7 @@ struct bpf_map {
 	atomic64_t usercnt;
 	struct work_struct work;
 	struct mutex freeze_mutex;
-	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
+	atomic64_t writecnt;
 };
 
 static inline bool map_value_has_spin_lock(const struct bpf_map *map)
@@ -732,6 +732,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
 struct bpf_trampoline *bpf_trampoline_get(u64 key,
 					  struct bpf_attach_target_info *tgt_info);
 void bpf_trampoline_put(struct bpf_trampoline *tr);
+int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
 #define BPF_DISPATCHER_INIT(_name) {				\
 	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
 	.func = &_name##_func,					\
@@ -1352,28 +1353,16 @@ extern struct mutex bpf_stats_enabled_mutex;
  * kprobes, tracepoints) to prevent deadlocks on map operations as any of
  * these events can happen inside a region which holds a map bucket lock
  * and can deadlock on it.
- *
- * Use the preemption safe inc/dec variants on RT because migrate disable
- * is preemptible on RT and preemption in the middle of the RMW operation
- * might lead to inconsistent state. Use the raw variants for non RT
- * kernels as migrate_disable() maps to preempt_disable() so the slightly
- * more expensive save operation can be avoided.
  */
 static inline void bpf_disable_instrumentation(void)
 {
 	migrate_disable();
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		this_cpu_inc(bpf_prog_active);
-	else
-		__this_cpu_inc(bpf_prog_active);
+	this_cpu_inc(bpf_prog_active);
 }
 
 static inline void bpf_enable_instrumentation(void)
 {
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		this_cpu_dec(bpf_prog_active);
-	else
-		__this_cpu_dec(bpf_prog_active);
+	this_cpu_dec(bpf_prog_active);
 	migrate_enable();
 }
 
@@ -1419,6 +1408,7 @@ void bpf_map_put(struct bpf_map *map);
 void *bpf_map_area_alloc(u64 size, int numa_node);
 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
 void bpf_map_area_free(void *base);
+bool bpf_map_write_active(const struct bpf_map *map);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 int generic_map_lookup_batch(struct bpf_map *map,
 			     const union bpf_attr *attr,
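
Two of the hunks above belong together: struct bpf_map::writecnt changes from a u64 that was protected by freeze_mutex into an atomic64_t, and a bpf_map_write_active() predicate is declared so callers can ask whether any writable mapping is still outstanding without holding that mutex. The diff only carries the header side of the change; as a rough illustration of the pattern, here is a minimal userspace sketch using C11 atomics. struct fake_map and the inc/dec helper names are invented for the example; only bpf_map_write_active() itself appears in the diff, and its real implementation is not shown here.

/*
 * Illustrative only: a lock-free "writable mapping" counter in the spirit
 * of the writecnt change above, using C11 atomics instead of the kernel's
 * atomic64_t. Not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_map {
	atomic_long writecnt;	/* stands in for atomic64_t writecnt */
};

/* Bump the counter when a writable mapping of the map is set up. */
static void map_write_active_inc(struct fake_map *map)
{
	atomic_fetch_add_explicit(&map->writecnt, 1, memory_order_relaxed);
}

/* Drop the counter when that mapping goes away. */
static void map_write_active_dec(struct fake_map *map)
{
	atomic_fetch_sub_explicit(&map->writecnt, 1, memory_order_relaxed);
}

/* Analogue of the new bpf_map_write_active(): any writers still mapped? */
static bool map_write_active(struct fake_map *map)
{
	return atomic_load_explicit(&map->writecnt, memory_order_relaxed) != 0;
}

int main(void)
{
	struct fake_map m;

	atomic_init(&m.writecnt, 0);

	map_write_active_inc(&m);	/* e.g. a writable mapping is created */
	printf("write active: %d\n", map_write_active(&m));	/* prints 1 */

	map_write_active_dec(&m);	/* mapping torn down */
	printf("write active: %d\n", map_write_active(&m));	/* prints 0 */
	return 0;
}

As far as the header shows, the point of the switch is that the counter no longer relies on freeze_mutex for consistency, so a check like the one above can presumably be made from paths that do not hold the mutex.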