| author | Peter Zijlstra <peterz@infradead.org> | 2020-06-15 11:29:31 +0200 |
|---|---|---|
| committer | Peter Zijlstra <peterz@infradead.org> | 2020-11-24 16:47:49 +0100 |
| commit | 545b8c8df41f9ecbaf806332d4095bc4bc7c14e8 | |
| tree | 8ce051dfb3d847b5fa8cddf6aa2ad265e3416a36 /kernel/smp.c | |
| parent | 7a9f50a05843fee8366bd3a65addbebaa7cf7f07 | |
smp: Cleanup smp_call_function*()
Get rid of the __call_single_node union and clean up the API a little
to avoid external code relying on the structure layout as much.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Diffstat (limited to 'kernel/smp.c')
-rw-r--r-- | kernel/smp.c | 50
1 file changed, 25 insertions(+), 25 deletions(-)
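For context, the structure layout the diff below codes against looks roughly like the sketch that follows. This is a hedged reconstruction from the accessors the patch uses (node.llist, node.u_flags, node.src, node.dst), not a verbatim copy of the kernel headers; the authoritative definitions live in include/linux/types.h and include/linux/smp.h.

```c
/*
 * Rough sketch (reconstructed, not copied from the headers) of the
 * layout after this patch: all queueing state is gathered in an
 * embedded struct __call_single_node, so external code no longer
 * pokes at csd->flags / csd->llist directly.
 */
struct __call_single_node {
	struct llist_node	llist;		/* entry on the per-CPU call_single_queue */
	union {
		unsigned int	u_flags;	/* CSD_FLAG_LOCK | CSD_TYPE_* */
		atomic_t	a_flags;
	};
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	u16			src, dst;	/* sending/target CPU, for CSD lock-wait debug */
#endif
};

struct __call_single_data {
	struct __call_single_node	node;	/* private to the smp core */
	smp_call_func_t			func;	/* callback run on the target CPU */
	void				*info;	/* argument passed to func */
};
```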
```diff
diff --git a/kernel/smp.c b/kernel/smp.c
index 4d17501433be..faf1a3ace6a9 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -27,7 +27,7 @@
 #include "smpboot.h"
 #include "sched/smp.h"
 
-#define CSD_TYPE(_csd)	((_csd)->flags & CSD_FLAG_TYPE_MASK)
+#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
 
 struct call_function_data {
 	call_single_data_t	__percpu *csd;
@@ -146,7 +146,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
 	bool firsttime;
 	u64 ts2, ts_delta;
 	call_single_data_t *cpu_cur_csd;
-	unsigned int flags = READ_ONCE(csd->flags);
+	unsigned int flags = READ_ONCE(csd->node.u_flags);
 
 	if (!(flags & CSD_FLAG_LOCK)) {
 		if (!unlikely(*bug_id))
@@ -224,14 +224,14 @@ static void csd_lock_record(call_single_data_t *csd)
 
 static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
-	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
+	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #endif
 
 static __always_inline void csd_lock(call_single_data_t *csd)
 {
 	csd_lock_wait(csd);
-	csd->flags |= CSD_FLAG_LOCK;
+	csd->node.u_flags |= CSD_FLAG_LOCK;
 
 	/*
 	 * prevent CPU from reordering the above assignment
@@ -243,12 +243,12 @@ static __always_inline void csd_lock(call_single_data_t *csd)
 
 static __always_inline void csd_unlock(call_single_data_t *csd)
 {
-	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
+	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
 
 	/*
 	 * ensure we're all done before releasing data:
 	 */
-	smp_store_release(&csd->flags, 0);
+	smp_store_release(&csd->node.u_flags, 0);
 }
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
@@ -300,7 +300,7 @@ static int generic_exec_single(int cpu, call_single_data_t *csd)
 		return -ENXIO;
 	}
 
-	__smp_call_single_queue(cpu, &csd->llist);
+	__smp_call_single_queue(cpu, &csd->node.llist);
 
 	return 0;
 }
@@ -353,7 +353,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 		 * We don't have to use the _safe() variant here
 		 * because we are not invoking the IPI handlers yet.
 		 */
-		llist_for_each_entry(csd, entry, llist) {
+		llist_for_each_entry(csd, entry, node.llist) {
 			switch (CSD_TYPE(csd)) {
 			case CSD_TYPE_ASYNC:
 			case CSD_TYPE_SYNC:
@@ -378,16 +378,16 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	 * First; run all SYNC callbacks, people are waiting for us.
 	 */
 	prev = NULL;
-	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
+	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
 		/* Do we wait until *after* callback? */
 		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
 			smp_call_func_t func = csd->func;
 			void *info = csd->info;
 
 			if (prev) {
-				prev->next = &csd_next->llist;
+				prev->next = &csd_next->node.llist;
 			} else {
-				entry = &csd_next->llist;
+				entry = &csd_next->node.llist;
 			}
 
 			csd_lock_record(csd);
@@ -395,7 +395,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 			csd_unlock(csd);
 			csd_lock_record(NULL);
 		} else {
-			prev = &csd->llist;
+			prev = &csd->node.llist;
 		}
 	}
 
@@ -406,14 +406,14 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	 * Second; run all !SYNC callbacks.
 	 */
 	prev = NULL;
-	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
+	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
 		int type = CSD_TYPE(csd);
 
 		if (type != CSD_TYPE_TTWU) {
 			if (prev) {
-				prev->next = &csd_next->llist;
+				prev->next = &csd_next->node.llist;
 			} else {
-				entry = &csd_next->llist;
+				entry = &csd_next->node.llist;
 			}
 
 			if (type == CSD_TYPE_ASYNC) {
@@ -429,7 +429,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 			}
 
 		} else {
-			prev = &csd->llist;
+			prev = &csd->node.llist;
 		}
 	}
 
@@ -465,7 +465,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 {
 	call_single_data_t *csd;
 	call_single_data_t csd_stack = {
-		.flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC,
+		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
 	};
 	int this_cpu;
 	int err;
@@ -502,8 +502,8 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	csd->func = func;
 	csd->info = info;
 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
-	csd->src = smp_processor_id();
-	csd->dst = cpu;
+	csd->node.src = smp_processor_id();
+	csd->node.dst = cpu;
 #endif
 
 	err = generic_exec_single(cpu, csd);
@@ -544,12 +544,12 @@ int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 
 	preempt_disable();
 
-	if (csd->flags & CSD_FLAG_LOCK) {
+	if (csd->node.u_flags & CSD_FLAG_LOCK) {
 		err = -EBUSY;
 		goto out;
 	}
 
-	csd->flags = CSD_FLAG_LOCK;
+	csd->node.u_flags = CSD_FLAG_LOCK;
 	smp_wmb();
 
 	err = generic_exec_single(cpu, csd);
@@ -667,14 +667,14 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 
 			csd_lock(csd);
 			if (wait)
-				csd->flags |= CSD_TYPE_SYNC;
+				csd->node.u_flags |= CSD_TYPE_SYNC;
 			csd->func = func;
 			csd->info = info;
 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
-			csd->src = smp_processor_id();
-			csd->dst = cpu;
+			csd->node.src = smp_processor_id();
+			csd->node.dst = cpu;
 #endif
-			if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu)))
 				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 		}
```
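To illustrate what the cleanup buys callers (this example is not part of the patch, and the names remote_tick and kick_cpu are hypothetical): users of the async API fill in only func and info, while everything under csd->node, including the CSD_FLAG_LOCK state and the llist linkage, stays internal to kernel/smp.c.

```c
#include <linux/smp.h>

/* Hypothetical callback; runs on the target CPU from IPI context, so keep it short. */
static void remote_tick(void *info)
{
	/* e.g. poke a per-CPU state machine */
}

/* The csd must stay valid until the previous invocation completes. */
static call_single_data_t remote_tick_csd = {
	.func = remote_tick,
	.info = NULL,
};

static int kick_cpu(int cpu)
{
	/*
	 * Returns -EBUSY if an earlier invocation of remote_tick_csd has
	 * not finished yet (the smp core still holds CSD_FLAG_LOCK in
	 * csd->node.u_flags); the caller never touches that field.
	 */
	return smp_call_function_single_async(cpu, &remote_tick_csd);
}
```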