author     Thomas Gleixner <tglx@linutronix.de>    2020-12-10 20:25:41 +0100
committer  Thomas Gleixner <tglx@linutronix.de>    2020-12-15 16:19:30 +0100
commit     9e42ad10cedf0632fc39860381375806092212bd
tree       869b44fb4351939487b8d5a47ec4057357ff19da
parent     3e2380123fb96987ce958f623207010c667ffa7c
genirq: Annotate irq stats data races
Both the per-CPU stats and the accumulated count are accessed locklessly and
can be modified concurrently. That is intentional, and the stats are a rough
estimate anyway. Annotate them with data_race().
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20201210194043.067097663@linutronix.de
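For context, data_race() is the KCSAN (Kernel Concurrency Sanitizer) annotation from include/linux/compiler.h: it marks an expression whose data race is known and tolerated so the sanitizer does not report it, while adding none of the ordering or anti-tearing guarantees of READ_ONCE()/WRITE_ONCE(). The stand-alone sketch below only mimics that idea so it compiles outside the kernel; the __kcsan_* hooks are no-op stand-ins and the macro body is a simplification, not the kernel's actual definition.

/*
 * Stand-alone sketch of the data_race() idea, NOT the kernel definition
 * (see include/linux/compiler.h for that).  The __kcsan_* hooks below are
 * no-op stand-ins so the example compiles outside the kernel; in a real
 * KCSAN build they suppress race reports for the bracketed expression.
 */
#include <stdio.h>

static inline void __kcsan_disable_current(void) { }	/* stand-in */
static inline void __kcsan_enable_current(void) { }	/* stand-in */

#define data_race(expr)				\
({						\
	__kcsan_disable_current();		\
	__auto_type __v = (expr);		\
	__kcsan_enable_current();		\
	__v;					\
})

int main(void)
{
	unsigned int tot_count = 42;	/* stands in for desc->tot_count */

	/* Reader side, as in kstat_irqs(): a racy but tolerated plain read. */
	printf("%u\n", data_race(tot_count));
	return 0;
}

An occasionally stale or torn value is acceptable here precisely because, as the changelog notes, the stats are a rough estimate anyway.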
 kernel/irq/irqdesc.c | 4 ++--
 kernel/irq/proc.c    | 5 +++--
 2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 20a54fa7cd30..02b446a21ce6 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -943,10 +943,10 @@ unsigned int kstat_irqs(unsigned int irq)
 	if (!irq_settings_is_per_cpu_devid(desc) &&
 	    !irq_settings_is_per_cpu(desc) &&
 	    !irq_is_nmi(desc))
-		return desc->tot_count;
+		return data_race(desc->tot_count);
 
 	for_each_possible_cpu(cpu)
-		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
+		sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));
 	return sum;
 }
 
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 72513ed2a5fc..98138788cb04 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -488,9 +488,10 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (!desc || irq_settings_is_hidden(desc))
 		goto outsparse;
 
-	if (desc->kstat_irqs)
+	if (desc->kstat_irqs) {
 		for_each_online_cpu(j)
-			any_count |= *per_cpu_ptr(desc->kstat_irqs, j);
+			any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));
+	}
 
 	if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
 		goto outsparse;
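To make the annotated pattern concrete, here is a user-space sketch of what the two hunks above tolerate: several writers bump their own counters with no locking while a reader sums them, much like kstat_irqs() summing the per-CPU counters. All names are hypothetical, plain pthreads stand in for per-CPU data, and data_race() is reduced to a pass-through marker.

/*
 * User-space sketch of the lockless-statistics pattern annotated above.
 * Everything here is illustrative: NR_CPUS, per_cpu_count and irq_handler
 * are made up, and data_race() is a pass-through stand-in that merely
 * documents the intentional race (in the kernel it also silences KCSAN).
 * Build with: cc -O2 -pthread sketch.c
 */
#include <stdio.h>
#include <pthread.h>

#define NR_CPUS 4
#define data_race(expr) (expr)			/* stand-in annotation */

static unsigned long per_cpu_count[NR_CPUS];	/* one slot per "CPU" */

/* Writer side: each "CPU" bumps its own counter without locking. */
static void *irq_handler(void *arg)
{
	unsigned long cpu = (unsigned long)arg;

	for (int i = 0; i < 1000000; i++)
		per_cpu_count[cpu]++;
	return NULL;
}

/* Reader side, like kstat_irqs(): sum the counters, tolerating races. */
static unsigned long sum_counts(void)
{
	unsigned long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += data_race(per_cpu_count[cpu]);
	return sum;
}

int main(void)
{
	pthread_t tid[NR_CPUS];

	for (unsigned long cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_create(&tid[cpu], NULL, irq_handler, (void *)cpu);

	/* Concurrent read: the value is approximate by design. */
	printf("in-flight estimate: %lu\n", sum_counts());

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_join(tid[cpu], NULL);

	printf("final count: %lu\n", sum_counts());
	return 0;
}

Because each writer owns its slot, the only race is between a writer and the summing reader, which is exactly the kind of benign race the patch marks as acceptable.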