author	Matthew Wilcox (Oracle) <willy@infradead.org>	2021-03-20 16:34:54 -0400
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2021-10-18 07:49:39 -0400
commit	be5f1797523004d0d9aaee81a523d86e3b890007 (patch)
tree	79cabae66b7506020d1b220c29a0c1b642867030 /include
parent	bd3488e7b4d61780eb3dfaca1cc6f4026bcffd48 (diff)
flex_proportions: Allow N events instead of 1
When batching events (such as writing back N pages in a single I/O), it is better to do one flex_proportion operation instead of N. There is only one caller of __fprop_inc_percpu_max(), and it's the one we're going to change in the next patch, so rename it instead of adding a compatibility wrapper.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
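To illustrate the point of the interface change, a caller that completes a batch of events can now charge them with a single proportion update rather than looping over per-event increments. The sketch below is illustrative only; the function name example_account_writeback and its nr_pages/max_frac parameters are hypothetical and not part of this patch.

/*
 * Illustrative sketch, not part of this patch: account a batch of
 * nr_pages completed writes with one proportion update instead of
 * nr_pages separate __fprop_inc_percpu_max() calls.
 */
static void example_account_writeback(struct fprop_global *p,
				      struct fprop_local_percpu *pl,
				      long nr_pages, int max_frac)
{
	unsigned long flags;

	local_irq_save(flags);
	/* One update covering the whole batch. */
	__fprop_add_percpu_max(p, pl, max_frac, nr_pages);
	local_irq_restore(flags);
}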
Diffstat (limited to 'include')
-rw-r--r--	include/linux/flex_proportions.h	9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index c12df59d3f5f..3e378b1fb0bc 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -83,9 +83,10 @@ struct fprop_local_percpu {
 
 int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
 void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
-void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
-		int max_frac);
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+		long nr);
+void __fprop_add_percpu_max(struct fprop_global *p,
+		struct fprop_local_percpu *pl, int max_frac, long nr);
 void fprop_fraction_percpu(struct fprop_global *p,
 		struct fprop_local_percpu *pl, unsigned long *numerator,
 		unsigned long *denominator);
@@ -96,7 +97,7 @@ void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__fprop_inc_percpu(p, pl);
+	__fprop_add_percpu(p, pl, 1);
 	local_irq_restore(flags);
 }
 
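For completeness, a batched counterpart of the fprop_inc_percpu() wrapper shown above would follow the same IRQ-safe pattern. The helper below is a hypothetical sketch only; the actual batched caller is introduced by the next patch in the series, per the commit message, and may look different.

/* Hypothetical sketch of an IRQ-safe batched wrapper; not part of this patch. */
static inline void example_fprop_add_percpu(struct fprop_global *p,
					    struct fprop_local_percpu *pl,
					    long nr)
{
	unsigned long flags;

	local_irq_save(flags);
	__fprop_add_percpu(p, pl, nr);
	local_irq_restore(flags);
}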