author		Johannes Berg <johannes.berg@intel.com>	2017-06-08 14:14:40 +0200
committer	Johannes Berg <johannes.berg@intel.com>	2017-06-08 14:14:45 +0200
commit		a43e61842ec55baa486d60eed2a19af67ba78b9f (patch)
tree		6c3c93f19e3933273b73b9edd16edc146ab18527 /drivers/md/dm-cache-policy-smq.c
parent		5d473fedd17ae3a9f92fb35551e307d01459ea6a (diff)
parent		50dffe7fad6c156c2928e45c19ff7b86eb951f4c (diff)
Merge remote-tracking branch 'net-next/master' into mac80211-next
This brings in commit 7a7c0a6438b8 ("mac80211: fix TX aggregation start/stop callback race") to allow the follow-up cleanup.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'drivers/md/dm-cache-policy-smq.c')
-rw-r--r--	drivers/md/dm-cache-policy-smq.c	31
1 file changed, 12 insertions(+), 19 deletions(-)
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 72479bd61e11..e5eb9c9b4bc8 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1120,8 +1120,6 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
* Cache entries may not be populated. So we cannot rely on the
* size of the clean queue.
*/
- unsigned nr_clean;
-
if (idle) {
/*
* We'd like to clean everything.
@@ -1129,18 +1127,16 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
return q_size(&mq->dirty) == 0u;
}
- nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
- return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
- percent_to_target(mq, CLEAN_TARGET);
+ /*
+ * If we're busy we don't worry about cleaning at all.
+ */
+ return true;
}
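
For reference, here is clean_target_met() as it reads after this hunk, reconstructed from the +/- and context lines above (the lines elided by the hunk are inferred, so treat this as a sketch rather than the verbatim file):

static bool clean_target_met(struct smq_policy *mq, bool idle)
{
	/*
	 * Cache entries may not be populated. So we cannot rely on the
	 * size of the clean queue.
	 */
	if (idle) {
		/*
		 * We'd like to clean everything.
		 */
		return q_size(&mq->dirty) == 0u;
	}

	/*
	 * If we're busy we don't worry about cleaning at all.
	 */
	return true;
}

The net effect: the CLEAN_TARGET percentage check is gone, and under load the clean target is simply reported as met, so this path never triggers writeback unless the device is idle.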
-static bool free_target_met(struct smq_policy *mq, bool idle)
+static bool free_target_met(struct smq_policy *mq)
{
unsigned nr_free;
- if (!idle)
- return true;
-
nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
percent_to_target(mq, FREE_TARGET);
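
With the !idle early-return removed, free_target_met() no longer needs its idle parameter; every caller now gets the real free-space check. The resulting function, again reconstructed from the hunk with elided context inferred:

static bool free_target_met(struct smq_policy *mq)
{
	unsigned nr_free;

	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
		percent_to_target(mq, FREE_TARGET);
}

Demotions already queued with the background tracker count toward the target, so the policy avoids over-queueing demotion work that is in flight.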
@@ -1190,9 +1186,9 @@ static void queue_demotion(struct smq_policy *mq)
if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
return;
- e = q_peek(&mq->clean, mq->clean.nr_levels, true);
+ e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
if (!e) {
- if (!clean_target_met(mq, false))
+ if (!clean_target_met(mq, true))
queue_writeback(mq);
return;
}
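
Two behavioural tweaks in queue_demotion(): q_peek() now only searches the bottom half of the clean queue's levels (mq->clean.nr_levels / 2), i.e. the colder entries, and the fallback uses clean_target_met(mq, true), the stricter idle-mode target, before queueing writeback. The resulting fragment (a sketch, surrounding lines inferred from context):

	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
	if (!e) {
		/*
		 * Nothing suitably cold and clean to demote; see whether
		 * writeback would help, judged against the idle target.
		 */
		if (!clean_target_met(mq, true))
			queue_writeback(mq);
		return;
	}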
@@ -1220,7 +1216,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
* We always claim to be 'idle' to ensure some demotions happen
* with continuous loads.
*/
- if (!free_target_met(mq, true))
+ if (!free_target_met(mq))
queue_demotion(mq);
return;
}
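
The promotion path simply drops the now-removed idle argument. As an aside, the percent_to_target() comparisons in both target checks read as a straight percentage of the cache size; a plausible shape for that helper, labelled as an assumption since its body is not part of this diff, would be:

/* Assumed helper shape (not shown in this diff): p percent of the cache size. */
static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
{
	return from_cblock(mq->cache_size) * p / 100u;
}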
@@ -1421,14 +1417,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
spin_lock_irqsave(&mq->lock, flags);
r = btracker_issue(mq->bg_work, result);
if (r == -ENODATA) {
- /* find some writeback work to do */
- if (mq->migrations_allowed && !free_target_met(mq, idle))
- queue_demotion(mq);
-
- else if (!clean_target_met(mq, idle))
+ if (!clean_target_met(mq, idle)) {
queue_writeback(mq);
-
- r = btracker_issue(mq->bg_work, result);
+ r = btracker_issue(mq->bg_work, result);
+ }
}
spin_unlock_irqrestore(&mq->lock, flags);
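
The -ENODATA path in smq_get_background_work() no longer originates demotions itself; demotion is now driven solely from queue_promotion() via the free-target check above. If the clean target is unmet, a writeback is queued and btracker_issue() is retried, all under mq->lock. The resulting block (a sketch, elided context inferred):

	spin_lock_irqsave(&mq->lock, flags);
	r = btracker_issue(mq->bg_work, result);
	if (r == -ENODATA) {
		if (!clean_target_met(mq, idle)) {
			queue_writeback(mq);
			r = btracker_issue(mq->bg_work, result);
		}
	}
	spin_unlock_irqrestore(&mq->lock, flags);

Note the retry now happens only when a writeback was actually queued, where before it ran unconditionally after the target checks.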
@@ -1452,6 +1444,7 @@ static void __complete_background_work(struct smq_policy *mq,
clear_pending(mq, e);
if (success) {
e->oblock = work->oblock;
+ e->level = NR_CACHE_LEVELS - 1;
push(mq, e);
// h, q, a
} else {
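
On successful completion of a background op, the entry's level is now reset to NR_CACHE_LEVELS - 1 (the highest level index) before push() re-inserts it; the "// h, q, a" style comments appear to track the entry's hashed/queued/allocated state. The success branch after this hunk (a sketch; the surrounding switch arm is inferred from context):

	clear_pending(mq, e);
	if (success) {
		e->oblock = work->oblock;
		e->level = NR_CACHE_LEVELS - 1;	/* re-insert at the top level */
		push(mq, e);
		// h, q, a
	}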