author     Shaibal Dutta <shaibal.dutta@broadcom.com>  2014-01-30 14:43:34 -0800
committer  Johannes Berg <johannes.berg@intel.com>     2014-02-04 21:58:16 +0100
commit     67235cbca44f082e9c4c2ed370f9afe5fc478d49 (patch)
tree       dfbb6acb68b824afdab6d106da35b5b7cddd024b /net/rfkill
parent     845f3351b15a4cd8c6e47255c0dbfac03c6aceda (diff)
net: rfkill: move poll work to power efficient workqueue
This patch moves the rfkill poll_work to the power efficient workqueue.
This work does not have to be bound to the CPU that scheduled it, so the
choice of CPU that executes it is left to the scheduler. The net result
is that CPU idle times are extended, resulting in power savings. This
behaviour is enabled when CONFIG_WQ_POWER_EFFICIENT is selected.

Cc: "John W. Linville" <linville@tuxdriver.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Shaibal Dutta <shaibal.dutta@broadcom.com>
[zoran.markovic@linaro.org: Rebased to latest kernel, added commit
message. Fixed workqueue selection after suspend/resume cycle.]
Signed-off-by: Zoran Markovic <zoran.markovic@linaro.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
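For readers unfamiliar with the API, the sketch below shows the same pattern
the patch applies: periodic polling re-armed on system_power_efficient_wq.
It is a minimal, illustrative example, not code from this commit; the
my_poller, my_poll_fn and my_poller_start names are invented. When
CONFIG_WQ_POWER_EFFICIENT is enabled, system_power_efficient_wq is unbound,
so the scheduler is free to run the work on whichever CPU is already awake;
when it is disabled, the queue behaves like the ordinary system_wq.

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/timer.h>    /* round_jiffies_relative() */
    #include <linux/jiffies.h>  /* HZ */

    /* Hypothetical driver-private polling context. */
    struct my_poller {
            struct delayed_work poll_work;
    };

    static void my_poll_fn(struct work_struct *work)
    {
            struct my_poller *p = container_of(to_delayed_work(work),
                                               struct my_poller, poll_work);

            /* ... poll the hardware behind p here ... */

            /*
             * Re-arm on the power-efficient workqueue: with
             * CONFIG_WQ_POWER_EFFICIENT set, the work may run on any
             * CPU instead of waking the CPU that queued it.
             */
            queue_delayed_work(system_power_efficient_wq, &p->poll_work,
                               round_jiffies_relative(HZ));
    }

    static void my_poller_start(struct my_poller *p)
    {
            INIT_DELAYED_WORK(&p->poll_work, my_poll_fn);
            /* A delay of 0 runs the work as soon as a worker is free. */
            queue_delayed_work(system_power_efficient_wq, &p->poll_work, 0);
    }

Note that queue_delayed_work() with a delay of 0 queues the work
immediately, which is why the first hunk of the patch can replace
schedule_work(&rfkill->poll_work.work) with
queue_delayed_work(system_power_efficient_wq, &rfkill->poll_work, 0).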
Diffstat (limited to 'net/rfkill')
-rw-r--r--  net/rfkill/core.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index ed7e0b4e7f90..b3b16c070a7f 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -789,7 +789,8 @@ void rfkill_resume_polling(struct rfkill *rfkill)
 	if (!rfkill->ops->poll)
 		return;
 
-	schedule_work(&rfkill->poll_work.work);
+	queue_delayed_work(system_power_efficient_wq,
+			   &rfkill->poll_work, 0);
 }
 EXPORT_SYMBOL(rfkill_resume_polling);
@@ -894,7 +895,8 @@ static void rfkill_poll(struct work_struct *work)
 	 */
 	rfkill->ops->poll(rfkill, rfkill->data);
 
-	schedule_delayed_work(&rfkill->poll_work,
+	queue_delayed_work(system_power_efficient_wq,
+			   &rfkill->poll_work,
 			round_jiffies_relative(POLL_INTERVAL));
 }
@@ -958,7 +960,8 @@ int __must_check rfkill_register(struct rfkill *rfkill)
 	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
 
 	if (rfkill->ops->poll)
-		schedule_delayed_work(&rfkill->poll_work,
+		queue_delayed_work(system_power_efficient_wq,
+				   &rfkill->poll_work,
 			round_jiffies_relative(POLL_INTERVAL));
 
 	if (!rfkill->persistent || rfkill_epo_lock_active) {
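Background on the mechanism, not part of this patch: the following is a
paraphrased sketch of how kernel/workqueue.c of this era implements the
power-efficient behaviour. system_power_efficient_wq is allocated with the
WQ_POWER_EFFICIENT flag, and such workqueues are promoted to unbound
workqueues when the feature is enabled, which is what detaches the work
from the submitting CPU.

    /* Boot-time default comes from Kconfig; it can be overridden with
     * the workqueue.power_efficient= kernel command-line parameter. */
    static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT);
    module_param_named(power_efficient, wq_power_efficient, bool, 0444);

    /* During workqueue allocation: a WQ_POWER_EFFICIENT queue becomes
     * WQ_UNBOUND when the feature is on, so its work items are not
     * pinned to the CPU that queued them. */
    if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
            flags |= WQ_UNBOUND;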