Diffstat (limited to 'net/rfkill')
-rw-r--r--  net/rfkill/Kconfig |   4
-rw-r--r--  net/rfkill/core.c  | 330
2 files changed, 327 insertions(+), 7 deletions(-)
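
The patch below adds a /dev/rfkill misc character device to the rfkill core: every registered switch gets an index (also exposed as the sysfs "index" attribute), state changes are queued per open file descriptor as fixed-size event records, and userspace can write RFKILL_OP_CHANGE / RFKILL_OP_CHANGE_ALL records to soft-block or unblock switches. The event record itself is declared in include/linux/rfkill.h, which this diff does not touch; the sketch below is inferred from how rfkill_fill_event() and the read/write handlers use it, so the field order and widths shown here are an assumption, not the authoritative definition.

/*
 * Sketch of the userspace event record implied by rfkill_fill_event()
 * and the fop read/write handlers in this patch.  The real definition
 * lives in include/linux/rfkill.h (not shown in this diff).
 */
struct rfkill_event {
	__u32 idx;	/* rfkill->idx, matches the sysfs "index" attribute */
	__u8  type;	/* enum rfkill_type; RFKILL_TYPE_ALL wildcards on write */
	__u8  op;	/* enum rfkill_operation: ADD, DEL, CHANGE, CHANGE_ALL */
	__u8  soft;	/* soft-block state; the only state userspace may set */
	__u8  hard;	/* hard-block state; writes may omit it (sizeof - 1) */
};
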
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index b47f72fae05d..fd7600d8ab14 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -18,7 +18,7 @@ config RFKILL_LEDS
default y
config RFKILL_INPUT
- bool
+ bool "RF switch input support"
depends on RFKILL
depends on INPUT = y || RFKILL = INPUT
- default y
+ default y if !EMBEDDED
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 30a6f8d819b2..2230aa6b14f3 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -28,6 +28,10 @@
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
#include "rfkill.h"
@@ -49,6 +53,8 @@ struct rfkill {
unsigned long state;
+ u32 idx;
+
bool registered;
bool suspended;
@@ -69,6 +75,18 @@ struct rfkill {
};
#define to_rfkill(d) container_of(d, struct rfkill, dev)
+struct rfkill_int_event {
+ struct list_head list;
+ struct rfkill_event ev;
+};
+
+struct rfkill_data {
+ struct list_head list;
+ struct list_head events;
+ struct mutex mtx;
+ wait_queue_head_t read_wait;
+ bool input_handler;
+};
MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
@@ -90,6 +108,7 @@ MODULE_LICENSE("GPL");
*/
static LIST_HEAD(rfkill_list); /* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
+static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */
static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
@@ -171,12 +190,48 @@ static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
}
#endif /* CONFIG_RFKILL_LEDS */
-static void rfkill_uevent(struct rfkill *rfkill)
+static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
+ enum rfkill_operation op)
+{
+ unsigned long flags;
+
+ ev->idx = rfkill->idx;
+ ev->type = rfkill->type;
+ ev->op = op;
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
+ ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
+ RFKILL_BLOCK_SW_PREV));
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+}
+
+static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
+{
+ struct rfkill_data *data;
+ struct rfkill_int_event *ev;
+
+ list_for_each_entry(data, &rfkill_fds, list) {
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ continue;
+ rfkill_fill_event(&ev->ev, rfkill, op);
+ mutex_lock(&data->mtx);
+ list_add_tail(&ev->list, &data->events);
+ mutex_unlock(&data->mtx);
+ wake_up_interruptible(&data->read_wait);
+ }
+}
+
+static void rfkill_event(struct rfkill *rfkill)
{
if (!rfkill->registered || rfkill->suspended)
return;
kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
+
+ /* also send event to /dev/rfkill */
+ rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}
static bool __rfkill_set_hw_state(struct rfkill *rfkill,
@@ -260,9 +315,12 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
spin_unlock_irqrestore(&rfkill->lock, flags);
rfkill_led_trigger_event(rfkill);
- rfkill_uevent(rfkill);
+ rfkill_event(rfkill);
}
+#ifdef CONFIG_RFKILL_INPUT
+static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
+
/**
* __rfkill_switch_all - Toggle state of all switches of given type
* @type: type of interfaces to be affected
@@ -299,6 +357,9 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
*/
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
+ if (atomic_read(&rfkill_input_disabled))
+ return;
+
mutex_lock(&rfkill_global_mutex);
if (!rfkill_epo_lock_active)
@@ -321,6 +382,9 @@ void rfkill_epo(void)
struct rfkill *rfkill;
int i;
+ if (atomic_read(&rfkill_input_disabled))
+ return;
+
mutex_lock(&rfkill_global_mutex);
rfkill_epo_lock_active = true;
@@ -331,6 +395,7 @@ void rfkill_epo(void)
rfkill_global_states[i].def = rfkill_global_states[i].cur;
rfkill_global_states[i].cur = true;
}
+
mutex_unlock(&rfkill_global_mutex);
}
@@ -345,6 +410,9 @@ void rfkill_restore_states(void)
{
int i;
+ if (atomic_read(&rfkill_input_disabled))
+ return;
+
mutex_lock(&rfkill_global_mutex);
rfkill_epo_lock_active = false;
@@ -361,6 +429,9 @@ void rfkill_restore_states(void)
*/
void rfkill_remove_epo_lock(void)
{
+ if (atomic_read(&rfkill_input_disabled))
+ return;
+
mutex_lock(&rfkill_global_mutex);
rfkill_epo_lock_active = false;
mutex_unlock(&rfkill_global_mutex);
@@ -391,9 +462,12 @@ bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
return rfkill_global_states[type].cur;
}
+#endif
void rfkill_set_global_sw_state(const enum rfkill_type type, bool blocked)
{
+ BUG_ON(type == RFKILL_TYPE_ALL);
+
mutex_lock(&rfkill_global_mutex);
/* don't allow unblock when epo */
@@ -537,6 +611,15 @@ static ssize_t rfkill_type_show(struct device *dev,
return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}
+static ssize_t rfkill_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", rfkill->idx);
+}
+
static u8 user_state_from_blocked(unsigned long state)
{
if (state & RFKILL_BLOCK_HW)
@@ -594,6 +677,7 @@ static ssize_t rfkill_claim_store(struct device *dev,
static struct device_attribute rfkill_dev_attrs[] = {
__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
+ __ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
__ATTR_NULL
@@ -708,7 +792,7 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
if (WARN_ON(!name))
return NULL;
- if (WARN_ON(type >= NUM_RFKILL_TYPES))
+ if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
return NULL;
rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
@@ -754,7 +838,9 @@ static void rfkill_uevent_work(struct work_struct *work)
rfkill = container_of(work, struct rfkill, uevent_work);
- rfkill_uevent(rfkill);
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_event(rfkill);
+ mutex_unlock(&rfkill_global_mutex);
}
static void rfkill_sync_work(struct work_struct *work)
@@ -785,6 +871,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
goto unlock;
}
+ rfkill->idx = rfkill_no;
dev_set_name(dev, "rfkill%lu", rfkill_no);
rfkill_no++;
@@ -819,6 +906,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
schedule_work(&rfkill->sync_work);
+ rfkill_send_events(rfkill, RFKILL_OP_ADD);
mutex_unlock(&rfkill_global_mutex);
return 0;
@@ -848,6 +936,7 @@ void rfkill_unregister(struct rfkill *rfkill)
device_del(&rfkill->dev);
mutex_lock(&rfkill_global_mutex);
+ rfkill_send_events(rfkill, RFKILL_OP_DEL);
list_del_init(&rfkill->node);
mutex_unlock(&rfkill_global_mutex);
@@ -862,6 +951,227 @@ void rfkill_destroy(struct rfkill *rfkill)
}
EXPORT_SYMBOL(rfkill_destroy);
+static int rfkill_fop_open(struct inode *inode, struct file *file)
+{
+ struct rfkill_data *data;
+ struct rfkill *rfkill;
+ struct rfkill_int_event *ev, *tmp;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&data->events);
+ mutex_init(&data->mtx);
+ init_waitqueue_head(&data->read_wait);
+
+ mutex_lock(&rfkill_global_mutex);
+ mutex_lock(&data->mtx);
+ /*
+ * start getting events from elsewhere but hold mtx to get
+ * startup events added first
+ */
+ list_add(&data->list, &rfkill_fds);
+
+ list_for_each_entry(rfkill, &rfkill_list, node) {
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ goto free;
+ rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
+ list_add_tail(&ev->list, &data->events);
+ }
+ mutex_unlock(&data->mtx);
+ mutex_unlock(&rfkill_global_mutex);
+
+ file->private_data = data;
+
+ return nonseekable_open(inode, file);
+
+ free:
+ mutex_unlock(&data->mtx);
+ mutex_unlock(&rfkill_global_mutex);
+ mutex_destroy(&data->mtx);
+ list_for_each_entry_safe(ev, tmp, &data->events, list)
+ kfree(ev);
+ kfree(data);
+ return -ENOMEM;
+}
+
+static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
+{
+ struct rfkill_data *data = file->private_data;
+ unsigned int res = POLLOUT | POLLWRNORM;
+
+ poll_wait(file, &data->read_wait, wait);
+
+ mutex_lock(&data->mtx);
+ if (!list_empty(&data->events))
+ res = POLLIN | POLLRDNORM;
+ mutex_unlock(&data->mtx);
+
+ return res;
+}
+
+static bool rfkill_readable(struct rfkill_data *data)
+{
+ bool r;
+
+ mutex_lock(&data->mtx);
+ r = !list_empty(&data->events);
+ mutex_unlock(&data->mtx);
+
+ return r;
+}
+
+static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct rfkill_data *data = file->private_data;
+ struct rfkill_int_event *ev;
+ unsigned long sz;
+ int ret;
+
+ mutex_lock(&data->mtx);
+
+ while (list_empty(&data->events)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ mutex_unlock(&data->mtx);
+ ret = wait_event_interruptible(data->read_wait,
+ rfkill_readable(data));
+ mutex_lock(&data->mtx);
+
+ if (ret)
+ goto out;
+ }
+
+ ev = list_first_entry(&data->events, struct rfkill_int_event,
+ list);
+
+ sz = min_t(unsigned long, sizeof(ev->ev), count);
+ ret = sz;
+ if (copy_to_user(buf, &ev->ev, sz))
+ ret = -EFAULT;
+
+ list_del(&ev->list);
+ kfree(ev);
+ out:
+ mutex_unlock(&data->mtx);
+ return ret;
+}
+
+static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct rfkill *rfkill;
+ struct rfkill_event ev;
+
+ /* we don't need the 'hard' variable but accept it */
+ if (count < sizeof(ev) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(&ev, buf, sizeof(ev) - 1))
+ return -EFAULT;
+
+ if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
+ return -EINVAL;
+
+ if (ev.type >= NUM_RFKILL_TYPES)
+ return -EINVAL;
+
+ mutex_lock(&rfkill_global_mutex);
+
+ if (ev.op == RFKILL_OP_CHANGE_ALL) {
+ if (ev.type == RFKILL_TYPE_ALL) {
+ enum rfkill_type i;
+ for (i = 0; i < NUM_RFKILL_TYPES; i++)
+ rfkill_global_states[i].cur = ev.soft;
+ } else {
+ rfkill_global_states[ev.type].cur = ev.soft;
+ }
+ }
+
+ list_for_each_entry(rfkill, &rfkill_list, node) {
+ if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
+ continue;
+
+ if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
+ continue;
+
+ rfkill_set_block(rfkill, ev.soft);
+ }
+ mutex_unlock(&rfkill_global_mutex);
+
+ return count;
+}
+
+static int rfkill_fop_release(struct inode *inode, struct file *file)
+{
+ struct rfkill_data *data = file->private_data;
+ struct rfkill_int_event *ev, *tmp;
+
+ mutex_lock(&rfkill_global_mutex);
+ list_del(&data->list);
+ mutex_unlock(&rfkill_global_mutex);
+
+ mutex_destroy(&data->mtx);
+ list_for_each_entry_safe(ev, tmp, &data->events, list)
+ kfree(ev);
+
+#ifdef CONFIG_RFKILL_INPUT
+ if (data->input_handler)
+ atomic_dec(&rfkill_input_disabled);
+#endif
+
+ kfree(data);
+
+ return 0;
+}
+
+#ifdef CONFIG_RFKILL_INPUT
+static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct rfkill_data *data = file->private_data;
+
+ if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
+ return -ENOSYS;
+
+ if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
+ return -ENOSYS;
+
+ mutex_lock(&data->mtx);
+
+ if (!data->input_handler) {
+ atomic_inc(&rfkill_input_disabled);
+ data->input_handler = true;
+ }
+
+ mutex_unlock(&data->mtx);
+
+ return 0;
+}
+#endif
+
+static const struct file_operations rfkill_fops = {
+ .open = rfkill_fop_open,
+ .read = rfkill_fop_read,
+ .write = rfkill_fop_write,
+ .poll = rfkill_fop_poll,
+ .release = rfkill_fop_release,
+#ifdef CONFIG_RFKILL_INPUT
+ .unlocked_ioctl = rfkill_fop_ioctl,
+ .compat_ioctl = rfkill_fop_ioctl,
+#endif
+};
+
+static struct miscdevice rfkill_miscdev = {
+ .name = "rfkill",
+ .fops = &rfkill_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+};
static int __init rfkill_init(void)
{
@@ -875,10 +1185,19 @@ static int __init rfkill_init(void)
if (error)
goto out;
+ error = misc_register(&rfkill_miscdev);
+ if (error) {
+ class_unregister(&rfkill_class);
+ goto out;
+ }
+
#ifdef CONFIG_RFKILL_INPUT
error = rfkill_handler_init();
- if (error)
+ if (error) {
+ misc_deregister(&rfkill_miscdev);
class_unregister(&rfkill_class);
+ goto out;
+ }
#endif
out:
@@ -891,6 +1210,7 @@ static void __exit rfkill_exit(void)
#ifdef CONFIG_RFKILL_INPUT
rfkill_handler_exit();
#endif
+ misc_deregister(&rfkill_miscdev);
class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);
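
For context, here is a minimal userspace sketch (not part of the patch) exercising the new chardev: it soft-blocks switch index 0 and then prints queued events. The struct layout and the RFKILL_OP_CHANGE value are assumptions consistent with the sketch above; real code should include the rfkill uapi header rather than redefining them.

/* Hypothetical /dev/rfkill client: soft-block switch 0, then dump events. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

struct rfkill_event {			/* assumed layout, see note above */
	uint32_t idx;
	uint8_t  type, op, soft, hard;
};

int main(void)
{
	struct rfkill_event ev;
	int fd = open("/dev/rfkill", O_RDWR);

	if (fd < 0) {
		perror("open /dev/rfkill");
		return 1;
	}

	/* Soft-block switch 0 with a CHANGE event.  type 0 acts as
	 * RFKILL_TYPE_ALL, so only the index has to match; op value 2
	 * for RFKILL_OP_CHANGE is an assumption here. */
	memset(&ev, 0, sizeof(ev));
	ev.idx = 0;
	ev.op = 2;			/* RFKILL_OP_CHANGE (assumed value) */
	ev.soft = 1;
	if (write(fd, &ev, sizeof(ev)) < 0)
		perror("write");

	/* On open, one ADD event per registered switch is already queued;
	 * each read() returns one event and blocks unless O_NONBLOCK. */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("idx=%u type=%u op=%u soft=%u hard=%u\n",
		       ev.idx, ev.type, ev.op, ev.soft, ev.hard);

	close(fd);
	return 0;
}
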