Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/Kconfig          |   16
-rw-r--r--  net/bluetooth/Makefile         |    4
-rw-r--r--  net/bluetooth/af_bluetooth.c   |   53
-rw-r--r--  net/bluetooth/bnep/core.c      |    2
-rw-r--r--  net/bluetooth/bnep/sock.c      |    1
-rw-r--r--  net/bluetooth/cmtp/capi.c      |    3
-rw-r--r--  net/bluetooth/cmtp/core.c      |   11
-rw-r--r--  net/bluetooth/hci_conn.c       |   78
-rw-r--r--  net/bluetooth/hci_core.c       |  345
-rw-r--r--  net/bluetooth/hci_event.c      |  622
-rw-r--r--  net/bluetooth/hci_sock.c       |    6
-rw-r--r--  net/bluetooth/hci_sysfs.c      |   58
-rw-r--r--  net/bluetooth/hidp/core.c      |   11
-rw-r--r--  net/bluetooth/l2cap_core.c (renamed from net/bluetooth/l2cap.c) | 1508
-rw-r--r--  net/bluetooth/l2cap_sock.c     | 1156
-rw-r--r--  net/bluetooth/mgmt.c           | 1250
-rw-r--r--  net/bluetooth/rfcomm/core.c    |    2
-rw-r--r--  net/bluetooth/rfcomm/tty.c     |    2
-rw-r--r--  net/bluetooth/sco.c            |   17
-rw-r--r--  net/mac80211/Kconfig           |    2
-rw-r--r--  net/mac80211/cfg.c             |    1
-rw-r--r--  net/mac80211/debugfs.c         |    6
-rw-r--r--  net/mac80211/ibss.c            |   11
-rw-r--r--  net/mac80211/ieee80211_i.h     |    2
-rw-r--r--  net/mac80211/main.c            |   33
-rw-r--r--  net/mac80211/mlme.c            |    8
-rw-r--r--  net/mac80211/rx.c              |   12
-rw-r--r--  net/mac80211/scan.c            |   15
-rw-r--r--  net/mac80211/tx.c              |    1
-rw-r--r--  net/mac80211/util.c            |    6
-rw-r--r--  net/mac80211/work.c            |   59
31 files changed, 3849 insertions(+), 1452 deletions(-)
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index ed371684c133..c6f9c2fb4891 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -27,12 +27,12 @@ menuconfig BT
compile it as module (bluetooth).
To use Linux Bluetooth subsystem, you will need several user-space
- utilities like hciconfig and hcid. These utilities and updates to
- Bluetooth kernel modules are provided in the BlueZ packages.
- For more information, see <http://www.bluez.org/>.
+ utilities like hciconfig and bluetoothd. These utilities and updates
+ to Bluetooth kernel modules are provided in the BlueZ packages. For
+ more information, see <http://www.bluez.org/>.
config BT_L2CAP
- tristate "L2CAP protocol support"
+ bool "L2CAP protocol support"
depends on BT
select CRC16
help
@@ -40,19 +40,13 @@ config BT_L2CAP
connection oriented and connection-less data transport. L2CAP
support is required for most Bluetooth applications.
- Say Y here to compile L2CAP support into the kernel or say M to
- compile it as module (l2cap).
-
config BT_SCO
- tristate "SCO links support"
+ bool "SCO links support"
depends on BT
help
SCO link provides voice transport over Bluetooth. SCO support is
required for voice applications like Headset and Audio.
- Say Y here to compile SCO support into the kernel or say M to
- compile it as module (sco).
-
source "net/bluetooth/rfcomm/Kconfig"
source "net/bluetooth/bnep/Kconfig"
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 250f954f0213..f04fe9a9d634 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -3,11 +3,11 @@
#
obj-$(CONFIG_BT) += bluetooth.o
-obj-$(CONFIG_BT_L2CAP) += l2cap.o
-obj-$(CONFIG_BT_SCO) += sco.o
obj-$(CONFIG_BT_RFCOMM) += rfcomm/
obj-$(CONFIG_BT_BNEP) += bnep/
obj-$(CONFIG_BT_CMTP) += cmtp/
obj-$(CONFIG_BT_HIDP) += hidp/
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
+bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o
+bluetooth-$(CONFIG_BT_SCO) += sco.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index c4cf3f595004..88af9eb9aa48 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -40,7 +40,7 @@
#include <net/bluetooth/bluetooth.h>
-#define VERSION "2.15"
+#define VERSION "2.16"
/* Bluetooth sockets */
#define BT_MAX_PROTO 8
@@ -199,14 +199,15 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
BT_DBG("parent %p", parent);
+ local_bh_disable();
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
- lock_sock(sk);
+ bh_lock_sock(sk);
/* FIXME: Is this check still needed */
if (sk->sk_state == BT_CLOSED) {
- release_sock(sk);
+ bh_unlock_sock(sk);
bt_accept_unlink(sk);
continue;
}
@@ -216,12 +217,16 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
- release_sock(sk);
+
+ bh_unlock_sock(sk);
+ local_bh_enable();
return sk;
}
- release_sock(sk);
+ bh_unlock_sock(sk);
}
+ local_bh_enable();
+
return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);
@@ -240,7 +245,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
- if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) {
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
return err;
@@ -323,7 +329,8 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if (copied >= target)
break;
- if ((err = sock_error(sk)) != 0)
+ err = sock_error(sk);
+ if (err)
break;
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
@@ -390,7 +397,7 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
return 0;
}
-unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait)
+unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask = 0;
@@ -538,13 +545,41 @@ static int __init bt_init(void)
BT_INFO("HCI device and connection manager initialized");
- hci_sock_init();
+ err = hci_sock_init();
+ if (err < 0)
+ goto error;
+
+ err = l2cap_init();
+ if (err < 0) {
+ hci_sock_cleanup();
+ goto sock_err;
+ }
+
+ err = sco_init();
+ if (err < 0) {
+ l2cap_exit();
+ goto sock_err;
+ }
return 0;
+
+sock_err:
+ hci_sock_cleanup();
+
+error:
+ sock_unregister(PF_BLUETOOTH);
+ bt_sysfs_cleanup();
+
+ return err;
}
static void __exit bt_exit(void)
{
+
+ sco_exit();
+
+ l2cap_exit();
+
hci_sock_cleanup();
sock_unregister(PF_BLUETOOTH);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 5868597534e5..03d4d1245d58 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -708,8 +708,6 @@ static int __init bnep_init(void)
{
char flt[50] = "";
- l2cap_load();
-
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
strcat(flt, "protocol ");
#endif
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 2862f53b66b1..d935da71ab3b 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -88,6 +88,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
sockfd_put(nsock);
return -EBADFD;
}
+ ca.device[sizeof(ca.device)-1] = 0;
err = bnep_add_connection(&ca, nsock);
if (!err) {
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 3487cfe74aec..67cff810c77d 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -155,7 +155,8 @@ static void cmtp_send_interopmsg(struct cmtp_session *session,
BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum);
- if (!(skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC))) {
+ skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC);
+ if (!skb) {
BT_ERR("Can't allocate memory for interoperability packet");
return;
}
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 8e5f292529ac..964ea9126f9f 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -115,7 +115,8 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const
size = (skb) ? skb->len + count : count;
- if (!(nskb = alloc_skb(size, GFP_ATOMIC))) {
+ nskb = alloc_skb(size, GFP_ATOMIC);
+ if (!nskb) {
BT_ERR("Can't allocate memory for CAPI message");
return;
}
@@ -216,7 +217,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
BT_DBG("session %p", session);
- if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) {
+ nskb = alloc_skb(session->mtu, GFP_ATOMIC);
+ if (!nskb) {
BT_ERR("Can't allocate memory for new frame");
return;
}
@@ -224,7 +226,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
while ((skb = skb_dequeue(&session->transmit))) {
struct cmtp_scb *scb = (void *) skb->cb;
- if ((tail = (session->mtu - nskb->len)) < 5) {
+ tail = session->mtu - nskb->len;
+ if (tail < 5) {
cmtp_send_frame(session, nskb->data, nskb->len);
skb_trim(nskb, 0);
tail = session->mtu;
@@ -466,8 +469,6 @@ int cmtp_get_conninfo(struct cmtp_conninfo *ci)
static int __init cmtp_init(void)
{
- l2cap_load();
-
BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION);
cmtp_init_sockets();
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 99cd8d9d891b..a050a6984901 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -45,6 +45,33 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+static void hci_le_connect(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct hci_cp_le_create_conn cp;
+
+ conn->state = BT_CONNECT;
+ conn->out = 1;
+ conn->link_mode |= HCI_LM_MASTER;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.scan_interval = cpu_to_le16(0x0004);
+ cp.scan_window = cpu_to_le16(0x0004);
+ bacpy(&cp.peer_addr, &conn->dst);
+ cp.conn_interval_min = cpu_to_le16(0x0008);
+ cp.conn_interval_max = cpu_to_le16(0x0100);
+ cp.supervision_timeout = cpu_to_le16(0x0064);
+ cp.min_ce_len = cpu_to_le16(0x0001);
+ cp.max_ce_len = cpu_to_le16(0x0001);
+
+ hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
+}
+
+static void hci_le_connect_cancel(struct hci_conn *conn)
+{
+ hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
+}
+
void hci_acl_connect(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
@@ -156,6 +183,26 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}
+void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
+ u16 latency, u16 to_multiplier)
+{
+ struct hci_cp_le_conn_update cp;
+ struct hci_dev *hdev = conn->hdev;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.conn_interval_min = cpu_to_le16(min);
+ cp.conn_interval_max = cpu_to_le16(max);
+ cp.conn_latency = cpu_to_le16(latency);
+ cp.supervision_timeout = cpu_to_le16(to_multiplier);
+ cp.min_ce_len = cpu_to_le16(0x0001);
+ cp.max_ce_len = cpu_to_le16(0x0001);
+
+ hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
+}
+EXPORT_SYMBOL(hci_le_conn_update);
+
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
@@ -193,8 +240,12 @@ static void hci_conn_timeout(unsigned long arg)
switch (conn->state) {
case BT_CONNECT:
case BT_CONNECT2:
- if (conn->type == ACL_LINK && conn->out)
- hci_acl_connect_cancel(conn);
+ if (conn->out) {
+ if (conn->type == ACL_LINK)
+ hci_acl_connect_cancel(conn);
+ else if (conn->type == LE_LINK)
+ hci_le_connect_cancel(conn);
+ }
break;
case BT_CONFIG:
case BT_CONNECTED:
@@ -234,6 +285,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
conn->mode = HCI_CM_ACTIVE;
conn->state = BT_OPEN;
conn->auth_type = HCI_AT_GENERAL_BONDING;
+ conn->io_capability = hdev->io_capability;
conn->power_save = 1;
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -295,6 +347,11 @@ int hci_conn_del(struct hci_conn *conn)
/* Unacked frames */
hdev->acl_cnt += conn->sent;
+ } else if (conn->type == LE_LINK) {
+ if (hdev->le_pkts)
+ hdev->le_cnt += conn->sent;
+ else
+ hdev->acl_cnt += conn->sent;
} else {
struct hci_conn *acl = conn->link;
if (acl) {
@@ -360,15 +417,30 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
}
EXPORT_SYMBOL(hci_get_route);
-/* Create SCO or ACL connection.
+/* Create SCO, ACL or LE connection.
* Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
struct hci_conn *acl;
struct hci_conn *sco;
+ struct hci_conn *le;
BT_DBG("%s dst %s", hdev->name, batostr(dst));
+ if (type == LE_LINK) {
+ le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+ if (!le)
+ le = hci_conn_add(hdev, LE_LINK, dst);
+ if (!le)
+ return NULL;
+ if (le->state == BT_OPEN)
+ hci_le_connect(le);
+
+ hci_conn_hold(le);
+
+ return le;
+ }
+
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
acl = hci_conn_add(hdev, ACL_LINK, dst);
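For orientation, the hard-coded parameters passed to HCI_OP_LE_CREATE_CONN in the new hci_le_connect() above translate to the following approximate real-time values, assuming the usual LE unit sizes (0.625 ms per scan-interval/CE-length step, 1.25 ms per connection-interval step, 10 ms per supervision-timeout step); the macro names below are illustrative only, not part of this patch:

/* Illustrative decoding of the constants used in hci_le_connect();
 * names are hypothetical, values match the patch above. */
#define LE_SCAN_INTERVAL       0x0004  /*   4 * 0.625 ms =   2.5 ms */
#define LE_SCAN_WINDOW         0x0004  /*   4 * 0.625 ms =   2.5 ms */
#define LE_CONN_INTERVAL_MIN   0x0008  /*   8 * 1.25 ms  =  10 ms   */
#define LE_CONN_INTERVAL_MAX   0x0100  /* 256 * 1.25 ms  = 320 ms   */
#define LE_SUPERVISION_TIMEOUT 0x0064  /* 100 * 10 ms    =   1 s    */
#define LE_CE_LEN_MIN_MAX      0x0001  /*   1 * 0.625 ms = 0.625 ms */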
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 9c4541bc488a..b372fb8bcdcf 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -41,6 +41,7 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
+#include <linux/timer.h>
#include <net/sock.h>
#include <asm/system.h>
@@ -50,6 +51,8 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#define AUTO_OFF_TIMEOUT 2000
+
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
@@ -95,11 +98,10 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
- /* If the request has set req_last_cmd (typical for multi-HCI
- * command requests) check if the completed command matches
- * this, and if not just return. Single HCI command requests
- * typically leave req_last_cmd as 0 */
- if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
+ /* If this is the init phase check if the completed command matches
+ * the last init command, and if not just return.
+ */
+ if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
return;
if (hdev->req_status == HCI_REQ_PEND) {
@@ -122,7 +124,7 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+ unsigned long opt, __u32 timeout)
{
DECLARE_WAITQUEUE(wait, current);
int err = 0;
@@ -156,7 +158,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
break;
}
- hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;
+ hdev->req_status = hdev->req_result = 0;
BT_DBG("%s end: err %d", hdev->name, err);
@@ -164,7 +166,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+ unsigned long opt, __u32 timeout)
{
int ret;
@@ -189,6 +191,7 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
+ struct hci_cp_delete_stored_link_key cp;
struct sk_buff *skb;
__le16 param;
__u8 flt_type;
@@ -252,15 +255,21 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
flt_type = HCI_FLT_CLEAR_ALL;
hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
- /* Page timeout ~20 secs */
- param = cpu_to_le16(0x8000);
- hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
-
/* Connection accept timeout ~20 secs */
param = cpu_to_le16(0x7d00);
hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
- hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
+ bacpy(&cp.bdaddr, BDADDR_ANY);
+ cp.delete_all = 1;
+ hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
+}
+
+static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
+{
+ BT_DBG("%s", hdev->name);
+
+ /* Read LE buffer size */
+ hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
@@ -429,7 +438,8 @@ int hci_inquiry(void __user *arg)
if (copy_from_user(&ir, ptr, sizeof(ir)))
return -EFAULT;
- if (!(hdev = hci_dev_get(ir.dev_id)))
+ hdev = hci_dev_get(ir.dev_id);
+ if (!hdev)
return -ENODEV;
hci_dev_lock_bh(hdev);
@@ -455,7 +465,7 @@ int hci_inquiry(void __user *arg)
/* cache_dump can't sleep. Therefore we allocate temp buffer and then
* copy it to the user space.
*/
- buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL);
+ buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto done;
@@ -489,7 +499,8 @@ int hci_dev_open(__u16 dev)
struct hci_dev *hdev;
int ret = 0;
- if (!(hdev = hci_dev_get(dev)))
+ hdev = hci_dev_get(dev);
+ if (!hdev)
return -ENODEV;
BT_DBG("%s %p", hdev->name, hdev);
@@ -521,11 +532,15 @@ int hci_dev_open(__u16 dev)
if (!test_bit(HCI_RAW, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
+ hdev->init_last_cmd = 0;
- //__hci_request(hdev, hci_reset_req, 0, HZ);
ret = __hci_request(hdev, hci_init_req, 0,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ if (lmp_le_capable(hdev))
+ ret = __hci_request(hdev, hci_le_init_req, 0,
+ msecs_to_jiffies(HCI_INIT_TIMEOUT));
+
clear_bit(HCI_INIT, &hdev->flags);
}
@@ -533,6 +548,8 @@ int hci_dev_open(__u16 dev)
hci_dev_hold(hdev);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
+ if (!test_bit(HCI_SETUP, &hdev->flags))
+ mgmt_powered(hdev->id, 1);
} else {
/* Init failed, cleanup */
tasklet_kill(&hdev->rx_task);
@@ -606,6 +623,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
/* Drop last sent command */
if (hdev->sent_cmd) {
+ del_timer_sync(&hdev->cmd_timer);
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = NULL;
}
@@ -614,6 +632,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
* and no tasks are scheduled. */
hdev->close(hdev);
+ mgmt_powered(hdev->id, 0);
+
/* Clear flags */
hdev->flags = 0;
@@ -664,7 +684,7 @@ int hci_dev_reset(__u16 dev)
hdev->flush(hdev);
atomic_set(&hdev->cmd_cnt, 1);
- hdev->acl_cnt = 0; hdev->sco_cnt = 0;
+ hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
if (!test_bit(HCI_RAW, &hdev->flags))
ret = __hci_request(hdev, hci_reset_req, 0,
@@ -793,9 +813,17 @@ int hci_get_dev_list(void __user *arg)
read_lock_bh(&hci_dev_list_lock);
list_for_each(p, &hci_dev_list) {
struct hci_dev *hdev;
+
hdev = list_entry(p, struct hci_dev, list);
+
+ hci_del_off_timer(hdev);
+
+ if (!test_bit(HCI_MGMT, &hdev->flags))
+ set_bit(HCI_PAIRABLE, &hdev->flags);
+
(dr + n)->dev_id = hdev->id;
(dr + n)->dev_opt = hdev->flags;
+
if (++n >= dev_num)
break;
}
@@ -823,6 +851,11 @@ int hci_get_dev_info(void __user *arg)
if (!hdev)
return -ENODEV;
+ hci_del_off_timer(hdev);
+
+ if (!test_bit(HCI_MGMT, &hdev->flags))
+ set_bit(HCI_PAIRABLE, &hdev->flags);
+
strcpy(di.name, hdev->name);
di.bdaddr = hdev->bdaddr;
di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
@@ -891,6 +924,159 @@ void hci_free_dev(struct hci_dev *hdev)
}
EXPORT_SYMBOL(hci_free_dev);
+static void hci_power_on(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
+
+ BT_DBG("%s", hdev->name);
+
+ if (hci_dev_open(hdev->id) < 0)
+ return;
+
+ if (test_bit(HCI_AUTO_OFF, &hdev->flags))
+ mod_timer(&hdev->off_timer,
+ jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+
+ if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
+ mgmt_index_added(hdev->id);
+}
+
+static void hci_power_off(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_close(hdev->id);
+}
+
+static void hci_auto_off(unsigned long data)
+{
+ struct hci_dev *hdev = (struct hci_dev *) data;
+
+ BT_DBG("%s", hdev->name);
+
+ clear_bit(HCI_AUTO_OFF, &hdev->flags);
+
+ queue_work(hdev->workqueue, &hdev->power_off);
+}
+
+void hci_del_off_timer(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ clear_bit(HCI_AUTO_OFF, &hdev->flags);
+ del_timer(&hdev->off_timer);
+}
+
+int hci_uuids_clear(struct hci_dev *hdev)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &hdev->uuids) {
+ struct bt_uuid *uuid;
+
+ uuid = list_entry(p, struct bt_uuid, list);
+
+ list_del(p);
+ kfree(uuid);
+ }
+
+ return 0;
+}
+
+int hci_link_keys_clear(struct hci_dev *hdev)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &hdev->link_keys) {
+ struct link_key *key;
+
+ key = list_entry(p, struct link_key, list);
+
+ list_del(p);
+ kfree(key);
+ }
+
+ return 0;
+}
+
+struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct list_head *p;
+
+ list_for_each(p, &hdev->link_keys) {
+ struct link_key *k;
+
+ k = list_entry(p, struct link_key, list);
+
+ if (bacmp(bdaddr, &k->bdaddr) == 0)
+ return k;
+ }
+
+ return NULL;
+}
+
+int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+ u8 *val, u8 type, u8 pin_len)
+{
+ struct link_key *key, *old_key;
+ u8 old_key_type;
+
+ old_key = hci_find_link_key(hdev, bdaddr);
+ if (old_key) {
+ old_key_type = old_key->type;
+ key = old_key;
+ } else {
+ old_key_type = 0xff;
+ key = kzalloc(sizeof(*key), GFP_ATOMIC);
+ if (!key)
+ return -ENOMEM;
+ list_add(&key->list, &hdev->link_keys);
+ }
+
+ BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
+
+ bacpy(&key->bdaddr, bdaddr);
+ memcpy(key->val, val, 16);
+ key->type = type;
+ key->pin_len = pin_len;
+
+ if (new_key)
+ mgmt_new_key(hdev->id, key, old_key_type);
+
+ if (type == 0x06)
+ key->type = old_key_type;
+
+ return 0;
+}
+
+int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct link_key *key;
+
+ key = hci_find_link_key(hdev, bdaddr);
+ if (!key)
+ return -ENOENT;
+
+ BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+
+ list_del(&key->list);
+ kfree(key);
+
+ return 0;
+}
+
+/* HCI command timer function */
+static void hci_cmd_timer(unsigned long arg)
+{
+ struct hci_dev *hdev = (void *) arg;
+
+ BT_ERR("%s command tx timeout", hdev->name);
+ atomic_set(&hdev->cmd_cnt, 1);
+ tasklet_schedule(&hdev->cmd_task);
+}
+
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
@@ -923,6 +1109,7 @@ int hci_register_dev(struct hci_dev *hdev)
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
+ hdev->io_capability = 0x03; /* No Input No Output */
hdev->idle_timeout = 0;
hdev->sniff_max_interval = 800;
@@ -936,6 +1123,8 @@ int hci_register_dev(struct hci_dev *hdev)
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
+ setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
+
for (i = 0; i < NUM_REASSEMBLY; i++)
hdev->reassembly[i] = NULL;
@@ -948,6 +1137,14 @@ int hci_register_dev(struct hci_dev *hdev)
INIT_LIST_HEAD(&hdev->blacklist);
+ INIT_LIST_HEAD(&hdev->uuids);
+
+ INIT_LIST_HEAD(&hdev->link_keys);
+
+ INIT_WORK(&hdev->power_on, hci_power_on);
+ INIT_WORK(&hdev->power_off, hci_power_off);
+ setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
atomic_set(&hdev->promisc, 0);
@@ -969,7 +1166,10 @@ int hci_register_dev(struct hci_dev *hdev)
}
}
- mgmt_index_added(hdev->id);
+ set_bit(HCI_AUTO_OFF, &hdev->flags);
+ set_bit(HCI_SETUP, &hdev->flags);
+ queue_work(hdev->workqueue, &hdev->power_on);
+
hci_notify(hdev, HCI_DEV_REG);
return id;
@@ -999,7 +1199,10 @@ int hci_unregister_dev(struct hci_dev *hdev)
for (i = 0; i < NUM_REASSEMBLY; i++)
kfree_skb(hdev->reassembly[i]);
- mgmt_index_removed(hdev->id);
+ if (!test_bit(HCI_INIT, &hdev->flags) &&
+ !test_bit(HCI_SETUP, &hdev->flags))
+ mgmt_index_removed(hdev->id);
+
hci_notify(hdev, HCI_DEV_UNREG);
if (hdev->rfkill) {
@@ -1009,10 +1212,14 @@ int hci_unregister_dev(struct hci_dev *hdev)
hci_unregister_sysfs(hdev);
+ hci_del_off_timer(hdev);
+
destroy_workqueue(hdev->workqueue);
hci_dev_lock_bh(hdev);
hci_blacklist_clear(hdev);
+ hci_uuids_clear(hdev);
+ hci_link_keys_clear(hdev);
hci_dev_unlock_bh(hdev);
__hci_dev_put(hdev);
@@ -1313,7 +1520,7 @@ static int hci_send_frame(struct sk_buff *skb)
/* Time stamp */
__net_timestamp(skb);
- hci_send_to_sock(hdev, skb);
+ hci_send_to_sock(hdev, skb, NULL);
}
/* Get rid of skb owner, prior to sending to the driver. */
@@ -1349,6 +1556,9 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
skb->dev = (void *) hdev;
+ if (test_bit(HCI_INIT, &hdev->flags))
+ hdev->init_last_cmd = opcode;
+
skb_queue_tail(&hdev->cmd_q, skb);
tasklet_schedule(&hdev->cmd_task);
@@ -1395,7 +1605,7 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
+ hci_add_acl_hdr(skb, conn->handle, flags);
list = skb_shinfo(skb)->frag_list;
if (!list) {
@@ -1413,12 +1623,15 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
spin_lock_bh(&conn->data_q.lock);
__skb_queue_tail(&conn->data_q, skb);
+
+ flags &= ~ACL_START;
+ flags |= ACL_CONT;
do {
skb = list; list = list->next;
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
+ hci_add_acl_hdr(skb, conn->handle, flags);
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1486,8 +1699,25 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
}
if (conn) {
- int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
- int q = cnt / num;
+ int cnt, q;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ cnt = hdev->acl_cnt;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ cnt = hdev->sco_cnt;
+ break;
+ case LE_LINK:
+ cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+ break;
+ default:
+ cnt = 0;
+ BT_ERR("Unknown link type");
+ }
+
+ q = cnt / num;
*quote = q ? q : 1;
} else
*quote = 0;
@@ -1496,19 +1726,19 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
return conn;
}
-static inline void hci_acl_tx_to(struct hci_dev *hdev)
+static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct list_head *p;
struct hci_conn *c;
- BT_ERR("%s ACL tx timeout", hdev->name);
+ BT_ERR("%s link tx timeout", hdev->name);
/* Kill stalled connections */
list_for_each(p, &h->list) {
c = list_entry(p, struct hci_conn, list);
- if (c->type == ACL_LINK && c->sent) {
- BT_ERR("%s killing stalled ACL connection %s",
+ if (c->type == type && c->sent) {
+ BT_ERR("%s killing stalled connection %s",
hdev->name, batostr(&c->dst));
hci_acl_disconn(c, 0x13);
}
@@ -1527,7 +1757,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
- hci_acl_tx_to(hdev);
+ hci_link_tx_to(hdev, ACL_LINK);
}
while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
@@ -1586,6 +1816,40 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
}
}
+static inline void hci_sched_le(struct hci_dev *hdev)
+{
+ struct hci_conn *conn;
+ struct sk_buff *skb;
+ int quote, cnt;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_RAW, &hdev->flags)) {
+ /* LE tx timeout must be longer than maximum
+ * link supervision timeout (40.9 seconds) */
+ if (!hdev->le_cnt && hdev->le_pkts &&
+ time_after(jiffies, hdev->le_last_tx + HZ * 45))
+ hci_link_tx_to(hdev, LE_LINK);
+ }
+
+ cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
+ while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
+ while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+ BT_DBG("skb %p len %d", skb, skb->len);
+
+ hci_send_frame(skb);
+ hdev->le_last_tx = jiffies;
+
+ cnt--;
+ conn->sent++;
+ }
+ }
+ if (hdev->le_pkts)
+ hdev->le_cnt = cnt;
+ else
+ hdev->acl_cnt = cnt;
+}
+
static void hci_tx_task(unsigned long arg)
{
struct hci_dev *hdev = (struct hci_dev *) arg;
@@ -1593,7 +1857,8 @@ static void hci_tx_task(unsigned long arg)
read_lock(&hci_task_lock);
- BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
+ BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
+ hdev->sco_cnt, hdev->le_cnt);
/* Schedule queues and send stuff to HCI driver */
@@ -1603,6 +1868,8 @@ static void hci_tx_task(unsigned long arg)
hci_sched_esco(hdev);
+ hci_sched_le(hdev);
+
/* Send next queued raw (unknown type) packet */
while ((skb = skb_dequeue(&hdev->raw_q)))
hci_send_frame(skb);
@@ -1700,7 +1967,7 @@ static void hci_rx_task(unsigned long arg)
while ((skb = skb_dequeue(&hdev->rx_q))) {
if (atomic_read(&hdev->promisc)) {
/* Send copy to the sockets */
- hci_send_to_sock(hdev, skb);
+ hci_send_to_sock(hdev, skb, NULL);
}
if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -1750,20 +2017,20 @@ static void hci_cmd_task(unsigned long arg)
BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
- if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
- BT_ERR("%s command tx timeout", hdev->name);
- atomic_set(&hdev->cmd_cnt, 1);
- }
-
/* Send queued commands */
- if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
+ if (atomic_read(&hdev->cmd_cnt)) {
+ skb = skb_dequeue(&hdev->cmd_q);
+ if (!skb)
+ return;
+
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
if (hdev->sent_cmd) {
atomic_dec(&hdev->cmd_cnt);
hci_send_frame(skb);
- hdev->cmd_last_tx = jiffies;
+ mod_timer(&hdev->cmd_timer,
+ jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
} else {
skb_queue_head(&hdev->cmd_q, skb);
tasklet_schedule(&hdev->cmd_task);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index a290854fdaa6..98b5764e4315 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -274,15 +274,24 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (!status) {
__u8 param = *((__u8 *) sent);
+ int old_pscan, old_iscan;
- clear_bit(HCI_PSCAN, &hdev->flags);
- clear_bit(HCI_ISCAN, &hdev->flags);
+ old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+ old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
- if (param & SCAN_INQUIRY)
+ if (param & SCAN_INQUIRY) {
set_bit(HCI_ISCAN, &hdev->flags);
+ if (!old_iscan)
+ mgmt_discoverable(hdev->id, 1);
+ } else if (old_iscan)
+ mgmt_discoverable(hdev->id, 0);
- if (param & SCAN_PAGE)
+ if (param & SCAN_PAGE) {
set_bit(HCI_PSCAN, &hdev->flags);
+ if (!old_pscan)
+ mgmt_connectable(hdev->id, 1);
+ } else if (old_pscan)
+ mgmt_connectable(hdev->id, 0);
}
hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
@@ -415,6 +424,115 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
hdev->ssp_mode = *((__u8 *) sent);
}
+static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
+{
+ if (hdev->features[6] & LMP_EXT_INQ)
+ return 2;
+
+ if (hdev->features[3] & LMP_RSSI_INQ)
+ return 1;
+
+ if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
+ hdev->lmp_subver == 0x0757)
+ return 1;
+
+ if (hdev->manufacturer == 15) {
+ if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
+ return 1;
+ if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
+ return 1;
+ if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
+ return 1;
+ }
+
+ if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
+ hdev->lmp_subver == 0x1805)
+ return 1;
+
+ return 0;
+}
+
+static void hci_setup_inquiry_mode(struct hci_dev *hdev)
+{
+ u8 mode;
+
+ mode = hci_get_inquiry_mode(hdev);
+
+ hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
+}
+
+static void hci_setup_event_mask(struct hci_dev *hdev)
+{
+ /* The second byte is 0xff instead of 0x9f (two reserved bits
+ * disabled) since a Broadcom 1.2 dongle doesn't respond to the
+ * command otherwise */
+ u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+ /* Events for 1.2 and newer controllers */
+ if (hdev->lmp_ver > 1) {
+ events[4] |= 0x01; /* Flow Specification Complete */
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
+ events[4] |= 0x04; /* Read Remote Extended Features Complete */
+ events[5] |= 0x08; /* Synchronous Connection Complete */
+ events[5] |= 0x10; /* Synchronous Connection Changed */
+ }
+
+ if (hdev->features[3] & LMP_RSSI_INQ)
+ events[4] |= 0x04; /* Inquiry Result with RSSI */
+
+ if (hdev->features[5] & LMP_SNIFF_SUBR)
+ events[5] |= 0x20; /* Sniff Subrating */
+
+ if (hdev->features[5] & LMP_PAUSE_ENC)
+ events[5] |= 0x80; /* Encryption Key Refresh Complete */
+
+ if (hdev->features[6] & LMP_EXT_INQ)
+ events[5] |= 0x40; /* Extended Inquiry Result */
+
+ if (hdev->features[6] & LMP_NO_FLUSH)
+ events[7] |= 0x01; /* Enhanced Flush Complete */
+
+ if (hdev->features[7] & LMP_LSTO)
+ events[6] |= 0x80; /* Link Supervision Timeout Changed */
+
+ if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+ events[6] |= 0x01; /* IO Capability Request */
+ events[6] |= 0x02; /* IO Capability Response */
+ events[6] |= 0x04; /* User Confirmation Request */
+ events[6] |= 0x08; /* User Passkey Request */
+ events[6] |= 0x10; /* Remote OOB Data Request */
+ events[6] |= 0x20; /* Simple Pairing Complete */
+ events[7] |= 0x04; /* User Passkey Notification */
+ events[7] |= 0x08; /* Keypress Notification */
+ events[7] |= 0x10; /* Remote Host Supported
+ * Features Notification */
+ }
+
+ if (hdev->features[4] & LMP_LE)
+ events[7] |= 0x20; /* LE Meta-Event */
+
+ hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
+}
+
+static void hci_setup(struct hci_dev *hdev)
+{
+ hci_setup_event_mask(hdev);
+
+ if (hdev->lmp_ver > 1)
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
+
+ if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+ u8 mode = 0x01;
+ hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
+ }
+
+ if (hdev->features[3] & LMP_RSSI_INQ)
+ hci_setup_inquiry_mode(hdev);
+
+ if (hdev->features[7] & LMP_INQ_TX_PWR)
+ hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
+}
+
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_version *rp = (void *) skb->data;
@@ -426,11 +544,34 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
hdev->hci_ver = rp->hci_ver;
hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
+ hdev->lmp_ver = rp->lmp_ver;
hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
+ hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
hdev->manufacturer,
hdev->hci_ver, hdev->hci_rev);
+
+ if (test_bit(HCI_INIT, &hdev->flags))
+ hci_setup(hdev);
+}
+
+static void hci_setup_link_policy(struct hci_dev *hdev)
+{
+ u16 link_policy = 0;
+
+ if (hdev->features[0] & LMP_RSWITCH)
+ link_policy |= HCI_LP_RSWITCH;
+ if (hdev->features[0] & LMP_HOLD)
+ link_policy |= HCI_LP_HOLD;
+ if (hdev->features[0] & LMP_SNIFF)
+ link_policy |= HCI_LP_SNIFF;
+ if (hdev->features[1] & LMP_PARK)
+ link_policy |= HCI_LP_PARK;
+
+ link_policy = cpu_to_le16(link_policy);
+ hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
+ sizeof(link_policy), &link_policy);
}
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -440,9 +581,15 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
- return;
+ goto done;
memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
+
+ if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
+ hci_setup_link_policy(hdev);
+
+done:
+ hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}
static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
@@ -548,6 +695,107 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
+static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
+}
+
+static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
+}
+
+static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
+}
+
+static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
+}
+
+static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
+}
+
+static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_pin_code_reply *rp = (void *) skb->data;
+ struct hci_cp_pin_code_reply *cp;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
+
+ if (rp->status != 0)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
+ if (!cp)
+ return;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (conn)
+ conn->pin_length = cp->pin_len;
+}
+
+static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
+ rp->status);
+}
+static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
+ hdev->le_pkts = rp->le_max_pkt;
+
+ hdev->le_cnt = hdev->le_pkts;
+
+ BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
+
+ hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
+}
+
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
BT_DBG("%s status 0x%x", hdev->name, status);
@@ -622,11 +870,14 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
hci_dev_lock(hdev);
acl = hci_conn_hash_lookup_handle(hdev, handle);
- if (acl && (sco = acl->link)) {
- sco->state = BT_CLOSED;
+ if (acl) {
+ sco = acl->link;
+ if (sco) {
+ sco->state = BT_CLOSED;
- hci_proto_connect_cfm(sco, status);
- hci_conn_del(sco);
+ hci_proto_connect_cfm(sco, status);
+ hci_conn_del(sco);
+ }
}
hci_dev_unlock(hdev);
@@ -687,7 +938,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
}
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
- struct hci_conn *conn)
+ struct hci_conn *conn)
{
if (conn->state != BT_CONFIG || !conn->out)
return 0;
@@ -808,11 +1059,14 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
hci_dev_lock(hdev);
acl = hci_conn_hash_lookup_handle(hdev, handle);
- if (acl && (sco = acl->link)) {
- sco->state = BT_CLOSED;
+ if (acl) {
+ sco = acl->link;
+ if (sco) {
+ sco->state = BT_CLOSED;
- hci_proto_connect_cfm(sco, status);
- hci_conn_del(sco);
+ hci_proto_connect_cfm(sco, status);
+ hci_conn_del(sco);
+ }
}
hci_dev_unlock(hdev);
@@ -872,6 +1126,43 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
hci_dev_unlock(hdev);
}
+static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_le_create_conn *cp;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
+
+ BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
+ conn);
+
+ if (status) {
+ if (conn && conn->state == BT_CONNECT) {
+ conn->state = BT_CLOSED;
+ hci_proto_connect_cfm(conn, status);
+ hci_conn_del(conn);
+ }
+ } else {
+ if (!conn) {
+ conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
+ if (conn)
+ conn->out = 1;
+ else
+ BT_ERR("No memory for new connection");
+ }
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -942,6 +1233,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
conn->state = BT_CONFIG;
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ mgmt_connected(hdev->id, &ev->bdaddr);
} else
conn->state = BT_CONNECTED;
@@ -970,8 +1262,11 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
sizeof(cp), &cp);
}
- } else
+ } else {
conn->state = BT_CLOSED;
+ if (conn->type == ACL_LINK)
+ mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+ }
if (conn->type == ACL_LINK)
hci_sco_setup(conn, ev->status);
@@ -998,7 +1293,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
- if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
+ if ((mask & HCI_LM_ACCEPT) &&
+ !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
/* Connection accepted */
struct inquiry_entry *ie;
struct hci_conn *conn;
@@ -1068,19 +1364,26 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
BT_DBG("%s status %d", hdev->name, ev->status);
- if (ev->status)
+ if (ev->status) {
+ mgmt_disconnect_failed(hdev->id);
return;
+ }
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (conn) {
- conn->state = BT_CLOSED;
+ if (!conn)
+ goto unlock;
- hci_proto_disconn_cfm(conn, ev->reason);
- hci_conn_del(conn);
- }
+ conn->state = BT_CLOSED;
+
+ if (conn->type == ACL_LINK)
+ mgmt_disconnected(hdev->id, &conn->dst);
+ hci_proto_disconn_cfm(conn, ev->reason);
+ hci_conn_del(conn);
+
+unlock:
hci_dev_unlock(hdev);
}
@@ -1393,11 +1696,46 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_write_ca_timeout(hdev, skb);
break;
+ case HCI_OP_DELETE_STORED_LINK_KEY:
+ hci_cc_delete_stored_link_key(hdev, skb);
+ break;
+
+ case HCI_OP_SET_EVENT_MASK:
+ hci_cc_set_event_mask(hdev, skb);
+ break;
+
+ case HCI_OP_WRITE_INQUIRY_MODE:
+ hci_cc_write_inquiry_mode(hdev, skb);
+ break;
+
+ case HCI_OP_READ_INQ_RSP_TX_POWER:
+ hci_cc_read_inq_rsp_tx_power(hdev, skb);
+ break;
+
+ case HCI_OP_SET_EVENT_FLT:
+ hci_cc_set_event_flt(hdev, skb);
+ break;
+
+ case HCI_OP_PIN_CODE_REPLY:
+ hci_cc_pin_code_reply(hdev, skb);
+ break;
+
+ case HCI_OP_PIN_CODE_NEG_REPLY:
+ hci_cc_pin_code_neg_reply(hdev, skb);
+ break;
+
+ case HCI_OP_LE_READ_BUFFER_SIZE:
+ hci_cc_le_read_buffer_size(hdev, skb);
+ break;
+
default:
BT_DBG("%s opcode 0x%x", hdev->name, opcode);
break;
}
+ if (ev->opcode != HCI_OP_NOP)
+ del_timer(&hdev->cmd_timer);
+
if (ev->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
@@ -1459,11 +1797,23 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cs_exit_sniff_mode(hdev, ev->status);
break;
+ case HCI_OP_DISCONNECT:
+ if (ev->status != 0)
+ mgmt_disconnect_failed(hdev->id);
+ break;
+
+ case HCI_OP_LE_CREATE_CONN:
+ hci_cs_le_create_conn(hdev, ev->status);
+ break;
+
default:
BT_DBG("%s opcode 0x%x", hdev->name, opcode);
break;
}
+ if (ev->opcode != HCI_OP_NOP)
+ del_timer(&hdev->cmd_timer);
+
if (ev->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
@@ -1529,6 +1879,16 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
hdev->acl_cnt += count;
if (hdev->acl_cnt > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
+ } else if (conn->type == LE_LINK) {
+ if (hdev->le_pkts) {
+ hdev->le_cnt += count;
+ if (hdev->le_cnt > hdev->le_pkts)
+ hdev->le_cnt = hdev->le_pkts;
+ } else {
+ hdev->acl_cnt += count;
+ if (hdev->acl_cnt > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
+ }
} else {
hdev->sco_cnt += count;
if (hdev->sco_cnt > hdev->sco_pkts)
@@ -1586,18 +1946,72 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
hci_conn_put(conn);
}
+ if (!test_bit(HCI_PAIRABLE, &hdev->flags))
+ hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
+ sizeof(ev->bdaddr), &ev->bdaddr);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_pin_code_request(hdev->id, &ev->bdaddr);
+
hci_dev_unlock(hdev);
}
static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
+ struct hci_ev_link_key_req *ev = (void *) skb->data;
+ struct hci_cp_link_key_reply cp;
+ struct hci_conn *conn;
+ struct link_key *key;
+
BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
+ return;
+
+ hci_dev_lock(hdev);
+
+ key = hci_find_link_key(hdev, &ev->bdaddr);
+ if (!key) {
+ BT_DBG("%s link key not found for %s", hdev->name,
+ batostr(&ev->bdaddr));
+ goto not_found;
+ }
+
+ BT_DBG("%s found key type %u for %s", hdev->name, key->type,
+ batostr(&ev->bdaddr));
+
+ if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) {
+ BT_DBG("%s ignoring debug key", hdev->name);
+ goto not_found;
+ }
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+
+ if (key->type == 0x04 && conn && conn->auth_type != 0xff &&
+ (conn->auth_type & 0x01)) {
+ BT_DBG("%s ignoring unauthenticated key", hdev->name);
+ goto not_found;
+ }
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ memcpy(cp.link_key, key->val, 16);
+
+ hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
+
+ hci_dev_unlock(hdev);
+
+ return;
+
+not_found:
+ hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
+ hci_dev_unlock(hdev);
}
static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_link_key_notify *ev = (void *) skb->data;
struct hci_conn *conn;
+ u8 pin_len = 0;
BT_DBG("%s", hdev->name);
@@ -1607,9 +2021,14 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
if (conn) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ pin_len = conn->pin_length;
hci_conn_put(conn);
}
+ if (test_bit(HCI_LINK_KEYS, &hdev->flags))
+ hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
+ ev->key_type, pin_len);
+
hci_dev_unlock(hdev);
}
@@ -1683,7 +2102,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
hci_dev_lock(hdev);
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
- struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1);
+ struct inquiry_info_with_rssi_and_pscan_mode *info;
+ info = (void *) (skb->data + 1);
for (; num_rsp; num_rsp--) {
bacpy(&data.bdaddr, &info->bdaddr);
@@ -1824,17 +2244,8 @@ static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buf
static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_sniff_subrate *ev = (void *) skb->data;
- struct hci_conn *conn;
BT_DBG("%s status %d", hdev->name, ev->status);
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (conn) {
- }
-
- hci_dev_unlock(hdev);
}
static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1852,12 +2263,12 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
for (; num_rsp; num_rsp--) {
bacpy(&data.bdaddr, &info->bdaddr);
- data.pscan_rep_mode = info->pscan_rep_mode;
- data.pscan_period_mode = info->pscan_period_mode;
- data.pscan_mode = 0x00;
+ data.pscan_rep_mode = info->pscan_rep_mode;
+ data.pscan_period_mode = info->pscan_period_mode;
+ data.pscan_mode = 0x00;
memcpy(data.dev_class, info->dev_class, 3);
- data.clock_offset = info->clock_offset;
- data.rssi = info->rssi;
+ data.clock_offset = info->clock_offset;
+ data.rssi = info->rssi;
data.ssp_mode = 0x01;
info++;
hci_inquiry_cache_update(hdev, &data);
@@ -1866,6 +2277,25 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
hci_dev_unlock(hdev);
}
+static inline u8 hci_get_auth_req(struct hci_conn *conn)
+{
+ /* If remote requests dedicated bonding follow that lead */
+ if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
+ /* If both remote and local IO capabilities allow MITM
+ * protection then require it, otherwise don't */
+ if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
+ return 0x02;
+ else
+ return 0x03;
+ }
+
+ /* If remote requests no-bonding follow that lead */
+ if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
+ return 0x00;
+
+ return conn->auth_type;
+}
+
static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_io_capa_request *ev = (void *) skb->data;
@@ -1876,9 +2306,59 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn)
- hci_conn_hold(conn);
+ if (!conn)
+ goto unlock;
+
+ hci_conn_hold(conn);
+
+ if (!test_bit(HCI_MGMT, &hdev->flags))
+ goto unlock;
+
+ if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
+ (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
+ struct hci_cp_io_capability_reply cp;
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ cp.capability = conn->io_capability;
+ cp.oob_data = 0;
+ cp.authentication = hci_get_auth_req(conn);
+
+ hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
+ sizeof(cp), &cp);
+ } else {
+ struct hci_cp_io_capability_neg_reply cp;
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ cp.reason = 0x16; /* Pairing not allowed */
+
+ hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
+ sizeof(cp), &cp);
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_io_capa_reply *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (!conn)
+ goto unlock;
+
+ hci_conn_hold(conn);
+
+ conn->remote_cap = ev->capability;
+ conn->remote_oob = ev->oob_data;
+ conn->remote_auth = ev->authentication;
+
+unlock:
hci_dev_unlock(hdev);
}
@@ -1914,6 +2394,60 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
hci_dev_unlock(hdev);
}
+static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_le_conn_complete *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status %d", hdev->name, ev->status);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
+ if (!conn) {
+ conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+ if (!conn) {
+ BT_ERR("No memory for new connection");
+ hci_dev_unlock(hdev);
+ return;
+ }
+ }
+
+ if (ev->status) {
+ hci_proto_connect_cfm(conn, ev->status);
+ conn->state = BT_CLOSED;
+ hci_conn_del(conn);
+ goto unlock;
+ }
+
+ conn->handle = __le16_to_cpu(ev->handle);
+ conn->state = BT_CONNECTED;
+
+ hci_conn_hold_device(conn);
+ hci_conn_add_sysfs(conn);
+
+ hci_proto_connect_cfm(conn, ev->status);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_le_meta *le_ev = (void *) skb->data;
+
+ skb_pull(skb, sizeof(*le_ev));
+
+ switch (le_ev->subevent) {
+ case HCI_EV_LE_CONN_COMPLETE:
+ hci_le_conn_complete_evt(hdev, skb);
+ break;
+
+ default:
+ break;
+ }
+}
+
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
@@ -2042,6 +2576,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_io_capa_request_evt(hdev, skb);
break;
+ case HCI_EV_IO_CAPA_REPLY:
+ hci_io_capa_reply_evt(hdev, skb);
+ break;
+
case HCI_EV_SIMPLE_PAIR_COMPLETE:
hci_simple_pair_complete_evt(hdev, skb);
break;
@@ -2050,6 +2588,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_remote_host_features_evt(hdev, skb);
break;
+ case HCI_EV_LE_META:
+ hci_le_meta_evt(hdev, skb);
+ break;
+
default:
BT_DBG("%s event 0x%x", hdev->name, event);
break;
@@ -2083,6 +2625,6 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
skb->dev = (void *) hdev;
- hci_send_to_sock(hdev, skb);
+ hci_send_to_sock(hdev, skb, NULL);
kfree_skb(skb);
}
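As a reading aid for the raw numbers used in hci_get_auth_req(), hci_io_capa_request_evt() and the link-key handlers above, these sketches list the Simple Pairing values they correspond to in the core specification; the enum names are illustrative, not identifiers introduced by this patch:

/* IO capability values (conn->io_capability, conn->remote_cap). */
enum {
	IO_CAP_DISPLAY_ONLY     = 0x00,
	IO_CAP_DISPLAY_YESNO    = 0x01,
	IO_CAP_KEYBOARD_ONLY    = 0x02,
	IO_CAP_NO_INPUT_OUTPUT  = 0x03,	/* the hdev->io_capability default above */
};

/* Authentication requirements (conn->auth_type, conn->remote_auth);
 * bit 0 adds MITM protection within each bonding class. */
enum {
	AUTH_NO_BONDING         = 0x00,	/* 0x01 = no bonding + MITM        */
	AUTH_DEDICATED_BONDING  = 0x02,	/* 0x03 = dedicated bonding + MITM */
	AUTH_GENERAL_BONDING    = 0x04,	/* 0x05 = general bonding + MITM   */
};

/* Link key types tested in hci_link_key_request_evt() and hci_add_link_key(). */
enum {
	KEY_DEBUG_COMBINATION   = 0x03,	/* skipped unless HCI_DEBUG_KEYS is set */
	KEY_UNAUTH_COMBINATION  = 0x04,	/* rejected when the connection needs MITM */
	KEY_CHANGED_COMBINATION = 0x06,	/* stored key keeps its previous type */
};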
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 29827c77f6ce..d50e96136608 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -85,7 +85,8 @@ static struct bt_sock_list hci_sk_list = {
};
/* Send frame to RAW socket */
-void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
+void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
+ struct sock *skip_sk)
{
struct sock *sk;
struct hlist_node *node;
@@ -97,6 +98,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_filter *flt;
struct sk_buff *nskb;
+ if (sk == skip_sk)
+ continue;
+
if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
continue;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 5fce3d6d07b4..3c838a65a75a 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -11,7 +11,7 @@
static struct class *bt_class;
-struct dentry *bt_debugfs = NULL;
+struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);
static inline char *link_typetostr(int type)
@@ -51,8 +51,8 @@ static ssize_t show_link_features(struct device *dev, struct device_attribute *a
conn->features[6], conn->features[7]);
}
-#define LINK_ATTR(_name,_mode,_show,_store) \
-struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store)
+#define LINK_ATTR(_name, _mode, _show, _store) \
+struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
@@ -461,6 +461,56 @@ static const struct file_operations blacklist_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+
+static void print_bt_uuid(struct seq_file *f, u8 *uuid)
+{
+ u32 data0, data4;
+ u16 data1, data2, data3, data5;
+
+ memcpy(&data0, &uuid[0], 4);
+ memcpy(&data1, &uuid[4], 2);
+ memcpy(&data2, &uuid[6], 2);
+ memcpy(&data3, &uuid[8], 2);
+ memcpy(&data4, &uuid[10], 4);
+ memcpy(&data5, &uuid[14], 2);
+
+ seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
+ ntohl(data0), ntohs(data1), ntohs(data2),
+ ntohs(data3), ntohl(data4), ntohs(data5));
+}
+
+static int uuids_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ struct list_head *l;
+
+ hci_dev_lock_bh(hdev);
+
+ list_for_each(l, &hdev->uuids) {
+ struct bt_uuid *uuid;
+
+ uuid = list_entry(l, struct bt_uuid, list);
+
+ print_bt_uuid(f, uuid->uuid);
+ }
+
+ hci_dev_unlock_bh(hdev);
+
+ return 0;
+}
+
+static int uuids_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uuids_show, inode->i_private);
+}
+
+static const struct file_operations uuids_fops = {
+ .open = uuids_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
int hci_register_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
@@ -493,6 +543,8 @@ int hci_register_sysfs(struct hci_dev *hdev)
debugfs_create_file("blacklist", 0444, hdev->debugfs,
hdev, &blacklist_fops);
+ debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+
return 0;
}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 29544c21f4b5..2429ca2d7b06 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -157,7 +157,8 @@ static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
session->leds = newleds;
- if (!(skb = alloc_skb(3, GFP_ATOMIC))) {
+ skb = alloc_skb(3, GFP_ATOMIC);
+ if (!skb) {
BT_ERR("Can't allocate memory for new frame");
return -ENOMEM;
}
@@ -250,7 +251,8 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
BT_DBG("session %p data %p size %d", session, data, size);
- if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
+ skb = alloc_skb(size + 1, GFP_ATOMIC);
+ if (!skb) {
BT_ERR("Can't allocate memory for new frame");
return -ENOMEM;
}
@@ -283,7 +285,8 @@ static int hidp_queue_report(struct hidp_session *session,
BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
- if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
+ skb = alloc_skb(size + 1, GFP_ATOMIC);
+ if (!skb) {
BT_ERR("Can't allocate memory for new frame");
return -ENOMEM;
}
@@ -1016,8 +1019,6 @@ static int __init hidp_init(void)
{
int ret;
- l2cap_load();
-
BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION);
ret = hid_register_driver(&hidp_driver);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap_core.c
index 675614e38e14..efcef0dc1259 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap_core.c
@@ -24,7 +24,7 @@
SOFTWARE IS DISCLAIMED.
*/
-/* Bluetooth L2CAP core and sockets. */
+/* Bluetooth L2CAP core. */
#include <linux/module.h>
@@ -55,79 +55,24 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
-#define VERSION "2.15"
-
-static int disable_ertm;
+int disable_ertm;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };
-static const struct proto_ops l2cap_sock_ops;
-
static struct workqueue_struct *_busy_wq;
-static struct bt_sock_list l2cap_sk_list = {
+struct bt_sock_list l2cap_sk_list = {
.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};
static void l2cap_busy_work(struct work_struct *work);
-static void __l2cap_sock_close(struct sock *sk, int reason);
-static void l2cap_sock_close(struct sock *sk);
-static void l2cap_sock_kill(struct sock *sk);
-
-static int l2cap_build_conf_req(struct sock *sk, void *data);
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
-/* ---- L2CAP timers ---- */
-static void l2cap_sock_set_timer(struct sock *sk, long timeout)
-{
- BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
- sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
-}
-
-static void l2cap_sock_clear_timer(struct sock *sk)
-{
- BT_DBG("sock %p state %d", sk, sk->sk_state);
- sk_stop_timer(sk, &sk->sk_timer);
-}
-
-static void l2cap_sock_timeout(unsigned long arg)
-{
- struct sock *sk = (struct sock *) arg;
- int reason;
-
- BT_DBG("sock %p state %d", sk, sk->sk_state);
-
- bh_lock_sock(sk);
-
- if (sock_owned_by_user(sk)) {
- /* sk is owned by user. Try again later */
- l2cap_sock_set_timer(sk, HZ / 5);
- bh_unlock_sock(sk);
- sock_put(sk);
- return;
- }
-
- if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
- reason = ECONNREFUSED;
- else if (sk->sk_state == BT_CONNECT &&
- l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
- reason = ECONNREFUSED;
- else
- reason = ETIMEDOUT;
-
- __l2cap_sock_close(sk, reason);
-
- bh_unlock_sock(sk);
-
- l2cap_sock_kill(sk);
- sock_put(sk);
-}
-
/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
{
@@ -236,8 +181,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
l2cap_pi(sk)->conn = conn;
if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
- /* Alloc CID for connection-oriented socket */
- l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
+ if (conn->hcon->type == LE_LINK) {
+ /* LE connection */
+ l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
+ l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
+ l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
+ } else {
+ /* Alloc CID for connection-oriented socket */
+ l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
+ l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+ }
} else if (sk->sk_type == SOCK_DGRAM) {
/* Connectionless socket */
l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
@@ -258,7 +211,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
/* Delete channel.
* Must be called on the locked socket. */
-static void l2cap_chan_del(struct sock *sk, int err)
+void l2cap_chan_del(struct sock *sk, int err)
{
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sock *parent = bt_sk(sk)->parent;
@@ -348,7 +301,7 @@ static inline int l2cap_check_security(struct sock *sk)
auth_type);
}
-static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
+u8 l2cap_get_ident(struct l2cap_conn *conn)
{
u8 id;
@@ -370,16 +323,22 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
return id;
}
-static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
+ u8 flags;
BT_DBG("code 0x%2.2x", code);
if (!skb)
return;
- hci_send_acl(conn->hcon, skb, 0);
+ if (lmp_no_flush_capable(conn->hcon->hdev))
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
+
+ hci_send_acl(conn->hcon, skb, flags);
}
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
@@ -389,6 +348,7 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
struct l2cap_conn *conn = pi->conn;
struct sock *sk = (struct sock *)pi;
int count, hlen = L2CAP_HDR_SIZE + 2;
+ u8 flags;
if (sk->sk_state != BT_CONNECTED)
return;
@@ -425,7 +385,12 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
put_unaligned_le16(fcs, skb_put(skb, 2));
}
- hci_send_acl(pi->conn->hcon, skb, 0);
+ if (lmp_no_flush_capable(conn->hcon->hdev))
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
+
+ hci_send_acl(pi->conn->hcon, skb, flags);
}
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
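
The two hunks above (and l2cap_do_send() further down) repeat the same decision: start the ACL packet as non-flushable when the controller advertises the No Flush LMP feature, and fall back to a plain ACL_START otherwise; outgoing data additionally respects the per-socket flushable flag that the new BT_FLUSHABLE socket option sets. A sketch of that decision factored into a helper, assuming the same headers as l2cap_core.c (hypothetical name; the patch open-codes it at each call site):

static inline u16 l2cap_acl_flags(struct hci_conn *hcon, bool flushable)
{
	/* Non-flushable start packets only when the socket has not been
	 * marked flushable and the controller supports the feature. */
	if (!flushable && lmp_no_flush_capable(hcon->hdev))
		return ACL_START_NO_FLUSH;

	return ACL_START;
}

/* e.g.: hci_send_acl(hcon, skb, l2cap_acl_flags(hcon, pi->flushable)); */
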
@@ -496,7 +461,7 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
}
}
-static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
+void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
struct l2cap_disconn_req req;
@@ -624,6 +589,82 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
}
}
+/* Find socket with cid and source bdaddr.
+ * Returns closest match, locked.
+ */
+static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
+{
+ struct sock *s, *sk = NULL, *sk1 = NULL;
+ struct hlist_node *node;
+
+ read_lock(&l2cap_sk_list.lock);
+
+ sk_for_each(sk, node, &l2cap_sk_list.head) {
+ if (state && sk->sk_state != state)
+ continue;
+
+ if (l2cap_pi(sk)->scid == cid) {
+ /* Exact match. */
+ if (!bacmp(&bt_sk(sk)->src, src))
+ break;
+
+ /* Closest match */
+ if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ sk1 = sk;
+ }
+ }
+ s = node ? sk : sk1;
+ if (s)
+ bh_lock_sock(s);
+ read_unlock(&l2cap_sk_list.lock);
+
+ return s;
+}
+
+static void l2cap_le_conn_ready(struct l2cap_conn *conn)
+{
+ struct l2cap_chan_list *list = &conn->chan_list;
+ struct sock *parent, *uninitialized_var(sk);
+
+ BT_DBG("");
+
+ /* Check if we have socket listening on cid */
+ parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
+ conn->src);
+ if (!parent)
+ return;
+
+ /* Check for backlog size */
+ if (sk_acceptq_is_full(parent)) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
+ goto clean;
+ }
+
+ sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
+ if (!sk)
+ goto clean;
+
+ write_lock_bh(&list->lock);
+
+ hci_conn_hold(conn->hcon);
+
+ l2cap_sock_init(sk, parent);
+ bacpy(&bt_sk(sk)->src, conn->src);
+ bacpy(&bt_sk(sk)->dst, conn->dst);
+
+ __l2cap_chan_add(conn, sk, parent);
+
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+
+ sk->sk_state = BT_CONNECTED;
+ parent->sk_data_ready(parent, 0);
+
+ write_unlock_bh(&list->lock);
+
+clean:
+ bh_unlock_sock(parent);
+}
+
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
struct l2cap_chan_list *l = &conn->chan_list;
@@ -631,11 +672,20 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
BT_DBG("conn %p", conn);
+ if (!conn->hcon->out && conn->hcon->type == LE_LINK)
+ l2cap_le_conn_ready(conn);
+
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
+ if (conn->hcon->type == LE_LINK) {
+ l2cap_sock_clear_timer(sk);
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+ }
+
if (sk->sk_type != SOCK_SEQPACKET &&
sk->sk_type != SOCK_STREAM) {
l2cap_sock_clear_timer(sk);
@@ -694,7 +744,11 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
BT_DBG("hcon %p conn %p", hcon, conn);
- conn->mtu = hcon->hdev->acl_mtu;
+ if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
+ conn->mtu = hcon->hdev->le_mtu;
+ else
+ conn->mtu = hcon->hdev->acl_mtu;
+
conn->src = &hcon->hdev->bdaddr;
conn->dst = &hcon->dst;
@@ -703,7 +757,8 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
spin_lock_init(&conn->lock);
rwlock_init(&conn->chan_list.lock);
- setup_timer(&conn->info_timer, l2cap_info_timeout,
+ if (hcon->type != LE_LINK)
+ setup_timer(&conn->info_timer, l2cap_info_timeout,
(unsigned long) conn);
conn->disc_reason = 0x13;
@@ -747,17 +802,6 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
}
/* ---- Socket interface ---- */
-static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
-{
- struct sock *sk;
- struct hlist_node *node;
- sk_for_each(sk, node, &l2cap_sk_list.head)
- if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
- goto found;
- sk = NULL;
-found:
- return sk;
-}
/* Find socket with psm and source bdaddr.
* Returns closest match.
@@ -789,277 +833,7 @@ static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
return node ? sk : sk1;
}
-static void l2cap_sock_destruct(struct sock *sk)
-{
- BT_DBG("sk %p", sk);
-
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
-}
-
-static void l2cap_sock_cleanup_listen(struct sock *parent)
-{
- struct sock *sk;
-
- BT_DBG("parent %p", parent);
-
- /* Close not yet accepted channels */
- while ((sk = bt_accept_dequeue(parent, NULL)))
- l2cap_sock_close(sk);
-
- parent->sk_state = BT_CLOSED;
- sock_set_flag(parent, SOCK_ZAPPED);
-}
-
-/* Kill socket (only if zapped and orphan)
- * Must be called on unlocked socket.
- */
-static void l2cap_sock_kill(struct sock *sk)
-{
- if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
- return;
-
- BT_DBG("sk %p state %d", sk, sk->sk_state);
-
- /* Kill poor orphan */
- bt_sock_unlink(&l2cap_sk_list, sk);
- sock_set_flag(sk, SOCK_DEAD);
- sock_put(sk);
-}
-
-static void __l2cap_sock_close(struct sock *sk, int reason)
-{
- BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
-
- switch (sk->sk_state) {
- case BT_LISTEN:
- l2cap_sock_cleanup_listen(sk);
- break;
-
- case BT_CONNECTED:
- case BT_CONFIG:
- if (sk->sk_type == SOCK_SEQPACKET ||
- sk->sk_type == SOCK_STREAM) {
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-
- l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
- l2cap_send_disconn_req(conn, sk, reason);
- } else
- l2cap_chan_del(sk, reason);
- break;
-
- case BT_CONNECT2:
- if (sk->sk_type == SOCK_SEQPACKET ||
- sk->sk_type == SOCK_STREAM) {
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct l2cap_conn_rsp rsp;
- __u16 result;
-
- if (bt_sk(sk)->defer_setup)
- result = L2CAP_CR_SEC_BLOCK;
- else
- result = L2CAP_CR_BAD_PSM;
- sk->sk_state = BT_DISCONN;
-
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- rsp.result = cpu_to_le16(result);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
- } else
- l2cap_chan_del(sk, reason);
- break;
-
- case BT_CONNECT:
- case BT_DISCONN:
- l2cap_chan_del(sk, reason);
- break;
-
- default:
- sock_set_flag(sk, SOCK_ZAPPED);
- break;
- }
-}
-
-/* Must be called on unlocked socket. */
-static void l2cap_sock_close(struct sock *sk)
-{
- l2cap_sock_clear_timer(sk);
- lock_sock(sk);
- __l2cap_sock_close(sk, ECONNRESET);
- release_sock(sk);
- l2cap_sock_kill(sk);
-}
-
-static void l2cap_sock_init(struct sock *sk, struct sock *parent)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
-
- BT_DBG("sk %p", sk);
-
- if (parent) {
- sk->sk_type = parent->sk_type;
- bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
-
- pi->imtu = l2cap_pi(parent)->imtu;
- pi->omtu = l2cap_pi(parent)->omtu;
- pi->conf_state = l2cap_pi(parent)->conf_state;
- pi->mode = l2cap_pi(parent)->mode;
- pi->fcs = l2cap_pi(parent)->fcs;
- pi->max_tx = l2cap_pi(parent)->max_tx;
- pi->tx_win = l2cap_pi(parent)->tx_win;
- pi->sec_level = l2cap_pi(parent)->sec_level;
- pi->role_switch = l2cap_pi(parent)->role_switch;
- pi->force_reliable = l2cap_pi(parent)->force_reliable;
- } else {
- pi->imtu = L2CAP_DEFAULT_MTU;
- pi->omtu = 0;
- if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
- pi->mode = L2CAP_MODE_ERTM;
- pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
- } else {
- pi->mode = L2CAP_MODE_BASIC;
- }
- pi->max_tx = L2CAP_DEFAULT_MAX_TX;
- pi->fcs = L2CAP_FCS_CRC16;
- pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
- pi->sec_level = BT_SECURITY_LOW;
- pi->role_switch = 0;
- pi->force_reliable = 0;
- }
-
- /* Default config options */
- pi->conf_len = 0;
- pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
- skb_queue_head_init(TX_QUEUE(sk));
- skb_queue_head_init(SREJ_QUEUE(sk));
- skb_queue_head_init(BUSY_QUEUE(sk));
- INIT_LIST_HEAD(SREJ_LIST(sk));
-}
-
-static struct proto l2cap_proto = {
- .name = "L2CAP",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct l2cap_pinfo)
-};
-
-static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
-{
- struct sock *sk;
-
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
- if (!sk)
- return NULL;
-
- sock_init_data(sock, sk);
- INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
-
- sk->sk_destruct = l2cap_sock_destruct;
- sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
-
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- sk->sk_protocol = proto;
- sk->sk_state = BT_OPEN;
-
- setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
-
- bt_sock_link(&l2cap_sk_list, sk);
- return sk;
-}
-
-static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
- int kern)
-{
- struct sock *sk;
-
- BT_DBG("sock %p", sock);
-
- sock->state = SS_UNCONNECTED;
-
- if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
- sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
- return -ESOCKTNOSUPPORT;
-
- if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
- return -EPERM;
-
- sock->ops = &l2cap_sock_ops;
-
- sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
- if (!sk)
- return -ENOMEM;
-
- l2cap_sock_init(sk, NULL);
- return 0;
-}
-
-static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
-{
- struct sock *sk = sock->sk;
- struct sockaddr_l2 la;
- int len, err = 0;
-
- BT_DBG("sk %p", sk);
-
- if (!addr || addr->sa_family != AF_BLUETOOTH)
- return -EINVAL;
-
- memset(&la, 0, sizeof(la));
- len = min_t(unsigned int, sizeof(la), alen);
- memcpy(&la, addr, len);
-
- if (la.l2_cid)
- return -EINVAL;
-
- lock_sock(sk);
-
- if (sk->sk_state != BT_OPEN) {
- err = -EBADFD;
- goto done;
- }
-
- if (la.l2_psm) {
- __u16 psm = __le16_to_cpu(la.l2_psm);
-
- /* PSM must be odd and lsb of upper byte must be 0 */
- if ((psm & 0x0101) != 0x0001) {
- err = -EINVAL;
- goto done;
- }
-
- /* Restrict usage of well-known PSMs */
- if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
- err = -EACCES;
- goto done;
- }
- }
-
- write_lock_bh(&l2cap_sk_list.lock);
-
- if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
- err = -EADDRINUSE;
- } else {
- /* Save source address */
- bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
- l2cap_pi(sk)->psm = la.l2_psm;
- l2cap_pi(sk)->sport = la.l2_psm;
- sk->sk_state = BT_BOUND;
-
- if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
- __le16_to_cpu(la.l2_psm) == 0x0003)
- l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
- }
-
- write_unlock_bh(&l2cap_sk_list.lock);
-
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_do_connect(struct sock *sk)
+int l2cap_do_connect(struct sock *sk)
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
@@ -1082,8 +856,13 @@ static int l2cap_do_connect(struct sock *sk)
auth_type = l2cap_get_auth_type(sk);
- hcon = hci_connect(hdev, ACL_LINK, dst,
+ if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
+ hcon = hci_connect(hdev, LE_LINK, dst,
+ l2cap_pi(sk)->sec_level, auth_type);
+ else
+ hcon = hci_connect(hdev, ACL_LINK, dst,
l2cap_pi(sk)->sec_level, auth_type);
+
if (!hcon)
goto done;
@@ -1119,230 +898,7 @@ done:
return err;
}
-static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
-{
- struct sock *sk = sock->sk;
- struct sockaddr_l2 la;
- int len, err = 0;
-
- BT_DBG("sk %p", sk);
-
- if (!addr || alen < sizeof(addr->sa_family) ||
- addr->sa_family != AF_BLUETOOTH)
- return -EINVAL;
-
- memset(&la, 0, sizeof(la));
- len = min_t(unsigned int, sizeof(la), alen);
- memcpy(&la, addr, len);
-
- if (la.l2_cid)
- return -EINVAL;
-
- lock_sock(sk);
-
- if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
- && !la.l2_psm) {
- err = -EINVAL;
- goto done;
- }
-
- switch (l2cap_pi(sk)->mode) {
- case L2CAP_MODE_BASIC:
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -ENOTSUPP;
- goto done;
- }
-
- switch (sk->sk_state) {
- case BT_CONNECT:
- case BT_CONNECT2:
- case BT_CONFIG:
- /* Already connecting */
- goto wait;
-
- case BT_CONNECTED:
- /* Already connected */
- err = -EISCONN;
- goto done;
-
- case BT_OPEN:
- case BT_BOUND:
- /* Can connect */
- break;
-
- default:
- err = -EBADFD;
- goto done;
- }
-
- /* PSM must be odd and lsb of upper byte must be 0 */
- if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
- sk->sk_type != SOCK_RAW) {
- err = -EINVAL;
- goto done;
- }
-
- /* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
- l2cap_pi(sk)->psm = la.l2_psm;
-
- err = l2cap_do_connect(sk);
- if (err)
- goto done;
-
-wait:
- err = bt_sock_wait_state(sk, BT_CONNECTED,
- sock_sndtimeo(sk, flags & O_NONBLOCK));
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_listen(struct socket *sock, int backlog)
-{
- struct sock *sk = sock->sk;
- int err = 0;
-
- BT_DBG("sk %p backlog %d", sk, backlog);
-
- lock_sock(sk);
-
- if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
- || sk->sk_state != BT_BOUND) {
- err = -EBADFD;
- goto done;
- }
-
- switch (l2cap_pi(sk)->mode) {
- case L2CAP_MODE_BASIC:
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -ENOTSUPP;
- goto done;
- }
-
- if (!l2cap_pi(sk)->psm) {
- bdaddr_t *src = &bt_sk(sk)->src;
- u16 psm;
-
- err = -EINVAL;
-
- write_lock_bh(&l2cap_sk_list.lock);
-
- for (psm = 0x1001; psm < 0x1100; psm += 2)
- if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
- l2cap_pi(sk)->psm = cpu_to_le16(psm);
- l2cap_pi(sk)->sport = cpu_to_le16(psm);
- err = 0;
- break;
- }
-
- write_unlock_bh(&l2cap_sk_list.lock);
-
- if (err < 0)
- goto done;
- }
-
- sk->sk_max_ack_backlog = backlog;
- sk->sk_ack_backlog = 0;
- sk->sk_state = BT_LISTEN;
-
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct sock *sk = sock->sk, *nsk;
- long timeo;
- int err = 0;
-
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- goto done;
- }
-
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
-
- BT_DBG("sk %p timeo %ld", sk, timeo);
-
- /* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (!(nsk = bt_accept_dequeue(sk, newsock))) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (!timeo) {
- err = -EAGAIN;
- break;
- }
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- break;
- }
-
- if (signal_pending(current)) {
- err = sock_intr_errno(timeo);
- break;
- }
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(sk_sleep(sk), &wait);
-
- if (err)
- goto done;
-
- newsock->state = SS_CONNECTED;
-
- BT_DBG("new socket %p", nsk);
-
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
-{
- struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
- struct sock *sk = sock->sk;
-
- BT_DBG("sock %p, sk %p", sock, sk);
-
- addr->sa_family = AF_BLUETOOTH;
- *len = sizeof(struct sockaddr_l2);
-
- if (peer) {
- la->l2_psm = l2cap_pi(sk)->psm;
- bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
- la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
- } else {
- la->l2_psm = l2cap_pi(sk)->sport;
- bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
- la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
- }
-
- return 0;
-}
-
-static int __l2cap_wait_ack(struct sock *sk)
+int __l2cap_wait_ack(struct sock *sk)
{
DECLARE_WAITQUEUE(wait, current);
int err = 0;
@@ -1428,16 +984,23 @@ static void l2cap_drop_acked_frames(struct sock *sk)
del_timer(&l2cap_pi(sk)->retrans_timer);
}
-static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
+void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct hci_conn *hcon = pi->conn->hcon;
+ u16 flags;
BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
- hci_send_acl(pi->conn->hcon, skb, 0);
+ if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
+
+ hci_send_acl(hcon, skb, flags);
}
-static void l2cap_streaming_send(struct sock *sk)
+void l2cap_streaming_send(struct sock *sk)
{
struct sk_buff *skb;
struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1506,7 +1069,7 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
l2cap_do_send(sk, tx_skb);
}
-static int l2cap_ertm_send(struct sock *sk)
+int l2cap_ertm_send(struct sock *sk)
{
struct sk_buff *skb, *tx_skb;
struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1646,7 +1209,7 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
return sent;
}
-static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
@@ -1675,7 +1238,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr
return skb;
}
-static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
@@ -1703,7 +1266,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *ms
return skb;
}
-static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
+struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
@@ -1748,7 +1311,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *m
return skb;
}
-static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
+int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct sk_buff *skb;
@@ -1794,487 +1357,6 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz
return size;
}
-static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
-{
- struct sock *sk = sock->sk;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct sk_buff *skb;
- u16 control;
- int err;
-
- BT_DBG("sock %p, sk %p", sock, sk);
-
- err = sock_error(sk);
- if (err)
- return err;
-
- if (msg->msg_flags & MSG_OOB)
- return -EOPNOTSUPP;
-
- lock_sock(sk);
-
- if (sk->sk_state != BT_CONNECTED) {
- err = -ENOTCONN;
- goto done;
- }
-
- /* Connectionless channel */
- if (sk->sk_type == SOCK_DGRAM) {
- skb = l2cap_create_connless_pdu(sk, msg, len);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- } else {
- l2cap_do_send(sk, skb);
- err = len;
- }
- goto done;
- }
-
- switch (pi->mode) {
- case L2CAP_MODE_BASIC:
- /* Check outgoing MTU */
- if (len > pi->omtu) {
- err = -EMSGSIZE;
- goto done;
- }
-
- /* Create a basic PDU */
- skb = l2cap_create_basic_pdu(sk, msg, len);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- goto done;
- }
-
- l2cap_do_send(sk, skb);
- err = len;
- break;
-
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- /* Entire SDU fits into one PDU */
- if (len <= pi->remote_mps) {
- control = L2CAP_SDU_UNSEGMENTED;
- skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- goto done;
- }
- __skb_queue_tail(TX_QUEUE(sk), skb);
-
- if (sk->sk_send_head == NULL)
- sk->sk_send_head = skb;
-
- } else {
- /* Segment SDU into multiples PDUs */
- err = l2cap_sar_segment_sdu(sk, msg, len);
- if (err < 0)
- goto done;
- }
-
- if (pi->mode == L2CAP_MODE_STREAMING) {
- l2cap_streaming_send(sk);
- } else {
- if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
- (pi->conn_state & L2CAP_CONN_WAIT_F)) {
- err = len;
- break;
- }
- err = l2cap_ertm_send(sk);
- }
-
- if (err >= 0)
- err = len;
- break;
-
- default:
- BT_DBG("bad state %1.1x", pi->mode);
- err = -EBADFD;
- }
-
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
-{
- struct sock *sk = sock->sk;
-
- lock_sock(sk);
-
- if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
- struct l2cap_conn_rsp rsp;
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- u8 buf[128];
-
- sk->sk_state = BT_CONFIG;
-
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
- release_sock(sk);
- return 0;
- }
-
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, buf), buf);
- l2cap_pi(sk)->num_conf_req++;
-
- release_sock(sk);
- return 0;
- }
-
- release_sock(sk);
-
- if (sock->type == SOCK_STREAM)
- return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
-
- return bt_sock_recvmsg(iocb, sock, msg, len, flags);
-}
-
-static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
-{
- struct sock *sk = sock->sk;
- struct l2cap_options opts;
- int len, err = 0;
- u32 opt;
-
- BT_DBG("sk %p", sk);
-
- lock_sock(sk);
-
- switch (optname) {
- case L2CAP_OPTIONS:
- if (sk->sk_state == BT_CONNECTED) {
- err = -EINVAL;
- break;
- }
-
- opts.imtu = l2cap_pi(sk)->imtu;
- opts.omtu = l2cap_pi(sk)->omtu;
- opts.flush_to = l2cap_pi(sk)->flush_to;
- opts.mode = l2cap_pi(sk)->mode;
- opts.fcs = l2cap_pi(sk)->fcs;
- opts.max_tx = l2cap_pi(sk)->max_tx;
- opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
-
- len = min_t(unsigned int, sizeof(opts), optlen);
- if (copy_from_user((char *) &opts, optval, len)) {
- err = -EFAULT;
- break;
- }
-
- if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
- err = -EINVAL;
- break;
- }
-
- l2cap_pi(sk)->mode = opts.mode;
- switch (l2cap_pi(sk)->mode) {
- case L2CAP_MODE_BASIC:
- l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -EINVAL;
- break;
- }
-
- l2cap_pi(sk)->imtu = opts.imtu;
- l2cap_pi(sk)->omtu = opts.omtu;
- l2cap_pi(sk)->fcs = opts.fcs;
- l2cap_pi(sk)->max_tx = opts.max_tx;
- l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
- break;
-
- case L2CAP_LM:
- if (get_user(opt, (u32 __user *) optval)) {
- err = -EFAULT;
- break;
- }
-
- if (opt & L2CAP_LM_AUTH)
- l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
- if (opt & L2CAP_LM_ENCRYPT)
- l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
- if (opt & L2CAP_LM_SECURE)
- l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
-
- l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
- l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
-{
- struct sock *sk = sock->sk;
- struct bt_security sec;
- int len, err = 0;
- u32 opt;
-
- BT_DBG("sk %p", sk);
-
- if (level == SOL_L2CAP)
- return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
-
- if (level != SOL_BLUETOOTH)
- return -ENOPROTOOPT;
-
- lock_sock(sk);
-
- switch (optname) {
- case BT_SECURITY:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
- && sk->sk_type != SOCK_RAW) {
- err = -EINVAL;
- break;
- }
-
- sec.level = BT_SECURITY_LOW;
-
- len = min_t(unsigned int, sizeof(sec), optlen);
- if (copy_from_user((char *) &sec, optval, len)) {
- err = -EFAULT;
- break;
- }
-
- if (sec.level < BT_SECURITY_LOW ||
- sec.level > BT_SECURITY_HIGH) {
- err = -EINVAL;
- break;
- }
-
- l2cap_pi(sk)->sec_level = sec.level;
- break;
-
- case BT_DEFER_SETUP:
- if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
- err = -EINVAL;
- break;
- }
-
- if (get_user(opt, (u32 __user *) optval)) {
- err = -EFAULT;
- break;
- }
-
- bt_sk(sk)->defer_setup = opt;
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
- struct l2cap_options opts;
- struct l2cap_conninfo cinfo;
- int len, err = 0;
- u32 opt;
-
- BT_DBG("sk %p", sk);
-
- if (get_user(len, optlen))
- return -EFAULT;
-
- lock_sock(sk);
-
- switch (optname) {
- case L2CAP_OPTIONS:
- opts.imtu = l2cap_pi(sk)->imtu;
- opts.omtu = l2cap_pi(sk)->omtu;
- opts.flush_to = l2cap_pi(sk)->flush_to;
- opts.mode = l2cap_pi(sk)->mode;
- opts.fcs = l2cap_pi(sk)->fcs;
- opts.max_tx = l2cap_pi(sk)->max_tx;
- opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
-
- len = min_t(unsigned int, len, sizeof(opts));
- if (copy_to_user(optval, (char *) &opts, len))
- err = -EFAULT;
-
- break;
-
- case L2CAP_LM:
- switch (l2cap_pi(sk)->sec_level) {
- case BT_SECURITY_LOW:
- opt = L2CAP_LM_AUTH;
- break;
- case BT_SECURITY_MEDIUM:
- opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
- break;
- case BT_SECURITY_HIGH:
- opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
- L2CAP_LM_SECURE;
- break;
- default:
- opt = 0;
- break;
- }
-
- if (l2cap_pi(sk)->role_switch)
- opt |= L2CAP_LM_MASTER;
-
- if (l2cap_pi(sk)->force_reliable)
- opt |= L2CAP_LM_RELIABLE;
-
- if (put_user(opt, (u32 __user *) optval))
- err = -EFAULT;
- break;
-
- case L2CAP_CONNINFO:
- if (sk->sk_state != BT_CONNECTED &&
- !(sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup)) {
- err = -ENOTCONN;
- break;
- }
-
- cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
- memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
-
- len = min_t(unsigned int, len, sizeof(cinfo));
- if (copy_to_user(optval, (char *) &cinfo, len))
- err = -EFAULT;
-
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
- struct bt_security sec;
- int len, err = 0;
-
- BT_DBG("sk %p", sk);
-
- if (level == SOL_L2CAP)
- return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
-
- if (level != SOL_BLUETOOTH)
- return -ENOPROTOOPT;
-
- if (get_user(len, optlen))
- return -EFAULT;
-
- lock_sock(sk);
-
- switch (optname) {
- case BT_SECURITY:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
- && sk->sk_type != SOCK_RAW) {
- err = -EINVAL;
- break;
- }
-
- sec.level = l2cap_pi(sk)->sec_level;
-
- len = min_t(unsigned int, len, sizeof(sec));
- if (copy_to_user(optval, (char *) &sec, len))
- err = -EFAULT;
-
- break;
-
- case BT_DEFER_SETUP:
- if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
- err = -EINVAL;
- break;
- }
-
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
- err = -EFAULT;
-
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_shutdown(struct socket *sock, int how)
-{
- struct sock *sk = sock->sk;
- int err = 0;
-
- BT_DBG("sock %p, sk %p", sock, sk);
-
- if (!sk)
- return 0;
-
- lock_sock(sk);
- if (!sk->sk_shutdown) {
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
- err = __l2cap_wait_ack(sk);
-
- sk->sk_shutdown = SHUTDOWN_MASK;
- l2cap_sock_clear_timer(sk);
- __l2cap_sock_close(sk, 0);
-
- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
- err = bt_sock_wait_state(sk, BT_CLOSED,
- sk->sk_lingertime);
- }
-
- if (!err && sk->sk_err)
- err = -sk->sk_err;
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_release(struct socket *sock)
-{
- struct sock *sk = sock->sk;
- int err;
-
- BT_DBG("sock %p, sk %p", sock, sk);
-
- if (!sk)
- return 0;
-
- err = l2cap_sock_shutdown(sock, 2);
-
- sock_orphan(sk);
- l2cap_sock_kill(sk);
- return err;
-}
-
static void l2cap_chan_ready(struct sock *sk)
{
struct sock *parent = bt_sk(sk)->parent;
@@ -2346,7 +1428,11 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
- lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
+
+ if (conn->hcon->type == LE_LINK)
+ lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
+ else
+ lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->code = code;
@@ -2493,7 +1579,7 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
}
}
-static int l2cap_build_conf_req(struct sock *sk, void *data)
+int l2cap_build_conf_req(struct sock *sk, void *data)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
@@ -2518,11 +1604,11 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
}
done:
+ if (pi->imtu != L2CAP_DEFAULT_MTU)
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+
switch (pi->mode) {
case L2CAP_MODE_BASIC:
- if (pi->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
-
if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
break;
@@ -2585,10 +1671,6 @@ done:
break;
}
- /* FIXME: Need actual value of the flush timeout */
- //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
- // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
-
req->dcid = cpu_to_le16(pi->dcid);
req->flags = cpu_to_le16(0);
@@ -3415,12 +2497,153 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
return 0;
}
-static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
+static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
+ u16 to_multiplier)
+{
+ u16 max_latency;
+
+ if (min > max || min < 6 || max > 3200)
+ return -EINVAL;
+
+ if (to_multiplier < 10 || to_multiplier > 3200)
+ return -EINVAL;
+
+ if (max >= to_multiplier * 8)
+ return -EINVAL;
+
+ max_latency = (to_multiplier * 8 / max) - 1;
+ if (latency > 499 || latency > max_latency)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ struct hci_conn *hcon = conn->hcon;
+ struct l2cap_conn_param_update_req *req;
+ struct l2cap_conn_param_update_rsp rsp;
+ u16 min, max, latency, to_multiplier, cmd_len;
+ int err;
+
+ if (!(hcon->link_mode & HCI_LM_MASTER))
+ return -EINVAL;
+
+ cmd_len = __le16_to_cpu(cmd->len);
+ if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
+ return -EPROTO;
+
+ req = (struct l2cap_conn_param_update_req *) data;
+ min = __le16_to_cpu(req->min);
+ max = __le16_to_cpu(req->max);
+ latency = __le16_to_cpu(req->latency);
+ to_multiplier = __le16_to_cpu(req->to_multiplier);
+
+ BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
+ min, max, latency, to_multiplier);
+
+ memset(&rsp, 0, sizeof(rsp));
+
+ err = l2cap_check_conn_param(min, max, latency, to_multiplier);
+ if (err)
+ rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+ else
+ rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
+ sizeof(rsp), &rsp);
+
+ if (!err)
+ hci_le_conn_update(hcon, min, max, latency, to_multiplier);
+
+ return 0;
+}
+
+static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+{
+ int err = 0;
+
+ switch (cmd->code) {
+ case L2CAP_COMMAND_REJ:
+ l2cap_command_rej(conn, cmd, data);
+ break;
+
+ case L2CAP_CONN_REQ:
+ err = l2cap_connect_req(conn, cmd, data);
+ break;
+
+ case L2CAP_CONN_RSP:
+ err = l2cap_connect_rsp(conn, cmd, data);
+ break;
+
+ case L2CAP_CONF_REQ:
+ err = l2cap_config_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONF_RSP:
+ err = l2cap_config_rsp(conn, cmd, data);
+ break;
+
+ case L2CAP_DISCONN_REQ:
+ err = l2cap_disconnect_req(conn, cmd, data);
+ break;
+
+ case L2CAP_DISCONN_RSP:
+ err = l2cap_disconnect_rsp(conn, cmd, data);
+ break;
+
+ case L2CAP_ECHO_REQ:
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
+ break;
+
+ case L2CAP_ECHO_RSP:
+ break;
+
+ case L2CAP_INFO_REQ:
+ err = l2cap_information_req(conn, cmd, data);
+ break;
+
+ case L2CAP_INFO_RSP:
+ err = l2cap_information_rsp(conn, cmd, data);
+ break;
+
+ default:
+ BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ switch (cmd->code) {
+ case L2CAP_COMMAND_REJ:
+ return 0;
+
+ case L2CAP_CONN_PARAM_UPDATE_REQ:
+ return l2cap_conn_param_update_req(conn, cmd, data);
+
+ case L2CAP_CONN_PARAM_UPDATE_RSP:
+ return 0;
+
+ default:
+ BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
+ return -EINVAL;
+ }
+}
+
+static inline void l2cap_sig_channel(struct l2cap_conn *conn,
+ struct sk_buff *skb)
{
u8 *data = skb->data;
int len = skb->len;
struct l2cap_cmd_hdr cmd;
- int err = 0;
+ int err;
l2cap_raw_recv(conn, skb);
@@ -3439,55 +2662,10 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
break;
}
- switch (cmd.code) {
- case L2CAP_COMMAND_REJ:
- l2cap_command_rej(conn, &cmd, data);
- break;
-
- case L2CAP_CONN_REQ:
- err = l2cap_connect_req(conn, &cmd, data);
- break;
-
- case L2CAP_CONN_RSP:
- err = l2cap_connect_rsp(conn, &cmd, data);
- break;
-
- case L2CAP_CONF_REQ:
- err = l2cap_config_req(conn, &cmd, cmd_len, data);
- break;
-
- case L2CAP_CONF_RSP:
- err = l2cap_config_rsp(conn, &cmd, data);
- break;
-
- case L2CAP_DISCONN_REQ:
- err = l2cap_disconnect_req(conn, &cmd, data);
- break;
-
- case L2CAP_DISCONN_RSP:
- err = l2cap_disconnect_rsp(conn, &cmd, data);
- break;
-
- case L2CAP_ECHO_REQ:
- l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
- break;
-
- case L2CAP_ECHO_RSP:
- break;
-
- case L2CAP_INFO_REQ:
- err = l2cap_information_req(conn, &cmd, data);
- break;
-
- case L2CAP_INFO_RSP:
- err = l2cap_information_rsp(conn, &cmd, data);
- break;
-
- default:
- BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
- err = -EINVAL;
- break;
- }
+ if (conn->hcon->type == LE_LINK)
+ err = l2cap_le_sig_cmd(conn, &cmd, data);
+ else
+ err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
if (err) {
struct l2cap_cmd_rej rej;
@@ -4484,6 +3662,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("len %d, cid 0x%4.4x", len, cid);
switch (cid) {
+ case L2CAP_CID_LE_SIGNALING:
case L2CAP_CID_SIGNALING:
l2cap_sig_channel(conn, skb);
break;
@@ -4541,7 +3720,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
- if (hcon->type != ACL_LINK)
+ if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
return -EINVAL;
if (!status) {
@@ -4570,7 +3749,7 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
- if (hcon->type != ACL_LINK)
+ if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
return -EINVAL;
l2cap_conn_del(hcon, bt_err(reason));
@@ -4673,12 +3852,15 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
{
struct l2cap_conn *conn = hcon->l2cap_data;
- if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
+ if (!conn)
+ conn = l2cap_conn_add(hcon, 0);
+
+ if (!conn)
goto drop;
BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
- if (flags & ACL_START) {
+ if (!(flags & ACL_CONT)) {
struct l2cap_hdr *hdr;
struct sock *sk;
u16 cid;
@@ -4784,12 +3966,13 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
sk_for_each(sk, node, &l2cap_sk_list.head) {
struct l2cap_pinfo *pi = l2cap_pi(sk);
- seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
+ seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
batostr(&bt_sk(sk)->src),
batostr(&bt_sk(sk)->dst),
sk->sk_state, __le16_to_cpu(pi->psm),
pi->scid, pi->dcid,
- pi->imtu, pi->omtu, pi->sec_level);
+ pi->imtu, pi->omtu, pi->sec_level,
+ pi->mode);
}
read_unlock_bh(&l2cap_sk_list.lock);
@@ -4811,32 +3994,6 @@ static const struct file_operations l2cap_debugfs_fops = {
static struct dentry *l2cap_debugfs;
-static const struct proto_ops l2cap_sock_ops = {
- .family = PF_BLUETOOTH,
- .owner = THIS_MODULE,
- .release = l2cap_sock_release,
- .bind = l2cap_sock_bind,
- .connect = l2cap_sock_connect,
- .listen = l2cap_sock_listen,
- .accept = l2cap_sock_accept,
- .getname = l2cap_sock_getname,
- .sendmsg = l2cap_sock_sendmsg,
- .recvmsg = l2cap_sock_recvmsg,
- .poll = bt_sock_poll,
- .ioctl = bt_sock_ioctl,
- .mmap = sock_no_mmap,
- .socketpair = sock_no_socketpair,
- .shutdown = l2cap_sock_shutdown,
- .setsockopt = l2cap_sock_setsockopt,
- .getsockopt = l2cap_sock_getsockopt
-};
-
-static const struct net_proto_family l2cap_sock_family_ops = {
- .family = PF_BLUETOOTH,
- .owner = THIS_MODULE,
- .create = l2cap_sock_create,
-};
-
static struct hci_proto l2cap_hci_proto = {
.name = "L2CAP",
.id = HCI_PROTO_L2CAP,
@@ -4848,23 +4005,17 @@ static struct hci_proto l2cap_hci_proto = {
.recv_acldata = l2cap_recv_acldata
};
-static int __init l2cap_init(void)
+int __init l2cap_init(void)
{
int err;
- err = proto_register(&l2cap_proto, 0);
+ err = l2cap_init_sockets();
if (err < 0)
return err;
_busy_wq = create_singlethread_workqueue("l2cap");
if (!_busy_wq) {
- proto_unregister(&l2cap_proto);
- return -ENOMEM;
- }
-
- err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
- if (err < 0) {
- BT_ERR("L2CAP socket registration failed");
+ err = -ENOMEM;
goto error;
}
@@ -4882,49 +4033,28 @@ static int __init l2cap_init(void)
BT_ERR("Failed to create L2CAP debug file");
}
- BT_INFO("L2CAP ver %s", VERSION);
BT_INFO("L2CAP socket layer initialized");
return 0;
error:
destroy_workqueue(_busy_wq);
- proto_unregister(&l2cap_proto);
+ l2cap_cleanup_sockets();
return err;
}
-static void __exit l2cap_exit(void)
+void l2cap_exit(void)
{
debugfs_remove(l2cap_debugfs);
flush_workqueue(_busy_wq);
destroy_workqueue(_busy_wq);
- if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
- BT_ERR("L2CAP socket unregistration failed");
-
if (hci_unregister_proto(&l2cap_hci_proto) < 0)
BT_ERR("L2CAP protocol unregistration failed");
- proto_unregister(&l2cap_proto);
+ l2cap_cleanup_sockets();
}
-void l2cap_load(void)
-{
- /* Dummy function to trigger automatic L2CAP module loading by
- * other modules that use L2CAP sockets but don't use any other
- * symbols from it. */
-}
-EXPORT_SYMBOL(l2cap_load);
-
-module_init(l2cap_init);
-module_exit(l2cap_exit);
-
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
-
-MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("bt-proto-0");
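
With the signalling channel now split by link type, l2cap_le_sig_cmd() forwards L2CAP_CONN_PARAM_UPDATE_REQ to l2cap_conn_param_update_req(), which validates the peer's parameters with l2cap_check_conn_param() before answering and, on acceptance, calling hci_le_conn_update(). The bounds are easier to read with their units spelled out: min and max are the connection interval in 1.25 ms steps, to_multiplier is the supervision timeout in 10 ms steps, and latency is the number of connection events the slave may skip. A standalone sketch of the same check with two sample requests (illustration only, not part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static int check_conn_param(uint16_t min, uint16_t max, uint16_t latency,
			    uint16_t to_multiplier)
{
	uint16_t max_latency;

	/* interval bounds: 6..3200 * 1.25 ms = 7.5 ms .. 4 s */
	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	/* supervision timeout: 10..3200 * 10 ms = 100 ms .. 32 s */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* the timeout must exceed the largest interval ... */
	if (max >= to_multiplier * 8)
		return -EINVAL;

	/* ... by at least a factor of (latency + 1), with latency capped
	 * at the specification maximum of 499 skipped events */
	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}

int main(void)
{
	/* 30-50 ms interval, slave latency 4, 1 s timeout: accepted (0) */
	printf("%d\n", check_conn_param(24, 40, 4, 100));

	/* 4 s interval but only a 100 ms timeout: rejected (-EINVAL) */
	printf("%d\n", check_conn_param(6, 3200, 0, 10));

	return 0;
}

On the real code path, l2cap_conn_param_update_req() replies with L2CAP_CONN_PARAM_UPDATE_RSP carrying ACCEPTED or REJECTED accordingly, and only updates the link on success.
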
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
new file mode 100644
index 000000000000..fc85e7ae33c7
--- /dev/null
+++ b/net/bluetooth/l2cap_sock.c
@@ -0,0 +1,1156 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
+ Copyright (C) 2010 Google Inc.
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+/* Bluetooth L2CAP sockets. */
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+
+/* ---- L2CAP timers ---- */
+static void l2cap_sock_timeout(unsigned long arg)
+{
+ struct sock *sk = (struct sock *) arg;
+ int reason;
+
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
+
+ bh_lock_sock(sk);
+
+ if (sock_owned_by_user(sk)) {
+ /* sk is owned by user. Try again later */
+ l2cap_sock_set_timer(sk, HZ / 5);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ return;
+ }
+
+ if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
+ reason = ECONNREFUSED;
+ else if (sk->sk_state == BT_CONNECT &&
+ l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
+ reason = ECONNREFUSED;
+ else
+ reason = ETIMEDOUT;
+
+ __l2cap_sock_close(sk, reason);
+
+ bh_unlock_sock(sk);
+
+ l2cap_sock_kill(sk);
+ sock_put(sk);
+}
+
+void l2cap_sock_set_timer(struct sock *sk, long timeout)
+{
+ BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
+}
+
+void l2cap_sock_clear_timer(struct sock *sk)
+{
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
+ sk_stop_timer(sk, &sk->sk_timer);
+}
+
+static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+ sk_for_each(sk, node, &l2cap_sk_list.head)
+ if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
+ goto found;
+ sk = NULL;
+found:
+ return sk;
+}
+
+static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_l2 la;
+ int len, err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ if (!addr || addr->sa_family != AF_BLUETOOTH)
+ return -EINVAL;
+
+ memset(&la, 0, sizeof(la));
+ len = min_t(unsigned int, sizeof(la), alen);
+ memcpy(&la, addr, len);
+
+ if (la.l2_cid && la.l2_psm)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ if (sk->sk_state != BT_OPEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ if (la.l2_psm) {
+ __u16 psm = __le16_to_cpu(la.l2_psm);
+
+ /* PSM must be odd and lsb of upper byte must be 0 */
+ if ((psm & 0x0101) != 0x0001) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ /* Restrict usage of well-known PSMs */
+ if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
+ err = -EACCES;
+ goto done;
+ }
+ }
+
+ write_lock_bh(&l2cap_sk_list.lock);
+
+ if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
+ err = -EADDRINUSE;
+ } else {
+ /* Save source address */
+ bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+ l2cap_pi(sk)->psm = la.l2_psm;
+ l2cap_pi(sk)->sport = la.l2_psm;
+ sk->sk_state = BT_BOUND;
+
+ if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
+ __le16_to_cpu(la.l2_psm) == 0x0003)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+ }
+
+ if (la.l2_cid)
+ l2cap_pi(sk)->scid = la.l2_cid;
+
+ write_unlock_bh(&l2cap_sk_list.lock);
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_l2 la;
+ int len, err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ if (!addr || alen < sizeof(addr->sa_family) ||
+ addr->sa_family != AF_BLUETOOTH)
+ return -EINVAL;
+
+ memset(&la, 0, sizeof(la));
+ len = min_t(unsigned int, sizeof(la), alen);
+ memcpy(&la, addr, len);
+
+ if (la.l2_cid && la.l2_psm)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
+ && !(la.l2_psm || la.l2_cid)) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
+ switch (sk->sk_state) {
+ case BT_CONNECT:
+ case BT_CONNECT2:
+ case BT_CONFIG:
+ /* Already connecting */
+ goto wait;
+
+ case BT_CONNECTED:
+ /* Already connected */
+ err = -EISCONN;
+ goto done;
+
+ case BT_OPEN:
+ case BT_BOUND:
+ /* Can connect */
+ break;
+
+ default:
+ err = -EBADFD;
+ goto done;
+ }
+
+ /* PSM must be odd and lsb of upper byte must be 0 */
+ if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
+ sk->sk_type != SOCK_RAW && !la.l2_cid) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ /* Set destination address and psm */
+ bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
+ l2cap_pi(sk)->psm = la.l2_psm;
+ l2cap_pi(sk)->dcid = la.l2_cid;
+
+ err = l2cap_do_connect(sk);
+ if (err)
+ goto done;
+
+wait:
+ err = bt_sock_wait_state(sk, BT_CONNECTED,
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_listen(struct socket *sock, int backlog)
+{
+ struct sock *sk = sock->sk;
+ int err = 0;
+
+ BT_DBG("sk %p backlog %d", sk, backlog);
+
+ lock_sock(sk);
+
+ if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
+ || sk->sk_state != BT_BOUND) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
+ if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->dcid) {
+ bdaddr_t *src = &bt_sk(sk)->src;
+ u16 psm;
+
+ err = -EINVAL;
+
+ write_lock_bh(&l2cap_sk_list.lock);
+
+ for (psm = 0x1001; psm < 0x1100; psm += 2)
+ if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
+ l2cap_pi(sk)->psm = cpu_to_le16(psm);
+ l2cap_pi(sk)->sport = cpu_to_le16(psm);
+ err = 0;
+ break;
+ }
+
+ write_unlock_bh(&l2cap_sk_list.lock);
+
+ if (err < 0)
+ goto done;
+ }
+
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = BT_LISTEN;
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct sock *sk = sock->sk, *nsk;
+ long timeo;
+ int err = 0;
+
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+
+ BT_DBG("sk %p timeo %ld", sk, timeo);
+
+ /* Wait for an incoming connection. (wake-one). */
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
+ while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!timeo) {
+ err = -EAGAIN;
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ if (err)
+ goto done;
+
+ newsock->state = SS_CONNECTED;
+
+ BT_DBG("new socket %p", nsk);
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
+{
+ struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
+ struct sock *sk = sock->sk;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ addr->sa_family = AF_BLUETOOTH;
+ *len = sizeof(struct sockaddr_l2);
+
+ if (peer) {
+ la->l2_psm = l2cap_pi(sk)->psm;
+ bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
+ la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ } else {
+ la->l2_psm = l2cap_pi(sk)->sport;
+ bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
+ la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
+ }
+
+ return 0;
+}
+
+static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_options opts;
+ struct l2cap_conninfo cinfo;
+ int len, err = 0;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case L2CAP_OPTIONS:
+ memset(&opts, 0, sizeof(opts));
+ opts.imtu = l2cap_pi(sk)->imtu;
+ opts.omtu = l2cap_pi(sk)->omtu;
+ opts.flush_to = l2cap_pi(sk)->flush_to;
+ opts.mode = l2cap_pi(sk)->mode;
+ opts.fcs = l2cap_pi(sk)->fcs;
+ opts.max_tx = l2cap_pi(sk)->max_tx;
+ opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
+
+ len = min_t(unsigned int, len, sizeof(opts));
+ if (copy_to_user(optval, (char *) &opts, len))
+ err = -EFAULT;
+
+ break;
+
+ case L2CAP_LM:
+ switch (l2cap_pi(sk)->sec_level) {
+ case BT_SECURITY_LOW:
+ opt = L2CAP_LM_AUTH;
+ break;
+ case BT_SECURITY_MEDIUM:
+ opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
+ break;
+ case BT_SECURITY_HIGH:
+ opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
+ L2CAP_LM_SECURE;
+ break;
+ default:
+ opt = 0;
+ break;
+ }
+
+ if (l2cap_pi(sk)->role_switch)
+ opt |= L2CAP_LM_MASTER;
+
+ if (l2cap_pi(sk)->force_reliable)
+ opt |= L2CAP_LM_RELIABLE;
+
+ if (put_user(opt, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
+ case L2CAP_CONNINFO:
+ if (sk->sk_state != BT_CONNECTED &&
+ !(sk->sk_state == BT_CONNECT2 &&
+ bt_sk(sk)->defer_setup)) {
+ err = -ENOTCONN;
+ break;
+ }
+
+ cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
+ memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
+
+ len = min_t(unsigned int, len, sizeof(cinfo));
+ if (copy_to_user(optval, (char *) &cinfo, len))
+ err = -EFAULT;
+
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct bt_security sec;
+ int len, err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ if (level == SOL_L2CAP)
+ return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
+
+ if (level != SOL_BLUETOOTH)
+ return -ENOPROTOOPT;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case BT_SECURITY:
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
+ err = -EINVAL;
+ break;
+ }
+
+ sec.level = l2cap_pi(sk)->sec_level;
+
+ len = min_t(unsigned int, len, sizeof(sec));
+ if (copy_to_user(optval, (char *) &sec, len))
+ err = -EFAULT;
+
+ break;
+
+ case BT_DEFER_SETUP:
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ err = -EFAULT;
+
+ break;
+
+ case BT_FLUSHABLE:
+ if (put_user(l2cap_pi(sk)->flushable, (u32 __user *) optval))
+ err = -EFAULT;
+
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_options opts;
+ int len, err = 0;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case L2CAP_OPTIONS:
+ if (sk->sk_state == BT_CONNECTED) {
+ err = -EINVAL;
+ break;
+ }
+
+ opts.imtu = l2cap_pi(sk)->imtu;
+ opts.omtu = l2cap_pi(sk)->omtu;
+ opts.flush_to = l2cap_pi(sk)->flush_to;
+ opts.mode = l2cap_pi(sk)->mode;
+ opts.fcs = l2cap_pi(sk)->fcs;
+ opts.max_tx = l2cap_pi(sk)->max_tx;
+ opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
+
+ len = min_t(unsigned int, sizeof(opts), optlen);
+ if (copy_from_user((char *) &opts, optval, len)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
+ err = -EINVAL;
+ break;
+ }
+
+ l2cap_pi(sk)->mode = opts.mode;
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ l2cap_pi(sk)->imtu = opts.imtu;
+ l2cap_pi(sk)->omtu = opts.omtu;
+ l2cap_pi(sk)->fcs = opts.fcs;
+ l2cap_pi(sk)->max_tx = opts.max_tx;
+ l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
+ break;
+
+ case L2CAP_LM:
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt & L2CAP_LM_AUTH)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
+ if (opt & L2CAP_LM_ENCRYPT)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
+ if (opt & L2CAP_LM_SECURE)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
+
+ l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
+ l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct bt_security sec;
+ int len, err = 0;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+
+ if (level == SOL_L2CAP)
+ return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
+
+ if (level != SOL_BLUETOOTH)
+ return -ENOPROTOOPT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case BT_SECURITY:
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
+ err = -EINVAL;
+ break;
+ }
+
+ sec.level = BT_SECURITY_LOW;
+
+ len = min_t(unsigned int, sizeof(sec), optlen);
+ if (copy_from_user((char *) &sec, optval, len)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (sec.level < BT_SECURITY_LOW ||
+ sec.level > BT_SECURITY_HIGH) {
+ err = -EINVAL;
+ break;
+ }
+
+ l2cap_pi(sk)->sec_level = sec.level;
+ break;
+
+ case BT_DEFER_SETUP:
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ bt_sk(sk)->defer_setup = opt;
+ break;
+
+ case BT_FLUSHABLE:
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt > BT_FLUSHABLE_ON) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (opt == BT_FLUSHABLE_OFF) {
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+			/* proceed further only when we have l2cap_conn and
+			   No Flush support in the LM */
+ if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ l2cap_pi(sk)->flushable = opt;
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct sk_buff *skb;
+ u16 control;
+ int err;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ err = sock_error(sk);
+ if (err)
+ return err;
+
+ if (msg->msg_flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+ lock_sock(sk);
+
+ if (sk->sk_state != BT_CONNECTED) {
+ err = -ENOTCONN;
+ goto done;
+ }
+
+ /* Connectionless channel */
+ if (sk->sk_type == SOCK_DGRAM) {
+ skb = l2cap_create_connless_pdu(sk, msg, len);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ } else {
+ l2cap_do_send(sk, skb);
+ err = len;
+ }
+ goto done;
+ }
+
+ switch (pi->mode) {
+ case L2CAP_MODE_BASIC:
+ /* Check outgoing MTU */
+ if (len > pi->omtu) {
+ err = -EMSGSIZE;
+ goto done;
+ }
+
+ /* Create a basic PDU */
+ skb = l2cap_create_basic_pdu(sk, msg, len);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto done;
+ }
+
+ l2cap_do_send(sk, skb);
+ err = len;
+ break;
+
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ /* Entire SDU fits into one PDU */
+ if (len <= pi->remote_mps) {
+ control = L2CAP_SDU_UNSEGMENTED;
+ skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto done;
+ }
+ __skb_queue_tail(TX_QUEUE(sk), skb);
+
+ if (sk->sk_send_head == NULL)
+ sk->sk_send_head = skb;
+
+ } else {
+			/* Segment SDU into multiple PDUs */
+ err = l2cap_sar_segment_sdu(sk, msg, len);
+ if (err < 0)
+ goto done;
+ }
+
+ if (pi->mode == L2CAP_MODE_STREAMING) {
+ l2cap_streaming_send(sk);
+ } else {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ (pi->conn_state & L2CAP_CONN_WAIT_F)) {
+ err = len;
+ break;
+ }
+ err = l2cap_ertm_send(sk);
+ }
+
+ if (err >= 0)
+ err = len;
+ break;
+
+ default:
+ BT_DBG("bad state %1.1x", pi->mode);
+ err = -EBADFD;
+ }
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
+{
+ struct sock *sk = sock->sk;
+
+ lock_sock(sk);
+
+ if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
+ struct l2cap_conn_rsp rsp;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ u8 buf[128];
+
+ sk->sk_state = BT_CONFIG;
+
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
+ release_sock(sk);
+ return 0;
+ }
+
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+ l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
+
+ release_sock(sk);
+ return 0;
+ }
+
+ release_sock(sk);
+
+ if (sock->type == SOCK_STREAM)
+ return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+
+ return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+}
+
+/* Kill socket (only if zapped and orphan)
+ * Must be called on unlocked socket.
+ */
+void l2cap_sock_kill(struct sock *sk)
+{
+ if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
+ return;
+
+ BT_DBG("sk %p state %d", sk, sk->sk_state);
+
+ /* Kill poor orphan */
+ bt_sock_unlink(&l2cap_sk_list, sk);
+ sock_set_flag(sk, SOCK_DEAD);
+ sock_put(sk);
+}
+
+/* Must be called on unlocked socket. */
+static void l2cap_sock_close(struct sock *sk)
+{
+ l2cap_sock_clear_timer(sk);
+ lock_sock(sk);
+ __l2cap_sock_close(sk, ECONNRESET);
+ release_sock(sk);
+ l2cap_sock_kill(sk);
+}
+
+static void l2cap_sock_cleanup_listen(struct sock *parent)
+{
+ struct sock *sk;
+
+ BT_DBG("parent %p", parent);
+
+ /* Close not yet accepted channels */
+ while ((sk = bt_accept_dequeue(parent, NULL)))
+ l2cap_sock_close(sk);
+
+ parent->sk_state = BT_CLOSED;
+ sock_set_flag(parent, SOCK_ZAPPED);
+}
+
+void __l2cap_sock_close(struct sock *sk, int reason)
+{
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+
+ BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
+
+ switch (sk->sk_state) {
+ case BT_LISTEN:
+ l2cap_sock_cleanup_listen(sk);
+ break;
+
+ case BT_CONNECTED:
+ case BT_CONFIG:
+ if ((sk->sk_type == SOCK_SEQPACKET ||
+ sk->sk_type == SOCK_STREAM) &&
+ conn->hcon->type == ACL_LINK) {
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+ l2cap_send_disconn_req(conn, sk, reason);
+ } else
+ l2cap_chan_del(sk, reason);
+ break;
+
+ case BT_CONNECT2:
+ if ((sk->sk_type == SOCK_SEQPACKET ||
+ sk->sk_type == SOCK_STREAM) &&
+ conn->hcon->type == ACL_LINK) {
+ struct l2cap_conn_rsp rsp;
+ __u16 result;
+
+ if (bt_sk(sk)->defer_setup)
+ result = L2CAP_CR_SEC_BLOCK;
+ else
+ result = L2CAP_CR_BAD_PSM;
+
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.result = cpu_to_le16(result);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+ } else
+ l2cap_chan_del(sk, reason);
+ break;
+
+ case BT_CONNECT:
+ case BT_DISCONN:
+ l2cap_chan_del(sk, reason);
+ break;
+
+ default:
+ sock_set_flag(sk, SOCK_ZAPPED);
+ break;
+ }
+}
+
+static int l2cap_sock_shutdown(struct socket *sock, int how)
+{
+ struct sock *sk = sock->sk;
+ int err = 0;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+ if (!sk->sk_shutdown) {
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
+ err = __l2cap_wait_ack(sk);
+
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ l2cap_sock_clear_timer(sk);
+ __l2cap_sock_close(sk, 0);
+
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+ err = bt_sock_wait_state(sk, BT_CLOSED,
+ sk->sk_lingertime);
+ }
+
+ if (!err && sk->sk_err)
+ err = -sk->sk_err;
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ int err;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ if (!sk)
+ return 0;
+
+ err = l2cap_sock_shutdown(sock, 2);
+
+ sock_orphan(sk);
+ l2cap_sock_kill(sk);
+ return err;
+}
+
+static void l2cap_sock_destruct(struct sock *sk)
+{
+ BT_DBG("sk %p", sk);
+
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+}
+
+void l2cap_sock_init(struct sock *sk, struct sock *parent)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+ BT_DBG("sk %p", sk);
+
+ if (parent) {
+ sk->sk_type = parent->sk_type;
+ bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
+
+ pi->imtu = l2cap_pi(parent)->imtu;
+ pi->omtu = l2cap_pi(parent)->omtu;
+ pi->conf_state = l2cap_pi(parent)->conf_state;
+ pi->mode = l2cap_pi(parent)->mode;
+ pi->fcs = l2cap_pi(parent)->fcs;
+ pi->max_tx = l2cap_pi(parent)->max_tx;
+ pi->tx_win = l2cap_pi(parent)->tx_win;
+ pi->sec_level = l2cap_pi(parent)->sec_level;
+ pi->role_switch = l2cap_pi(parent)->role_switch;
+ pi->force_reliable = l2cap_pi(parent)->force_reliable;
+ pi->flushable = l2cap_pi(parent)->flushable;
+ } else {
+ pi->imtu = L2CAP_DEFAULT_MTU;
+ pi->omtu = 0;
+ if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
+ pi->mode = L2CAP_MODE_ERTM;
+ pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
+ } else {
+ pi->mode = L2CAP_MODE_BASIC;
+ }
+ pi->max_tx = L2CAP_DEFAULT_MAX_TX;
+ pi->fcs = L2CAP_FCS_CRC16;
+ pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ pi->sec_level = BT_SECURITY_LOW;
+ pi->role_switch = 0;
+ pi->force_reliable = 0;
+ pi->flushable = BT_FLUSHABLE_OFF;
+ }
+
+ /* Default config options */
+ pi->conf_len = 0;
+ pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+ skb_queue_head_init(TX_QUEUE(sk));
+ skb_queue_head_init(SREJ_QUEUE(sk));
+ skb_queue_head_init(BUSY_QUEUE(sk));
+ INIT_LIST_HEAD(SREJ_LIST(sk));
+}
+
+static struct proto l2cap_proto = {
+ .name = "L2CAP",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct l2cap_pinfo)
+};
+
+struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+{
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
+ if (!sk)
+ return NULL;
+
+ sock_init_data(sock, sk);
+ INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
+
+ sk->sk_destruct = l2cap_sock_destruct;
+ sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
+
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
+ sk->sk_protocol = proto;
+ sk->sk_state = BT_OPEN;
+
+ setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
+
+ bt_sock_link(&l2cap_sk_list, sk);
+ return sk;
+}
+
+static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+{
+ struct sock *sk;
+
+ BT_DBG("sock %p", sock);
+
+ sock->state = SS_UNCONNECTED;
+
+ if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
+ sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
+ return -ESOCKTNOSUPPORT;
+
+ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
+ return -EPERM;
+
+ sock->ops = &l2cap_sock_ops;
+
+ sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+ if (!sk)
+ return -ENOMEM;
+
+ l2cap_sock_init(sk, NULL);
+ return 0;
+}
+
+const struct proto_ops l2cap_sock_ops = {
+ .family = PF_BLUETOOTH,
+ .owner = THIS_MODULE,
+ .release = l2cap_sock_release,
+ .bind = l2cap_sock_bind,
+ .connect = l2cap_sock_connect,
+ .listen = l2cap_sock_listen,
+ .accept = l2cap_sock_accept,
+ .getname = l2cap_sock_getname,
+ .sendmsg = l2cap_sock_sendmsg,
+ .recvmsg = l2cap_sock_recvmsg,
+ .poll = bt_sock_poll,
+ .ioctl = bt_sock_ioctl,
+ .mmap = sock_no_mmap,
+ .socketpair = sock_no_socketpair,
+ .shutdown = l2cap_sock_shutdown,
+ .setsockopt = l2cap_sock_setsockopt,
+ .getsockopt = l2cap_sock_getsockopt
+};
+
+static const struct net_proto_family l2cap_sock_family_ops = {
+ .family = PF_BLUETOOTH,
+ .owner = THIS_MODULE,
+ .create = l2cap_sock_create,
+};
+
+int __init l2cap_init_sockets(void)
+{
+ int err;
+
+ err = proto_register(&l2cap_proto, 0);
+ if (err < 0)
+ return err;
+
+ err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
+ if (err < 0)
+ goto error;
+
+ BT_INFO("L2CAP socket layer initialized");
+
+ return 0;
+
+error:
+ BT_ERR("L2CAP socket registration failed");
+ proto_unregister(&l2cap_proto);
+ return err;
+}
+
+void l2cap_cleanup_sockets(void)
+{
+ if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
+ BT_ERR("L2CAP socket unregistration failed");
+
+ proto_unregister(&l2cap_proto);
+}
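
The socket layer split out above leaves the user-space ABI untouched. As a reference point, here is a minimal client sketch against the usual BlueZ user-space headers (<bluetooth/bluetooth.h>, <bluetooth/l2cap.h>, link with -lbluetooth); the PSM and the peer address are placeholders, and BT_SECURITY/SOL_BLUETOOTH are the options dispatched by l2cap_sock_setsockopt() above:

/* Minimal L2CAP client sketch; the PSM and peer address are placeholders,
 * not values taken from this patch. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

static int l2cap_connect_example(const char *peer)
{
	struct sockaddr_l2 addr;
	struct bt_security sec;
	int sk;

	sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (sk < 0)
		return -1;

	/* Handled by l2cap_sock_setsockopt() -> sec_level above */
	memset(&sec, 0, sizeof(sec));
	sec.level = BT_SECURITY_MEDIUM;
	setsockopt(sk, SOL_BLUETOOTH, BT_SECURITY, &sec, sizeof(sec));

	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	addr.l2_psm = htobs(0x1001);		/* placeholder PSM */
	str2ba(peer, &addr.l2_bdaddr);		/* placeholder address */

	if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(sk);
		return -1;
	}

	return sk;	/* ready for send()/recv() via l2cap_sock_sendmsg() */
}

With SOCK_STREAM instead of SOCK_SEQPACKET, l2cap_sock_init() above defaults the channel to ERTM unless disable_ertm is set.
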
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index f827fd908380..f5ef7a3374c7 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -22,7 +22,7 @@
/* Bluetooth HCI Management interface */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -32,6 +32,16 @@
#define MGMT_VERSION 0
#define MGMT_REVISION 1
+struct pending_cmd {
+ struct list_head list;
+ __u16 opcode;
+ int index;
+ void *cmd;
+ struct sock *sk;
+};
+
+LIST_HEAD(cmd_list);
+
static int cmd_status(struct sock *sk, u16 cmd, u8 status)
{
struct sk_buff *skb;
@@ -59,29 +69,26 @@ static int cmd_status(struct sock *sk, u16 cmd, u8 status)
return 0;
}
-static int read_version(struct sock *sk)
+static int cmd_complete(struct sock *sk, u16 cmd, void *rp, size_t rp_len)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_complete *ev;
- struct mgmt_rp_read_version *rp;
BT_DBG("sock %p", sk);
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
- hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
- ev = (void *) skb_put(skb, sizeof(*ev));
- put_unaligned_le16(MGMT_OP_READ_VERSION, &ev->opcode);
+ hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
+ hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
- rp = (void *) skb_put(skb, sizeof(*rp));
- rp->version = MGMT_VERSION;
- put_unaligned_le16(MGMT_REVISION, &rp->revision);
+ ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
+ put_unaligned_le16(cmd, &ev->opcode);
+ memcpy(ev->data, rp, rp_len);
if (sock_queue_rcv_skb(sk, skb) < 0)
kfree_skb(skb);
@@ -89,16 +96,25 @@ static int read_version(struct sock *sk)
return 0;
}
+static int read_version(struct sock *sk)
+{
+ struct mgmt_rp_read_version rp;
+
+ BT_DBG("sock %p", sk);
+
+ rp.version = MGMT_VERSION;
+ put_unaligned_le16(MGMT_REVISION, &rp.revision);
+
+ return cmd_complete(sk, MGMT_OP_READ_VERSION, &rp, sizeof(rp));
+}
+
static int read_index_list(struct sock *sk)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
- struct mgmt_ev_cmd_complete *ev;
struct mgmt_rp_read_index_list *rp;
struct list_head *p;
- size_t body_len;
+ size_t rp_len;
u16 count;
- int i;
+ int i, err;
BT_DBG("sock %p", sk);
@@ -109,43 +125,43 @@ static int read_index_list(struct sock *sk)
count++;
}
- body_len = sizeof(*ev) + sizeof(*rp) + (2 * count);
- skb = alloc_skb(sizeof(*hdr) + body_len, GFP_ATOMIC);
- if (!skb)
+ rp_len = sizeof(*rp) + (2 * count);
+ rp = kmalloc(rp_len, GFP_ATOMIC);
+ if (!rp) {
+ read_unlock(&hci_dev_list_lock);
return -ENOMEM;
+ }
- hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
- hdr->len = cpu_to_le16(body_len);
-
- ev = (void *) skb_put(skb, sizeof(*ev));
- put_unaligned_le16(MGMT_OP_READ_INDEX_LIST, &ev->opcode);
-
- rp = (void *) skb_put(skb, sizeof(*rp) + (2 * count));
put_unaligned_le16(count, &rp->num_controllers);
i = 0;
list_for_each(p, &hci_dev_list) {
struct hci_dev *d = list_entry(p, struct hci_dev, list);
+
+ hci_del_off_timer(d);
+
+ set_bit(HCI_MGMT, &d->flags);
+
+ if (test_bit(HCI_SETUP, &d->flags))
+ continue;
+
put_unaligned_le16(d->id, &rp->index[i++]);
BT_DBG("Added hci%u", d->id);
}
read_unlock(&hci_dev_list_lock);
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
+ err = cmd_complete(sk, MGMT_OP_READ_INDEX_LIST, rp, rp_len);
- return 0;
+ kfree(rp);
+
+ return err;
}
static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
- struct mgmt_ev_cmd_complete *ev;
- struct mgmt_rp_read_info *rp;
- struct mgmt_cp_read_info *cp;
+ struct mgmt_rp_read_info rp;
+ struct mgmt_cp_read_info *cp = (void *) data;
struct hci_dev *hdev;
u16 dev_id;
@@ -154,18 +170,333 @@ static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
if (len != 2)
return cmd_status(sk, MGMT_OP_READ_INFO, EINVAL);
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC);
+ dev_id = get_unaligned_le16(&cp->index);
+
+ BT_DBG("request for hci%u", dev_id);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV);
+
+ hci_del_off_timer(hdev);
+
+ hci_dev_lock_bh(hdev);
+
+ set_bit(HCI_MGMT, &hdev->flags);
+
+ put_unaligned_le16(hdev->id, &rp.index);
+ rp.type = hdev->dev_type;
+
+ rp.powered = test_bit(HCI_UP, &hdev->flags);
+ rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
+ rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
+	rp.pairable = test_bit(HCI_PAIRABLE, &hdev->flags);
+
+ if (test_bit(HCI_AUTH, &hdev->flags))
+ rp.sec_mode = 3;
+ else if (hdev->ssp_mode > 0)
+ rp.sec_mode = 4;
+ else
+ rp.sec_mode = 2;
+
+ bacpy(&rp.bdaddr, &hdev->bdaddr);
+ memcpy(rp.features, hdev->features, 8);
+ memcpy(rp.dev_class, hdev->dev_class, 3);
+ put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
+ rp.hci_ver = hdev->hci_ver;
+ put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return cmd_complete(sk, MGMT_OP_READ_INFO, &rp, sizeof(rp));
+}
+
+static void mgmt_pending_free(struct pending_cmd *cmd)
+{
+ sock_put(cmd->sk);
+ kfree(cmd->cmd);
+ kfree(cmd);
+}
+
+static int mgmt_pending_add(struct sock *sk, u16 opcode, int index,
+ void *data, u16 len)
+{
+ struct pending_cmd *cmd;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->opcode = opcode;
+ cmd->index = index;
+
+ cmd->cmd = kmalloc(len, GFP_ATOMIC);
+ if (!cmd->cmd) {
+ kfree(cmd);
+ return -ENOMEM;
+ }
+
+ memcpy(cmd->cmd, data, len);
+
+ cmd->sk = sk;
+ sock_hold(sk);
+
+ list_add(&cmd->list, &cmd_list);
+
+ return 0;
+}
+
+static void mgmt_pending_foreach(u16 opcode, int index,
+ void (*cb)(struct pending_cmd *cmd, void *data),
+ void *data)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &cmd_list) {
+ struct pending_cmd *cmd;
+
+ cmd = list_entry(p, struct pending_cmd, list);
+
+ if (cmd->opcode != opcode)
+ continue;
+
+ if (index >= 0 && cmd->index != index)
+ continue;
+
+ cb(cmd, data);
+ }
+}
+
+static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
+{
+ struct list_head *p;
+
+ list_for_each(p, &cmd_list) {
+ struct pending_cmd *cmd;
+
+ cmd = list_entry(p, struct pending_cmd, list);
+
+ if (cmd->opcode != opcode)
+ continue;
+
+ if (index >= 0 && cmd->index != index)
+ continue;
+
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static void mgmt_pending_remove(u16 opcode, int index)
+{
+ struct pending_cmd *cmd;
+
+ cmd = mgmt_pending_find(opcode, index);
+ if (cmd == NULL)
+ return;
+
+ list_del(&cmd->list);
+ mgmt_pending_free(cmd);
+}
+
+static int set_powered(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct mgmt_mode *cp;
+ struct hci_dev *hdev;
+ u16 dev_id;
+ int ret, up;
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ BT_DBG("request for hci%u", dev_id);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_SET_POWERED, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ up = test_bit(HCI_UP, &hdev->flags);
+ if ((cp->val && up) || (!cp->val && !up)) {
+ ret = cmd_status(sk, MGMT_OP_SET_POWERED, EALREADY);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_POWERED, dev_id)) {
+ ret = cmd_status(sk, MGMT_OP_SET_POWERED, EBUSY);
+ goto failed;
+ }
+
+ ret = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, dev_id, data, len);
+ if (ret < 0)
+ goto failed;
+
+ if (cp->val)
+ queue_work(hdev->workqueue, &hdev->power_on);
+ else
+ queue_work(hdev->workqueue, &hdev->power_off);
+
+ ret = 0;
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+ return ret;
+}
+
+static int set_discoverable(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct mgmt_mode *cp;
+ struct hci_dev *hdev;
+ u16 dev_id;
+ u8 scan;
+ int err;
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ BT_DBG("request for hci%u", dev_id);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, dev_id) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, dev_id)) {
+ err = cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, EBUSY);
+ goto failed;
+ }
+
+ if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
+ test_bit(HCI_PSCAN, &hdev->flags)) {
+ err = cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, EALREADY);
+ goto failed;
+ }
+
+ err = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, dev_id, data, len);
+ if (err < 0)
+ goto failed;
+
+ scan = SCAN_PAGE;
+
+ if (cp->val)
+ scan |= SCAN_INQUIRY;
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ if (err < 0)
+ mgmt_pending_remove(MGMT_OP_SET_DISCOVERABLE, dev_id);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_connectable(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct mgmt_mode *cp;
+ struct hci_dev *hdev;
+ u16 dev_id;
+ u8 scan;
+ int err;
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ BT_DBG("request for hci%u", dev_id);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_SET_CONNECTABLE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, dev_id) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, dev_id)) {
+ err = cmd_status(sk, MGMT_OP_SET_CONNECTABLE, EBUSY);
+ goto failed;
+ }
+
+ if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
+ err = cmd_status(sk, MGMT_OP_SET_CONNECTABLE, EALREADY);
+ goto failed;
+ }
+
+ err = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, dev_id, data, len);
+ if (err < 0)
+ goto failed;
+
+ if (cp->val)
+ scan = SCAN_PAGE;
+ else
+ scan = 0;
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ if (err < 0)
+ mgmt_pending_remove(MGMT_OP_SET_CONNECTABLE, dev_id);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int mgmt_event(u16 event, void *data, u16 data_len, struct sock *skip_sk)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+
+ skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
+ bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
+
hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
- hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
+ hdr->opcode = cpu_to_le16(event);
+ hdr->len = cpu_to_le16(data_len);
- ev = (void *) skb_put(skb, sizeof(*ev));
- put_unaligned_le16(MGMT_OP_READ_INFO, &ev->opcode);
+ memcpy(skb_put(skb, data_len), data, data_len);
- rp = (void *) skb_put(skb, sizeof(*rp));
+ hci_send_to_sock(NULL, skb, skip_sk);
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
+{
+ struct mgmt_mode rp;
+
+ put_unaligned_le16(index, &rp.index);
+ rp.val = val;
+
+ return cmd_complete(sk, opcode, &rp, sizeof(rp));
+}
+
+static int set_pairable(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct mgmt_mode *cp, ev;
+ struct hci_dev *hdev;
+ u16 dev_id;
+ int err;
cp = (void *) data;
dev_id = get_unaligned_le16(&cp->index);
@@ -173,43 +504,547 @@ static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
BT_DBG("request for hci%u", dev_id);
hdev = hci_dev_get(dev_id);
- if (!hdev) {
- kfree_skb(skb);
- return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_SET_PAIRABLE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (cp->val)
+ set_bit(HCI_PAIRABLE, &hdev->flags);
+ else
+ clear_bit(HCI_PAIRABLE, &hdev->flags);
+
+ err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, dev_id, cp->val);
+ if (err < 0)
+ goto failed;
+
+ put_unaligned_le16(dev_id, &ev.index);
+ ev.val = cp->val;
+
+ err = mgmt_event(MGMT_EV_PAIRABLE, &ev, sizeof(ev), sk);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static u8 get_service_classes(struct hci_dev *hdev)
+{
+ struct list_head *p;
+ u8 val = 0;
+
+ list_for_each(p, &hdev->uuids) {
+ struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
+
+ val |= uuid->svc_hint;
+ }
+
+ return val;
+}
+
+static int update_class(struct hci_dev *hdev)
+{
+ u8 cod[3];
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ return 0;
+
+ cod[0] = hdev->minor_class;
+ cod[1] = hdev->major_class;
+ cod[2] = get_service_classes(hdev);
+
+ if (memcmp(cod, hdev->dev_class, 3) == 0)
+ return 0;
+
+ return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+}
+
+static int add_uuid(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct mgmt_cp_add_uuid *cp;
+ struct hci_dev *hdev;
+ struct bt_uuid *uuid;
+ u16 dev_id;
+ int err;
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ BT_DBG("request for hci%u", dev_id);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_ADD_UUID, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
+ if (!uuid) {
+ err = -ENOMEM;
+ goto failed;
}
+ memcpy(uuid->uuid, cp->uuid, 16);
+ uuid->svc_hint = cp->svc_hint;
+
+ list_add(&uuid->list, &hdev->uuids);
+
+ err = update_class(hdev);
+ if (err < 0)
+ goto failed;
+
+ err = cmd_complete(sk, MGMT_OP_ADD_UUID, &dev_id, sizeof(dev_id));
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int remove_uuid(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct list_head *p, *n;
+	struct mgmt_cp_remove_uuid *cp;
+ struct hci_dev *hdev;
+ u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ u16 dev_id;
+ int err, found;
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ BT_DBG("request for hci%u", dev_id);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_REMOVE_UUID, ENODEV);
+
hci_dev_lock_bh(hdev);
- put_unaligned_le16(hdev->id, &rp->index);
- rp->type = hdev->dev_type;
+ if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
+ err = hci_uuids_clear(hdev);
+ goto unlock;
+ }
- rp->powered = test_bit(HCI_UP, &hdev->flags);
- rp->discoverable = test_bit(HCI_ISCAN, &hdev->flags);
- rp->pairable = test_bit(HCI_PSCAN, &hdev->flags);
+ found = 0;
- if (test_bit(HCI_AUTH, &hdev->flags))
- rp->sec_mode = 3;
- else if (hdev->ssp_mode > 0)
- rp->sec_mode = 4;
- else
- rp->sec_mode = 2;
+ list_for_each_safe(p, n, &hdev->uuids) {
+ struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
- bacpy(&rp->bdaddr, &hdev->bdaddr);
- memcpy(rp->features, hdev->features, 8);
- memcpy(rp->dev_class, hdev->dev_class, 3);
- put_unaligned_le16(hdev->manufacturer, &rp->manufacturer);
- rp->hci_ver = hdev->hci_ver;
- put_unaligned_le16(hdev->hci_rev, &rp->hci_rev);
+ if (memcmp(match->uuid, cp->uuid, 16) != 0)
+ continue;
+ list_del(&match->list);
+ found++;
+ }
+
+ if (found == 0) {
+ err = cmd_status(sk, MGMT_OP_REMOVE_UUID, ENOENT);
+ goto unlock;
+ }
+
+ err = update_class(hdev);
+ if (err < 0)
+ goto unlock;
+
+ err = cmd_complete(sk, MGMT_OP_REMOVE_UUID, &dev_id, sizeof(dev_id));
+
+unlock:
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
+ return err;
+}
+
+static int set_dev_class(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_set_dev_class *cp;
+ u16 dev_id;
+ int err;
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ BT_DBG("request for hci%u", dev_id);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_SET_DEV_CLASS, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ hdev->major_class = cp->major;
+ hdev->minor_class = cp->minor;
+
+ err = update_class(hdev);
+
+ if (err == 0)
+ err = cmd_complete(sk, MGMT_OP_SET_DEV_CLASS, &dev_id,
+ sizeof(dev_id));
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_service_cache(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_set_service_cache *cp;
+ u16 dev_id;
+ int err;
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ BT_DBG("hci%u enable %d", dev_id, cp->enable);
+
+ if (cp->enable) {
+ set_bit(HCI_SERVICE_CACHE, &hdev->flags);
+ err = 0;
+ } else {
+ clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
+ err = update_class(hdev);
+ }
+
+ if (err == 0)
+ err = cmd_complete(sk, MGMT_OP_SET_SERVICE_CACHE, &dev_id,
+ sizeof(dev_id));
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int load_keys(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_load_keys *cp;
+ u16 dev_id, key_count, expected_len;
+ int i;
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+ key_count = get_unaligned_le16(&cp->key_count);
+
+ expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
+ if (expected_len != len) {
+		BT_ERR("load_keys: expected %u bytes, got %u bytes",
+				expected_len, len);
+ return -EINVAL;
+ }
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_LOAD_KEYS, ENODEV);
+
+ BT_DBG("hci%u debug_keys %u key_count %u", dev_id, cp->debug_keys,
+ key_count);
+
+ hci_dev_lock_bh(hdev);
+
+ hci_link_keys_clear(hdev);
+
+ set_bit(HCI_LINK_KEYS, &hdev->flags);
+
+ if (cp->debug_keys)
+ set_bit(HCI_DEBUG_KEYS, &hdev->flags);
+ else
+ clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
+
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_key_info *key = &cp->keys[i];
+
+ hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->type,
+ key->pin_len);
+ }
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
return 0;
}
+static int remove_key(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_remove_key *cp;
+ struct hci_conn *conn;
+ u16 dev_id;
+ int err;
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_REMOVE_KEY, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ err = hci_remove_link_key(hdev, &cp->bdaddr);
+ if (err < 0) {
+ err = cmd_status(sk, MGMT_OP_REMOVE_KEY, -err);
+ goto unlock;
+ }
+
+ err = 0;
+
+ if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
+ goto unlock;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (conn) {
+ struct hci_cp_disconnect dc;
+
+ put_unaligned_le16(conn->handle, &dc.handle);
+ dc.reason = 0x13; /* Remote User Terminated Connection */
+		err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ }
+
+unlock:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int disconnect(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_disconnect *cp;
+ struct hci_cp_disconnect dc;
+ struct hci_conn *conn;
+ u16 dev_id;
+ int err;
+
+ BT_DBG("");
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_DISCONNECT, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, MGMT_OP_DISCONNECT, ENETDOWN);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_DISCONNECT, dev_id)) {
+ err = cmd_status(sk, MGMT_OP_DISCONNECT, EBUSY);
+ goto failed;
+ }
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (!conn) {
+ err = cmd_status(sk, MGMT_OP_DISCONNECT, ENOTCONN);
+ goto failed;
+ }
+
+ err = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, dev_id, data, len);
+ if (err < 0)
+ goto failed;
+
+ put_unaligned_le16(conn->handle, &dc.handle);
+ dc.reason = 0x13; /* Remote User Terminated Connection */
+
+ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ if (err < 0)
+ mgmt_pending_remove(MGMT_OP_DISCONNECT, dev_id);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int get_connections(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct mgmt_cp_get_connections *cp;
+ struct mgmt_rp_get_connections *rp;
+ struct hci_dev *hdev;
+ struct list_head *p;
+ size_t rp_len;
+ u16 dev_id, count;
+ int i, err;
+
+ BT_DBG("");
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_GET_CONNECTIONS, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ count = 0;
+	count = 0;
+	list_for_each(p, &hdev->conn_hash.list)
+		count++;
+
+ rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
+ rp = kmalloc(rp_len, GFP_ATOMIC);
+ if (!rp) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ put_unaligned_le16(dev_id, &rp->index);
+ put_unaligned_le16(count, &rp->conn_count);
+
+ read_lock(&hci_dev_list_lock);
+
+ i = 0;
+ list_for_each(p, &hdev->conn_hash.list) {
+ struct hci_conn *c = list_entry(p, struct hci_conn, list);
+
+ bacpy(&rp->conn[i++], &c->dst);
+ }
+
+ read_unlock(&hci_dev_list_lock);
+
+ err = cmd_complete(sk, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
+
+unlock:
+ kfree(rp);
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+ return err;
+}
+
+static int pin_code_reply(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_pin_code_reply *cp;
+ struct hci_cp_pin_code_reply reply;
+ u16 dev_id;
+ int err;
+
+ BT_DBG("");
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+		return cmd_status(sk, MGMT_OP_PIN_CODE_REPLY, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
+ goto failed;
+ }
+
+ err = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, dev_id, data, len);
+ if (err < 0)
+ goto failed;
+
+ bacpy(&reply.bdaddr, &cp->bdaddr);
+ reply.pin_len = cp->pin_len;
+ memcpy(reply.pin_code, cp->pin_code, 16);
+
+ err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
+ if (err < 0)
+ mgmt_pending_remove(MGMT_OP_PIN_CODE_REPLY, dev_id);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int pin_code_neg_reply(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_pin_code_neg_reply *cp;
+ u16 dev_id;
+ int err;
+
+ BT_DBG("");
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_PIN_CODE_NEG_REPLY, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, MGMT_OP_PIN_CODE_NEG_REPLY, ENETDOWN);
+ goto failed;
+ }
+
+ err = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, dev_id,
+ data, len);
+ if (err < 0)
+ goto failed;
+
+ err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(bdaddr_t),
+ &cp->bdaddr);
+ if (err < 0)
+ mgmt_pending_remove(MGMT_OP_PIN_CODE_NEG_REPLY, dev_id);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_io_capability(struct sock *sk, unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_set_io_capability *cp;
+ u16 dev_id;
+
+ BT_DBG("");
+
+ cp = (void *) data;
+ dev_id = get_unaligned_le16(&cp->index);
+
+ hdev = hci_dev_get(dev_id);
+ if (!hdev)
+ return cmd_status(sk, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ hdev->io_capability = cp->io_capability;
+
+ BT_DBG("%s IO capability set to 0x%02x", hdev->name,
+ hdev->io_capability);
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return cmd_complete(sk, MGMT_OP_SET_IO_CAPABILITY,
+ &dev_id, sizeof(dev_id));
+}
+
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
unsigned char *buf;
@@ -250,6 +1085,51 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
case MGMT_OP_READ_INFO:
err = read_controller_info(sk, buf + sizeof(*hdr), len);
break;
+ case MGMT_OP_SET_POWERED:
+ err = set_powered(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_DISCOVERABLE:
+ err = set_discoverable(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_CONNECTABLE:
+ err = set_connectable(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_PAIRABLE:
+ err = set_pairable(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_ADD_UUID:
+ err = add_uuid(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_REMOVE_UUID:
+ err = remove_uuid(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_DEV_CLASS:
+ err = set_dev_class(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_SERVICE_CACHE:
+ err = set_service_cache(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_LOAD_KEYS:
+ err = load_keys(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_REMOVE_KEY:
+ err = remove_key(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_DISCONNECT:
+ err = disconnect(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_GET_CONNECTIONS:
+ err = get_connections(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_PIN_CODE_REPLY:
+ err = pin_code_reply(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_PIN_CODE_NEG_REPLY:
+ err = pin_code_neg_reply(sk, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_SET_IO_CAPABILITY:
+ err = set_io_capability(sk, buf + sizeof(*hdr), len);
+ break;
default:
BT_DBG("Unknown op %u", opcode);
err = cmd_status(sk, opcode, 0x01);
@@ -266,43 +1146,247 @@ done:
return err;
}
-static int mgmt_event(u16 event, void *data, u16 data_len)
+int mgmt_index_added(u16 index)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
+ struct mgmt_ev_index_added ev;
- skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
+ put_unaligned_le16(index, &ev.index);
- bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
+ return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev), NULL);
+}
- hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(event);
- hdr->len = cpu_to_le16(data_len);
+int mgmt_index_removed(u16 index)
+{
+ struct mgmt_ev_index_added ev;
- memcpy(skb_put(skb, data_len), data, data_len);
+ put_unaligned_le16(index, &ev.index);
- hci_send_to_sock(NULL, skb);
- kfree_skb(skb);
+ return mgmt_event(MGMT_EV_INDEX_REMOVED, &ev, sizeof(ev), NULL);
+}
- return 0;
+struct cmd_lookup {
+ u8 val;
+ struct sock *sk;
+};
+
+static void mode_rsp(struct pending_cmd *cmd, void *data)
+{
+ struct mgmt_mode *cp = cmd->cmd;
+ struct cmd_lookup *match = data;
+
+ if (cp->val != match->val)
+ return;
+
+ send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
+
+ list_del(&cmd->list);
+
+ if (match->sk == NULL) {
+ match->sk = cmd->sk;
+ sock_hold(match->sk);
+ }
+
+ mgmt_pending_free(cmd);
}
-int mgmt_index_added(u16 index)
+int mgmt_powered(u16 index, u8 powered)
{
- struct mgmt_ev_index_added ev;
+ struct mgmt_mode ev;
+ struct cmd_lookup match = { powered, NULL };
+ int ret;
+
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
put_unaligned_le16(index, &ev.index);
+ ev.val = powered;
+
+ ret = mgmt_event(MGMT_EV_POWERED, &ev, sizeof(ev), match.sk);
- return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev));
+ if (match.sk)
+ sock_put(match.sk);
+
+ return ret;
}
-int mgmt_index_removed(u16 index)
+int mgmt_discoverable(u16 index, u8 discoverable)
{
- struct mgmt_ev_index_added ev;
+ struct mgmt_mode ev;
+ struct cmd_lookup match = { discoverable, NULL };
+ int ret;
+
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index,
+ mode_rsp, &match);
+
+ put_unaligned_le16(index, &ev.index);
+ ev.val = discoverable;
+
+ ret = mgmt_event(MGMT_EV_DISCOVERABLE, &ev, sizeof(ev), match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ return ret;
+}
+
+int mgmt_connectable(u16 index, u8 connectable)
+{
+ struct mgmt_mode ev;
+ struct cmd_lookup match = { connectable, NULL };
+ int ret;
+
+ mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
+
+ put_unaligned_le16(index, &ev.index);
+ ev.val = connectable;
+
+ ret = mgmt_event(MGMT_EV_CONNECTABLE, &ev, sizeof(ev), match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ return ret;
+}
+
+int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type)
+{
+ struct mgmt_ev_new_key ev;
+
+ memset(&ev, 0, sizeof(ev));
+
+ put_unaligned_le16(index, &ev.index);
+
+ bacpy(&ev.key.bdaddr, &key->bdaddr);
+ ev.key.type = key->type;
+ memcpy(ev.key.val, key->val, 16);
+ ev.key.pin_len = key->pin_len;
+ ev.old_key_type = old_key_type;
+
+ return mgmt_event(MGMT_EV_NEW_KEY, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_connected(u16 index, bdaddr_t *bdaddr)
+{
+ struct mgmt_ev_connected ev;
put_unaligned_le16(index, &ev.index);
+ bacpy(&ev.bdaddr, bdaddr);
- return mgmt_event(MGMT_EV_INDEX_REMOVED, &ev, sizeof(ev));
+ return mgmt_event(MGMT_EV_CONNECTED, &ev, sizeof(ev), NULL);
+}
+
+static void disconnect_rsp(struct pending_cmd *cmd, void *data)
+{
+ struct mgmt_cp_disconnect *cp = cmd->cmd;
+ struct sock **sk = data;
+ struct mgmt_rp_disconnect rp;
+
+ put_unaligned_le16(cmd->index, &rp.index);
+ bacpy(&rp.bdaddr, &cp->bdaddr);
+
+ cmd_complete(cmd->sk, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
+
+ *sk = cmd->sk;
+ sock_hold(*sk);
+
+ list_del(&cmd->list);
+ mgmt_pending_free(cmd);
+}
+
+int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
+{
+ struct mgmt_ev_disconnected ev;
+ struct sock *sk = NULL;
+ int err;
+
+ mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
+
+ put_unaligned_le16(index, &ev.index);
+ bacpy(&ev.bdaddr, bdaddr);
+
+ err = mgmt_event(MGMT_EV_DISCONNECTED, &ev, sizeof(ev), sk);
+
+ if (sk)
+ sock_put(sk);
+
+ return err;
+}
+
+int mgmt_disconnect_failed(u16 index)
+{
+ struct pending_cmd *cmd;
+ int err;
+
+ cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
+ if (!cmd)
+ return -ENOENT;
+
+ err = cmd_status(cmd->sk, MGMT_OP_DISCONNECT, EIO);
+
+ list_del(&cmd->list);
+ mgmt_pending_free(cmd);
+
+ return err;
+}
+
+int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+ struct mgmt_ev_connect_failed ev;
+
+ put_unaligned_le16(index, &ev.index);
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.status = status;
+
+ return mgmt_event(MGMT_EV_CONNECT_FAILED, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr)
+{
+ struct mgmt_ev_pin_code_request ev;
+
+ put_unaligned_le16(index, &ev.index);
+ bacpy(&ev.bdaddr, bdaddr);
+
+ return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+ struct pending_cmd *cmd;
+ int err;
+
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
+ if (!cmd)
+ return -ENOENT;
+
+ if (status != 0)
+ err = cmd_status(cmd->sk, MGMT_OP_PIN_CODE_REPLY, status);
+ else
+ err = cmd_complete(cmd->sk, MGMT_OP_PIN_CODE_REPLY,
+ bdaddr, sizeof(*bdaddr));
+
+ list_del(&cmd->list);
+ mgmt_pending_free(cmd);
+
+ return err;
+}
+
+int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+{
+ struct pending_cmd *cmd;
+ int err;
+
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
+ if (!cmd)
+ return -ENOENT;
+
+ if (status != 0)
+ err = cmd_status(cmd->sk, MGMT_OP_PIN_CODE_NEG_REPLY, status);
+ else
+ err = cmd_complete(cmd->sk, MGMT_OP_PIN_CODE_NEG_REPLY,
+ bdaddr, sizeof(*bdaddr));
+
+ list_del(&cmd->list);
+ mgmt_pending_free(cmd);
+
+ return err;
}
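
All of the handlers added above follow one pattern: the command arrives over the HCI control channel, the controller index is read out of the command parameters (at this stage the header carries only opcode and length), and the reply comes back as an MGMT_EV_CMD_COMPLETE event or a command status. Below is a rough user-space sketch of that exchange; the header layout mirrors the kernel's mgmt.h of this series, which is not yet exported to user space, so the structure and the constants marked below are assumptions:

/* Rough sketch only: the mgmt wire format follows the kernel headers of
 * this series and is not yet exported to user space, so the layout and
 * the constants below are assumptions copied by hand. */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>	/* AF_BLUETOOTH, BTPROTO_HCI, htobs() */

struct mgmt_hdr {			/* opcode + length; index lives in the params */
	uint16_t opcode;
	uint16_t len;
} __attribute__((packed));

#define MGMT_OP_READ_VERSION	0x0001	/* assumed, from the kernel's mgmt.h */
#define HCI_CHANNEL_CONTROL	1	/* assumed, from the kernel's hci_sock.h */
#define HCI_DEV_NONE		0xffff

struct sockaddr_hci_ctrl {		/* mirrors the kernel's struct sockaddr_hci */
	sa_family_t	hci_family;
	unsigned short	hci_dev;
	unsigned short	hci_channel;
};

static int mgmt_read_version(void)
{
	struct sockaddr_hci_ctrl addr;
	struct mgmt_hdr hdr;
	int sk;

	sk = socket(PF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = HCI_DEV_NONE;
	addr.hci_channel = HCI_CHANNEL_CONTROL;
	if (bind(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(sk);
		return -1;
	}

	hdr.opcode = htobs(MGMT_OP_READ_VERSION);
	hdr.len = htobs(0);		/* READ_VERSION takes no parameters */
	if (write(sk, &hdr, sizeof(hdr)) < 0) {
		close(sk);
		return -1;
	}

	/* The reply is queued back on the same socket as an
	 * MGMT_EV_CMD_COMPLETE event carrying mgmt_rp_read_version. */
	return sk;
}

mgmt_control() above dispatches the opcode and read_version() answers through cmd_complete().
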
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 6b83776534fb..c9973932456f 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -2154,8 +2154,6 @@ static int __init rfcomm_init(void)
{
int err;
- l2cap_load();
-
hci_register_cb(&rfcomm_cb);
rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2575c2db6404..d7b9af4703d0 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
break;
}
+ tty_unlock();
schedule();
+ tty_lock();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->wait, &wait);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 960c6d1637da..c9348ddda877 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -50,8 +50,6 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/sco.h>
-#define VERSION "0.6"
-
static int disable_esco;
static const struct proto_ops sco_sock_ops;
@@ -703,6 +701,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
break;
}
+ memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
@@ -1023,7 +1022,7 @@ static struct hci_proto sco_hci_proto = {
.recv_scodata = sco_recv_scodata
};
-static int __init sco_init(void)
+int __init sco_init(void)
{
int err;
@@ -1051,7 +1050,6 @@ static int __init sco_init(void)
BT_ERR("Failed to create SCO debug file");
}
- BT_INFO("SCO (Voice Link) ver %s", VERSION);
BT_INFO("SCO socket layer initialized");
return 0;
@@ -1061,7 +1059,7 @@ error:
return err;
}
-static void __exit sco_exit(void)
+void __exit sco_exit(void)
{
debugfs_remove(sco_debugfs);
@@ -1074,14 +1072,5 @@ static void __exit sco_exit(void)
proto_unregister(&sco_proto);
}
-module_init(sco_init);
-module_exit(sco_exit);
-
module_param(disable_esco, bool, 0644);
MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
-
-MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("bt-proto-2");
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index c766056d0488..dbf5e4006bc1 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -17,7 +17,7 @@ comment "CFG80211 needs to be enabled for MAC80211"
if MAC80211 != n
config MAC80211_HAS_RC
- def_bool n
+ bool
config MAC80211_RC_PID
bool "PID controller based rate control algorithm" if EXPERT
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 2ba3af850dda..140503d4c97a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1863,6 +1863,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
wk->type = IEEE80211_WORK_OFFCHANNEL_TX;
wk->chan = chan;
+ wk->chan_type = channel_type;
wk->sdata = sdata;
wk->done = ieee80211_offchan_tx_done;
wk->offchan_tx.frame = skb;
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 1f02e599a318..51f0d780dafa 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -60,6 +60,10 @@ static const struct file_operations name## _ops = { \
debugfs_create_file(#name, mode, phyd, local, &name## _ops);
+DEBUGFS_READONLY_FILE(user_power, "%d",
+ local->user_power_level);
+DEBUGFS_READONLY_FILE(power, "%d",
+ local->hw.conf.power_level);
DEBUGFS_READONLY_FILE(frequency, "%d",
local->hw.conf.channel->center_freq);
DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
@@ -391,6 +395,8 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(uapsd_queues);
DEBUGFS_ADD(uapsd_max_sp_len);
DEBUGFS_ADD(channel_type);
+ DEBUGFS_ADD(user_power);
+ DEBUGFS_ADD(power);
statsd = debugfs_create_dir("statistics", phyd);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 775fb63471c4..a42aa61269ea 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -664,12 +664,13 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
}
static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgmt *mgmt,
- size_t len)
+ struct sk_buff *req)
{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(req);
+ struct ieee80211_mgmt *mgmt = (void *)req->data;
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
- int tx_last_beacon;
+ int tx_last_beacon, len = req->len;
struct sk_buff *skb;
struct ieee80211_mgmt *resp;
u8 *pos, *end;
@@ -689,7 +690,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
mgmt->bssid, tx_last_beacon);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
- if (!tx_last_beacon)
+ if (!tx_last_beacon && !(rx_status->rx_flags & IEEE80211_RX_RA_MATCH))
return;
if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 &&
@@ -786,7 +787,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_PROBE_REQ:
- ieee80211_rx_mgmt_probe_req(sdata, mgmt, skb->len);
+ ieee80211_rx_mgmt_probe_req(sdata, skb);
break;
case IEEE80211_STYPE_PROBE_RESP:
ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f2ef15d910a5..0a570a111a84 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1066,8 +1066,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
void ieee80211_configure_filter(struct ieee80211_local *local);
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
-extern bool ieee80211_disable_40mhz_24ghz;
-
/* STA code */
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index c155c0b69426..2543e48bd813 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -34,7 +34,7 @@
#include "debugfs.h"
-bool ieee80211_disable_40mhz_24ghz;
+static bool ieee80211_disable_40mhz_24ghz;
module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
"Disable 40MHz support in the 2.4GHz band");
@@ -112,7 +112,13 @@ bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
/* This logic needs to match logic in ieee80211_hw_config */
if (local->scan_channel) {
chan = local->scan_channel;
- channel_type = NL80211_CHAN_NO_HT;
+ /* If scanning on oper channel, use whatever channel-type
+ * is currently in use.
+ */
+ if (chan == local->oper_channel)
+ channel_type = local->_oper_channel_type;
+ else
+ channel_type = NL80211_CHAN_NO_HT;
} else if (local->tmp_channel) {
chan = scan_chan = local->tmp_channel;
channel_type = local->tmp_channel_type;
@@ -151,7 +157,13 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
if (scan_chan) {
chan = scan_chan;
- channel_type = NL80211_CHAN_NO_HT;
+ /* If scanning on oper channel, use whatever channel-type
+ * is currently in use.
+ */
+ if (chan == local->oper_channel)
+ channel_type = local->_oper_channel_type;
+ else
+ channel_type = NL80211_CHAN_NO_HT;
} else if (local->tmp_channel) {
chan = scan_chan = local->tmp_channel;
channel_type = local->tmp_channel_type;
@@ -187,7 +199,8 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
changed |= IEEE80211_CONF_CHANGE_SMPS;
}
- if (scan_chan)
+ if ((local->scanning & SCAN_SW_SCANNING) ||
+ (local->scanning & SCAN_HW_SCANNING))
power = chan->max_power;
else
power = local->power_constr_level ?
@@ -710,6 +723,18 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
}
channels += sband->n_channels;
+ /*
+ * Since ieee80211_disable_40mhz_24ghz is global, we can
+ * modify the sband's ht data even if the driver uses a
+ * global structure for that.
+ */
+ if (ieee80211_disable_40mhz_24ghz &&
+ band == IEEE80211_BAND_2GHZ &&
+ sband->ht_cap.ht_supported) {
+ sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
+
if (max_bitrates < sband->n_bitrates)
max_bitrates = sband->n_bitrates;
supp_ht = supp_ht || sband->ht_cap.ht_supported;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f77adf1a520e..7b3f9df725bd 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1071,6 +1071,12 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
if (is_multicast_ether_addr(hdr->addr1))
return;
+ /*
+ * In case we receive frames after disassociation.
+ */
+ if (!sdata->u.mgd.associated)
+ return;
+
ieee80211_sta_reset_conn_monitor(sdata);
}
@@ -2294,6 +2300,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
else
wk->type = IEEE80211_WORK_DIRECT_PROBE;
wk->chan = req->bss->channel;
+ wk->chan_type = NL80211_CHAN_NO_HT;
wk->sdata = sdata;
wk->done = ieee80211_probe_auth_done;
@@ -2443,6 +2450,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN);
wk->chan = req->bss->channel;
+ wk->chan_type = NL80211_CHAN_NO_HT;
wk->sdata = sdata;
wk->done = ieee80211_assoc_done;
if (!bss->dtim_period &&
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 045b2fe4a414..f502634d43af 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -832,18 +832,8 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
ieee80211_is_pspoll(hdr->frame_control)) &&
rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
- (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
- if ((!ieee80211_has_fromds(hdr->frame_control) &&
- !ieee80211_has_tods(hdr->frame_control) &&
- ieee80211_is_data(hdr->frame_control)) ||
- !(status->rx_flags & IEEE80211_RX_RA_MATCH)) {
- /* Drop IBSS frames and frames for other hosts
- * silently. */
- return RX_DROP_MONITOR;
- }
-
+ (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC))))
return RX_DROP_MONITOR;
- }
return RX_CONTINUE;
}
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 0ea6adae3e06..842954509925 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -307,11 +307,15 @@ static void __ieee80211_scan_completed_finish(struct ieee80211_hw *hw,
mutex_lock(&local->mtx);
on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+ WARN_ON(local->scanning & (SCAN_SW_SCANNING | SCAN_HW_SCANNING));
+
if (was_hw_scan || !on_oper_chan) {
if (WARN_ON(local->scan_channel))
local->scan_channel = NULL;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- }
+ } else
+ /* Set power back to normal operating levels. */
+ ieee80211_hw_config(local, 0);
if (!was_hw_scan) {
bool on_oper_chan2;
@@ -377,6 +381,9 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
ieee80211_configure_filter(local);
+ /* We need to set power level at maximum rate for scanning. */
+ ieee80211_hw_config(local, 0);
+
ieee80211_queue_delayed_work(&local->hw,
&local->scan_work,
IEEE80211_CHANNEL_TIME);
@@ -517,8 +524,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
if (ieee80211_cfg_on_oper_channel(local)) {
/* We're currently on operating channel. */
- if ((next_chan == local->oper_channel) &&
- (local->_oper_channel_type == NL80211_CHAN_NO_HT))
+ if (next_chan == local->oper_channel)
/* We don't need to move off of operating channel. */
local->next_scan_state = SCAN_SET_CHANNEL;
else
@@ -620,8 +626,7 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
local->scan_channel = chan;
/* Only call hw-config if we really need to change channels. */
- if ((chan != local->hw.conf.channel) ||
- (local->hw.conf.channel_type != NL80211_CHAN_NO_HT))
+ if (chan != local->hw.conf.channel)
if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
skip = 1;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 17ef4f4e8602..34edf7f22b0e 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -236,6 +236,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
ieee80211_stop_queues_by_reason(&local->hw,
IEEE80211_QUEUE_STOP_REASON_PS);
+ ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
ieee80211_queue_work(&local->hw,
&local->dynamic_ps_disable_work);
}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d036597aabbe..556647a910ac 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -986,12 +986,6 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
u16 cap = sband->ht_cap.cap;
__le16 tmp;
- if (ieee80211_disable_40mhz_24ghz &&
- sband->band == IEEE80211_BAND_2GHZ) {
- cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- cap &= ~IEEE80211_HT_CAP_SGI_40;
- }
-
*pos++ = WLAN_EID_HT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_ht_cap);
memset(pos, 0, sizeof(struct ieee80211_ht_cap));
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 6bf787a5b38a..204f0a4db969 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -126,12 +126,6 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
/* determine capability flags */
- if (ieee80211_disable_40mhz_24ghz &&
- sband->band == IEEE80211_BAND_2GHZ) {
- cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- cap &= ~IEEE80211_HT_CAP_SGI_40;
- }
-
switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
@@ -874,6 +868,44 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
kfree_skb(skb);
}
+static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
+ enum nl80211_channel_type oper_ct)
+{
+ switch (wk_ct) {
+ case NL80211_CHAN_NO_HT:
+ return true;
+ case NL80211_CHAN_HT20:
+ if (oper_ct != NL80211_CHAN_NO_HT)
+ return true;
+ return false;
+ case NL80211_CHAN_HT40MINUS:
+ case NL80211_CHAN_HT40PLUS:
+ return (wk_ct == oper_ct);
+ }
+ WARN_ON(1); /* shouldn't get here */
+ return false;
+}
+
+static enum nl80211_channel_type
+ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
+ enum nl80211_channel_type oper_ct)
+{
+ switch (wk_ct) {
+ case NL80211_CHAN_NO_HT:
+ return oper_ct;
+ case NL80211_CHAN_HT20:
+ if (oper_ct != NL80211_CHAN_NO_HT)
+ return oper_ct;
+ return wk_ct;
+ case NL80211_CHAN_HT40MINUS:
+ case NL80211_CHAN_HT40PLUS:
+ return wk_ct;
+ }
+ WARN_ON(1); /* shouldn't get here */
+ return wk_ct;
+}
+
+
static void ieee80211_work_timer(unsigned long data)
{
struct ieee80211_local *local = (void *) data;
@@ -927,14 +959,22 @@ static void ieee80211_work_work(struct work_struct *work)
bool on_oper_chan;
bool tmp_chan_changed = false;
bool on_oper_chan2;
+ enum nl80211_channel_type wk_ct;
on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+
+ /* Work with existing channel type if possible. */
+ wk_ct = wk->chan_type;
+ if (wk->chan == local->hw.conf.channel)
+ wk_ct = ieee80211_calc_ct(wk->chan_type,
+ local->hw.conf.channel_type);
+
if (local->tmp_channel)
if ((local->tmp_channel != wk->chan) ||
- (local->tmp_channel_type != wk->chan_type))
+ (local->tmp_channel_type != wk_ct))
tmp_chan_changed = true;
local->tmp_channel = wk->chan;
- local->tmp_channel_type = wk->chan_type;
+ local->tmp_channel_type = wk_ct;
/*
* Leave the station vifs in awake mode if they
* happen to be on the same channel as
@@ -1031,7 +1071,8 @@ static void ieee80211_work_work(struct work_struct *work)
continue;
if (wk->chan != local->tmp_channel)
continue;
- if (wk->chan_type != local->tmp_channel_type)
+		if (!ieee80211_work_ct_coexists(wk->chan_type,
+						local->tmp_channel_type))
continue;
remain_off_channel = true;
}
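
ieee80211_work_ct_coexists() and ieee80211_calc_ct() above reduce to a small rule set: NL80211_CHAN_NO_HT tolerates any operating channel type, HT20 tolerates any HT operating type, and HT40-/HT40+ only tolerate an identical type, with the operating type preferred whenever it is tolerated. A stand-alone sketch that prints the resulting table (the nl80211 enum is stubbed locally, so it builds outside the kernel tree):

/* Stand-alone illustration of the channel-type rules implemented by
 * ieee80211_work_ct_coexists()/ieee80211_calc_ct(); the nl80211 enum is
 * stubbed locally so this compiles outside the kernel tree. */
#include <stdio.h>

enum ct { NO_HT, HT20, HT40MINUS, HT40PLUS };

static const char *name[] = { "NO_HT", "HT20", "HT40-", "HT40+" };

static int coexists(enum ct wk, enum ct oper)
{
	if (wk == NO_HT)
		return 1;			/* fits under any operating type */
	if (wk == HT20)
		return oper != NO_HT;		/* any HT operating type will do */
	return wk == oper;			/* HT40-/HT40+ must match exactly */
}

static enum ct calc(enum ct wk, enum ct oper)
{
	if (wk == NO_HT)
		return oper;
	if (wk == HT20 && oper != NO_HT)
		return oper;
	return wk;				/* otherwise keep the work item's type */
}

int main(void)
{
	int wk, oper;

	for (wk = NO_HT; wk <= HT40PLUS; wk++)
		for (oper = NO_HT; oper <= HT40PLUS; oper++)
			printf("work=%-6s oper=%-6s coexists=%d use=%s\n",
			       name[wk], name[oper], coexists(wk, oper),
			       name[calc(wk, oper)]);
	return 0;
}
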