Diffstat (limited to 'net')
-rw-r--r--net/Kconfig6
-rw-r--r--net/atm/common.c4
-rw-r--r--net/atm/mpc.h4
-rw-r--r--net/atm/mpoa_caches.c4
-rw-r--r--net/batman-adv/hard-interface.c7
-rw-r--r--net/bluetooth/af_bluetooth.c18
-rw-r--r--net/bluetooth/cmtp/core.c3
-rw-r--r--net/bluetooth/hci_conn.c103
-rw-r--r--net/bluetooth/hci_core.c576
-rw-r--r--net/bluetooth/hci_request.c682
-rw-r--r--net/bluetooth/hci_request.h25
-rw-r--r--net/bluetooth/hci_sock.c200
-rw-r--r--net/bluetooth/l2cap_core.c19
-rw-r--r--net/bluetooth/mgmt.c616
-rw-r--r--net/bluetooth/smp.c7
-rw-r--r--net/bridge/br_if.c2
-rw-r--r--net/caif/caif_socket.c4
-rw-r--r--net/core/datagram.c79
-rw-r--r--net/core/dev.c56
-rw-r--r--net/core/neighbour.c4
-rw-r--r--net/core/netclassid_cgroup.c37
-rw-r--r--net/core/netprio_cgroup.c19
-rw-r--r--net/core/scm.c6
-rw-r--r--net/core/sock.c37
-rw-r--r--net/core/stream.c8
-rw-r--r--net/dccp/ipv6.c37
-rw-r--r--net/dccp/output.c2
-rw-r--r--net/dccp/proto.c3
-rw-r--r--net/decnet/af_decnet.c8
-rw-r--r--net/dns_resolver/dns_query.c2
-rw-r--r--net/dsa/dsa.c78
-rw-r--r--net/dsa/dsa_priv.h1
-rw-r--r--net/dsa/slave.c11
-rw-r--r--net/hsr/hsr_device.c2
-rw-r--r--net/ipv4/igmp.c5
-rw-r--r--net/ipv4/ip_gre.c1
-rw-r--r--net/ipv4/ip_output.c1
-rw-r--r--net/ipv4/ip_tunnel.c1
-rw-r--r--net/ipv4/ip_tunnel_core.c1
-rw-r--r--net/ipv4/ip_vti.c1
-rw-r--r--net/ipv4/ipconfig.c19
-rw-r--r--net/ipv4/ipip.c1
-rw-r--r--net/ipv4/ipmr.c764
-rw-r--r--net/ipv4/tcp.c7
-rw-r--r--net/ipv4/tcp_input.c23
-rw-r--r--net/ipv4/tcp_ipv4.c3
-rw-r--r--net/ipv4/tcp_output.c14
-rw-r--r--net/ipv4/tcp_timer.c14
-rw-r--r--net/ipv4/udp.c1
-rw-r--r--net/ipv6/addrconf.c21
-rw-r--r--net/ipv6/af_inet6.c15
-rw-r--r--net/ipv6/datagram.c4
-rw-r--r--net/ipv6/exthdrs.c3
-rw-r--r--net/ipv6/icmp.c14
-rw-r--r--net/ipv6/inet6_connection_sock.c21
-rw-r--r--net/ipv6/ip6_gre.c1
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ip6mr.c19
-rw-r--r--net/ipv6/ipv6_sockglue.c33
-rw-r--r--net/ipv6/ndisc.c10
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c9
-rw-r--r--net/ipv6/raw.c8
-rw-r--r--net/ipv6/reassembly.c10
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/syncookies.c2
-rw-r--r--net/ipv6/tcp_ipv6.c38
-rw-r--r--net/ipv6/udp.c8
-rw-r--r--net/iucv/af_iucv.c4
-rw-r--r--net/l2tp/l2tp_ip6.c8
-rw-r--r--net/l2tp/l2tp_ppp.c2
-rw-r--r--net/mac80211/agg-tx.c3
-rw-r--r--net/mac80211/cfg.c528
-rw-r--r--net/mac80211/debugfs.c1
-rw-r--r--net/mac80211/ibss.c1
-rw-r--r--net/mac80211/ieee80211_i.h25
-rw-r--r--net/mac80211/iface.c5
-rw-r--r--net/mac80211/key.c56
-rw-r--r--net/mac80211/main.c1
-rw-r--r--net/mac80211/mesh_pathtbl.c12
-rw-r--r--net/mac80211/mlme.c6
-rw-r--r--net/mac80211/offchannel.c829
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/scan.c9
-rw-r--r--net/mac80211/sta_info.c163
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/trace.h25
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/mac80211/util.c7
-rw-r--r--net/mac802154/rx.c3
-rw-r--r--net/mpls/af_mpls.c185
-rw-r--r--net/mpls/internal.h2
-rw-r--r--net/mpls/mpls_iptunnel.c2
-rw-r--r--net/netfilter/nft_meta.c2
-rw-r--r--net/netfilter/xt_cgroup.c3
-rw-r--r--net/nfc/llcp_sock.c2
-rw-r--r--net/openvswitch/dp_notify.c2
-rw-r--r--net/openvswitch/vport-geneve.c1
-rw-r--r--net/openvswitch/vport-gre.c1
-rw-r--r--net/openvswitch/vport-netdev.c10
-rw-r--r--net/openvswitch/vport.c8
-rw-r--r--net/openvswitch/vport.h8
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/rds/connection.c6
-rw-r--r--net/rds/send.c4
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/rxrpc/ar-ack.c4
-rw-r--r--net/rxrpc/ar-output.c2
-rw-r--r--net/sched/sch_api.c27
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_mq.c4
-rw-r--r--net/sched/sch_mqprio.c4
-rw-r--r--net/sctp/ipv6.c13
-rw-r--r--net/sctp/socket.c41
-rw-r--r--net/socket.c21
-rw-r--r--net/sunrpc/backchannel_rqst.c8
-rw-r--r--net/sunrpc/svc.c1
-rw-r--r--net/sunrpc/xprtsock.c14
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/node.c15
-rw-r--r--net/tipc/socket.c14
-rw-r--r--net/tipc/udp_media.c7
-rw-r--r--net/unix/af_unix.c311
-rw-r--r--net/vmw_vsock/vmci_transport.h2
-rw-r--r--net/vmw_vsock/vmci_transport_notify.c2
-rw-r--r--net/vmw_vsock/vmci_transport_notify.h5
-rw-r--r--net/vmw_vsock/vmci_transport_notify_qstate.c2
-rw-r--r--net/wireless/core.h7
-rw-r--r--net/wireless/lib80211_crypt_ccmp.c4
-rw-r--r--net/wireless/lib80211_crypt_tkip.c4
-rw-r--r--net/wireless/nl80211.c62
-rw-r--r--net/wireless/ocb.c3
-rw-r--r--net/wireless/rdev-ops.h51
-rw-r--r--net/wireless/reg.c98
-rw-r--r--net/wireless/trace.h103
-rw-r--r--net/wireless/util.c121
135 files changed, 3803 insertions, 2834 deletions
diff --git a/net/Kconfig b/net/Kconfig
index 127da94ae25e..11f8c22af34d 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -250,9 +250,14 @@ config XPS
depends on SMP
default y
+config SOCK_CGROUP_DATA
+ bool
+ default n
+
config CGROUP_NET_PRIO
bool "Network priority cgroup"
depends on CGROUPS
+ select SOCK_CGROUP_DATA
---help---
Cgroup subsystem for use in assigning processes to network priorities on
a per-interface basis.
@@ -260,6 +265,7 @@ config CGROUP_NET_PRIO
config CGROUP_NET_CLASSID
bool "Network classid cgroup"
depends on CGROUPS
+ select SOCK_CGROUP_DATA
---help---
Cgroup subsystem for use as general purpose socket classid marker that is
being used in cls_cgroup and for netfilter matching.
diff --git a/net/atm/common.c b/net/atm/common.c
index 49a872db7e42..6dc12305799e 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -96,7 +96,7 @@ static void vcc_def_wakeup(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up(&wq->wait);
rcu_read_unlock();
}
@@ -117,7 +117,7 @@ static void vcc_write_space(struct sock *sk)
if (vcc_writable(sk)) {
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
diff --git a/net/atm/mpc.h b/net/atm/mpc.h
index 0919a88bbc70..cfc7b745aa91 100644
--- a/net/atm/mpc.h
+++ b/net/atm/mpc.h
@@ -21,11 +21,11 @@ struct mpoa_client {
uint8_t our_ctrl_addr[ATM_ESA_LEN]; /* MPC's control ATM address */
rwlock_t ingress_lock;
- struct in_cache_ops *in_ops; /* ingress cache operations */
+ const struct in_cache_ops *in_ops; /* ingress cache operations */
in_cache_entry *in_cache; /* the ingress cache of this MPC */
rwlock_t egress_lock;
- struct eg_cache_ops *eg_ops; /* egress cache operations */
+ const struct eg_cache_ops *eg_ops; /* egress cache operations */
eg_cache_entry *eg_cache; /* the egress cache of this MPC */
uint8_t *mps_macs; /* array of MPS MAC addresses, >=1 */
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index d1b2d9a03144..9e60e74c807d 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -534,7 +534,7 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
}
-static struct in_cache_ops ingress_ops = {
+static const struct in_cache_ops ingress_ops = {
in_cache_add_entry, /* add_entry */
in_cache_get, /* get */
in_cache_get_with_mask, /* get_with_mask */
@@ -548,7 +548,7 @@ static struct in_cache_ops ingress_ops = {
in_destroy_cache /* destroy_cache */
};
-static struct eg_cache_ops egress_ops = {
+static const struct eg_cache_ops egress_ops = {
eg_cache_add_entry, /* add_entry */
eg_cache_get_by_cache_id, /* get_by_cache_id */
eg_cache_get_by_tag, /* get_by_tag */
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index f11345e163d7..a58184fdf5fd 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -464,7 +464,8 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
hard_iface->soft_iface = soft_iface;
bat_priv = netdev_priv(hard_iface->soft_iface);
- ret = netdev_master_upper_dev_link(hard_iface->net_dev, soft_iface);
+ ret = netdev_master_upper_dev_link(hard_iface->net_dev,
+ soft_iface, NULL, NULL);
if (ret)
goto err_dev;
@@ -708,7 +709,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
}
hard_iface = batadv_hardif_get_by_netdev(net_dev);
- if (!hard_iface && event == NETDEV_REGISTER)
+ if (!hard_iface && (event == NETDEV_REGISTER ||
+ event == NETDEV_POST_TYPE_CHANGE))
hard_iface = batadv_hardif_add_interface(net_dev);
if (!hard_iface)
@@ -723,6 +725,7 @@ static int batadv_hard_if_event(struct notifier_block *this,
batadv_hardif_deactivate_interface(hard_iface);
break;
case NETDEV_UNREGISTER:
+ case NETDEV_PRE_TYPE_CHANGE:
list_del_rcu(&hard_iface->list);
batadv_hardif_remove_interface(hard_iface);
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index a3bffd1ec2b4..5785e8e6400e 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -33,8 +33,6 @@
#include "selftest.h"
-#define VERSION "2.21"
-
/* Bluetooth sockets */
#define BT_MAX_PROTO 8
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
@@ -271,11 +269,11 @@ static long bt_sock_data_wait(struct sock *sk, long timeo)
if (signal_pending(current) || !timeo)
break;
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
}
__set_current_state(TASK_RUNNING);
@@ -441,7 +439,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock,
if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
return mask;
}
@@ -671,7 +669,7 @@ static const struct file_operations bt_fops = {
};
int bt_procfs_init(struct net *net, const char *name,
- struct bt_sock_list* sk_list,
+ struct bt_sock_list *sk_list,
int (* seq_show)(struct seq_file *, void *))
{
sk_list->custom_seq_show = seq_show;
@@ -687,7 +685,7 @@ void bt_procfs_cleanup(struct net *net, const char *name)
}
#else
int bt_procfs_init(struct net *net, const char *name,
- struct bt_sock_list* sk_list,
+ struct bt_sock_list *sk_list,
int (* seq_show)(struct seq_file *, void *))
{
return 0;
@@ -715,7 +713,7 @@ static int __init bt_init(void)
sock_skb_cb_check_size(sizeof(struct bt_skb_cb));
- BT_INFO("Core ver %s", VERSION);
+ BT_INFO("Core ver %s", BT_SUBSYS_VERSION);
err = bt_selftest();
if (err < 0)
@@ -789,7 +787,7 @@ subsys_initcall(bt_init);
module_exit(bt_exit);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
-MODULE_VERSION(VERSION);
+MODULE_DESCRIPTION("Bluetooth Core ver " BT_SUBSYS_VERSION);
+MODULE_VERSION(BT_SUBSYS_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 298ed37010e6..9e59b6654126 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -178,8 +178,7 @@ static inline int cmtp_recv_frame(struct cmtp_session *session, struct sk_buff *
cmtp_add_msgpart(session, id, skb->data + hdrlen, len);
break;
default:
- if (session->reassembly[id] != NULL)
- kfree_skb(session->reassembly[id]);
+ kfree_skb(session->reassembly[id]);
session->reassembly[id] = NULL;
break;
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 85b82f7adbd2..2d334e07fd77 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -178,6 +178,10 @@ static void hci_connect_le_scan_remove(struct hci_conn *conn)
hci_dev_hold(conn->hdev);
hci_conn_get(conn);
+ /* Even though we hold a reference to the hdev, many other
+ * things might get cleaned up meanwhile, including the hdev's
+ * own workqueue, so we can't use that for scheduling.
+ */
schedule_work(&conn->le_scan_cleanup);
}
@@ -781,7 +785,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 role)
{
struct hci_conn_params *params;
- struct hci_conn *conn, *conn_unfinished;
+ struct hci_conn *conn;
struct smp_irk *irk;
struct hci_request req;
int err;
@@ -794,35 +798,22 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
return ERR_PTR(-EOPNOTSUPP);
}
- /* Some devices send ATT messages as soon as the physical link is
- * established. To be able to handle these ATT messages, the user-
- * space first establishes the connection and then starts the pairing
- * process.
- *
- * So if a hci_conn object already exists for the following connection
- * attempt, we simply update pending_sec_level and auth_type fields
- * and return the object found.
- */
- conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
- conn_unfinished = NULL;
- if (conn) {
- if (conn->state == BT_CONNECT &&
- test_bit(HCI_CONN_SCANNING, &conn->flags)) {
- BT_DBG("will continue unfinished conn %pMR", dst);
- conn_unfinished = conn;
- } else {
- if (conn->pending_sec_level < sec_level)
- conn->pending_sec_level = sec_level;
- goto done;
- }
- }
-
/* Since the controller supports only one LE connection attempt at a
* time, we return -EBUSY if there is any connection attempt running.
*/
if (hci_lookup_le_connect(hdev))
return ERR_PTR(-EBUSY);
+ /* If there's already a connection object but it's not in
+ * scanning state it means it must already be established, in
+ * which case we can't do anything else except report a failure
+ * to connect.
+ */
+ conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
+ if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+ return ERR_PTR(-EBUSY);
+ }
+
/* When given an identity address with existing identity
* resolving key, the connection needs to be established
* to a resolvable random address.
@@ -838,23 +829,20 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
dst_type = ADDR_LE_DEV_RANDOM;
}
- if (conn_unfinished) {
- conn = conn_unfinished;
+ if (conn) {
bacpy(&conn->dst, dst);
} else {
conn = hci_conn_add(hdev, LE_LINK, dst, role);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
+ hci_conn_hold(conn);
+ conn->pending_sec_level = sec_level;
}
- if (!conn)
- return ERR_PTR(-ENOMEM);
-
conn->dst_type = dst_type;
conn->sec_level = BT_SECURITY_LOW;
conn->conn_timeout = conn_timeout;
- if (!conn_unfinished)
- conn->pending_sec_level = sec_level;
-
hci_req_init(&req, hdev);
/* Disable advertising if we're active. For master role
@@ -918,37 +906,9 @@ create_conn:
return ERR_PTR(err);
}
-done:
- /* If this is continuation of connect started by hci_connect_le_scan,
- * it already called hci_conn_hold and calling it again would mess the
- * counter.
- */
- if (!conn_unfinished)
- hci_conn_hold(conn);
-
return conn;
}
-static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- struct hci_conn *conn;
-
- if (!status)
- return;
-
- BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
- status);
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
- if (conn)
- hci_le_conn_failed(conn, status);
-
- hci_dev_unlock(hdev);
-}
-
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
struct hci_conn *conn;
@@ -964,10 +924,9 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
}
/* This function requires the caller holds hdev->lock */
-static int hci_explicit_conn_params_set(struct hci_request *req,
+static int hci_explicit_conn_params_set(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type)
{
- struct hci_dev *hdev = req->hdev;
struct hci_conn_params *params;
if (is_connected(hdev, addr, addr_type))
@@ -995,7 +954,6 @@ static int hci_explicit_conn_params_set(struct hci_request *req,
}
params->explicit_connect = true;
- __hci_update_background_scan(req);
BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
params->auto_connect);
@@ -1006,11 +964,9 @@ static int hci_explicit_conn_params_set(struct hci_request *req,
/* This function requires the caller holds hdev->lock */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level,
- u16 conn_timeout, u8 role)
+ u16 conn_timeout)
{
struct hci_conn *conn;
- struct hci_request req;
- int err;
/* Let's make sure that le is enabled.*/
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
@@ -1038,29 +994,22 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
BT_DBG("requesting refresh of dst_addr");
- conn = hci_conn_add(hdev, LE_LINK, dst, role);
+ conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
if (!conn)
return ERR_PTR(-ENOMEM);
- hci_req_init(&req, hdev);
-
- if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0)
+ if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0)
return ERR_PTR(-EBUSY);
conn->state = BT_CONNECT;
set_bit(HCI_CONN_SCANNING, &conn->flags);
-
- err = hci_req_run(&req, hci_connect_le_scan_complete);
- if (err && err != -ENODATA) {
- hci_conn_del(conn);
- return ERR_PTR(err);
- }
-
conn->dst_type = dst_type;
conn->sec_level = BT_SECURITY_LOW;
conn->pending_sec_level = sec_level;
conn->conn_timeout = conn_timeout;
+ hci_update_background_scan(hdev);
+
done:
hci_conn_hold(conn);
return conn;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 62edbf1b114e..89af7e4fac02 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -56,15 +56,6 @@ DEFINE_MUTEX(hci_cb_list_lock);
/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
-/* ----- HCI requests ----- */
-
-#define HCI_REQ_DONE 0
-#define HCI_REQ_PEND 1
-#define HCI_REQ_CANCELED 2
-
-#define hci_req_lock(d) mutex_lock(&d->req_lock)
-#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
-
/* ---- HCI debugfs entries ---- */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
@@ -73,7 +64,7 @@ static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
struct hci_dev *hdev = file->private_data;
char buf[3];
- buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
+ buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -101,14 +92,14 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
return -EALREADY;
- hci_req_lock(hdev);
+ hci_req_sync_lock(hdev);
if (enable)
skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
HCI_CMD_TIMEOUT);
else
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
HCI_CMD_TIMEOUT);
- hci_req_unlock(hdev);
+ hci_req_sync_unlock(hdev);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -133,7 +124,7 @@ static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
struct hci_dev *hdev = file->private_data;
char buf[3];
- buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
+ buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -165,9 +156,9 @@ static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
!test_bit(HCI_RUNNING, &hdev->flags))
goto done;
- hci_req_lock(hdev);
+ hci_req_sync_lock(hdev);
err = hdev->set_diag(hdev, enable);
- hci_req_unlock(hdev);
+ hci_req_sync_unlock(hdev);
if (err < 0)
return err;
@@ -198,197 +189,14 @@ static void hci_debugfs_create_basic(struct hci_dev *hdev)
&vendor_diag_fops);
}
-/* ---- HCI requests ---- */
-
-static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
- struct sk_buff *skb)
-{
- BT_DBG("%s result 0x%2.2x", hdev->name, result);
-
- if (hdev->req_status == HCI_REQ_PEND) {
- hdev->req_result = result;
- hdev->req_status = HCI_REQ_DONE;
- if (skb)
- hdev->req_skb = skb_get(skb);
- wake_up_interruptible(&hdev->req_wait_q);
- }
-}
-
-static void hci_req_cancel(struct hci_dev *hdev, int err)
-{
- BT_DBG("%s err 0x%2.2x", hdev->name, err);
-
- if (hdev->req_status == HCI_REQ_PEND) {
- hdev->req_result = err;
- hdev->req_status = HCI_REQ_CANCELED;
- wake_up_interruptible(&hdev->req_wait_q);
- }
-}
-
-struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u8 event, u32 timeout)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct hci_request req;
- struct sk_buff *skb;
- int err = 0;
-
- BT_DBG("%s", hdev->name);
-
- hci_req_init(&req, hdev);
-
- hci_req_add_ev(&req, opcode, plen, param, event);
-
- hdev->req_status = HCI_REQ_PEND;
-
- add_wait_queue(&hdev->req_wait_q, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
- err = hci_req_run_skb(&req, hci_req_sync_complete);
- if (err < 0) {
- remove_wait_queue(&hdev->req_wait_q, &wait);
- set_current_state(TASK_RUNNING);
- return ERR_PTR(err);
- }
-
- schedule_timeout(timeout);
-
- remove_wait_queue(&hdev->req_wait_q, &wait);
-
- if (signal_pending(current))
- return ERR_PTR(-EINTR);
-
- switch (hdev->req_status) {
- case HCI_REQ_DONE:
- err = -bt_to_errno(hdev->req_result);
- break;
-
- case HCI_REQ_CANCELED:
- err = -hdev->req_result;
- break;
-
- default:
- err = -ETIMEDOUT;
- break;
- }
-
- hdev->req_status = hdev->req_result = 0;
- skb = hdev->req_skb;
- hdev->req_skb = NULL;
-
- BT_DBG("%s end: err %d", hdev->name, err);
-
- if (err < 0) {
- kfree_skb(skb);
- return ERR_PTR(err);
- }
-
- if (!skb)
- return ERR_PTR(-ENODATA);
-
- return skb;
-}
-EXPORT_SYMBOL(__hci_cmd_sync_ev);
-
-struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u32 timeout)
-{
- return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
-}
-EXPORT_SYMBOL(__hci_cmd_sync);
-
-/* Execute request and wait for completion. */
-static int __hci_req_sync(struct hci_dev *hdev,
- void (*func)(struct hci_request *req,
- unsigned long opt),
- unsigned long opt, __u32 timeout)
-{
- struct hci_request req;
- DECLARE_WAITQUEUE(wait, current);
- int err = 0;
-
- BT_DBG("%s start", hdev->name);
-
- hci_req_init(&req, hdev);
-
- hdev->req_status = HCI_REQ_PEND;
-
- func(&req, opt);
-
- add_wait_queue(&hdev->req_wait_q, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
- err = hci_req_run_skb(&req, hci_req_sync_complete);
- if (err < 0) {
- hdev->req_status = 0;
-
- remove_wait_queue(&hdev->req_wait_q, &wait);
- set_current_state(TASK_RUNNING);
-
- /* ENODATA means the HCI request command queue is empty.
- * This can happen when a request with conditionals doesn't
- * trigger any commands to be sent. This is normal behavior
- * and should not trigger an error return.
- */
- if (err == -ENODATA)
- return 0;
-
- return err;
- }
-
- schedule_timeout(timeout);
-
- remove_wait_queue(&hdev->req_wait_q, &wait);
-
- if (signal_pending(current))
- return -EINTR;
-
- switch (hdev->req_status) {
- case HCI_REQ_DONE:
- err = -bt_to_errno(hdev->req_result);
- break;
-
- case HCI_REQ_CANCELED:
- err = -hdev->req_result;
- break;
-
- default:
- err = -ETIMEDOUT;
- break;
- }
-
- hdev->req_status = hdev->req_result = 0;
-
- BT_DBG("%s end: err %d", hdev->name, err);
-
- return err;
-}
-
-static int hci_req_sync(struct hci_dev *hdev,
- void (*req)(struct hci_request *req,
- unsigned long opt),
- unsigned long opt, __u32 timeout)
-{
- int ret;
-
- if (!test_bit(HCI_UP, &hdev->flags))
- return -ENETDOWN;
-
- /* Serialize all requests */
- hci_req_lock(hdev);
- ret = __hci_req_sync(hdev, req, opt, timeout);
- hci_req_unlock(hdev);
-
- return ret;
-}
-
-static void hci_reset_req(struct hci_request *req, unsigned long opt)
+static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
BT_DBG("%s %ld", req->hdev->name, opt);
/* Reset device */
set_bit(HCI_RESET, &req->hdev->flags);
hci_req_add(req, HCI_OP_RESET, 0, NULL);
+ return 0;
}
static void bredr_init(struct hci_request *req)
@@ -428,7 +236,7 @@ static void amp_init1(struct hci_request *req)
hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
-static void amp_init2(struct hci_request *req)
+static int amp_init2(struct hci_request *req)
{
/* Read Local Supported Features. Not all AMP controllers
* support this so it's placed conditionally in the second
@@ -436,9 +244,11 @@ static void amp_init2(struct hci_request *req)
*/
if (req->hdev->commands[14] & 0x20)
hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
+
+ return 0;
}
-static void hci_init1_req(struct hci_request *req, unsigned long opt)
+static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
@@ -461,6 +271,8 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
BT_ERR("Unknown device type %d", hdev->dev_type);
break;
}
+
+ return 0;
}
static void bredr_setup(struct hci_request *req)
@@ -531,20 +343,30 @@ static void hci_setup_event_mask(struct hci_request *req)
if (lmp_bredr_capable(hdev)) {
events[4] |= 0x01; /* Flow Specification Complete */
- events[4] |= 0x02; /* Inquiry Result with RSSI */
- events[4] |= 0x04; /* Read Remote Extended Features Complete */
- events[5] |= 0x08; /* Synchronous Connection Complete */
- events[5] |= 0x10; /* Synchronous Connection Changed */
} else {
/* Use a different default for LE-only devices */
memset(events, 0, sizeof(events));
- events[0] |= 0x10; /* Disconnection Complete */
- events[1] |= 0x08; /* Read Remote Version Information Complete */
events[1] |= 0x20; /* Command Complete */
events[1] |= 0x40; /* Command Status */
events[1] |= 0x80; /* Hardware Error */
- events[2] |= 0x04; /* Number of Completed Packets */
- events[3] |= 0x02; /* Data Buffer Overflow */
+
+ /* If the controller supports the Disconnect command, enable
+ * the corresponding event. In addition enable packet flow
+ * control related events.
+ */
+ if (hdev->commands[0] & 0x20) {
+ events[0] |= 0x10; /* Disconnection Complete */
+ events[2] |= 0x04; /* Number of Completed Packets */
+ events[3] |= 0x02; /* Data Buffer Overflow */
+ }
+
+ /* If the controller supports the Read Remote Version
+ * Information command, enable the corresponding event.
+ */
+ if (hdev->commands[2] & 0x80)
+ events[1] |= 0x08; /* Read Remote Version Information
+ * Complete
+ */
if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
events[0] |= 0x80; /* Encryption Change */
@@ -552,9 +374,18 @@ static void hci_setup_event_mask(struct hci_request *req)
}
}
- if (lmp_inq_rssi_capable(hdev))
+ if (lmp_inq_rssi_capable(hdev) ||
+ test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
events[4] |= 0x02; /* Inquiry Result with RSSI */
+ if (lmp_ext_feat_capable(hdev))
+ events[4] |= 0x04; /* Read Remote Extended Features Complete */
+
+ if (lmp_esco_capable(hdev)) {
+ events[5] |= 0x08; /* Synchronous Connection Complete */
+ events[5] |= 0x10; /* Synchronous Connection Changed */
+ }
+
if (lmp_sniffsubr_capable(hdev))
events[5] |= 0x20; /* Sniff Subrating */
@@ -590,7 +421,7 @@ static void hci_setup_event_mask(struct hci_request *req)
hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
-static void hci_init2_req(struct hci_request *req, unsigned long opt)
+static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
@@ -670,6 +501,8 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
&enable);
}
+
+ return 0;
}
static void hci_setup_link_policy(struct hci_request *req)
@@ -744,7 +577,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
-static void hci_init3_req(struct hci_request *req, unsigned long opt)
+static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
u8 p;
@@ -777,7 +610,6 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
u8 events[8];
memset(events, 0, sizeof(events));
- events[0] = 0x0f;
if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
events[0] |= 0x10; /* LE Long Term Key Request */
@@ -804,6 +636,34 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
* Report
*/
+ /* If the controller supports the LE Set Scan Enable command,
+ * enable the corresponding advertising report event.
+ */
+ if (hdev->commands[26] & 0x08)
+ events[0] |= 0x02; /* LE Advertising Report */
+
+ /* If the controller supports the LE Create Connection
+ * command, enable the corresponding event.
+ */
+ if (hdev->commands[26] & 0x10)
+ events[0] |= 0x01; /* LE Connection Complete */
+
+ /* If the controller supports the LE Connection Update
+ * command, enable the corresponding event.
+ */
+ if (hdev->commands[27] & 0x04)
+ events[0] |= 0x04; /* LE Connection Update
+ * Complete
+ */
+
+ /* If the controller supports the LE Read Remote Used Features
+ * command, enable the corresponding event.
+ */
+ if (hdev->commands[27] & 0x20)
+ events[0] |= 0x08; /* LE Read Remote Used
+ * Features Complete
+ */
+
/* If the controller supports the LE Read Local P-256
* Public Key command, enable the corresponding event.
*/
@@ -856,9 +716,11 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
sizeof(cp), &cp);
}
+
+ return 0;
}
-static void hci_init4_req(struct hci_request *req, unsigned long opt)
+static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
@@ -909,20 +771,22 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
sizeof(support), &support);
}
+
+ return 0;
}
static int __hci_init(struct hci_dev *hdev)
{
int err;
- err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
+ err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
if (err < 0)
return err;
if (hci_dev_test_flag(hdev, HCI_SETUP))
hci_debugfs_create_basic(hdev);
- err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
+ err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
if (err < 0)
return err;
@@ -933,11 +797,11 @@ static int __hci_init(struct hci_dev *hdev)
if (hdev->dev_type != HCI_BREDR)
return 0;
- err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
+ err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
if (err < 0)
return err;
- err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
+ err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
if (err < 0)
return err;
@@ -968,7 +832,7 @@ static int __hci_init(struct hci_dev *hdev)
return 0;
}
-static void hci_init0_req(struct hci_request *req, unsigned long opt)
+static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
@@ -984,6 +848,8 @@ static void hci_init0_req(struct hci_request *req, unsigned long opt)
/* Read BD Address */
if (hdev->set_bdaddr)
hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
+
+ return 0;
}
static int __hci_unconf_init(struct hci_dev *hdev)
@@ -993,7 +859,7 @@ static int __hci_unconf_init(struct hci_dev *hdev)
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return 0;
- err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
+ err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
if (err < 0)
return err;
@@ -1003,7 +869,7 @@ static int __hci_unconf_init(struct hci_dev *hdev)
return 0;
}
-static void hci_scan_req(struct hci_request *req, unsigned long opt)
+static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
__u8 scan = opt;
@@ -1011,9 +877,10 @@ static void hci_scan_req(struct hci_request *req, unsigned long opt)
/* Inquiry and Page scans */
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ return 0;
}
-static void hci_auth_req(struct hci_request *req, unsigned long opt)
+static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
__u8 auth = opt;
@@ -1021,9 +888,10 @@ static void hci_auth_req(struct hci_request *req, unsigned long opt)
/* Authentication */
hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
+ return 0;
}
-static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
+static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
__u8 encrypt = opt;
@@ -1031,9 +899,10 @@ static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
/* Encryption */
hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
+ return 0;
}
-static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
+static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
__le16 policy = cpu_to_le16(opt);
@@ -1041,6 +910,7 @@ static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
/* Default link policy */
hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
+ return 0;
}
/* Get HCI device by index.
@@ -1285,7 +1155,7 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
return copied;
}
-static void hci_inq_req(struct hci_request *req, unsigned long opt)
+static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
struct hci_dev *hdev = req->hdev;
@@ -1294,13 +1164,15 @@ static void hci_inq_req(struct hci_request *req, unsigned long opt)
BT_DBG("%s", hdev->name);
if (test_bit(HCI_INQUIRY, &hdev->flags))
- return;
+ return 0;
/* Start Inquiry */
memcpy(&cp.lap, &ir->lap, 3);
cp.length = ir->length;
cp.num_rsp = ir->num_rsp;
hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
+
+ return 0;
}
int hci_inquiry(void __user *arg)
@@ -1351,7 +1223,7 @@ int hci_inquiry(void __user *arg)
if (do_inquiry) {
err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
- timeo);
+ timeo, NULL);
if (err < 0)
goto done;
@@ -1404,7 +1276,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
BT_DBG("%s %p", hdev->name, hdev);
- hci_req_lock(hdev);
+ hci_req_sync_lock(hdev);
if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
ret = -ENODEV;
@@ -1557,7 +1429,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
}
done:
- hci_req_unlock(hdev);
+ hci_req_sync_unlock(hdev);
return ret;
}
@@ -1651,12 +1523,12 @@ int hci_dev_do_close(struct hci_dev *hdev)
cancel_delayed_work(&hdev->power_off);
- hci_req_cancel(hdev, ENODEV);
- hci_req_lock(hdev);
+ hci_request_cancel_all(hdev);
+ hci_req_sync_lock(hdev);
if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
cancel_delayed_work_sync(&hdev->cmd_timer);
- hci_req_unlock(hdev);
+ hci_req_sync_unlock(hdev);
return 0;
}
@@ -1674,9 +1546,6 @@ int hci_dev_do_close(struct hci_dev *hdev)
if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
cancel_delayed_work(&hdev->service_cache);
- cancel_delayed_work_sync(&hdev->le_scan_disable);
- cancel_delayed_work_sync(&hdev->le_scan_restart);
-
if (hci_dev_test_flag(hdev, HCI_MGMT))
cancel_delayed_work_sync(&hdev->rpa_expired);
@@ -1717,7 +1586,7 @@ int hci_dev_do_close(struct hci_dev *hdev)
if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
!auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
set_bit(HCI_INIT, &hdev->flags);
- __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
+ __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
clear_bit(HCI_INIT, &hdev->flags);
}
@@ -1754,7 +1623,7 @@ int hci_dev_do_close(struct hci_dev *hdev)
memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
bacpy(&hdev->random_addr, BDADDR_ANY);
- hci_req_unlock(hdev);
+ hci_req_sync_unlock(hdev);
hci_dev_put(hdev);
return 0;
@@ -1790,7 +1659,7 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
BT_DBG("%s %p", hdev->name, hdev);
- hci_req_lock(hdev);
+ hci_req_sync_lock(hdev);
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
@@ -1812,9 +1681,9 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
atomic_set(&hdev->cmd_cnt, 1);
hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
- ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
+ ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
- hci_req_unlock(hdev);
+ hci_req_sync_unlock(hdev);
return ret;
}
@@ -1947,7 +1816,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
switch (cmd) {
case HCISETAUTH:
err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
- HCI_INIT_TIMEOUT);
+ HCI_INIT_TIMEOUT, NULL);
break;
case HCISETENCRYPT:
@@ -1959,18 +1828,18 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (!test_bit(HCI_AUTH, &hdev->flags)) {
/* Auth must be enabled first */
err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
- HCI_INIT_TIMEOUT);
+ HCI_INIT_TIMEOUT, NULL);
if (err)
break;
}
err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
- HCI_INIT_TIMEOUT);
+ HCI_INIT_TIMEOUT, NULL);
break;
case HCISETSCAN:
err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
- HCI_INIT_TIMEOUT);
+ HCI_INIT_TIMEOUT, NULL);
/* Ensure that the connectable and discoverable states
* get correctly modified as this was a non-mgmt change.
@@ -1981,7 +1850,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
case HCISETLINKPOL:
err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
- HCI_INIT_TIMEOUT);
+ HCI_INIT_TIMEOUT, NULL);
break;
case HCISETLINKMODE:
@@ -2731,7 +2600,8 @@ struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
}
/* This function requires the caller holds hdev->lock */
-struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
+struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
+{
struct adv_info *cur_instance;
cur_instance = hci_find_adv_instance(hdev, instance);
@@ -3024,181 +2894,16 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev)
}
/* This function requires the caller holds hdev->lock */
-void hci_conn_params_clear_all(struct hci_dev *hdev)
+static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
struct hci_conn_params *params, *tmp;
list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
hci_conn_params_free(params);
- hci_update_background_scan(hdev);
-
BT_DBG("All LE connection parameters were removed");
}
-static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
-{
- if (status) {
- BT_ERR("Failed to start inquiry: status %d", status);
-
- hci_dev_lock(hdev);
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- hci_dev_unlock(hdev);
- return;
- }
-}
-
-static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- /* General inquiry access code (GIAC) */
- u8 lap[3] = { 0x33, 0x8b, 0x9e };
- struct hci_cp_inquiry cp;
- int err;
-
- if (status) {
- BT_ERR("Failed to disable LE scanning: status %d", status);
- return;
- }
-
- hdev->discovery.scan_start = 0;
-
- switch (hdev->discovery.type) {
- case DISCOV_TYPE_LE:
- hci_dev_lock(hdev);
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- hci_dev_unlock(hdev);
- break;
-
- case DISCOV_TYPE_INTERLEAVED:
- hci_dev_lock(hdev);
-
- if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
- &hdev->quirks)) {
- /* If we were running LE only scan, change discovery
- * state. If we were running both LE and BR/EDR inquiry
- * simultaneously, and BR/EDR inquiry is already
- * finished, stop discovery, otherwise BR/EDR inquiry
- * will stop discovery when finished. If we will resolve
- * remote device name, do not change discovery state.
- */
- if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
- hdev->discovery.state != DISCOVERY_RESOLVING)
- hci_discovery_set_state(hdev,
- DISCOVERY_STOPPED);
- } else {
- struct hci_request req;
-
- hci_inquiry_cache_flush(hdev);
-
- hci_req_init(&req, hdev);
-
- memset(&cp, 0, sizeof(cp));
- memcpy(&cp.lap, lap, sizeof(cp.lap));
- cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
- hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
-
- err = hci_req_run(&req, inquiry_complete);
- if (err) {
- BT_ERR("Inquiry request failed: err %d", err);
- hci_discovery_set_state(hdev,
- DISCOVERY_STOPPED);
- }
- }
-
- hci_dev_unlock(hdev);
- break;
- }
-}
-
-static void le_scan_disable_work(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- le_scan_disable.work);
- struct hci_request req;
- int err;
-
- BT_DBG("%s", hdev->name);
-
- cancel_delayed_work_sync(&hdev->le_scan_restart);
-
- hci_req_init(&req, hdev);
-
- hci_req_add_le_scan_disable(&req);
-
- err = hci_req_run(&req, le_scan_disable_work_complete);
- if (err)
- BT_ERR("Disable LE scanning request failed: err %d", err);
-}
-
-static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- unsigned long timeout, duration, scan_start, now;
-
- BT_DBG("%s", hdev->name);
-
- if (status) {
- BT_ERR("Failed to restart LE scan: status %d", status);
- return;
- }
-
- if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
- !hdev->discovery.scan_start)
- return;
-
- /* When the scan was started, hdev->le_scan_disable has been queued
- * after duration from scan_start. During scan restart this job
- * has been canceled, and we need to queue it again after proper
- * timeout, to make sure that scan does not run indefinitely.
- */
- duration = hdev->discovery.scan_duration;
- scan_start = hdev->discovery.scan_start;
- now = jiffies;
- if (now - scan_start <= duration) {
- int elapsed;
-
- if (now >= scan_start)
- elapsed = now - scan_start;
- else
- elapsed = ULONG_MAX - scan_start + now;
-
- timeout = duration - elapsed;
- } else {
- timeout = 0;
- }
- queue_delayed_work(hdev->workqueue,
- &hdev->le_scan_disable, timeout);
-}
-
-static void le_scan_restart_work(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- le_scan_restart.work);
- struct hci_request req;
- struct hci_cp_le_set_scan_enable cp;
- int err;
-
- BT_DBG("%s", hdev->name);
-
- /* If controller is not scanning we are done. */
- if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
- return;
-
- hci_req_init(&req, hdev);
-
- hci_req_add_le_scan_disable(&req);
-
- memset(&cp, 0, sizeof(cp));
- cp.enable = LE_SCAN_ENABLE;
- cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
- hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
-
- err = hci_req_run(&req, le_scan_restart_work_complete);
- if (err)
- BT_ERR("Restart LE scan request failed: err %d", err);
-}
-
/* Copy the Identity Address of the controller.
*
* If the controller has a public BD_ADDR, then by default use that one.
@@ -3298,8 +3003,6 @@ struct hci_dev *hci_alloc_dev(void)
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
- INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
- INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
skb_queue_head_init(&hdev->rx_q);
@@ -3310,6 +3013,8 @@ struct hci_dev *hci_alloc_dev(void)
INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
+ hci_request_setup(hdev);
+
hci_init_sysfs(hdev);
discovery_init(hdev);
@@ -3520,7 +3225,7 @@ int hci_reset_dev(struct hci_dev *hdev)
if (!skb)
return -ENOMEM;
- bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
memcpy(skb_put(skb, 3), hw_err, 3);
/* Send Hardware Error to upper stack */
@@ -3537,9 +3242,9 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
return -ENXIO;
}
- if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
- bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
- bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+ if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
+ hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
+ hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
kfree_skb(skb);
return -EINVAL;
}
@@ -3561,7 +3266,7 @@ EXPORT_SYMBOL(hci_recv_frame);
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
/* Mark as diagnostic packet */
- bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
+ hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
/* Time stamp */
__net_timestamp(skb);
@@ -3603,7 +3308,8 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
int err;
- BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
+ BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
+ skb->len);
/* Time stamp */
__net_timestamp(skb);
@@ -3648,7 +3354,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
/* Stand-alone HCI commands must be flagged as
* single-command requests.
*/
- bt_cb(skb)->hci.req_start = true;
+ bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
skb_queue_tail(&hdev->cmd_q, skb);
queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -3685,9 +3391,9 @@ struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
- hci_req_lock(hdev);
+ hci_req_sync_lock(hdev);
skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
- hci_req_unlock(hdev);
+ hci_req_sync_unlock(hdev);
return skb;
}
@@ -3716,7 +3422,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
skb->len = skb_headlen(skb);
skb->data_len = 0;
- bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+ hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
switch (hdev->dev_type) {
case HCI_BREDR:
@@ -3756,7 +3462,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
do {
skb = list; list = list->next;
- bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+ hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
hci_add_acl_hdr(skb, conn->handle, flags);
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -3794,7 +3500,7 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
skb_reset_transport_header(skb);
memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
- bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
+ hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
skb_queue_tail(&conn->data_q, skb);
queue_work(hdev->workqueue, &hdev->tx_work);
@@ -4345,7 +4051,7 @@ static bool hci_req_is_complete(struct hci_dev *hdev)
if (!skb)
return true;
- return bt_cb(skb)->hci.req_start;
+ return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}
static void hci_resend_last(struct hci_dev *hdev)
@@ -4405,20 +4111,20 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
* callback would be found in hdev->sent_cmd instead of the
* command queue (hdev->cmd_q).
*/
- if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
- *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
+ if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
+ *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
return;
}
- if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
- *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
+ if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
+ *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
return;
}
/* Remove all pending commands belonging to this request */
spin_lock_irqsave(&hdev->cmd_q.lock, flags);
while ((skb = __skb_dequeue(&hdev->cmd_q))) {
- if (bt_cb(skb)->hci.req_start) {
+ if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
__skb_queue_head(&hdev->cmd_q, skb);
break;
}
@@ -4453,7 +4159,7 @@ static void hci_rx_work(struct work_struct *work)
if (test_bit(HCI_INIT, &hdev->flags)) {
/* Don't process data packets in this states. */
- switch (bt_cb(skb)->pkt_type) {
+ switch (hci_skb_pkt_type(skb)) {
case HCI_ACLDATA_PKT:
case HCI_SCODATA_PKT:
kfree_skb(skb);
@@ -4462,7 +4168,7 @@ static void hci_rx_work(struct work_struct *work)
}
/* Process frame */
- switch (bt_cb(skb)->pkt_type) {
+ switch (hci_skb_pkt_type(skb)) {
case HCI_EVENT_PKT:
BT_DBG("%s Event packet", hdev->name);
hci_event_packet(hdev, skb);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 981f8a202c27..e639671f54bd 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -27,6 +27,10 @@
#include "smp.h"
#include "hci_request.h"
+#define HCI_REQ_DONE 0
+#define HCI_REQ_PEND 1
+#define HCI_REQ_CANCELED 2
+
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
skb_queue_head_init(&req->cmd_q);
@@ -56,8 +60,12 @@ static int req_run(struct hci_request *req, hci_req_complete_t complete,
return -ENODATA;
skb = skb_peek_tail(&req->cmd_q);
- bt_cb(skb)->hci.req_complete = complete;
- bt_cb(skb)->hci.req_complete_skb = complete_skb;
+ if (complete) {
+ bt_cb(skb)->hci.req_complete = complete;
+ } else if (complete_skb) {
+ bt_cb(skb)->hci.req_complete_skb = complete_skb;
+ bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
+ }
spin_lock_irqsave(&hdev->cmd_q.lock, flags);
skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
@@ -78,6 +86,203 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
return req_run(req, NULL, complete);
}
+static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ struct sk_buff *skb)
+{
+ BT_DBG("%s result 0x%2.2x", hdev->name, result);
+
+ if (hdev->req_status == HCI_REQ_PEND) {
+ hdev->req_result = result;
+ hdev->req_status = HCI_REQ_DONE;
+ if (skb)
+ hdev->req_skb = skb_get(skb);
+ wake_up_interruptible(&hdev->req_wait_q);
+ }
+}
+
+void hci_req_sync_cancel(struct hci_dev *hdev, int err)
+{
+ BT_DBG("%s err 0x%2.2x", hdev->name, err);
+
+ if (hdev->req_status == HCI_REQ_PEND) {
+ hdev->req_result = err;
+ hdev->req_status = HCI_REQ_CANCELED;
+ wake_up_interruptible(&hdev->req_wait_q);
+ }
+}
+
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct hci_request req;
+ struct sk_buff *skb;
+ int err = 0;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_req_init(&req, hdev);
+
+ hci_req_add_ev(&req, opcode, plen, param, event);
+
+ hdev->req_status = HCI_REQ_PEND;
+
+ add_wait_queue(&hdev->req_wait_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ err = hci_req_run_skb(&req, hci_req_sync_complete);
+ if (err < 0) {
+ remove_wait_queue(&hdev->req_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+ return ERR_PTR(err);
+ }
+
+ schedule_timeout(timeout);
+
+ remove_wait_queue(&hdev->req_wait_q, &wait);
+
+ if (signal_pending(current))
+ return ERR_PTR(-EINTR);
+
+ switch (hdev->req_status) {
+ case HCI_REQ_DONE:
+ err = -bt_to_errno(hdev->req_result);
+ break;
+
+ case HCI_REQ_CANCELED:
+ err = -hdev->req_result;
+ break;
+
+ default:
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ hdev->req_status = hdev->req_result = 0;
+ skb = hdev->req_skb;
+ hdev->req_skb = NULL;
+
+ BT_DBG("%s end: err %d", hdev->name, err);
+
+ if (err < 0) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+
+ if (!skb)
+ return ERR_PTR(-ENODATA);
+
+ return skb;
+}
+EXPORT_SYMBOL(__hci_cmd_sync_ev);
+
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout)
+{
+ return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
+}
+EXPORT_SYMBOL(__hci_cmd_sync);
+
+/* Execute request and wait for completion. */
+int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
+ unsigned long opt),
+ unsigned long opt, u32 timeout, u8 *hci_status)
+{
+ struct hci_request req;
+ DECLARE_WAITQUEUE(wait, current);
+ int err = 0;
+
+ BT_DBG("%s start", hdev->name);
+
+ hci_req_init(&req, hdev);
+
+ hdev->req_status = HCI_REQ_PEND;
+
+ err = func(&req, opt);
+ if (err) {
+ if (hci_status)
+ *hci_status = HCI_ERROR_UNSPECIFIED;
+ return err;
+ }
+
+ add_wait_queue(&hdev->req_wait_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ err = hci_req_run_skb(&req, hci_req_sync_complete);
+ if (err < 0) {
+ hdev->req_status = 0;
+
+ remove_wait_queue(&hdev->req_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+
+ /* ENODATA means the HCI request command queue is empty.
+ * This can happen when a request with conditionals doesn't
+ * trigger any commands to be sent. This is normal behavior
+ * and should not trigger an error return.
+ */
+ if (err == -ENODATA) {
+ if (hci_status)
+ *hci_status = 0;
+ return 0;
+ }
+
+ if (hci_status)
+ *hci_status = HCI_ERROR_UNSPECIFIED;
+
+ return err;
+ }
+
+ schedule_timeout(timeout);
+
+ remove_wait_queue(&hdev->req_wait_q, &wait);
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ switch (hdev->req_status) {
+ case HCI_REQ_DONE:
+ err = -bt_to_errno(hdev->req_result);
+ if (hci_status)
+ *hci_status = hdev->req_result;
+ break;
+
+ case HCI_REQ_CANCELED:
+ err = -hdev->req_result;
+ if (hci_status)
+ *hci_status = HCI_ERROR_UNSPECIFIED;
+ break;
+
+ default:
+ err = -ETIMEDOUT;
+ if (hci_status)
+ *hci_status = HCI_ERROR_UNSPECIFIED;
+ break;
+ }
+
+ hdev->req_status = hdev->req_result = 0;
+
+ BT_DBG("%s end: err %d", hdev->name, err);
+
+ return err;
+}
+
+int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
+ unsigned long opt),
+ unsigned long opt, u32 timeout, u8 *hci_status)
+{
+ int ret;
+
+ if (!test_bit(HCI_UP, &hdev->flags))
+ return -ENETDOWN;
+
+ /* Serialize all requests */
+ hci_req_sync_lock(hdev);
+ ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
+ hci_req_sync_unlock(hdev);
+
+ return ret;
+}
+
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param)
{
@@ -98,8 +303,8 @@ struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
BT_DBG("skb len %d", skb->len);
- bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
- bt_cb(skb)->hci.opcode = opcode;
+ hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+ hci_skb_opcode(skb) = opcode;
return skb;
}
@@ -128,7 +333,7 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
}
if (skb_queue_empty(&req->cmd_q))
- bt_cb(skb)->hci.req_start = true;
+ bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
bt_cb(skb)->hci.req_event = event;
@@ -476,7 +681,7 @@ void hci_update_page_scan(struct hci_dev *hdev)
*
* This function requires the caller holds hdev->lock.
*/
-void __hci_update_background_scan(struct hci_request *req)
+static void __hci_update_background_scan(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
@@ -543,28 +748,6 @@ void __hci_update_background_scan(struct hci_request *req)
}
}
-static void update_background_scan_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- if (status)
- BT_DBG("HCI request failed to update background scanning: "
- "status 0x%2.2x", status);
-}
-
-void hci_update_background_scan(struct hci_dev *hdev)
-{
- int err;
- struct hci_request req;
-
- hci_req_init(&req, hdev);
-
- __hci_update_background_scan(&req);
-
- err = hci_req_run(&req, update_background_scan_complete);
- if (err && err != -ENODATA)
- BT_ERR("Failed to run HCI request: err %d", err);
-}
-
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason)
{
@@ -657,3 +840,446 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
return 0;
}
+
+static int update_bg_scan(struct hci_request *req, unsigned long opt)
+{
+ hci_dev_lock(req->hdev);
+ __hci_update_background_scan(req);
+ hci_dev_unlock(req->hdev);
+ return 0;
+}
+
+static void bg_scan_update(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ bg_scan_update);
+ struct hci_conn *conn;
+ u8 status;
+ int err;
+
+ err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
+ if (!err)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (conn)
+ hci_le_conn_failed(conn, status);
+
+ hci_dev_unlock(hdev);
+}
+
+static int le_scan_disable(struct hci_request *req, unsigned long opt)
+{
+ hci_req_add_le_scan_disable(req);
+ return 0;
+}
+
+static int bredr_inquiry(struct hci_request *req, unsigned long opt)
+{
+ u8 length = opt;
+ /* General inquiry access code (GIAC) */
+ u8 lap[3] = { 0x33, 0x8b, 0x9e };
+ struct hci_cp_inquiry cp;
+
+ BT_DBG("%s", req->hdev->name);
+
+ hci_dev_lock(req->hdev);
+ hci_inquiry_cache_flush(req->hdev);
+ hci_dev_unlock(req->hdev);
+
+ memset(&cp, 0, sizeof(cp));
+ memcpy(&cp.lap, lap, sizeof(cp.lap));
+ cp.length = length;
+
+ hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
+
+ return 0;
+}
+
+static void le_scan_disable_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ le_scan_disable.work);
+ u8 status;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ return;
+
+ cancel_delayed_work(&hdev->le_scan_restart);
+
+ hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
+ if (status) {
+ BT_ERR("Failed to disable LE scan: status 0x%02x", status);
+ return;
+ }
+
+ hdev->discovery.scan_start = 0;
+
+ /* If we were running LE only scan, change discovery state. If
+ * we were running both LE and BR/EDR inquiry simultaneously,
+ * and BR/EDR inquiry is already finished, stop discovery,
+ * otherwise BR/EDR inquiry will stop discovery when finished.
+ * If we will resolve remote device name, do not change
+ * discovery state.
+ */
+
+ if (hdev->discovery.type == DISCOV_TYPE_LE)
+ goto discov_stopped;
+
+ if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
+ return;
+
+ if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
+ if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
+ hdev->discovery.state != DISCOVERY_RESOLVING)
+ goto discov_stopped;
+
+ return;
+ }
+
+ hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
+ HCI_CMD_TIMEOUT, &status);
+ if (status) {
+ BT_ERR("Inquiry failed: status 0x%02x", status);
+ goto discov_stopped;
+ }
+
+ return;
+
+discov_stopped:
+ hci_dev_lock(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ hci_dev_unlock(hdev);
+}
+
+static int le_scan_restart(struct hci_request *req, unsigned long opt)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_le_set_scan_enable cp;
+
+ /* If controller is not scanning we are done. */
+ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ return 0;
+
+ hci_req_add_le_scan_disable(req);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = LE_SCAN_ENABLE;
+ cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+
+ return 0;
+}
+
+static void le_scan_restart_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ le_scan_restart.work);
+ unsigned long timeout, duration, scan_start, now;
+ u8 status;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
+ if (status) {
+ BT_ERR("Failed to restart LE scan: status %d", status);
+ return;
+ }
+
+ hci_dev_lock(hdev);
+
+ if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
+ !hdev->discovery.scan_start)
+ goto unlock;
+
+	/* When the scan was started, hdev->le_scan_disable was queued to
+	 * run 'duration' after scan_start. During the scan restart that
+	 * job has been canceled, so queue it again with the remaining
+	 * timeout to make sure the scan does not run indefinitely.
+	 */
+ duration = hdev->discovery.scan_duration;
+ scan_start = hdev->discovery.scan_start;
+ now = jiffies;
+ if (now - scan_start <= duration) {
+ int elapsed;
+
+ if (now >= scan_start)
+ elapsed = now - scan_start;
+ else
+ elapsed = ULONG_MAX - scan_start + now;
+
+ timeout = duration - elapsed;
+ } else {
+ timeout = 0;
+ }
+
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->le_scan_disable, timeout);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
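The branchy elapsed-time computation above simply works out how much of the original scan window is left before re-queueing le_scan_disable. As a minimal standalone sketch (illustrative only, not part of the patch; the helper name is made up), the same remaining time can be derived from wrap-safe unsigned subtraction:

/* Sketch: remaining LE scan time, tolerant of a jiffies wrap-around. */
static unsigned long remaining_scan_timeout(unsigned long scan_start,
					    unsigned long duration)
{
	unsigned long elapsed = jiffies - scan_start;	/* wraps safely */

	return elapsed < duration ? duration - elapsed : 0;
}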
+
+static void cancel_adv_timeout(struct hci_dev *hdev)
+{
+ if (hdev->adv_instance_timeout) {
+ hdev->adv_instance_timeout = 0;
+ cancel_delayed_work(&hdev->adv_instance_expire);
+ }
+}
+
+static void disable_advertising(struct hci_request *req)
+{
+ u8 enable = 0x00;
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
+static int active_scan(struct hci_request *req, unsigned long opt)
+{
+ uint16_t interval = opt;
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_le_set_scan_param param_cp;
+ struct hci_cp_le_set_scan_enable enable_cp;
+ u8 own_addr_type;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
+ hci_dev_lock(hdev);
+
+ /* Don't let discovery abort an outgoing connection attempt
+ * that's using directed advertising.
+ */
+ if (hci_lookup_le_connect(hdev)) {
+ hci_dev_unlock(hdev);
+ return -EBUSY;
+ }
+
+ cancel_adv_timeout(hdev);
+ hci_dev_unlock(hdev);
+
+ disable_advertising(req);
+ }
+
+ /* If controller is scanning, it means the background scanning is
+ * running. Thus, we should temporarily stop it in order to set the
+ * discovery scanning parameters.
+ */
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ hci_req_add_le_scan_disable(req);
+
+ /* All active scans will be done with either a resolvable private
+ * address (when privacy feature has been enabled) or non-resolvable
+ * private address.
+ */
+ err = hci_update_random_address(req, true, &own_addr_type);
+ if (err < 0)
+ own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+ memset(&param_cp, 0, sizeof(param_cp));
+ param_cp.type = LE_SCAN_ACTIVE;
+ param_cp.interval = cpu_to_le16(interval);
+ param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
+ param_cp.own_address_type = own_addr_type;
+
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
+ &param_cp);
+
+ memset(&enable_cp, 0, sizeof(enable_cp));
+ enable_cp.enable = LE_SCAN_ENABLE;
+ enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
+ &enable_cp);
+
+ return 0;
+}
+
+static int interleaved_discov(struct hci_request *req, unsigned long opt)
+{
+ int err;
+
+ BT_DBG("%s", req->hdev->name);
+
+ err = active_scan(req, opt);
+ if (err)
+ return err;
+
+ return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
+}
+
+static void start_discovery(struct hci_dev *hdev, u8 *status)
+{
+ unsigned long timeout;
+
+ BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
+
+ switch (hdev->discovery.type) {
+ case DISCOV_TYPE_BREDR:
+ if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
+ hci_req_sync(hdev, bredr_inquiry,
+ DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
+ status);
+ return;
+ case DISCOV_TYPE_INTERLEAVED:
+ /* When running simultaneous discovery, the LE scanning time
+		 * should occupy the whole discovery time since BR/EDR inquiry
+ * and LE scanning are scheduled by the controller.
+ *
+ * For interleaving discovery in comparison, BR/EDR inquiry
+ * and LE scanning are done sequentially with separate
+ * timeouts.
+ */
+ if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+ &hdev->quirks)) {
+ timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
+ /* During simultaneous discovery, we double LE scan
+ * interval. We must leave some time for the controller
+ * to do BR/EDR inquiry.
+ */
+ hci_req_sync(hdev, interleaved_discov,
+ DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
+ status);
+ break;
+ }
+
+ timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
+ hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
+ HCI_CMD_TIMEOUT, status);
+ break;
+ case DISCOV_TYPE_LE:
+ timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
+ hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
+ HCI_CMD_TIMEOUT, status);
+ break;
+ default:
+ *status = HCI_ERROR_UNSPECIFIED;
+ return;
+ }
+
+ if (*status)
+ return;
+
+ BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
+
+ /* When service discovery is used and the controller has a
+ * strict duplicate filter, it is important to remember the
+ * start and duration of the scan. This is required for
+ * restarting scanning during the discovery phase.
+ */
+ if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
+ hdev->discovery.result_filtering) {
+ hdev->discovery.scan_start = jiffies;
+ hdev->discovery.scan_duration = timeout;
+ }
+
+ queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
+ timeout);
+}
+
+bool hci_req_stop_discovery(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct discovery_state *d = &hdev->discovery;
+ struct hci_cp_remote_name_req_cancel cp;
+ struct inquiry_entry *e;
+ bool ret = false;
+
+ BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
+
+ if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
+ if (test_bit(HCI_INQUIRY, &hdev->flags))
+ hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
+ cancel_delayed_work(&hdev->le_scan_disable);
+ hci_req_add_le_scan_disable(req);
+ }
+
+ ret = true;
+ } else {
+ /* Passive scanning */
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
+ hci_req_add_le_scan_disable(req);
+ ret = true;
+ }
+ }
+
+ /* No further actions needed for LE-only discovery */
+ if (d->type == DISCOV_TYPE_LE)
+ return ret;
+
+ if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+ NAME_PENDING);
+ if (!e)
+ return ret;
+
+ bacpy(&cp.bdaddr, &e->data.bdaddr);
+ hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
+ &cp);
+ ret = true;
+ }
+
+ return ret;
+}
+
+static int stop_discovery(struct hci_request *req, unsigned long opt)
+{
+ hci_dev_lock(req->hdev);
+ hci_req_stop_discovery(req);
+ hci_dev_unlock(req->hdev);
+
+ return 0;
+}
+
+static void discov_update(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ discov_update);
+ u8 status = 0;
+
+ switch (hdev->discovery.state) {
+ case DISCOVERY_STARTING:
+ start_discovery(hdev, &status);
+ mgmt_start_discovery_complete(hdev, status);
+ if (status)
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ else
+ hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ break;
+ case DISCOVERY_STOPPING:
+ hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
+ mgmt_stop_discovery_complete(hdev, status);
+ if (!status)
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ break;
+ case DISCOVERY_STOPPED:
+ default:
+ return;
+ }
+}
+
+void hci_request_setup(struct hci_dev *hdev)
+{
+ INIT_WORK(&hdev->discov_update, discov_update);
+ INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
+ INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+ INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
+}
+
+void hci_request_cancel_all(struct hci_dev *hdev)
+{
+ hci_req_sync_cancel(hdev, ENODEV);
+
+ cancel_work_sync(&hdev->discov_update);
+ cancel_work_sync(&hdev->bg_scan_update);
+ cancel_delayed_work_sync(&hdev->le_scan_disable);
+ cancel_delayed_work_sync(&hdev->le_scan_restart);
+}
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index 25c7f1305dcb..6b9e59f7f7a9 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -20,6 +20,9 @@
SOFTWARE IS DISCLAIMED.
*/
+#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
+#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
+
struct hci_request {
struct hci_dev *hdev;
struct sk_buff_head cmd_q;
@@ -41,21 +44,37 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
hci_req_complete_t *req_complete,
hci_req_complete_skb_t *req_complete_skb);
+int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
+ unsigned long opt),
+ unsigned long opt, u32 timeout, u8 *hci_status);
+int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
+ unsigned long opt),
+ unsigned long opt, u32 timeout, u8 *hci_status);
+void hci_req_sync_cancel(struct hci_dev *hdev, int err);
+
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param);
void hci_req_add_le_scan_disable(struct hci_request *req);
void hci_req_add_le_passive_scan(struct hci_request *req);
+/* Returns true if HCI commands were queued */
+bool hci_req_stop_discovery(struct hci_request *req);
+
void hci_update_page_scan(struct hci_dev *hdev);
void __hci_update_page_scan(struct hci_request *req);
int hci_update_random_address(struct hci_request *req, bool require_privacy,
u8 *own_addr_type);
-void hci_update_background_scan(struct hci_dev *hdev);
-void __hci_update_background_scan(struct hci_request *req);
-
int hci_abort_conn(struct hci_conn *conn, u8 reason);
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason);
+
+static inline void hci_update_background_scan(struct hci_dev *hdev)
+{
+ queue_work(hdev->req_workqueue, &hdev->bg_scan_update);
+}
+
+void hci_request_setup(struct hci_dev *hdev);
+void hci_request_cancel_all(struct hci_dev *hdev);
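With hci_update_background_scan() reduced to queueing bg_scan_update on hdev->req_workqueue, callers no longer build and run their own request. A hypothetical caller (a sketch mirroring how the mgmt.c paths below use it, not copied from the patch) now looks like:

	hci_dev_lock(hdev);
	list_add(&params->action, &hdev->pend_le_conns);
	hci_dev_unlock(hdev);

	/* The actual HCI commands are issued later by bg_scan_update */
	hci_update_background_scan(hdev);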
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index b1eb8c09a660..41f579ba447b 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -26,6 +26,8 @@
#include <linux/export.h>
#include <asm/unaligned.h>
+#include <generated/compile.h>
+#include <generated/utsrelease.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -120,13 +122,13 @@ static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
/* Apply filter */
flt = &hci_pi(sk)->filter;
- flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
+ flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
if (!test_bit(flt_type, &flt->type_mask))
return true;
/* Extra filter for event packets only */
- if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
+ if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
return false;
flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
@@ -170,19 +172,19 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
continue;
if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
- if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
- bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
- bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
- bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
+ if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
+ hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
+ hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
+ hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
continue;
if (is_filtered_packet(sk, skb))
continue;
} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
if (!bt_cb(skb)->incoming)
continue;
- if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
- bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
- bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
+ if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
+ hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
+ hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
continue;
} else {
/* Don't send frame to other channel types */
@@ -196,7 +198,7 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
continue;
/* Put type byte before the data */
- memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
+ memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
}
nskb = skb_clone(skb_copy, GFP_ATOMIC);
@@ -262,7 +264,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("hdev %p len %d", hdev, skb->len);
- switch (bt_cb(skb)->pkt_type) {
+ switch (hci_skb_pkt_type(skb)) {
case HCI_COMMAND_PKT:
opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
break;
@@ -294,7 +296,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
return;
/* Put header before the data */
- hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
+ hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
hdr->opcode = opcode;
hdr->index = cpu_to_le16(hdev->id);
hdr->len = cpu_to_le16(skb->len);
@@ -375,7 +377,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
__net_timestamp(skb);
- hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
hdr->opcode = opcode;
hdr->index = cpu_to_le16(hdev->id);
hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
@@ -383,6 +385,29 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
return skb;
}
+static void send_monitor_note(struct sock *sk, const char *text)
+{
+ size_t len = strlen(text);
+ struct hci_mon_hdr *hdr;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ strcpy(skb_put(skb, len + 1), text);
+
+ __net_timestamp(skb);
+
+ hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
+ hdr->index = cpu_to_le16(HCI_DEV_NONE);
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ if (sock_queue_rcv_skb(sk, skb))
+ kfree_skb(skb);
+}
+
static void send_monitor_replay(struct sock *sk)
{
struct hci_dev *hdev;
@@ -436,18 +461,18 @@ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
if (!skb)
return;
- hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
+ hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
hdr->evt = HCI_EV_STACK_INTERNAL;
hdr->plen = sizeof(*ev) + dlen;
- ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
+ ev = (void *)skb_put(skb, sizeof(*ev) + dlen);
ev->type = type;
memcpy(ev->data, data, dlen);
bt_cb(skb)->incoming = 1;
__net_timestamp(skb);
- bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
hci_send_to_sock(hdev, skb);
kfree_skb(skb);
}
@@ -653,20 +678,20 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
return -EOPNOTSUPP;
case HCIGETCONNINFO:
- return hci_get_conn_info(hdev, (void __user *) arg);
+ return hci_get_conn_info(hdev, (void __user *)arg);
case HCIGETAUTHINFO:
- return hci_get_auth_info(hdev, (void __user *) arg);
+ return hci_get_auth_info(hdev, (void __user *)arg);
case HCIBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- return hci_sock_blacklist_add(hdev, (void __user *) arg);
+ return hci_sock_blacklist_add(hdev, (void __user *)arg);
case HCIUNBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- return hci_sock_blacklist_del(hdev, (void __user *) arg);
+ return hci_sock_blacklist_del(hdev, (void __user *)arg);
}
return -ENOIOCTLCMD;
@@ -675,7 +700,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
- void __user *argp = (void __user *) arg;
+ void __user *argp = (void __user *)arg;
struct sock *sk = sock->sk;
int err;
@@ -872,11 +897,27 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
*/
hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+ send_monitor_note(sk, "Linux version " UTS_RELEASE
+ " (" UTS_MACHINE ")");
+ send_monitor_note(sk, "Bluetooth subsystem version "
+ BT_SUBSYS_VERSION);
send_monitor_replay(sk);
atomic_inc(&monitor_promisc);
break;
+ case HCI_CHANNEL_LOGGING:
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (!capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ goto done;
+ }
+ break;
+
default:
if (!hci_mgmt_chan_find(haddr.hci_channel)) {
err = -EINVAL;
@@ -926,7 +967,7 @@ done:
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
int *addr_len, int peer)
{
- struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
+ struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
struct sock *sk = sock->sk;
struct hci_dev *hdev;
int err = 0;
@@ -991,8 +1032,8 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
}
}
-static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
- int flags)
+static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
@@ -1004,6 +1045,9 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (flags & MSG_OOB)
return -EOPNOTSUPP;
+ if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
+ return -EOPNOTSUPP;
+
if (sk->sk_state == BT_CLOSED)
return 0;
@@ -1150,6 +1194,90 @@ done:
return err;
}
+static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
+{
+ struct hci_mon_hdr *hdr;
+ struct sk_buff *skb;
+ struct hci_dev *hdev;
+ u16 index;
+ int err;
+
+ /* The logging frame consists at minimum of the standard header,
+ * the priority byte, the ident length byte and at least one string
+	 * terminator NUL byte. Anything shorter is an invalid packet.
+ */
+ if (len < sizeof(*hdr) + 3)
+ return -EINVAL;
+
+ skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return err;
+
+ if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
+ err = -EFAULT;
+ goto drop;
+ }
+
+ hdr = (void *)skb->data;
+
+ if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
+ err = -EINVAL;
+ goto drop;
+ }
+
+ if (__le16_to_cpu(hdr->opcode) == 0x0000) {
+ __u8 priority = skb->data[sizeof(*hdr)];
+ __u8 ident_len = skb->data[sizeof(*hdr) + 1];
+
+		/* Only priorities 0-7 are valid; any other value results in
+		 * an invalid packet.
+ *
+ * The priority byte is followed by an ident length byte and
+ * the NUL terminated ident string. Check that the ident
+ * length is not overflowing the packet and also that the
+ * ident string itself is NUL terminated. In case the ident
+ * length is zero, the length value actually doubles as NUL
+ * terminator identifier.
+ *
+ * The message follows the ident string (if present) and
+ * must be NUL terminated. Otherwise it is not a valid packet.
+ */
+ if (priority > 7 || skb->data[len - 1] != 0x00 ||
+ ident_len > len - sizeof(*hdr) - 3 ||
+ skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
+ err = -EINVAL;
+ goto drop;
+ }
+ } else {
+ err = -EINVAL;
+ goto drop;
+ }
+
+ index = __le16_to_cpu(hdr->index);
+
+ if (index != MGMT_INDEX_NONE) {
+ hdev = hci_dev_get(index);
+ if (!hdev) {
+ err = -ENODEV;
+ goto drop;
+ }
+ } else {
+ hdev = NULL;
+ }
+
+ hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
+
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
+ err = len;
+
+ if (hdev)
+ hci_dev_put(hdev);
+
+drop:
+ kfree_skb(skb);
+ return err;
+}
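The validation above pins down the wire format of a logging frame: a 6-byte monitor-style header (opcode 0x0000, index, payload length, all little-endian), a priority byte (0-7), an ident length byte counting the NUL-terminated ident string, the ident itself, and finally a NUL-terminated message. A hedged userspace sketch that builds such a frame (the helper name and buffer handling are hypothetical; the caller is assumed to be bound to HCI_CHANNEL_LOGGING and to size buf adequately):

#include <stdint.h>
#include <string.h>

static size_t build_log_frame(uint8_t *buf, uint16_t index, uint8_t priority,
			      const char *ident, const char *msg)
{
	size_t ident_len = strlen(ident) + 1;		/* includes the NUL */
	size_t msg_len = strlen(msg) + 1;		/* includes the NUL */
	uint16_t payload = (uint16_t)(2 + ident_len + msg_len);

	buf[0] = 0x00;  buf[1] = 0x00;			/* opcode must be 0x0000 */
	buf[2] = index & 0xff;  buf[3] = index >> 8;	/* controller index or 0xffff */
	buf[4] = payload & 0xff;  buf[5] = payload >> 8;
	buf[6] = priority;				/* only 0-7 are accepted */
	buf[7] = ident_len;
	memcpy(buf + 8, ident, ident_len);
	memcpy(buf + 8 + ident_len, msg, msg_len);

	return 6 + payload;
}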
+
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
@@ -1179,6 +1307,9 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
case HCI_CHANNEL_MONITOR:
err = -EOPNOTSUPP;
goto done;
+ case HCI_CHANNEL_LOGGING:
+ err = hci_logging_frame(sk, msg, len);
+ goto done;
default:
mutex_lock(&mgmt_chan_list_lock);
chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
@@ -1211,7 +1342,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
goto drop;
}
- bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
+ hci_skb_pkt_type(skb) = skb->data[0];
skb_pull(skb, 1);
if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
@@ -1220,16 +1351,16 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
*
* However check that the packet type is valid.
*/
- if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
- bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
- bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+ if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
+ hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
+ hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
err = -EINVAL;
goto drop;
}
skb_queue_tail(&hdev->raw_q, skb);
queue_work(hdev->workqueue, &hdev->tx_work);
- } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
+ } else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
u16 opcode = get_unaligned_le16(skb->data);
u16 ogf = hci_opcode_ogf(opcode);
u16 ocf = hci_opcode_ocf(opcode);
@@ -1242,6 +1373,11 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
goto drop;
}
+ /* Since the opcode has already been extracted here, store
+ * a copy of the value for later use by the drivers.
+ */
+ hci_skb_opcode(skb) = opcode;
+
if (ogf == 0x3f) {
skb_queue_tail(&hdev->raw_q, skb);
queue_work(hdev->workqueue, &hdev->tx_work);
@@ -1249,7 +1385,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
/* Stand-alone HCI commands must be flagged as
* single-command requests.
*/
- bt_cb(skb)->hci.req_start = true;
+ bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
skb_queue_tail(&hdev->cmd_q, skb);
queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -1260,8 +1396,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
goto drop;
}
- if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
- bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+ if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
+ hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
err = -EINVAL;
goto drop;
}
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 66e8b6ee19a5..39a5149f3010 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -6538,8 +6538,6 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff *skb)
{
- int err = 0;
-
BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
chan->rx_state);
@@ -6570,7 +6568,7 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
chan->last_acked_seq = control->txseq;
chan->expected_tx_seq = __next_seq(chan, control->txseq);
- return err;
+ return 0;
}
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
@@ -7113,8 +7111,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
chan->dcid = cid;
if (bdaddr_type_is_le(dst_type)) {
- u8 role;
-
/* Convert from L2CAP channel address type to HCI address type
*/
if (dst_type == BDADDR_LE_PUBLIC)
@@ -7123,14 +7119,15 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
dst_type = ADDR_LE_DEV_RANDOM;
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
- role = HCI_ROLE_SLAVE;
+ hcon = hci_connect_le(hdev, dst, dst_type,
+ chan->sec_level,
+ HCI_LE_CONN_TIMEOUT,
+ HCI_ROLE_SLAVE);
else
- role = HCI_ROLE_MASTER;
+ hcon = hci_connect_le_scan(hdev, dst, dst_type,
+ chan->sec_level,
+ HCI_LE_CONN_TIMEOUT);
- hcon = hci_connect_le_scan(hdev, dst, dst_type,
- chan->sec_level,
- HCI_LE_CONN_TIMEOUT,
- role);
} else {
u8 auth_type = l2cap_get_auth_type(chan);
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7f22119276f3..3d9d2e4839c5 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -38,7 +38,7 @@
#include "mgmt_util.h"
#define MGMT_VERSION 1
-#define MGMT_REVISION 10
+#define MGMT_REVISION 11
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -102,6 +102,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_READ_ADV_FEATURES,
MGMT_OP_ADD_ADVERTISING,
MGMT_OP_REMOVE_ADVERTISING,
+ MGMT_OP_GET_ADV_SIZE_INFO,
};
static const u16 mgmt_events[] = {
@@ -1416,49 +1417,6 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
}
}
-static bool hci_stop_discovery(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_remote_name_req_cancel cp;
- struct inquiry_entry *e;
-
- switch (hdev->discovery.state) {
- case DISCOVERY_FINDING:
- if (test_bit(HCI_INQUIRY, &hdev->flags))
- hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
-
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
- cancel_delayed_work(&hdev->le_scan_disable);
- hci_req_add_le_scan_disable(req);
- }
-
- return true;
-
- case DISCOVERY_RESOLVING:
- e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
- NAME_PENDING);
- if (!e)
- break;
-
- bacpy(&cp.bdaddr, &e->data.bdaddr);
- hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
- &cp);
-
- return true;
-
- default:
- /* Passive scanning */
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
- hci_req_add_le_scan_disable(req);
- return true;
- }
-
- break;
- }
-
- return false;
-}
-
static void advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance)
{
@@ -1636,7 +1594,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
disable_advertising(&req);
- discov_stopped = hci_stop_discovery(&req);
+ discov_stopped = hci_req_stop_discovery(&req);
list_for_each_entry(conn, &hdev->conn_hash.list, list) {
/* 0x15 == Terminated due to Power Off */
@@ -2510,8 +2468,8 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
hci_req_init(&req, hdev);
update_adv_data(&req);
update_scan_rsp_data(&req);
- __hci_update_background_scan(&req);
hci_req_run(&req, NULL);
+ hci_update_background_scan(hdev);
}
unlock:
@@ -3561,8 +3519,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
addr_type, sec_level,
- HCI_LE_CONN_TIMEOUT,
- HCI_ROLE_MASTER);
+ HCI_LE_CONN_TIMEOUT);
}
if (IS_ERR(conn)) {
@@ -4164,145 +4121,9 @@ done:
return err;
}
-static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_inquiry cp;
- /* General inquiry access code (GIAC) */
- u8 lap[3] = { 0x33, 0x8b, 0x9e };
-
- *status = mgmt_bredr_support(hdev);
- if (*status)
- return false;
-
- if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
- *status = MGMT_STATUS_BUSY;
- return false;
- }
-
- hci_inquiry_cache_flush(hdev);
-
- memset(&cp, 0, sizeof(cp));
- memcpy(&cp.lap, lap, sizeof(cp.lap));
- cp.length = DISCOV_BREDR_INQUIRY_LEN;
-
- hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
-
- return true;
-}
-
-static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_le_set_scan_param param_cp;
- struct hci_cp_le_set_scan_enable enable_cp;
- u8 own_addr_type;
- int err;
-
- *status = mgmt_le_support(hdev);
- if (*status)
- return false;
-
- if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
- /* Don't let discovery abort an outgoing connection attempt
- * that's using directed advertising.
- */
- if (hci_lookup_le_connect(hdev)) {
- *status = MGMT_STATUS_REJECTED;
- return false;
- }
-
- cancel_adv_timeout(hdev);
- disable_advertising(req);
- }
-
- /* If controller is scanning, it means the background scanning is
- * running. Thus, we should temporarily stop it in order to set the
- * discovery scanning parameters.
- */
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
- hci_req_add_le_scan_disable(req);
-
- /* All active scans will be done with either a resolvable private
- * address (when privacy feature has been enabled) or non-resolvable
- * private address.
- */
- err = hci_update_random_address(req, true, &own_addr_type);
- if (err < 0) {
- *status = MGMT_STATUS_FAILED;
- return false;
- }
-
- memset(&param_cp, 0, sizeof(param_cp));
- param_cp.type = LE_SCAN_ACTIVE;
- param_cp.interval = cpu_to_le16(interval);
- param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
- param_cp.own_address_type = own_addr_type;
-
- hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
- &param_cp);
-
- memset(&enable_cp, 0, sizeof(enable_cp));
- enable_cp.enable = LE_SCAN_ENABLE;
- enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
-
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
- &enable_cp);
-
- return true;
-}
-
-static bool trigger_discovery(struct hci_request *req, u8 *status)
-{
- struct hci_dev *hdev = req->hdev;
-
- switch (hdev->discovery.type) {
- case DISCOV_TYPE_BREDR:
- if (!trigger_bredr_inquiry(req, status))
- return false;
- break;
-
- case DISCOV_TYPE_INTERLEAVED:
- if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
- &hdev->quirks)) {
- /* During simultaneous discovery, we double LE scan
- * interval. We must leave some time for the controller
- * to do BR/EDR inquiry.
- */
- if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
- status))
- return false;
-
- if (!trigger_bredr_inquiry(req, status))
- return false;
-
- return true;
- }
-
- if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
- *status = MGMT_STATUS_NOT_SUPPORTED;
- return false;
- }
- /* fall through */
-
- case DISCOV_TYPE_LE:
- if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
- return false;
- break;
-
- default:
- *status = MGMT_STATUS_INVALID_PARAMS;
- return false;
- }
-
- return true;
-}
-
-static void start_discovery_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
+void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
struct mgmt_pending_cmd *cmd;
- unsigned long timeout;
BT_DBG("status %d", status);
@@ -4317,62 +4138,34 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status,
mgmt_pending_remove(cmd);
}
- if (status) {
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- goto unlock;
- }
-
- hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ hci_dev_unlock(hdev);
+}
- /* If the scan involves LE scan, pick proper timeout to schedule
- * hdev->le_scan_disable that will stop it.
- */
- switch (hdev->discovery.type) {
+static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
+ uint8_t *mgmt_status)
+{
+ switch (type) {
case DISCOV_TYPE_LE:
- timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
+ *mgmt_status = mgmt_le_support(hdev);
+ if (*mgmt_status)
+ return false;
break;
case DISCOV_TYPE_INTERLEAVED:
- /* When running simultaneous discovery, the LE scanning time
- * should occupy the whole discovery time sine BR/EDR inquiry
- * and LE scanning are scheduled by the controller.
- *
- * For interleaving discovery in comparison, BR/EDR inquiry
- * and LE scanning are done sequentially with separate
- * timeouts.
- */
- if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
- timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
- else
- timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
- break;
+ *mgmt_status = mgmt_le_support(hdev);
+ if (*mgmt_status)
+ return false;
+ /* Intentional fall-through */
case DISCOV_TYPE_BREDR:
- timeout = 0;
+ *mgmt_status = mgmt_bredr_support(hdev);
+ if (*mgmt_status)
+ return false;
break;
default:
- BT_ERR("Invalid discovery type %d", hdev->discovery.type);
- timeout = 0;
- break;
- }
-
- if (timeout) {
- /* When service discovery is used and the controller has
- * a strict duplicate filter, it is important to remember
- * the start and duration of the scan. This is required
- * for restarting scanning during the discovery phase.
- */
- if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
- &hdev->quirks) &&
- hdev->discovery.result_filtering) {
- hdev->discovery.scan_start = jiffies;
- hdev->discovery.scan_duration = timeout;
- }
-
- queue_delayed_work(hdev->workqueue,
- &hdev->le_scan_disable, timeout);
+ *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
+ return false;
}
-unlock:
- hci_dev_unlock(hdev);
+ return true;
}
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
@@ -4380,7 +4173,6 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
{
struct mgmt_cp_start_discovery *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
u8 status;
int err;
@@ -4403,14 +4195,12 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
- if (!cmd) {
- err = -ENOMEM;
+ if (!discovery_type_is_valid(hdev, cp->type, &status)) {
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ status, &cp->type, sizeof(cp->type));
goto failed;
}
- cmd->cmd_complete = generic_cmd_complete;
-
/* Clear the discovery filter first to free any previously
* allocated memory for the UUID list.
*/
@@ -4419,22 +4209,17 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
hdev->discovery.type = cp->type;
hdev->discovery.report_invalid_rssi = false;
- hci_req_init(&req, hdev);
-
- if (!trigger_discovery(&req, &status)) {
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
- status, &cp->type, sizeof(cp->type));
- mgmt_pending_remove(cmd);
+ cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
goto failed;
}
- err = hci_req_run(&req, start_discovery_complete);
- if (err < 0) {
- mgmt_pending_remove(cmd);
- goto failed;
- }
+ cmd->cmd_complete = generic_cmd_complete;
hci_discovery_set_state(hdev, DISCOVERY_STARTING);
+ queue_work(hdev->req_workqueue, &hdev->discov_update);
+ err = 0;
failed:
hci_dev_unlock(hdev);
@@ -4453,7 +4238,6 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
{
struct mgmt_cp_start_service_discovery *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
u16 uuid_count, expected_len;
u8 status;
@@ -4502,6 +4286,13 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
+ if (!discovery_type_is_valid(hdev, cp->type, &status)) {
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_START_SERVICE_DISCOVERY,
+ status, &cp->type, sizeof(cp->type));
+ goto failed;
+ }
+
cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
hdev, data, len);
if (!cmd) {
@@ -4534,30 +4325,16 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
}
}
- hci_req_init(&req, hdev);
-
- if (!trigger_discovery(&req, &status)) {
- err = mgmt_cmd_complete(sk, hdev->id,
- MGMT_OP_START_SERVICE_DISCOVERY,
- status, &cp->type, sizeof(cp->type));
- mgmt_pending_remove(cmd);
- goto failed;
- }
-
- err = hci_req_run(&req, start_discovery_complete);
- if (err < 0) {
- mgmt_pending_remove(cmd);
- goto failed;
- }
-
hci_discovery_set_state(hdev, DISCOVERY_STARTING);
+ queue_work(hdev->req_workqueue, &hdev->discov_update);
+ err = 0;
failed:
hci_dev_unlock(hdev);
return err;
}
-static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
struct mgmt_pending_cmd *cmd;
@@ -4571,9 +4348,6 @@ static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
mgmt_pending_remove(cmd);
}
- if (!status)
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-
hci_dev_unlock(hdev);
}
@@ -4582,7 +4356,6 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_cp_stop_discovery *mgmt_cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
BT_DBG("%s", hdev->name);
@@ -4611,24 +4384,9 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
cmd->cmd_complete = generic_cmd_complete;
- hci_req_init(&req, hdev);
-
- hci_stop_discovery(&req);
-
- err = hci_req_run(&req, stop_discovery_complete);
- if (!err) {
- hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
- goto unlock;
- }
-
- mgmt_pending_remove(cmd);
-
- /* If no HCI commands were sent we're done */
- if (err == -ENODATA) {
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
- &mgmt_cp->type, sizeof(mgmt_cp->type));
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- }
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+ queue_work(hdev->req_workqueue, &hdev->discov_update);
+ err = 0;
unlock:
hci_dev_unlock(hdev);
@@ -6076,10 +5834,9 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
}
/* This function requires the caller holds hdev->lock */
-static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
+static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
u8 addr_type, u8 auto_connect)
{
- struct hci_dev *hdev = req->hdev;
struct hci_conn_params *params;
params = hci_conn_params_add(hdev, addr, addr_type);
@@ -6099,26 +5856,17 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
*/
if (params->explicit_connect)
list_add(&params->action, &hdev->pend_le_conns);
-
- __hci_update_background_scan(req);
break;
case HCI_AUTO_CONN_REPORT:
if (params->explicit_connect)
list_add(&params->action, &hdev->pend_le_conns);
else
list_add(&params->action, &hdev->pend_le_reports);
- __hci_update_background_scan(req);
break;
case HCI_AUTO_CONN_DIRECT:
case HCI_AUTO_CONN_ALWAYS:
- if (!is_connected(hdev, addr, addr_type)) {
+ if (!is_connected(hdev, addr, addr_type))
list_add(&params->action, &hdev->pend_le_conns);
- /* If we are in scan phase of connecting, we were
- * already added to pend_le_conns and scanning.
- */
- if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT)
- __hci_update_background_scan(req);
- }
break;
}
@@ -6142,31 +5890,10 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
-static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
-{
- struct mgmt_pending_cmd *cmd;
-
- BT_DBG("status 0x%02x", status);
-
- hci_dev_lock(hdev);
-
- cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
- if (!cmd)
- goto unlock;
-
- cmd->cmd_complete(cmd, mgmt_status(status));
- mgmt_pending_remove(cmd);
-
-unlock:
- hci_dev_unlock(hdev);
-}
-
static int add_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_add_device *cp = data;
- struct mgmt_pending_cmd *cmd;
- struct hci_request req;
u8 auto_conn, addr_type;
int err;
@@ -6183,24 +5910,15 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
MGMT_STATUS_INVALID_PARAMS,
&cp->addr, sizeof(cp->addr));
- hci_req_init(&req, hdev);
-
hci_dev_lock(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto unlock;
- }
-
- cmd->cmd_complete = addr_cmd_complete;
-
if (cp->addr.type == BDADDR_BREDR) {
/* Only incoming connections action is supported for now */
if (cp->action != 0x01) {
- err = cmd->cmd_complete(cmd,
- MGMT_STATUS_INVALID_PARAMS);
- mgmt_pending_remove(cmd);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_ADD_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
goto unlock;
}
@@ -6209,7 +5927,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
if (err)
goto unlock;
- __hci_update_page_scan(&req);
+ hci_update_page_scan(hdev);
goto added;
}
@@ -6229,33 +5947,31 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
* hci_conn_params_lookup.
*/
if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
- err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS);
- mgmt_pending_remove(cmd);
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
goto unlock;
}
/* If the connection parameters don't exist for this device,
* they will be created and configured with defaults.
*/
- if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
+ if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
auto_conn) < 0) {
- err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
- mgmt_pending_remove(cmd);
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+ MGMT_STATUS_FAILED, &cp->addr,
+ sizeof(cp->addr));
goto unlock;
}
+ hci_update_background_scan(hdev);
+
added:
device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
- err = hci_req_run(&req, add_device_complete);
- if (err < 0) {
- /* ENODATA means no HCI commands were needed (e.g. if
- * the adapter is powered off).
- */
- if (err == -ENODATA)
- err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
- mgmt_pending_remove(cmd);
- }
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+ MGMT_STATUS_SUCCESS, &cp->addr,
+ sizeof(cp->addr));
unlock:
hci_dev_unlock(hdev);
@@ -6273,55 +5989,25 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev,
mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
-static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
-{
- struct mgmt_pending_cmd *cmd;
-
- BT_DBG("status 0x%02x", status);
-
- hci_dev_lock(hdev);
-
- cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
- if (!cmd)
- goto unlock;
-
- cmd->cmd_complete(cmd, mgmt_status(status));
- mgmt_pending_remove(cmd);
-
-unlock:
- hci_dev_unlock(hdev);
-}
-
static int remove_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_remove_device *cp = data;
- struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
BT_DBG("%s", hdev->name);
- hci_req_init(&req, hdev);
-
hci_dev_lock(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
- if (!cmd) {
- err = -ENOMEM;
- goto unlock;
- }
-
- cmd->cmd_complete = addr_cmd_complete;
-
if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
struct hci_conn_params *params;
u8 addr_type;
if (!bdaddr_type_is_valid(cp->addr.type)) {
- err = cmd->cmd_complete(cmd,
- MGMT_STATUS_INVALID_PARAMS);
- mgmt_pending_remove(cmd);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
goto unlock;
}
@@ -6330,13 +6016,15 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
&cp->addr.bdaddr,
cp->addr.type);
if (err) {
- err = cmd->cmd_complete(cmd,
- MGMT_STATUS_INVALID_PARAMS);
- mgmt_pending_remove(cmd);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr,
+ sizeof(cp->addr));
goto unlock;
}
- __hci_update_page_scan(&req);
+ hci_update_page_scan(hdev);
device_removed(sk, hdev, &cp->addr.bdaddr,
cp->addr.type);
@@ -6351,33 +6039,36 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
* hci_conn_params_lookup.
*/
if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
- err = cmd->cmd_complete(cmd,
- MGMT_STATUS_INVALID_PARAMS);
- mgmt_pending_remove(cmd);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
goto unlock;
}
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
addr_type);
if (!params) {
- err = cmd->cmd_complete(cmd,
- MGMT_STATUS_INVALID_PARAMS);
- mgmt_pending_remove(cmd);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
goto unlock;
}
if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
- err = cmd->cmd_complete(cmd,
- MGMT_STATUS_INVALID_PARAMS);
- mgmt_pending_remove(cmd);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
goto unlock;
}
list_del(&params->action);
list_del(&params->list);
kfree(params);
- __hci_update_background_scan(&req);
+ hci_update_background_scan(hdev);
device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
} else {
@@ -6385,9 +6076,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
struct bdaddr_list *b, *btmp;
if (cp->addr.type) {
- err = cmd->cmd_complete(cmd,
- MGMT_STATUS_INVALID_PARAMS);
- mgmt_pending_remove(cmd);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
goto unlock;
}
@@ -6397,7 +6089,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
kfree(b);
}
- __hci_update_page_scan(&req);
+ hci_update_page_scan(hdev);
list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
@@ -6414,20 +6106,13 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
BT_DBG("All LE connection parameters were removed");
- __hci_update_background_scan(&req);
+ hci_update_background_scan(hdev);
}
complete:
- err = hci_req_run(&req, remove_device_complete);
- if (err < 0) {
- /* ENODATA means no HCI commands were needed (e.g. if
- * the adapter is powered off).
- */
- if (err == -ENODATA)
- err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
- mgmt_pending_remove(cmd);
- }
-
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
+ MGMT_STATUS_SUCCESS, &cp->addr,
+ sizeof(cp->addr));
unlock:
hci_dev_unlock(hdev);
return err;
@@ -7016,17 +6701,19 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
int i, cur_len;
bool flags_managed = false;
bool tx_power_managed = false;
- u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
- MGMT_ADV_FLAG_MANAGED_FLAGS;
- if (is_adv_data && (adv_flags & flags_params)) {
- flags_managed = true;
- max_len -= 3;
- }
+ if (is_adv_data) {
+ if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
+ MGMT_ADV_FLAG_LIMITED_DISCOV |
+ MGMT_ADV_FLAG_MANAGED_FLAGS)) {
+ flags_managed = true;
+ max_len -= 3;
+ }
- if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
- tx_power_managed = true;
- max_len -= 3;
+ if (adv_flags & MGMT_ADV_FLAG_TX_POWER) {
+ tx_power_managed = true;
+ max_len -= 3;
+ }
}
if (len > max_len)
@@ -7155,6 +6842,10 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
status);
+ if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+ MGMT_STATUS_INVALID_PARAMS);
+
flags = __le32_to_cpu(cp->flags);
timeout = __le16_to_cpu(cp->timeout);
duration = __le16_to_cpu(cp->duration);
@@ -7369,6 +7060,62 @@ unlock:
return err;
}
+static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
+{
+ u8 max_len = HCI_MAX_AD_LENGTH;
+
+ if (is_adv_data) {
+ if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
+ MGMT_ADV_FLAG_LIMITED_DISCOV |
+ MGMT_ADV_FLAG_MANAGED_FLAGS))
+ max_len -= 3;
+
+ if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
+ max_len -= 3;
+ }
+
+ return max_len;
+}
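As a quick worked example of the helper above, assuming HCI_MAX_AD_LENGTH is the usual 31 bytes: an instance whose flags request both the discoverable bits and TX power reporting gets 31 - 3 - 3 = 25 bytes of advertising data, while its scan response data stays at the full 31 bytes, since the two managed fields are only ever inserted into the advertising PDU.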
+
+static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ struct mgmt_cp_get_adv_size_info *cp = data;
+ struct mgmt_rp_get_adv_size_info rp;
+ u32 flags, supported_flags;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!lmp_le_capable(hdev))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
+ MGMT_STATUS_REJECTED);
+
+ if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ flags = __le32_to_cpu(cp->flags);
+
+ /* The current implementation only supports a subset of the specified
+ * flags.
+ */
+ supported_flags = get_supported_adv_flags(hdev);
+ if (flags & ~supported_flags)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ rp.instance = cp->instance;
+ rp.flags = cp->flags;
+ rp.max_adv_data_len = tlv_data_max_len(flags, true);
+ rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
+
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
+ MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
+
+ return err;
+}
+
static const struct hci_mgmt_handler mgmt_handlers[] = {
{ NULL }, /* 0x0000 (no command) */
{ read_version, MGMT_READ_VERSION_SIZE,
@@ -7456,6 +7203,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
{ add_advertising, MGMT_ADD_ADVERTISING_SIZE,
HCI_MGMT_VAR_LEN },
{ remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
+ { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
};
void mgmt_index_added(struct hci_dev *hdev)
@@ -7526,9 +7274,8 @@ void mgmt_index_removed(struct hci_dev *hdev)
}
/* This function requires the caller holds hdev->lock */
-static void restart_le_actions(struct hci_request *req)
+static void restart_le_actions(struct hci_dev *hdev)
{
- struct hci_dev *hdev = req->hdev;
struct hci_conn_params *p;
list_for_each_entry(p, &hdev->le_conn_params, list) {
@@ -7549,8 +7296,6 @@ static void restart_le_actions(struct hci_request *req)
break;
}
}
-
- __hci_update_background_scan(req);
}
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
@@ -7560,12 +7305,8 @@ static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
BT_DBG("status 0x%02x", status);
if (!status) {
- /* Register the available SMP channels (BR/EDR and LE) only
- * when successfully powering on the controller. This late
- * registration is required so that LE SMP can clearly
- * decide if the public address or static address is used.
- */
- smp_register(hdev);
+ restart_le_actions(hdev);
+ hci_update_background_scan(hdev);
}
hci_dev_lock(hdev);
@@ -7644,8 +7385,6 @@ static int powered_update_hci(struct hci_dev *hdev)
hdev->cur_adv_instance)
schedule_adv_instance(&req, hdev->cur_adv_instance,
true);
-
- restart_le_actions(&req);
}
link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
@@ -7677,6 +7416,13 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
return 0;
if (powered) {
+ /* Register the available SMP channels (BR/EDR and LE) only
+ * when successfully powering on the controller. This late
+ * registration is required so that LE SMP can clearly
+ * decide if the public address or static address is used.
+ */
+ smp_register(hdev);
+
if (powered_update_hci(hdev) == 0)
return 0;
@@ -8452,7 +8198,7 @@ static void restart_le_scan(struct hci_dev *hdev)
hdev->discovery.scan_duration))
return;
- queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
+ queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
DISCOV_LE_RESTART_DELAY);
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index c91353841e40..ffed8a1d4f27 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -3027,8 +3027,13 @@ static void smp_ready_cb(struct l2cap_chan *chan)
BT_DBG("chan %p", chan);
+ /* No need to call l2cap_chan_hold() here since we already own
+ * the reference taken in smp_new_conn_cb(). This is just the
+ * first time that we tie it to a specific pointer. The code in
+	 * l2cap_core.c ensures that there's no risk this function won't
+ * get called if smp_new_conn_cb was previously called.
+ */
conn->smp = chan;
- l2cap_chan_hold(chan);
if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags))
bredr_pairing(chan);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ec02f5869a78..8d1d4a22c50d 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -493,7 +493,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
dev->priv_flags |= IFF_BRIDGE_PORT;
- err = netdev_master_upper_dev_link(dev, br->dev);
+ err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL);
if (err)
goto err5;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index cc858919108e..aa209b1066c9 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -323,7 +323,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
!timeo)
break;
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
@@ -331,7 +331,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
if (sock_flag(sk, SOCK_DEAD))
break;
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
}
finish_wait(sk_sleep(sk), &wait);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 617088aee21d..fa9dc6450b08 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -83,8 +83,8 @@ static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int syn
/*
* Wait for the last received packet to be different from skb
*/
-static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
- const struct sk_buff *skb)
+int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
+ const struct sk_buff *skb)
{
int error;
DEFINE_WAIT_FUNC(wait, receiver_wake_function);
@@ -130,6 +130,7 @@ out_noerr:
error = 1;
goto out;
}
+EXPORT_SYMBOL(__skb_wait_for_more_packets);
static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
@@ -161,13 +162,15 @@ done:
}
/**
- * __skb_recv_datagram - Receive a datagram skbuff
+ * __skb_try_recv_datagram - Receive a datagram skbuff
* @sk: socket
* @flags: MSG_ flags
* @peeked: returns non-zero if this packet has been seen before
* @off: an offset in bytes to peek skb from. Returns an offset
* within an skb where data actually starts
* @err: error code returned
+ * @last: set to last peeked message to inform the wait function
+ * what to look for when peeking
*
* Get a datagram skbuff, understands the peeking, nonblocking wakeups
* and possible races. This replaces identical code in packet, raw and
@@ -175,9 +178,11 @@ done:
* the long standing peek and read race for datagram sockets. If you
* alter this routine remember it must be re-entrant.
*
- * This function will lock the socket if a skb is returned, so the caller
- * needs to unlock the socket in that case (usually by calling
- * skb_free_datagram)
+ * This function will lock the socket if a skb is returned, so
+ * the caller needs to unlock the socket in that case (usually by
+ * calling skb_free_datagram). Returns NULL with *err set to
+ * -EAGAIN if no data was available or to some other value if an
+ * error was detected.
*
* * It does not lock socket since today. This function is
* * free of race conditions. This measure should/can improve
@@ -191,13 +196,13 @@ done:
* quite explicitly by POSIX 1003.1g, don't change them without having
* the standard around please.
*/
-struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
- int *peeked, int *off, int *err)
+struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
+ int *peeked, int *off, int *err,
+ struct sk_buff **last)
{
struct sk_buff_head *queue = &sk->sk_receive_queue;
- struct sk_buff *skb, *last;
+ struct sk_buff *skb;
unsigned long cpu_flags;
- long timeo;
/*
* Caller is allowed not to check sk->sk_err before skb_recv_datagram()
*/
@@ -206,8 +211,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
if (error)
goto no_packet;
- timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
-
do {
/* Again only user level code calls this function, so nothing
* interrupt level will suddenly eat the receive_queue.
@@ -217,10 +220,10 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
*/
int _off = *off;
- last = (struct sk_buff *)queue;
+ *last = (struct sk_buff *)queue;
spin_lock_irqsave(&queue->lock, cpu_flags);
skb_queue_walk(queue, skb) {
- last = skb;
+ *last = skb;
*peeked = skb->peeked;
if (flags & MSG_PEEK) {
if (_off >= skb->len && (skb->len || _off ||
@@ -231,8 +234,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
skb = skb_set_peeked(skb);
error = PTR_ERR(skb);
- if (IS_ERR(skb))
- goto unlock_err;
+ if (IS_ERR(skb)) {
+ spin_unlock_irqrestore(&queue->lock,
+ cpu_flags);
+ goto no_packet;
+ }
atomic_inc(&skb->users);
} else
@@ -242,25 +248,38 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
*off = _off;
return skb;
}
+
spin_unlock_irqrestore(&queue->lock, cpu_flags);
+ } while (sk_can_busy_loop(sk) &&
+ sk_busy_loop(sk, flags & MSG_DONTWAIT));
- if (sk_can_busy_loop(sk) &&
- sk_busy_loop(sk, flags & MSG_DONTWAIT))
- continue;
+ error = -EAGAIN;
- /* User doesn't want to wait */
- error = -EAGAIN;
- if (!timeo)
- goto no_packet;
+no_packet:
+ *err = error;
+ return NULL;
+}
+EXPORT_SYMBOL(__skb_try_recv_datagram);
- } while (!wait_for_more_packets(sk, err, &timeo, last));
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ int *peeked, int *off, int *err)
+{
+ struct sk_buff *skb, *last;
+ long timeo;
- return NULL;
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+
+ do {
+ skb = __skb_try_recv_datagram(sk, flags, peeked, off, err,
+ &last);
+ if (skb)
+ return skb;
+
+ if (*err != -EAGAIN)
+ break;
+ } while (timeo &&
+ !__skb_wait_for_more_packets(sk, err, &timeo, last));
-unlock_err:
- spin_unlock_irqrestore(&queue->lock, cpu_flags);
-no_packet:
- *err = error;
return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);
@@ -785,7 +804,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
if (sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
return mask;
}
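
The datagram.c change above splits the old blocking receive into a non-blocking __skb_try_recv_datagram() plus a wait loop, so a protocol can drive the two halves itself and do its own work between attempts. A minimal sketch of such a caller, assuming kernel context and using only the helpers visible in the hunks (the function name example_recv is made up):

static struct sk_buff *example_recv(struct sock *sk, unsigned int flags,
                                    int *off, int *err)
{
        struct sk_buff *skb, *last;
        long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
        int peeked;

        do {
                skb = __skb_try_recv_datagram(sk, flags, &peeked, off, err,
                                              &last);
                if (skb)
                        return skb;             /* got a datagram */
                if (*err != -EAGAIN)
                        return NULL;            /* real error, give up */
                /* room for protocol-specific work between attempts */
        } while (timeo &&
                 !__skb_wait_for_more_packets(sk, err, &timeo, last));

        return NULL;
}
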
diff --git a/net/core/dev.c b/net/core/dev.c
index 5df6cbce727c..8f705fcedb94 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2929,7 +2929,8 @@ static void skb_update_prio(struct sk_buff *skb)
struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
if (!skb->priority && skb->sk && map) {
- unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+ unsigned int prioidx =
+ sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
if (prioidx < map->priomap_len)
skb->priority = map->priomap[prioidx];
@@ -5421,7 +5422,7 @@ static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
static int __netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev, bool master,
- void *private)
+ void *upper_priv, void *upper_info)
{
struct netdev_notifier_changeupper_info changeupper_info;
struct netdev_adjacent *i, *j, *to_i, *to_j;
@@ -5445,6 +5446,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
changeupper_info.upper_dev = upper_dev;
changeupper_info.master = master;
changeupper_info.linking = true;
+ changeupper_info.upper_info = upper_info;
ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
&changeupper_info.info);
@@ -5452,7 +5454,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
if (ret)
return ret;
- ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
+ ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
master);
if (ret)
return ret;
@@ -5490,8 +5492,12 @@ static int __netdev_upper_dev_link(struct net_device *dev,
goto rollback_lower_mesh;
}
- call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
- &changeupper_info.info);
+ ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+ &changeupper_info.info);
+ ret = notifier_to_errno(ret);
+ if (ret)
+ goto rollback_lower_mesh;
+
return 0;
rollback_lower_mesh:
@@ -5545,7 +5551,7 @@ rollback_mesh:
int netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev)
{
- return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
+ return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
@@ -5553,6 +5559,8 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
* netdev_master_upper_dev_link - Add a master link to the upper device
* @dev: device
* @upper_dev: new upper device
+ * @upper_priv: upper device private
+ * @upper_info: upper info to be passed down via notifier
*
* Adds a link to device which is upper to this one. In this case, only
* one master upper device can be linked, although other non-master devices
@@ -5561,20 +5569,14 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
* counts are adjusted and the function returns zero.
*/
int netdev_master_upper_dev_link(struct net_device *dev,
- struct net_device *upper_dev)
+ struct net_device *upper_dev,
+ void *upper_priv, void *upper_info)
{
- return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
+ return __netdev_upper_dev_link(dev, upper_dev, true,
+ upper_priv, upper_info);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
-int netdev_master_upper_dev_link_private(struct net_device *dev,
- struct net_device *upper_dev,
- void *private)
-{
- return __netdev_upper_dev_link(dev, upper_dev, true, private);
-}
-EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
-
/**
* netdev_upper_dev_unlink - Removes a link to upper device
* @dev: device
@@ -5733,7 +5735,7 @@ EXPORT_SYMBOL(netdev_lower_dev_get_private);
int dev_get_nest_level(struct net_device *dev,
- bool (*type_check)(struct net_device *dev))
+ bool (*type_check)(const struct net_device *dev))
{
struct net_device *lower = NULL;
struct list_head *iter;
@@ -5755,6 +5757,26 @@ int dev_get_nest_level(struct net_device *dev,
}
EXPORT_SYMBOL(dev_get_nest_level);
+/**
+ * netdev_lower_state_changed - Dispatch event about lower device state change
+ * @lower_dev: device
+ * @lower_state_info: state to dispatch
+ *
+ * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
+ * The caller must hold the RTNL lock.
+ */
+void netdev_lower_state_changed(struct net_device *lower_dev,
+ void *lower_state_info)
+{
+ struct netdev_notifier_changelowerstate_info changelowerstate_info;
+
+ ASSERT_RTNL();
+ changelowerstate_info.lower_state_info = lower_state_info;
+ call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
+ &changelowerstate_info.info);
+}
+EXPORT_SYMBOL(netdev_lower_state_changed);
+
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
const struct net_device_ops *ops = dev->netdev_ops;
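
With the dev.c changes, the upper-device link call carries a private pointer and a notifier payload, NETDEV_CHANGEUPPER listeners may now veto the link, and a master can push per-port state down with netdev_lower_state_changed(). A hedged sketch of how a hypothetical master driver might use the reworked API (both calls assume the caller holds RTNL; the function names and void * payloads are made up):

static int example_enslave(struct net_device *master, struct net_device *port,
                           void *port_priv, void *port_info)
{
        int err;

        ASSERT_RTNL();
        /* private pointer and notifier payload now travel through the link */
        err = netdev_master_upper_dev_link(port, master, port_priv, port_info);
        if (err)
                return err;     /* a NETDEV_CHANGEUPPER listener vetoed */
        return 0;
}

static void example_port_state_change(struct net_device *port, void *state)
{
        ASSERT_RTNL();
        /* delivers NETDEV_CHANGELOWERSTATE to interested listeners */
        netdev_lower_state_changed(port, state);
}
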
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e6af42da28d9..f18ae91b652e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2215,7 +2215,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
ndm->ndm_pad2 = 0;
ndm->ndm_flags = pn->flags | NTF_PROXY;
ndm->ndm_type = RTN_UNICAST;
- ndm->ndm_ifindex = pn->dev->ifindex;
+ ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
ndm->ndm_state = NUD_NONE;
if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
@@ -2333,7 +2333,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
if (h > s_h)
s_idx = 0;
for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
- if (dev_net(n->dev) != net)
+ if (pneigh_net(n) != net)
continue;
if (idx < s_idx)
goto next;
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 6441f47b1a8f..04257a0e3534 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -56,29 +56,39 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
kfree(css_cls_state(css));
}
-static int update_classid(const void *v, struct file *file, unsigned n)
+static int update_classid_sock(const void *v, struct file *file, unsigned n)
{
int err;
struct socket *sock = sock_from_file(file, &err);
- if (sock)
- sock->sk->sk_classid = (u32)(unsigned long)v;
-
+ if (sock) {
+ spin_lock(&cgroup_sk_update_lock);
+ sock_cgroup_set_classid(&sock->sk->sk_cgrp_data,
+ (unsigned long)v);
+ spin_unlock(&cgroup_sk_update_lock);
+ }
return 0;
}
-static void cgrp_attach(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset)
+static void update_classid(struct cgroup_subsys_state *css, void *v)
{
- struct cgroup_cls_state *cs = css_cls_state(css);
- void *v = (void *)(unsigned long)cs->classid;
+ struct css_task_iter it;
struct task_struct *p;
- cgroup_taskset_for_each(p, tset) {
+ css_task_iter_start(css, &it);
+ while ((p = css_task_iter_next(&it))) {
task_lock(p);
- iterate_fd(p->files, 0, update_classid, v);
+ iterate_fd(p->files, 0, update_classid_sock, v);
task_unlock(p);
}
+ css_task_iter_end(&it);
+}
+
+static void cgrp_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
+{
+ update_classid(css,
+ (void *)(unsigned long)css_cls_state(css)->classid);
}
static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -89,8 +99,13 @@ static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
u64 value)
{
- css_cls_state(css)->classid = (u32) value;
+ struct cgroup_cls_state *cs = css_cls_state(css);
+
+ cgroup_sk_alloc_disable();
+
+ cs->classid = (u32)value;
+ update_classid(css, (void *)(unsigned long)cs->classid);
return 0;
}
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index cbd0a199bf52..053d60c33395 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -27,6 +27,12 @@
#include <linux/fdtable.h>
+/*
+ * netprio allocates per-net_device priomap array which is indexed by
+ * css->id. Limiting css ID to 16bits doesn't lose anything.
+ */
+#define NETPRIO_ID_MAX USHRT_MAX
+
#define PRIOMAP_MIN_SZ 128
/*
@@ -144,6 +150,9 @@ static int cgrp_css_online(struct cgroup_subsys_state *css)
struct net_device *dev;
int ret = 0;
+ if (css->id > NETPRIO_ID_MAX)
+ return -ENOSPC;
+
if (!parent_css)
return 0;
@@ -200,6 +209,8 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
if (!dev)
return -ENODEV;
+ cgroup_sk_alloc_disable();
+
rtnl_lock();
ret = netprio_set_prio(of_css(of), dev, prio);
@@ -213,8 +224,12 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
{
int err;
struct socket *sock = sock_from_file(file, &err);
- if (sock)
- sock->sk->sk_cgrp_prioidx = (u32)(unsigned long)v;
+ if (sock) {
+ spin_lock(&cgroup_sk_update_lock);
+ sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
+ (unsigned long)v);
+ spin_unlock(&cgroup_sk_update_lock);
+ }
return 0;
}
diff --git a/net/core/scm.c b/net/core/scm.c
index 3b6899b7d810..14596fb37172 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -289,8 +289,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
/* Bump the usage count and install the file. */
sock = sock_from_file(fp[i], &err);
if (sock) {
- sock_update_netprioidx(sock->sk);
- sock_update_classid(sock->sk);
+ sock_update_netprioidx(&sock->sk->sk_cgrp_data);
+ sock_update_classid(&sock->sk->sk_cgrp_data);
}
fd_install(new_fd, get_file(fp[i]));
}
@@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
err = put_user(cmlen, &cm->cmsg_len);
if (!err) {
cmlen = CMSG_SPACE(i*sizeof(int));
+ if (msg->msg_controllen < cmlen)
+ cmlen = msg->msg_controllen;
msg->msg_control += cmlen;
msg->msg_controllen -= cmlen;
}
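
The second scm.c hunk clamps the cmsg length to what is actually left in the user's control buffer before advancing msg_control, so the remaining length cannot underflow. A standalone illustration of the clamp arithmetic (the values are made up):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        size_t controllen = 16;   /* bytes left in the user control buffer */
        size_t cmlen = 24;        /* CMSG_SPACE() for the fds we copied */

        if (cmlen > controllen)   /* the check added in scm_detach_fds() */
                cmlen = controllen;

        printf("advance by %zu, %zu bytes remain\n", cmlen,
               controllen - cmlen);
        return 0;
}
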
diff --git a/net/core/sock.c b/net/core/sock.c
index 1e4dd54bfb5a..1278d7b7bd9a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1363,6 +1363,7 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
if (!try_module_get(prot->owner))
goto out_free_sec;
sk_tx_queue_clear(sk);
+ cgroup_sk_alloc(&sk->sk_cgrp_data);
}
return sk;
@@ -1385,6 +1386,7 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
owner = prot->owner;
slab = prot->slab;
+ cgroup_sk_free(&sk->sk_cgrp_data);
security_sk_free(sk);
if (slab != NULL)
kmem_cache_free(slab, sk);
@@ -1393,17 +1395,6 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
module_put(owner);
}
-#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
-void sock_update_netprioidx(struct sock *sk)
-{
- if (in_interrupt())
- return;
-
- sk->sk_cgrp_prioidx = task_netprioidx(current);
-}
-EXPORT_SYMBOL_GPL(sock_update_netprioidx);
-#endif
-
/**
* sk_alloc - All socket objects are allocated here
* @net: the applicable net namespace
@@ -1432,8 +1423,8 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
sock_net_set(sk, net);
atomic_set(&sk->sk_wmem_alloc, 1);
- sock_update_classid(sk);
- sock_update_netprioidx(sk);
+ sock_update_classid(&sk->sk_cgrp_data);
+ sock_update_netprioidx(&sk->sk_cgrp_data);
}
return sk;
@@ -1530,7 +1521,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
skb_queue_head_init(&newsk->sk_receive_queue);
skb_queue_head_init(&newsk->sk_write_queue);
- spin_lock_init(&newsk->sk_dst_lock);
rwlock_init(&newsk->sk_callback_lock);
lockdep_set_class_and_name(&newsk->sk_callback_lock,
af_callback_keys + newsk->sk_family,
@@ -1607,7 +1597,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
u32 max_segs = 1;
- __sk_dst_set(sk, dst);
+ sk_dst_set(sk, dst);
sk->sk_route_caps = dst->dev->features;
if (sk->sk_route_caps & NETIF_F_GSO)
sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
@@ -1815,7 +1805,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
DEFINE_WAIT(wait);
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
for (;;) {
if (!timeo)
break;
@@ -1861,7 +1851,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
break;
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
err = -EAGAIN;
if (!timeo)
@@ -2048,9 +2038,9 @@ int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
DEFINE_WAIT(wait);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
finish_wait(sk_sleep(sk), &wait);
return rc;
}
@@ -2283,7 +2273,7 @@ static void sock_def_wakeup(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_all(&wq->wait);
rcu_read_unlock();
}
@@ -2294,7 +2284,7 @@ static void sock_def_error_report(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_poll(&wq->wait, POLLERR);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
rcu_read_unlock();
@@ -2306,7 +2296,7 @@ static void sock_def_readable(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
@@ -2324,7 +2314,7 @@ static void sock_def_write_space(struct sock *sk)
*/
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
@@ -2388,7 +2378,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
} else
sk->sk_wq = NULL;
- spin_lock_init(&sk->sk_dst_lock);
rwlock_init(&sk->sk_callback_lock);
lockdep_set_class_and_name(&sk->sk_callback_lock,
af_callback_keys + sk->sk_family,
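
Across sock.c, stream.c, dccp and decnet the async flags are renamed and, judging by the stream.c hunk where sock_wake_async() now takes the wait-queue, moved out of sk->sk_socket->flags into the socket wait-queue structure; callers go through sk_set_bit()/sk_clear_bit() and test for sleepers with skwq_has_sleeper(). A sketch of the resulting idiom (the function names are made up; the helpers come from the socket headers):

static void example_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))               /* was wq_has_sleeper() */
                wake_up_interruptible_poll(&wq->wait, POLLOUT);
        rcu_read_unlock();
}

static void example_mark_nospace(struct sock *sk)
{
        /* was: set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); */
        sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
}
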
diff --git a/net/core/stream.c b/net/core/stream.c
index d70f77a0c889..159516a11b7e 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -35,11 +35,11 @@ void sk_stream_write_space(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
- sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
+ sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
}
@@ -126,7 +126,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;
while (1) {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
@@ -139,7 +139,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
}
if (signal_pending(current))
goto do_interrupted;
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
if (sk_stream_memory_free(sk) && !vm_wait)
break;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index db5fc2440a23..9c6d0508e63a 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -202,7 +202,9 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ rcu_read_lock();
+ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
+ rcu_read_unlock();
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
@@ -219,7 +221,10 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
&ireq->ir_v6_loc_addr,
&ireq->ir_v6_rmt_addr);
fl6.daddr = ireq->ir_v6_rmt_addr;
- err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+ rcu_read_lock();
+ err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+ np->tclass);
+ rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -387,6 +392,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *newnp;
const struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_txoptions *opt;
struct inet_sock *newinet;
struct dccp6_sock *newdp6;
struct sock *newsk;
@@ -453,7 +459,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
* comment in that function for the gory details. -acme
*/
- __ip6_dst_store(newsk, dst, NULL, NULL);
+ ip6_dst_store(newsk, dst, NULL, NULL);
newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
NETIF_F_TSO);
newdp6 = (struct dccp6_sock *)newsk;
@@ -488,13 +494,15 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
* Yes, keeping reference count would be much more clever, but we make
* one more one thing there: reattach optmem to newsk.
*/
- if (np->opt != NULL)
- newnp->opt = ipv6_dup_options(newsk, np->opt);
-
+ opt = rcu_dereference(np->opt);
+ if (opt) {
+ opt = ipv6_dup_options(newsk, opt);
+ RCU_INIT_POINTER(newnp->opt, opt);
+ }
inet_csk(newsk)->icsk_ext_hdr_len = 0;
- if (newnp->opt != NULL)
- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
- newnp->opt->opt_flen);
+ if (opt)
+ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
+ opt->opt_flen;
dccp_sync_mss(newsk, dst_mtu(dst));
@@ -757,6 +765,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
struct ipv6_pinfo *np = inet6_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
struct in6_addr *saddr = NULL, *final_p, final;
+ struct ipv6_txoptions *opt;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_type;
@@ -856,7 +865,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl6.fl6_sport = inet->inet_sport;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ final_p = fl6_update_dst(&fl6, opt, &final);
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
@@ -873,12 +883,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
np->saddr = *saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
- __ip6_dst_store(sk, dst, NULL, NULL);
+ ip6_dst_store(sk, dst, NULL, NULL);
icsk->icsk_ext_hdr_len = 0;
- if (np->opt != NULL)
- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
- np->opt->opt_nflen);
+ if (opt)
+ icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
inet->inet_dport = usin->sin6_port;
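
The dccp/ipv6.c hunks convert np->opt to RCU: plain readers wrap the access in rcu_read_lock()/rcu_dereference(), code that owns the socket lock uses rcu_dereference_protected(), and new options are published with RCU_INIT_POINTER(). A sketch of the two access patterns, mirroring the hunks above (the helper names are made up):

static struct in6_addr *example_reader(struct sock *sk, struct flowi6 *fl6,
                                       struct in6_addr *final)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *final_p;

        rcu_read_lock();
        final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), final);
        rcu_read_unlock();
        return final_p;
}

static struct ipv6_txoptions *example_owner(struct sock *sk)
{
        struct ipv6_pinfo *np = inet6_sk(sk);

        /* valid because the caller owns the socket lock */
        return rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
}
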
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 4ce912e691d0..b66c84db0766 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -201,7 +201,7 @@ void dccp_write_space(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible(&wq->wait);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b5cf13a28009..41e65804ddf5 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -339,8 +339,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
if (sk_stream_is_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
- set_bit(SOCK_ASYNC_NOSPACE,
- &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
/* Race breaker. If space is freed after
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 675cf94e04f8..eebf5ac8ce18 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1747,9 +1747,9 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
}
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
finish_wait(sk_sleep(sk), &wait);
}
@@ -2004,10 +2004,10 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk_wait_event(sk, &timeo,
!dn_queue_too_long(scp, queue, flags));
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
finish_wait(sk_sleep(sk), &wait);
continue;
}
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 4677b6fa6dda..ecc28cff08ab 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -67,7 +67,7 @@
* Returns the size of the result on success, -ve error code otherwise.
*/
int dns_query(const char *type, const char *name, size_t namelen,
- const char *options, char **_result, time_t *_expiry)
+ const char *options, char **_result, time64_t *_expiry)
{
struct key *rkey;
const struct user_key_payload *upayload;
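
The dns_query() prototype widens the expiry argument from time_t to time64_t, presumably as part of the y2038 work: a signed 32-bit seconds counter runs out in January 2038. A standalone illustration of that limit (the build line is hypothetical; compile with something like "cc y2038.c"):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
        /* largest second a signed 32-bit time_t can represent */
        time_t limit = (time_t)INT32_MAX;

        printf("32-bit time_t ends at: %s", ctime(&limit));
        return 0;
}
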
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 1eba07feb34a..208d1b257194 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -21,8 +21,10 @@
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
+#include <linux/of_gpio.h>
#include <linux/sysfs.h>
#include <linux/phy_fixed.h>
+#include <linux/gpio/consumer.h>
#include "dsa_priv.h"
char dsa_driver_version[] = "0.1";
@@ -454,8 +456,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
if (!ds->ports[port])
continue;
- unregister_netdev(ds->ports[port]);
- free_netdev(ds->ports[port]);
+ dsa_slave_destroy(ds->ports[port]);
}
mdiobus_unregister(ds->slave_mii_bus);
@@ -506,33 +507,6 @@ static int dsa_switch_resume(struct dsa_switch *ds)
}
#endif
-
-/* link polling *************************************************************/
-static void dsa_link_poll_work(struct work_struct *ugly)
-{
- struct dsa_switch_tree *dst;
- int i;
-
- dst = container_of(ugly, struct dsa_switch_tree, link_poll_work);
-
- for (i = 0; i < dst->pd->nr_chips; i++) {
- struct dsa_switch *ds = dst->ds[i];
-
- if (ds != NULL && ds->drv->poll_link != NULL)
- ds->drv->poll_link(ds);
- }
-
- mod_timer(&dst->link_poll_timer, round_jiffies(jiffies + HZ));
-}
-
-static void dsa_link_poll_timer(unsigned long _dst)
-{
- struct dsa_switch_tree *dst = (void *)_dst;
-
- schedule_work(&dst->link_poll_work);
-}
-
-
/* platform driver init and cleanup *****************************************/
static int dev_is_class(struct device *dev, void *class)
{
@@ -688,6 +662,9 @@ static int dsa_of_probe(struct device *dev)
const char *port_name;
int chip_index, port_index;
const unsigned int *sw_addr, *port_reg;
+ int gpio;
+ enum of_gpio_flags of_flags;
+ unsigned long flags;
u32 eeprom_len;
int ret;
@@ -766,6 +743,19 @@ static int dsa_of_probe(struct device *dev)
put_device(cd->host_dev);
cd->host_dev = &mdio_bus_switch->dev;
}
+ gpio = of_get_named_gpio_flags(child, "reset-gpios", 0,
+ &of_flags);
+ if (gpio_is_valid(gpio)) {
+ flags = (of_flags == OF_GPIO_ACTIVE_LOW ?
+ GPIOF_ACTIVE_LOW : 0);
+ ret = devm_gpio_request_one(dev, gpio, flags,
+ "switch_reset");
+ if (ret)
+ goto out_free_chip;
+
+ cd->reset = gpio_to_desc(gpio);
+ gpiod_direction_output(cd->reset, 0);
+ }
for_each_available_child_of_node(child, port) {
port_reg = of_get_property(port, "reg", NULL);
@@ -859,8 +849,6 @@ static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
}
dst->ds[i] = ds;
- if (ds->drv->poll_link != NULL)
- dst->link_poll_needed = 1;
++configured;
}
@@ -879,15 +867,6 @@ static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
wmb();
dev->dsa_ptr = (void *)dst;
- if (dst->link_poll_needed) {
- INIT_WORK(&dst->link_poll_work, dsa_link_poll_work);
- init_timer(&dst->link_poll_timer);
- dst->link_poll_timer.data = (unsigned long)dst;
- dst->link_poll_timer.function = dsa_link_poll_timer;
- dst->link_poll_timer.expires = round_jiffies(jiffies + HZ);
- add_timer(&dst->link_poll_timer);
- }
-
return 0;
}
@@ -939,8 +918,10 @@ static int dsa_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dst);
ret = dsa_setup_dst(dst, dev, &pdev->dev, pd);
- if (ret)
+ if (ret) {
+ dev_put(dev);
goto out;
+ }
return 0;
@@ -954,17 +935,14 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
{
int i;
- if (dst->link_poll_needed)
- del_timer_sync(&dst->link_poll_timer);
-
- flush_work(&dst->link_poll_work);
-
for (i = 0; i < dst->pd->nr_chips; i++) {
struct dsa_switch *ds = dst->ds[i];
if (ds)
dsa_switch_destroy(ds);
}
+
+ dev_put(dst->master_netdev);
}
static int dsa_remove(struct platform_device *pdev)
@@ -1010,6 +988,14 @@ static int dsa_suspend(struct device *d)
struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
int i, ret = 0;
+ dst->master_netdev->dsa_ptr = NULL;
+
+ /* If we used a tagging format that doesn't have an ethertype
+ * field, make sure that all packets from this point get sent
+ * without the tag and go through the regular receive path.
+ */
+ wmb();
+
for (i = 0; i < dst->pd->nr_chips; i++) {
struct dsa_switch *ds = dst->ds[i];
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 311796c809af..1d1a54687e4a 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -61,6 +61,7 @@ extern const struct dsa_device_ops notag_netdev_ops;
void dsa_slave_mii_bus_init(struct dsa_switch *ds);
int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
int port, char *name);
+void dsa_slave_destroy(struct net_device *slave_dev);
int dsa_slave_suspend(struct net_device *slave_dev);
int dsa_slave_resume(struct net_device *slave_dev);
int dsa_slave_netdevice_event(struct notifier_block *unused,
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 7bc787b095c8..1e9e9424a33d 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1212,6 +1212,17 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
return 0;
}
+void dsa_slave_destroy(struct net_device *slave_dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(slave_dev);
+
+ netif_carrier_off(slave_dev);
+ if (p->phy)
+ phy_disconnect(p->phy);
+ unregister_netdev(slave_dev);
+ free_netdev(slave_dev);
+}
+
static bool dsa_slave_dev_check(struct net_device *dev)
{
return dev->netdev_ops == &dsa_slave_netdev_ops;
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 35a9788bb3ae..c7d1adca30d8 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -312,7 +312,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master, u8 type)
return;
out:
- WARN_ON_ONCE("HSR: Could not send supervision frame\n");
+ WARN_ONCE(1, "HSR: Could not send supervision frame\n");
kfree_skb(skb);
}
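
The hsr fix works because WARN_ON_ONCE() takes only a condition: handing it a string literal warns on an always-true condition and never prints the text, whereas WARN_ONCE() takes a condition plus a printf-style message. A small illustration (the function name is made up):

static void example_warnings(int bad)
{
        WARN_ON_ONCE(bad);                              /* condition only */
        WARN_ONCE(bad, "something went wrong: %d\n", bad);
}
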
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6baf36e11808..05e4cba14162 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2126,7 +2126,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
ASSERT_RTNL();
in_dev = ip_mc_find_dev(net, imr);
- if (!in_dev) {
+ if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) {
ret = -ENODEV;
goto out;
}
@@ -2147,7 +2147,8 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
*imlp = iml->next_rcu;
- ip_mc_dec_group(in_dev, group);
+ if (in_dev)
+ ip_mc_dec_group(in_dev, group);
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 614521437e30..04a48c0159cc 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -24,7 +24,6 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
-#include <linux/mroute.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4233cbe47052..e0b94cd843d7 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -76,7 +76,6 @@
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
-#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index cbb51f3fac06..0f6e9ee031c4 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -30,7 +30,6 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
-#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 6cb9009c3d96..1db8418aa62e 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -24,7 +24,6 @@
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
-#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 4d8f0b698777..02d9c21e2953 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -30,7 +30,6 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
-#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index e86e8a9738ea..67f7c9de0b16 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1239,13 +1239,13 @@ static int __init ic_dynamic(void)
(ic_proto_enabled & IC_USE_DHCP) &&
ic_dhcp_msgtype != DHCPACK) {
ic_got_reply = 0;
- pr_notice(",");
+ pr_cont(",");
continue;
}
#endif /* IPCONFIG_DHCP */
if (ic_got_reply) {
- pr_notice(" OK\n");
+ pr_cont(" OK\n");
break;
}
@@ -1253,7 +1253,7 @@ static int __init ic_dynamic(void)
continue;
if (! --retries) {
- pr_notice(" timed out!\n");
+ pr_cont(" timed out!\n");
break;
}
@@ -1263,7 +1263,7 @@ static int __init ic_dynamic(void)
if (timeout > CONF_TIMEOUT_MAX)
timeout = CONF_TIMEOUT_MAX;
- pr_notice(".");
+ pr_cont(".");
}
#ifdef IPCONFIG_BOOTP
@@ -1280,11 +1280,10 @@ static int __init ic_dynamic(void)
return -1;
}
- pr_info("IP-Config: Got %s answer from %pI4, ",
+ pr_info("IP-Config: Got %s answer from %pI4, my address is %pI4\n",
((ic_got_reply & IC_RARP) ? "RARP"
: (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
- &ic_addrservaddr);
- pr_info("my address is %pI4\n", &ic_myaddr);
+ &ic_addrservaddr, &ic_myaddr);
return 0;
}
@@ -1527,14 +1526,14 @@ static int __init ip_auto_config(void)
pr_cont(", mtu=%d", ic_dev_mtu);
for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
if (ic_nameservers[i] != NONE) {
- pr_info(" nameserver%u=%pI4",
+ pr_cont(" nameserver%u=%pI4",
i, &ic_nameservers[i]);
break;
}
for (i++; i < CONF_NAMESERVERS_MAX; i++)
if (ic_nameservers[i] != NONE)
- pr_info(", nameserver%u=%pI4", i, &ic_nameservers[i]);
- pr_info("\n");
+ pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]);
+ pr_cont("\n");
#endif /* !SILENT */
return 0;
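
The ipconfig.c hunks switch the progress output to pr_cont(), which appends to the previous printk record instead of starting a new record at a new log level, so the retry dots and the nameserver list stay on one line. A sketch of the idiom (the function name is made up):

static void example_progress(int tries)
{
        int i;

        pr_info("IP-Config: probing");  /* starts the line */
        for (i = 0; i < tries; i++)
                pr_cont(".");           /* appends, no new record */
        pr_cont(" OK\n");               /* terminates the line */
}
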
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index f34c31defafe..1f067294cbc5 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -103,7 +103,6 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
-#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 92dd4b74d513..395e2814a46d 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -66,28 +66,7 @@
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
-
-#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
-#define CONFIG_IP_PIMSM 1
-#endif
-
-struct mr_table {
- struct list_head list;
- possible_net_t net;
- u32 id;
- struct sock __rcu *mroute_sk;
- struct timer_list ipmr_expire_timer;
- struct list_head mfc_unres_queue;
- struct list_head mfc_cache_array[MFC_LINES];
- struct vif_device vif_table[MAXVIFS];
- int maxvif;
- atomic_t cache_resolve_queue_len;
- bool mroute_do_assert;
- bool mroute_do_pim;
-#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
- int mroute_reg_vif_num;
-#endif
-};
+#include <net/nexthop.h>
struct ipmr_rule {
struct fib_rule common;
@@ -103,11 +82,7 @@ struct ipmr_result {
static DEFINE_RWLOCK(mrt_lock);
-/*
- * Multicast router control variables
- */
-
-#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
+/* Multicast router control variables */
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -134,7 +109,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
int cmd);
-static void mroute_clean_tables(struct mr_table *mrt);
+static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -252,8 +227,8 @@ static int __net_init ipmr_rules_init(struct net *net)
INIT_LIST_HEAD(&net->ipv4.mr_tables);
mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
- if (!mrt) {
- err = -ENOMEM;
+ if (IS_ERR(mrt)) {
+ err = PTR_ERR(mrt);
goto err1;
}
@@ -301,8 +276,13 @@ static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
static int __net_init ipmr_rules_init(struct net *net)
{
- net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
- return net->ipv4.mrt ? 0 : -ENOMEM;
+ struct mr_table *mrt;
+
+ mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
+ if (IS_ERR(mrt))
+ return PTR_ERR(mrt);
+ net->ipv4.mrt = mrt;
+ return 0;
}
static void __net_exit ipmr_rules_exit(struct net *net)
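
ipmr_new_table() now reports why it failed by returning ERR_PTR(-EINVAL) or ERR_PTR(-ENOMEM) instead of NULL, so callers switch to IS_ERR()/PTR_ERR(), as the ipmr_rules_init() hunks above show. A sketch of the caller contract (the wrapper name is made up):

static int example_get_table(struct net *net, u32 id, struct mr_table **mrtp)
{
        struct mr_table *mrt = ipmr_new_table(net, id);

        if (IS_ERR(mrt))
                return PTR_ERR(mrt);
        *mrtp = mrt;
        return 0;
}
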
@@ -319,13 +299,17 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
struct mr_table *mrt;
unsigned int i;
+ /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
+ if (id != RT_TABLE_DEFAULT && id >= 1000000000)
+ return ERR_PTR(-EINVAL);
+
mrt = ipmr_get_table(net, id);
if (mrt)
return mrt;
mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
if (!mrt)
- return NULL;
+ return ERR_PTR(-ENOMEM);
write_pnet(&mrt->net, net);
mrt->id = id;
@@ -338,9 +322,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
(unsigned long)mrt);
-#ifdef CONFIG_IP_PIMSM
mrt->mroute_reg_vif_num = -1;
-#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
@@ -350,7 +332,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
static void ipmr_free_table(struct mr_table *mrt)
{
del_timer_sync(&mrt->ipmr_expire_timer);
- mroute_clean_tables(mrt);
+ mroute_clean_tables(mrt, true);
kfree(mrt);
}
@@ -387,8 +369,24 @@ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
}
}
-static
-struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
+/* Initialize ipmr pimreg/tunnel in_device */
+static bool ipmr_init_vif_indev(const struct net_device *dev)
+{
+ struct in_device *in_dev;
+
+ ASSERT_RTNL();
+
+ in_dev = __in_dev_get_rtnl(dev);
+ if (!in_dev)
+ return false;
+ ipv4_devconf_setall(in_dev);
+ neigh_parms_data_state_setall(in_dev->arp_parms);
+ IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
+
+ return true;
+}
+
+static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
struct net_device *dev;
@@ -399,7 +397,6 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
int err;
struct ifreq ifr;
struct ip_tunnel_parm p;
- struct in_device *in_dev;
memset(&p, 0, sizeof(p));
p.iph.daddr = v->vifc_rmt_addr.s_addr;
@@ -424,15 +421,8 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
if (err == 0 &&
(dev = __dev_get_by_name(net, p.name)) != NULL) {
dev->flags |= IFF_MULTICAST;
-
- in_dev = __in_dev_get_rtnl(dev);
- if (!in_dev)
+ if (!ipmr_init_vif_indev(dev))
goto failure;
-
- ipv4_devconf_setall(in_dev);
- neigh_parms_data_state_setall(in_dev->arp_parms);
- IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
-
if (dev_open(dev))
goto failure;
dev_hold(dev);
@@ -441,16 +431,11 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
return dev;
failure:
- /* allow the register to be completed before unregistering. */
- rtnl_unlock();
- rtnl_lock();
-
unregister_netdevice(dev);
return NULL;
}
-#ifdef CONFIG_IP_PIMSM
-
+#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net *net = dev_net(dev);
@@ -500,7 +485,6 @@ static void reg_vif_setup(struct net_device *dev)
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
struct net_device *dev;
- struct in_device *in_dev;
char name[IFNAMSIZ];
if (mrt->id == RT_TABLE_DEFAULT)
@@ -520,18 +504,8 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
return NULL;
}
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(dev);
- if (!in_dev) {
- rcu_read_unlock();
+ if (!ipmr_init_vif_indev(dev))
goto failure;
- }
-
- ipv4_devconf_setall(in_dev);
- neigh_parms_data_state_setall(in_dev->arp_parms);
- IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
- rcu_read_unlock();
-
if (dev_open(dev))
goto failure;
@@ -540,20 +514,59 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
return dev;
failure:
- /* allow the register to be completed before unregistering. */
- rtnl_unlock();
- rtnl_lock();
-
unregister_netdevice(dev);
return NULL;
}
+
+/* called with rcu_read_lock() */
+static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
+ unsigned int pimlen)
+{
+ struct net_device *reg_dev = NULL;
+ struct iphdr *encap;
+
+ encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
+ /* Check that:
+ * a. packet is really sent to a multicast group
+ * b. packet is not a NULL-REGISTER
+ * c. packet is not truncated
+ */
+ if (!ipv4_is_multicast(encap->daddr) ||
+ encap->tot_len == 0 ||
+ ntohs(encap->tot_len) + pimlen > skb->len)
+ return 1;
+
+ read_lock(&mrt_lock);
+ if (mrt->mroute_reg_vif_num >= 0)
+ reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
+ read_unlock(&mrt_lock);
+
+ if (!reg_dev)
+ return 1;
+
+ skb->mac_header = skb->network_header;
+ skb_pull(skb, (u8 *)encap - skb->data);
+ skb_reset_network_header(skb);
+ skb->protocol = htons(ETH_P_IP);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
+
+ netif_rx(skb);
+
+ return NET_RX_SUCCESS;
+}
+#else
+static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
+{
+ return NULL;
+}
#endif
/**
* vif_delete - Delete a VIF entry
* @notify: Set to 1, if the caller is a notifier_call
*/
-
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
struct list_head *head)
{
@@ -575,10 +588,8 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
return -EADDRNOTAVAIL;
}
-#ifdef CONFIG_IP_PIMSM
if (vifi == mrt->mroute_reg_vif_num)
mrt->mroute_reg_vif_num = -1;
-#endif
if (vifi + 1 == mrt->maxvif) {
int tmp;
@@ -625,7 +636,6 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
/* Destroy an unresolved cache entry, killing queued skbs
* and reporting error to netlink readers.
*/
-
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
struct net *net = read_pnet(&mrt->net);
@@ -653,9 +663,7 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
ipmr_cache_free(c);
}
-
/* Timer process for the unresolved queue. */
-
static void ipmr_expire_process(unsigned long arg)
{
struct mr_table *mrt = (struct mr_table *)arg;
@@ -695,7 +703,6 @@ out:
}
/* Fill oifs list. It is called under write locked mrt_lock. */
-
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
unsigned char *ttls)
{
@@ -731,10 +738,10 @@ static int vif_add(struct net *net, struct mr_table *mrt,
return -EADDRINUSE;
switch (vifc->vifc_flags) {
-#ifdef CONFIG_IP_PIMSM
case VIFF_REGISTER:
- /*
- * Special Purpose VIF in PIM
+ if (!ipmr_pimsm_enabled())
+ return -EINVAL;
+ /* Special Purpose VIF in PIM
* All the packets will be sent to the daemon
*/
if (mrt->mroute_reg_vif_num >= 0)
@@ -749,7 +756,6 @@ static int vif_add(struct net *net, struct mr_table *mrt,
return err;
}
break;
-#endif
case VIFF_TUNNEL:
dev = ipmr_new_tunnel(net, vifc);
if (!dev)
@@ -761,7 +767,6 @@ static int vif_add(struct net *net, struct mr_table *mrt,
return err;
}
break;
-
case VIFF_USE_IFINDEX:
case 0:
if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
@@ -815,10 +820,8 @@ static int vif_add(struct net *net, struct mr_table *mrt,
/* And finish update writing critical data */
write_lock_bh(&mrt_lock);
v->dev = dev;
-#ifdef CONFIG_IP_PIMSM
if (v->flags & VIFF_REGISTER)
mrt->mroute_reg_vif_num = vifi;
-#endif
if (vifi+1 > mrt->maxvif)
mrt->maxvif = vifi+1;
write_unlock_bh(&mrt_lock);
@@ -883,9 +886,7 @@ skip:
return ipmr_cache_find_any_parent(mrt, vifi);
}
-/*
- * Allocate a multicast cache entry
- */
+/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
@@ -906,10 +907,7 @@ static struct mfc_cache *ipmr_cache_alloc_unres(void)
return c;
}
-/*
- * A cache entry has gone into a resolved state from queued
- */
-
+/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
struct mfc_cache *uc, struct mfc_cache *c)
{
@@ -917,7 +915,6 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
struct nlmsgerr *e;
/* Play the pending entries through our router */
-
while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
if (ip_hdr(skb)->version == 0) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
@@ -941,34 +938,29 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
}
}
-/*
- * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
- * expects the following bizarre scheme.
+/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
+ * expects the following bizarre scheme.
*
- * Called under mrt_lock.
+ * Called under mrt_lock.
*/
-
static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert)
{
- struct sk_buff *skb;
const int ihl = ip_hdrlen(pkt);
+ struct sock *mroute_sk;
struct igmphdr *igmp;
struct igmpmsg *msg;
- struct sock *mroute_sk;
+ struct sk_buff *skb;
int ret;
-#ifdef CONFIG_IP_PIMSM
if (assert == IGMPMSG_WHOLEPKT)
skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
else
-#endif
skb = alloc_skb(128, GFP_ATOMIC);
if (!skb)
return -ENOBUFS;
-#ifdef CONFIG_IP_PIMSM
if (assert == IGMPMSG_WHOLEPKT) {
/* Ugly, but we have no choice with this interface.
* Duplicate old header, fix ihl, length etc.
@@ -986,28 +978,23 @@ static int ipmr_cache_report(struct mr_table *mrt,
ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
sizeof(struct iphdr));
- } else
-#endif
- {
-
- /* Copy the IP header */
-
- skb_set_network_header(skb, skb->len);
- skb_put(skb, ihl);
- skb_copy_to_linear_data(skb, pkt->data, ihl);
- ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */
- msg = (struct igmpmsg *)skb_network_header(skb);
- msg->im_vif = vifi;
- skb_dst_set(skb, dst_clone(skb_dst(pkt)));
-
- /* Add our header */
-
- igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
- igmp->type =
- msg->im_msgtype = assert;
- igmp->code = 0;
- ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
- skb->transport_header = skb->network_header;
+ } else {
+ /* Copy the IP header */
+ skb_set_network_header(skb, skb->len);
+ skb_put(skb, ihl);
+ skb_copy_to_linear_data(skb, pkt->data, ihl);
+ /* Flag to the kernel this is a route add */
+ ip_hdr(skb)->protocol = 0;
+ msg = (struct igmpmsg *)skb_network_header(skb);
+ msg->im_vif = vifi;
+ skb_dst_set(skb, dst_clone(skb_dst(pkt)));
+ /* Add our header */
+ igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
+ igmp->type = assert;
+ msg->im_msgtype = assert;
+ igmp->code = 0;
+ ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
+ skb->transport_header = skb->network_header;
}
rcu_read_lock();
@@ -1019,7 +1006,6 @@ static int ipmr_cache_report(struct mr_table *mrt,
}
/* Deliver to mrouted */
-
ret = sock_queue_rcv_skb(mroute_sk, skb);
rcu_read_unlock();
if (ret < 0) {
@@ -1030,12 +1016,9 @@ static int ipmr_cache_report(struct mr_table *mrt,
return ret;
}
-/*
- * Queue a packet for resolution. It gets locked cache entry!
- */
-
-static int
-ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
+/* Queue a packet for resolution. It gets locked cache entry! */
+static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
+ struct sk_buff *skb)
{
bool found = false;
int err;
@@ -1053,7 +1036,6 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
if (!found) {
/* Create a new entry if allowable */
-
if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
(c = ipmr_cache_alloc_unres()) == NULL) {
spin_unlock_bh(&mfc_unres_lock);
@@ -1063,13 +1045,11 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
}
/* Fill in the new cache entry */
-
c->mfc_parent = -1;
c->mfc_origin = iph->saddr;
c->mfc_mcastgrp = iph->daddr;
/* Reflect first query at mrouted. */
-
err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
if (err < 0) {
/* If the report failed throw the cache entry
@@ -1091,7 +1071,6 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
}
/* See if we can append the packet */
-
if (c->mfc_un.unres.unresolved.qlen > 3) {
kfree_skb(skb);
err = -ENOBUFS;
@@ -1104,9 +1083,7 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
return err;
}
-/*
- * MFC cache manipulation by user space mroute daemon
- */
+/* MFC cache manipulation by user space mroute daemon */
static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
@@ -1177,9 +1154,8 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);
- /*
- * Check to see if we resolved a queued list. If so we
- * need to send on the frames and tidy up.
+ /* Check to see if we resolved a queued list. If so we
+ * need to send on the frames and tidy up.
*/
found = false;
spin_lock_bh(&mfc_unres_lock);
@@ -1204,29 +1180,25 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
return 0;
}
-/*
- * Close the multicast socket, and clear the vif tables etc
- */
-
-static void mroute_clean_tables(struct mr_table *mrt)
+/* Close the multicast socket, and clear the vif tables etc */
+static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
int i;
LIST_HEAD(list);
struct mfc_cache *c, *next;
/* Shut down all active vif entries */
-
for (i = 0; i < mrt->maxvif; i++) {
- if (!(mrt->vif_table[i].flags & VIFF_STATIC))
- vif_delete(mrt, i, 0, &list);
+ if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
+ continue;
+ vif_delete(mrt, i, 0, &list);
}
unregister_netdevice_many(&list);
/* Wipe the cache */
-
for (i = 0; i < MFC_LINES; i++) {
list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
- if (c->mfc_flags & MFC_STATIC)
+ if (!all && (c->mfc_flags & MFC_STATIC))
continue;
list_del_rcu(&c->list);
mroute_netlink_event(mrt, c, RTM_DELROUTE);
@@ -1261,50 +1233,58 @@ static void mrtsock_destruct(struct sock *sk)
NETCONFA_IFINDEX_ALL,
net->ipv4.devconf_all);
RCU_INIT_POINTER(mrt->mroute_sk, NULL);
- mroute_clean_tables(mrt);
+ mroute_clean_tables(mrt, false);
}
}
rtnl_unlock();
}
-/*
- * Socket options and virtual interface manipulation. The whole
- * virtual interface system is a complete heap, but unfortunately
- * that's how BSD mrouted happens to think. Maybe one day with a proper
- * MOSPF/PIM router set up we can clean this up.
+/* Socket options and virtual interface manipulation. The whole
+ * virtual interface system is a complete heap, but unfortunately
+ * that's how BSD mrouted happens to think. Maybe one day with a proper
+ * MOSPF/PIM router set up we can clean this up.
*/
-int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
+int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
+ unsigned int optlen)
{
- int ret, parent = 0;
- struct vifctl vif;
- struct mfcctl mfc;
struct net *net = sock_net(sk);
+ int val, ret = 0, parent = 0;
struct mr_table *mrt;
+ struct vifctl vif;
+ struct mfcctl mfc;
+ u32 uval;
+ /* There's one exception to the lock - MRT_DONE which needs to unlock */
+ rtnl_lock();
if (sk->sk_type != SOCK_RAW ||
- inet_sk(sk)->inet_num != IPPROTO_IGMP)
- return -EOPNOTSUPP;
+ inet_sk(sk)->inet_num != IPPROTO_IGMP) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
- if (!mrt)
- return -ENOENT;
-
+ if (!mrt) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
if (optname != MRT_INIT) {
if (sk != rcu_access_pointer(mrt->mroute_sk) &&
- !ns_capable(net->user_ns, CAP_NET_ADMIN))
- return -EACCES;
+ !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
+ ret = -EACCES;
+ goto out_unlock;
+ }
}
switch (optname) {
case MRT_INIT:
- if (optlen != sizeof(int))
- return -EINVAL;
-
- rtnl_lock();
+ if (optlen != sizeof(int)) {
+ ret = -EINVAL;
+ break;
+ }
if (rtnl_dereference(mrt->mroute_sk)) {
- rtnl_unlock();
- return -EADDRINUSE;
+ ret = -EADDRINUSE;
+ break;
}
ret = ip_ra_control(sk, 1, mrtsock_destruct);
@@ -1315,129 +1295,133 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
NETCONFA_IFINDEX_ALL,
net->ipv4.devconf_all);
}
- rtnl_unlock();
- return ret;
+ break;
case MRT_DONE:
- if (sk != rcu_access_pointer(mrt->mroute_sk))
- return -EACCES;
- return ip_ra_control(sk, 0, NULL);
+ if (sk != rcu_access_pointer(mrt->mroute_sk)) {
+ ret = -EACCES;
+ } else {
+ /* We need to unlock here because mrtsock_destruct takes
+ * care of rtnl itself and we can't change that due to
+ * the IP_ROUTER_ALERT setsockopt which runs without it.
+ */
+ rtnl_unlock();
+ ret = ip_ra_control(sk, 0, NULL);
+ goto out;
+ }
+ break;
case MRT_ADD_VIF:
case MRT_DEL_VIF:
- if (optlen != sizeof(vif))
- return -EINVAL;
- if (copy_from_user(&vif, optval, sizeof(vif)))
- return -EFAULT;
- if (vif.vifc_vifi >= MAXVIFS)
- return -ENFILE;
- rtnl_lock();
+ if (optlen != sizeof(vif)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (copy_from_user(&vif, optval, sizeof(vif))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (vif.vifc_vifi >= MAXVIFS) {
+ ret = -ENFILE;
+ break;
+ }
if (optname == MRT_ADD_VIF) {
ret = vif_add(net, mrt, &vif,
sk == rtnl_dereference(mrt->mroute_sk));
} else {
ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
}
- rtnl_unlock();
- return ret;
-
- /*
- * Manipulate the forwarding caches. These live
- * in a sort of kernel/user symbiosis.
- */
+ break;
+ /* Manipulate the forwarding caches. These live
+ * in a sort of kernel/user symbiosis.
+ */
case MRT_ADD_MFC:
case MRT_DEL_MFC:
parent = -1;
case MRT_ADD_MFC_PROXY:
case MRT_DEL_MFC_PROXY:
- if (optlen != sizeof(mfc))
- return -EINVAL;
- if (copy_from_user(&mfc, optval, sizeof(mfc)))
- return -EFAULT;
+ if (optlen != sizeof(mfc)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (copy_from_user(&mfc, optval, sizeof(mfc))) {
+ ret = -EFAULT;
+ break;
+ }
if (parent == 0)
parent = mfc.mfcc_parent;
- rtnl_lock();
if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
ret = ipmr_mfc_delete(mrt, &mfc, parent);
else
ret = ipmr_mfc_add(net, mrt, &mfc,
sk == rtnl_dereference(mrt->mroute_sk),
parent);
- rtnl_unlock();
- return ret;
- /*
- * Control PIM assert.
- */
+ break;
+ /* Control PIM assert. */
case MRT_ASSERT:
- {
- int v;
- if (optlen != sizeof(v))
- return -EINVAL;
- if (get_user(v, (int __user *)optval))
- return -EFAULT;
- mrt->mroute_do_assert = v;
- return 0;
- }
-#ifdef CONFIG_IP_PIMSM
+ if (optlen != sizeof(val)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (get_user(val, (int __user *)optval)) {
+ ret = -EFAULT;
+ break;
+ }
+ mrt->mroute_do_assert = val;
+ break;
case MRT_PIM:
- {
- int v;
-
- if (optlen != sizeof(v))
- return -EINVAL;
- if (get_user(v, (int __user *)optval))
- return -EFAULT;
- v = !!v;
+ if (!ipmr_pimsm_enabled()) {
+ ret = -ENOPROTOOPT;
+ break;
+ }
+ if (optlen != sizeof(val)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (get_user(val, (int __user *)optval)) {
+ ret = -EFAULT;
+ break;
+ }
- rtnl_lock();
- ret = 0;
- if (v != mrt->mroute_do_pim) {
- mrt->mroute_do_pim = v;
- mrt->mroute_do_assert = v;
+ val = !!val;
+ if (val != mrt->mroute_do_pim) {
+ mrt->mroute_do_pim = val;
+ mrt->mroute_do_assert = val;
}
- rtnl_unlock();
- return ret;
- }
-#endif
-#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+ break;
case MRT_TABLE:
- {
- u32 v;
-
- if (optlen != sizeof(u32))
- return -EINVAL;
- if (get_user(v, (u32 __user *)optval))
- return -EFAULT;
-
- /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
- if (v != RT_TABLE_DEFAULT && v >= 1000000000)
- return -EINVAL;
+ if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
+ ret = -ENOPROTOOPT;
+ break;
+ }
+ if (optlen != sizeof(uval)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (get_user(uval, (u32 __user *)optval)) {
+ ret = -EFAULT;
+ break;
+ }
- rtnl_lock();
- ret = 0;
if (sk == rtnl_dereference(mrt->mroute_sk)) {
ret = -EBUSY;
} else {
- if (!ipmr_new_table(net, v))
- ret = -ENOMEM;
+ mrt = ipmr_new_table(net, uval);
+ if (IS_ERR(mrt))
+ ret = PTR_ERR(mrt);
else
- raw_sk(sk)->ipmr_table = v;
+ raw_sk(sk)->ipmr_table = uval;
}
- rtnl_unlock();
- return ret;
- }
-#endif
- /*
- * Spurious command, or MRT_VERSION which you cannot
- * set.
- */
+ break;
+ /* Spurious command, or MRT_VERSION which you cannot set. */
default:
- return -ENOPROTOOPT;
+ ret = -ENOPROTOOPT;
}
+out_unlock:
+ rtnl_unlock();
+out:
+ return ret;
}
-/*
- * Getsock opt support for the multicast routing system.
- */
-
+/* Getsock opt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
int olr;
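
The ip_mroute_setsockopt() rework above takes rtnl once on entry and funnels every option through a single unlock at the end; only MRT_DONE drops the lock early because mrtsock_destruct() acquires rtnl itself. A sketch of that control-flow shape (example_opt() is a made-up stand-in for the per-option work):

static int example_opt(int optname);

static int example_setsockopt_shape(int optname)
{
        int ret = 0;

        rtnl_lock();
        switch (optname) {
        case MRT_DONE:
                rtnl_unlock();          /* callee re-acquires rtnl */
                ret = example_opt(optname);
                goto out;
        default:
                ret = example_opt(optname);
                break;
        }
        rtnl_unlock();
out:
        return ret;
}
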
@@ -1453,39 +1437,35 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
if (!mrt)
return -ENOENT;
- if (optname != MRT_VERSION &&
-#ifdef CONFIG_IP_PIMSM
- optname != MRT_PIM &&
-#endif
- optname != MRT_ASSERT)
+ switch (optname) {
+ case MRT_VERSION:
+ val = 0x0305;
+ break;
+ case MRT_PIM:
+ if (!ipmr_pimsm_enabled())
+ return -ENOPROTOOPT;
+ val = mrt->mroute_do_pim;
+ break;
+ case MRT_ASSERT:
+ val = mrt->mroute_do_assert;
+ break;
+ default:
return -ENOPROTOOPT;
+ }
if (get_user(olr, optlen))
return -EFAULT;
-
olr = min_t(unsigned int, olr, sizeof(int));
if (olr < 0)
return -EINVAL;
-
if (put_user(olr, optlen))
return -EFAULT;
- if (optname == MRT_VERSION)
- val = 0x0305;
-#ifdef CONFIG_IP_PIMSM
- else if (optname == MRT_PIM)
- val = mrt->mroute_do_pim;
-#endif
- else
- val = mrt->mroute_do_assert;
if (copy_to_user(optval, &val, olr))
return -EFAULT;
return 0;
}
-/*
- * The IP multicast ioctl support routines.
- */
-
+/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
struct sioc_sg_req sr;
@@ -1618,7 +1598,6 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
}
#endif
-
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
@@ -1640,17 +1619,14 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
return NOTIFY_DONE;
}
-
static struct notifier_block ip_mr_notifier = {
.notifier_call = ipmr_device_event,
};
-/*
- * Encapsulate a packet by attaching a valid IPIP header to it.
- * This avoids tunnel drivers and other mess and gives us the speed so
- * important for multicast video.
+/* Encapsulate a packet by attaching a valid IPIP header to it.
+ * This avoids tunnel drivers and other mess and gives us the speed so
+ * important for multicast video.
*/
-
static void ip_encap(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr)
{
@@ -1692,9 +1668,7 @@ static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
return dst_output(net, sk, skb);
}
-/*
- * Processing handlers for ipmr_forward
- */
+/* Processing handlers for ipmr_forward */
static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
struct sk_buff *skb, struct mfc_cache *c, int vifi)
@@ -1709,7 +1683,6 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
if (!vif->dev)
goto out_free;
-#ifdef CONFIG_IP_PIMSM
if (vif->flags & VIFF_REGISTER) {
vif->pkt_out++;
vif->bytes_out += skb->len;
@@ -1718,7 +1691,6 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
goto out_free;
}
-#endif
if (vif->flags & VIFF_TUNNEL) {
rt = ip_route_output_ports(net, &fl4, NULL,
@@ -1745,7 +1717,6 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
* allow to send ICMP, so that packets will disappear
* to blackhole.
*/
-
IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
ip_rt_put(rt);
goto out_free;
@@ -1777,8 +1748,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
IPCB(skb)->flags |= IPSKB_FORWARDED;
- /*
- * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
+	/* RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
* not only before forwarding, but after forwarding on all output
 * interfaces. It is clear that, if the mrouter runs a multicasting
 * program, it should receive packets regardless of what interface
@@ -1809,7 +1779,6 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
}
/* "local" means that we should preserve one skb (for local delivery) */
-
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
struct sk_buff *skb, struct mfc_cache *cache,
int local)
@@ -1834,9 +1803,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
goto forward;
}
- /*
- * Wrong interface: drop packet and (maybe) send PIM assert.
- */
+ /* Wrong interface: drop packet and (maybe) send PIM assert. */
if (mrt->vif_table[vif].dev != skb->dev) {
if (rt_is_output_route(skb_rtable(skb))) {
/* It is our own packet, looped back.
@@ -1875,9 +1842,7 @@ forward:
mrt->vif_table[vif].pkt_in++;
mrt->vif_table[vif].bytes_in += skb->len;
- /*
- * Forward the frame
- */
+ /* Forward the frame */
if (cache->mfc_origin == htonl(INADDR_ANY) &&
cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
if (true_vifi >= 0 &&
@@ -1951,11 +1916,9 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
return mrt;
}
-/*
- * Multicast packets for forwarding arrive here
- * Called with rcu_read_lock();
+/* Multicast packets for forwarding arrive here
+ * Called with rcu_read_lock();
*/
-
int ip_mr_input(struct sk_buff *skb)
{
struct mfc_cache *cache;
@@ -2006,9 +1969,7 @@ int ip_mr_input(struct sk_buff *skb)
vif);
}
- /*
- * No usable cache entry
- */
+ /* No usable cache entry */
if (!cache) {
int vif;
@@ -2049,53 +2010,8 @@ dont_forward:
return 0;
}
-#ifdef CONFIG_IP_PIMSM
-/* called with rcu_read_lock() */
-static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
- unsigned int pimlen)
-{
- struct net_device *reg_dev = NULL;
- struct iphdr *encap;
-
- encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
- /*
- * Check that:
- * a. packet is really sent to a multicast group
- * b. packet is not a NULL-REGISTER
- * c. packet is not truncated
- */
- if (!ipv4_is_multicast(encap->daddr) ||
- encap->tot_len == 0 ||
- ntohs(encap->tot_len) + pimlen > skb->len)
- return 1;
-
- read_lock(&mrt_lock);
- if (mrt->mroute_reg_vif_num >= 0)
- reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
- read_unlock(&mrt_lock);
-
- if (!reg_dev)
- return 1;
-
- skb->mac_header = skb->network_header;
- skb_pull(skb, (u8 *)encap - skb->data);
- skb_reset_network_header(skb);
- skb->protocol = htons(ETH_P_IP);
- skb->ip_summed = CHECKSUM_NONE;
-
- skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
-
- netif_rx(skb);
-
- return NET_RX_SUCCESS;
-}
-#endif
-
#ifdef CONFIG_IP_PIMSM_V1
-/*
- * Handle IGMP messages of PIMv1
- */
-
+/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
struct igmphdr *pim;
@@ -2256,8 +2172,6 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
}
read_lock(&mrt_lock);
- if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
- cache->mfc_flags |= MFC_NOTIFY;
err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
read_unlock(&mrt_lock);
rcu_read_unlock();
@@ -2419,10 +2333,133 @@ done:
return skb->len;
}
+static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
+ [RTA_SRC] = { .type = NLA_U32 },
+ [RTA_DST] = { .type = NLA_U32 },
+ [RTA_IIF] = { .type = NLA_U32 },
+ [RTA_TABLE] = { .type = NLA_U32 },
+ [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
+};
+
+static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
+{
+ switch (rtm_protocol) {
+ case RTPROT_STATIC:
+ case RTPROT_MROUTED:
+ return true;
+ }
+ return false;
+}
+
+static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
+{
+ struct rtnexthop *rtnh = nla_data(nla);
+ int remaining = nla_len(nla), vifi = 0;
+
+ while (rtnh_ok(rtnh, remaining)) {
+ mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
+ if (++vifi == MAXVIFS)
+ break;
+ rtnh = rtnh_next(rtnh, &remaining);
+ }
+
+ return remaining > 0 ? -EINVAL : vifi;
+}
+
+/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
+static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
+ struct mfcctl *mfcc, int *mrtsock,
+ struct mr_table **mrtret)
+{
+ struct net_device *dev = NULL;
+ u32 tblid = RT_TABLE_DEFAULT;
+ struct mr_table *mrt;
+ struct nlattr *attr;
+ struct rtmsg *rtm;
+ int ret, rem;
+
+ ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy);
+ if (ret < 0)
+ goto out;
+ rtm = nlmsg_data(nlh);
+
+ ret = -EINVAL;
+ if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
+ rtm->rtm_type != RTN_MULTICAST ||
+ rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
+ !ipmr_rtm_validate_proto(rtm->rtm_protocol))
+ goto out;
+
+ memset(mfcc, 0, sizeof(*mfcc));
+ mfcc->mfcc_parent = -1;
+ ret = 0;
+ nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
+ switch (nla_type(attr)) {
+ case RTA_SRC:
+ mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
+ break;
+ case RTA_DST:
+ mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
+ break;
+ case RTA_IIF:
+ dev = __dev_get_by_index(net, nla_get_u32(attr));
+ if (!dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+ break;
+ case RTA_MULTIPATH:
+ if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case RTA_PREFSRC:
+ ret = 1;
+ break;
+ case RTA_TABLE:
+ tblid = nla_get_u32(attr);
+ break;
+ }
+ }
+ mrt = ipmr_get_table(net, tblid);
+ if (!mrt) {
+ ret = -ENOENT;
+ goto out;
+ }
+ *mrtret = mrt;
+ *mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
+ if (dev)
+ mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);
+
+out:
+ return ret;
+}
+
+/* takes care of both newroute and delroute */
+static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ struct net *net = sock_net(skb->sk);
+ int ret, mrtsock, parent;
+ struct mr_table *tbl;
+ struct mfcctl mfcc;
+
+ mrtsock = 0;
+ tbl = NULL;
+ ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl);
+ if (ret < 0)
+ return ret;
+
+ parent = ret ? mfcc.mfcc_parent : -1;
+ if (nlh->nlmsg_type == RTM_NEWROUTE)
+ return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
+ else
+ return ipmr_mfc_delete(tbl, &mfcc, parent);
+}
+
#ifdef CONFIG_PROC_FS
-/*
- * The /proc interfaces to multicast routing :
- * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
+/* The /proc interfaces to multicast routing:
+ * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
*/
struct ipmr_vif_iter {
struct seq_net_private p;
@@ -2706,10 +2743,7 @@ static const struct net_protocol pim_protocol = {
};
#endif
-
-/*
- * Setup for IP multicast routing
- */
+/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
int err;
@@ -2759,8 +2793,6 @@ int __init ip_mr_init(void)
sizeof(struct mfc_cache),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
NULL);
- if (!mrt_cachep)
- return -ENOMEM;
err = register_pernet_subsys(&ipmr_net_ops);
if (err)
@@ -2778,6 +2810,10 @@ int __init ip_mr_init(void)
#endif
rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
NULL, ipmr_rtm_dumproute, NULL);
+ rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
+ ipmr_rtm_route, NULL, NULL);
+ rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
+ ipmr_rtm_route, NULL, NULL);
return 0;
#ifdef CONFIG_IP_PIMSM_V2
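
The rewritten ip_mroute_getsockopt() above keeps reporting 0x0305 for MRT_VERSION while folding the MRT_PIM case behind ipmr_pimsm_enabled(). A minimal userspace sketch of that getsockopt() path, assuming a kernel built with CONFIG_IP_MROUTE and CAP_NET_RAW for the raw IGMP socket:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/mroute.h>        /* MRT_VERSION */

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
        int ver = 0;
        socklen_t len = sizeof(ver);

        if (fd < 0) {
            perror("socket");        /* typically EPERM without CAP_NET_RAW */
            return 1;
        }
        /* MRT_* options are IPPROTO_IP-level and end up in ip_mroute_getsockopt() */
        if (getsockopt(fd, IPPROTO_IP, MRT_VERSION, &ver, &len) < 0) {
            perror("getsockopt(MRT_VERSION)");
            close(fd);
            return 1;
        }
        printf("mroute version: 0x%04x\n", ver);   /* expect 0x0305 */
        close(fd);
        return 0;
    }
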
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c1728771cf89..c82cca18c90f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -517,8 +517,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (sk_stream_is_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
- set_bit(SOCK_ASYNC_NOSPACE,
- &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
/* Race breaker. If space is freed after
@@ -906,7 +905,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
goto out_err;
}
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
mss_now = tcp_send_mss(sk, &size_goal, flags);
copied = 0;
@@ -1134,7 +1133,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
}
/* This should be in poll */
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
mss_now = tcp_send_mss(sk, &size_goal, flags);
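
The SOCKWQ_ASYNC_NOSPACE bit set in tcp_poll() above gates the "send SIGIO later" behaviour: once the send queue drains, the socket owner is signalled. A small sketch of how userspace opts into that delivery, shown only to illustrate what the flag controls:

    #include <fcntl.h>
    #include <unistd.h>

    /* Request SIGIO on this socket; with O_ASYNC set, a write-space
     * wake-up after the NOSPACE condition delivers SIGIO to the owner. */
    static void enable_sigio(int fd)
    {
        fcntl(fd, F_SETOWN, getpid());
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC | O_NONBLOCK);
    }
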
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fdd88c3803a6..2d656eef7f8e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4481,19 +4481,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
{
struct sk_buff *skb;
+ int err = -ENOMEM;
+ int data_len = 0;
bool fragstolen;
if (size == 0)
return 0;
- skb = alloc_skb(size, sk->sk_allocation);
+ if (size > PAGE_SIZE) {
+ int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
+
+ data_len = npages << PAGE_SHIFT;
+ size = data_len + (size & ~PAGE_MASK);
+ }
+ skb = alloc_skb_with_frags(size - data_len, data_len,
+ PAGE_ALLOC_COSTLY_ORDER,
+ &err, sk->sk_allocation);
if (!skb)
goto err;
+ skb_put(skb, size - data_len);
+ skb->data_len = data_len;
+ skb->len = size;
+
if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
goto err_free;
- if (memcpy_from_msg(skb_put(skb, size), msg, size))
+ err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
+ if (err)
goto err_free;
TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
@@ -4509,7 +4524,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
err_free:
kfree_skb(skb);
err:
- return -ENOMEM;
+ return err;
+
}
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
@@ -5667,6 +5683,7 @@ discard:
}
tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+ tp->copied_seq = tp->rcv_nxt;
tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
/* RFC1323: The window in SYN & SYN/ACK segments is
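
tcp_send_rcvq() is only reached when userspace injects data into the receive queue through TCP repair mode, so the alloc_skb_with_frags() change above is exercised by injecting buffers larger than a page. A hedged sketch of that trigger, assuming CAP_NET_ADMIN and an established socket fd; the fallback defines mirror the uapi values for older libc headers:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    #ifndef TCP_REPAIR
    #define TCP_REPAIR        19
    #define TCP_REPAIR_QUEUE  20
    #endif
    #ifndef TCP_RECV_QUEUE
    #define TCP_RECV_QUEUE    1
    #endif

    static int inject_rcv_data(int fd, const char *buf, size_t len)
    {
        int on = 1, off = 0, q = TCP_RECV_QUEUE;

        if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)) < 0)
            return -1;
        if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q)) < 0)
            return -1;
        /* send() in repair mode lands in tcp_send_rcvq(); a buffer larger
         * than PAGE_SIZE now gets paged frags instead of one linear skb. */
        if (send(fd, buf, len, 0) < 0)
            return -1;
        return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &off, sizeof(off));
    }
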
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ba09016d1bfd..db003438aaf5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -921,7 +921,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
}
md5sig = rcu_dereference_protected(tp->md5sig_info,
- sock_owned_by_user(sk));
+ sock_owned_by_user(sk) ||
+ lockdep_is_held(&sk->sk_lock.slock));
if (!md5sig) {
md5sig = kmalloc(sizeof(*md5sig), gfp);
if (!md5sig)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index cb7ca569052c..a800cee88035 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2296,7 +2296,7 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
return;
if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
- sk_gfp_atomic(sk, GFP_ATOMIC)))
+ sk_gfp_mask(sk, GFP_ATOMIC)))
tcp_check_probe_timer(sk);
}
@@ -3352,8 +3352,9 @@ void tcp_send_ack(struct sock *sk)
* tcp_transmit_skb() will set the ownership to this
* sock.
*/
- buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
- if (!buff) {
+ buff = alloc_skb(MAX_TCP_HEADER,
+ sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
+ if (unlikely(!buff)) {
inet_csk_schedule_ack(sk);
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
@@ -3375,7 +3376,7 @@ void tcp_send_ack(struct sock *sk)
/* Send it off, this clears delayed acks for us. */
skb_mstamp_get(&buff->skb_mstamp);
- tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
+ tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
}
EXPORT_SYMBOL_GPL(tcp_send_ack);
@@ -3396,7 +3397,8 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
struct sk_buff *skb;
/* We don't queue it, tcp_transmit_skb() sets ownership. */
- skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
+ skb = alloc_skb(MAX_TCP_HEADER,
+ sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
if (!skb)
return -1;
@@ -3409,7 +3411,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
skb_mstamp_get(&skb->skb_mstamp);
NET_INC_STATS(sock_net(sk), mib);
- return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
+ return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
}
void tcp_send_window_probe(struct sock *sk)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c9c716a483e4..193ba1fa8a9a 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -168,7 +168,7 @@ static int tcp_write_timeout(struct sock *sk)
dst_negative_advice(sk);
if (tp->syn_fastopen || tp->syn_data)
tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
- if (tp->syn_data)
+ if (tp->syn_data && icsk->icsk_retransmits == 1)
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
}
@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk)
syn_set = true;
} else {
if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
+ /* Some middle-boxes may black-hole Fast Open _after_
+ * the handshake. Therefore we conservatively disable
+ * Fast Open on this path on recurring timeouts with
+ * few or zero bytes acked after Fast Open.
+ */
+ if (tp->syn_data_acked &&
+ tp->bytes_acked <= tp->rx_opt.mss_clamp) {
+ tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
+ if (icsk->icsk_retransmits == sysctl_tcp_retries1)
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+ }
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
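
The heuristic above only fires for connections that actually carried data in the SYN. A sketch of a client creating such a connection with TCP Fast Open, assuming the peer advertises a TFO cookie; the MSG_FASTOPEN define is the uapi value, supplied in case the libc headers predate it:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <unistd.h>

    #ifndef MSG_FASTOPEN
    #define MSG_FASTOPEN 0x20000000   /* uapi value, assumed if libc is old */
    #endif

    /* Send `req` in the SYN itself; the kernel tracks whether that SYN data
     * was ever acked (tp->syn_data_acked) and, per the hunk above, disables
     * Fast Open for this destination on recurring retransmission timeouts. */
    static int tfo_request(const char *ip, int port, const char *req, size_t len)
    {
        struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(port) };
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
            return -1;
        inet_pton(AF_INET, ip, &dst.sin_addr);
        if (sendto(fd, req, len, MSG_FASTOPEN,
                   (struct sockaddr *)&dst, sizeof(dst)) < 0) {
            close(fd);
            return -1;
        }
        return fd;   /* caller reads the response and closes */
    }
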
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 24ec14f9825c..0c7b0e61b917 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -100,7 +100,6 @@
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
-#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d84742f003a9..5e9111da449d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3287,7 +3287,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
case NETDEV_PRE_TYPE_CHANGE:
case NETDEV_POST_TYPE_CHANGE:
- addrconf_type_change(dev, event);
+ if (idev)
+ addrconf_type_change(dev, event);
break;
}
@@ -3642,7 +3643,7 @@ static void addrconf_dad_work(struct work_struct *w)
/* send a neighbour solicitation for our addr */
addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
- ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any, NULL);
+ ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any);
out:
in6_ifa_put(ifp);
rtnl_unlock();
@@ -5200,6 +5201,20 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
}
static
+int addrconf_sysctl_hop_limit(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table lctl;
+ int min_hl = 1, max_hl = 255;
+
+ lctl = *ctl;
+ lctl.extra1 = &min_hl;
+ lctl.extra2 = &max_hl;
+
+ return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
+}
+
+static
int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -5454,7 +5469,7 @@ static struct addrconf_sysctl_table
.data = &ipv6_devconf.hop_limit,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = addrconf_sysctl_hop_limit,
},
{
.procname = "mtu",
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 44bb66bde0e2..8ec0df75f1c4 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -428,9 +428,11 @@ void inet6_destroy_sock(struct sock *sk)
/* Free tx options */
- opt = xchg(&np->opt, NULL);
- if (opt)
- sock_kfree_s(sk, opt, opt->tot_len);
+ opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
+ if (opt) {
+ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+ txopt_put(opt);
+ }
}
EXPORT_SYMBOL_GPL(inet6_destroy_sock);
@@ -659,7 +661,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
fl6.fl6_sport = inet->inet_sport;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ rcu_read_lock();
+ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
+ &final);
+ rcu_read_unlock();
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
@@ -668,7 +673,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
return PTR_ERR(dst);
}
- __ip6_dst_store(sk, dst, NULL, NULL);
+ ip6_dst_store(sk, dst, NULL, NULL);
}
return 0;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index d70b0238f468..517c55b01ba8 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -167,8 +167,10 @@ ipv4_connected:
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- opt = flowlabel ? flowlabel->opt : np->opt;
+ rcu_read_lock();
+ opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
final_p = fl6_update_dst(&fl6, opt, &final);
+ rcu_read_unlock();
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
err = 0;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index ce203b0402be..ea7c4d64a00a 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
*((char **)&opt2->dst1opt) += dif;
if (opt2->srcrt)
*((char **)&opt2->srcrt) += dif;
+ atomic_set(&opt2->refcnt, 1);
}
return opt2;
}
@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
return ERR_PTR(-ENOBUFS);
memset(opt2, 0, tot_len);
-
+ atomic_set(&opt2->refcnt, 1);
opt2->tot_len = tot_len;
p = (char *)(opt2 + 1);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 36c5a98b0472..0a37ddc7af51 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -834,11 +834,6 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
}
-/*
- * Special lock-class for __icmpv6_sk:
- */
-static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
-
static int __net_init icmpv6_sk_init(struct net *net)
{
struct sock *sk;
@@ -860,15 +855,6 @@ static int __net_init icmpv6_sk_init(struct net *net)
net->ipv6.icmp_sk[i] = sk;
- /*
- * Split off their lock-class, because sk->sk_dst_lock
- * gets used from softirqs, which is safe for
- * __icmpv6_sk (because those never get directly used
- * via userspace syscalls), but unsafe for normal sockets.
- */
- lockdep_set_class(&sk->sk_dst_lock,
- &icmpv6_socket_sk_dst_lock_key);
-
/* Enough space for 2 64K ICMP packets, including
* sk_buff struct overhead.
*/
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 5d1c7cee2cb2..a7ca2cde2ecb 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -78,7 +78,9 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_proto = proto;
fl6->daddr = ireq->ir_v6_rmt_addr;
- final_p = fl6_update_dst(fl6, np->opt, &final);
+ rcu_read_lock();
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+ rcu_read_unlock();
fl6->saddr = ireq->ir_v6_loc_addr;
fl6->flowi6_oif = ireq->ir_iif;
fl6->flowi6_mark = ireq->ir_mark;
@@ -109,14 +111,6 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
static inline
-void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr)
-{
- __ip6_dst_store(sk, dst, daddr, saddr);
-}
-
-static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
return __sk_dst_check(sk, cookie);
@@ -142,14 +136,16 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
fl6->fl6_dport = inet->inet_dport;
security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
- final_p = fl6_update_dst(fl6, np->opt, &final);
+ rcu_read_lock();
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+ rcu_read_unlock();
dst = __inet6_csk_dst_check(sk, np->dst_cookie);
if (!dst) {
dst = ip6_dst_lookup_flow(sk, fl6, final_p);
if (!IS_ERR(dst))
- __inet6_csk_dst_store(sk, dst, NULL, NULL);
+ ip6_dst_store(sk, dst, NULL, NULL);
}
return dst;
}
@@ -175,7 +171,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
/* Restore final destination back after routing done */
fl6.daddr = sk->sk_v6_daddr;
- res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+ res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+ np->tclass);
rcu_read_unlock();
return res;
}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 3c7b9310b33f..938d03ce5e4b 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -24,7 +24,6 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
-#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index eabffbb89795..137fca42aaa6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -177,7 +177,7 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t)
int i;
for_each_possible_cpu(i)
- ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL);
+ ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
}
EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index ad19136086dd..a10e77103c88 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
int cmd);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
struct netlink_callback *cb);
-static void mroute_clean_tables(struct mr6_table *mrt);
+static void mroute_clean_tables(struct mr6_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
@@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
static void ip6mr_free_table(struct mr6_table *mrt)
{
del_timer_sync(&mrt->ipmr_expire_timer);
- mroute_clean_tables(mrt);
+ mroute_clean_tables(mrt, true);
kfree(mrt);
}
@@ -765,10 +765,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
return dev;
failure:
- /* allow the register to be completed before unregistering. */
- rtnl_unlock();
- rtnl_lock();
-
unregister_netdevice(dev);
return NULL;
}
@@ -1542,7 +1538,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
* Close the multicast socket, and clear the vif tables etc
*/
-static void mroute_clean_tables(struct mr6_table *mrt)
+static void mroute_clean_tables(struct mr6_table *mrt, bool all)
{
int i;
LIST_HEAD(list);
@@ -1552,8 +1548,9 @@ static void mroute_clean_tables(struct mr6_table *mrt)
* Shut down all active vif entries
*/
for (i = 0; i < mrt->maxvif; i++) {
- if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
- mif6_delete(mrt, i, &list);
+ if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
+ continue;
+ mif6_delete(mrt, i, &list);
}
unregister_netdevice_many(&list);
@@ -1562,7 +1559,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
*/
for (i = 0; i < MFC6_LINES; i++) {
list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
- if (c->mfc_flags & MFC_STATIC)
+ if (!all && (c->mfc_flags & MFC_STATIC))
continue;
write_lock_bh(&mrt_lock);
list_del(&c->list);
@@ -1625,7 +1622,7 @@ int ip6mr_sk_done(struct sock *sk)
net->ipv6.devconf_all);
write_unlock_bh(&mrt_lock);
- mroute_clean_tables(mrt);
+ mroute_clean_tables(mrt, false);
err = 0;
break;
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 63e6956917c9..4449ad1f8114 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
}
- opt = xchg(&inet6_sk(sk)->opt, opt);
+ opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
+ opt);
sk_dst_reset(sk);
return opt;
@@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
sk->sk_socket->ops = &inet_dgram_ops;
sk->sk_family = PF_INET;
}
- opt = xchg(&np->opt, NULL);
- if (opt)
- sock_kfree_s(sk, opt, opt->tot_len);
+ opt = xchg((__force struct ipv6_txoptions **)&np->opt,
+ NULL);
+ if (opt) {
+ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+ txopt_put(opt);
+ }
pktopt = xchg(&np->pktoptions, NULL);
kfree_skb(pktopt);
@@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
break;
- opt = ipv6_renew_options(sk, np->opt, optname,
+ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ opt = ipv6_renew_options(sk, opt, optname,
(struct ipv6_opt_hdr __user *)optval,
optlen);
if (IS_ERR(opt)) {
@@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
retv = 0;
opt = ipv6_update_options(sk, opt);
sticky_done:
- if (opt)
- sock_kfree_s(sk, opt, opt->tot_len);
+ if (opt) {
+ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+ txopt_put(opt);
+ }
break;
}
@@ -486,6 +493,7 @@ sticky_done:
break;
memset(opt, 0, sizeof(*opt));
+ atomic_set(&opt->refcnt, 1);
opt->tot_len = sizeof(*opt) + optlen;
retv = -EFAULT;
if (copy_from_user(opt+1, optval, optlen))
@@ -502,8 +510,10 @@ update:
retv = 0;
opt = ipv6_update_options(sk, opt);
done:
- if (opt)
- sock_kfree_s(sk, opt, opt->tot_len);
+ if (opt) {
+ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+ txopt_put(opt);
+ }
break;
}
case IPV6_UNICAST_HOPS:
@@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
case IPV6_RTHDR:
case IPV6_DSTOPTS:
{
+ struct ipv6_txoptions *opt;
lock_sock(sk);
- len = ipv6_getsockopt_sticky(sk, np->opt,
- optname, optval, len);
+ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
release_sock(sk);
/* check if ipv6_getsockopt_sticky() returns err code */
if (len < 0)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3e0f855e1bea..d6161e1c48c8 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -556,8 +556,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
}
void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
- const struct in6_addr *daddr, const struct in6_addr *saddr,
- struct sk_buff *oskb)
+ const struct in6_addr *daddr, const struct in6_addr *saddr)
{
struct sk_buff *skb;
struct in6_addr addr_buf;
@@ -593,9 +592,6 @@ void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
dev->dev_addr);
- if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
- skb_dst_copy(skb, oskb);
-
ndisc_send_skb(skb, daddr, saddr);
}
@@ -682,12 +678,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
"%s: trying to ucast probe in NUD_INVALID: %pI6\n",
__func__, target);
}
- ndisc_send_ns(dev, target, target, saddr, skb);
+ ndisc_send_ns(dev, target, target, saddr);
} else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
neigh_app_ns(neigh);
} else {
addrconf_addr_solict_mult(target, &mcaddr);
- ndisc_send_ns(dev, target, &mcaddr, saddr, skb);
+ ndisc_send_ns(dev, target, &mcaddr, saddr);
}
}
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6e5f0e0d49e0..e4347aeb2e65 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -183,7 +183,7 @@ static void nf_ct_frag6_expire(unsigned long data)
/* Creation primitives. */
static inline struct frag_queue *fq_find(struct net *net, __be32 id,
u32 user, struct in6_addr *src,
- struct in6_addr *dst, u8 ecn)
+ struct in6_addr *dst, int iif, u8 ecn)
{
struct inet_frag_queue *q;
struct ip6_create_arg arg;
@@ -193,6 +193,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
arg.user = user;
arg.src = src;
arg.dst = dst;
+ arg.iif = iif;
arg.ecn = ecn;
local_bh_disable();
@@ -589,9 +590,11 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
fhdr = (struct frag_hdr *)skb_transport_header(skb);
fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
- ip6_frag_ecn(hdr));
- if (fq == NULL)
+ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ if (fq == NULL) {
+ pr_debug("Can't find and can't create new queue\n");
return -ENOMEM;
+ }
spin_lock_bh(&fq->q.lock);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index dc65ec198f7c..99140986e887 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -733,6 +733,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
+ struct ipv6_txoptions *opt_to_free = NULL;
struct ipv6_txoptions opt_space;
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
@@ -839,8 +840,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
}
- if (!opt)
- opt = np->opt;
+ if (!opt) {
+ opt = txopt_get(np);
+ opt_to_free = opt;
+ }
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
@@ -906,6 +909,7 @@ done:
dst_release(dst);
out:
fl6_sock_release(flowlabel);
+ txopt_put(opt_to_free);
return err < 0 ? err : len;
do_confirm:
dst_confirm(dst);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 44e21a03cfc3..45f5ae51de65 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
return fq->id == arg->id &&
fq->user == arg->user &&
ipv6_addr_equal(&fq->saddr, arg->src) &&
- ipv6_addr_equal(&fq->daddr, arg->dst);
+ ipv6_addr_equal(&fq->daddr, arg->dst) &&
+ (arg->iif == fq->iif ||
+ !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
+ IPV6_ADDR_LINKLOCAL)));
}
EXPORT_SYMBOL(ip6_frag_match);
@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data)
static struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src,
- const struct in6_addr *dst, u8 ecn)
+ const struct in6_addr *dst, int iif, u8 ecn)
{
struct inet_frag_queue *q;
struct ip6_create_arg arg;
@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
arg.user = IP6_DEFRAG_LOCAL_DELIVER;
arg.src = src;
arg.dst = dst;
+ arg.iif = iif;
arg.ecn = ecn;
hash = inet6_hash_frag(id, src, dst);
@@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
}
fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
- ip6_frag_ecn(hdr));
+ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
if (fq) {
int ret;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 89758be9c6a6..c83b6a5b3604 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -524,7 +524,7 @@ static void rt6_probe_deferred(struct work_struct *w)
container_of(w, struct __rt6_probe_work, work);
addrconf_addr_solict_mult(&work->target, &mcaddr);
- ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, NULL);
+ ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL);
dev_put(work->dev);
kfree(work);
}
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bb8f2fa1c7fb..eaf7ac496d50 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -222,7 +222,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
fl6.daddr = ireq->ir_v6_rmt_addr;
- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
fl6.saddr = ireq->ir_v6_loc_addr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = ireq->ir_mark;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c5429a636f1a..c16e3fbf6854 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -120,6 +120,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct in6_addr *saddr = NULL, *final_p, final;
+ struct ipv6_txoptions *opt;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_type;
@@ -235,7 +236,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ final_p = fl6_update_dst(&fl6, opt, &final);
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
@@ -255,7 +257,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
sk->sk_gso_type = SKB_GSO_TCPV6;
- __ip6_dst_store(sk, dst, NULL, NULL);
+ ip6_dst_store(sk, dst, NULL, NULL);
if (tcp_death_row.sysctl_tw_recycle &&
!tp->rx_opt.ts_recent_stamp &&
@@ -263,9 +265,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
tcp_fetch_timewait_stamp(sk, dst);
icsk->icsk_ext_hdr_len = 0;
- if (np->opt)
- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
- np->opt->opt_nflen);
+ if (opt)
+ icsk->icsk_ext_hdr_len = opt->opt_flen +
+ opt->opt_nflen;
tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
@@ -461,7 +463,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
if (np->repflow && ireq->pktopts)
fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
- err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
+ err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
+ np->tclass);
err = net_xmit_eval(err);
}
@@ -972,6 +975,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
struct inet_request_sock *ireq;
struct ipv6_pinfo *newnp;
const struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_txoptions *opt;
struct tcp6_sock *newtcp6sk;
struct inet_sock *newinet;
struct tcp_sock *newtp;
@@ -1056,7 +1060,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
*/
newsk->sk_gso_type = SKB_GSO_TCPV6;
- __ip6_dst_store(newsk, dst, NULL, NULL);
+ ip6_dst_store(newsk, dst, NULL, NULL);
inet6_sk_rx_dst_set(newsk, skb);
newtcp6sk = (struct tcp6_sock *)newsk;
@@ -1098,13 +1102,15 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
   but we make one more thing there: reattach optmem
to newsk.
*/
- if (np->opt)
- newnp->opt = ipv6_dup_options(newsk, np->opt);
-
+ opt = rcu_dereference(np->opt);
+ if (opt) {
+ opt = ipv6_dup_options(newsk, opt);
+ RCU_INIT_POINTER(newnp->opt, opt);
+ }
inet_csk(newsk)->icsk_ext_hdr_len = 0;
- if (newnp->opt)
- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
- newnp->opt->opt_flen);
+ if (opt)
+ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
+ opt->opt_flen;
tcp_ca_openreq_child(newsk, dst);
@@ -1130,7 +1136,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
*/
tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
AF_INET6, key->key, key->keylen,
- sk_gfp_atomic(sk, GFP_ATOMIC));
+ sk_gfp_mask(sk, GFP_ATOMIC));
}
#endif
@@ -1146,7 +1152,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
/* Clone pktoptions received with SYN, if we own the req */
if (ireq->pktopts) {
newnp->pktoptions = skb_clone(ireq->pktopts,
- sk_gfp_atomic(sk, GFP_ATOMIC));
+ sk_gfp_mask(sk, GFP_ATOMIC));
consume_skb(ireq->pktopts);
ireq->pktopts = NULL;
if (newnp->pktoptions)
@@ -1212,7 +1218,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
--ANK (980728)
*/
if (np->rxopt.all)
- opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
+ opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
struct dst_entry *dst = sk->sk_rx_dst;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 01bcb49619ee..9da3287a3923 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1110,6 +1110,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
struct ipv6_txoptions *opt = NULL;
+ struct ipv6_txoptions *opt_to_free = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct flowi6 fl6;
struct dst_entry *dst;
@@ -1263,8 +1264,10 @@ do_udp_sendmsg:
opt = NULL;
connected = 0;
}
- if (!opt)
- opt = np->opt;
+ if (!opt) {
+ opt = txopt_get(np);
+ opt_to_free = opt;
+ }
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
@@ -1373,6 +1376,7 @@ release_dst:
out:
dst_release(dst);
fl6_sock_release(flowlabel);
+ txopt_put(opt_to_free);
if (!err)
return len;
/*
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index fcb2752419c6..3ea4c98d94dc 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -303,7 +303,7 @@ static void iucv_sock_wake_msglim(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_all(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
@@ -1483,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
if (sock_writeable(sk) && iucv_below_msglim(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
return mask;
}
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index aca38d8aed8e..a2c8747d2936 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_txoptions *opt_to_free = NULL;
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct dst_entry *dst = NULL;
@@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
opt = NULL;
}
- if (opt == NULL)
- opt = np->opt;
+ if (!opt) {
+ opt = txopt_get(np);
+ opt_to_free = opt;
+ }
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
@@ -631,6 +634,7 @@ done:
dst_release(dst);
out:
fl6_sock_release(flowlabel);
+ txopt_put(opt_to_free);
return err < 0 ? err : len;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 1ad18c55064c..d93f113cb522 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1862,5 +1862,5 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("PPP over L2TP over UDP");
MODULE_LICENSE("GPL");
MODULE_VERSION(PPPOL2TP_DRV_VERSION);
-MODULE_ALIAS("pppox-proto-" __stringify(PX_PROTO_OL2TP));
+MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
MODULE_ALIAS_L2TP_PWTYPE(11);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index a758eb84e8f0..ff757181b0a8 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -500,7 +500,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
tid_tx->dialog_token, start_seq_num,
- local->hw.max_tx_aggregation_subframes,
+ IEEE80211_MAX_AMPDU_BUF,
tid_tx->timeout);
}
@@ -926,6 +926,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
+ buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);
mutex_lock(&sta->ampdu_mlme.mtx);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c2bd1b6a6922..2d1c4c35186d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1216,16 +1216,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (!sta)
return -ENOMEM;
- /*
- * defaults -- if userspace wants something else we'll
- * change it accordingly in sta_apply_parameters()
- */
- if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
- !(params->sta_flags_set & (BIT(NL80211_STA_FLAG_AUTHENTICATED) |
- BIT(NL80211_STA_FLAG_ASSOCIATED)))) {
- sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
- sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
- }
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
sta->sta.tdls = true;
@@ -1994,6 +1984,11 @@ static int ieee80211_scan(struct wiphy *wiphy,
return ieee80211_request_scan(sdata, req);
}
+static void ieee80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ ieee80211_scan_cancel(wiphy_priv(wiphy));
+}
+
static int
ieee80211_sched_scan_start(struct wiphy *wiphy,
struct net_device *dev,
@@ -2509,294 +2504,6 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
return 0;
}
-static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local,
- struct ieee80211_roc_work *new_roc,
- struct ieee80211_roc_work *cur_roc)
-{
- unsigned long now = jiffies;
- unsigned long remaining = cur_roc->hw_start_time +
- msecs_to_jiffies(cur_roc->duration) -
- now;
-
- if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun))
- return false;
-
- /* if it doesn't fit entirely, schedule a new one */
- if (new_roc->duration > jiffies_to_msecs(remaining))
- return false;
-
- ieee80211_handle_roc_started(new_roc);
-
- /* add to dependents so we send the expired event properly */
- list_add_tail(&new_roc->list, &cur_roc->dependents);
- return true;
-}
-
-static u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local)
-{
- lockdep_assert_held(&local->mtx);
-
- local->roc_cookie_counter++;
-
- /* wow, you wrapped 64 bits ... more likely a bug */
- if (WARN_ON(local->roc_cookie_counter == 0))
- local->roc_cookie_counter++;
-
- return local->roc_cookie_counter;
-}
-
-static int ieee80211_start_roc_work(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel *channel,
- unsigned int duration, u64 *cookie,
- struct sk_buff *txskb,
- enum ieee80211_roc_type type)
-{
- struct ieee80211_roc_work *roc, *tmp;
- bool queued = false;
- int ret;
-
- lockdep_assert_held(&local->mtx);
-
- if (local->use_chanctx && !local->ops->remain_on_channel)
- return -EOPNOTSUPP;
-
- roc = kzalloc(sizeof(*roc), GFP_KERNEL);
- if (!roc)
- return -ENOMEM;
-
- /*
- * If the duration is zero, then the driver
- * wouldn't actually do anything. Set it to
- * 10 for now.
- *
- * TODO: cancel the off-channel operation
- * when we get the SKB's TX status and
- * the wait time was zero before.
- */
- if (!duration)
- duration = 10;
-
- roc->chan = channel;
- roc->duration = duration;
- roc->req_duration = duration;
- roc->frame = txskb;
- roc->type = type;
- roc->sdata = sdata;
- INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
- INIT_LIST_HEAD(&roc->dependents);
-
- /*
- * cookie is either the roc cookie (for normal roc)
- * or the SKB (for mgmt TX)
- */
- if (!txskb) {
- roc->cookie = ieee80211_mgmt_tx_cookie(local);
- *cookie = roc->cookie;
- } else {
- roc->mgmt_tx_cookie = *cookie;
- }
-
- /* if there's one pending or we're scanning, queue this one */
- if (!list_empty(&local->roc_list) ||
- local->scanning || ieee80211_is_radar_required(local))
- goto out_check_combine;
-
- /* if not HW assist, just queue & schedule work */
- if (!local->ops->remain_on_channel) {
- ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
- goto out_queue;
- }
-
- /* otherwise actually kick it off here (for error handling) */
-
- ret = drv_remain_on_channel(local, sdata, channel, duration, type);
- if (ret) {
- kfree(roc);
- return ret;
- }
-
- roc->started = true;
- goto out_queue;
-
- out_check_combine:
- list_for_each_entry(tmp, &local->roc_list, list) {
- if (tmp->chan != channel || tmp->sdata != sdata)
- continue;
-
- /*
- * Extend this ROC if possible:
- *
- * If it hasn't started yet, just increase the duration
- * and add the new one to the list of dependents.
- * If the type of the new ROC has higher priority, modify the
- * type of the previous one to match that of the new one.
- */
- if (!tmp->started) {
- list_add_tail(&roc->list, &tmp->dependents);
- tmp->duration = max(tmp->duration, roc->duration);
- tmp->type = max(tmp->type, roc->type);
- queued = true;
- break;
- }
-
- /* If it has already started, it's more difficult ... */
- if (local->ops->remain_on_channel) {
- /*
- * In the offloaded ROC case, if it hasn't begun, add
- * this new one to the dependent list to be handled
- * when the master one begins. If it has begun,
- * check if it fits entirely within the existing one,
- * in which case it will just be dependent as well.
- * Otherwise, schedule it by itself.
- */
- if (!tmp->hw_begun) {
- list_add_tail(&roc->list, &tmp->dependents);
- queued = true;
- break;
- }
-
- if (ieee80211_coalesce_started_roc(local, roc, tmp))
- queued = true;
- } else if (del_timer_sync(&tmp->work.timer)) {
- unsigned long new_end;
-
- /*
- * In the software ROC case, cancel the timer, if
- * that fails then the finish work is already
- * queued/pending and thus we queue the new ROC
- * normally, if that succeeds then we can extend
- * the timer duration and TX the frame (if any.)
- */
-
- list_add_tail(&roc->list, &tmp->dependents);
- queued = true;
-
- new_end = jiffies + msecs_to_jiffies(roc->duration);
-
- /* ok, it was started & we canceled timer */
- if (time_after(new_end, tmp->work.timer.expires))
- mod_timer(&tmp->work.timer, new_end);
- else
- add_timer(&tmp->work.timer);
-
- ieee80211_handle_roc_started(roc);
- }
- break;
- }
-
- out_queue:
- if (!queued)
- list_add_tail(&roc->list, &local->roc_list);
-
- return 0;
-}
-
-static int ieee80211_remain_on_channel(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- struct ieee80211_channel *chan,
- unsigned int duration,
- u64 *cookie)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
- struct ieee80211_local *local = sdata->local;
- int ret;
-
- mutex_lock(&local->mtx);
- ret = ieee80211_start_roc_work(local, sdata, chan,
- duration, cookie, NULL,
- IEEE80211_ROC_TYPE_NORMAL);
- mutex_unlock(&local->mtx);
-
- return ret;
-}
-
-static int ieee80211_cancel_roc(struct ieee80211_local *local,
- u64 cookie, bool mgmt_tx)
-{
- struct ieee80211_roc_work *roc, *tmp, *found = NULL;
- int ret;
-
- mutex_lock(&local->mtx);
- list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
- struct ieee80211_roc_work *dep, *tmp2;
-
- list_for_each_entry_safe(dep, tmp2, &roc->dependents, list) {
- if (!mgmt_tx && dep->cookie != cookie)
- continue;
- else if (mgmt_tx && dep->mgmt_tx_cookie != cookie)
- continue;
- /* found dependent item -- just remove it */
- list_del(&dep->list);
- mutex_unlock(&local->mtx);
-
- ieee80211_roc_notify_destroy(dep, true);
- return 0;
- }
-
- if (!mgmt_tx && roc->cookie != cookie)
- continue;
- else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
- continue;
-
- found = roc;
- break;
- }
-
- if (!found) {
- mutex_unlock(&local->mtx);
- return -ENOENT;
- }
-
- /*
- * We found the item to cancel, so do that. Note that it
- * may have dependents, which we also cancel (and send
- * the expired signal for.) Not doing so would be quite
- * tricky here, but we may need to fix it later.
- */
-
- if (local->ops->remain_on_channel) {
- if (found->started) {
- ret = drv_cancel_remain_on_channel(local);
- if (WARN_ON_ONCE(ret)) {
- mutex_unlock(&local->mtx);
- return ret;
- }
- }
-
- list_del(&found->list);
-
- if (found->started)
- ieee80211_start_next_roc(local);
- mutex_unlock(&local->mtx);
-
- ieee80211_roc_notify_destroy(found, true);
- } else {
- /* work may be pending so use it all the time */
- found->abort = true;
- ieee80211_queue_delayed_work(&local->hw, &found->work, 0);
-
- mutex_unlock(&local->mtx);
-
- /* work will clean up etc */
- flush_delayed_work(&found->work);
- WARN_ON(!found->to_be_freed);
- kfree(found);
- }
-
- return 0;
-}
-
-static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- u64 cookie)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
- struct ieee80211_local *local = sdata->local;
-
- return ieee80211_cancel_roc(local, cookie, false);
-}
-
static int ieee80211_start_radar_detection(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
@@ -3267,9 +2974,21 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
return err;
}
-static struct sk_buff *ieee80211_make_ack_skb(struct ieee80211_local *local,
- struct sk_buff *skb, u64 *cookie,
- gfp_t gfp)
+u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local)
+{
+ lockdep_assert_held(&local->mtx);
+
+ local->roc_cookie_counter++;
+
+ /* wow, you wrapped 64 bits ... more likely a bug */
+ if (WARN_ON(local->roc_cookie_counter == 0))
+ local->roc_cookie_counter++;
+
+ return local->roc_cookie_counter;
+}
+
+int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
+ u64 *cookie, gfp_t gfp)
{
unsigned long spin_flags;
struct sk_buff *ack_skb;
@@ -3277,7 +2996,7 @@ static struct sk_buff *ieee80211_make_ack_skb(struct ieee80211_local *local,
ack_skb = skb_copy(skb, gfp);
if (!ack_skb)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
spin_lock_irqsave(&local->ack_status_lock, spin_flags);
id = idr_alloc(&local->ack_status_frames, ack_skb,
@@ -3286,7 +3005,7 @@ static struct sk_buff *ieee80211_make_ack_skb(struct ieee80211_local *local,
if (id < 0) {
kfree_skb(ack_skb);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
IEEE80211_SKB_CB(skb)->ack_frame_id = id;
@@ -3294,200 +3013,7 @@ static struct sk_buff *ieee80211_make_ack_skb(struct ieee80211_local *local,
*cookie = ieee80211_mgmt_tx_cookie(local);
IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie;
- return ack_skb;
-}
-
-static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
- struct cfg80211_mgmt_tx_params *params,
- u64 *cookie)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
- struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb, *ack_skb;
- struct sta_info *sta;
- const struct ieee80211_mgmt *mgmt = (void *)params->buf;
- bool need_offchan = false;
- u32 flags;
- int ret;
- u8 *data;
-
- if (params->dont_wait_for_ack)
- flags = IEEE80211_TX_CTL_NO_ACK;
- else
- flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
- IEEE80211_TX_CTL_REQ_TX_STATUS;
-
- if (params->no_cck)
- flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_ADHOC:
- if (!sdata->vif.bss_conf.ibss_joined)
- need_offchan = true;
- /* fall through */
-#ifdef CONFIG_MAC80211_MESH
- case NL80211_IFTYPE_MESH_POINT:
- if (ieee80211_vif_is_mesh(&sdata->vif) &&
- !sdata->u.mesh.mesh_id_len)
- need_offchan = true;
- /* fall through */
-#endif
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_P2P_GO:
- if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
- !ieee80211_vif_is_mesh(&sdata->vif) &&
- !rcu_access_pointer(sdata->bss->beacon))
- need_offchan = true;
- if (!ieee80211_is_action(mgmt->frame_control) ||
- mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
- mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED ||
- mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
- break;
- rcu_read_lock();
- sta = sta_info_get(sdata, mgmt->da);
- rcu_read_unlock();
- if (!sta)
- return -ENOLINK;
- break;
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_P2P_CLIENT:
- sdata_lock(sdata);
- if (!sdata->u.mgd.associated ||
- (params->offchan && params->wait &&
- local->ops->remain_on_channel &&
- memcmp(sdata->u.mgd.associated->bssid,
- mgmt->bssid, ETH_ALEN)))
- need_offchan = true;
- sdata_unlock(sdata);
- break;
- case NL80211_IFTYPE_P2P_DEVICE:
- need_offchan = true;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- /* configurations requiring offchan cannot work if no channel has been
- * specified
- */
- if (need_offchan && !params->chan)
- return -EINVAL;
-
- mutex_lock(&local->mtx);
-
- /* Check if the operating channel is the requested channel */
- if (!need_offchan) {
- struct ieee80211_chanctx_conf *chanctx_conf;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-
- if (chanctx_conf) {
- need_offchan = params->chan &&
- (params->chan !=
- chanctx_conf->def.chan);
- } else if (!params->chan) {
- ret = -EINVAL;
- rcu_read_unlock();
- goto out_unlock;
- } else {
- need_offchan = true;
- }
- rcu_read_unlock();
- }
-
- if (need_offchan && !params->offchan) {
- ret = -EBUSY;
- goto out_unlock;
- }
-
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + params->len);
- if (!skb) {
- ret = -ENOMEM;
- goto out_unlock;
- }
- skb_reserve(skb, local->hw.extra_tx_headroom);
-
- data = skb_put(skb, params->len);
- memcpy(data, params->buf, params->len);
-
- /* Update CSA counters */
- if (sdata->vif.csa_active &&
- (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
- sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
- params->n_csa_offsets) {
- int i;
- struct beacon_data *beacon = NULL;
-
- rcu_read_lock();
-
- if (sdata->vif.type == NL80211_IFTYPE_AP)
- beacon = rcu_dereference(sdata->u.ap.beacon);
- else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- beacon = rcu_dereference(sdata->u.ibss.presp);
- else if (ieee80211_vif_is_mesh(&sdata->vif))
- beacon = rcu_dereference(sdata->u.mesh.beacon);
-
- if (beacon)
- for (i = 0; i < params->n_csa_offsets; i++)
- data[params->csa_offsets[i]] =
- beacon->csa_current_counter;
-
- rcu_read_unlock();
- }
-
- IEEE80211_SKB_CB(skb)->flags = flags;
-
- skb->dev = sdata->dev;
-
- if (!params->dont_wait_for_ack) {
- /* make a copy to preserve the frame contents
- * in case of encryption.
- */
- ack_skb = ieee80211_make_ack_skb(local, skb, cookie,
- GFP_KERNEL);
- if (IS_ERR(ack_skb)) {
- ret = PTR_ERR(ack_skb);
- kfree_skb(skb);
- goto out_unlock;
- }
- } else {
- /* for cookie below */
- ack_skb = skb;
- }
-
- if (!need_offchan) {
- ieee80211_tx_skb(sdata, skb);
- ret = 0;
- goto out_unlock;
- }
-
- IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
- IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
- if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
- IEEE80211_SKB_CB(skb)->hw_queue =
- local->hw.offchannel_tx_hw_queue;
-
- /* This will handle all kinds of coalescing and immediate TX */
- ret = ieee80211_start_roc_work(local, sdata, params->chan,
- params->wait, cookie, skb,
- IEEE80211_ROC_TYPE_MGMT_TX);
- if (ret)
- kfree_skb(skb);
- out_unlock:
- mutex_unlock(&local->mtx);
- return ret;
-}
-
-static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- u64 cookie)
-{
- struct ieee80211_local *local = wiphy_priv(wiphy);
-
- return ieee80211_cancel_roc(local, cookie, true);
+ return 0;
}
static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
@@ -3565,7 +3091,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct ieee80211_qos_hdr *nullfunc;
- struct sk_buff *skb, *ack_skb;
+ struct sk_buff *skb;
int size = sizeof(*nullfunc);
__le16 fc;
bool qos;
@@ -3633,10 +3159,9 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
if (qos)
nullfunc->qos_ctrl = cpu_to_le16(7);
- ack_skb = ieee80211_make_ack_skb(local, skb, cookie, GFP_ATOMIC);
- if (IS_ERR(ack_skb)) {
+ ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_ATOMIC);
+ if (ret) {
kfree_skb(skb);
- ret = PTR_ERR(ack_skb);
goto unlock;
}
@@ -3838,6 +3363,7 @@ const struct cfg80211_ops mac80211_config_ops = {
.suspend = ieee80211_suspend,
.resume = ieee80211_resume,
.scan = ieee80211_scan,
+ .abort_scan = ieee80211_abort_scan,
.sched_scan_start = ieee80211_sched_scan_start,
.sched_scan_stop = ieee80211_sched_scan_stop,
.auth = ieee80211_auth,
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 4d2aaebd4f97..abbdff03ce92 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -125,6 +125,7 @@ static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = {
FLAG(TDLS_WIDER_BW),
FLAG(SUPPORTS_AMSDU_IN_AMPDU),
FLAG(BEACON_TX_STATUS),
+ FLAG(NEEDS_UNIQUE_STA_ADDR),
/* keep last for the build bug below */
(void *)0x1
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 337bb5d78003..f7fc0e00497f 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -428,6 +428,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
chandef.width = sdata->u.ibss.chandef.width;
break;
case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
chandef = sdata->u.ibss.chandef;
chandef.chan = cbss->channel;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d832bd59236b..c30b6842ed9f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -325,19 +325,15 @@ struct mesh_preq_queue {
struct ieee80211_roc_work {
struct list_head list;
- struct list_head dependents;
-
- struct delayed_work work;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_channel *chan;
bool started, abort, hw_begun, notified;
- bool to_be_freed;
bool on_channel;
- unsigned long hw_start_time;
+ unsigned long start_time;
u32 duration, req_duration;
struct sk_buff *frame;
@@ -1335,6 +1331,7 @@ struct ieee80211_local {
/*
* Remain-on-channel support
*/
+ struct delayed_work roc_work;
struct list_head roc_list;
struct work_struct hw_roc_start, hw_roc_done;
unsigned long hw_roc_start_time;
@@ -1483,6 +1480,10 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
void ieee80211_configure_filter(struct ieee80211_local *local);
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
+u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local);
+int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
+ u64 *cookie, gfp_t gfp);
+
/* STA code */
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -1577,16 +1578,22 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_local *local);
void ieee80211_sched_scan_end(struct ieee80211_local *local);
void ieee80211_sched_scan_stopped_work(struct work_struct *work);
-/* off-channel helpers */
+/* off-channel/mgmt-tx */
void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
void ieee80211_offchannel_return(struct ieee80211_local *local);
void ieee80211_roc_setup(struct ieee80211_local *local);
void ieee80211_start_next_roc(struct ieee80211_local *local);
void ieee80211_roc_purge(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
-void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);
-void ieee80211_sw_roc_work(struct work_struct *work);
-void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
+int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ unsigned int duration, u64 *cookie);
+int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev, u64 cookie);
+int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie);
+int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+ struct wireless_dev *wdev, u64 cookie);
/* channel switch handling */
void ieee80211_csa_finalize_work(struct work_struct *work);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d0dc1bfaeec2..c9e325d2e120 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -76,7 +76,8 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
bool update_bss)
{
- if (__ieee80211_recalc_txpower(sdata) || update_bss)
+ if (__ieee80211_recalc_txpower(sdata) ||
+ (update_bss && ieee80211_sdata_running(sdata)))
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
}
@@ -1861,6 +1862,7 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
unregister_netdevice(sdata->dev);
} else {
cfg80211_unregister_wdev(&sdata->wdev);
+ ieee80211_teardown_sdata(sdata);
kfree(sdata);
}
}
@@ -1870,7 +1872,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state)))
return;
ieee80211_do_stop(sdata, true);
- ieee80211_teardown_sdata(sdata);
}
void ieee80211_remove_interfaces(struct ieee80211_local *local)
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 44388d6a1d8e..5e5bc599da4c 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -4,6 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -320,7 +321,7 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
return;
if (new)
- list_add_tail(&new->list, &sdata->key_list);
+ list_add_tail_rcu(&new->list, &sdata->key_list);
WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx);
@@ -368,7 +369,7 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
}
if (old)
- list_del(&old->list);
+ list_del_rcu(&old->list);
}
struct ieee80211_key *
@@ -592,8 +593,8 @@ static void ieee80211_key_destroy(struct ieee80211_key *key,
return;
/*
- * Synchronize so the TX path can no longer be using
- * this key before we free/remove it.
+ * Synchronize so the TX path and rcu key iterators
+ * can no longer be using this key before we free/remove it.
*/
synchronize_net();
@@ -744,6 +745,53 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_iter_keys);
+static void
+_ieee80211_iter_keys_rcu(struct ieee80211_hw *hw,
+ struct ieee80211_sub_if_data *sdata,
+ void (*iter)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data),
+ void *iter_data)
+{
+ struct ieee80211_key *key;
+
+ list_for_each_entry_rcu(key, &sdata->key_list, list) {
+ /* skip keys of station in removal process */
+ if (key->sta && key->sta->removed)
+ continue;
+ if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
+ continue;
+
+ iter(hw, &sdata->vif,
+ key->sta ? &key->sta->sta : NULL,
+ &key->conf, iter_data);
+ }
+}
+
+void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void (*iter)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data),
+ void *iter_data)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata;
+
+ if (vif) {
+ sdata = vif_to_sdata(vif);
+ _ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data);
+ } else {
+ list_for_each_entry_rcu(sdata, &local->interfaces, list)
+ _ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data);
+ }
+}
+EXPORT_SYMBOL(ieee80211_iter_keys_rcu);
+
static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata,
struct list_head *keys)
{
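As a hedged illustration of the iterator exported above (the signature is the one shown in this hunk): a caller that cannot sleep can walk the keys uploaded to hardware from inside an RCU read-side section, and passing a NULL vif walks all interfaces. The function and counter below are made up for the sketch.

#include <net/mac80211.h>

static void example_key_counter(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta,
				struct ieee80211_key_conf *key,
				void *data)
{
	unsigned int *count = data;

	/* only keys with KEY_FLAG_UPLOADED_TO_HARDWARE reach here */
	(*count)++;
}

static unsigned int example_count_hw_keys(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif)
{
	unsigned int count = 0;

	rcu_read_lock();
	ieee80211_iter_keys_rcu(hw, vif, example_key_counter, &count);
	rcu_read_unlock();

	return count;
}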
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 858f6b1cb149..6bcf0faa4a89 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1149,6 +1149,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
rtnl_unlock();
+ cancel_delayed_work_sync(&local->roc_work);
cancel_work_sync(&local->restart_work);
cancel_work_sync(&local->reconfig_filter);
cancel_work_sync(&local->tdls_chsw_work);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index b890e225a8f1..dadf8dc6f1cf 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -779,10 +779,8 @@ void mesh_plink_broken(struct sta_info *sta)
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
- struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
del_timer_sync(&node->mpath->timer);
- atomic_dec(&sdata->u.mesh.mpaths);
kfree(node->mpath);
kfree(node);
}
@@ -790,8 +788,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
- struct mesh_path *mpath;
- mpath = node->mpath;
+ struct mesh_path *mpath = node->mpath;
+ struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+
spin_lock(&mpath->state_lock);
mpath->flags |= MESH_PATH_RESOLVING;
if (mpath->is_gate)
@@ -799,6 +798,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
hlist_del_rcu(&node->list);
call_rcu(&node->rcu, mesh_path_node_reclaim);
spin_unlock(&mpath->state_lock);
+ atomic_dec(&sdata->u.mesh.mpaths);
atomic_dec(&tbl->entries);
}
@@ -968,8 +968,8 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
copy = true;
} else {
mpath_dbg(sdata,
- "Not forwarding %p (flags %#x)\n",
- gate->mpath, gate->mpath->flags);
+ "Not forwarding to %pM (flags %#x)\n",
+ gate->mpath->dst, gate->mpath->flags);
}
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b140cc6651f4..123b26d177e8 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1930,7 +1930,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
- if (sdata->vif.p2p) {
+ if (sdata->vif.p2p ||
+ sdata->vif.driver_flags & IEEE80211_VIF_GET_NOA_UPDATE) {
const struct cfg80211_bss_ies *ies;
rcu_read_lock();
@@ -3458,7 +3459,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
}
- if (sdata->vif.p2p) {
+ if (sdata->vif.p2p ||
+ sdata->vif.driver_flags & IEEE80211_VIF_GET_NOA_UPDATE) {
struct ieee80211_p2p_noa_attr noa = {};
int ret;
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 04401037140e..8b2f4eaac2ba 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -187,11 +187,80 @@ void ieee80211_offchannel_return(struct ieee80211_local *local)
false);
}
-void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc)
+static void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
{
- if (roc->notified)
+ /* was never transmitted */
+ if (roc->frame) {
+ cfg80211_mgmt_tx_status(&roc->sdata->wdev, roc->mgmt_tx_cookie,
+ roc->frame->data, roc->frame->len,
+ false, GFP_KERNEL);
+ ieee80211_free_txskb(&roc->sdata->local->hw, roc->frame);
+ }
+
+ if (!roc->mgmt_tx_cookie)
+ cfg80211_remain_on_channel_expired(&roc->sdata->wdev,
+ roc->cookie, roc->chan,
+ GFP_KERNEL);
+
+ list_del(&roc->list);
+ kfree(roc);
+}
+
+static unsigned long ieee80211_end_finished_rocs(struct ieee80211_local *local,
+ unsigned long now)
+{
+ struct ieee80211_roc_work *roc, *tmp;
+ long remaining_dur_min = LONG_MAX;
+
+ lockdep_assert_held(&local->mtx);
+
+ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+ long remaining;
+
+ if (!roc->started)
+ break;
+
+ remaining = roc->start_time +
+ msecs_to_jiffies(roc->duration) -
+ now;
+
+ /* In case of HW ROC, it is possible that the HW finished the
+ * ROC session before the actual requested time. In such a case
+ * end the ROC session (disregarding the remaining time).
+ */
+ if (roc->abort || roc->hw_begun || remaining <= 0)
+ ieee80211_roc_notify_destroy(roc);
+ else
+ remaining_dur_min = min(remaining_dur_min, remaining);
+ }
+
+ return remaining_dur_min;
+}
+
+static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
+ unsigned long now)
+{
+ long dur = ieee80211_end_finished_rocs(local, now);
+
+ if (dur == LONG_MAX)
+ return false;
+
+ mod_delayed_work(local->workqueue, &local->roc_work, dur);
+ return true;
+}
+
+static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
+ unsigned long start_time)
+{
+ struct ieee80211_local *local = roc->sdata->local;
+
+ if (WARN_ON(roc->notified))
return;
+ roc->start_time = start_time;
+ roc->started = true;
+ roc->hw_begun = true;
+
if (roc->mgmt_tx_cookie) {
if (!WARN_ON(!roc->frame)) {
ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7,
@@ -205,40 +274,26 @@ void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc)
}
roc->notified = true;
+
+ if (!local->ops->remain_on_channel)
+ ieee80211_recalc_sw_work(local, start_time);
}
static void ieee80211_hw_roc_start(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, hw_roc_start);
- struct ieee80211_roc_work *roc, *dep, *tmp;
+ struct ieee80211_roc_work *roc;
mutex_lock(&local->mtx);
- if (list_empty(&local->roc_list))
- goto out_unlock;
-
- roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
- list);
-
- if (!roc->started)
- goto out_unlock;
-
- roc->hw_begun = true;
- roc->hw_start_time = local->hw_roc_start_time;
+ list_for_each_entry(roc, &local->roc_list, list) {
+ if (!roc->started)
+ break;
- ieee80211_handle_roc_started(roc);
- list_for_each_entry_safe(dep, tmp, &roc->dependents, list) {
- ieee80211_handle_roc_started(dep);
-
- if (dep->duration > roc->duration) {
- u32 dur = dep->duration;
- dep->duration = dur - roc->duration;
- roc->duration = dur;
- list_move(&dep->list, &roc->list);
- }
+ ieee80211_handle_roc_started(roc, local->hw_roc_start_time);
}
- out_unlock:
+
mutex_unlock(&local->mtx);
}
@@ -254,34 +309,40 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
-void ieee80211_start_next_roc(struct ieee80211_local *local)
+static void _ieee80211_start_next_roc(struct ieee80211_local *local)
{
- struct ieee80211_roc_work *roc;
+ struct ieee80211_roc_work *roc, *tmp;
+ enum ieee80211_roc_type type;
+ u32 min_dur, max_dur;
lockdep_assert_held(&local->mtx);
- if (list_empty(&local->roc_list)) {
- ieee80211_run_deferred_scan(local);
+ if (WARN_ON(list_empty(&local->roc_list)))
return;
- }
roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
list);
- if (WARN_ON_ONCE(roc->started))
+ if (WARN_ON(roc->started))
return;
- if (local->ops->remain_on_channel) {
- int ret, duration = roc->duration;
-
- /* XXX: duplicated, see ieee80211_start_roc_work() */
- if (!duration)
- duration = 10;
+ min_dur = roc->duration;
+ max_dur = roc->duration;
+ type = roc->type;
- ret = drv_remain_on_channel(local, roc->sdata, roc->chan,
- duration, roc->type);
+ list_for_each_entry(tmp, &local->roc_list, list) {
+ if (tmp == roc)
+ continue;
+ if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
+ break;
+ max_dur = max(tmp->duration, max_dur);
+ min_dur = min(tmp->duration, min_dur);
+ type = max(tmp->type, type);
+ }
- roc->started = true;
+ if (local->ops->remain_on_channel) {
+ int ret = drv_remain_on_channel(local, roc->sdata, roc->chan,
+ max_dur, type);
if (ret) {
wiphy_warn(local->hw.wiphy,
@@ -290,74 +351,24 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
* queue the work struct again to avoid recursion
* when multiple failures occur
*/
- ieee80211_remain_on_channel_expired(&local->hw);
+ list_for_each_entry(tmp, &local->roc_list, list) {
+ if (tmp->sdata != roc->sdata ||
+ tmp->chan != roc->chan)
+ break;
+ tmp->started = true;
+ tmp->abort = true;
+ }
+ ieee80211_queue_work(&local->hw, &local->hw_roc_done);
+ return;
}
- } else {
- /* delay it a bit */
- ieee80211_queue_delayed_work(&local->hw, &roc->work,
- round_jiffies_relative(HZ/2));
- }
-}
-
-void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free)
-{
- struct ieee80211_roc_work *dep, *tmp;
-
- if (WARN_ON(roc->to_be_freed))
- return;
-
- /* was never transmitted */
- if (roc->frame) {
- cfg80211_mgmt_tx_status(&roc->sdata->wdev,
- (unsigned long)roc->frame,
- roc->frame->data, roc->frame->len,
- false, GFP_KERNEL);
- kfree_skb(roc->frame);
- }
-
- if (!roc->mgmt_tx_cookie)
- cfg80211_remain_on_channel_expired(&roc->sdata->wdev,
- roc->cookie, roc->chan,
- GFP_KERNEL);
-
- list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
- ieee80211_roc_notify_destroy(dep, true);
-
- if (free)
- kfree(roc);
- else
- roc->to_be_freed = true;
-}
-
-void ieee80211_sw_roc_work(struct work_struct *work)
-{
- struct ieee80211_roc_work *roc =
- container_of(work, struct ieee80211_roc_work, work.work);
- struct ieee80211_sub_if_data *sdata = roc->sdata;
- struct ieee80211_local *local = sdata->local;
- bool started, on_channel;
-
- mutex_lock(&local->mtx);
-
- if (roc->to_be_freed)
- goto out_unlock;
-
- if (roc->abort)
- goto finish;
-
- if (WARN_ON(list_empty(&local->roc_list)))
- goto out_unlock;
-
- if (WARN_ON(roc != list_first_entry(&local->roc_list,
- struct ieee80211_roc_work,
- list)))
- goto out_unlock;
-
- if (!roc->started) {
- struct ieee80211_roc_work *dep;
-
- WARN_ON(local->use_chanctx);
+ /* we'll notify about the start once the HW calls back */
+ list_for_each_entry(tmp, &local->roc_list, list) {
+ if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
+ break;
+ tmp->started = true;
+ }
+ } else {
/* If actually operating on the desired channel (with at least
* 20 MHz channel width) don't stop all the operations but still
* treat it as though the ROC operation started properly, so
@@ -377,27 +388,72 @@ void ieee80211_sw_roc_work(struct work_struct *work)
ieee80211_hw_config(local, 0);
}
- /* tell userspace or send frame */
- ieee80211_handle_roc_started(roc);
- list_for_each_entry(dep, &roc->dependents, list)
- ieee80211_handle_roc_started(dep);
+ ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+ msecs_to_jiffies(min_dur));
- /* if it was pure TX, just finish right away */
- if (!roc->duration)
- goto finish;
+ /* tell userspace or send frame(s) */
+ list_for_each_entry(tmp, &local->roc_list, list) {
+ if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
+ break;
- roc->started = true;
- ieee80211_queue_delayed_work(&local->hw, &roc->work,
- msecs_to_jiffies(roc->duration));
+ tmp->on_channel = roc->on_channel;
+ ieee80211_handle_roc_started(tmp, jiffies);
+ }
+ }
+}
+
+void ieee80211_start_next_roc(struct ieee80211_local *local)
+{
+ struct ieee80211_roc_work *roc;
+
+ lockdep_assert_held(&local->mtx);
+
+ if (list_empty(&local->roc_list)) {
+ ieee80211_run_deferred_scan(local);
+ return;
+ }
+
+ roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
+ list);
+
+ if (WARN_ON_ONCE(roc->started))
+ return;
+
+ if (local->ops->remain_on_channel) {
+ _ieee80211_start_next_roc(local);
+ } else {
+ /* delay it a bit */
+ ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+ round_jiffies_relative(HZ/2));
+ }
+}
+
+static void __ieee80211_roc_work(struct ieee80211_local *local)
+{
+ struct ieee80211_roc_work *roc;
+ bool on_channel;
+
+ lockdep_assert_held(&local->mtx);
+
+ if (WARN_ON(local->ops->remain_on_channel))
+ return;
+
+ roc = list_first_entry_or_null(&local->roc_list,
+ struct ieee80211_roc_work, list);
+ if (!roc)
+ return;
+
+ if (!roc->started) {
+ WARN_ON(local->use_chanctx);
+ _ieee80211_start_next_roc(local);
} else {
- /* finish this ROC */
- finish:
- list_del(&roc->list);
- started = roc->started;
on_channel = roc->on_channel;
- ieee80211_roc_notify_destroy(roc, !roc->abort);
+ if (ieee80211_recalc_sw_work(local, jiffies))
+ return;
+
+ /* careful - roc pointer became invalid during recalc */
- if (started && !on_channel) {
+ if (!on_channel) {
ieee80211_flush_queues(local, NULL, false);
local->tmp_channel = NULL;
@@ -407,14 +463,17 @@ void ieee80211_sw_roc_work(struct work_struct *work)
}
ieee80211_recalc_idle(local);
-
- if (started)
- ieee80211_start_next_roc(local);
- else if (list_empty(&local->roc_list))
- ieee80211_run_deferred_scan(local);
+ ieee80211_start_next_roc(local);
}
+}
- out_unlock:
+static void ieee80211_roc_work(struct work_struct *work)
+{
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, roc_work.work);
+
+ mutex_lock(&local->mtx);
+ __ieee80211_roc_work(local);
mutex_unlock(&local->mtx);
}
@@ -422,27 +481,14 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, hw_roc_done);
- struct ieee80211_roc_work *roc;
mutex_lock(&local->mtx);
- if (list_empty(&local->roc_list))
- goto out_unlock;
-
- roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
- list);
-
- if (!roc->started)
- goto out_unlock;
-
- list_del(&roc->list);
-
- ieee80211_roc_notify_destroy(roc, true);
+ ieee80211_end_finished_rocs(local, jiffies);
/* if there's another roc, start it now */
ieee80211_start_next_roc(local);
- out_unlock:
mutex_unlock(&local->mtx);
}
@@ -456,47 +502,500 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
-void ieee80211_roc_setup(struct ieee80211_local *local)
+static bool
+ieee80211_coalesce_hw_started_roc(struct ieee80211_local *local,
+ struct ieee80211_roc_work *new_roc,
+ struct ieee80211_roc_work *cur_roc)
{
- INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
- INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
- INIT_LIST_HEAD(&local->roc_list);
+ unsigned long now = jiffies;
+ unsigned long remaining;
+
+ if (WARN_ON(!cur_roc->started))
+ return false;
+
+ /* if it was scheduled in the hardware, but not started yet,
+ * we can only combine if the older one had a longer duration
+ */
+ if (!cur_roc->hw_begun && new_roc->duration > cur_roc->duration)
+ return false;
+
+ remaining = cur_roc->start_time +
+ msecs_to_jiffies(cur_roc->duration) -
+ now;
+
+ /* if it doesn't fit entirely, schedule a new one */
+ if (new_roc->duration > jiffies_to_msecs(remaining))
+ return false;
+
+ /* add just after the current one so we combine their finish later */
+ list_add(&new_roc->list, &cur_roc->list);
+
+ /* if the existing one has already begun then let this one also
+ * begin, otherwise they'll both be marked properly by the work
+ * struct that runs once the driver notifies us of the beginning
+ */
+ if (cur_roc->hw_begun)
+ ieee80211_handle_roc_started(new_roc, now);
+
+ return true;
}
-void ieee80211_roc_purge(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+static int ieee80211_start_roc_work(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *channel,
+ unsigned int duration, u64 *cookie,
+ struct sk_buff *txskb,
+ enum ieee80211_roc_type type)
{
struct ieee80211_roc_work *roc, *tmp;
- LIST_HEAD(tmp_list);
+ bool queued = false, combine_started = true;
+ int ret;
+
+ lockdep_assert_held(&local->mtx);
+
+ if (local->use_chanctx && !local->ops->remain_on_channel)
+ return -EOPNOTSUPP;
+
+ roc = kzalloc(sizeof(*roc), GFP_KERNEL);
+ if (!roc)
+ return -ENOMEM;
+
+ /*
+ * If the duration is zero, then the driver
+ * wouldn't actually do anything. Set it to
+ * 10 for now.
+ *
+ * TODO: cancel the off-channel operation
+ * when we get the SKB's TX status and
+ * the wait time was zero before.
+ */
+ if (!duration)
+ duration = 10;
+
+ roc->chan = channel;
+ roc->duration = duration;
+ roc->req_duration = duration;
+ roc->frame = txskb;
+ roc->type = type;
+ roc->sdata = sdata;
+
+ /*
+ * cookie is either the roc cookie (for normal roc)
+ * or the SKB (for mgmt TX)
+ */
+ if (!txskb) {
+ roc->cookie = ieee80211_mgmt_tx_cookie(local);
+ *cookie = roc->cookie;
+ } else {
+ roc->mgmt_tx_cookie = *cookie;
+ }
+
+ /* if there's no need to queue, handle it immediately */
+ if (list_empty(&local->roc_list) &&
+ !local->scanning && !ieee80211_is_radar_required(local)) {
+ /* if not HW assist, just queue & schedule work */
+ if (!local->ops->remain_on_channel) {
+ list_add_tail(&roc->list, &local->roc_list);
+ ieee80211_queue_delayed_work(&local->hw,
+ &local->roc_work, 0);
+ } else {
+ /* otherwise actually kick it off here
+ * (for error handling)
+ */
+ ret = drv_remain_on_channel(local, sdata, channel,
+ duration, type);
+ if (ret) {
+ kfree(roc);
+ return ret;
+ }
+ roc->started = true;
+ list_add_tail(&roc->list, &local->roc_list);
+ }
+
+ return 0;
+ }
+
+ /* otherwise handle queueing */
+
+ list_for_each_entry(tmp, &local->roc_list, list) {
+ if (tmp->chan != channel || tmp->sdata != sdata)
+ continue;
+
+ /*
+ * Extend this ROC if possible: If it hasn't started, add
+ * just after the new one to combine.
+ */
+ if (!tmp->started) {
+ list_add(&roc->list, &tmp->list);
+ queued = true;
+ break;
+ }
+
+ if (!combine_started)
+ continue;
+
+ if (!local->ops->remain_on_channel) {
+ /* If there's no hardware remain-on-channel, and
+ * doing so won't push us over the maximum r-o-c
+ * we allow, then we can just add the new one to
+ * the list and mark it as having started now.
+ * If it would push over the limit, don't try to
+ * combine with other started ones (that haven't
+ * been running as long) but potentially sort it
+ * with others that had the same fate.
+ */
+ unsigned long now = jiffies;
+ u32 elapsed = jiffies_to_msecs(now - tmp->start_time);
+ struct wiphy *wiphy = local->hw.wiphy;
+ u32 max_roc = wiphy->max_remain_on_channel_duration;
+
+ if (elapsed + roc->duration > max_roc) {
+ combine_started = false;
+ continue;
+ }
+
+ list_add(&roc->list, &tmp->list);
+ queued = true;
+ roc->on_channel = tmp->on_channel;
+ ieee80211_handle_roc_started(roc, now);
+ break;
+ }
+
+ queued = ieee80211_coalesce_hw_started_roc(local, roc, tmp);
+ if (queued)
+ break;
+ /* if it wasn't queued, perhaps it can be combined with
+ * another that also couldn't get combined previously,
+ * but no need to check for already started ones, since
+ * that can't work.
+ */
+ combine_started = false;
+ }
+
+ if (!queued)
+ list_add_tail(&roc->list, &local->roc_list);
+
+ return 0;
+}
+
+int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ unsigned int duration, u64 *cookie)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ struct ieee80211_local *local = sdata->local;
+ int ret;
+
+ mutex_lock(&local->mtx);
+ ret = ieee80211_start_roc_work(local, sdata, chan,
+ duration, cookie, NULL,
+ IEEE80211_ROC_TYPE_NORMAL);
+ mutex_unlock(&local->mtx);
+
+ return ret;
+}
+
+static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ u64 cookie, bool mgmt_tx)
+{
+ struct ieee80211_roc_work *roc, *tmp, *found = NULL;
+ int ret;
+
+ if (!cookie)
+ return -ENOENT;
mutex_lock(&local->mtx);
list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
- if (sdata && roc->sdata != sdata)
+ if (!mgmt_tx && roc->cookie != cookie)
continue;
+ else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
+ continue;
+
+ found = roc;
+ break;
+ }
+
+ if (!found) {
+ mutex_unlock(&local->mtx);
+ return -ENOENT;
+ }
+
+ if (!found->started) {
+ ieee80211_roc_notify_destroy(found);
+ goto out_unlock;
+ }
- if (roc->started && local->ops->remain_on_channel) {
- /* can race, so ignore return value */
- drv_cancel_remain_on_channel(local);
+ if (local->ops->remain_on_channel) {
+ ret = drv_cancel_remain_on_channel(local);
+ if (WARN_ON_ONCE(ret)) {
+ mutex_unlock(&local->mtx);
+ return ret;
+ }
+
+ /* TODO:
+ * if multiple items were combined here then we really shouldn't
+ * cancel them all - we should wait for as much time as needed
+ * for the longest remaining one, and only then cancel ...
+ */
+ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+ if (!roc->started)
+ break;
+ if (roc == found)
+ found = NULL;
+ ieee80211_roc_notify_destroy(roc);
}
- list_move_tail(&roc->list, &tmp_list);
- roc->abort = true;
+ /* that really must not happen - it was started */
+ WARN_ON(found);
+
+ ieee80211_start_next_roc(local);
+ } else {
+ /* go through work struct to return to the operating channel */
+ found->abort = true;
+ mod_delayed_work(local->workqueue, &local->roc_work, 0);
}
+
+ out_unlock:
mutex_unlock(&local->mtx);
- list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
- if (local->ops->remain_on_channel) {
- list_del(&roc->list);
- ieee80211_roc_notify_destroy(roc, true);
+ return 0;
+}
+
+int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev, u64 cookie)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ struct ieee80211_local *local = sdata->local;
+
+ return ieee80211_cancel_roc(local, cookie, false);
+}
+
+int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff *skb;
+ struct sta_info *sta;
+ const struct ieee80211_mgmt *mgmt = (void *)params->buf;
+ bool need_offchan = false;
+ u32 flags;
+ int ret;
+ u8 *data;
+
+ if (params->dont_wait_for_ack)
+ flags = IEEE80211_TX_CTL_NO_ACK;
+ else
+ flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+ if (params->no_cck)
+ flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_ADHOC:
+ if (!sdata->vif.bss_conf.ibss_joined)
+ need_offchan = true;
+ /* fall through */
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+ if (ieee80211_vif_is_mesh(&sdata->vif) &&
+ !sdata->u.mesh.mesh_id_len)
+ need_offchan = true;
+ /* fall through */
+#endif
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_GO:
+ if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+ !ieee80211_vif_is_mesh(&sdata->vif) &&
+ !rcu_access_pointer(sdata->bss->beacon))
+ need_offchan = true;
+ if (!ieee80211_is_action(mgmt->frame_control) ||
+ mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
+ mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED ||
+ mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
+ break;
+ rcu_read_lock();
+ sta = sta_info_get(sdata, mgmt->da);
+ rcu_read_unlock();
+ if (!sta)
+ return -ENOLINK;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ sdata_lock(sdata);
+ if (!sdata->u.mgd.associated ||
+ (params->offchan && params->wait &&
+ local->ops->remain_on_channel &&
+ memcmp(sdata->u.mgd.associated->bssid,
+ mgmt->bssid, ETH_ALEN)))
+ need_offchan = true;
+ sdata_unlock(sdata);
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ need_offchan = true;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* configurations requiring offchan cannot work if no channel has been
+ * specified
+ */
+ if (need_offchan && !params->chan)
+ return -EINVAL;
+
+ mutex_lock(&local->mtx);
+
+ /* Check if the operating channel is the requested channel */
+ if (!need_offchan) {
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+
+ if (chanctx_conf) {
+ need_offchan = params->chan &&
+ (params->chan !=
+ chanctx_conf->def.chan);
+ } else if (!params->chan) {
+ ret = -EINVAL;
+ rcu_read_unlock();
+ goto out_unlock;
} else {
- ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
+ need_offchan = true;
+ }
+ rcu_read_unlock();
+ }
+
+ if (need_offchan && !params->offchan) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + params->len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ data = skb_put(skb, params->len);
+ memcpy(data, params->buf, params->len);
+
+ /* Update CSA counters */
+ if (sdata->vif.csa_active &&
+ (sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
+ params->n_csa_offsets) {
+ int i;
+ struct beacon_data *beacon = NULL;
+
+ rcu_read_lock();
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP)
+ beacon = rcu_dereference(sdata->u.ap.beacon);
+ else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+ beacon = rcu_dereference(sdata->u.ibss.presp);
+ else if (ieee80211_vif_is_mesh(&sdata->vif))
+ beacon = rcu_dereference(sdata->u.mesh.beacon);
+
+ if (beacon)
+ for (i = 0; i < params->n_csa_offsets; i++)
+ data[params->csa_offsets[i]] =
+ beacon->csa_current_counter;
+
+ rcu_read_unlock();
+ }
- /* work will clean up etc */
- flush_delayed_work(&roc->work);
- WARN_ON(!roc->to_be_freed);
- kfree(roc);
+ IEEE80211_SKB_CB(skb)->flags = flags;
+
+ skb->dev = sdata->dev;
+
+ if (!params->dont_wait_for_ack) {
+ /* make a copy to preserve the frame contents
+ * in case of encryption.
+ */
+ ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_KERNEL);
+ if (ret) {
+ kfree_skb(skb);
+ goto out_unlock;
}
+ } else {
+ /* Assign a dummy non-zero cookie, it's not sent to
+ * userspace in this case but we rely on its value
+ * internally in the need_offchan case to distinguish
+ * mgmt-tx from remain-on-channel.
+ */
+ *cookie = 0xffffffff;
}
- WARN_ON_ONCE(!list_empty(&tmp_list));
+ if (!need_offchan) {
+ ieee80211_tx_skb(sdata, skb);
+ ret = 0;
+ goto out_unlock;
+ }
+
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
+ IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
+ if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
+ IEEE80211_SKB_CB(skb)->hw_queue =
+ local->hw.offchannel_tx_hw_queue;
+
+ /* This will handle all kinds of coalescing and immediate TX */
+ ret = ieee80211_start_roc_work(local, sdata, params->chan,
+ params->wait, cookie, skb,
+ IEEE80211_ROC_TYPE_MGMT_TX);
+ if (ret)
+ ieee80211_free_txskb(&local->hw, skb);
+ out_unlock:
+ mutex_unlock(&local->mtx);
+ return ret;
+}
+
+int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+ struct wireless_dev *wdev, u64 cookie)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+
+ return ieee80211_cancel_roc(local, cookie, true);
+}
+
+void ieee80211_roc_setup(struct ieee80211_local *local)
+{
+ INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
+ INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
+ INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
+ INIT_LIST_HEAD(&local->roc_list);
+}
+
+void ieee80211_roc_purge(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_roc_work *roc, *tmp;
+ bool work_to_do = false;
+
+ mutex_lock(&local->mtx);
+ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+ if (sdata && roc->sdata != sdata)
+ continue;
+
+ if (roc->started) {
+ if (local->ops->remain_on_channel) {
+ /* can race, so ignore return value */
+ drv_cancel_remain_on_channel(local);
+ ieee80211_roc_notify_destroy(roc);
+ } else {
+ roc->abort = true;
+ work_to_do = true;
+ }
+ } else {
+ ieee80211_roc_notify_destroy(roc);
+ }
+ }
+ if (work_to_do)
+ __ieee80211_roc_work(local);
+ mutex_unlock(&local->mtx);
}
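For orientation, a hedged sketch of the driver side of the reworked ROC list: hardware-offloaded remain-on-channel still signals mac80211 through the two existing callbacks, which now feed the coalesced handling above (hw_roc_start marks every started entry as begun, hw_roc_done reaps all finished entries). The example_* wrappers are hypothetical.

#include <net/mac80211.h>

static void example_fw_on_channel(struct ieee80211_hw *hw)
{
	/* firmware reached the requested channel */
	ieee80211_ready_on_channel(hw);
}

static void example_fw_roc_done(struct ieee80211_hw *hw)
{
	/* firmware left the channel; finished entries are ended via
	 * ieee80211_end_finished_rocs() and the next one is started
	 */
	ieee80211_remain_on_channel_expired(hw);
}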
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 8bae5de0dc44..1f827539d828 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -661,8 +661,7 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- WARN_ONCE((unsigned long)rx->skb->data & 1,
- "unaligned packet at 0x%p\n", rx->skb->data);
+ WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
#endif
}
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 4aeca4b0c3cb..a413e52f7691 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -597,8 +597,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
/* We need to ensure power level is at max for scanning. */
ieee80211_hw_config(local, 0);
- if ((req->channels[0]->flags &
- IEEE80211_CHAN_NO_IR) ||
+ if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR |
+ IEEE80211_CHAN_RADAR)) ||
!req->n_ssids) {
next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
} else {
@@ -645,7 +645,7 @@ ieee80211_scan_get_channel_time(struct ieee80211_channel *chan)
* TODO: channel switching also consumes quite some time,
* add that delay as well to get a better estimation
*/
- if (chan->flags & IEEE80211_CHAN_NO_IR)
+ if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR))
return IEEE80211_PASSIVE_CHANNEL_TIME;
return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME;
}
@@ -777,7 +777,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
*
* In any case, it is not necessary for a passive scan.
*/
- if (chan->flags & IEEE80211_CHAN_NO_IR || !scan_req->n_ssids) {
+ if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) ||
+ !scan_req->n_ssids) {
*next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
return;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f91d1873218c..4402ad5b27d1 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2,6 +2,7 @@
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -435,6 +436,19 @@ static int sta_info_insert_check(struct sta_info *sta)
is_multicast_ether_addr(sta->sta.addr)))
return -EINVAL;
+ /* Strictly speaking this isn't necessary as we hold the mutex, but
+ * the rhashtable code can't really deal with that distinction. We
+ * do require the mutex for correctness though.
+ */
+ rcu_read_lock();
+ lockdep_assert_held(&sdata->local->sta_mtx);
+ if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) &&
+ ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) {
+ rcu_read_unlock();
+ return -ENOTUNIQ;
+ }
+ rcu_read_unlock();
+
return 0;
}
@@ -554,14 +568,15 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
might_sleep();
+ mutex_lock(&local->sta_mtx);
+
err = sta_info_insert_check(sta);
if (err) {
+ mutex_unlock(&local->sta_mtx);
rcu_read_lock();
goto out_free;
}
- mutex_lock(&local->sta_mtx);
-
err = sta_info_insert_finish(sta);
if (err)
goto out_free;
@@ -868,6 +883,7 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
}
list_del_rcu(&sta->list);
+ sta->removed = true;
drv_sta_pre_rcu_remove(local, sta->sdata, sta);
@@ -1230,11 +1246,11 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
ieee80211_check_fast_xmit(sta);
}
-static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta, int tid,
+static void ieee80211_send_null_response(struct sta_info *sta, int tid,
enum ieee80211_frame_release_type reason,
- bool call_driver)
+ bool call_driver, bool more_data)
{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_qos_hdr *nullfunc;
struct sk_buff *skb;
@@ -1274,9 +1290,13 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
if (qos) {
nullfunc->qos_ctrl = cpu_to_le16(tid);
- if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
+ if (reason == IEEE80211_FRAME_RELEASE_UAPSD) {
nullfunc->qos_ctrl |=
cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
+ if (more_data)
+ nullfunc->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ }
}
info = IEEE80211_SKB_CB(skb);
@@ -1323,22 +1343,48 @@ static int find_highest_prio_tid(unsigned long tids)
return fls(tids) - 1;
}
+/* Indicates if the MORE_DATA bit should be set in the last
+ * frame obtained by ieee80211_sta_ps_get_frames.
+ * Note that driver_release_tids is relevant only if
+ * reason = IEEE80211_FRAME_RELEASE_PSPOLL
+ */
+static bool
+ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs,
+ enum ieee80211_frame_release_type reason,
+ unsigned long driver_release_tids)
+{
+ int ac;
+
+ /* If the driver has data on more than one TID then
+ * certainly there's more data if we release just a
+ * single frame now (from a single TID). This will
+ * only happen for PS-Poll.
+ */
+ if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
+ hweight16(driver_release_tids) > 1)
+ return true;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ if (ignored_acs & BIT(ac))
+ continue;
+
+ if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
+ !skb_queue_empty(&sta->ps_tx_buf[ac]))
+ return true;
+ }
+
+ return false;
+}
+
static void
-ieee80211_sta_ps_deliver_response(struct sta_info *sta,
- int n_frames, u8 ignored_acs,
- enum ieee80211_frame_release_type reason)
+ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs,
+ enum ieee80211_frame_release_type reason,
+ struct sk_buff_head *frames,
+ unsigned long *driver_release_tids)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
- bool more_data = false;
int ac;
- unsigned long driver_release_tids = 0;
- struct sk_buff_head frames;
-
- /* Service or PS-Poll period starts */
- set_sta_flag(sta, WLAN_STA_SP);
-
- __skb_queue_head_init(&frames);
/* Get response frame(s) and more data bit for the last one. */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
@@ -1352,26 +1398,13 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
/* if we already have frames from software, then we can't also
* release from hardware queues
*/
- if (skb_queue_empty(&frames)) {
- driver_release_tids |= sta->driver_buffered_tids & tids;
- driver_release_tids |= sta->txq_buffered_tids & tids;
+ if (skb_queue_empty(frames)) {
+ *driver_release_tids |=
+ sta->driver_buffered_tids & tids;
+ *driver_release_tids |= sta->txq_buffered_tids & tids;
}
- if (driver_release_tids) {
- /* If the driver has data on more than one TID then
- * certainly there's more data if we release just a
- * single frame now (from a single TID). This will
- * only happen for PS-Poll.
- */
- if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
- hweight16(driver_release_tids) > 1) {
- more_data = true;
- driver_release_tids =
- BIT(find_highest_prio_tid(
- driver_release_tids));
- break;
- }
- } else {
+ if (!*driver_release_tids) {
struct sk_buff *skb;
while (n_frames > 0) {
@@ -1385,20 +1418,44 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
if (!skb)
break;
n_frames--;
- __skb_queue_tail(&frames, skb);
+ __skb_queue_tail(frames, skb);
}
}
- /* If we have more frames buffered on this AC, then set the
- * more-data bit and abort the loop since we can't send more
- * data from other ACs before the buffered frames from this.
+ /* If we have more frames buffered on this AC, then abort the
+ * loop since we can't send more data from other ACs before
+ * the buffered frames from this.
*/
if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
- !skb_queue_empty(&sta->ps_tx_buf[ac])) {
- more_data = true;
+ !skb_queue_empty(&sta->ps_tx_buf[ac]))
break;
- }
}
+}
+
+static void
+ieee80211_sta_ps_deliver_response(struct sta_info *sta,
+ int n_frames, u8 ignored_acs,
+ enum ieee80211_frame_release_type reason)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+ unsigned long driver_release_tids = 0;
+ struct sk_buff_head frames;
+ bool more_data;
+
+ /* Service or PS-Poll period starts */
+ set_sta_flag(sta, WLAN_STA_SP);
+
+ __skb_queue_head_init(&frames);
+
+ ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason,
+ &frames, &driver_release_tids);
+
+ more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids);
+
+ if (reason == IEEE80211_FRAME_RELEASE_PSPOLL)
+ driver_release_tids =
+ BIT(find_highest_prio_tid(driver_release_tids));
if (skb_queue_empty(&frames) && !driver_release_tids) {
int tid;
@@ -1421,7 +1478,7 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
/* This will evaluate to 1, 3, 5 or 7. */
tid = 7 - ((ffs(~ignored_acs) - 1) << 1);
- ieee80211_send_null_response(sdata, sta, tid, reason, true);
+ ieee80211_send_null_response(sta, tid, reason, true, false);
} else if (!driver_release_tids) {
struct sk_buff_head pending;
struct sk_buff *skb;
@@ -1521,8 +1578,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
if (need_null)
ieee80211_send_null_response(
- sdata, sta, find_highest_prio_tid(tids),
- reason, false);
+ sta, find_highest_prio_tid(tids),
+ reason, false, false);
sta_info_recalc_tim(sta);
} else {
@@ -1660,6 +1717,22 @@ void ieee80211_sta_eosp(struct ieee80211_sta *pubsta)
}
EXPORT_SYMBOL(ieee80211_sta_eosp);
+void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid)
+{
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ enum ieee80211_frame_release_type reason;
+ bool more_data;
+
+ trace_api_send_eosp_nullfunc(sta->local, pubsta, tid);
+
+ reason = IEEE80211_FRAME_RELEASE_UAPSD;
+ more_data = ieee80211_sta_ps_more_data(sta, ~sta->sta.uapsd_queues,
+ reason, 0);
+
+ ieee80211_send_null_response(sta, tid, reason, false, more_data);
+}
+EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc);
+
void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
u8 tid, bool buffered)
{
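A hedged usage sketch for the export above: a driver that keeps a u-APSD service period open in firmware but has nothing left to send can ask mac80211 to close it with a QoS-null frame; mac80211 sets EOSP and, when its own queues still hold frames for the station, MORE_DATA as computed by ieee80211_sta_ps_more_data(). The wrapper is hypothetical.

#include <net/mac80211.h>

static void example_end_empty_sp(struct ieee80211_sta *sta, int tid)
{
	/* tid selects the TID/queue the EOSP nullfunc is sent on */
	ieee80211_send_eosp_nullfunc(sta, tid);
}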
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 2cafb21b422f..d6051629ed15 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -367,6 +367,7 @@ DECLARE_EWMA(signal, 1024, 8)
* @mesh: mesh STA information
* @debugfs: debug filesystem info
* @dead: set to true when sta is unlinked
+ * @removed: set to true when sta is being removed from sta_list
* @uploaded: set to true when sta is uploaded to the driver
* @sta: station information we share with the driver
* @sta_state: duplicates information about station state (for debug)
@@ -412,6 +413,7 @@ struct sta_info {
u16 listen_interval;
bool dead;
+ bool removed;
bool uploaded;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 56c6d6cfa5a1..a6b4442776a0 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -2027,6 +2027,31 @@ TRACE_EVENT(api_eosp,
)
);
+TRACE_EVENT(api_send_eosp_nullfunc,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta,
+ u8 tid),
+
+ TP_ARGS(local, sta, tid),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ __field(u8, tid)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ __entry->tid = tid;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT " tid:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->tid
+ )
+);
+
TRACE_EVENT(api_sta_set_buffered,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sta *sta,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index bdc224d5053a..3311ce0f3d6c 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1431,7 +1431,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
info->hw_queue =
vif->hw_queue[skb_get_queue_mapping(skb)];
} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
- dev_kfree_skb(skb);
+ ieee80211_purge_tx_queue(&local->hw, skbs);
return true;
} else
vif = NULL;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 74058020b7d6..08af2b307945 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -288,10 +288,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
if (!test_bit(reason, &local->queue_stop_reasons[queue]))
return;
- if (!refcounted)
+ if (!refcounted) {
local->q_stop_reasons[queue][reason] = 0;
- else
+ } else {
local->q_stop_reasons[queue][reason]--;
+ if (WARN_ON(local->q_stop_reasons[queue][reason] < 0))
+ local->q_stop_reasons[queue][reason] = 0;
+ }
if (local->q_stop_reasons[queue][reason] == 0)
__clear_bit(reason, &local->queue_stop_reasons[queue]);
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 42e96729dae6..446e1300383e 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -217,8 +217,7 @@ __ieee802154_rx_handle_packet(struct ieee802154_local *local,
break;
}
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
}
static void
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index c70d750148b6..4b3b9b310c3a 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -96,22 +96,15 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
}
EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
-static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
- struct sk_buff *skb, bool bos)
+static u32 mpls_multipath_hash(struct mpls_route *rt,
+ struct sk_buff *skb, bool bos)
{
struct mpls_entry_decoded dec;
struct mpls_shim_hdr *hdr;
bool eli_seen = false;
int label_index;
- int nh_index = 0;
u32 hash = 0;
- /* No need to look further into packet if there's only
- * one path
- */
- if (rt->rt_nhn == 1)
- goto out;
-
for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
label_index++) {
if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
@@ -165,7 +158,38 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
}
}
- nh_index = hash % rt->rt_nhn;
+ return hash;
+}
+
+static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
+ struct sk_buff *skb, bool bos)
+{
+ int alive = ACCESS_ONCE(rt->rt_nhn_alive);
+ u32 hash = 0;
+ int nh_index = 0;
+ int n = 0;
+
+ /* No need to look further into packet if there's only
+ * one path
+ */
+ if (rt->rt_nhn == 1)
+ goto out;
+
+ if (alive <= 0)
+ return NULL;
+
+ hash = mpls_multipath_hash(rt, skb, bos);
+ nh_index = hash % alive;
+ if (alive == rt->rt_nhn)
+ goto out;
+ for_nexthops(rt) {
+ if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ continue;
+ if (n == nh_index)
+ return nh;
+ n++;
+ } endfor_nexthops(rt);
+
out:
return &rt->rt_nh[nh_index];
}
@@ -365,6 +389,7 @@ static struct mpls_route *mpls_rt_alloc(int num_nh, u8 max_alen)
GFP_KERNEL);
if (rt) {
rt->rt_nhn = num_nh;
+ rt->rt_nhn_alive = num_nh;
rt->rt_max_alen = max_alen_aligned;
}
@@ -536,6 +561,16 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
RCU_INIT_POINTER(nh->nh_dev, dev);
+ if (!(dev->flags & IFF_UP)) {
+ nh->nh_flags |= RTNH_F_DEAD;
+ } else {
+ unsigned int flags;
+
+ flags = dev_get_flags(dev);
+ if (!(flags & (IFF_RUNNING | IFF_LOWER_UP)))
+ nh->nh_flags |= RTNH_F_LINKDOWN;
+ }
+
return 0;
errout:
@@ -570,6 +605,9 @@ static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
if (err)
goto errout;
+ if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ rt->rt_nhn_alive--;
+
return 0;
errout:
@@ -577,8 +615,8 @@ errout:
}
static int mpls_nh_build(struct net *net, struct mpls_route *rt,
- struct mpls_nh *nh, int oif,
- struct nlattr *via, struct nlattr *newdst)
+ struct mpls_nh *nh, int oif, struct nlattr *via,
+ struct nlattr *newdst)
{
int err = -ENOMEM;
@@ -681,11 +719,13 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg,
goto errout;
err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
- rtnh->rtnh_ifindex, nla_via,
- nla_newdst);
+ rtnh->rtnh_ifindex, nla_via, nla_newdst);
if (err)
goto errout;
+ if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ rt->rt_nhn_alive--;
+
rtnh = rtnh_next(rtnh, &remaining);
nhs++;
} endfor_nexthops(rt);
@@ -875,34 +915,74 @@ free:
return ERR_PTR(err);
}
-static void mpls_ifdown(struct net_device *dev)
+static void mpls_ifdown(struct net_device *dev, int event)
{
struct mpls_route __rcu **platform_label;
struct net *net = dev_net(dev);
- struct mpls_dev *mdev;
unsigned index;
platform_label = rtnl_dereference(net->mpls.platform_label);
for (index = 0; index < net->mpls.platform_labels; index++) {
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+
if (!rt)
continue;
- for_nexthops(rt) {
+
+ change_nexthops(rt) {
if (rtnl_dereference(nh->nh_dev) != dev)
continue;
- nh->nh_dev = NULL;
+ switch (event) {
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ nh->nh_flags |= RTNH_F_DEAD;
+ /* fall through */
+ case NETDEV_CHANGE:
+ nh->nh_flags |= RTNH_F_LINKDOWN;
+ ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
+ break;
+ }
+ if (event == NETDEV_UNREGISTER)
+ RCU_INIT_POINTER(nh->nh_dev, NULL);
} endfor_nexthops(rt);
}
- mdev = mpls_dev_get(dev);
- if (!mdev)
- return;
- mpls_dev_sysctl_unregister(mdev);
+ return;
+}
+
+static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
+{
+ struct mpls_route __rcu **platform_label;
+ struct net *net = dev_net(dev);
+ unsigned index;
+ int alive;
+
+ platform_label = rtnl_dereference(net->mpls.platform_label);
+ for (index = 0; index < net->mpls.platform_labels; index++) {
+ struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+
+ if (!rt)
+ continue;
+
+ alive = 0;
+ change_nexthops(rt) {
+ struct net_device *nh_dev =
+ rtnl_dereference(nh->nh_dev);
+
+ if (!(nh->nh_flags & nh_flags)) {
+ alive++;
+ continue;
+ }
+ if (nh_dev != dev)
+ continue;
+ alive++;
+ nh->nh_flags &= ~nh_flags;
+ } endfor_nexthops(rt);
- RCU_INIT_POINTER(dev->mpls_ptr, NULL);
+ ACCESS_ONCE(rt->rt_nhn_alive) = alive;
+ }
- kfree_rcu(mdev, rcu);
+ return;
}
static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
@@ -910,9 +990,9 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct mpls_dev *mdev;
+ unsigned int flags;
- switch(event) {
- case NETDEV_REGISTER:
+ if (event == NETDEV_REGISTER) {
/* For now just support ethernet devices */
if ((dev->type == ARPHRD_ETHER) ||
(dev->type == ARPHRD_LOOPBACK)) {
@@ -920,10 +1000,39 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
if (IS_ERR(mdev))
return notifier_from_errno(PTR_ERR(mdev));
}
- break;
+ return NOTIFY_OK;
+ }
+ mdev = mpls_dev_get(dev);
+ if (!mdev)
+ return NOTIFY_OK;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ mpls_ifdown(dev, event);
+ break;
+ case NETDEV_UP:
+ flags = dev_get_flags(dev);
+ if (flags & (IFF_RUNNING | IFF_LOWER_UP))
+ mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
+ else
+ mpls_ifup(dev, RTNH_F_DEAD);
+ break;
+ case NETDEV_CHANGE:
+ flags = dev_get_flags(dev);
+ if (flags & (IFF_RUNNING | IFF_LOWER_UP))
+ mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
+ else
+ mpls_ifdown(dev, event);
+ break;
case NETDEV_UNREGISTER:
- mpls_ifdown(dev);
+ mpls_ifdown(dev, event);
+ mdev = mpls_dev_get(dev);
+ if (mdev) {
+ mpls_dev_sysctl_unregister(mdev);
+ RCU_INIT_POINTER(dev->mpls_ptr, NULL);
+ kfree_rcu(mdev, rcu);
+ }
break;
case NETDEV_CHANGENAME:
mdev = mpls_dev_get(dev);
@@ -1237,9 +1346,15 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
dev = rtnl_dereference(nh->nh_dev);
if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
goto nla_put_failure;
+ if (nh->nh_flags & RTNH_F_LINKDOWN)
+ rtm->rtm_flags |= RTNH_F_LINKDOWN;
+ if (nh->nh_flags & RTNH_F_DEAD)
+ rtm->rtm_flags |= RTNH_F_DEAD;
} else {
struct rtnexthop *rtnh;
struct nlattr *mp;
+ int dead = 0;
+ int linkdown = 0;
mp = nla_nest_start(skb, RTA_MULTIPATH);
if (!mp)
@@ -1253,6 +1368,15 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
dev = rtnl_dereference(nh->nh_dev);
if (dev)
rtnh->rtnh_ifindex = dev->ifindex;
+ if (nh->nh_flags & RTNH_F_LINKDOWN) {
+ rtnh->rtnh_flags |= RTNH_F_LINKDOWN;
+ linkdown++;
+ }
+ if (nh->nh_flags & RTNH_F_DEAD) {
+ rtnh->rtnh_flags |= RTNH_F_DEAD;
+ dead++;
+ }
+
if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
nh->nh_labels,
nh->nh_label))
@@ -1266,6 +1390,11 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
} endfor_nexthops(rt);
+ if (linkdown == rt->rt_nhn)
+ rtm->rtm_flags |= RTNH_F_LINKDOWN;
+ if (dead == rt->rt_nhn)
+ rtm->rtm_flags |= RTNH_F_DEAD;
+
nla_nest_end(skb, mp);
}
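The multipath change above can be summarised with a worked example: with three next hops of which one is LINKDOWN, rt_nhn_alive is 2, the label-stack hash is reduced modulo 2, and the resulting index is counted over usable next hops only. A standalone sketch of that selection rule (illustrative only, not the kernel code):

#include <stdbool.h>
#include <stddef.h>

struct example_nh {
	bool usable;	/* neither RTNH_F_DEAD nor RTNH_F_LINKDOWN */
};

static struct example_nh *example_select(struct example_nh *nh,
					 size_t n_total, size_t n_alive,
					 unsigned int hash)
{
	size_t want, i, seen = 0;

	if (!n_alive)
		return NULL;		/* every next hop is down */

	want = hash % n_alive;
	for (i = 0; i < n_total; i++) {
		if (!nh[i].usable)
			continue;
		if (seen++ == want)
			return &nh[i];
	}
	return NULL;
}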
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index bde52ce88c94..732a5c17e986 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -41,6 +41,7 @@ enum mpls_payload_type {
struct mpls_nh { /* next hop label forwarding entry */
struct net_device __rcu *nh_dev;
+ unsigned int nh_flags;
u32 nh_label[MAX_NEW_LABELS];
u8 nh_labels;
u8 nh_via_alen;
@@ -74,6 +75,7 @@ struct mpls_route { /* next hop label forwarding entry */
u8 rt_payload_type;
u8 rt_max_alen;
unsigned int rt_nhn;
+ unsigned int rt_nhn_alive;
struct mpls_nh rt_nh[0];
};
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 67591aef9cae..cdd01e6416db 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -37,7 +37,7 @@ static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
return en->labels * sizeof(struct mpls_shim_hdr);
}
-int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+static int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct mpls_iptunnel_encap *tun_encap_info;
struct mpls_shim_hdr *hdr;
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 85a465b773e5..5bcd1b0cc2ec 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -176,7 +176,7 @@ void nft_meta_get_eval(const struct nft_expr *expr,
sk = skb_to_full_sk(skb);
if (!sk || !sk_fullsock(sk))
goto err;
- *dest = sk->sk_classid;
+ *dest = sock_cgroup_classid(&sk->sk_cgrp_data);
break;
#endif
default:
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
index a1d126f29463..54eaeb45ce99 100644
--- a/net/netfilter/xt_cgroup.c
+++ b/net/netfilter/xt_cgroup.c
@@ -42,7 +42,8 @@ cgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
if (skb->sk == NULL || !sk_fullsock(skb->sk))
return false;
- return (info->id == skb->sk->sk_classid) ^ info->invert;
+ return (info->id == sock_cgroup_classid(&skb->sk->sk_cgrp_data)) ^
+ info->invert;
}
static struct xt_match cgroup_mt_reg __read_mostly = {
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index b7de0da46acd..ecf0a0196f18 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -572,7 +572,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
pr_debug("mask 0x%x\n", mask);
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index a7a80a6b77b0..653d073bae45 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -58,7 +58,7 @@ void ovs_dp_notify_wq(struct work_struct *work)
struct hlist_node *n;
hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
- if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
+ if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL)
continue;
if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index efb736bb6855..e41cd12d9b2d 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -117,7 +117,6 @@ static struct vport_ops ovs_geneve_vport_ops = {
.destroy = ovs_netdev_tunnel_destroy,
.get_options = geneve_get_options,
.send = dev_queue_xmit,
- .owner = THIS_MODULE,
};
static int __init ovs_geneve_tnl_init(void)
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index c3257d78d3d2..7f8897f33a67 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -89,7 +89,6 @@ static struct vport_ops ovs_gre_vport_ops = {
.create = gre_create,
.send = dev_queue_xmit,
.destroy = ovs_netdev_tunnel_destroy,
- .owner = THIS_MODULE,
};
static int __init ovs_gre_tnl_init(void)
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index b327368a3848..6a6adf314363 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -105,7 +105,7 @@ struct vport *ovs_netdev_link(struct vport *vport, const char *name)
rtnl_lock();
err = netdev_master_upper_dev_link(vport->dev,
- get_dpdev(vport->dp));
+ get_dpdev(vport->dp), NULL, NULL);
if (err)
goto error_unlock;
@@ -180,9 +180,13 @@ void ovs_netdev_tunnel_destroy(struct vport *vport)
if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
ovs_netdev_detach_dev(vport);
- /* Early release so we can unregister the device */
+ /* We can be invoked by both explicit vport deletion and
+ * underlying netdev deregistration; delete the link only
+ * if it's not already shutting down.
+ */
+ if (vport->dev->reg_state == NETREG_REGISTERED)
+ rtnl_delete_link(vport->dev);
dev_put(vport->dev);
- rtnl_delete_link(vport->dev);
vport->dev = NULL;
rtnl_unlock();
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 0ac0fd004d7e..31cbc8c5c7db 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -71,7 +71,7 @@ static struct hlist_head *hash_bucket(const struct net *net, const char *name)
return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}
-int ovs_vport_ops_register(struct vport_ops *ops)
+int __ovs_vport_ops_register(struct vport_ops *ops)
{
int err = -EEXIST;
struct vport_ops *o;
@@ -87,7 +87,7 @@ errout:
ovs_unlock();
return err;
}
-EXPORT_SYMBOL_GPL(ovs_vport_ops_register);
+EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);
void ovs_vport_ops_unregister(struct vport_ops *ops)
{
@@ -256,8 +256,8 @@ int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
*
* @vport: vport to delete.
*
- * Detaches @vport from its datapath and destroys it. It is possible to fail
- * for reasons such as lack of memory. ovs_mutex must be held.
+ * Detaches @vport from its datapath and destroys it. ovs_mutex must
+ * be held.
*/
void ovs_vport_del(struct vport *vport)
{
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index bdfd82a7c064..8ea3a96980ac 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -196,7 +196,13 @@ static inline const char *ovs_vport_name(struct vport *vport)
return vport->dev->name;
}
-int ovs_vport_ops_register(struct vport_ops *ops);
+int __ovs_vport_ops_register(struct vport_ops *ops);
+#define ovs_vport_ops_register(ops) \
+ ({ \
+ (ops)->owner = THIS_MODULE; \
+ __ovs_vport_ops_register(ops); \
+ })
+
void ovs_vport_ops_unregister(struct vport_ops *ops);
static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
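ovs_vport_ops_register() is now a macro that stamps THIS_MODULE into ops->owner before calling __ovs_vport_ops_register(), which is why the geneve and gre ops tables above drop their explicit .owner initializers. A sketch of what registration looks like for a tunnel vport module under the macro; the example_* names are hypothetical:

/* Hypothetical vport module: no .owner field is needed, the registration
 * macro records the registering module itself.
 */
static struct vport_ops ovs_example_vport_ops = {
	.type		= OVS_VPORT_TYPE_GENEVE,	/* placeholder type */
	.create		= example_create,		/* hypothetical callback */
	.destroy	= ovs_netdev_tunnel_destroy,
	.send		= dev_queue_xmit,
};

static int __init ovs_example_tnl_init(void)
{
	return ovs_vport_ops_register(&ovs_example_vport_ops);
}

static void __exit ovs_example_tnl_exit(void)
{
	ovs_vport_ops_unregister(&ovs_example_vport_ops);
}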
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1cf928fb573e..992396aa635c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2329,8 +2329,8 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
static bool ll_header_truncated(const struct net_device *dev, int len)
{
/* net device doesn't like empty head */
- if (unlikely(len <= dev->hard_header_len)) {
- net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
+ if (unlikely(len < dev->hard_header_len)) {
+ net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
current->comm, len, dev->hard_header_len);
return true;
}
diff --git a/net/rds/connection.c b/net/rds/connection.c
index d4564036a339..e3b118cae81d 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -186,12 +186,6 @@ static struct rds_connection *__rds_conn_create(struct net *net,
}
}
- if (trans == NULL) {
- kmem_cache_free(rds_conn_slab, conn);
- conn = ERR_PTR(-ENODEV);
- goto out;
- }
-
conn->c_trans = trans;
ret = trans->conn_alloc(conn, gfp);
diff --git a/net/rds/send.c b/net/rds/send.c
index 827155c2ead1..c9cdb358ea88 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1013,11 +1013,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
release_sock(sk);
}
- /* racing with another thread binding seems ok here */
+ lock_sock(sk);
if (daddr == 0 || rs->rs_bound_addr == 0) {
+ release_sock(sk);
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
+ release_sock(sk);
if (payload_len > rds_sk_sndbuf(rs)) {
ret = -EMSGSIZE;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 1f8a144a5dc2..7e2d1057d8bc 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -67,7 +67,7 @@ static void rxrpc_write_space(struct sock *sk)
if (rxrpc_writable(sk)) {
struct socket_wq *wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index e0547f521f20..adc555e0323d 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -723,8 +723,10 @@ process_further:
if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
- hard > tx)
+ hard > tx) {
+ call->acks_hard = tx;
goto all_acked;
+ }
smp_rmb();
rxrpc_rotate_tx_window(call, hard - 1);
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index a40d3afe93b7..14c4e12c47b0 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -531,7 +531,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
/* this should be in poll */
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
return -EPIPE;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index f43c8f33f09e..7ec667dd4ce1 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -253,7 +253,8 @@ int qdisc_set_default(const char *name)
}
/* We know handle. Find qdisc among all qdisc's attached to device
- (root qdisc, all its children, children of children etc.)
+ * (root qdisc, all its children, children of children etc.)
+ * Note: caller either uses rtnl or rcu_read_lock()
*/
static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
@@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
root->handle == handle)
return root;
- list_for_each_entry(q, &root->list, list) {
+ list_for_each_entry_rcu(q, &root->list, list) {
if (q->handle == handle)
return q;
}
@@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q)
struct Qdisc *root = qdisc_dev(q)->qdisc;
WARN_ON_ONCE(root == &noop_qdisc);
- list_add_tail(&q->list, &root->list);
+ ASSERT_RTNL();
+ list_add_tail_rcu(&q->list, &root->list);
}
}
EXPORT_SYMBOL(qdisc_list_add);
void qdisc_list_del(struct Qdisc *q)
{
- if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
- list_del(&q->list);
+ if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+ ASSERT_RTNL();
+ list_del_rcu(&q->list);
+ }
}
EXPORT_SYMBOL(qdisc_list_del);
@@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
if (n == 0)
return;
drops = max_t(int, n, 0);
+ rcu_read_lock();
while ((parentid = sch->parent)) {
if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
- return;
+ break;
+ if (sch->flags & TCQ_F_NOPARENT)
+ break;
+ /* TODO: perform the search on a per txq basis */
sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
if (sch == NULL) {
- WARN_ON(parentid != TC_H_ROOT);
- return;
+ WARN_ON_ONCE(parentid != TC_H_ROOT);
+ break;
}
cops = sch->ops->cl_ops;
if (cops->qlen_notify) {
@@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
sch->q.qlen -= n;
__qdisc_qstats_drop(sch, drops);
}
+ rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
@@ -941,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
}
lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
if (!netif_is_multiqueue(dev))
- sch->flags |= TCQ_F_ONETXQUEUE;
+ sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
sch->handle = handle;
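TCQ_F_NOPARENT marks qdiscs that sit directly on a netdev_queue (the single-queue case above and the per-queue defaults and mq/mqprio children in the hunks that follow); they are not reachable through qdisc_lookup() on the device root, so the backlog walk now stops at them instead of warning. A condensed view of the loop in qdisc_tree_decrease_qlen() as it reads once the hunks above are applied (not a verbatim copy):

	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;
		if (sch->flags & TCQ_F_NOPARENT)
			break;		/* attached straight to a netdev_queue */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		/* ... qlen_notify the parent class, then propagate the
		 * qlen/drop adjustment up the tree ... */
	}
	rcu_read_unlock();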
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index cb5d4ad32946..e82a1ad80aa5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -737,7 +737,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
return;
}
if (!netif_is_multiqueue(dev))
- qdisc->flags |= TCQ_F_ONETXQUEUE;
+ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
dev_queue->qdisc_sleeping = qdisc;
}
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index f3cbaecd283a..3e82f047caaf 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
if (qdisc == NULL)
goto err;
priv->qdiscs[ntx] = qdisc;
- qdisc->flags |= TCQ_F_ONETXQUEUE;
+ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
sch->flags |= TCQ_F_MQROOT;
@@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
*old = dev_graft_qdisc(dev_queue, new);
if (new)
- new->flags |= TCQ_F_ONETXQUEUE;
+ new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
if (dev->flags & IFF_UP)
dev_activate(dev);
return 0;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 3811a745452c..ad70ecf57ce7 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
goto err;
}
priv->qdiscs[i] = qdisc;
- qdisc->flags |= TCQ_F_ONETXQUEUE;
+ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
/* If the mqprio options indicate that hardware should own
@@ -209,7 +209,7 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
*old = dev_graft_qdisc(dev_queue, new);
if (new)
- new->flags |= TCQ_F_ONETXQUEUE;
+ new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
if (dev->flags & IFF_UP)
dev_activate(dev);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e917d27328ea..acb45b8c2a9d 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -209,6 +209,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
struct sock *sk = skb->sk;
struct ipv6_pinfo *np = inet6_sk(sk);
struct flowi6 *fl6 = &transport->fl.u.ip6;
+ int res;
pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
skb->len, &fl6->saddr, &fl6->daddr);
@@ -220,7 +221,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
- return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
+ rcu_read_lock();
+ res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
+ rcu_read_unlock();
+ return res;
}
/* Returns the dst cache entry for the given source and destination ip
@@ -262,7 +266,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
pr_debug("src=%pI6 - ", &fl6->saddr);
}
- final_p = fl6_update_dst(fl6, np->opt, &final);
+ rcu_read_lock();
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+ rcu_read_unlock();
+
dst = ip6_dst_lookup_flow(sk, fl6, final_p);
if (!asoc || saddr)
goto out;
@@ -321,7 +328,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if (baddr) {
fl6->saddr = baddr->v6.sin6_addr;
fl6->fl6_sport = baddr->v6.sin6_port;
- final_p = fl6_update_dst(fl6, np->opt, &final);
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
dst = ip6_dst_lookup_flow(sk, fl6, final_p);
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 897c01c029ca..106bb09636f1 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -972,7 +972,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
return -EFAULT;
/* Alloc space for the address array in kernel memory. */
- kaddrs = kmalloc(addrs_size, GFP_KERNEL);
+ kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
if (unlikely(!kaddrs))
return -ENOMEM;
@@ -4928,7 +4928,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
to = optval + offsetof(struct sctp_getaddrs, addrs);
space_left = len - offsetof(struct sctp_getaddrs, addrs);
- addrs = kmalloc(space_left, GFP_KERNEL);
+ addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
if (!addrs)
return -ENOMEM;
@@ -6458,7 +6458,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (sctp_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
/*
* Since the socket is not locked, the buffer
* might be made available after the writeable check and
@@ -6801,26 +6801,30 @@ no_packet:
static void __sctp_write_space(struct sctp_association *asoc)
{
struct sock *sk = asoc->base.sk;
- struct socket *sock = sk->sk_socket;
- if ((sctp_wspace(asoc) > 0) && sock) {
- if (waitqueue_active(&asoc->wait))
- wake_up_interruptible(&asoc->wait);
+ if (sctp_wspace(asoc) <= 0)
+ return;
+
+ if (waitqueue_active(&asoc->wait))
+ wake_up_interruptible(&asoc->wait);
- if (sctp_writeable(sk)) {
- wait_queue_head_t *wq = sk_sleep(sk);
+ if (sctp_writeable(sk)) {
+ struct socket_wq *wq;
- if (wq && waitqueue_active(wq))
- wake_up_interruptible(wq);
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq) {
+ if (waitqueue_active(&wq->wait))
+ wake_up_interruptible(&wq->wait);
/* Note that we try to include the Async I/O support
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
if (!(sk->sk_shutdown & SEND_SHUTDOWN))
- sock_wake_async(sock,
- SOCK_WAKE_SPACE, POLL_OUT);
+ sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
}
+ rcu_read_unlock();
}
}
@@ -6978,7 +6982,7 @@ void sctp_data_ready(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
@@ -7375,6 +7379,13 @@ struct proto sctp_prot = {
#if IS_ENABLED(CONFIG_IPV6)
+#include <net/transp_v6.h>
+static void sctp_v6_destroy_sock(struct sock *sk)
+{
+ sctp_destroy_sock(sk);
+ inet6_destroy_sock(sk);
+}
+
struct proto sctpv6_prot = {
.name = "SCTPv6",
.owner = THIS_MODULE,
@@ -7384,7 +7395,7 @@ struct proto sctpv6_prot = {
.accept = sctp_accept,
.ioctl = sctp_ioctl,
.init = sctp_init_sock,
- .destroy = sctp_destroy_sock,
+ .destroy = sctp_v6_destroy_sock,
.shutdown = sctp_shutdown,
.setsockopt = sctp_setsockopt,
.getsockopt = sctp_getsockopt,
diff --git a/net/socket.c b/net/socket.c
index dd2c247c99e3..456fadb3d819 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1056,27 +1056,20 @@ static int sock_fasync(int fd, struct file *filp, int on)
return 0;
}
-/* This function may be called only under socket lock or callback_lock or rcu_lock */
+/* This function may be called only under rcu_lock */
-int sock_wake_async(struct socket *sock, int how, int band)
+int sock_wake_async(struct socket_wq *wq, int how, int band)
{
- struct socket_wq *wq;
-
- if (!sock)
- return -1;
- rcu_read_lock();
- wq = rcu_dereference(sock->wq);
- if (!wq || !wq->fasync_list) {
- rcu_read_unlock();
+ if (!wq || !wq->fasync_list)
return -1;
- }
+
switch (how) {
case SOCK_WAKE_WAITD:
- if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
+ if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags))
break;
goto call_kill;
case SOCK_WAKE_SPACE:
- if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
+ if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
break;
/* fall through */
case SOCK_WAKE_IO:
@@ -1086,7 +1079,7 @@ call_kill:
case SOCK_WAKE_URG:
kill_fasync(&wq->fasync_list, SIGURG, band);
}
- rcu_read_unlock();
+
return 0;
}
EXPORT_SYMBOL(sock_wake_async);
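sock_wake_async() no longer looks up and RCU-protects sock->wq itself; the caller passes the socket_wq and is responsible for holding rcu_read_lock(), which is what sk_wake_async() and the rxrpc/sctp hunks above now rely on. A minimal sketch of a write-space callback under the new convention (example_write_space is an illustrative name):

static void example_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);
	/* the async flags now live in the wq, so hand it over directly */
	if (wq && !(sk->sk_shutdown & SEND_SHUTDOWN))
		sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}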
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 229956bf8457..95f82d8d4888 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -353,12 +353,20 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
struct rpc_xprt *xprt = req->rq_xprt;
struct svc_serv *bc_serv = xprt->bc_serv;
+ struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
spin_lock(&xprt->bc_pa_lock);
list_del(&req->rq_bc_pa_list);
xprt_dec_alloc_count(xprt, 1);
spin_unlock(&xprt->bc_pa_lock);
+ if (copied <= rq_rcv_buf->head[0].iov_len) {
+ rq_rcv_buf->head[0].iov_len = copied;
+ rq_rcv_buf->page_len = 0;
+ } else {
+ rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
+ }
+
req->rq_private_buf.len = copied;
set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index bc5b7b5032ca..7fccf9675df8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1363,6 +1363,7 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
+ rqstp->rq_arg.len = req->rq_private_buf.len;
/* reset result send buffer "put" position */
resv->iov_len = 0;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 1d1a70498910..2ffaf6a79499 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -398,7 +398,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
if (unlikely(!sock))
return -ENOTSOCK;
- clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+ clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags);
if (base != 0) {
addr = NULL;
addrlen = 0;
@@ -442,7 +442,7 @@ static void xs_nospace_callback(struct rpc_task *task)
struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
transport->inet->sk_write_pending--;
- clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+ clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
}
/**
@@ -467,7 +467,7 @@ static int xs_nospace(struct rpc_task *task)
/* Don't race with disconnect */
if (xprt_connected(xprt)) {
- if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
+ if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) {
/*
* Notify TCP that we're limited by the application
* window size
@@ -478,7 +478,7 @@ static int xs_nospace(struct rpc_task *task)
xprt_wait_for_buffer_space(task, xs_nospace_callback);
}
} else {
- clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+ clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
ret = -ENOTCONN;
}
@@ -626,7 +626,7 @@ process_status:
case -EPERM:
/* When the server has died, an ICMP port unreachable message
* prompts ECONNREFUSED. */
- clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+ clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
}
return status;
@@ -715,7 +715,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
case -EADDRINUSE:
case -ENOBUFS:
case -EPIPE:
- clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+ clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
}
return status;
@@ -1618,7 +1618,7 @@ static void xs_write_space(struct sock *sk)
if (unlikely(!(xprt = xprt_from_sock(sk))))
return;
- if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
+ if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0)
return;
xprt_write_space(xprt);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b11afe71dfc1..0c2944fb9ae0 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -348,6 +348,7 @@ void tipc_link_add_bc_peer(struct tipc_link *snd_l,
snd_l->ackers++;
rcv_l->acked = snd_l->snd_nxt - 1;
+ snd_l->state = LINK_ESTABLISHED;
tipc_link_build_bc_init_msg(uc_l, xmitq);
}
@@ -363,6 +364,7 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
rcv_l->state = LINK_RESET;
if (!snd_l->ackers) {
tipc_link_reset(snd_l);
+ snd_l->state = LINK_RESET;
__skb_queue_purge(xmitq);
}
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 3f7a4ed71990..fa97d9649a28 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1189,20 +1189,19 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
spin_unlock_bh(&le->lock);
}
tipc_node_read_unlock(n);
- if (likely(!skb_queue_empty(&xmitq))) {
+ if (likely(!rc))
tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
- return 0;
- }
- if (unlikely(rc == -ENOBUFS))
+ else if (rc == -ENOBUFS)
tipc_node_link_down(n, bearer_id, false);
tipc_node_put(n);
return rc;
}
- if (unlikely(!in_own_node(net, dnode)))
- return rc;
- tipc_sk_rcv(net, list);
- return 0;
+ if (likely(in_own_node(net, dnode))) {
+ tipc_sk_rcv(net, list);
+ return 0;
+ }
+ return rc;
}
/* tipc_node_xmit_skb(): send single buffer to destination
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 552dbaba9cf3..69c29050f14a 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -105,6 +105,7 @@ struct tipc_sock {
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
+static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
@@ -381,6 +382,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
sk->sk_rcvbuf = sysctl_tipc_rmem[1];
sk->sk_data_ready = tipc_data_ready;
sk->sk_write_space = tipc_write_space;
+ sk->sk_destruct = tipc_sock_destruct;
tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
tsk->sent_unacked = 0;
atomic_set(&tsk->dupl_rcvcnt, 0);
@@ -470,9 +472,6 @@ static int tipc_release(struct socket *sock)
tipc_node_remove_conn(net, dnode, tsk->portid);
}
- /* Discard any remaining (connection-based) messages in receive queue */
- __skb_queue_purge(&sk->sk_receive_queue);
-
/* Reject any messages that accumulated in backlog queue */
sock->state = SS_DISCONNECTING;
release_sock(sk);
@@ -1492,7 +1491,7 @@ static void tipc_write_space(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
rcu_read_unlock();
@@ -1509,12 +1508,17 @@ static void tipc_data_ready(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM | POLLRDBAND);
rcu_read_unlock();
}
+static void tipc_sock_destruct(struct sock *sk)
+{
+ __skb_queue_purge(&sk->sk_receive_queue);
+}
+
/**
* filter_connect - Handle all incoming messages for a connection-based socket
* @tsk: TIPC socket
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 816914ef228d..6af78c6276b4 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -157,8 +157,11 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
struct rtable *rt;
- if (skb_headroom(skb) < UDP_MIN_HEADROOM)
- pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+ if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
+ err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+ if (err)
+ goto tx_error;
+ }
skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
ub = rcu_dereference_rtnl(b->media_ptr);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 955ec152cb71..1c3c1f3a3ec4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -326,6 +326,118 @@ found:
return s;
}
+/* Support code for asymmetrically connected dgram sockets
+ *
+ * If a datagram socket is connected to a socket not itself connected
+ * to the first socket (eg, /dev/log), clients may only enqueue more
+ * messages if the present receive queue of the server socket is not
+ * "too large". This means there's a second writeability condition
+ * poll and sendmsg need to test. The dgram recv code will do a wake
+ * up on the peer_wait wait queue of a socket upon reception of a
+ * datagram which needs to be propagated to sleeping would-be writers
+ * since these might not have sent anything so far. This can't be
+ * accomplished via poll_wait because the lifetime of the server
+ * socket might be less than that of its clients if these break their
+ * association with it or if the server socket is closed while clients
+ * are still connected to it and there's no way to inform "a polling
+ * implementation" that it should let go of a certain wait queue
+ *
+ * In order to propagate a wake up, a wait_queue_t of the client
+ * socket is enqueued on the peer_wait queue of the server socket
+ * whose wake function does a wake_up on the ordinary client socket
+ * wait queue. This connection is established whenever a write (or
+ * poll for write) hit the flow control condition and broken when the
+ * association to the server socket is dissolved or after a wake up
+ * was relayed.
+ */
+
+static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
+ void *key)
+{
+ struct unix_sock *u;
+ wait_queue_head_t *u_sleep;
+
+ u = container_of(q, struct unix_sock, peer_wake);
+
+ __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
+ q);
+ u->peer_wake.private = NULL;
+
+ /* relaying can only happen while the wq still exists */
+ u_sleep = sk_sleep(&u->sk);
+ if (u_sleep)
+ wake_up_interruptible_poll(u_sleep, key);
+
+ return 0;
+}
+
+static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
+{
+ struct unix_sock *u, *u_other;
+ int rc;
+
+ u = unix_sk(sk);
+ u_other = unix_sk(other);
+ rc = 0;
+ spin_lock(&u_other->peer_wait.lock);
+
+ if (!u->peer_wake.private) {
+ u->peer_wake.private = other;
+ __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
+
+ rc = 1;
+ }
+
+ spin_unlock(&u_other->peer_wait.lock);
+ return rc;
+}
+
+static void unix_dgram_peer_wake_disconnect(struct sock *sk,
+ struct sock *other)
+{
+ struct unix_sock *u, *u_other;
+
+ u = unix_sk(sk);
+ u_other = unix_sk(other);
+ spin_lock(&u_other->peer_wait.lock);
+
+ if (u->peer_wake.private == other) {
+ __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
+ u->peer_wake.private = NULL;
+ }
+
+ spin_unlock(&u_other->peer_wait.lock);
+}
+
+static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
+ struct sock *other)
+{
+ unix_dgram_peer_wake_disconnect(sk, other);
+ wake_up_interruptible_poll(sk_sleep(sk),
+ POLLOUT |
+ POLLWRNORM |
+ POLLWRBAND);
+}
+
+/* preconditions:
+ * - unix_peer(sk) == other
+ * - association is stable
+ */
+static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+{
+ int connected;
+
+ connected = unix_dgram_peer_wake_connect(sk, other);
+
+ if (unix_recvq_full(other))
+ return 1;
+
+ if (connected)
+ unix_dgram_peer_wake_disconnect(sk, other);
+
+ return 0;
+}
+
static int unix_writable(const struct sock *sk)
{
return sk->sk_state != TCP_LISTEN &&
@@ -339,7 +451,7 @@ static void unix_write_space(struct sock *sk)
rcu_read_lock();
if (unix_writable(sk)) {
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait,
POLLOUT | POLLWRNORM | POLLWRBAND);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
@@ -431,6 +543,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
skpair->sk_state_change(skpair);
sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
}
+
+ unix_dgram_peer_wake_disconnect(sk, skpair);
sock_put(skpair); /* It may now die */
unix_peer(sk) = NULL;
}
@@ -666,6 +780,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
INIT_LIST_HEAD(&u->link);
mutex_init(&u->readlock); /* single task reading lock */
init_waitqueue_head(&u->peer_wait);
+ init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
if (sk == NULL)
@@ -1033,6 +1148,8 @@ restart:
if (unix_peer(sk)) {
struct sock *old_peer = unix_peer(sk);
unix_peer(sk) = other;
+ unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
+
unix_state_double_unlock(sk, other);
if (other != old_peer)
@@ -1434,6 +1551,14 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
return err;
}
+static bool unix_passcred_enabled(const struct socket *sock,
+ const struct sock *other)
+{
+ return test_bit(SOCK_PASSCRED, &sock->flags) ||
+ !other->sk_socket ||
+ test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
+}
+
/*
* Some apps rely on write() giving SCM_CREDENTIALS
* We include credentials if source or destination socket
@@ -1444,14 +1569,41 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
{
if (UNIXCB(skb).pid)
return;
- if (test_bit(SOCK_PASSCRED, &sock->flags) ||
- !other->sk_socket ||
- test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+ if (unix_passcred_enabled(sock, other)) {
UNIXCB(skb).pid = get_pid(task_tgid(current));
current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
}
}
+static int maybe_init_creds(struct scm_cookie *scm,
+ struct socket *socket,
+ const struct sock *other)
+{
+ int err;
+ struct msghdr msg = { .msg_controllen = 0 };
+
+ err = scm_send(socket, &msg, scm, false);
+ if (err)
+ return err;
+
+ if (unix_passcred_enabled(socket, other)) {
+ scm->pid = get_pid(task_tgid(current));
+ current_uid_gid(&scm->creds.uid, &scm->creds.gid);
+ }
+ return err;
+}
+
+static bool unix_skb_scm_eq(struct sk_buff *skb,
+ struct scm_cookie *scm)
+{
+ const struct unix_skb_parms *u = &UNIXCB(skb);
+
+ return u->pid == scm->pid &&
+ uid_eq(u->uid, scm->creds.uid) &&
+ gid_eq(u->gid, scm->creds.gid) &&
+ unix_secdata_eq(scm, skb);
+}
+
/*
* Send AF_UNIX data.
*/
@@ -1472,6 +1624,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
struct scm_cookie scm;
int max_level;
int data_len = 0;
+ int sk_locked;
wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
@@ -1550,12 +1703,14 @@ restart:
goto out_free;
}
+ sk_locked = 0;
unix_state_lock(other);
+restart_locked:
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
- if (sock_flag(other, SOCK_DEAD)) {
+ if (unlikely(sock_flag(other, SOCK_DEAD))) {
/*
* Check with 1003.1g - what should
* datagram error
@@ -1563,10 +1718,14 @@ restart:
unix_state_unlock(other);
sock_put(other);
+ if (!sk_locked)
+ unix_state_lock(sk);
+
err = 0;
- unix_state_lock(sk);
if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
+ unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
@@ -1592,21 +1751,38 @@ restart:
goto out_unlock;
}
- if (unix_peer(other) != sk && unix_recvq_full(other)) {
- if (!timeo) {
- err = -EAGAIN;
- goto out_unlock;
+ if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+ if (timeo) {
+ timeo = unix_wait_for_peer(other, timeo);
+
+ err = sock_intr_errno(timeo);
+ if (signal_pending(current))
+ goto out_free;
+
+ goto restart;
}
- timeo = unix_wait_for_peer(other, timeo);
+ if (!sk_locked) {
+ unix_state_unlock(other);
+ unix_state_double_lock(sk, other);
+ }
- err = sock_intr_errno(timeo);
- if (signal_pending(current))
- goto out_free;
+ if (unix_peer(sk) != other ||
+ unix_dgram_peer_wake_me(sk, other)) {
+ err = -EAGAIN;
+ sk_locked = 1;
+ goto out_unlock;
+ }
- goto restart;
+ if (!sk_locked) {
+ sk_locked = 1;
+ goto restart_locked;
+ }
}
+ if (unlikely(sk_locked))
+ unix_state_unlock(sk);
+
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
maybe_add_creds(skb, sock, other);
@@ -1620,6 +1796,8 @@ restart:
return len;
out_unlock:
+ if (sk_locked)
+ unix_state_unlock(sk);
unix_state_unlock(other);
out_free:
kfree_skb(skb);
@@ -1741,8 +1919,10 @@ out_err:
static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
int offset, size_t size, int flags)
{
- int err = 0;
- bool send_sigpipe = true;
+ int err;
+ bool send_sigpipe = false;
+ bool init_scm = true;
+ struct scm_cookie scm;
struct sock *other, *sk = socket->sk;
struct sk_buff *skb, *newskb = NULL, *tail = NULL;
@@ -1760,7 +1940,7 @@ alloc_skb:
newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
&err, 0);
if (!newskb)
- return err;
+ goto err;
}
/* we must acquire readlock as we modify already present
@@ -1769,12 +1949,12 @@ alloc_skb:
err = mutex_lock_interruptible(&unix_sk(other)->readlock);
if (err) {
err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
- send_sigpipe = false;
goto err;
}
if (sk->sk_shutdown & SEND_SHUTDOWN) {
err = -EPIPE;
+ send_sigpipe = true;
goto err_unlock;
}
@@ -1783,17 +1963,27 @@ alloc_skb:
if (sock_flag(other, SOCK_DEAD) ||
other->sk_shutdown & RCV_SHUTDOWN) {
err = -EPIPE;
+ send_sigpipe = true;
goto err_state_unlock;
}
+ if (init_scm) {
+ err = maybe_init_creds(&scm, socket, other);
+ if (err)
+ goto err_state_unlock;
+ init_scm = false;
+ }
+
skb = skb_peek_tail(&other->sk_receive_queue);
if (tail && tail == skb) {
skb = newskb;
- } else if (!skb) {
- if (newskb)
+ } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
+ if (newskb) {
skb = newskb;
- else
+ } else {
+ tail = skb;
goto alloc_skb;
+ }
} else if (newskb) {
/* this is fast path, we don't necessarily need to
* call to kfree_skb even though with newskb == NULL
@@ -1814,6 +2004,9 @@ alloc_skb:
atomic_add(size, &sk->sk_wmem_alloc);
if (newskb) {
+ err = unix_scm_to_skb(&scm, skb, false);
+ if (err)
+ goto err_state_unlock;
spin_lock(&other->sk_receive_queue.lock);
__skb_queue_tail(&other->sk_receive_queue, newskb);
spin_unlock(&other->sk_receive_queue.lock);
@@ -1823,7 +2016,7 @@ alloc_skb:
mutex_unlock(&unix_sk(other)->readlock);
other->sk_data_ready(other);
-
+ scm_destroy(&scm);
return size;
err_state_unlock:
@@ -1834,6 +2027,8 @@ err:
kfree_skb(newskb);
if (send_sigpipe && !(flags & MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
+ if (!init_scm)
+ scm_destroy(&scm);
return err;
}
@@ -1883,8 +2078,8 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
struct scm_cookie scm;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
- int noblock = flags & MSG_DONTWAIT;
- struct sk_buff *skb;
+ struct sk_buff *skb, *last;
+ long timeo;
int err;
int peeked, skip;
@@ -1892,30 +2087,38 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
if (flags&MSG_OOB)
goto out;
- err = mutex_lock_interruptible(&u->readlock);
- if (unlikely(err)) {
- /* recvmsg() in non blocking mode is supposed to return -EAGAIN
- * sk_rcvtimeo is not honored by mutex_lock_interruptible()
- */
- err = noblock ? -EAGAIN : -ERESTARTSYS;
- goto out;
- }
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
- skip = sk_peek_offset(sk, flags);
+ do {
+ mutex_lock(&u->readlock);
- skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
- if (!skb) {
+ skip = sk_peek_offset(sk, flags);
+ skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err,
+ &last);
+ if (skb)
+ break;
+
+ mutex_unlock(&u->readlock);
+
+ if (err != -EAGAIN)
+ break;
+ } while (timeo &&
+ !__skb_wait_for_more_packets(sk, &err, &timeo, last));
+
+ if (!skb) { /* implies readlock unlocked */
unix_state_lock(sk);
/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
(sk->sk_shutdown & RCV_SHUTDOWN))
err = 0;
unix_state_unlock(sk);
- goto out_unlock;
+ goto out;
}
- wake_up_interruptible_sync_poll(&u->peer_wait,
- POLLOUT | POLLWRNORM | POLLWRBAND);
+ if (wq_has_sleeper(&u->peer_wait))
+ wake_up_interruptible_sync_poll(&u->peer_wait,
+ POLLOUT | POLLWRNORM |
+ POLLWRBAND);
if (msg->msg_name)
unix_copy_addr(msg, skb->sk);
@@ -1967,7 +2170,6 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
out_free:
skb_free_datagram(sk, skb);
-out_unlock:
mutex_unlock(&u->readlock);
out:
return err;
@@ -1996,7 +2198,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
!timeo)
break;
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
unix_state_unlock(sk);
timeo = freezable_schedule_timeout(timeo);
unix_state_lock(sk);
@@ -2004,7 +2206,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
if (sock_flag(sk, SOCK_DEAD))
break;
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
}
finish_wait(sk_sleep(sk), &wait);
@@ -2137,10 +2339,7 @@ unlock:
if (check_creds) {
/* Never glue messages from different writers */
- if ((UNIXCB(skb).pid != scm.pid) ||
- !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
- !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
- !unix_secdata_eq(&scm, skb))
+ if (!unix_skb_scm_eq(skb, &scm))
break;
} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
/* Copy credentials */
@@ -2476,20 +2675,22 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
return mask;
writable = unix_writable(sk);
- other = unix_peer_get(sk);
- if (other) {
- if (unix_peer(other) != sk) {
- sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
- if (unix_recvq_full(other))
- writable = 0;
- }
- sock_put(other);
+ if (writable) {
+ unix_state_lock(sk);
+
+ other = unix_peer(sk);
+ if (other && unix_peer(other) != sk &&
+ unix_recvq_full(other) &&
+ unix_dgram_peer_wake_me(sk, other))
+ writable = 0;
+
+ unix_state_unlock(sk);
}
if (writable)
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
return mask;
}
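For reference, the comment at the top of the af_unix.c hunk describes the flow-control behaviour this rework keeps intact: a datagram client connected to a listener that is not connected back (syslog-style) only becomes writable once the listener's receive queue has room. A small userspace illustration of that condition; the path and the error handling are purely illustrative:

#include <poll.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static int send_log_record(const char *msg)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	struct pollfd pfd;
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	strncpy(addr.sun_path, "/dev/log", sizeof(addr.sun_path) - 1);
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto err;

	pfd.fd = fd;
	pfd.events = POLLOUT;
	/* Waits until the peer drains its receive queue, not merely until
	 * this socket's own send buffer has room. */
	if (poll(&pfd, 1, -1) <= 0 || !(pfd.revents & POLLOUT))
		goto err;
	if (write(fd, msg, strlen(msg)) < 0)
		goto err;
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}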
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
index 2ad46f39649f..1820e74a5752 100644
--- a/net/vmw_vsock/vmci_transport.h
+++ b/net/vmw_vsock/vmci_transport.h
@@ -121,7 +121,7 @@ struct vmci_transport {
u64 queue_pair_max_size;
u32 detach_sub_id;
union vmci_transport_notify notify;
- struct vmci_transport_notify_ops *notify_ops;
+ const struct vmci_transport_notify_ops *notify_ops;
struct list_head elem;
struct sock *sk;
spinlock_t lock; /* protects sk. */
diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c
index 9b7f207f2bee..fd8cf0214d51 100644
--- a/net/vmw_vsock/vmci_transport_notify.c
+++ b/net/vmw_vsock/vmci_transport_notify.c
@@ -661,7 +661,7 @@ static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
}
/* Socket control packet based operations. */
-struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = {
+const struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = {
vmci_transport_notify_pkt_socket_init,
vmci_transport_notify_pkt_socket_destruct,
vmci_transport_notify_pkt_poll_in,
diff --git a/net/vmw_vsock/vmci_transport_notify.h b/net/vmw_vsock/vmci_transport_notify.h
index 7df793249b6c..3c464d394a8f 100644
--- a/net/vmw_vsock/vmci_transport_notify.h
+++ b/net/vmw_vsock/vmci_transport_notify.h
@@ -77,7 +77,8 @@ struct vmci_transport_notify_ops {
void (*process_negotiate) (struct sock *sk);
};
-extern struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops;
-extern struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops;
+extern const struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops;
+extern const
+struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops;
#endif /* __VMCI_TRANSPORT_NOTIFY_H__ */
diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c
index dc9c7929a2f9..21e591dafb03 100644
--- a/net/vmw_vsock/vmci_transport_notify_qstate.c
+++ b/net/vmw_vsock/vmci_transport_notify_qstate.c
@@ -419,7 +419,7 @@ vmci_transport_notify_pkt_send_pre_enqueue(
}
/* Socket always on control packet based operations. */
-struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
+const struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
vmci_transport_notify_pkt_socket_init,
vmci_transport_notify_pkt_socket_destruct,
vmci_transport_notify_pkt_poll_in,
diff --git a/net/wireless/core.h b/net/wireless/core.h
index a618b4b86fa4..022ccad06cbe 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -416,13 +416,6 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
void cfg80211_process_wdev_events(struct wireless_dev *wdev);
-int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev,
- enum nl80211_iftype iftype,
- struct ieee80211_channel *chan,
- enum cfg80211_chan_mode chanmode,
- u8 radar_detect);
-
/**
* cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
* @wiphy: the wiphy to validate against
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index dc0e59e53dbf..6beab0cfcb99 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -311,8 +311,8 @@ static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
}
keyidx >>= 6;
if (key->key_idx != keyidx) {
- printk(KERN_DEBUG "CCMP: RX tkey->key_idx=%d frame "
- "keyidx=%d priv=%p\n", key->key_idx, keyidx, priv);
+ net_dbg_ratelimited("CCMP: RX tkey->key_idx=%d frame keyidx=%d\n",
+ key->key_idx, keyidx);
return -6;
}
if (!key->key_set) {
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 8c90ba79e56e..3cd819539241 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -434,8 +434,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
}
keyidx >>= 6;
if (tkey->key_idx != keyidx) {
- printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame "
- "keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv);
+ net_dbg_ratelimited("TKIP: RX tkey->key_idx=%d frame keyidx=%d\n",
+ tkey->key_idx, keyidx);
return -6;
}
if (!tkey->key_set) {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c71e274c810a..72de6989dd12 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4256,8 +4256,8 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
* station. Include these parameters here and will check them in
* cfg80211_check_station_change().
*/
- if (info->attrs[NL80211_ATTR_PEER_AID])
- params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]);
+ if (info->attrs[NL80211_ATTR_STA_AID])
+ params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
params.listen_interval =
@@ -4359,6 +4359,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
struct net_device *dev = info->user_ptr[1];
struct station_parameters params;
u8 *mac_addr = NULL;
+ u32 auth_assoc = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+ BIT(NL80211_STA_FLAG_ASSOCIATED);
memset(&params, 0, sizeof(params));
@@ -4470,10 +4472,23 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
/* allow authenticated/associated only if driver handles it */
if (!(rdev->wiphy.features &
NL80211_FEATURE_FULL_AP_CLIENT_STATE) &&
- params.sta_flags_mask &
- (BIT(NL80211_STA_FLAG_AUTHENTICATED) |
- BIT(NL80211_STA_FLAG_ASSOCIATED)))
- return -EINVAL;
+ params.sta_flags_mask & auth_assoc)
+ return -EINVAL;
+
+ /* Older userspace, or userspace wanting to be compatible with
+ * !NL80211_FEATURE_FULL_AP_CLIENT_STATE, will not set the auth
+ * and assoc flags in the mask, but assumes the station will be
+ * added as associated anyway since this was the required driver
+ * behaviour before NL80211_FEATURE_FULL_AP_CLIENT_STATE was
+ * introduced.
+ * In order to not bother drivers with this quirk in the API
+ * set the flags in both the mask and set for new stations in
+ * this case.
+ */
+ if (!(params.sta_flags_mask & auth_assoc)) {
+ params.sta_flags_mask |= auth_assoc;
+ params.sta_flags_set |= auth_assoc;
+ }
/* must be last in here for error handling */
params.vlan = get_vlan(info, rdev);
@@ -5997,6 +6012,24 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static int nl80211_abort_scan(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev = info->user_ptr[1];
+
+ if (!rdev->ops->abort_scan)
+ return -EOPNOTSUPP;
+
+ if (rdev->scan_msg)
+ return 0;
+
+ if (!rdev->scan_req)
+ return -ENOENT;
+
+ rdev_abort_scan(rdev, wdev);
+ return 0;
+}
+
static int
nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans,
struct cfg80211_sched_scan_request *request,
@@ -6507,8 +6540,7 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
if (WARN_ON(!cac_time_ms))
cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
- err = rdev->ops->start_radar_detection(&rdev->wiphy, dev, &chandef,
- cac_time_ms);
+ err = rdev_start_radar_detection(rdev, dev, &chandef, cac_time_ms);
if (!err) {
wdev->chandef = chandef;
wdev->cac_started = true;
@@ -7571,7 +7603,7 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info)
if (!nl80211_parse_mcast_rate(rdev, mcast_rate, nla_rate))
return -EINVAL;
- err = rdev->ops->set_mcast_rate(&rdev->wiphy, dev, mcast_rate);
+ err = rdev_set_mcast_rate(rdev, dev, mcast_rate);
return err;
}
@@ -9716,7 +9748,7 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NL80211_ATTR_COALESCE_RULE]) {
cfg80211_rdev_free_coalesce(rdev);
- rdev->ops->set_coalesce(&rdev->wiphy, NULL);
+ rdev_set_coalesce(rdev, NULL);
return 0;
}
@@ -9744,7 +9776,7 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
i++;
}
- err = rdev->ops->set_coalesce(&rdev->wiphy, &new_coalesce);
+ err = rdev_set_coalesce(rdev, &new_coalesce);
if (err)
goto error;
@@ -10946,6 +10978,14 @@ static const struct genl_ops nl80211_ops[] = {
NL80211_FLAG_NEED_RTNL,
},
{
+ .cmd = NL80211_CMD_ABORT_SCAN,
+ .doit = nl80211_abort_scan,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
.cmd = NL80211_CMD_GET_SCAN,
.policy = nl80211_policy,
.dumpit = nl80211_dump_scan,
diff --git a/net/wireless/ocb.c b/net/wireless/ocb.c
index c00d4a792319..e64dbf16330c 100644
--- a/net/wireless/ocb.c
+++ b/net/wireless/ocb.c
@@ -29,6 +29,9 @@ int __cfg80211_join_ocb(struct cfg80211_registered_device *rdev,
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
return -EOPNOTSUPP;
+ if (!rdev->ops->join_ocb)
+ return -EOPNOTSUPP;
+
if (WARN_ON(!setup->chandef.chan))
return -EINVAL;
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index c23516d0f807..8ae0c04f9fc7 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -427,6 +427,14 @@ static inline int rdev_scan(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline void rdev_abort_scan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev)
+{
+ trace_rdev_abort_scan(&rdev->wiphy, wdev);
+ rdev->ops->abort_scan(&rdev->wiphy, wdev);
+ trace_rdev_return_void(&rdev->wiphy);
+}
+
static inline int rdev_auth(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct cfg80211_auth_request *req)
@@ -1020,4 +1028,47 @@ rdev_tdls_cancel_channel_switch(struct cfg80211_registered_device *rdev,
trace_rdev_return_void(&rdev->wiphy);
}
+static inline int
+rdev_start_radar_detection(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_chan_def *chandef,
+ u32 cac_time_ms)
+{
+ int ret = -ENOTSUPP;
+
+ trace_rdev_start_radar_detection(&rdev->wiphy, dev, chandef,
+ cac_time_ms);
+ if (rdev->ops->start_radar_detection)
+ ret = rdev->ops->start_radar_detection(&rdev->wiphy, dev,
+ chandef, cac_time_ms);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
+static inline int
+rdev_set_mcast_rate(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ int mcast_rate[IEEE80211_NUM_BANDS])
+{
+ int ret = -ENOTSUPP;
+
+ trace_rdev_set_mcast_rate(&rdev->wiphy, dev, mcast_rate);
+ if (rdev->ops->set_mcast_rate)
+ ret = rdev->ops->set_mcast_rate(&rdev->wiphy, dev, mcast_rate);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
+static inline int
+rdev_set_coalesce(struct cfg80211_registered_device *rdev,
+ struct cfg80211_coalesce *coalesce)
+{
+ int ret = -ENOTSUPP;
+
+ trace_rdev_set_coalesce(&rdev->wiphy, coalesce);
+ if (rdev->ops->set_coalesce)
+ ret = rdev->ops->set_coalesce(&rdev->wiphy, coalesce);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
#endif /* __CFG80211_RDEV_OPS */
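The rdev_abort_scan() wrapper added above follows the same pattern as the other new wrappers here: trace entry, call the driver op, trace the return; nl80211_abort_scan() only reaches it after checking that rdev->ops->abort_scan is set. A hedged sketch of the driver side of the new op; all example_* names are hypothetical:

/* Hypothetical driver hook: abort_scan has a void return and is called
 * with the wiphy and wdev; the scan itself is still expected to complete
 * through the normal scan-done notification, marked as aborted.
 */
static void example_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev)
{
	struct example_priv *priv = wiphy_priv(wiphy);	/* hypothetical priv */

	example_fw_cancel_scan(priv);			/* hypothetical call */
}

static const struct cfg80211_ops example_cfg80211_ops = {
	/* ... existing ops ... */
	.abort_scan	= example_abort_scan,
};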
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2e8d6f39ed56..0a4f5481ab83 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1052,7 +1052,7 @@ static u32 map_regdom_flags(u32 rd_flags)
}
static const struct ieee80211_reg_rule *
-freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
+freq_reg_info_regd(u32 center_freq,
const struct ieee80211_regdomain *regd, u32 bw)
{
int i;
@@ -1097,7 +1097,7 @@ __freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw)
u32 bw;
for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
- reg_rule = freq_reg_info_regd(wiphy, center_freq, regd, bw);
+ reg_rule = freq_reg_info_regd(center_freq, regd, bw);
if (!IS_ERR(reg_rule))
return reg_rule;
}
@@ -1166,6 +1166,41 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
#endif
}
+static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd,
+ const struct ieee80211_reg_rule *reg_rule,
+ const struct ieee80211_channel *chan)
+{
+ const struct ieee80211_freq_range *freq_range = NULL;
+ u32 max_bandwidth_khz, bw_flags = 0;
+
+ freq_range = &reg_rule->freq_range;
+
+ max_bandwidth_khz = freq_range->max_bandwidth_khz;
+ /* Check if auto calculation requested */
+ if (reg_rule->flags & NL80211_RRF_AUTO_BW)
+ max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
+
+ /* If we get a reg_rule we can assume that at least 5Mhz fit */
+ if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+ MHZ_TO_KHZ(10)))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+ MHZ_TO_KHZ(20)))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+ if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(40))
+ bw_flags |= IEEE80211_CHAN_NO_HT40;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(80))
+ bw_flags |= IEEE80211_CHAN_NO_80MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(160))
+ bw_flags |= IEEE80211_CHAN_NO_160MHZ;
+ return bw_flags;
+}
+
/*
* Note that right now we assume the desired channel bandwidth
* is always 20 MHz for each individual channel (HT40 uses 20 MHz
@@ -1178,11 +1213,9 @@ static void handle_channel(struct wiphy *wiphy,
u32 flags, bw_flags = 0;
const struct ieee80211_reg_rule *reg_rule = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
- const struct ieee80211_freq_range *freq_range = NULL;
struct wiphy *request_wiphy = NULL;
struct regulatory_request *lr = get_last_request();
const struct ieee80211_regdomain *regd;
- u32 max_bandwidth_khz;
request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
@@ -1223,31 +1256,7 @@ static void handle_channel(struct wiphy *wiphy,
chan_reg_rule_print_dbg(regd, chan, reg_rule);
power_rule = &reg_rule->power_rule;
- freq_range = &reg_rule->freq_range;
-
- max_bandwidth_khz = freq_range->max_bandwidth_khz;
- /* Check if auto calculation requested */
- if (reg_rule->flags & NL80211_RRF_AUTO_BW)
- max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
-
- /* If we get a reg_rule we can assume that at least 5Mhz fit */
- if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
- MHZ_TO_KHZ(10)))
- bw_flags |= IEEE80211_CHAN_NO_10MHZ;
- if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
- MHZ_TO_KHZ(20)))
- bw_flags |= IEEE80211_CHAN_NO_20MHZ;
-
- if (max_bandwidth_khz < MHZ_TO_KHZ(10))
- bw_flags |= IEEE80211_CHAN_NO_10MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(20))
- bw_flags |= IEEE80211_CHAN_NO_20MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags |= IEEE80211_CHAN_NO_HT40;
- if (max_bandwidth_khz < MHZ_TO_KHZ(80))
- bw_flags |= IEEE80211_CHAN_NO_80MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(160))
- bw_flags |= IEEE80211_CHAN_NO_160MHZ;
+ bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan);
if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
request_wiphy && request_wiphy == wiphy &&
@@ -1760,13 +1769,10 @@ static void handle_channel_custom(struct wiphy *wiphy,
u32 bw_flags = 0;
const struct ieee80211_reg_rule *reg_rule = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
- const struct ieee80211_freq_range *freq_range = NULL;
- u32 max_bandwidth_khz;
u32 bw;
for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) {
- reg_rule = freq_reg_info_regd(wiphy,
- MHZ_TO_KHZ(chan->center_freq),
+ reg_rule = freq_reg_info_regd(MHZ_TO_KHZ(chan->center_freq),
regd, bw);
if (!IS_ERR(reg_rule))
break;
@@ -1787,31 +1793,7 @@ static void handle_channel_custom(struct wiphy *wiphy,
chan_reg_rule_print_dbg(regd, chan, reg_rule);
power_rule = &reg_rule->power_rule;
- freq_range = &reg_rule->freq_range;
-
- max_bandwidth_khz = freq_range->max_bandwidth_khz;
- /* Check if auto calculation requested */
- if (reg_rule->flags & NL80211_RRF_AUTO_BW)
- max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
-
- /* If we get a reg_rule we can assume that at least 5Mhz fit */
- if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
- MHZ_TO_KHZ(10)))
- bw_flags |= IEEE80211_CHAN_NO_10MHZ;
- if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
- MHZ_TO_KHZ(20)))
- bw_flags |= IEEE80211_CHAN_NO_20MHZ;
-
- if (max_bandwidth_khz < MHZ_TO_KHZ(10))
- bw_flags |= IEEE80211_CHAN_NO_10MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(20))
- bw_flags |= IEEE80211_CHAN_NO_20MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags |= IEEE80211_CHAN_NO_HT40;
- if (max_bandwidth_khz < MHZ_TO_KHZ(80))
- bw_flags |= IEEE80211_CHAN_NO_80MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(160))
- bw_flags |= IEEE80211_CHAN_NO_160MHZ;
+ bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan);
chan->dfs_state_entered = jiffies;
chan->dfs_state = NL80211_DFS_USABLE;
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 0c392d36781b..09b242b09bed 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -623,12 +623,24 @@ DECLARE_EVENT_CLASS(station_add_change,
__field(u32, sta_flags_set)
__field(u32, sta_modify_mask)
__field(int, listen_interval)
+ __field(u16, capability)
__field(u16, aid)
__field(u8, plink_action)
__field(u8, plink_state)
__field(u8, uapsd_queues)
+ __field(u8, max_sp)
+ __field(u8, opmode_notif)
+ __field(bool, opmode_notif_used)
__array(u8, ht_capa, (int)sizeof(struct ieee80211_ht_cap))
+ __array(u8, vht_capa, (int)sizeof(struct ieee80211_vht_cap))
__array(char, vlan, IFNAMSIZ)
+ __dynamic_array(u8, supported_rates,
+ params->supported_rates_len)
+ __dynamic_array(u8, ext_capab, params->ext_capab_len)
+ __dynamic_array(u8, supported_channels,
+ params->supported_channels_len)
+ __dynamic_array(u8, supported_oper_classes,
+ params->supported_oper_classes_len)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -646,9 +658,35 @@ DECLARE_EVENT_CLASS(station_add_change,
if (params->ht_capa)
memcpy(__entry->ht_capa, params->ht_capa,
sizeof(struct ieee80211_ht_cap));
+ memset(__entry->vht_capa, 0, sizeof(struct ieee80211_vht_cap));
+ if (params->vht_capa)
+ memcpy(__entry->vht_capa, params->vht_capa,
+ sizeof(struct ieee80211_vht_cap));
memset(__entry->vlan, 0, sizeof(__entry->vlan));
if (params->vlan)
memcpy(__entry->vlan, params->vlan->name, IFNAMSIZ);
+ if (params->supported_rates && params->supported_rates_len)
+ memcpy(__get_dynamic_array(supported_rates),
+ params->supported_rates,
+ params->supported_rates_len);
+ if (params->ext_capab && params->ext_capab_len)
+ memcpy(__get_dynamic_array(ext_capab),
+ params->ext_capab,
+ params->ext_capab_len);
+ if (params->supported_channels &&
+ params->supported_channels_len)
+ memcpy(__get_dynamic_array(supported_channels),
+ params->supported_channels,
+ params->supported_channels_len);
+ if (params->supported_oper_classes &&
+ params->supported_oper_classes_len)
+ memcpy(__get_dynamic_array(supported_oper_classes),
+ params->supported_oper_classes,
+ params->supported_oper_classes_len);
+ __entry->max_sp = params->max_sp;
+ __entry->capability = params->capability;
+ __entry->opmode_notif = params->opmode_notif;
+ __entry->opmode_notif_used = params->opmode_notif_used;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT
", station flags mask: %u, station flags set: %u, "
@@ -2818,6 +2856,71 @@ TRACE_EVENT(cfg80211_stop_iface,
WIPHY_PR_ARG, WDEV_PR_ARG)
);
+TRACE_EVENT(rdev_start_radar_detection,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_chan_def *chandef,
+ u32 cac_time_ms),
+ TP_ARGS(wiphy, netdev, chandef, cac_time_ms),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ CHAN_DEF_ENTRY
+ __field(u32, cac_time_ms)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ CHAN_DEF_ASSIGN(chandef);
+ __entry->cac_time_ms = cac_time_ms;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
+ ", cac_time_ms=%u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
+ __entry->cac_time_ms)
+);
+
+TRACE_EVENT(rdev_set_mcast_rate,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ int mcast_rate[IEEE80211_NUM_BANDS]),
+ TP_ARGS(wiphy, netdev, mcast_rate),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __array(int, mcast_rate, IEEE80211_NUM_BANDS)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ memcpy(__entry->mcast_rate, mcast_rate,
+ sizeof(int) * IEEE80211_NUM_BANDS);
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", "
+ "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]",
+ WIPHY_PR_ARG, NETDEV_PR_ARG,
+ __entry->mcast_rate[IEEE80211_BAND_2GHZ],
+ __entry->mcast_rate[IEEE80211_BAND_5GHZ],
+ __entry->mcast_rate[IEEE80211_BAND_60GHZ])
+);
+
+TRACE_EVENT(rdev_set_coalesce,
+ TP_PROTO(struct wiphy *wiphy, struct cfg80211_coalesce *coalesce),
+ TP_ARGS(wiphy, coalesce),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ __field(int, n_rules)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ __entry->n_rules = coalesce ? coalesce->n_rules : 0;
+ ),
+ TP_printk(WIPHY_PR_FMT ", n_rules=%d",
+ WIPHY_PR_ARG, __entry->n_rules)
+);
+
+DEFINE_EVENT(wiphy_wdev_evt, rdev_abort_scan,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+ TP_ARGS(wiphy, wdev)
+);
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
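
The new rdev_* trace events (and the DEFINE_EVENT reusing the wiphy_wdev_evt class for rdev_abort_scan) only define tracepoints; they are normally fired from the thin wrappers in net/wireless/rdev-ops.h that surround each driver callback. A sketch of that usual pattern for the abort_scan case, assumed rather than shown in this diff:

static inline void rdev_abort_scan(struct cfg80211_registered_device *rdev,
				   struct wireless_dev *wdev)
{
	trace_rdev_abort_scan(&rdev->wiphy, wdev);
	rdev->ops->abort_scan(&rdev->wiphy, wdev);
	trace_rdev_return_void(&rdev->wiphy);
}
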
diff --git a/net/wireless/util.c b/net/wireless/util.c
index baf7218cec15..92770427b211 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1325,13 +1325,6 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
}
EXPORT_SYMBOL(ieee80211_ie_split_ric);
-size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
- const u8 *ids, int n_ids, size_t offset)
-{
- return ieee80211_ie_split_ric(ies, ielen, ids, n_ids, NULL, 0, offset);
-}
-EXPORT_SYMBOL(ieee80211_ie_split);
-
bool ieee80211_operating_class_to_band(u8 operating_class,
enum ieee80211_band *band)
{
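
The removed ieee80211_ie_split() was only a convenience wrapper: splitting an IE buffer without RIC elements is the same as calling ieee80211_ie_split_ric() with an empty RIC ID set, and the function presumably lives on elsewhere in the tree (not visible in this diff). The equivalence, as a sketch:

static inline size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
					const u8 *ids, int n_ids,
					size_t offset)
{
	return ieee80211_ie_split_ric(ies, ielen, ids, n_ids, NULL, 0, offset);
}
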
@@ -1620,120 +1613,6 @@ int cfg80211_check_combinations(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_check_combinations);
-int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev,
- enum nl80211_iftype iftype,
- struct ieee80211_channel *chan,
- enum cfg80211_chan_mode chanmode,
- u8 radar_detect)
-{
- struct wireless_dev *wdev_iter;
- int num[NUM_NL80211_IFTYPES];
- struct ieee80211_channel
- *used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS];
- struct ieee80211_channel *ch;
- enum cfg80211_chan_mode chmode;
- int num_different_channels = 0;
- int total = 1;
- int i;
-
- ASSERT_RTNL();
-
- if (WARN_ON(hweight32(radar_detect) > 1))
- return -EINVAL;
-
- if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
- return -EINVAL;
-
- /* Always allow software iftypes */
- if (rdev->wiphy.software_iftypes & BIT(iftype)) {
- if (radar_detect)
- return -EINVAL;
- return 0;
- }
-
- memset(num, 0, sizeof(num));
- memset(used_channels, 0, sizeof(used_channels));
-
- num[iftype] = 1;
-
- /* TODO: We'll probably not need this anymore, since this
- * should only be called with CHAN_MODE_UNDEFINED. There are
- * still a couple of pending calls where other chanmodes are
- * used, but we should get rid of them.
- */
- switch (chanmode) {
- case CHAN_MODE_UNDEFINED:
- break;
- case CHAN_MODE_SHARED:
- WARN_ON(!chan);
- used_channels[0] = chan;
- num_different_channels++;
- break;
- case CHAN_MODE_EXCLUSIVE:
- num_different_channels++;
- break;
- }
-
- list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
- if (wdev_iter == wdev)
- continue;
- if (wdev_iter->iftype == NL80211_IFTYPE_P2P_DEVICE) {
- if (!wdev_iter->p2p_started)
- continue;
- } else if (wdev_iter->netdev) {
- if (!netif_running(wdev_iter->netdev))
- continue;
- } else {
- WARN_ON(1);
- }
-
- if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
- continue;
-
- /*
- * We may be holding the "wdev" mutex, but now need to lock
- * wdev_iter. This is OK because once we get here wdev_iter
- * is not wdev (tested above), but we need to use the nested
- * locking for lockdep.
- */
- mutex_lock_nested(&wdev_iter->mtx, 1);
- __acquire(wdev_iter->mtx);
- cfg80211_get_chan_state(wdev_iter, &ch, &chmode, &radar_detect);
- wdev_unlock(wdev_iter);
-
- switch (chmode) {
- case CHAN_MODE_UNDEFINED:
- break;
- case CHAN_MODE_SHARED:
- for (i = 0; i < CFG80211_MAX_NUM_DIFFERENT_CHANNELS; i++)
- if (!used_channels[i] || used_channels[i] == ch)
- break;
-
- if (i == CFG80211_MAX_NUM_DIFFERENT_CHANNELS)
- return -EBUSY;
-
- if (used_channels[i] == NULL) {
- used_channels[i] = ch;
- num_different_channels++;
- }
- break;
- case CHAN_MODE_EXCLUSIVE:
- num_different_channels++;
- break;
- }
-
- num[wdev_iter->iftype]++;
- total++;
- }
-
- if (total == 1 && !radar_detect)
- return 0;
-
- return cfg80211_check_combinations(&rdev->wiphy, num_different_channels,
- radar_detect, num);
-}
-
int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
const u8 *rates, unsigned int n_rates,
u32 *mask)
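
The hunk above also drops cfg80211_can_use_iftype_chan(), whose per-wdev channel walk ended in the cfg80211_check_combinations() call that remains exported. A hedged sketch of invoking the surviving helper directly, with illustrative values that are not taken from this diff (an rdev in scope is assumed):

	int num[NUM_NL80211_IFTYPES] = { 0 };
	int err;

	num[NL80211_IFTYPE_STATION] = 1;	/* e.g. one managed interface */
	err = cfg80211_check_combinations(&rdev->wiphy,
					  1,	/* num_different_channels */
					  0,	/* radar_detect */
					  num);
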