Diffstat (limited to 'net')
172 files changed, 5067 insertions, 2913 deletions
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 2f34bbdde0e8..cfca99e295b8 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -85,8 +85,10 @@ static void ax25_kill_by_device(struct net_device *dev)
 again:
 	ax25_for_each(s, &ax25_list) {
 		if (s->ax25_dev == ax25_dev) {
-			s->ax25_dev = NULL;
 			spin_unlock_bh(&ax25_list_lock);
+			lock_sock(s->sk);
+			s->ax25_dev = NULL;
+			release_sock(s->sk);
 			ax25_disconnect(s, ENETUNREACH);
 			spin_lock_bh(&ax25_list_lock);
 
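The ax25 hunk above moves the clearing of s->ax25_dev under lock_sock()/release_sock(), so code that dereferences the field while holding the socket lock can no longer race with device teardown. A minimal userspace sketch of the same discipline, with hypothetical names (struct conn, kill_by_device) rather than the kernel's:

/* Userspace analogue of the fix above: a field that readers dereference
 * under a per-object lock must also be cleared under that same lock,
 * not under the list lock alone. Hypothetical names; not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

struct conn {
	pthread_mutex_t lock;   /* plays the role of lock_sock()/release_sock() */
	const char *dev;        /* plays the role of s->ax25_dev */
};

static void kill_by_device(struct conn *c)
{
	/* Clearing c->dev under c->lock guarantees a reader holding
	 * c->lock sees either the old value or NULL, never a use-after-free
	 * window between the check and the teardown.
	 */
	pthread_mutex_lock(&c->lock);
	c->dev = NULL;
	pthread_mutex_unlock(&c->lock);
}

static void reader(struct conn *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->dev)
		printf("still bound to %s\n", c->dev);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct conn c = { PTHREAD_MUTEX_INITIALIZER, "ax0" };

	reader(&c);
	kill_by_device(&c);
	reader(&c);
	return 0;
}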
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index cd6e1cf7e396..04ebe901e86f 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -911,267 +911,45 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 		hci_enable_advertising(hdev);
 }
 
-static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
 {
-	struct hci_conn *conn;
+	struct hci_conn *conn = data;
 
 	hci_dev_lock(hdev);
 
-	conn = hci_lookup_le_connect(hdev);
-
-	if (hdev->adv_instance_cnt)
-		hci_req_resume_adv_instances(hdev);
-
-	if (!status) {
+	if (!err) {
 		hci_connect_le_scan_cleanup(conn);
 		goto done;
 	}
 
-	bt_dev_err(hdev, "request failed to create LE connection: "
-		   "status 0x%2.2x", status);
+	bt_dev_err(hdev, "request failed to create LE connection: err %d", err);
 
 	if (!conn)
 		goto done;
 
-	hci_le_conn_failed(conn, status);
+	hci_le_conn_failed(conn, err);
 
 done:
 	hci_dev_unlock(hdev);
 }
 
-static bool conn_use_rpa(struct hci_conn *conn)
-{
-	struct hci_dev *hdev = conn->hdev;
-
-	return hci_dev_test_flag(hdev, HCI_PRIVACY);
-}
-
-static void set_ext_conn_params(struct hci_conn *conn,
-				struct hci_cp_le_ext_conn_param *p)
-{
-	struct hci_dev *hdev = conn->hdev;
-
-	memset(p, 0, sizeof(*p));
-
-	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
-	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
-	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
-	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
-	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
-	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
-	p->min_ce_len = cpu_to_le16(0x0000);
-	p->max_ce_len = cpu_to_le16(0x0000);
-}
-
-static void hci_req_add_le_create_conn(struct hci_request *req,
-				       struct hci_conn *conn,
-				       bdaddr_t *direct_rpa)
-{
-	struct hci_dev *hdev = conn->hdev;
-	u8 own_addr_type;
-
-	/* If direct address was provided we use it instead of current
-	 * address.
-	 */
-	if (direct_rpa) {
-		if (bacmp(&req->hdev->random_addr, direct_rpa))
-			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
-				    direct_rpa);
-
-		/* direct address is always RPA */
-		own_addr_type = ADDR_LE_DEV_RANDOM;
-	} else {
-		/* Update random address, but set require_privacy to false so
-		 * that we never connect with an non-resolvable address.
-		 */
-		if (hci_update_random_address(req, false, conn_use_rpa(conn),
-					      &own_addr_type))
-			return;
-	}
-
-	if (use_ext_conn(hdev)) {
-		struct hci_cp_le_ext_create_conn *cp;
-		struct hci_cp_le_ext_conn_param *p;
-		u8 data[sizeof(*cp) + sizeof(*p) * 3];
-		u32 plen;
-
-		cp = (void *) data;
-		p = (void *) cp->data;
-
-		memset(cp, 0, sizeof(*cp));
-
-		bacpy(&cp->peer_addr, &conn->dst);
-		cp->peer_addr_type = conn->dst_type;
-		cp->own_addr_type = own_addr_type;
-
-		plen = sizeof(*cp);
-
-		if (scan_1m(hdev)) {
-			cp->phys |= LE_SCAN_PHY_1M;
-			set_ext_conn_params(conn, p);
-
-			p++;
-			plen += sizeof(*p);
-		}
-
-		if (scan_2m(hdev)) {
-			cp->phys |= LE_SCAN_PHY_2M;
-			set_ext_conn_params(conn, p);
-
-			p++;
-			plen += sizeof(*p);
-		}
-
-		if (scan_coded(hdev)) {
-			cp->phys |= LE_SCAN_PHY_CODED;
-			set_ext_conn_params(conn, p);
-
-			plen += sizeof(*p);
-		}
-
-		hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);
-
-	} else {
-		struct hci_cp_le_create_conn cp;
-
-		memset(&cp, 0, sizeof(cp));
-
-		cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
-		cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
-
-		bacpy(&cp.peer_addr, &conn->dst);
-		cp.peer_addr_type = conn->dst_type;
-		cp.own_address_type = own_addr_type;
-		cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
-		cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
-		cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
-		cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
-		cp.min_ce_len = cpu_to_le16(0x0000);
-		cp.max_ce_len = cpu_to_le16(0x0000);
-
-		hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
-	}
-
-	conn->state = BT_CONNECT;
-	clear_bit(HCI_CONN_SCANNING, &conn->flags);
-}
-
-static void hci_req_directed_advertising(struct hci_request *req,
-					 struct hci_conn *conn)
+static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
 {
-	struct hci_dev *hdev = req->hdev;
-	u8 own_addr_type;
-	u8 enable;
-
-	if (ext_adv_capable(hdev)) {
-		struct hci_cp_le_set_ext_adv_params cp;
-		bdaddr_t random_addr;
-
-		/* Set require_privacy to false so that the remote device has a
-		 * chance of identifying us.
-		 */
-		if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
-					   &own_addr_type, &random_addr) < 0)
-			return;
-
-		memset(&cp, 0, sizeof(cp));
-
-		cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
-		cp.own_addr_type = own_addr_type;
-		cp.channel_map = hdev->le_adv_channel_map;
-		cp.tx_power = HCI_TX_POWER_INVALID;
-		cp.primary_phy = HCI_ADV_PHY_1M;
-		cp.secondary_phy = HCI_ADV_PHY_1M;
-		cp.handle = 0; /* Use instance 0 for directed adv */
-		cp.own_addr_type = own_addr_type;
-		cp.peer_addr_type = conn->dst_type;
-		bacpy(&cp.peer_addr, &conn->dst);
-
-		/* As per Core Spec 5.2 Vol 2, PART E, Sec 7.8.53, for
-		 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
-		 * does not supports advertising data when the advertising set already
-		 * contains some, the controller shall return erroc code 'Invalid
-		 * HCI Command Parameters(0x12).
-		 * So it is required to remove adv set for handle 0x00. since we use
-		 * instance 0 for directed adv.
-		 */
-		__hci_req_remove_ext_adv_instance(req, cp.handle);
-
-		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
+	struct hci_conn *conn = data;
 
-		if (own_addr_type == ADDR_LE_DEV_RANDOM &&
-		    bacmp(&random_addr, BDADDR_ANY) &&
-		    bacmp(&random_addr, &hdev->random_addr)) {
-			struct hci_cp_le_set_adv_set_rand_addr cp;
-
-			memset(&cp, 0, sizeof(cp));
-
-			cp.handle = 0;
-			bacpy(&cp.bdaddr, &random_addr);
-
-			hci_req_add(req,
-				    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
-				    sizeof(cp), &cp);
-		}
-
-		__hci_req_enable_ext_advertising(req, 0x00);
-	} else {
-		struct hci_cp_le_set_adv_param cp;
+	bt_dev_dbg(hdev, "conn %p", conn);
 
-		/* Clear the HCI_LE_ADV bit temporarily so that the
-		 * hci_update_random_address knows that it's safe to go ahead
-		 * and write a new random address. The flag will be set back on
-		 * as soon as the SET_ADV_ENABLE HCI command completes.
-		 */
-		hci_dev_clear_flag(hdev, HCI_LE_ADV);
-
-		/* Set require_privacy to false so that the remote device has a
-		 * chance of identifying us.
-		 */
-		if (hci_update_random_address(req, false, conn_use_rpa(conn),
-					      &own_addr_type) < 0)
-			return;
-
-		memset(&cp, 0, sizeof(cp));
-
-		/* Some controllers might reject command if intervals are not
-		 * within range for undirected advertising.
-		 * BCM20702A0 is known to be affected by this.
-		 */
-		cp.min_interval = cpu_to_le16(0x0020);
-		cp.max_interval = cpu_to_le16(0x0020);
-
-		cp.type = LE_ADV_DIRECT_IND;
-		cp.own_address_type = own_addr_type;
-		cp.direct_addr_type = conn->dst_type;
-		bacpy(&cp.direct_addr, &conn->dst);
-		cp.channel_map = hdev->le_adv_channel_map;
-
-		hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
-
-		enable = 0x01;
-		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
-			    &enable);
-	}
-
-	conn->state = BT_CONNECT;
+	return hci_le_create_conn_sync(hdev, conn);
 }
 
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 				u8 dst_type, bool dst_resolved, u8 sec_level,
-				u16 conn_timeout, u8 role, bdaddr_t *direct_rpa)
+				u16 conn_timeout, u8 role)
 {
-	struct hci_conn_params *params;
 	struct hci_conn *conn;
 	struct smp_irk *irk;
-	struct hci_request req;
 	int err;
 
-	/* This ensures that during disable le_scan address resolution
-	 * will not be disabled if it is followed by le_create_conn
-	 */
-	bool rpa_le_conn = true;
-
 	/* Let's make sure that le is enabled.*/
 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
 		if (lmp_le_capable(hdev))
@@ -1230,68 +1008,13 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 	conn->sec_level = BT_SECURITY_LOW;
 	conn->conn_timeout = conn_timeout;
 
-	hci_req_init(&req, hdev);
-
-	/* Disable advertising if we're active. For central role
-	 * connections most controllers will refuse to connect if
-	 * advertising is enabled, and for peripheral role connections we
-	 * anyway have to disable it in order to start directed
-	 * advertising. Any registered advertisements will be
-	 * re-enabled after the connection attempt is finished.
-	 */
-	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
-		__hci_req_pause_adv_instances(&req);
-
-	/* If requested to connect as peripheral use directed advertising */
-	if (conn->role == HCI_ROLE_SLAVE) {
-		/* If we're active scanning most controllers are unable
-		 * to initiate advertising. Simply reject the attempt.
-		 */
-		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
-		    hdev->le_scan_type == LE_SCAN_ACTIVE) {
-			hci_req_purge(&req);
-			hci_conn_del(conn);
-			return ERR_PTR(-EBUSY);
-		}
-
-		hci_req_directed_advertising(&req, conn);
-		goto create_conn;
-	}
-
-	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
-	if (params) {
-		conn->le_conn_min_interval = params->conn_min_interval;
-		conn->le_conn_max_interval = params->conn_max_interval;
-		conn->le_conn_latency = params->conn_latency;
-		conn->le_supv_timeout = params->supervision_timeout;
-	} else {
-		conn->le_conn_min_interval = hdev->le_conn_min_interval;
-		conn->le_conn_max_interval = hdev->le_conn_max_interval;
-		conn->le_conn_latency = hdev->le_conn_latency;
-		conn->le_supv_timeout = hdev->le_supv_timeout;
-	}
-
-	/* If controller is scanning, we stop it since some controllers are
-	 * not able to scan and connect at the same time. Also set the
-	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
-	 * handler for scan disabling knows to set the correct discovery
-	 * state.
-	 */
-	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
-		hci_req_add_le_scan_disable(&req, rpa_le_conn);
-		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
-	}
-
-	hci_req_add_le_create_conn(&req, conn, direct_rpa);
+	conn->state = BT_CONNECT;
+	clear_bit(HCI_CONN_SCANNING, &conn->flags);
 
-create_conn:
-	err = hci_req_run(&req, create_le_conn_complete);
+	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
+				 create_le_conn_complete);
 	if (err) {
 		hci_conn_del(conn);
-
-		if (hdev->adv_instance_cnt)
-			hci_req_resume_adv_instances(hdev);
-
 		return ERR_PTR(err);
 	}
 
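The hci_conn.c hunk above replaces a hand-built hci_request with hci_cmd_sync_queue(): a work function runs on the command-sync machinery and a completion callback then receives an integer error instead of a raw HCI status/opcode pair. A rough userspace sketch of that shape, using hypothetical names (struct sync_work, sync_queue_run), not the kernel API:

/* Queue a work function plus a completion callback that sees its result. */
#include <stdio.h>

struct sync_work {
	int  (*work_fn)(void *data);              /* e.g. hci_connect_le_sync */
	void (*complete_fn)(void *data, int err); /* e.g. create_le_conn_complete */
	void *data;                               /* e.g. the hci_conn */
};

static void sync_queue_run(struct sync_work *w)
{
	/* In the kernel this runs from a dedicated workqueue; here we run
	 * it inline just to show the control flow.
	 */
	int err = w->work_fn(w->data);

	if (w->complete_fn)
		w->complete_fn(w->data, err);
}

static int connect_work(void *data)
{
	printf("connecting %s\n", (const char *)data);
	return 0; /* 0 on success, negative errno-style value on failure */
}

static void connect_done(void *data, int err)
{
	printf("%s: err %d\n", (const char *)data, err);
}

int main(void)
{
	struct sync_work w = { connect_work, connect_done, "hci0" };

	sync_queue_run(&w);
	return 0;
}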
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index fdc0dcf8ee36..2b7bd3655b07 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2153,7 +2153,7 @@ int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
 
 	bacpy(&entry->bdaddr, bdaddr);
 	entry->bdaddr_type = type;
-	entry->current_flags = flags;
+	bitmap_from_u64(entry->flags, flags);
 
 	list_add(&entry->list, list);
 
@@ -2629,6 +2629,12 @@ int hci_register_dev(struct hci_dev *hdev)
 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
 
+	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
+	 * callback.
+	 */
+	if (hdev->wakeup)
+		set_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, hdev->conn_flags);
+
 	hci_sock_dev_event(hdev, HCI_DEV_REG);
 	hci_dev_hold(hdev);
 
@@ -2906,7 +2912,7 @@ int hci_unregister_cb(struct hci_cb *cb)
 }
 EXPORT_SYMBOL(hci_unregister_cb);
 
-static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	int err;
 
@@ -2929,14 +2935,17 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 
 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
 		kfree_skb(skb);
-		return;
+		return -EINVAL;
 	}
 
 	err = hdev->send(hdev, skb);
 	if (err < 0) {
 		bt_dev_err(hdev, "sending frame failed (%d)", err);
 		kfree_skb(skb);
+		return err;
 	}
+
+	return 0;
 }
 
 /* Send HCI command */
@@ -3843,10 +3852,15 @@ static void hci_cmd_work(struct work_struct *work)
 
 		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
 		if (hdev->sent_cmd) {
+			int res;
 			if (hci_req_status_pend(hdev))
 				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
 			atomic_dec(&hdev->cmd_cnt);
-			hci_send_frame(hdev, skb);
+
+			res = hci_send_frame(hdev, skb);
+			if (res < 0)
+				__hci_cmd_sync_cancel(hdev, -res);
+
 			if (test_bit(HCI_RESET, &hdev->flags))
 				cancel_delayed_work(&hdev->cmd_timer);
 			else
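Two details in the hci_core.c hunks above: per-entry flags move from a plain integer (entry->current_flags) to a bitmap filled via bitmap_from_u64(), and hci_send_frame() now returns an error that hci_cmd_work() uses to cancel a pending sync command. The sketch below illustrates only the first point, the integer-to-bitmap conversion, as plain userspace C with a hypothetical helper name (bitmap_from_u64_sketch); the kernel's real helpers live in <linux/bitmap.h>:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_REMOTE_WAKEUP 0
#define FLAG_DEVICE_PRIVACY 1
#define NBITS 2
#define LONG_BITS (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + LONG_BITS - 1) / LONG_BITS)

struct entry {
	unsigned long flags[BITS_TO_LONGS(NBITS)]; /* grows past 32/64 bits */
};

/* Copy the low nbits of a u64 mask into a long-array bitmap. */
static void bitmap_from_u64_sketch(unsigned long *dst, uint64_t mask,
				   size_t nbits)
{
	for (size_t i = 0; i < nbits; i++)
		if (mask & ((uint64_t)1 << i))
			dst[i / LONG_BITS] |= 1UL << (i % LONG_BITS);
}

int main(void)
{
	struct entry e = { { 0 } };

	bitmap_from_u64_sketch(e.flags, 1u << FLAG_REMOTE_WAKEUP, NBITS);
	printf("remote wakeup: %lu\n",
	       (e.flags[0] >> FLAG_REMOTE_WAKEUP) & 1);
	return 0;
}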
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index efc5458b1345..f1082b7c0218 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -45,12 +45,48 @@
 
 /* Handle HCI Event packets */
 
-static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
-				  u8 *new_status)
+static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
+			     u8 ev, size_t len)
 {
-	__u8 status = *((__u8 *) skb->data);
+	void *data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	data = skb_pull_data(skb, len);
+	if (!data)
+		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
+
+	return data;
+}
+
+static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
+			     u16 op, size_t len)
+{
+	void *data;
+
+	data = skb_pull_data(skb, len);
+	if (!data)
+		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
+
+	return data;
+}
+
+static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
+				u8 ev, size_t len)
+{
+	void *data;
+
+	data = skb_pull_data(skb, len);
+	if (!data)
+		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
+
+	return data;
+}
+
+static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
+				struct sk_buff *skb)
+{
+	struct hci_ev_status *rp = data;
+
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	/* It is possible that we receive Inquiry Complete event right
 	 * before we receive Inquiry Cancel Command Complete event, in
@@ -59,15 +95,13 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
 	 * we actually achieve what Inquiry Cancel wants to achieve,
 	 * which is to end the last Inquiry session.
 	 */
-	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
+	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
 		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
-		status = 0x00;
+		rp->status = 0x00;
 	}
 
-	*new_status = status;
-
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	clear_bit(HCI_INQUIRY, &hdev->flags);
 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
@@ -83,49 +117,62 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
 
 	hci_dev_unlock(hdev);
 
 	hci_conn_check_pending(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
+			      struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
+
+	return rp->status;
 }
 
-static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
+				   struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
 
 	hci_conn_check_pending(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
-					  struct sk_buff *skb)
+static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
+					struct sk_buff *skb)
 {
-	BT_DBG("%s", hdev->name);
+	struct hci_ev_status *rp = data;
+
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+	return rp->status;
 }
 
-static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
+				struct sk_buff *skb)
 {
-	struct hci_rp_role_discovery *rp = (void *) skb->data;
+	struct hci_rp_role_discovery *rp = data;
 	struct hci_conn *conn;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
@@ -134,17 +181,20 @@ static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
 		conn->role = rp->role;
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
+				  struct sk_buff *skb)
 {
-	struct hci_rp_read_link_policy *rp = (void *) skb->data;
+	struct hci_rp_read_link_policy *rp = data;
 	struct hci_conn *conn;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
@@ -153,22 +203,25 @@ static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
 		conn->link_policy = __le16_to_cpu(rp->policy);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
+				   struct sk_buff *skb)
 {
-	struct hci_rp_write_link_policy *rp = (void *) skb->data;
+	struct hci_rp_write_link_policy *rp = data;
 	struct hci_conn *conn;
 	void *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
 	if (!sent)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
@@ -177,49 +230,55 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
 		conn->link_policy = get_unaligned_le16(sent + 2);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
-					struct sk_buff *skb)
+static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
+				      struct sk_buff *skb)
 {
-	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
+	struct hci_rp_read_def_link_policy *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hdev->link_policy = __le16_to_cpu(rp->policy);
+
+	return rp->status;
 }
 
-static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
-					 struct sk_buff *skb)
+static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
+				       struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	void *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
 	if (!sent)
-		return;
+		return rp->status;
 
 	hdev->link_policy = get_unaligned_le16(sent);
+
+	return rp->status;
 }
 
-static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	clear_bit(HCI_RESET, &hdev->flags);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	/* Reset all non-persistent flags */
 	hci_dev_clear_volatile_flags(hdev);
@@ -241,91 +300,104 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
 
 	hci_bdaddr_list_clear(&hdev->le_accept_list);
 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
+
+	return rp->status;
 }
 
-static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
-					struct sk_buff *skb)
+static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
+				      struct sk_buff *skb)
 {
-	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
+	struct hci_rp_read_stored_link_key *rp = data;
 	struct hci_cp_read_stored_link_key *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
 	if (!sent)
-		return;
+		return rp->status;
 
 	if (!rp->status && sent->read_all == 0x01) {
-		hdev->stored_max_keys = rp->max_keys;
-		hdev->stored_num_keys = rp->num_keys;
+		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
+		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
 	}
+
+	return rp->status;
 }
 
-static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
-					  struct sk_buff *skb)
+static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
+					struct sk_buff *skb)
 {
-	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
+	struct hci_rp_delete_stored_link_key *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	if (rp->num_keys <= hdev->stored_num_keys)
-		hdev->stored_num_keys -= rp->num_keys;
+		hdev->stored_num_keys -= le16_to_cpu(rp->num_keys);
 	else
 		hdev->stored_num_keys = 0;
+
+	return rp->status;
 }
 
-static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
+				  struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	void *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
 	if (!sent)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
 	if (hci_dev_test_flag(hdev, HCI_MGMT))
-		mgmt_set_local_name_complete(hdev, sent, status);
-	else if (!status)
+		mgmt_set_local_name_complete(hdev, sent, rp->status);
+	else if (!rp->status)
 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
+				 struct sk_buff *skb)
 {
-	struct hci_rp_read_local_name *rp = (void *) skb->data;
+	struct hci_rp_read_local_name *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 	    hci_dev_test_flag(hdev, HCI_CONFIG))
 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
+
+	return rp->status;
 }
 
-static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
+				   struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	void *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
 	if (!sent)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
-	if (!status) {
+	if (!rp->status) {
 		__u8 param = *((__u8 *) sent);
 
 		if (param == AUTH_ENABLED)
@@ -335,25 +407,28 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
 	}
 
 	if (hci_dev_test_flag(hdev, HCI_MGMT))
-		mgmt_auth_enable_complete(hdev, status);
+		mgmt_auth_enable_complete(hdev, rp->status);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
+				    struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	__u8 param;
 	void *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
 	if (!sent)
-		return;
+		return rp->status;
 
 	param = *((__u8 *) sent);
 
@@ -361,25 +436,28 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
 		set_bit(HCI_ENCRYPT, &hdev->flags);
 	else
 		clear_bit(HCI_ENCRYPT, &hdev->flags);
+
+	return rp->status;
 }
 
-static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
+				   struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	__u8 param;
 	void *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 	if (!sent)
-		return;
+		return rp->status;
 
 	param = *((__u8 *) sent);
 
 	hci_dev_lock(hdev);
 
-	if (status) {
+	if (rp->status) {
 		hdev->discov_timeout = 0;
 		goto done;
 	}
@@ -396,22 +474,25 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
 
 done:
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
+				  struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *)skb->data);
+	struct hci_ev_status *rp = data;
 	struct hci_cp_set_event_filter *cp;
 	void *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
 	if (!sent)
-		return;
+		return rp->status;
 
 	cp = (struct hci_cp_set_event_filter *)sent;
 
@@ -419,133 +500,149 @@ static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
 	else
 		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
+
+	return rp->status;
 }
 
-static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
+				   struct sk_buff *skb)
 {
-	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
+	struct hci_rp_read_class_of_dev *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	memcpy(hdev->dev_class, rp->dev_class, 3);
 
-	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
-	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
+	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
+		   hdev->dev_class[1], hdev->dev_class[0]);
+
+	return rp->status;
 }
 
-static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
+				    struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	void *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
 	if (!sent)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
-	if (status == 0)
+	if (!rp->status)
 		memcpy(hdev->dev_class, sent, 3);
 
 	if (hci_dev_test_flag(hdev, HCI_MGMT))
-		mgmt_set_class_of_dev_complete(hdev, sent, status);
+		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
+				    struct sk_buff *skb)
 {
-	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
+	struct hci_rp_read_voice_setting *rp = data;
 	__u16 setting;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	setting = __le16_to_cpu(rp->voice_setting);
 
 	if (hdev->voice_setting == setting)
-		return;
+		return rp->status;
 
 	hdev->voice_setting = setting;
 
-	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
+	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
 
 	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
+
+	return rp->status;
 }
 
-static void hci_cc_write_voice_setting(struct hci_dev *hdev,
-				       struct sk_buff *skb)
+static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
+				     struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	__u16 setting;
 	void *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
 	if (!sent)
-		return;
+		return rp->status;
 
 	setting = get_unaligned_le16(sent);
 
 	if (hdev->voice_setting == setting)
-		return;
+		return rp->status;
 
 	hdev->voice_setting = setting;
 
-	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
+	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
 
 	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
+
+	return rp->status;
 }
 
-static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
-					  struct sk_buff *skb)
+static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
+					struct sk_buff *skb)
 {
-	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
+	struct hci_rp_read_num_supported_iac *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hdev->num_iac = rp->num_iac;
 
-	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
+	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
+
+	return rp->status;
 }
 
-static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
+				struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	struct hci_cp_write_ssp_mode *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
 	if (!sent)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
-	if (!status) {
+	if (!rp->status) {
 		if (sent->mode)
 			hdev->features[1][0] |= LMP_HOST_SSP;
 		else
 			hdev->features[1][0] &= ~LMP_HOST_SSP;
 	}
 
-	if (!status) {
+	if (!rp->status) {
 		if (sent->mode)
 			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
 		else
@@ -553,29 +650,32 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
 	}
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
+				  struct sk_buff *skb)
 {
-	u8 status = *((u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	struct hci_cp_write_sc_support *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
 	if (!sent)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
-	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}
 
-	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
+	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
 		if (sent->support)
 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
 		else
@@ -583,16 +683,19 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
 	}
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
+				    struct sk_buff *skb)
 {
-	struct hci_rp_read_local_version *rp = (void *) skb->data;
+	struct hci_rp_read_local_version *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
@@ -602,33 +705,37 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
 	}
+
+	return rp->status;
 }
 
-static void hci_cc_read_local_commands(struct hci_dev *hdev,
-				       struct sk_buff *skb)
+static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
+				     struct sk_buff *skb)
 {
-	struct hci_rp_read_local_commands *rp = (void *) skb->data;
+	struct hci_rp_read_local_commands *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 	    hci_dev_test_flag(hdev, HCI_CONFIG))
 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
+
+	return rp->status;
 }
 
-static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
-					     struct sk_buff *skb)
+static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
+					   struct sk_buff *skb)
 {
-	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
+	struct hci_rp_read_auth_payload_to *rp = data;
 	struct hci_conn *conn;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
@@ -637,23 +744,25 @@ static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
-					      struct sk_buff *skb)
+static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
+					    struct sk_buff *skb)
 {
-	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
+	struct hci_rp_write_auth_payload_to *rp = data;
 	struct hci_conn *conn;
 	void *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
 	if (!sent)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
@@ -662,17 +771,19 @@ static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
 		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_read_local_features(struct hci_dev *hdev,
-				       struct sk_buff *skb)
+static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
+				     struct sk_buff *skb)
 {
-	struct hci_rp_read_local_features *rp = (void *) skb->data;
+	struct hci_rp_read_local_features *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	memcpy(hdev->features, rp->features, 8);
 
@@ -712,46 +823,53 @@ static void hci_cc_read_local_features(struct hci_dev *hdev,
 
 	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
+
+	return rp->status;
 }
 
-static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
-					   struct sk_buff *skb)
+static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
+					 struct sk_buff *skb)
 {
-	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
+	struct hci_rp_read_local_ext_features *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	if (hdev->max_page < rp->max_page)
 		hdev->max_page = rp->max_page;
 
 	if (rp->page < HCI_MAX_PAGES)
 		memcpy(hdev->features[rp->page], rp->features, 8);
+
+	return rp->status;
 }
 
-static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
-					  struct sk_buff *skb)
+static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
+					struct sk_buff *skb)
 {
-	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
+	struct hci_rp_read_flow_control_mode *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hdev->flow_ctl_mode = rp->mode;
+
+	return rp->status;
 }
 
-static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
+				  struct sk_buff *skb)
 {
-	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
+	struct hci_rp_read_buffer_size *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
 	hdev->sco_mtu = rp->sco_mtu;
@@ -768,115 +886,130 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
 
 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
+
+	return rp->status;
 }
 
-static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
+			      struct sk_buff *skb)
 {
-	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
+	struct hci_rp_read_bd_addr *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	if (test_bit(HCI_INIT, &hdev->flags))
 		bacpy(&hdev->bdaddr, &rp->bdaddr);
 
 	if (hci_dev_test_flag(hdev, HCI_SETUP))
 		bacpy(&hdev->setup_addr, &rp->bdaddr);
+
+	return rp->status;
 }
 
-static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
-					   struct sk_buff *skb)
+static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
+					 struct sk_buff *skb)
 {
-	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
+	struct hci_rp_read_local_pairing_opts *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
 		hdev->pairing_opts = rp->pairing_opts;
 		hdev->max_enc_key_size = rp->max_key_size;
 	}
+
+	return rp->status;
 }
 
-static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
-					   struct sk_buff *skb)
+static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
+					 struct sk_buff *skb)
 {
-	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
+	struct hci_rp_read_page_scan_activity *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	if (test_bit(HCI_INIT, &hdev->flags)) {
 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
 		hdev->page_scan_window = __le16_to_cpu(rp->window);
 	}
+
+	return rp->status;
 }
 
-static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
-					    struct sk_buff *skb)
+static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
+					  struct sk_buff *skb)
 {
-	u8 status = *((u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	struct hci_cp_write_page_scan_activity *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
 	if (!sent)
-		return;
+		return rp->status;
 
 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
 	hdev->page_scan_window = __le16_to_cpu(sent->window);
+
+	return rp->status;
 }
 
-static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
-				       struct sk_buff *skb)
+static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
+				     struct sk_buff *skb)
 {
-	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
+	struct hci_rp_read_page_scan_type *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	if (test_bit(HCI_INIT, &hdev->flags))
 		hdev->page_scan_type = rp->type;
+
+	return rp->status;
 }
 
-static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
-					struct sk_buff *skb)
+static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
+				      struct sk_buff *skb)
 {
-	u8 status = *((u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	u8 *type;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
 	if (type)
 		hdev->page_scan_type = *type;
+
+	return rp->status;
 }
 
-static void hci_cc_read_data_block_size(struct hci_dev *hdev,
-					struct sk_buff *skb)
+static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
+				      struct sk_buff *skb)
 {
-	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
+	struct hci_rp_read_data_block_size *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
 	hdev->block_len = __le16_to_cpu(rp->block_len);
@@ -886,21 +1019,21 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
 
 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
 	       hdev->block_cnt, hdev->block_len);
+
+	return rp->status;
 }
 
-static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
+			    struct sk_buff *skb)
 {
-	struct hci_rp_read_clock *rp = (void *) skb->data;
+	struct hci_rp_read_clock *rp = data;
 	struct hci_cp_read_clock *cp;
 	struct hci_conn *conn;
 
-	BT_DBG("%s", hdev->name);
-
-	if (skb->len < sizeof(*rp))
-		return;
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
@@ -921,17 +1054,18 @@ static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
 
 unlock:
 	hci_dev_unlock(hdev);
+	return rp->status;
 }
 
-static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
-				       struct sk_buff *skb)
+static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
+				     struct sk_buff *skb)
 {
-	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
+	struct hci_rp_read_local_amp_info *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hdev->amp_status = rp->amp_status;
 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
@@ -943,59 +1077,68 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
+
+	return rp->status;
 }
 
-static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
-					 struct sk_buff *skb)
+static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
+				       struct sk_buff *skb)
 {
-	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
+	struct hci_rp_read_inq_rsp_tx_power *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hdev->inq_tx_power = rp->tx_power;
+
+	return rp->status;
 }
 
-static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
-						struct sk_buff *skb)
+static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
+					     struct sk_buff *skb)
 {
-	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
+	struct hci_rp_read_def_err_data_reporting *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hdev->err_data_reporting = rp->err_data_reporting;
+
+	return rp->status;
 }
 
-static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
-						struct sk_buff *skb)
+static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
+					      struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *)skb->data);
+	struct hci_ev_status *rp = data;
 	struct hci_cp_write_def_err_data_reporting *cp;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
 	if (!cp)
-		return;
+		return rp->status;
 
 	hdev->err_data_reporting = cp->err_data_reporting;
+
+	return rp->status;
 }
 
-static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
+				struct sk_buff *skb)
 {
-	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
+	struct hci_rp_pin_code_reply *rp = data;
 	struct hci_cp_pin_code_reply *cp;
 	struct hci_conn *conn;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	hci_dev_lock(hdev);
 
@@ -1015,13 +1158,15 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
 
 unlock:
 	hci_dev_unlock(hdev);
+	return rp->status;
 }
 
-static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
+				    struct sk_buff *skb)
 {
-	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
+	struct hci_rp_pin_code_neg_reply *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	hci_dev_lock(hdev);
 
@@ -1030,17 +1175,19 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
 				 rp->status);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
-				       struct sk_buff *skb)
+static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
+				     struct sk_buff *skb)
 {
-	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
+	struct hci_rp_le_read_buffer_size *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
 	hdev->le_pkts = rp->le_max_pkt;
@@ -1048,39 +1195,46 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
 	hdev->le_cnt = hdev->le_pkts;
 
 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
+
+	return rp->status;
 }
 
-static void hci_cc_le_read_local_features(struct hci_dev *hdev,
-					  struct sk_buff *skb)
+static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
+					struct sk_buff *skb)
 {
-	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
+	struct hci_rp_le_read_local_features *rp = data;
 
 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	memcpy(hdev->le_features, rp->features, 8);
+
+	return rp->status;
 }
 
-static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
-					struct sk_buff *skb)
+static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
+				      struct sk_buff *skb)
 {
-	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
+	struct hci_rp_le_read_adv_tx_power *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hdev->adv_tx_power = rp->tx_power;
+
+	return rp->status;
 }
 
-static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
+				    struct sk_buff *skb)
 {
-	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+	struct hci_rp_user_confirm_reply *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	hci_dev_lock(hdev);
 
@@ -1089,14 +1243,16 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
 				 rp->status);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
-					  struct sk_buff *skb)
+static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
+					struct sk_buff *skb)
 {
-	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+	struct hci_rp_user_confirm_reply *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	hci_dev_lock(hdev);
 
@@ -1105,13 +1261,16 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
 					 ACL_LINK, 0, rp->status);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
+				    struct sk_buff *skb)
 {
-	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+	struct hci_rp_user_confirm_reply *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	hci_dev_lock(hdev);
 
@@ -1120,14 +1279,16 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
 				 0, rp->status);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
-					  struct sk_buff *skb)
+static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
+					struct sk_buff *skb)
 {
-	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+	struct hci_rp_user_confirm_reply *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	hci_dev_lock(hdev);
 
@@ -1136,37 +1297,44 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
 					 ACL_LINK, 0, rp->status);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
-				       struct sk_buff *skb)
+static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
+				     struct sk_buff *skb)
 {
-	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
+	struct hci_rp_read_local_oob_data *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+	return rp->status;
 }
 
-static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
-					   struct sk_buff *skb)
+static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
+					 struct sk_buff *skb)
 {
-	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
+	struct hci_rp_read_local_oob_ext_data *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+	return rp->status;
 }
 
-static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
+				    struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	bdaddr_t *sent;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
 	if (!sent)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
@@ -1179,21 +1347,24 @@ static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
 	}
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
+				    struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	struct hci_cp_le_set_default_phy *cp;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
		return rp->status;
 
 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
 	if (!cp)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
@@ -1201,17 +1372,21 @@ static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
 	hdev->le_rx_def_phys = cp->rx_phys;
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
-					      struct sk_buff *skb)
+static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
+					    struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	struct hci_cp_le_set_adv_set_rand_addr *cp;
 	struct adv_info *adv;
 
-	if (status)
-		return;
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+	if (rp->status)
+		return rp->status;
 
 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
 	/* Update only in case the adv instance since handle 0x00 shall be using
@@ -1219,7 +1394,7 @@ static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
 	 * non-extended adverting.
 	 */
 	if (!cp || !cp->handle)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
@@ -1235,20 +1410,25 @@ static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
 	}
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_le_remove_adv_set(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
+				   struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *)skb->data);
+	struct hci_ev_status *rp = data;
 	u8 *instance;
 	int err;
 
-	if (status)
-		return;
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+	if (rp->status)
+		return rp->status;
 
 	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
 	if (!instance)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
@@ -1258,19 +1438,24 @@ static void hci_cc_le_remove_adv_set(struct hci_dev *hdev, struct sk_buff *skb)
 			    *instance);
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_le_clear_adv_sets(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
+				   struct sk_buff *skb)
 {
-	__u8 status = *((__u8 *)skb->data);
+	struct hci_ev_status *rp = data;
 	struct adv_info *adv, *n;
 	int err;
 
-	if (status)
-		return;
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+	if (rp->status)
+		return rp->status;
 
 	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
@@ -1284,34 +1469,67 @@ static void hci_cc_le_clear_adv_sets(struct hci_dev *hdev, struct sk_buff *skb)
 	}
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
-					  struct sk_buff *skb)
+static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
+					struct sk_buff *skb)
 {
-	struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
+	struct hci_rp_le_read_transmit_power *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
 	if (rp->status)
-		return;
+		return rp->status;
 
 	hdev->min_le_tx_power = rp->min_le_tx_power;
 	hdev->max_le_tx_power = rp->max_le_tx_power;
+
+	return rp->status;
 }
 
-static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
+				     struct sk_buff *skb)
 {
-	__u8 *sent, status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
+	struct hci_cp_le_set_privacy_mode *cp;
+	struct hci_conn_params *params;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
+
+	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
+	if (!cp)
+		return rp->status;
+
+	hci_dev_lock(hdev);
+
+	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
+	if (params)
+		params->privacy_mode = cp->mode;
+
+	hci_dev_unlock(hdev);
+
+	return rp->status;
+}
+
+static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
+				   struct sk_buff *skb)
+{
+	struct hci_ev_status *rp = data;
+	__u8 *sent;
+
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+	if (rp->status)
+		return rp->status;
 
 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
 	if (!sent)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
@@ -1333,24 +1551,26 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
 	}
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
-					 struct sk_buff *skb)
+static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
+				       struct sk_buff *skb)
 {
 	struct hci_cp_le_set_ext_adv_enable *cp;
 	struct hci_cp_ext_adv_set *set;
-	__u8 status = *((__u8 *) skb->data);
 	struct adv_info *adv = NULL, *n;
+	struct hci_ev_status *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
 	if (!cp)
-		return;
+		return rp->status;
 
 	set = (void *)cp->data;
 
@@ -1397,44 +1617,48 @@ static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
 
 unlock:
 	hci_dev_unlock(hdev);
+	return rp->status;
 }
 
-static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
+				   struct sk_buff *skb)
 {
 	struct hci_cp_le_set_scan_param *cp;
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
		return rp->status;
 
 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
 	if (!cp)
-		return;
+		return rp->status;
 
 	hci_dev_lock(hdev);
 
 	hdev->le_scan_type = cp->type;
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
-static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
-					 struct sk_buff *skb)
+static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
+				       struct sk_buff *skb)
 {
 	struct hci_cp_le_set_ext_scan_params *cp;
-	__u8 status = *((__u8 *) skb->data);
+	struct hci_ev_status *rp = data;
 	struct hci_cp_le_scan_phy_params *phy_param;
 
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
-	if (status)
-		return;
+	if (rp->status)
+		return rp->status;
 
 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
 	if (!cp)
-		return;
+		return rp->status;
 
 	phy_param = (void *)cp->data;
 
@@ -1443,6 +1667,8 @@ static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
 	hdev->le_scan_type = phy_param->type;
 
 	hci_dev_unlock(hdev);
+
+	return rp->status;
 }
 
 static bool has_pending_adv_report(struct hci_dev *hdev)
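Every command-complete handler converted above follows the same shape: take a pre-validated struct via the void *data argument, log the status with bt_dev_dbg(), and return the status as u8. Returning the status instead of void is what lets a dispatcher drive these handlers from an opcode-indexed table and length-check payloads in one place (the *_skb_pull helpers at the top of the file). A speculative, self-contained sketch of such a table with invented names (struct cc_handler, dispatch_cc); this is not the kernel's actual dispatch code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cc_handler {
	uint16_t opcode;
	size_t   min_len;                 /* reject short payloads up front */
	uint8_t (*func)(const void *data, size_t len);
};

static uint8_t cc_reset(const void *data, size_t len)
{
	const uint8_t *status = data;

	return *status;                   /* first byte is the HCI status */
}

static const struct cc_handler cc_table[] = {
	{ 0x0c03 /* HCI_OP_RESET */, 1, cc_reset },
};

static uint8_t dispatch_cc(uint16_t opcode, const void *data, size_t len)
{
	for (size_t i = 0; i < sizeof(cc_table) / sizeof(cc_table[0]); i++) {
		if (cc_table[i].opcode != opcode)
			continue;
		if (len < cc_table[i].min_len)
			return 0xff;      /* malformed: payload too short */
		return cc_table[i].func(data, len);
	}
	return 0xff;                      /* unknown opcode */
}

int main(void)
{
	uint8_t ok = 0x00;

	printf("status %u\n", (unsigned)dispatch_cc(0x0c03, &ok, sizeof(ok)));
	return 0;
}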
le_set_scan_enable_complete(hdev, cp->enable); + + return rp->status; } -static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, +static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data; + struct hci_rp_le_read_num_supported_adv_sets *rp = data; - BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status, - rp->num_of_sets); + bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status, + rp->num_of_sets); if (rp->status) - return; + return rp->status; hdev->le_num_of_adv_sets = rp->num_of_sets; + + return rp->status; } -static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data; + struct hci_rp_le_read_accept_list_size *rp = data; - BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size); + bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); if (rp->status) - return; + return rp->status; hdev->le_accept_list_size = rp->size; + + return rp->status; } -static void hci_cc_le_clear_accept_list(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - __u8 status = *((__u8 *) skb->data); + struct hci_ev_status *rp = data; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - if (status) - return; + if (rp->status) + return rp->status; hci_bdaddr_list_clear(&hdev->le_accept_list); + + return rp->status; } -static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { struct hci_cp_le_add_to_accept_list *sent; - __u8 status = *((__u8 *) skb->data); + struct hci_ev_status *rp = data; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - if (status) - return; + if (rp->status) + return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); if (!sent) - return; + return rp->status; hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr, sent->bdaddr_type); + + return rp->status; } -static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { struct hci_cp_le_del_from_accept_list *sent; - __u8 status = *((__u8 *) skb->data); + struct hci_ev_status *rp = data; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - if (status) - return; + if (rp->status) + return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST); if (!sent) - return; + return rp->status; hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr, sent->bdaddr_type); + + return rp->status; } -static void hci_cc_le_read_supported_states(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_rp_le_read_supported_states *rp = (void *) skb->data; + struct hci_rp_le_read_supported_states *rp = data; - BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) - return; + return rp->status; 
memcpy(hdev->le_states, rp->le_states, 8); + + return rp->status; } -static void hci_cc_le_read_def_data_len(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_rp_le_read_def_data_len *rp = (void *) skb->data; + struct hci_rp_le_read_def_data_len *rp = data; - BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) - return; + return rp->status; hdev->le_def_tx_len = le16_to_cpu(rp->tx_len); hdev->le_def_tx_time = le16_to_cpu(rp->tx_time); + + return rp->status; } -static void hci_cc_le_write_def_data_len(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { struct hci_cp_le_write_def_data_len *sent; - __u8 status = *((__u8 *) skb->data); + struct hci_ev_status *rp = data; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - if (status) - return; + if (rp->status) + return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN); if (!sent) - return; + return rp->status; hdev->le_def_tx_len = le16_to_cpu(sent->tx_len); hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); + + return rp->status; } -static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { struct hci_cp_le_add_to_resolv_list *sent; - __u8 status = *((__u8 *) skb->data); + struct hci_ev_status *rp = data; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - if (status) - return; + if (rp->status) + return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST); if (!sent) - return; + return rp->status; hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr, sent->bdaddr_type, sent->peer_irk, sent->local_irk); + + return rp->status; } -static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { struct hci_cp_le_del_from_resolv_list *sent; - __u8 status = *((__u8 *) skb->data); + struct hci_ev_status *rp = data; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - if (status) - return; + if (rp->status) + return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST); if (!sent) - return; + return rp->status; hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr, sent->bdaddr_type); + + return rp->status; } -static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - __u8 status = *((__u8 *) skb->data); + struct hci_ev_status *rp = data; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - if (status) - return; + if (rp->status) + return rp->status; hci_bdaddr_list_clear(&hdev->le_resolv_list); + + return rp->status; } -static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data; + struct hci_rp_le_read_resolv_list_size *rp = data; - 
BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size); + bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); if (rp->status) - return; + return rp->status; hdev->le_resolv_list_size = rp->size; + + return rp->status; } -static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - __u8 *sent, status = *((__u8 *) skb->data); + struct hci_ev_status *rp = data; + __u8 *sent; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - if (status) - return; + if (rp->status) + return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE); if (!sent) - return; + return rp->status; hci_dev_lock(hdev); @@ -1775,38 +2030,42 @@ static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION); hci_dev_unlock(hdev); + + return rp->status; } -static void hci_cc_le_read_max_data_len(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_rp_le_read_max_data_len *rp = (void *) skb->data; + struct hci_rp_le_read_max_data_len *rp = data; - BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) - return; + return rp->status; hdev->le_max_tx_len = le16_to_cpu(rp->tx_len); hdev->le_max_tx_time = le16_to_cpu(rp->tx_time); hdev->le_max_rx_len = le16_to_cpu(rp->rx_len); hdev->le_max_rx_time = le16_to_cpu(rp->rx_time); + + return rp->status; } -static void hci_cc_write_le_host_supported(struct hci_dev *hdev, - struct sk_buff *skb) +static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { struct hci_cp_write_le_host_supported *sent; - __u8 status = *((__u8 *) skb->data); + struct hci_ev_status *rp = data; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - if (status) - return; + if (rp->status) + return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); if (!sent) - return; + return rp->status; hci_dev_lock(hdev); @@ -1825,41 +2084,47 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev, hdev->features[1][0] &= ~LMP_HOST_LE_BREDR; hci_dev_unlock(hdev); + + return rp->status; } -static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb) +static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { struct hci_cp_le_set_adv_param *cp; - u8 status = *((u8 *) skb->data); + struct hci_ev_status *rp = data; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - if (status) - return; + if (rp->status) + return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM); if (!cp) - return; + return rp->status; hci_dev_lock(hdev); hdev->adv_addr_type = cp->own_address_type; hci_dev_unlock(hdev); + + return rp->status; } -static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb) +static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data; + struct hci_rp_le_set_ext_adv_params *rp = data; struct hci_cp_le_set_ext_adv_params *cp; struct adv_info *adv_instance; - BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + bt_dev_dbg(hdev, 
"status 0x%2.2x", rp->status); if (rp->status) - return; + return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); if (!cp) - return; + return rp->status; hci_dev_lock(hdev); hdev->adv_addr_type = cp->own_addr_type; @@ -1875,17 +2140,20 @@ static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb) hci_req_update_adv_data(hdev, cp->handle); hci_dev_unlock(hdev); + + return rp->status; } -static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb) +static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_rp_read_rssi *rp = (void *) skb->data; + struct hci_rp_read_rssi *rp = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) - return; + return rp->status; hci_dev_lock(hdev); @@ -1894,22 +2162,25 @@ static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb) conn->rssi = rp->rssi; hci_dev_unlock(hdev); + + return rp->status; } -static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb) +static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { struct hci_cp_read_tx_power *sent; - struct hci_rp_read_tx_power *rp = (void *) skb->data; + struct hci_rp_read_tx_power *rp = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) - return; + return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); if (!sent) - return; + return rp->status; hci_dev_lock(hdev); @@ -1928,26 +2199,30 @@ static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb) unlock: hci_dev_unlock(hdev); + return rp->status; } -static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb) +static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - u8 status = *((u8 *) skb->data); + struct hci_ev_status *rp = data; u8 *mode; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); - if (status) - return; + if (rp->status) + return rp->status; mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE); if (mode) hdev->ssp_debug_mode = *mode; + + return rp->status; } static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) { - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); if (status) { hci_conn_check_pending(hdev); @@ -1962,7 +2237,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) struct hci_cp_create_conn *cp; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); if (!cp) @@ -1972,7 +2247,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); - BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn); + bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn); if (status) { if (conn && conn->state == BT_CONNECT) { @@ -2001,7 +2276,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) struct hci_conn *acl, *sco; __u16 handle; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; @@ -2012,7 +2287,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) handle = __le16_to_cpu(cp->handle); - BT_DBG("%s 
handle 0x%4.4x", hdev->name, handle); + bt_dev_dbg(hdev, "handle 0x%4.4x", handle); hci_dev_lock(hdev); @@ -2035,7 +2310,7 @@ static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) struct hci_cp_auth_requested *cp; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; @@ -2062,7 +2337,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) struct hci_cp_set_conn_encrypt *cp; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; @@ -2129,6 +2404,12 @@ static bool hci_resolve_next_name(struct hci_dev *hdev) if (list_empty(&discov->resolve)) return false; + /* We should stop if we have already spent too much time resolving names. */ + if (time_after(jiffies, discov->name_resolve_timeout)) { + bt_dev_warn_ratelimited(hdev, "Name resolve takes too long."); + return false; + } + e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); if (!e) return false; @@ -2175,13 +2456,10 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, return; list_del(&e->list); - if (name) { - e->name_state = NAME_KNOWN; - mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, - e->data.rssi, name, name_len); - } else { - e->name_state = NAME_NOT_KNOWN; - } + + e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN; + mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi, + name, name_len); if (hci_resolve_next_name(hdev)) return; @@ -2195,7 +2473,7 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) struct hci_cp_remote_name_req *cp; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); /* If successful wait for the name req complete event before * checking for the need to do authentication */ @@ -2238,7 +2516,7 @@ static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) struct hci_cp_read_remote_features *cp; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; @@ -2265,7 +2543,7 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) struct hci_cp_read_remote_ext_features *cp; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; @@ -2293,7 +2571,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) struct hci_conn *acl, *sco; __u16 handle; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; @@ -2304,7 +2582,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) handle = __le16_to_cpu(cp->handle); - BT_DBG("%s handle 0x%4.4x", hdev->name, handle); + bt_dev_dbg(hdev, "handle 0x%4.4x", handle); hci_dev_lock(hdev); @@ -2362,7 +2640,7 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) struct hci_cp_sniff_mode *cp; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; @@ -2389,7 +2667,7 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) struct hci_cp_exit_sniff_mode *cp; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; @@ -2418,6 +2696,8 @@ static void 
hci_cs_disconnect(struct hci_dev *hdev, u8 status) struct hci_conn *conn; bool mgmt_conn; + bt_dev_dbg(hdev, "status 0x%2.2x", status); + /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended * otherwise cleanup the connection immediately. */ @@ -2551,7 +2831,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) { struct hci_cp_le_create_conn *cp; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); /* All connection failure handling is taken care of by the * hci_le_conn_failed function which is triggered by the HCI @@ -2576,7 +2856,7 @@ static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) { struct hci_cp_le_ext_create_conn *cp; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); /* All connection failure handling is taken care of by the * hci_le_conn_failed function which is triggered by the HCI @@ -2602,7 +2882,7 @@ static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) struct hci_cp_le_read_remote_features *cp; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; @@ -2629,7 +2909,7 @@ static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) struct hci_cp_le_start_enc *cp; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; @@ -2677,13 +2957,14 @@ static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) hci_dev_unlock(hdev); } -static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - __u8 status = *((__u8 *) skb->data); + struct hci_ev_status *ev = data; struct discovery_state *discov = &hdev->discovery; struct inquiry_entry *e; - BT_DBG("%s status 0x%2.2x", hdev->name, status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_conn_check_pending(hdev); @@ -2719,6 +3000,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) if (e && hci_resolve_name(hdev, e) == 0) { e->name_state = NAME_PENDING; hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); + discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION; } else { /* When BR/EDR inquiry is active and no LE scanning is in * progress, then change discovery state to indicate completion. 
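The deadline stamped above (discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION) is checked in hci_resolve_next_name() with time_after(), which keeps the comparison correct even when the jiffies counter wraps around. Below is a minimal userspace sketch of that wrap-safe test; time_after() is re-implemented here purely for illustration, and the 32-bit counter width and the constants are assumptions rather than the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's time_after(a, b): true when
 * counter value a is later than b. The signed subtraction is what keeps
 * the test correct across unsigned wraparound; a plain a > b comparison
 * would misfire as soon as the counter rolls over.
 */
static int time_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t jiffies = 0xfffffff0u;    /* counter close to wrapping */
	uint32_t timeout = jiffies + 0x40; /* deadline lands past zero */
	int i;

	for (i = 0; i < 8; i++, jiffies += 0x10)
		printf("jiffies=0x%08x expired=%d\n", jiffies,
		       time_after(jiffies, timeout));
	return 0;
}

The expired flag stays 0 through the wrap at 0x00000000 and only flips once the counter moves past the deadline, which is exactly the property the name-resolve timeout relies on.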
@@ -2736,15 +3018,20 @@ unlock: hci_dev_unlock(hdev); } -static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, + struct sk_buff *skb) { + struct hci_ev_inquiry_result *ev = edata; struct inquiry_data data; - struct inquiry_info *info = (void *) (skb->data + 1); - int num_rsp = *((__u8 *) skb->data); + int i; + + if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT, + flex_array_size(ev, info, ev->num))) + return; - BT_DBG("%s num_rsp %d", hdev->name, num_rsp); + bt_dev_dbg(hdev, "num %d", ev->num); - if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) + if (!ev->num) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) @@ -2752,7 +3039,8 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); - for (; num_rsp; num_rsp--, info++) { + for (i = 0; i < ev->num; i++) { + struct inquiry_info *info = &ev->info[i]; u32 flags; bacpy(&data.bdaddr, &info->bdaddr); @@ -2774,12 +3062,13 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_conn_complete *ev = (void *) skb->data; + struct hci_ev_conn_complete *ev = data; struct hci_conn *conn; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -2898,16 +3187,16 @@ static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); } -static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_conn_request_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_conn_request *ev = (void *) skb->data; + struct hci_ev_conn_request *ev = data; int mask = hdev->link_mode; struct inquiry_entry *ie; struct hci_conn *conn; __u8 flags = 0; - BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr, - ev->link_type); + bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, &flags); @@ -3009,15 +3298,16 @@ static u8 hci_to_mgmt_reason(u8 err) } } -static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_disconn_complete *ev = (void *) skb->data; + struct hci_ev_disconn_complete *ev = data; u8 reason; struct hci_conn_params *params; struct hci_conn *conn; bool mgmt_connected; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -3093,12 +3383,13 @@ unlock: hci_dev_unlock(hdev); } -static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_auth_complete *ev = (void *) skb->data; + struct hci_ev_auth_complete *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -3163,12 +3454,13 @@ unlock: hci_dev_unlock(hdev); } -static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_remote_name_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_remote_name 
*ev = (void *) skb->data; + struct hci_ev_remote_name *ev = data; struct hci_conn *conn; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_conn_check_pending(hdev); @@ -3246,12 +3538,13 @@ unlock: hci_dev_unlock(hdev); } -static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_encrypt_change *ev = (void *) skb->data; + struct hci_ev_encrypt_change *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -3360,13 +3653,13 @@ unlock: hci_dev_unlock(hdev); } -static void hci_change_link_key_complete_evt(struct hci_dev *hdev, +static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_change_link_key_complete *ev = (void *) skb->data; + struct hci_ev_change_link_key_complete *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -3383,13 +3676,13 @@ static void hci_change_link_key_complete_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_remote_features_evt(struct hci_dev *hdev, +static void hci_remote_features_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_remote_features *ev = (void *) skb->data; + struct hci_ev_remote_features *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -3447,374 +3740,227 @@ static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) } } -static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, - u16 *opcode, u8 *status, +#define HCI_CC_VL(_op, _func, _min, _max) \ +{ \ + .op = _op, \ + .func = _func, \ + .min_len = _min, \ + .max_len = _max, \ +} + +#define HCI_CC(_op, _func, _len) \ + HCI_CC_VL(_op, _func, _len, _len) + +#define HCI_CC_STATUS(_op, _func) \ + HCI_CC(_op, _func, sizeof(struct hci_ev_status)) + +static const struct hci_cc { + u16 op; + u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); + u16 min_len; + u16 max_len; +} hci_cc_table[] = { + HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), + HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), + HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), + HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, + hci_cc_remote_name_req_cancel), + HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, + sizeof(struct hci_rp_role_discovery)), + HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, + sizeof(struct hci_rp_read_link_policy)), + HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy, + sizeof(struct hci_rp_write_link_policy)), + HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy, + sizeof(struct hci_rp_read_def_link_policy)), + HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY, + hci_cc_write_def_link_policy), + HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset), + HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key, + sizeof(struct hci_rp_read_stored_link_key)), + HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key, + sizeof(struct hci_rp_delete_stored_link_key)), + HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name), + HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name, + sizeof(struct 
hci_rp_read_local_name)), + HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable), + HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode), + HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable), + HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter), + HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev, + sizeof(struct hci_rp_read_class_of_dev)), + HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev), + HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting, + sizeof(struct hci_rp_read_voice_setting)), + HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting), + HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac, + sizeof(struct hci_rp_read_num_supported_iac)), + HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode), + HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support), + HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout, + sizeof(struct hci_rp_read_auth_payload_to)), + HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout, + sizeof(struct hci_rp_write_auth_payload_to)), + HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version, + sizeof(struct hci_rp_read_local_version)), + HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands, + sizeof(struct hci_rp_read_local_commands)), + HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features, + sizeof(struct hci_rp_read_local_features)), + HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features, + sizeof(struct hci_rp_read_local_ext_features)), + HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size, + sizeof(struct hci_rp_read_buffer_size)), + HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr, + sizeof(struct hci_rp_read_bd_addr)), + HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts, + sizeof(struct hci_rp_read_local_pairing_opts)), + HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity, + sizeof(struct hci_rp_read_page_scan_activity)), + HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, + hci_cc_write_page_scan_activity), + HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, + sizeof(struct hci_rp_read_page_scan_type)), + HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), + HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size, + sizeof(struct hci_rp_read_data_block_size)), + HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode, + sizeof(struct hci_rp_read_flow_control_mode)), + HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info, + sizeof(struct hci_rp_read_local_amp_info)), + HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, + sizeof(struct hci_rp_read_clock)), + HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, + sizeof(struct hci_rp_read_inq_rsp_tx_power)), + HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, + hci_cc_read_def_err_data_reporting, + sizeof(struct hci_rp_read_def_err_data_reporting)), + HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, + hci_cc_write_def_err_data_reporting), + HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply, + sizeof(struct hci_rp_pin_code_reply)), + HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply, + sizeof(struct hci_rp_pin_code_neg_reply)), + HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data, + sizeof(struct hci_rp_read_local_oob_data)), + HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data, + sizeof(struct hci_rp_read_local_oob_ext_data)), + 
HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size, + sizeof(struct hci_rp_le_read_buffer_size)), + HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features, + sizeof(struct hci_rp_le_read_local_features)), + HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power, + sizeof(struct hci_rp_le_read_adv_tx_power)), + HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply, + sizeof(struct hci_rp_user_confirm_reply)), + HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply, + sizeof(struct hci_rp_user_confirm_reply)), + HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply, + sizeof(struct hci_rp_user_confirm_reply)), + HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply, + sizeof(struct hci_rp_user_confirm_reply)), + HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr), + HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable), + HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param), + HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable), + HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE, + hci_cc_le_read_accept_list_size, + sizeof(struct hci_rp_le_read_accept_list_size)), + HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list), + HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST, + hci_cc_le_add_to_accept_list), + HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST, + hci_cc_le_del_from_accept_list), + HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states, + sizeof(struct hci_rp_le_read_supported_states)), + HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len, + sizeof(struct hci_rp_le_read_def_data_len)), + HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN, + hci_cc_le_write_def_data_len), + HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST, + hci_cc_le_add_to_resolv_list), + HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST, + hci_cc_le_del_from_resolv_list), + HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST, + hci_cc_le_clear_resolv_list), + HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size, + sizeof(struct hci_rp_le_read_resolv_list_size)), + HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, + hci_cc_le_set_addr_resolution_enable), + HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len, + sizeof(struct hci_rp_le_read_max_data_len)), + HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED, + hci_cc_write_le_host_supported), + HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param), + HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi, + sizeof(struct hci_rp_read_rssi)), + HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power, + sizeof(struct hci_rp_read_tx_power)), + HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode), + HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS, + hci_cc_le_set_ext_scan_param), + HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE, + hci_cc_le_set_ext_scan_enable), + HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy), + HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, + hci_cc_le_read_num_adv_sets, + sizeof(struct hci_rp_le_read_num_supported_adv_sets)), + HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, + sizeof(struct hci_rp_le_set_ext_adv_params)), + HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, + hci_cc_le_set_ext_adv_enable), + HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, + hci_cc_le_set_adv_set_random_addr), + HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), + HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), + HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, 
hci_cc_le_read_transmit_power, + sizeof(struct hci_rp_le_read_transmit_power)), + HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode) +}; + +static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, + struct sk_buff *skb) +{ + void *data; + + if (skb->len < cc->min_len) { + bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u", + cc->op, skb->len, cc->min_len); + return HCI_ERROR_UNSPECIFIED; + } + + /* Just warn if the length is over max_len, since it may still be + * possible to partially parse the cc, so leave it to the callback to + * decide whether that is acceptable. + */ + if (skb->len > cc->max_len) + bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u", + cc->op, skb->len, cc->max_len); + + data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len); + if (!data) + return HCI_ERROR_UNSPECIFIED; + + return cc->func(hdev, data, skb); +} + +static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb, u16 *opcode, u8 *status, hci_req_complete_t *req_complete, hci_req_complete_skb_t *req_complete_skb) { - struct hci_ev_cmd_complete *ev = (void *) skb->data; + struct hci_ev_cmd_complete *ev = data; + int i; *opcode = __le16_to_cpu(ev->opcode); - *status = skb->data[sizeof(*ev)]; - - skb_pull(skb, sizeof(*ev)); - - switch (*opcode) { - case HCI_OP_INQUIRY_CANCEL: - hci_cc_inquiry_cancel(hdev, skb, status); - break; - - case HCI_OP_PERIODIC_INQ: - hci_cc_periodic_inq(hdev, skb); - break; - - case HCI_OP_EXIT_PERIODIC_INQ: - hci_cc_exit_periodic_inq(hdev, skb); - break; - - case HCI_OP_REMOTE_NAME_REQ_CANCEL: - hci_cc_remote_name_req_cancel(hdev, skb); - break; - - case HCI_OP_ROLE_DISCOVERY: - hci_cc_role_discovery(hdev, skb); - break; - - case HCI_OP_READ_LINK_POLICY: - hci_cc_read_link_policy(hdev, skb); - break; - - case HCI_OP_WRITE_LINK_POLICY: - hci_cc_write_link_policy(hdev, skb); - break; - - case HCI_OP_READ_DEF_LINK_POLICY: - hci_cc_read_def_link_policy(hdev, skb); - break; - - case HCI_OP_WRITE_DEF_LINK_POLICY: - hci_cc_write_def_link_policy(hdev, skb); - break; - - case HCI_OP_RESET: - hci_cc_reset(hdev, skb); - break; - - case HCI_OP_READ_STORED_LINK_KEY: - hci_cc_read_stored_link_key(hdev, skb); - break; - - case HCI_OP_DELETE_STORED_LINK_KEY: - hci_cc_delete_stored_link_key(hdev, skb); - break; - - case HCI_OP_WRITE_LOCAL_NAME: - hci_cc_write_local_name(hdev, skb); - break; - - case HCI_OP_READ_LOCAL_NAME: - hci_cc_read_local_name(hdev, skb); - break; - - case HCI_OP_WRITE_AUTH_ENABLE: - hci_cc_write_auth_enable(hdev, skb); - break; - - case HCI_OP_WRITE_ENCRYPT_MODE: - hci_cc_write_encrypt_mode(hdev, skb); - break; - - case HCI_OP_WRITE_SCAN_ENABLE: - hci_cc_write_scan_enable(hdev, skb); - break; - - case HCI_OP_SET_EVENT_FLT: - hci_cc_set_event_filter(hdev, skb); - break; - - case HCI_OP_READ_CLASS_OF_DEV: - hci_cc_read_class_of_dev(hdev, skb); - break; - - case HCI_OP_WRITE_CLASS_OF_DEV: - hci_cc_write_class_of_dev(hdev, skb); - break; - - case HCI_OP_READ_VOICE_SETTING: - hci_cc_read_voice_setting(hdev, skb); - break; - - case HCI_OP_WRITE_VOICE_SETTING: - hci_cc_write_voice_setting(hdev, skb); - break; - - case HCI_OP_READ_NUM_SUPPORTED_IAC: - hci_cc_read_num_supported_iac(hdev, skb); - break; - - case HCI_OP_WRITE_SSP_MODE: - hci_cc_write_ssp_mode(hdev, skb); - break; - - case HCI_OP_WRITE_SC_SUPPORT: - hci_cc_write_sc_support(hdev, skb); - break; - - case HCI_OP_READ_AUTH_PAYLOAD_TO: - hci_cc_read_auth_payload_timeout(hdev, skb); - break; - - case HCI_OP_WRITE_AUTH_PAYLOAD_TO: - hci_cc_write_auth_payload_timeout(hdev, skb); - 
break; - - case HCI_OP_READ_LOCAL_VERSION: - hci_cc_read_local_version(hdev, skb); - break; - - case HCI_OP_READ_LOCAL_COMMANDS: - hci_cc_read_local_commands(hdev, skb); - break; - - case HCI_OP_READ_LOCAL_FEATURES: - hci_cc_read_local_features(hdev, skb); - break; - - case HCI_OP_READ_LOCAL_EXT_FEATURES: - hci_cc_read_local_ext_features(hdev, skb); - break; - - case HCI_OP_READ_BUFFER_SIZE: - hci_cc_read_buffer_size(hdev, skb); - break; - - case HCI_OP_READ_BD_ADDR: - hci_cc_read_bd_addr(hdev, skb); - break; - - case HCI_OP_READ_LOCAL_PAIRING_OPTS: - hci_cc_read_local_pairing_opts(hdev, skb); - break; - - case HCI_OP_READ_PAGE_SCAN_ACTIVITY: - hci_cc_read_page_scan_activity(hdev, skb); - break; - - case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY: - hci_cc_write_page_scan_activity(hdev, skb); - break; - - case HCI_OP_READ_PAGE_SCAN_TYPE: - hci_cc_read_page_scan_type(hdev, skb); - break; - - case HCI_OP_WRITE_PAGE_SCAN_TYPE: - hci_cc_write_page_scan_type(hdev, skb); - break; - - case HCI_OP_READ_DATA_BLOCK_SIZE: - hci_cc_read_data_block_size(hdev, skb); - break; - - case HCI_OP_READ_FLOW_CONTROL_MODE: - hci_cc_read_flow_control_mode(hdev, skb); - break; - case HCI_OP_READ_LOCAL_AMP_INFO: - hci_cc_read_local_amp_info(hdev, skb); - break; - - case HCI_OP_READ_CLOCK: - hci_cc_read_clock(hdev, skb); - break; - - case HCI_OP_READ_INQ_RSP_TX_POWER: - hci_cc_read_inq_rsp_tx_power(hdev, skb); - break; - - case HCI_OP_READ_DEF_ERR_DATA_REPORTING: - hci_cc_read_def_err_data_reporting(hdev, skb); - break; - - case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING: - hci_cc_write_def_err_data_reporting(hdev, skb); - break; - - case HCI_OP_PIN_CODE_REPLY: - hci_cc_pin_code_reply(hdev, skb); - break; - - case HCI_OP_PIN_CODE_NEG_REPLY: - hci_cc_pin_code_neg_reply(hdev, skb); - break; - - case HCI_OP_READ_LOCAL_OOB_DATA: - hci_cc_read_local_oob_data(hdev, skb); - break; - - case HCI_OP_READ_LOCAL_OOB_EXT_DATA: - hci_cc_read_local_oob_ext_data(hdev, skb); - break; - - case HCI_OP_LE_READ_BUFFER_SIZE: - hci_cc_le_read_buffer_size(hdev, skb); - break; - - case HCI_OP_LE_READ_LOCAL_FEATURES: - hci_cc_le_read_local_features(hdev, skb); - break; - - case HCI_OP_LE_READ_ADV_TX_POWER: - hci_cc_le_read_adv_tx_power(hdev, skb); - break; - - case HCI_OP_USER_CONFIRM_REPLY: - hci_cc_user_confirm_reply(hdev, skb); - break; - - case HCI_OP_USER_CONFIRM_NEG_REPLY: - hci_cc_user_confirm_neg_reply(hdev, skb); - break; - - case HCI_OP_USER_PASSKEY_REPLY: - hci_cc_user_passkey_reply(hdev, skb); - break; - - case HCI_OP_USER_PASSKEY_NEG_REPLY: - hci_cc_user_passkey_neg_reply(hdev, skb); - break; - - case HCI_OP_LE_SET_RANDOM_ADDR: - hci_cc_le_set_random_addr(hdev, skb); - break; - - case HCI_OP_LE_SET_ADV_ENABLE: - hci_cc_le_set_adv_enable(hdev, skb); - break; - - case HCI_OP_LE_SET_SCAN_PARAM: - hci_cc_le_set_scan_param(hdev, skb); - break; - - case HCI_OP_LE_SET_SCAN_ENABLE: - hci_cc_le_set_scan_enable(hdev, skb); - break; - - case HCI_OP_LE_READ_ACCEPT_LIST_SIZE: - hci_cc_le_read_accept_list_size(hdev, skb); - break; - - case HCI_OP_LE_CLEAR_ACCEPT_LIST: - hci_cc_le_clear_accept_list(hdev, skb); - break; - - case HCI_OP_LE_ADD_TO_ACCEPT_LIST: - hci_cc_le_add_to_accept_list(hdev, skb); - break; - - case HCI_OP_LE_DEL_FROM_ACCEPT_LIST: - hci_cc_le_del_from_accept_list(hdev, skb); - break; - - case HCI_OP_LE_READ_SUPPORTED_STATES: - hci_cc_le_read_supported_states(hdev, skb); - break; - - case HCI_OP_LE_READ_DEF_DATA_LEN: - hci_cc_le_read_def_data_len(hdev, skb); - break; - - case HCI_OP_LE_WRITE_DEF_DATA_LEN: - hci_cc_le_write_def_data_len(hdev, 
skb); - break; - - case HCI_OP_LE_ADD_TO_RESOLV_LIST: - hci_cc_le_add_to_resolv_list(hdev, skb); - break; - - case HCI_OP_LE_DEL_FROM_RESOLV_LIST: - hci_cc_le_del_from_resolv_list(hdev, skb); - break; - - case HCI_OP_LE_CLEAR_RESOLV_LIST: - hci_cc_le_clear_resolv_list(hdev, skb); - break; - - case HCI_OP_LE_READ_RESOLV_LIST_SIZE: - hci_cc_le_read_resolv_list_size(hdev, skb); - break; - - case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE: - hci_cc_le_set_addr_resolution_enable(hdev, skb); - break; - - case HCI_OP_LE_READ_MAX_DATA_LEN: - hci_cc_le_read_max_data_len(hdev, skb); - break; - - case HCI_OP_WRITE_LE_HOST_SUPPORTED: - hci_cc_write_le_host_supported(hdev, skb); - break; - - case HCI_OP_LE_SET_ADV_PARAM: - hci_cc_set_adv_param(hdev, skb); - break; - - case HCI_OP_READ_RSSI: - hci_cc_read_rssi(hdev, skb); - break; - - case HCI_OP_READ_TX_POWER: - hci_cc_read_tx_power(hdev, skb); - break; - - case HCI_OP_WRITE_SSP_DEBUG_MODE: - hci_cc_write_ssp_debug_mode(hdev, skb); - break; - - case HCI_OP_LE_SET_EXT_SCAN_PARAMS: - hci_cc_le_set_ext_scan_param(hdev, skb); - break; - - case HCI_OP_LE_SET_EXT_SCAN_ENABLE: - hci_cc_le_set_ext_scan_enable(hdev, skb); - break; - - case HCI_OP_LE_SET_DEFAULT_PHY: - hci_cc_le_set_default_phy(hdev, skb); - break; - - case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS: - hci_cc_le_read_num_adv_sets(hdev, skb); - break; - - case HCI_OP_LE_SET_EXT_ADV_PARAMS: - hci_cc_set_ext_adv_param(hdev, skb); - break; - - case HCI_OP_LE_SET_EXT_ADV_ENABLE: - hci_cc_le_set_ext_adv_enable(hdev, skb); - break; - - case HCI_OP_LE_SET_ADV_SET_RAND_ADDR: - hci_cc_le_set_adv_set_random_addr(hdev, skb); - break; - - case HCI_OP_LE_REMOVE_ADV_SET: - hci_cc_le_remove_adv_set(hdev, skb); - break; - - case HCI_OP_LE_CLEAR_ADV_SETS: - hci_cc_le_clear_adv_sets(hdev, skb); - break; + bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); - case HCI_OP_LE_READ_TRANSMIT_POWER: - hci_cc_le_read_transmit_power(hdev, skb); - break; - - default: - BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); - break; + for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) { + if (hci_cc_table[i].op == *opcode) { + *status = hci_cc_func(hdev, &hci_cc_table[i], skb); + break; + } } handle_cmd_cnt_and_timer(hdev, ev->ncmd); @@ -3832,94 +3978,56 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, queue_work(hdev->workqueue, &hdev->cmd_work); } -static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb, - u16 *opcode, u8 *status, +#define HCI_CS(_op, _func) \ +{ \ + .op = _op, \ + .func = _func, \ +} + +static const struct hci_cs { + u16 op; + void (*func)(struct hci_dev *hdev, __u8 status); +} hci_cs_table[] = { + HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry), + HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn), + HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect), + HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco), + HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested), + HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt), + HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req), + HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features), + HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES, + hci_cs_read_remote_ext_features), + HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn), + HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN, + hci_cs_enhanced_setup_sync_conn), + HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode), + HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode), + HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role), + HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), + HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, 
hci_cs_le_read_remote_features), + HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), + HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn) +}; + +static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb, u16 *opcode, u8 *status, hci_req_complete_t *req_complete, hci_req_complete_skb_t *req_complete_skb) { - struct hci_ev_cmd_status *ev = (void *) skb->data; - - skb_pull(skb, sizeof(*ev)); + struct hci_ev_cmd_status *ev = data; + int i; *opcode = __le16_to_cpu(ev->opcode); *status = ev->status; - switch (*opcode) { - case HCI_OP_INQUIRY: - hci_cs_inquiry(hdev, ev->status); - break; - - case HCI_OP_CREATE_CONN: - hci_cs_create_conn(hdev, ev->status); - break; - - case HCI_OP_DISCONNECT: - hci_cs_disconnect(hdev, ev->status); - break; - - case HCI_OP_ADD_SCO: - hci_cs_add_sco(hdev, ev->status); - break; - - case HCI_OP_AUTH_REQUESTED: - hci_cs_auth_requested(hdev, ev->status); - break; - - case HCI_OP_SET_CONN_ENCRYPT: - hci_cs_set_conn_encrypt(hdev, ev->status); - break; - - case HCI_OP_REMOTE_NAME_REQ: - hci_cs_remote_name_req(hdev, ev->status); - break; - - case HCI_OP_READ_REMOTE_FEATURES: - hci_cs_read_remote_features(hdev, ev->status); - break; - - case HCI_OP_READ_REMOTE_EXT_FEATURES: - hci_cs_read_remote_ext_features(hdev, ev->status); - break; - - case HCI_OP_SETUP_SYNC_CONN: - hci_cs_setup_sync_conn(hdev, ev->status); - break; - - case HCI_OP_ENHANCED_SETUP_SYNC_CONN: - hci_cs_enhanced_setup_sync_conn(hdev, ev->status); - break; - - case HCI_OP_SNIFF_MODE: - hci_cs_sniff_mode(hdev, ev->status); - break; + bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); - case HCI_OP_EXIT_SNIFF_MODE: - hci_cs_exit_sniff_mode(hdev, ev->status); - break; - - case HCI_OP_SWITCH_ROLE: - hci_cs_switch_role(hdev, ev->status); - break; - - case HCI_OP_LE_CREATE_CONN: - hci_cs_le_create_conn(hdev, ev->status); - break; - - case HCI_OP_LE_READ_REMOTE_FEATURES: - hci_cs_le_read_remote_features(hdev, ev->status); - break; - - case HCI_OP_LE_START_ENC: - hci_cs_le_start_enc(hdev, ev->status); - break; - - case HCI_OP_LE_EXT_CREATE_CONN: - hci_cs_le_ext_create_conn(hdev, ev->status); - break; - - default: - BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); - break; + for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) { + if (hci_cs_table[i].op == *opcode) { + hci_cs_table[i].func(hdev, ev->status); + break; + } } handle_cmd_cnt_and_timer(hdev, ev->ncmd); @@ -3930,36 +4038,39 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb, * (since for this kind of commands there will not be a command * complete event). 
*/ - if (ev->status || - (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event)) + if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) { hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, req_complete_skb); - - if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { - bt_dev_err(hdev, - "unexpected event for opcode 0x%4.4x", *opcode); - return; + if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { + bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", + *opcode); + return; + } } if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) queue_work(hdev->workqueue, &hdev->cmd_work); } -static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_hardware_error_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_hardware_error *ev = (void *) skb->data; + struct hci_ev_hardware_error *ev = data; + + bt_dev_dbg(hdev, "code 0x%2.2x", ev->code); hdev->hw_error_code = ev->code; queue_work(hdev->req_workqueue, &hdev->error_reset); } -static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_role_change_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_role_change *ev = (void *) skb->data; + struct hci_ev_role_change *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -3976,25 +4087,24 @@ static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_num_comp_pkts *ev = (void *) skb->data; + struct hci_ev_num_comp_pkts *ev = data; int i; - if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { - bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); + if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS, + flex_array_size(ev, handles, ev->num))) return; - } - if (skb->len < sizeof(*ev) || - skb->len < struct_size(ev, handles, ev->num_hndl)) { - BT_DBG("%s bad parameters", hdev->name); + if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { + bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); return; } - BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); + bt_dev_dbg(hdev, "num %d", ev->num); - for (i = 0; i < ev->num_hndl; i++) { + for (i = 0; i < ev->num; i++) { struct hci_comp_pkts_info *info = &ev->handles[i]; struct hci_conn *conn; __u16 handle, count; @@ -4064,24 +4174,24 @@ static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, return NULL; } -static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_num_comp_blocks *ev = (void *) skb->data; + struct hci_ev_num_comp_blocks *ev = data; int i; - if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { - bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); + if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS, + flex_array_size(ev, handles, ev->num_hndl))) return; - } - if (skb->len < sizeof(*ev) || - skb->len < struct_size(ev, handles, ev->num_hndl)) { - BT_DBG("%s bad parameters", hdev->name); + if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { + bt_dev_err(hdev, "wrong event for mode %d", + hdev->flow_ctl_mode); return; } - BT_DBG("%s num_blocks %d 
num_hndl %d", hdev->name, ev->num_blocks, - ev->num_hndl); + bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks, + ev->num_hndl); for (i = 0; i < ev->num_hndl; i++) { struct hci_comp_blocks_info *info = &ev->handles[i]; @@ -4115,12 +4225,13 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) queue_work(hdev->workqueue, &hdev->tx_work); } -static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_mode_change_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_mode_change *ev = (void *) skb->data; + struct hci_ev_mode_change *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -4143,12 +4254,13 @@ static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_pin_code_req *ev = (void *) skb->data; + struct hci_ev_pin_code_req *ev = data; struct hci_conn *conn; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); @@ -4213,14 +4325,15 @@ static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) } } -static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_link_key_req *ev = (void *) skb->data; + struct hci_ev_link_key_req *ev = data; struct hci_cp_link_key_reply cp; struct hci_conn *conn; struct link_key *key; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); if (!hci_dev_test_flag(hdev, HCI_MGMT)) return; @@ -4229,13 +4342,11 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) key = hci_find_link_key(hdev, &ev->bdaddr); if (!key) { - BT_DBG("%s link key not found for %pMR", hdev->name, - &ev->bdaddr); + bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); goto not_found; } - BT_DBG("%s found key type %u for %pMR", hdev->name, key->type, - &ev->bdaddr); + bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (conn) { @@ -4244,15 +4355,14 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || key->type == HCI_LK_UNAUTH_COMBINATION_P256) && conn->auth_type != 0xff && (conn->auth_type & 0x01)) { - BT_DBG("%s ignoring unauthenticated key", hdev->name); + bt_dev_dbg(hdev, "ignoring unauthenticated key"); goto not_found; } if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && (conn->pending_sec_level == BT_SECURITY_HIGH || conn->pending_sec_level == BT_SECURITY_FIPS)) { - BT_DBG("%s ignoring key unauthenticated for high security", - hdev->name); + bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); goto not_found; } @@ -4273,15 +4383,16 @@ not_found: hci_dev_unlock(hdev); } -static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_link_key_notify *ev = (void *) skb->data; + struct hci_ev_link_key_notify *ev = data; struct hci_conn *conn; struct link_key *key; bool persistent; u8 pin_len = 0; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); 
hci_dev_lock(hdev); @@ -4333,12 +4444,13 @@ unlock: hci_dev_unlock(hdev); } -static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_clock_offset *ev = (void *) skb->data; + struct hci_ev_clock_offset *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -4356,12 +4468,13 @@ static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_pkt_type_change *ev = (void *) skb->data; + struct hci_ev_pkt_type_change *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -4372,12 +4485,13 @@ static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; + struct hci_ev_pscan_rep_mode *ev = data; struct inquiry_entry *ie; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); @@ -4390,15 +4504,19 @@ static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, +static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, struct sk_buff *skb) { + union { + struct hci_ev_inquiry_result_rssi *res1; + struct hci_ev_inquiry_result_rssi_pscan *res2; + } *ev = edata; struct inquiry_data data; - int num_rsp = *((__u8 *) skb->data); + int i; - BT_DBG("%s num_rsp %d", hdev->name, num_rsp); + bt_dev_dbg(hdev, "num_rsp %d", ev->res1->num); - if (!num_rsp) + if (!ev->res1->num) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) @@ -4406,16 +4524,21 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, hci_dev_lock(hdev); - if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { - struct inquiry_info_with_rssi_and_pscan_mode *info; - info = (void *) (skb->data + 1); + if (skb->len == flex_array_size(ev, res2->info, ev->res2->num)) { + struct inquiry_info_rssi_pscan *info; - if (skb->len < num_rsp * sizeof(*info) + 1) - goto unlock; - - for (; num_rsp; num_rsp--, info++) { + for (i = 0; i < ev->res2->num; i++) { u32 flags; + info = hci_ev_skb_pull(hdev, skb, + HCI_EV_INQUIRY_RESULT_WITH_RSSI, + sizeof(*info)); + if (!info) { + bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", + HCI_EV_INQUIRY_RESULT_WITH_RSSI); + return; + } + bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; @@ -4431,15 +4554,21 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, info->dev_class, info->rssi, flags, NULL, 0, NULL, 0); } - } else { - struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); - - if (skb->len < num_rsp * sizeof(*info) + 1) - goto unlock; + } else if (skb->len == flex_array_size(ev, res1->info, ev->res1->num)) { + struct inquiry_info_rssi *info; - for (; num_rsp; num_rsp--, info++) { + for (i = 0; i < 
ev->res1->num; i++) { u32 flags; + info = hci_ev_skb_pull(hdev, skb, + HCI_EV_INQUIRY_RESULT_WITH_RSSI, + sizeof(*info)); + if (!info) { + bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", + HCI_EV_INQUIRY_RESULT_WITH_RSSI); + return; + } + bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; @@ -4455,19 +4584,21 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, info->dev_class, info->rssi, flags, NULL, 0, NULL, 0); } + } else { + bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", + HCI_EV_INQUIRY_RESULT_WITH_RSSI); } -unlock: hci_dev_unlock(hdev); } -static void hci_remote_ext_features_evt(struct hci_dev *hdev, +static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_remote_ext_features *ev = (void *) skb->data; + struct hci_ev_remote_ext_features *ev = data; struct hci_conn *conn; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -4525,13 +4656,13 @@ unlock: hci_dev_unlock(hdev); } -static void hci_sync_conn_complete_evt(struct hci_dev *hdev, +static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_sync_conn_complete *ev = (void *) skb->data; + struct hci_ev_sync_conn_complete *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -4640,17 +4771,21 @@ static inline size_t eir_get_length(u8 *eir, size_t eir_len) return eir_len; } -static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, +static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, struct sk_buff *skb) { + struct hci_ev_ext_inquiry_result *ev = edata; struct inquiry_data data; - struct extended_inquiry_info *info = (void *) (skb->data + 1); - int num_rsp = *((__u8 *) skb->data); size_t eir_len; + int i; + + if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT, + flex_array_size(ev, info, ev->num))) + return; - BT_DBG("%s num_rsp %d", hdev->name, num_rsp); + bt_dev_dbg(hdev, "num %d", ev->num); - if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) + if (!ev->num) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) @@ -4658,7 +4793,8 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, hci_dev_lock(hdev); - for (; num_rsp; num_rsp--, info++) { + for (i = 0; i < ev->num; i++) { + struct extended_inquiry_info *info = &ev->info[i]; u32 flags; bool name_known; @@ -4690,14 +4826,14 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_key_refresh_complete_evt(struct hci_dev *hdev, +static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_key_refresh_complete *ev = (void *) skb->data; + struct hci_ev_key_refresh_complete *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status, - __le16_to_cpu(ev->handle)); + bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status, + __le16_to_cpu(ev->handle)); hci_dev_lock(hdev); @@ -4800,12 +4936,13 @@ static u8 bredr_oob_data_present(struct hci_conn *conn) return 0x01; } -static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_io_capa_request *ev = (void *) skb->data; + struct 
hci_ev_io_capa_request *ev = data; struct hci_conn *conn; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); @@ -4869,12 +5006,13 @@ unlock: hci_dev_unlock(hdev); } -static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_io_capa_reply *ev = (void *) skb->data; + struct hci_ev_io_capa_reply *ev = data; struct hci_conn *conn; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); @@ -4889,14 +5027,14 @@ unlock: hci_dev_unlock(hdev); } -static void hci_user_confirm_request_evt(struct hci_dev *hdev, +static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_user_confirm_req *ev = (void *) skb->data; + struct hci_ev_user_confirm_req *ev = data; int loc_mitm, rem_mitm, confirm_hint = 0; struct hci_conn *conn; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); @@ -4917,7 +5055,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, */ if (conn->pending_sec_level > BT_SECURITY_MEDIUM && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { - BT_DBG("Rejecting request: remote device can't provide MITM"); + bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM"); hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); goto unlock; @@ -4936,7 +5074,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && (loc_mitm || rem_mitm)) { - BT_DBG("Confirming auto-accept as acceptor"); + bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); confirm_hint = 1; goto confirm; } @@ -4974,24 +5112,24 @@ unlock: hci_dev_unlock(hdev); } -static void hci_user_passkey_request_evt(struct hci_dev *hdev, +static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_user_passkey_req *ev = (void *) skb->data; + struct hci_ev_user_passkey_req *ev = data; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); } -static void hci_user_passkey_notify_evt(struct hci_dev *hdev, +static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_user_passkey_notify *ev = (void *) skb->data; + struct hci_ev_user_passkey_notify *ev = data; struct hci_conn *conn; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) @@ -5006,12 +5144,13 @@ static void hci_user_passkey_notify_evt(struct hci_dev *hdev, conn->passkey_entered); } -static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_keypress_notify *ev = (void *) skb->data; + struct hci_ev_keypress_notify *ev = data; struct hci_conn *conn; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) @@ -5044,13 +5183,13 @@ static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->passkey_entered); } -static void hci_simple_pair_complete_evt(struct hci_dev *hdev, +static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_simple_pair_complete *ev = 
(void *) skb->data; + struct hci_ev_simple_pair_complete *ev = data; struct hci_conn *conn; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); @@ -5075,14 +5214,14 @@ unlock: hci_dev_unlock(hdev); } -static void hci_remote_host_features_evt(struct hci_dev *hdev, +static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_remote_host_features *ev = (void *) skb->data; + struct hci_ev_remote_host_features *ev = data; struct inquiry_entry *ie; struct hci_conn *conn; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); @@ -5097,13 +5236,13 @@ static void hci_remote_host_features_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, +static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata, struct sk_buff *skb) { - struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; + struct hci_ev_remote_oob_data_request *ev = edata; struct oob_data *data; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); @@ -5152,14 +5291,13 @@ unlock: } #if IS_ENABLED(CONFIG_BT_HS) -static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_chan_selected_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_channel_selected *ev = (void *)skb->data; + struct hci_ev_channel_selected *ev = data; struct hci_conn *hcon; - BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle); - - skb_pull(skb, sizeof(*ev)); + bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle); hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); if (!hcon) @@ -5168,14 +5306,14 @@ static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb) amp_read_loc_assoc_final_data(hdev, hcon); } -static void hci_phy_link_complete_evt(struct hci_dev *hdev, +static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_phy_link_complete *ev = (void *) skb->data; + struct hci_ev_phy_link_complete *ev = data; struct hci_conn *hcon, *bredr_hcon; - BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle, - ev->status); + bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle, + ev->status); hci_dev_lock(hdev); @@ -5209,16 +5347,16 @@ unlock: hci_dev_unlock(hdev); } -static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_logical_link_complete *ev = (void *) skb->data; + struct hci_ev_logical_link_complete *ev = data; struct hci_conn *hcon; struct hci_chan *hchan; struct amp_mgr *mgr; - BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", - hdev->name, le16_to_cpu(ev->handle), ev->phy_handle, - ev->status); + bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", + le16_to_cpu(ev->handle), ev->phy_handle, ev->status); hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); if (!hcon) @@ -5248,14 +5386,14 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) } } -static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, +static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data; + struct hci_ev_disconn_logical_link_complete *ev = data; struct hci_chan *hchan; - BT_DBG("%s log handle 0x%4.4x status 
0x%2.2x", hdev->name, - le16_to_cpu(ev->handle), ev->status); + bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", + le16_to_cpu(ev->handle), ev->status); if (ev->status) return; @@ -5272,13 +5410,13 @@ unlock: hci_dev_unlock(hdev); } -static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, +static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data; + struct hci_ev_disconn_phy_link_complete *ev = data; struct hci_conn *hcon; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); if (ev->status) return; @@ -5496,11 +5634,12 @@ unlock: hci_dev_unlock(hdev); } -static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_le_conn_complete *ev = (void *) skb->data; + struct hci_ev_le_conn_complete *ev = data; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, NULL, ev->role, le16_to_cpu(ev->handle), @@ -5509,12 +5648,12 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) le16_to_cpu(ev->supervision_timeout)); } -static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, +static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data; + struct hci_ev_le_enh_conn_complete *ev = data; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), @@ -5523,13 +5662,14 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, le16_to_cpu(ev->supervision_timeout)); } -static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data; + struct hci_evt_le_ext_adv_set_term *ev = data; struct hci_conn *conn; struct adv_info *adv, *n; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); adv = hci_find_adv_instance(hdev, ev->handle); @@ -5587,13 +5727,13 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) } } -static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, +static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_le_conn_update_complete *ev = (void *) skb->data; + struct hci_ev_le_conn_update_complete *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); if (ev->status) return; @@ -5614,7 +5754,7 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, bool addr_resolved, - u8 adv_type, bdaddr_t *direct_rpa) + u8 adv_type) { struct hci_conn *conn; struct hci_conn_params *params; @@ -5669,7 +5809,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, BT_SECURITY_LOW, 
hdev->def_le_autoconnect_timeout, - HCI_ROLE_MASTER, direct_rpa); + HCI_ROLE_MASTER); if (!IS_ERR(conn)) { /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned * by higher layer that tried to connect, if no then @@ -5792,7 +5932,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * for advertising reports) and is already verified to be RPA above. */ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, - type, direct_addr); + type); if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { /* Store report for later inclusion by * mgmt_device_connected @@ -5909,32 +6049,37 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, clear_pending_adv_report(hdev); } -static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - u8 num_reports = skb->data[0]; - void *ptr = &skb->data[1]; + struct hci_ev_le_advertising_report *ev = data; + + if (!ev->num) + return; hci_dev_lock(hdev); - while (num_reports--) { - struct hci_ev_le_advertising_info *ev = ptr; + while (ev->num--) { + struct hci_ev_le_advertising_info *info; s8 rssi; - if (ev->length <= HCI_MAX_AD_LENGTH && - ev->data + ev->length <= skb_tail_pointer(skb)) { - rssi = ev->data[ev->length]; - process_adv_report(hdev, ev->evt_type, &ev->bdaddr, - ev->bdaddr_type, NULL, 0, rssi, - ev->data, ev->length, false); - } else { - bt_dev_err(hdev, "Dropping invalid advertising data"); - } - - ptr += sizeof(*ev) + ev->length + 1; + info = hci_le_ev_skb_pull(hdev, skb, + HCI_EV_LE_ADVERTISING_REPORT, + sizeof(*info)); + if (!info) + break; - if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) { - bt_dev_err(hdev, "Malicious advertising data. 
Stopping processing"); + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT, + info->length + 1)) break; + + if (info->length <= HCI_MAX_AD_LENGTH) { + rssi = info->data[info->length]; + process_adv_report(hdev, info->type, &info->bdaddr, + info->bdaddr_type, NULL, 0, rssi, + info->data, info->length, false); + } else { + bt_dev_err(hdev, "Dropping invalid advertising data"); } } @@ -5985,40 +6130,50 @@ invalid: return LE_ADV_INVALID; } -static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - u8 num_reports = skb->data[0]; - void *ptr = &skb->data[1]; + struct hci_ev_le_ext_adv_report *ev = data; + + if (!ev->num) + return; hci_dev_lock(hdev); - while (num_reports--) { - struct hci_ev_le_ext_adv_report *ev = ptr; + while (ev->num--) { + struct hci_ev_le_ext_adv_info *info; u8 legacy_evt_type; u16 evt_type; - evt_type = __le16_to_cpu(ev->evt_type); + info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, + sizeof(*info)); + if (!info) + break; + + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, + info->length)) + break; + + evt_type = __le16_to_cpu(info->type); legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type); if (legacy_evt_type != LE_ADV_INVALID) { - process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, - ev->bdaddr_type, NULL, 0, ev->rssi, - ev->data, ev->length, + process_adv_report(hdev, legacy_evt_type, &info->bdaddr, + info->bdaddr_type, NULL, 0, + info->rssi, info->data, info->length, !(evt_type & LE_EXT_ADV_LEGACY_PDU)); } - - ptr += sizeof(*ev) + ev->length; } hci_dev_unlock(hdev); } -static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, +static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data; + struct hci_ev_le_remote_feat_complete *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); @@ -6054,15 +6209,16 @@ static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_le_ltk_req *ev = (void *) skb->data; + struct hci_ev_le_ltk_req *ev = data; struct hci_cp_le_ltk_reply cp; struct hci_cp_le_ltk_neg_reply neg; struct hci_conn *conn; struct smp_ltk *ltk; - BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle)); + bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); hci_dev_lock(hdev); @@ -6130,14 +6286,16 @@ static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle, &cp); } -static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, +static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data; + struct hci_ev_le_remote_conn_param_req *ev = data; struct hci_cp_le_conn_param_req_reply cp; struct hci_conn *hcon; u16 handle, min, max, latency, timeout; + bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); + handle = le16_to_cpu(ev->handle); min = le16_to_cpu(ev->interval_min); max = le16_to_cpu(ev->interval_max); @@ -6188,32 +6346,40 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, hci_send_cmd(hdev, 
HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); } -static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, +static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - u8 num_reports = skb->data[0]; - struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1]; + struct hci_ev_le_direct_adv_report *ev = data; + int i; - if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1) + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT, + flex_array_size(ev, info, ev->num))) + return; + + if (!ev->num) return; hci_dev_lock(hdev); - for (; num_reports; num_reports--, ev++) - process_adv_report(hdev, ev->evt_type, &ev->bdaddr, - ev->bdaddr_type, &ev->direct_addr, - ev->direct_addr_type, ev->rssi, NULL, 0, + for (i = 0; i < ev->num; i++) { + struct hci_ev_le_direct_adv_info *info = &ev->info[i]; + + process_adv_report(hdev, info->type, &info->bdaddr, + info->bdaddr_type, &info->direct_addr, + info->direct_addr_type, info->rssi, NULL, 0, false); + } hci_dev_unlock(hdev); } -static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) { - struct hci_ev_le_phy_update_complete *ev = (void *) skb->data; + struct hci_ev_le_phy_update_complete *ev = data; struct hci_conn *conn; - BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); if (ev->status) return; @@ -6231,60 +6397,113 @@ unlock: hci_dev_unlock(hdev); }
-static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) -{ - struct hci_ev_le_meta *le_ev = (void *) skb->data; - - skb_pull(skb, sizeof(*le_ev)); - - switch (le_ev->subevent) { - case HCI_EV_LE_CONN_COMPLETE: - hci_le_conn_complete_evt(hdev, skb); - break; - - case HCI_EV_LE_CONN_UPDATE_COMPLETE: - hci_le_conn_update_complete_evt(hdev, skb); - break; - - case HCI_EV_LE_ADVERTISING_REPORT: - hci_le_adv_report_evt(hdev, skb); - break; - - case HCI_EV_LE_REMOTE_FEAT_COMPLETE: - hci_le_remote_feat_complete_evt(hdev, skb); - break; - - case HCI_EV_LE_LTK_REQ: - hci_le_ltk_request_evt(hdev, skb); - break; - - case HCI_EV_LE_REMOTE_CONN_PARAM_REQ: - hci_le_remote_conn_param_req_evt(hdev, skb); - break; - - case HCI_EV_LE_DIRECT_ADV_REPORT: - hci_le_direct_adv_report_evt(hdev, skb); - break; +#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \ +[_op] = { \ + .func = _func, \ + .min_len = _min_len, \ + .max_len = _max_len, \ +} + +#define HCI_LE_EV(_op, _func, _len) \ + HCI_LE_EV_VL(_op, _func, _len, _len) + +#define HCI_LE_EV_STATUS(_op, _func) \ + HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status)) + +/* Entries in this table shall have their position according to the subevent + * opcode they handle, so use of the macros above is recommended since they + * initialize each entry at its proper index using Designated Initializers; + * that way events without a callback function can be omitted. + */
+static const struct hci_le_ev { + void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); + u16 min_len; + u16 max_len; +} hci_le_ev_table[U8_MAX + 1] = { + /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */ + HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt, + sizeof(struct hci_ev_le_conn_complete)), + /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */ + HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt, + sizeof(struct hci_ev_le_advertising_report), + HCI_MAX_EVENT_SIZE), + /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */ + HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE, + hci_le_conn_update_complete_evt, + sizeof(struct hci_ev_le_conn_update_complete)), + /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */ + HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE, + hci_le_remote_feat_complete_evt, + sizeof(struct hci_ev_le_remote_feat_complete)), + /* [0x05 = HCI_EV_LE_LTK_REQ] */ + HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt, + sizeof(struct hci_ev_le_ltk_req)), + /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */ + HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ, + hci_le_remote_conn_param_req_evt, + sizeof(struct hci_ev_le_remote_conn_param_req)), + /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */ + HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE, + hci_le_enh_conn_complete_evt, + sizeof(struct hci_ev_le_enh_conn_complete)), + /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */ + HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt, + sizeof(struct hci_ev_le_direct_adv_report), + HCI_MAX_EVENT_SIZE), + /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */ + HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt, + sizeof(struct hci_ev_le_phy_update_complete)), + /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */ + HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt, + sizeof(struct hci_ev_le_ext_adv_report), + HCI_MAX_EVENT_SIZE), + /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */ + HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt, + sizeof(struct hci_evt_le_ext_adv_set_term)), +}; +
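For reference, the fixed-length HCI_LE_EV() form is just HCI_LE_EV_VL() with min_len == max_len, so an entry such as HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt, sizeof(struct hci_ev_le_ltk_req)) in the table above expands to the designated initializer shown here; variable-length reports use the _VL variant to allow a length range:

[HCI_EV_LE_LTK_REQ] = {
	.func    = hci_le_ltk_request_evt,
	.min_len = sizeof(struct hci_ev_le_ltk_req),
	.max_len = sizeof(struct hci_ev_le_ltk_req),
},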
+static void hci_le_meta_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb, u16 *opcode, u8 *status, + hci_req_complete_t *req_complete, + hci_req_complete_skb_t *req_complete_skb) +{ + struct hci_ev_le_meta *ev = data; + const struct hci_le_ev *subev; + + bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent); + + /* Only match event if command OGF is for LE */ + if (hdev->sent_cmd && + hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 && + hci_skb_event(hdev->sent_cmd) == ev->subevent) { + *opcode = hci_skb_opcode(hdev->sent_cmd); + hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete, + req_complete_skb); + } - case HCI_EV_LE_PHY_UPDATE_COMPLETE: - hci_le_phy_update_evt(hdev, skb); - break; + subev = &hci_le_ev_table[ev->subevent]; + if (!subev->func) + return; - case HCI_EV_LE_EXT_ADV_REPORT: - hci_le_ext_adv_report_evt(hdev, skb); - break; + if (skb->len < subev->min_len) { + bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u", + ev->subevent, skb->len, subev->min_len); + return; + } - case HCI_EV_LE_ENHANCED_CONN_COMPLETE: - hci_le_enh_conn_complete_evt(hdev, skb); - break; + /* Just warn if the length is over max_len, since it may still be + * possible to partially parse the event, so leave it to the callback + * to decide if that is acceptable. + */ + if (skb->len > subev->max_len) + bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u", + ev->subevent, skb->len, subev->max_len); - case HCI_EV_LE_EXT_ADV_SET_TERM: - hci_le_ext_adv_term_evt(hdev, skb); - break; + data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len); + if (!data) + return; - default: - break; - } + subev->func(hdev, data, skb); }
 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, @@ -6296,13 +6515,9 @@ static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, if (!skb) return false; - if (skb->len < sizeof(*hdr)) { - bt_dev_err(hdev, "too short HCI event"); + hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr)); + if (!hdr) return false; - } - - hdr = (void *) skb->data; - skb_pull(skb, HCI_EVENT_HDR_SIZE); if (event) { if (hdr->evt != event) @@ -6322,13 +6537,9 @@ static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, return false; } - if (skb->len < sizeof(*ev)) { - bt_dev_err(hdev, "too short cmd_complete event"); + ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev)); + if (!ev) return false; - } - - ev = (void *) skb->data; - skb_pull(skb, sizeof(*ev)); if (opcode != __le16_to_cpu(ev->opcode)) { BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode, @@ -6344,7 +6555,7 @@ static void hci_store_wake_reason(struct hci_dev *hdev, u8 event, { struct hci_ev_le_advertising_info *adv; struct hci_ev_le_direct_adv_info *direct_adv; - struct hci_ev_le_ext_adv_report *ext_adv; + struct hci_ev_le_ext_adv_info *ext_adv; const struct hci_ev_conn_complete *conn_complete = (void *)skb->data; const struct hci_ev_conn_request *conn_request = (void *)skb->data; @@ -6408,25 +6619,252 @@ unlock: hci_dev_unlock(hdev); }
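The BR/EDR table that follows uses the same scheme as hci_le_ev_table above. As a way to see the whole lookup-and-length-check flow in isolation, here is a minimal stand-alone model in plain C; the opcode, handler, and lengths are invented for illustration and none of this is kernel code:

#include <stdio.h>

struct ev_entry {
	void (*func)(const void *data, unsigned short len);
	unsigned short min_len;
	unsigned short max_len;
};

static void example_evt(const void *data, unsigned short len)
{
	printf("event payload of %u bytes\n", (unsigned)len);
}

/* Sparse table: only initialized slots carry a handler, the rest stay
 * zeroed, so unknown opcodes fall out at the !e->func check below. */
static const struct ev_entry ev_table[256] = {
	[0x05] = { .func = example_evt, .min_len = 4, .max_len = 8 },
};

static void dispatch(unsigned char opcode, const void *data,
		     unsigned short len)
{
	const struct ev_entry *e = &ev_table[opcode];

	if (!e->func)
		return;		/* no handler registered */
	if (len < e->min_len)
		return;		/* truncated event: drop it */
	if (len > e->max_len)	/* oversized: warn but still parse */
		fprintf(stderr, "opcode 0x%02x longer than expected\n",
			(unsigned)opcode);
	e->func(data, len);
}

int main(void)
{
	unsigned char payload[6] = { 0 };

	dispatch(0x05, payload, sizeof(payload));	/* handled */
	dispatch(0x06, payload, sizeof(payload));	/* silently ignored */
	return 0;
}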
+#define HCI_EV_VL(_op, _func, _min_len, _max_len) \ +[_op] = { \ + .req = false, \ + .func = _func, \ + .min_len = _min_len, \ + .max_len = _max_len, \ +} + +#define HCI_EV(_op, _func, _len) \ + HCI_EV_VL(_op, _func, _len, _len) + +#define HCI_EV_STATUS(_op, _func) \ + HCI_EV(_op, _func, sizeof(struct hci_ev_status)) + +#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \ +[_op] = { \ + .req = true, \ + .func_req = _func, \ + .min_len = _min_len, \ + .max_len = _max_len, \ +} + +#define HCI_EV_REQ(_op, _func, _len) \ + HCI_EV_REQ_VL(_op, _func, _len, _len) + +/* Entries in this table shall have their position according to the event opcode + * they handle, so use of the macros above is recommended since they initialize + * each entry at its proper index using Designated Initializers; that way + * events without a callback function don't need an entry. + */
+static const struct hci_ev { + bool req; + union { + void (*func)(struct hci_dev *hdev, void *data, + struct sk_buff *skb); + void (*func_req)(struct hci_dev *hdev, void *data, + struct sk_buff *skb, u16 *opcode, u8 *status, + hci_req_complete_t *req_complete, + hci_req_complete_skb_t *req_complete_skb); + }; + u16 min_len; + u16 max_len; +} hci_ev_table[U8_MAX + 1] = { + /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */ + HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt), + /* [0x02 = HCI_EV_INQUIRY_RESULT] */ + HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt, + sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE), + /* [0x03 = HCI_EV_CONN_COMPLETE] */ + HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt, + sizeof(struct hci_ev_conn_complete)), + /* [0x04 = HCI_EV_CONN_REQUEST] */ + HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt, + sizeof(struct hci_ev_conn_request)), + /* [0x05 = HCI_EV_DISCONN_COMPLETE] */ + HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt, + sizeof(struct hci_ev_disconn_complete)), + /* [0x06 = HCI_EV_AUTH_COMPLETE] */ + HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt, + sizeof(struct hci_ev_auth_complete)), + /* [0x07 = HCI_EV_REMOTE_NAME] */ + HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt, + sizeof(struct hci_ev_remote_name)), + /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */ + HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt, + sizeof(struct hci_ev_encrypt_change)), + /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */ + HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE, + hci_change_link_key_complete_evt, + sizeof(struct hci_ev_change_link_key_complete)), + /* [0x0b = HCI_EV_REMOTE_FEATURES] */ + HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt, + sizeof(struct hci_ev_remote_features)), + /* [0x0e = HCI_EV_CMD_COMPLETE] */ + HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt, + sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE), + /* [0x0f = HCI_EV_CMD_STATUS] */ + HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt, + sizeof(struct hci_ev_cmd_status)), + /* [0x10 = HCI_EV_HARDWARE_ERROR] */ + HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt, + sizeof(struct hci_ev_hardware_error)), + /* [0x12 = HCI_EV_ROLE_CHANGE] */ + HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt, + sizeof(struct hci_ev_role_change)), + /* [0x13 = HCI_EV_NUM_COMP_PKTS] */ + HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt, + sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE), + /* [0x14 = HCI_EV_MODE_CHANGE] */ + HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt, + sizeof(struct hci_ev_mode_change)), + /* [0x16 = HCI_EV_PIN_CODE_REQ] */ + HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt, + sizeof(struct hci_ev_pin_code_req)), + /* [0x17 = HCI_EV_LINK_KEY_REQ] */ + HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt, + sizeof(struct hci_ev_link_key_req)), + /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */ + HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt, + sizeof(struct hci_ev_link_key_notify)), + /* [0x1c = HCI_EV_CLOCK_OFFSET] */ + HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt, + sizeof(struct hci_ev_clock_offset)), + /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */ + HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt, + sizeof(struct hci_ev_pkt_type_change)), + /* [0x20 = HCI_EV_PSCAN_REP_MODE] */ + HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt, + sizeof(struct hci_ev_pscan_rep_mode)), + /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */ + HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI, + hci_inquiry_result_with_rssi_evt, + sizeof(struct hci_ev_inquiry_result_rssi), + HCI_MAX_EVENT_SIZE), + /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */ + HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt, + sizeof(struct hci_ev_remote_ext_features)), + /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */ + HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt, + sizeof(struct hci_ev_sync_conn_complete)), + /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */ + HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT, + hci_extended_inquiry_result_evt, + sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE), + /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */ + HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt, + sizeof(struct hci_ev_key_refresh_complete)), + /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */ + HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt, + sizeof(struct hci_ev_io_capa_request)), + /* [0x32 = HCI_EV_IO_CAPA_REPLY] */ + HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt, + sizeof(struct hci_ev_io_capa_reply)), + /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */ + HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt, + sizeof(struct hci_ev_user_confirm_req)), + /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */ + HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt, + sizeof(struct hci_ev_user_passkey_req)), + /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */ + HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt, + sizeof(struct hci_ev_remote_oob_data_request)), + /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */ + HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt, + sizeof(struct hci_ev_simple_pair_complete)), + /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */ + HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt, + sizeof(struct hci_ev_user_passkey_notify)), + /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */ + HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt, + sizeof(struct hci_ev_keypress_notify)), + /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */ + HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt, + sizeof(struct hci_ev_remote_host_features)), + /* [0x3e = HCI_EV_LE_META] */ + HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt, + sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE), +#if IS_ENABLED(CONFIG_BT_HS) + /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */ + HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt, + sizeof(struct hci_ev_phy_link_complete)), + /* [0x41 = HCI_EV_CHANNEL_SELECTED] */ + HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt, + sizeof(struct hci_ev_channel_selected)), + /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */ + HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE, + hci_disconn_phylink_complete_evt, + sizeof(struct hci_ev_disconn_phy_link_complete)), + /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */ + HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt, + sizeof(struct hci_ev_logical_link_complete)), + /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */ + HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE, + hci_disconn_loglink_complete_evt, + sizeof(struct hci_ev_disconn_logical_link_complete)), +#endif + /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */ + HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt, + sizeof(struct hci_ev_num_comp_blocks)), + /* [0xff = HCI_EV_VENDOR] */ + HCI_EV(HCI_EV_VENDOR, msft_vendor_evt, 0), +}; +
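hci_event_func() below is the consumer of these entries: the req flag tells it which union member is valid. A reduced stand-alone model of that tagged-union dispatch, with invented handlers and no kernel types, might look like this:

#include <stdbool.h>
#include <stdio.h>

struct handler {
	bool req;
	union {
		void (*func)(const void *data);
		void (*func_req)(const void *data, unsigned short *opcode);
	};
};

static void plain_evt(const void *data)
{
	puts("plain event");
}

static void cmd_complete_evt(const void *data, unsigned short *opcode)
{
	*opcode = 0x0c03;	/* report which command completed */
}

static const struct handler table[] = {
	{ .req = false, .func = plain_evt },
	{ .req = true, .func_req = cmd_complete_evt },
};

static void run(unsigned int idx, const void *data, unsigned short *opcode)
{
	const struct handler *h = &table[idx];

	if (h->req)		/* the tag decides which member is live */
		h->func_req(data, opcode);
	else
		h->func(data);
}

int main(void)
{
	unsigned short opcode = 0;

	run(0, NULL, &opcode);
	run(1, NULL, &opcode);
	printf("completed opcode 0x%04x\n", opcode);
	return 0;
}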
+static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb, + u16 *opcode, u8 *status, + hci_req_complete_t *req_complete, + hci_req_complete_skb_t *req_complete_skb) +{ + const struct hci_ev *ev = &hci_ev_table[event]; + void *data; + + if (!ev->func) + return; + + if (skb->len < ev->min_len) { + bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u", + event, skb->len, ev->min_len); + return; + } + + /* Just warn if the length is over max_len, since it may still be + * possible to partially parse the event, so leave it to the callback + * to decide if that is acceptable. + */ + if (skb->len > ev->max_len) + bt_dev_warn(hdev, "unexpected event 0x%2.2x length: %u > %u", + event, skb->len, ev->max_len); + + data = hci_ev_skb_pull(hdev, skb, event, ev->min_len); + if (!data) + return; + + if (ev->req) + ev->func_req(hdev, data, skb, opcode, status, req_complete, + req_complete_skb); + else + ev->func(hdev, data, skb); +}
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_event_hdr *hdr = (void *) skb->data; hci_req_complete_t req_complete = NULL; hci_req_complete_skb_t req_complete_skb = NULL; struct sk_buff *orig_skb = NULL; - u8 status = 0, event = hdr->evt, req_evt = 0; + u8 status = 0, event, req_evt = 0; u16 opcode = HCI_OP_NOP; + if (skb->len < sizeof(*hdr)) { + bt_dev_err(hdev, "Malformed HCI Event"); + goto done; + } + + event = hdr->evt; if (!event) { - bt_dev_warn(hdev, "Received unexpected HCI Event 00000000"); + bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x", + event); goto done; } - if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) { - struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; - opcode = __le16_to_cpu(cmd_hdr->opcode); - hci_req_cmd_complete(hdev, opcode, status, &req_complete, - &req_complete_skb); + /* Only match event if command OGF is not for LE */ + if (hdev->sent_cmd && + hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 && + hci_skb_event(hdev->sent_cmd) == event) { + hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd), + status, &req_complete, &req_complete_skb); req_evt = event; } @@ -6444,191 +6882,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) /* Store wake reason if we're suspended */ hci_store_wake_reason(hdev, event, skb); - switch (event) { - case HCI_EV_INQUIRY_COMPLETE: - hci_inquiry_complete_evt(hdev, skb); - break; - - case HCI_EV_INQUIRY_RESULT: - hci_inquiry_result_evt(hdev, skb); - break; - - case HCI_EV_CONN_COMPLETE: - hci_conn_complete_evt(hdev, skb); - break; - - case HCI_EV_CONN_REQUEST: - hci_conn_request_evt(hdev, skb); - break; - - case HCI_EV_DISCONN_COMPLETE: - hci_disconn_complete_evt(hdev, skb); - break; - - case HCI_EV_AUTH_COMPLETE: - hci_auth_complete_evt(hdev, skb); - break; - - case HCI_EV_REMOTE_NAME: - hci_remote_name_evt(hdev, skb); - break; - - case HCI_EV_ENCRYPT_CHANGE: - hci_encrypt_change_evt(hdev, skb); - break; - - case HCI_EV_CHANGE_LINK_KEY_COMPLETE: - hci_change_link_key_complete_evt(hdev, skb); - break; - - case HCI_EV_REMOTE_FEATURES: - hci_remote_features_evt(hdev, skb); - break; - - case HCI_EV_CMD_COMPLETE: - hci_cmd_complete_evt(hdev, skb, &opcode, &status, - &req_complete, &req_complete_skb); - break; - - case HCI_EV_CMD_STATUS: - hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete, - &req_complete_skb); - break; - - case HCI_EV_HARDWARE_ERROR: - hci_hardware_error_evt(hdev, skb); - break; - - case HCI_EV_ROLE_CHANGE: - hci_role_change_evt(hdev, skb); - break; - - case HCI_EV_NUM_COMP_PKTS: - hci_num_comp_pkts_evt(hdev, skb); - break; - - case HCI_EV_MODE_CHANGE: - hci_mode_change_evt(hdev, skb); - break; + bt_dev_dbg(hdev, "event 0x%2.2x", event); - case HCI_EV_PIN_CODE_REQ: - hci_pin_code_request_evt(hdev, skb); - break; - 
case HCI_EV_LINK_KEY_REQ: - hci_link_key_request_evt(hdev, skb); - break; - - case HCI_EV_LINK_KEY_NOTIFY: - hci_link_key_notify_evt(hdev, skb); - break; - - case HCI_EV_CLOCK_OFFSET: - hci_clock_offset_evt(hdev, skb); - break; - - case HCI_EV_PKT_TYPE_CHANGE: - hci_pkt_type_change_evt(hdev, skb); - break; - - case HCI_EV_PSCAN_REP_MODE: - hci_pscan_rep_mode_evt(hdev, skb); - break; - - case HCI_EV_INQUIRY_RESULT_WITH_RSSI: - hci_inquiry_result_with_rssi_evt(hdev, skb); - break; - - case HCI_EV_REMOTE_EXT_FEATURES: - hci_remote_ext_features_evt(hdev, skb); - break; - - case HCI_EV_SYNC_CONN_COMPLETE: - hci_sync_conn_complete_evt(hdev, skb); - break; - - case HCI_EV_EXTENDED_INQUIRY_RESULT: - hci_extended_inquiry_result_evt(hdev, skb); - break; - - case HCI_EV_KEY_REFRESH_COMPLETE: - hci_key_refresh_complete_evt(hdev, skb); - break; - - case HCI_EV_IO_CAPA_REQUEST: - hci_io_capa_request_evt(hdev, skb); - break; - - case HCI_EV_IO_CAPA_REPLY: - hci_io_capa_reply_evt(hdev, skb); - break; - - case HCI_EV_USER_CONFIRM_REQUEST: - hci_user_confirm_request_evt(hdev, skb); - break; - - case HCI_EV_USER_PASSKEY_REQUEST: - hci_user_passkey_request_evt(hdev, skb); - break; - - case HCI_EV_USER_PASSKEY_NOTIFY: - hci_user_passkey_notify_evt(hdev, skb); - break; - - case HCI_EV_KEYPRESS_NOTIFY: - hci_keypress_notify_evt(hdev, skb); - break; - - case HCI_EV_SIMPLE_PAIR_COMPLETE: - hci_simple_pair_complete_evt(hdev, skb); - break; - - case HCI_EV_REMOTE_HOST_FEATURES: - hci_remote_host_features_evt(hdev, skb); - break; - - case HCI_EV_LE_META: - hci_le_meta_evt(hdev, skb); - break; - - case HCI_EV_REMOTE_OOB_DATA_REQUEST: - hci_remote_oob_data_request_evt(hdev, skb); - break; - -#if IS_ENABLED(CONFIG_BT_HS) - case HCI_EV_CHANNEL_SELECTED: - hci_chan_selected_evt(hdev, skb); - break; - - case HCI_EV_PHY_LINK_COMPLETE: - hci_phy_link_complete_evt(hdev, skb); - break; - - case HCI_EV_LOGICAL_LINK_COMPLETE: - hci_loglink_complete_evt(hdev, skb); - break; - - case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE: - hci_disconn_loglink_complete_evt(hdev, skb); - break; - - case HCI_EV_DISCONN_PHY_LINK_COMPLETE: - hci_disconn_phylink_complete_evt(hdev, skb); - break; -#endif - - case HCI_EV_NUM_COMP_BLOCKS: - hci_num_comp_blocks_evt(hdev, skb); - break; - - case HCI_EV_VENDOR: - msft_vendor_evt(hdev, skb); - break; - - default: - BT_DBG("%s event 0x%2.2x", hdev->name, event); - break; - } + hci_event_func(hdev, event, skb, &opcode, &status, &req_complete, + &req_complete_skb); if (req_complete) { req_complete(hdev, status, opcode); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 8b3205e4b23e..42c8047a9897 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -111,17 +111,6 @@ void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, } } -void hci_req_sync_cancel(struct hci_dev *hdev, int err) -{ - bt_dev_dbg(hdev, "err 0x%2.2x", err); - - if (hdev->req_status == HCI_REQ_PEND) { - hdev->req_result = err; - hdev->req_status = HCI_REQ_CANCELED; - wake_up_interruptible(&hdev->req_wait_q); - } -} - /* Execute request and wait for completion. 
*/ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, unsigned long opt), @@ -492,8 +481,8 @@ static int add_to_accept_list(struct hci_request *req, } /* During suspend, only wakeable devices can be in accept list */ - if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, - params->current_flags)) + if (hdev->suspended && + !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags)) return 0; *num_entries += 1; @@ -829,56 +818,6 @@ static void cancel_adv_timeout(struct hci_dev *hdev) } } -/* This function requires the caller holds hdev->lock */ -void __hci_req_pause_adv_instances(struct hci_request *req) -{ - bt_dev_dbg(req->hdev, "Pausing advertising instances"); - - /* Call to disable any advertisements active on the controller. - * This will succeed even if no advertisements are configured. - */ - __hci_req_disable_advertising(req); - - /* If we are using software rotation, pause the loop */ - if (!ext_adv_capable(req->hdev)) - cancel_adv_timeout(req->hdev); -} - -/* This function requires the caller holds hdev->lock */ -static void __hci_req_resume_adv_instances(struct hci_request *req) -{ - struct adv_info *adv; - - bt_dev_dbg(req->hdev, "Resuming advertising instances"); - - if (ext_adv_capable(req->hdev)) { - /* Call for each tracked instance to be re-enabled */ - list_for_each_entry(adv, &req->hdev->adv_instances, list) { - __hci_req_enable_ext_advertising(req, - adv->instance); - } - - } else { - /* Schedule for most recent instance to be restarted and begin - * the software rotation loop - */ - __hci_req_schedule_adv_instance(req, - req->hdev->cur_adv_instance, - true); - } -} - -/* This function requires the caller holds hdev->lock */ -int hci_req_resume_adv_instances(struct hci_dev *hdev) -{ - struct hci_request req; - - hci_req_init(&req, hdev); - __hci_req_resume_adv_instances(&req); - - return hci_req_run(&req, NULL); -} - static bool adv_cur_instance_is_scannable(struct hci_dev *hdev) { return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance); @@ -2703,7 +2642,7 @@ void hci_request_setup(struct hci_dev *hdev) void hci_request_cancel_all(struct hci_dev *hdev) { - hci_req_sync_cancel(hdev, ENODEV); + __hci_cmd_sync_cancel(hdev, ENODEV); cancel_work_sync(&hdev->discov_update); cancel_work_sync(&hdev->scan_update); diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index 5f8e8846ec74..7f8df258e295 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -64,7 +64,6 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, unsigned long opt), unsigned long opt, u32 timeout, u8 *hci_status); -void hci_req_sync_cancel(struct hci_dev *hdev, int err); struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param); @@ -81,8 +80,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req); void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next); void hci_req_disable_address_resolution(struct hci_dev *hdev); -void __hci_req_pause_adv_instances(struct hci_request *req); -int hci_req_resume_adv_instances(struct hci_dev *hdev); void hci_req_reenable_advertising(struct hci_dev *hdev); void __hci_req_enable_advertising(struct hci_request *req); void __hci_req_disable_advertising(struct hci_request *req); diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index ad86caf41f91..0feb68f12545 100644 --- a/net/bluetooth/hci_sync.c +++ 
b/net/bluetooth/hci_sync.c @@ -103,7 +103,7 @@ static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen, if (skb_queue_empty(&req->cmd_q)) bt_cb(skb)->hci.req_flags |= HCI_REQ_START; - bt_cb(skb)->hci.req_event = event; + hci_skb_event(skb) = event; skb_queue_tail(&req->cmd_q, skb); } @@ -313,11 +313,24 @@ static void hci_cmd_sync_work(struct work_struct *work) } } +static void hci_cmd_sync_cancel_work(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work); + + cancel_delayed_work_sync(&hdev->cmd_timer); + cancel_delayed_work_sync(&hdev->ncmd_timer); + atomic_set(&hdev->cmd_cnt, 1); + + wake_up_interruptible(&hdev->req_wait_q); +} + void hci_cmd_sync_init(struct hci_dev *hdev) { INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); INIT_LIST_HEAD(&hdev->cmd_sync_work_list); mutex_init(&hdev->cmd_sync_work_lock); + + INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); } void hci_cmd_sync_clear(struct hci_dev *hdev) @@ -335,6 +348,35 @@ void hci_cmd_sync_clear(struct hci_dev *hdev) } } +void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err) +{ + bt_dev_dbg(hdev, "err 0x%2.2x", err); + + if (hdev->req_status == HCI_REQ_PEND) { + hdev->req_result = err; + hdev->req_status = HCI_REQ_CANCELED; + + cancel_delayed_work_sync(&hdev->cmd_timer); + cancel_delayed_work_sync(&hdev->ncmd_timer); + atomic_set(&hdev->cmd_cnt, 1); + + wake_up_interruptible(&hdev->req_wait_q); + } +} + +void hci_cmd_sync_cancel(struct hci_dev *hdev, int err) +{ + bt_dev_dbg(hdev, "err 0x%2.2x", err); + + if (hdev->req_status == HCI_REQ_PEND) { + hdev->req_result = err; + hdev->req_status = HCI_REQ_CANCELED; + + queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work); + } +} +EXPORT_SYMBOL(hci_cmd_sync_cancel); + int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy) { @@ -1580,8 +1622,40 @@ done: sizeof(cp), &cp, HCI_CMD_TIMEOUT); } +/* Set Device Privacy Mode. */ +static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, + struct hci_conn_params *params) +{ + struct hci_cp_le_set_privacy_mode cp; + struct smp_irk *irk; + + /* If device privacy mode has already been set there is nothing to do */ + if (params->privacy_mode == HCI_DEVICE_PRIVACY) + return 0; + + /* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also + * indicates that LL Privacy has been enabled and + * HCI_OP_LE_SET_PRIVACY_MODE is supported. + */ + if (!test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, params->flags)) + return 0; + + irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type); + if (!irk) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.bdaddr_type = irk->addr_type; + bacpy(&cp.bdaddr, &irk->bdaddr); + cp.mode = HCI_DEVICE_PRIVACY; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} +
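hci_le_set_privacy_mode_sync() above keys off bits in params->flags, which this series treats as a proper bitmap accessed with set_bit()/test_bit() rather than the old current_flags integer. A self-contained userspace model of that access pattern, where the helpers imitate the kernel bitops and the flag names are illustrative:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

enum { CONN_FLAG_REMOTE_WAKEUP, CONN_FLAG_DEVICE_PRIVACY, CONN_FLAG_MAX };

static void set_flag(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static bool test_flag(int nr, const unsigned long *map)
{
	return map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

int main(void)
{
	/* one bitmap word is enough for CONN_FLAG_MAX flags */
	unsigned long flags[BITS_TO_LONGS(CONN_FLAG_MAX)] = { 0 };

	set_flag(CONN_FLAG_DEVICE_PRIVACY, flags);

	printf("wakeup=%d privacy=%d\n",
	       test_flag(CONN_FLAG_REMOTE_WAKEUP, flags),
	       test_flag(CONN_FLAG_DEVICE_PRIVACY, flags)); /* wakeup=0 privacy=1 */
	return 0;
}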
 /* Adds connection to allow list if needed, if the device uses RPA (has IRK) - * this attempts to program the device in the resolving list as well. + * this attempts to program the device in the resolving list as well as + * properly set the privacy mode. + */ static int hci_le_add_accept_list_sync(struct hci_dev *hdev, struct hci_conn_params *params, @@ -1590,11 +1664,6 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev, struct hci_cp_le_add_to_accept_list cp; int err; - /* Already in accept list */ - if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr, - params->addr_type)) - return 0; - /* Select filter policy to accept all advertising */ if (*num_entries >= hdev->le_accept_list_size) return -ENOSPC; @@ -1606,8 +1675,8 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev, } /* During suspend, only wakeable devices can be in acceptlist */ - if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, - params->current_flags)) + if (hdev->suspended && + !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags)) return 0; /* Attempt to program the device in the resolving list first to avoid @@ -1620,6 +1689,18 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev, return err; } + /* Set Privacy Mode */ + err = hci_le_set_privacy_mode_sync(hdev, params); + if (err) { + bt_dev_err(hdev, "Unable to set privacy mode: %d", err); + return err; + } + + /* Check if already in accept list */ + if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr, + params->addr_type)) + return 0; + *num_entries += 1; cp.bdaddr_type = params->addr_type; bacpy(&cp.bdaddr, &params->addr); @@ -1645,10 +1726,8 @@ static int hci_pause_advertising_sync(struct hci_dev *hdev) int err; int old_state; - /* If there are no instances or advertising has already been paused - * there is nothing to do. - */ - if (!hdev->adv_instance_cnt || hdev->advertising_paused) + /* If advertising has already been paused there is nothing to do. */ + if (hdev->advertising_paused) return 0; bt_dev_dbg(hdev, "Pausing directed advertising"); @@ -3283,7 +3362,8 @@ static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) /* Read LE Min/Max Tx Power*/ static int hci_le_read_tx_power_sync(struct hci_dev *hdev) { - if (!(hdev->commands[38] & 0x80)) + if (!(hdev->commands[38] & 0x80) || + test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, @@ -4749,8 +4829,7 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev) hci_clear_event_filter_sync(hdev); list_for_each_entry(b, &hdev->accept_list, list) { - if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, - b->current_flags)) + if (!test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, b->flags)) continue; bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); @@ -4920,3 +4999,283 @@ int hci_resume_sync(struct hci_dev *hdev) return 0; } + +static bool conn_use_rpa(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + return hci_dev_test_flag(hdev, HCI_PRIVACY); +} + +static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, + struct hci_conn *conn) +{ + struct hci_cp_le_set_ext_adv_params cp; + int err; + bdaddr_t random_addr; + u8 own_addr_type; + + err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), + &own_addr_type); + if (err) + return err; + + /* Set require_privacy to false so that the remote device has a + * chance of identifying us. 
+ */ + err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL, + &own_addr_type, &random_addr); + if (err) + return err; + + memset(&cp, 0, sizeof(cp)); + + cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND); + cp.own_addr_type = own_addr_type; + cp.channel_map = hdev->le_adv_channel_map; + cp.tx_power = HCI_TX_POWER_INVALID; + cp.primary_phy = HCI_ADV_PHY_1M; + cp.secondary_phy = HCI_ADV_PHY_1M; + cp.handle = 0x00; /* Use instance 0 for directed adv */ + cp.own_addr_type = own_addr_type; + cp.peer_addr_type = conn->dst_type; + bacpy(&cp.peer_addr, &conn->dst); + + /* As per Core Spec 5.2 Vol 2, PART E, Sec 7.8.53, since + * advertising_event_property LE_LEGACY_ADV_DIRECT_IND + * does not support advertising data when the advertising set already + * contains some, the controller shall return error code 'Invalid + * HCI Command Parameters' (0x12). + * So it is required to remove the adv set for handle 0x00, since we + * use instance 0 for directed adv. + */ + err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL); + if (err) + return err; + + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (err) + return err; + + /* Check if random address needs to be updated */ + if (own_addr_type == ADDR_LE_DEV_RANDOM && + bacmp(&random_addr, BDADDR_ANY) && + bacmp(&random_addr, &hdev->random_addr)) { + err = hci_set_adv_set_random_addr_sync(hdev, 0x00, + &random_addr); + if (err) + return err; + } + + return hci_enable_ext_advertising_sync(hdev, 0x00); +} + +static int hci_le_directed_advertising_sync(struct hci_dev *hdev, + struct hci_conn *conn) +{ + struct hci_cp_le_set_adv_param cp; + u8 status; + u8 own_addr_type; + u8 enable; + + if (ext_adv_capable(hdev)) + return hci_le_ext_directed_advertising_sync(hdev, conn); + + /* Clear the HCI_LE_ADV bit temporarily so that the + * hci_update_random_address knows that it's safe to go ahead + * and write a new random address. The flag will be set back on + * as soon as the SET_ADV_ENABLE HCI command completes. + */ + hci_dev_clear_flag(hdev, HCI_LE_ADV); + + /* Set require_privacy to false so that the remote device has a + * chance of identifying us. + */ + status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), + &own_addr_type); + if (status) + return status; + + memset(&cp, 0, sizeof(cp)); + + /* Some controllers might reject the command if intervals are not + * within range for undirected advertising. + * BCM20702A0 is known to be affected by this. 
+ */ + cp.min_interval = cpu_to_le16(0x0020); + cp.max_interval = cpu_to_le16(0x0020); + + cp.type = LE_ADV_DIRECT_IND; + cp.own_address_type = own_addr_type; + cp.direct_addr_type = conn->dst_type; + bacpy(&cp.direct_addr, &conn->dst); + cp.channel_map = hdev->le_adv_channel_map; + + status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (status) + return status; + + enable = 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, + sizeof(enable), &enable, HCI_CMD_TIMEOUT); +} + +static void set_ext_conn_params(struct hci_conn *conn, + struct hci_cp_le_ext_conn_param *p) +{ + struct hci_dev *hdev = conn->hdev; + + memset(p, 0, sizeof(*p)); + + p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect); + p->scan_window = cpu_to_le16(hdev->le_scan_window_connect); + p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); + p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); + p->conn_latency = cpu_to_le16(conn->le_conn_latency); + p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout); + p->min_ce_len = cpu_to_le16(0x0000); + p->max_ce_len = cpu_to_le16(0x0000); +} + +int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 own_addr_type) +{ + struct hci_cp_le_ext_create_conn *cp; + struct hci_cp_le_ext_conn_param *p; + u8 data[sizeof(*cp) + sizeof(*p) * 3]; + u32 plen; + + cp = (void *)data; + p = (void *)cp->data; + + memset(cp, 0, sizeof(*cp)); + + bacpy(&cp->peer_addr, &conn->dst); + cp->peer_addr_type = conn->dst_type; + cp->own_addr_type = own_addr_type; + + plen = sizeof(*cp); + + if (scan_1m(hdev)) { + cp->phys |= LE_SCAN_PHY_1M; + set_ext_conn_params(conn, p); + + p++; + plen += sizeof(*p); + } + + if (scan_2m(hdev)) { + cp->phys |= LE_SCAN_PHY_2M; + set_ext_conn_params(conn, p); + + p++; + plen += sizeof(*p); + } + + if (scan_coded(hdev)) { + cp->phys |= LE_SCAN_PHY_CODED; + set_ext_conn_params(conn, p); + + plen += sizeof(*p); + } + + return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN, + plen, data, + HCI_EV_LE_ENHANCED_CONN_COMPLETE, + HCI_CMD_TIMEOUT, NULL); +} + +int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + struct hci_cp_le_create_conn cp; + struct hci_conn_params *params; + u8 own_addr_type; + int err; + + /* If requested to connect as peripheral use directed advertising */ + if (conn->role == HCI_ROLE_SLAVE) { + /* If we're active scanning and simultaneous roles is not + * enabled simply reject the attempt. + */ + if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && + hdev->le_scan_type == LE_SCAN_ACTIVE && + !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) { + hci_conn_del(conn); + return -EBUSY; + } + + /* Pause advertising while doing directed advertising. */ + hci_pause_advertising_sync(hdev); + + err = hci_le_directed_advertising_sync(hdev, conn); + goto done; + } + + /* Disable advertising if simultaneous roles is not in use. 
+	 */
+	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
+		hci_pause_advertising_sync(hdev);
+
+	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+	if (params) {
+		conn->le_conn_min_interval = params->conn_min_interval;
+		conn->le_conn_max_interval = params->conn_max_interval;
+		conn->le_conn_latency = params->conn_latency;
+		conn->le_supv_timeout = params->supervision_timeout;
+	} else {
+		conn->le_conn_min_interval = hdev->le_conn_min_interval;
+		conn->le_conn_max_interval = hdev->le_conn_max_interval;
+		conn->le_conn_latency = hdev->le_conn_latency;
+		conn->le_supv_timeout = hdev->le_supv_timeout;
+	}
+
+	/* If the controller is scanning, we stop it since some controllers
+	 * are not able to scan and connect at the same time. Also set the
+	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
+	 * handler for scan disabling knows to set the correct discovery
+	 * state.
+	 */
+	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
+		hci_scan_disable_sync(hdev);
+		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
+	}
+
+	/* Update the random address, but set require_privacy to false so
+	 * that we never connect with a non-resolvable address.
+	 */
+	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
+					     &own_addr_type);
+	if (err)
+		goto done;
+
+	if (use_ext_conn(hdev)) {
+		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
+		goto done;
+	}
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
+	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
+
+	bacpy(&cp.peer_addr, &conn->dst);
+	cp.peer_addr_type = conn->dst_type;
+	cp.own_address_type = own_addr_type;
+	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
+	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
+	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
+	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
+	cp.min_ce_len = cpu_to_le16(0x0000);
+	cp.max_ce_len = cpu_to_le16(0x0000);
+
+	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
+				       sizeof(cp), &cp, HCI_EV_LE_CONN_COMPLETE,
+				       HCI_CMD_TIMEOUT, NULL);
+
+done:
+	/* Re-enable advertising after the connection attempt is finished. */
+	hci_resume_advertising_sync(hdev);
+	return err;
+}
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 4f8f37599962..e817ff0607a0 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7905,7 +7905,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
 			hcon = hci_connect_le(hdev, dst, dst_type, false,
 					      chan->sec_level,
 					      HCI_LE_CONN_TIMEOUT,
-					      HCI_ROLE_SLAVE, NULL);
+					      HCI_ROLE_SLAVE);
 		else
 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
 						   chan->sec_level,
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index dc50737b785b..188e4d4813b0 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -162,7 +162,11 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 		break;
 	}
-	if (chan->psm && bdaddr_type_is_le(chan->src_type))
+	/* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and
+	 * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set.
+ */ + if (chan->psm && bdaddr_type_is_le(chan->src_type) && + chan->mode != L2CAP_MODE_EXT_FLOWCTL) chan->mode = L2CAP_MODE_LE_FLOWCTL; chan->state = BT_BOUND; @@ -256,7 +260,11 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, return -EINVAL; } - if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode) + /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and + * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set. + */ + if (chan->psm && bdaddr_type_is_le(chan->src_type) && + chan->mode != L2CAP_MODE_EXT_FLOWCTL) chan->mode = L2CAP_MODE_LE_FLOWCTL; l2cap_sock_init_pid(sk); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index f8f74d344297..37087cf7dc5a 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -335,6 +335,12 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len, HCI_SOCK_TRUSTED, skip_sk); } +static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk) +{ + return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED, + skip_sk); +} + static u8 le_addr_type(u8 mgmt_addr_type) { if (mgmt_addr_type == BDADDR_LE_PUBLIC) @@ -3876,7 +3882,7 @@ static const u8 offload_codecs_uuid[16] = { }; /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */ -static const u8 simult_central_periph_uuid[16] = { +static const u8 le_simultaneous_roles_uuid[16] = { 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92, 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67, }; @@ -3909,16 +3915,13 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, } #endif - if (hdev) { - if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && - (hdev->le_states[4] & 0x08) && /* Central */ - (hdev->le_states[4] & 0x40) && /* Peripheral */ - (hdev->le_states[3] & 0x10)) /* Simultaneous */ + if (hdev && hci_dev_le_state_simultaneous(hdev)) { + if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) flags = BIT(0); else flags = 0; - memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16); + memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16); rp->features[idx].flags = cpu_to_le32(flags); idx++; } @@ -3978,35 +3981,24 @@ static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev, memcpy(ev.uuid, rpa_resolution_uuid, 16); ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1)); + if (enabled && privacy_mode_capable(hdev)) + set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags); + else + clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags); + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev, sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS, skip); } -#ifdef CONFIG_BT_FEATURE_DEBUG -static int exp_debug_feature_changed(bool enabled, struct sock *skip) +static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid, + bool enabled, struct sock *skip) { struct mgmt_ev_exp_feature_changed ev; memset(&ev, 0, sizeof(ev)); - memcpy(ev.uuid, debug_uuid, 16); - ev.flags = cpu_to_le32(enabled ? BIT(0) : 0); - - return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL, - &ev, sizeof(ev), - HCI_MGMT_EXP_FEATURE_EVENTS, skip); -} -#endif - -static int exp_quality_report_feature_changed(bool enabled, - struct hci_dev *hdev, - struct sock *skip) -{ - struct mgmt_ev_exp_feature_changed ev; - - memset(&ev, 0, sizeof(ev)); - memcpy(ev.uuid, quality_report_uuid, 16); + memcpy(ev.uuid, uuid, 16); ev.flags = cpu_to_le32(enabled ? 
BIT(0) : 0); return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, @@ -4036,17 +4028,18 @@ static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev, bt_dbg_set(false); if (changed) - exp_debug_feature_changed(false, sk); + exp_feature_changed(NULL, ZERO_KEY, false, sk); } #endif if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) { - bool changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY); - - hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); + bool changed; + changed = hci_dev_test_and_clear_flag(hdev, + HCI_ENABLE_LL_PRIVACY); if (changed) - exp_ll_privacy_feature_changed(false, hdev, sk); + exp_feature_changed(hdev, rpa_resolution_uuid, false, + sk); } hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); @@ -4097,7 +4090,7 @@ static int set_debug_func(struct sock *sk, struct hci_dev *hdev, &rp, sizeof(rp)); if (changed) - exp_debug_feature_changed(val, sk); + exp_feature_changed(hdev, debug_uuid, val, sk); return err; } @@ -4139,15 +4132,15 @@ static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev, val = !!cp->param[0]; if (val) { - changed = !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY); - hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY); + changed = !hci_dev_test_and_set_flag(hdev, + HCI_ENABLE_LL_PRIVACY); hci_dev_clear_flag(hdev, HCI_ADVERTISING); /* Enable LL privacy + supported settings changed */ flags = BIT(0) | BIT(1); } else { - changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY); - hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); + changed = hci_dev_test_and_clear_flag(hdev, + HCI_ENABLE_LL_PRIVACY); /* Disable LL privacy + supported settings changed */ flags = BIT(1); @@ -4235,27 +4228,13 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev, &rp, sizeof(rp)); if (changed) - exp_quality_report_feature_changed(val, hdev, sk); + exp_feature_changed(hdev, quality_report_uuid, val, sk); unlock_quality_report: hci_req_sync_unlock(hdev); return err; } -static int exp_offload_codec_feature_changed(bool enabled, struct hci_dev *hdev, - struct sock *skip) -{ - struct mgmt_ev_exp_feature_changed ev; - - memset(&ev, 0, sizeof(ev)); - memcpy(ev.uuid, offload_codecs_uuid, 16); - ev.flags = cpu_to_le32(enabled ? 
BIT(0) : 0); - - return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, - &ev, sizeof(ev), - HCI_MGMT_EXP_FEATURE_EVENTS, skip); -} - static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len) @@ -4309,7 +4288,65 @@ static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev, &rp, sizeof(rp)); if (changed) - exp_offload_codec_feature_changed(val, hdev, sk); + exp_feature_changed(hdev, offload_codecs_uuid, val, sk); + + return err; +} + +static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, + u16 data_len) +{ + bool val, changed; + int err; + struct mgmt_rp_set_exp_feature rp; + + /* Command requires to use a valid controller index */ + if (!hdev) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + val = !!cp->param[0]; + changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)); + + if (!hci_dev_le_state_simultaneous(hdev)) { + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_NOT_SUPPORTED); + } + + if (changed) { + if (val) + hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES); + else + hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES); + } + + bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d", + val, changed); + + memcpy(rp.uuid, le_simultaneous_roles_uuid, 16); + rp.flags = cpu_to_le32(val ? 
BIT(0) : 0); + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk); return err; } @@ -4326,6 +4363,7 @@ static const struct mgmt_exp_feature { EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func), EXP_FEAT(quality_report_uuid, set_quality_report_func), EXP_FEAT(offload_codecs_uuid, set_offload_codec_func), + EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func), /* end with a null feature */ EXP_FEAT(NULL, NULL) @@ -4349,8 +4387,6 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, MGMT_STATUS_NOT_SUPPORTED); } -#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1) - static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { @@ -4358,7 +4394,7 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, struct mgmt_rp_get_device_flags rp; struct bdaddr_list_with_flags *br_params; struct hci_conn_params *params; - u32 supported_flags = SUPPORTED_DEVICE_FLAGS(); + u32 supported_flags; u32 current_flags = 0; u8 status = MGMT_STATUS_INVALID_PARAMS; @@ -4367,6 +4403,9 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_lock(hdev); + bitmap_to_arr32(&supported_flags, hdev->conn_flags, + __HCI_CONN_NUM_FLAGS); + memset(&rp, 0, sizeof(rp)); if (cp->addr.type == BDADDR_BREDR) { @@ -4376,7 +4415,8 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, if (!br_params) goto done; - current_flags = br_params->current_flags; + bitmap_to_arr32(¤t_flags, br_params->flags, + __HCI_CONN_NUM_FLAGS); } else { params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, le_addr_type(cp->addr.type)); @@ -4384,7 +4424,8 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, if (!params) goto done; - current_flags = params->current_flags; + bitmap_to_arr32(¤t_flags, params->flags, + __HCI_CONN_NUM_FLAGS); } bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); @@ -4422,13 +4463,16 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, struct bdaddr_list_with_flags *br_params; struct hci_conn_params *params; u8 status = MGMT_STATUS_INVALID_PARAMS; - u32 supported_flags = SUPPORTED_DEVICE_FLAGS(); + u32 supported_flags; u32 current_flags = __le32_to_cpu(cp->current_flags); bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x", &cp->addr.bdaddr, cp->addr.type, __le32_to_cpu(current_flags)); + bitmap_to_arr32(&supported_flags, hdev->conn_flags, + __HCI_CONN_NUM_FLAGS); + if ((supported_flags | current_flags) != supported_flags) { bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)", current_flags, supported_flags); @@ -4443,7 +4487,7 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, cp->addr.type); if (br_params) { - br_params->current_flags = current_flags; + bitmap_from_u64(br_params->flags, current_flags); status = MGMT_STATUS_SUCCESS; } else { bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)", @@ -4453,8 +4497,15 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, le_addr_type(cp->addr.type)); if (params) { - params->current_flags = current_flags; + bitmap_from_u64(params->flags, current_flags); status = MGMT_STATUS_SUCCESS; + + /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY + * has been set. 
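+ * The passive scan update reprograms the controller's privacy mode
+ * for that device.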
+ */ + if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, + params->flags)) + hci_update_passive_scan(hdev); } else { bt_dev_warn(hdev, "No such LE device %pMR (0x%x)", &cp->addr.bdaddr, @@ -6979,6 +7030,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, struct hci_conn_params *params; int err; u32 current_flags = 0; + u32 supported_flags; bt_dev_dbg(hdev, "sock %p", sk); @@ -7050,7 +7102,8 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type); if (params) - current_flags = params->current_flags; + bitmap_to_arr32(¤t_flags, params->flags, + __HCI_CONN_NUM_FLAGS); } err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL); @@ -7059,8 +7112,10 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, added: device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); + bitmap_to_arr32(&supported_flags, hdev->conn_flags, + __HCI_CONN_NUM_FLAGS); device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type, - SUPPORTED_DEVICE_FLAGS(), current_flags); + supported_flags, current_flags); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_SUCCESS, &cp->addr, @@ -8999,11 +9054,19 @@ void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, u8 *name, u8 name_len) { - char buf[512]; - struct mgmt_ev_device_connected *ev = (void *) buf; + struct sk_buff *skb; + struct mgmt_ev_device_connected *ev; u16 eir_len = 0; u32 flags = 0; + if (conn->le_adv_data_len > 0) + skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED, + conn->le_adv_data_len); + else + skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED, + 2 + name_len + 5); + + ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, &conn->dst); ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type); @@ -9017,24 +9080,26 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, * adding any BR/EDR data to the LE adv. */ if (conn->le_adv_data_len > 0) { - memcpy(&ev->eir[eir_len], - conn->le_adv_data, conn->le_adv_data_len); + skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len); eir_len = conn->le_adv_data_len; } else { - if (name_len > 0) + if (name_len > 0) { eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name, name_len); + skb_put(skb, eir_len); + } - if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) + if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) { eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, conn->dev_class, 3); + skb_put(skb, 5); + } } ev->eir_len = cpu_to_le16(eir_len); - mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf, - sizeof(*ev) + eir_len, NULL); + mgmt_event_skb(skb, NULL); } static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data) @@ -9528,9 +9593,8 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) { - char buf[512]; - struct mgmt_ev_device_found *ev = (void *)buf; - size_t ev_size; + struct sk_buff *skb; + struct mgmt_ev_device_found *ev; /* Don't send events for a non-kernel initiated discovery. With * LE one exception is if we have pend_le_reports > 0 in which @@ -9565,13 +9629,13 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, } } - /* Make sure that the buffer is big enough. The 5 extra bytes - * are for the potential CoD field. 
- */ - if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf)) + /* Allocate skb. The 5 extra bytes are for the potential CoD field */ + skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, + sizeof(*ev) + eir_len + scan_rsp_len + 5); + if (!skb) return; - memset(buf, 0, sizeof(buf)); + ev = skb_put(skb, sizeof(*ev)); /* In case of device discovery with BR/EDR devices (pre 1.2), the * RSSI value was reported as 0 when not available. This behavior @@ -9592,44 +9656,57 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, if (eir_len > 0) /* Copy EIR or advertising data into event */ - memcpy(ev->eir, eir, eir_len); + skb_put_data(skb, eir, eir_len); - if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, - NULL)) - eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, - dev_class, 3); + if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) { + u8 eir_cod[5]; + + eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV, + dev_class, 3); + skb_put_data(skb, eir_cod, sizeof(eir_cod)); + } if (scan_rsp_len > 0) /* Append scan response data to event */ - memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len); + skb_put_data(skb, scan_rsp, scan_rsp_len); ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); - ev_size = sizeof(*ev) + eir_len + scan_rsp_len; - mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL); + mgmt_event_skb(skb, NULL); } void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, s8 rssi, u8 *name, u8 name_len) { + struct sk_buff *skb; struct mgmt_ev_device_found *ev; - char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2]; u16 eir_len; + u32 flags; - ev = (struct mgmt_ev_device_found *) buf; - - memset(buf, 0, sizeof(buf)); + if (name_len) + skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 2 + name_len); + else + skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 0); + ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, bdaddr); ev->addr.type = link_to_bdaddr(link_type, addr_type); ev->rssi = rssi; - eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name, - name_len); + if (name) { + eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name, + name_len); + flags = 0; + skb_put(skb, eir_len); + } else { + eir_len = 0; + flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED; + } ev->eir_len = cpu_to_le16(eir_len); + ev->flags = cpu_to_le32(flags); - mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL); + mgmt_event_skb(skb, NULL); } void mgmt_discovering(struct hci_dev *hdev, u8 discovering) diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c index 83875f2a0604..edee60bbc7b4 100644 --- a/net/bluetooth/mgmt_util.c +++ b/net/bluetooth/mgmt_util.c @@ -56,40 +56,72 @@ static struct sk_buff *create_monitor_ctrl_event(__le16 index, u32 cookie, return skb; } -int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, - void *data, u16 data_len, int flag, struct sock *skip_sk) +struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode, + unsigned int size) { struct sk_buff *skb; - struct mgmt_hdr *hdr; - skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL); + skb = alloc_skb(sizeof(struct mgmt_hdr) + size, GFP_KERNEL); if (!skb) - return -ENOMEM; + return skb; - hdr = skb_put(skb, sizeof(*hdr)); - hdr->opcode = cpu_to_le16(event); - if (hdev) - hdr->index = cpu_to_le16(hdev->id); - else - hdr->index = cpu_to_le16(MGMT_INDEX_NONE); - hdr->len = cpu_to_le16(data_len); + skb_reserve(skb, sizeof(struct mgmt_hdr)); + bt_cb(skb)->mgmt.hdev = hdev; + 
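/* Stash hdev and opcode; mgmt_send_event_skb() builds the mgmt_hdr from them at send time. */ +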
bt_cb(skb)->mgmt.opcode = opcode; - if (data) - skb_put_data(skb, data, data_len); + return skb; +} + +int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag, + struct sock *skip_sk) +{ + struct hci_dev *hdev; + struct mgmt_hdr *hdr; + int len = skb->len; + + if (!skb) + return -EINVAL; + + hdev = bt_cb(skb)->mgmt.hdev; /* Time stamp */ __net_timestamp(skb); - hci_send_to_channel(channel, skb, flag, skip_sk); - + /* Send just the data, without headers, to the monitor */ if (channel == HCI_CHANNEL_CONTROL) - hci_send_monitor_ctrl_event(hdev, event, data, data_len, + hci_send_monitor_ctrl_event(hdev, bt_cb(skb)->mgmt.opcode, + skb->data, skb->len, skb_get_ktime(skb), flag, skip_sk); + hdr = skb_push(skb, sizeof(*hdr)); + hdr->opcode = cpu_to_le16(bt_cb(skb)->mgmt.opcode); + if (hdev) + hdr->index = cpu_to_le16(hdev->id); + else + hdr->index = cpu_to_le16(MGMT_INDEX_NONE); + hdr->len = cpu_to_le16(len); + + hci_send_to_channel(channel, skb, flag, skip_sk); + kfree_skb(skb); return 0; } +int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, + void *data, u16 data_len, int flag, struct sock *skip_sk) +{ + struct sk_buff *skb; + + skb = mgmt_alloc_skb(hdev, event, data_len); + if (!skb) + return -ENOMEM; + + if (data) + skb_put_data(skb, data, data_len); + + return mgmt_send_event_skb(channel, skb, flag, skip_sk); +} + int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) { struct sk_buff *skb, *mskb; diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h index 63b965eaaaac..98e40395a383 100644 --- a/net/bluetooth/mgmt_util.h +++ b/net/bluetooth/mgmt_util.h @@ -32,6 +32,10 @@ struct mgmt_pending_cmd { int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status); }; +struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode, + unsigned int size); +int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag, + struct sock *skip_sk); int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, void *data, u16 data_len, int flag, struct sock *skip_sk); int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status); diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c index 1122097e1e49..6a943634b31a 100644 --- a/net/bluetooth/msft.c +++ b/net/bluetooth/msft.c @@ -590,7 +590,7 @@ void msft_unregister(struct hci_dev *hdev) kfree(msft); } -void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) +void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct msft_data *msft = hdev->msft_data; u8 event; diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h index b59b63dc0ea8..afcaf7d3b1cb 100644 --- a/net/bluetooth/msft.h +++ b/net/bluetooth/msft.h @@ -17,7 +17,7 @@ void msft_register(struct hci_dev *hdev); void msft_unregister(struct hci_dev *hdev); void msft_do_open(struct hci_dev *hdev); void msft_do_close(struct hci_dev *hdev); -void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb); +void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb); __u64 msft_get_features(struct hci_dev *hdev); int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor); int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, @@ -39,7 +39,8 @@ static inline void msft_register(struct hci_dev *hdev) {} static inline void msft_unregister(struct hci_dev *hdev) {} static inline void msft_do_open(struct hci_dev *hdev) {} static inline void msft_do_close(struct hci_dev *hdev) {} -static inline void 
msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {} +static inline void msft_vendor_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) {} static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; } static inline int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor) diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 9b54d7d0bfc4..f213ed108361 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -103,37 +103,56 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd) return ret; } +#define BR_UARGS_MAX 4 +static int br_dev_read_uargs(unsigned long *args, size_t nr_args, + void __user **argp, void __user *data) +{ + int ret; + + if (nr_args < 2 || nr_args > BR_UARGS_MAX) + return -EINVAL; + + if (in_compat_syscall()) { + unsigned int cargs[BR_UARGS_MAX]; + int i; + + ret = copy_from_user(cargs, data, nr_args * sizeof(*cargs)); + if (ret) + goto fault; + + for (i = 0; i < nr_args; ++i) + args[i] = cargs[i]; + + *argp = compat_ptr(args[1]); + } else { + ret = copy_from_user(args, data, nr_args * sizeof(*args)); + if (ret) + goto fault; + *argp = (void __user *)args[1]; + } + + return 0; +fault: + return -EFAULT; +} + /* * Legacy ioctl's through SIOCDEVPRIVATE * This interface is deprecated because it was too difficult * to do the translation for 32/64bit ioctl compatibility. */ -int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd) +int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd) { struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p = NULL; unsigned long args[4]; void __user *argp; - int ret = -EOPNOTSUPP; - - if (in_compat_syscall()) { - unsigned int cargs[4]; - - if (copy_from_user(cargs, data, sizeof(cargs))) - return -EFAULT; - - args[0] = cargs[0]; - args[1] = cargs[1]; - args[2] = cargs[2]; - args[3] = cargs[3]; - - argp = compat_ptr(args[1]); - } else { - if (copy_from_user(args, data, sizeof(args))) - return -EFAULT; + int ret; - argp = (void __user *)args[1]; - } + ret = br_dev_read_uargs(args, ARRAY_SIZE(args), &argp, data); + if (ret) + return ret; switch (args[0]) { case BRCTL_ADD_IF: @@ -302,6 +321,9 @@ int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user case BRCTL_GET_FDB_ENTRIES: return get_fdb_entries(br, argp, args[2], args[3]); + + default: + ret = -EOPNOTSUPP; } if (!ret) { @@ -314,12 +336,15 @@ int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user return ret; } -static int old_deviceless(struct net *net, void __user *uarg) +static int old_deviceless(struct net *net, void __user *data) { unsigned long args[3]; + void __user *argp; + int ret; - if (copy_from_user(args, uarg, sizeof(args))) - return -EFAULT; + ret = br_dev_read_uargs(args, ARRAY_SIZE(args), &argp, data); + if (ret) + return ret; switch (args[0]) { case BRCTL_GET_VERSION: @@ -338,7 +363,7 @@ static int old_deviceless(struct net *net, void __user *uarg) args[2] = get_bridge_ifindices(net, indices, args[2]); - ret = copy_to_user(uarg, indices, + ret = copy_to_user(argp, indices, array_size(args[2], sizeof(int))) ? 
-EFAULT : args[2]; @@ -354,7 +379,7 @@ static int old_deviceless(struct net *net, void __user *uarg) if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; - if (copy_from_user(buf, (void __user *)args[1], IFNAMSIZ)) + if (copy_from_user(buf, argp, IFNAMSIZ)) return -EFAULT; buf[IFNAMSIZ-1] = 0; diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index f3d751105343..de2409889489 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -4522,6 +4522,38 @@ int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx, } #endif +void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx, + unsigned long val) +{ + unsigned long intvl_jiffies = clock_t_to_jiffies(val); + + if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) { + br_info(brmctx->br, + "trying to set multicast query interval below minimum, setting to %lu (%ums)\n", + jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN), + jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN)); + intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN; + } + + brmctx->multicast_query_interval = intvl_jiffies; +} + +void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx, + unsigned long val) +{ + unsigned long intvl_jiffies = clock_t_to_jiffies(val); + + if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) { + br_info(brmctx->br, + "trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n", + jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN), + jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN)); + intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN; + } + + brmctx->multicast_startup_query_interval = intvl_jiffies; +} + /** * br_multicast_list_adjacent - Returns snooped multicast addresses * @dev: The bridge port adjacent to which to retrieve addresses diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index b5af68c105a8..4fd882686b04 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -743,6 +743,9 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu) mtu = nf_bridge->frag_max_size; + nf_bridge_update_protocol(skb); + nf_bridge_push_encap_header(skb); + if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) { nf_bridge_info_free(skb); return br_dev_queue_push_xmit(net, sk, skb); @@ -760,8 +763,6 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff IPCB(skb)->frag_max_size = nf_bridge->frag_max_size; - nf_bridge_update_protocol(skb); - data = this_cpu_ptr(&brnf_frag_data_storage); if (skb_vlan_tag_present(skb)) { @@ -789,8 +790,6 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size; - nf_bridge_update_protocol(skb); - data = this_cpu_ptr(&brnf_frag_data_storage); data->encap_size = nf_bridge_encap_header_len(skb); data->size = ETH_HLEN + data->encap_size; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 0c8b5f1a15bc..2ff83d84230d 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1357,7 +1357,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], if (data[IFLA_BR_MCAST_QUERY_INTVL]) { u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]); - br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val); + br_multicast_set_query_intvl(&br->multicast_ctx, val); } if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) { @@ -1369,7 +1369,7 
@@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) { u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]); - br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val); + br_multicast_set_startup_query_intvl(&br->multicast_ctx, val); } if (data[IFLA_BR_MCAST_STATS_ENABLED]) { diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index af2b3512d86c..2661dda1a92b 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -28,6 +28,8 @@ #define BR_MAX_PORTS (1<<BR_PORT_BITS) #define BR_MULTICAST_DEFAULT_HASH_MAX 4096 +#define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000) +#define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN #define BR_HWDOM_MAX BITS_PER_LONG @@ -964,6 +966,10 @@ int br_multicast_dump_querier_state(struct sk_buff *skb, int nest_attr); size_t br_multicast_querier_state_size(void); size_t br_rports_size(const struct net_bridge_mcast *brmctx); +void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx, + unsigned long val); +void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx, + unsigned long val); static inline bool br_group_is_l2(const struct br_ip *group) { @@ -1148,9 +1154,9 @@ br_multicast_port_ctx_get_global(const struct net_bridge_mcast_port *pmctx) static inline bool br_multicast_ctx_vlan_global_disabled(const struct net_bridge_mcast *brmctx) { - return br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && - br_multicast_ctx_is_vlan(brmctx) && - !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED); + return br_multicast_ctx_is_vlan(brmctx) && + (!br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) || + !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED)); } static inline bool diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 159590d5c2af..3f7ca88c2aa3 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -657,7 +657,7 @@ static ssize_t multicast_query_interval_show(struct device *d, static int set_query_interval(struct net_bridge *br, unsigned long val, struct netlink_ext_ack *extack) { - br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val); + br_multicast_set_query_intvl(&br->multicast_ctx, val); return 0; } @@ -705,7 +705,7 @@ static ssize_t multicast_startup_query_interval_show( static int set_startup_query_interval(struct net_bridge *br, unsigned long val, struct netlink_ext_ack *extack) { - br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val); + br_multicast_set_startup_query_intvl(&br->multicast_ctx, val); return 0; } diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c index 8ffd4ed2563c..a6382973b3e7 100644 --- a/net/bridge/br_vlan_options.c +++ b/net/bridge/br_vlan_options.c @@ -521,7 +521,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br, u64 val; val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]); - v->br_mcast_ctx.multicast_query_interval = clock_t_to_jiffies(val); + br_multicast_set_query_intvl(&v->br_mcast_ctx, val); *changed = true; } if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) { @@ -535,7 +535,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br, u64 val; val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]); - v->br_mcast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val); + br_multicast_set_startup_query_intvl(&v->br_mcast_ctx, val); *changed = true; } if 
(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) { diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c index 40cd57ad0a0f..aee11c74d3c8 100644 --- a/net/caif/cfserl.c +++ b/net/caif/cfserl.c @@ -128,7 +128,6 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt) if (pkt != NULL) cfpkt_destroy(pkt); layr->incomplete_frm = NULL; - expectlen = 0; spin_unlock(&layr->sync); return -EPROTO; } diff --git a/net/core/dev.c b/net/core/dev.c index c431c8925eed..6c8b226b5f2f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3719,8 +3719,12 @@ no_lock_out: * separate lock before trying to get qdisc main lock. * This permits qdisc->running owner to get the lock more * often and dequeue packets faster. + * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit + * and then other tasks will only enqueue packets. The packets will be + * sent after the qdisc owner is scheduled again. To prevent this + * scenario the task always serialize on the lock. */ - contended = qdisc_is_running(q); + contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT); if (unlikely(contended)) spin_lock(&q->busylock); @@ -3824,8 +3828,8 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) return skb; /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */ - qdisc_skb_cb(skb)->mru = 0; - qdisc_skb_cb(skb)->post_ct = false; + tc_skb_cb(skb)->mru = 0; + tc_skb_cb(skb)->post_ct = false; mini_qdisc_bstats_cpu_update(miniq, skb); switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) { @@ -4984,8 +4988,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, } qdisc_skb_cb(skb)->pkt_len = skb->len; - qdisc_skb_cb(skb)->mru = 0; - qdisc_skb_cb(skb)->post_ct = false; + tc_skb_cb(skb)->mru = 0; + tc_skb_cb(skb)->post_ct = false; skb->tc_at_ingress = 1; mini_qdisc_bstats_cpu_update(miniq, skb); diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 1d309a666932..1b807d119da5 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -192,7 +192,7 @@ static int net_hwtstamp_validate(struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - if (cfg.flags) /* reserved for future extensions */ + if (cfg.flags & ~HWTSTAMP_FLAG_MASK) return -EINVAL; tx_type = cfg.tx_type; diff --git a/net/core/devlink.c b/net/core/devlink.c index 492a26d3c3f1..fcd9f6d85cf1 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -4467,6 +4467,16 @@ static const struct devlink_param devlink_param_generic[] = { .name = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME, .type = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE, }, + { + .id = DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE, + .name = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME, + .type = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE, + }, + { + .id = DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE, + .name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME, + .type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE, + }, }; static int devlink_param_generic_verify(const struct devlink_param *param) diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 1bb567a3b329..75282222e0b4 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -750,6 +750,27 @@ static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh, return 0; } +static const struct nla_policy fib_rule_policy[FRA_MAX + 1] = { + [FRA_UNSPEC] = { .strict_start_type = FRA_DPORT_RANGE + 1 }, + [FRA_IIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, + [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, + 
[FRA_PRIORITY] = { .type = NLA_U32 }, + [FRA_FWMARK] = { .type = NLA_U32 }, + [FRA_FLOW] = { .type = NLA_U32 }, + [FRA_TUN_ID] = { .type = NLA_U64 }, + [FRA_FWMASK] = { .type = NLA_U32 }, + [FRA_TABLE] = { .type = NLA_U32 }, + [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, + [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, + [FRA_GOTO] = { .type = NLA_U32 }, + [FRA_L3MDEV] = { .type = NLA_U8 }, + [FRA_UID_RANGE] = { .len = sizeof(struct fib_rule_uid_range) }, + [FRA_PROTOCOL] = { .type = NLA_U8 }, + [FRA_IP_PROTO] = { .type = NLA_U8 }, + [FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }, + [FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) } +}; + int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { @@ -774,7 +795,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, } err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX, - ops->policy, extack); + fib_rule_policy, extack); if (err < 0) { NL_SET_ERR_MSG(extack, "Error parsing msg"); goto errout; @@ -882,7 +903,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, } err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX, - ops->policy, extack); + fib_rule_policy, extack); if (err < 0) { NL_SET_ERR_MSG(extack, "Error parsing msg"); goto errout; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index de1109f2cfcf..15833e1d6ea1 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -239,7 +239,7 @@ void skb_flow_dissect_ct(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container, u16 *ctinfo_map, - size_t mapsize, bool post_ct) + size_t mapsize, bool post_ct, u16 zone) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) struct flow_dissector_key_ct *key; @@ -261,6 +261,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb, if (!ct) { key->ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | TCA_FLOWER_KEY_CT_FLAGS_INVALID; + key->ct_zone = zone; return; } diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index 6beaea13564a..73f68d4625f3 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/kernel.h> #include <linux/slab.h> +#include <net/act_api.h> #include <net/flow_offload.h> #include <linux/rtnetlink.h> #include <linux/mutex.h> @@ -27,6 +28,26 @@ struct flow_rule *flow_rule_alloc(unsigned int num_actions) } EXPORT_SYMBOL(flow_rule_alloc); +struct flow_offload_action *offload_action_alloc(unsigned int num_actions) +{ + struct flow_offload_action *fl_action; + int i; + + fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions), + GFP_KERNEL); + if (!fl_action) + return NULL; + + fl_action->action.num_entries = num_actions; + /* Pre-fill each action hw_stats with DONT_CARE. + * Caller can override this if it wants stats for a given action. 
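+ * (DONT_CARE accepts whatever stats granularity the driver provides.)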
+ */ + for (i = 0; i < num_actions; i++) + fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE; + + return fl_action; +} + #define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ const struct flow_match *__m = &(__rule)->match; \ struct flow_dissector *__d = (__m)->dissector; \ @@ -397,6 +418,8 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv) existing_qdiscs_register(cb, cb_priv); mutex_unlock(&flow_indr_block_lock); + tcf_action_reoffload_cb(cb, cb_priv, true); + return 0; } EXPORT_SYMBOL(flow_indr_dev_register); @@ -449,6 +472,7 @@ void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, __flow_block_indr_cleanup(release, cb_priv, &cleanup_list); mutex_unlock(&flow_indr_block_lock); + tcf_action_reoffload_cb(cb, cb_priv, false); flow_block_indr_notify(&cleanup_list); kfree(indr_dev); } @@ -549,19 +573,25 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, void (*cleanup)(struct flow_block_cb *block_cb)) { struct flow_indr_dev *this; + u32 count = 0; + int err; mutex_lock(&flow_indr_block_lock); + if (bo) { + if (bo->command == FLOW_BLOCK_BIND) + indir_dev_add(data, dev, sch, type, cleanup, bo); + else if (bo->command == FLOW_BLOCK_UNBIND) + indir_dev_remove(data); + } - if (bo->command == FLOW_BLOCK_BIND) - indir_dev_add(data, dev, sch, type, cleanup, bo); - else if (bo->command == FLOW_BLOCK_UNBIND) - indir_dev_remove(data); - - list_for_each_entry(this, &flow_block_indr_dev_list, list) - this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup); + list_for_each_entry(this, &flow_block_indr_dev_list, list) { + err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup); + if (!err) + count++; + } mutex_unlock(&flow_indr_block_lock); - return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0; + return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count; } EXPORT_SYMBOL(flow_indr_dev_setup_offload); diff --git a/net/core/link_watch.c b/net/core/link_watch.c index d7d089963b1d..b0f5344d1185 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -166,7 +166,10 @@ static void linkwatch_do_dev(struct net_device *dev) netdev_state_change(dev); } - dev_put_track(dev, &dev->linkwatch_dev_tracker); + /* Note: our callers are responsible for + * calling netdev_tracker_free(). + */ + dev_put(dev); } static void __linkwatch_run_queue(int urgent_only) @@ -209,6 +212,10 @@ static void __linkwatch_run_queue(int urgent_only) list_add_tail(&dev->link_watch_list, &lweventlist); continue; } + /* We must free netdev tracker under + * the spinlock protection. + */ + netdev_tracker_free(dev, &dev->linkwatch_dev_tracker); spin_unlock_irq(&lweventlist_lock); linkwatch_do_dev(dev); do_dev--; @@ -232,6 +239,10 @@ void linkwatch_forget_dev(struct net_device *dev) if (!list_empty(&dev->link_watch_list)) { list_del_init(&dev->link_watch_list); clean = 1; + /* We must release netdev tracker under + * the spinlock protection. 
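+ * i.e. while the device is still being unlinked from lweventlist.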
+ */ + netdev_tracker_free(dev, &dev->linkwatch_dev_tracker); } spin_unlock_irqrestore(&lweventlist_lock, flags); if (clean) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 506aa01776df..213cb7b26b7a 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -3770,10 +3770,6 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, neigh_proc_base_reachable_time; } - /* Don't export sysctls to unprivileged users */ - if (neigh_parms_net(p)->user_ns != &init_user_ns) - t->neigh_vars[0].procname = NULL; - switch (neigh_parms_family(p)) { case AF_INET: p_name = "ipv4"; diff --git a/net/core/of_net.c b/net/core/of_net.c index f1a9bf7578e7..95a64c813ae5 100644 --- a/net/core/of_net.c +++ b/net/core/of_net.c @@ -61,7 +61,7 @@ static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr) { struct platform_device *pdev = of_find_device_by_node(np); struct nvmem_cell *cell; - const void *mac; + const void *buf; size_t len; int ret; @@ -78,21 +78,32 @@ static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr) if (IS_ERR(cell)) return PTR_ERR(cell); - mac = nvmem_cell_read(cell, &len); + buf = nvmem_cell_read(cell, &len); nvmem_cell_put(cell); - if (IS_ERR(mac)) - return PTR_ERR(mac); - - if (len != ETH_ALEN || !is_valid_ether_addr(mac)) { - kfree(mac); - return -EINVAL; + if (IS_ERR(buf)) + return PTR_ERR(buf); + + ret = 0; + if (len == ETH_ALEN) { + if (is_valid_ether_addr(buf)) + memcpy(addr, buf, ETH_ALEN); + else + ret = -EINVAL; + } else if (len == 3 * ETH_ALEN - 1) { + u8 mac[ETH_ALEN]; + + if (mac_pton(buf, mac)) + memcpy(addr, mac, ETH_ALEN); + else + ret = -EINVAL; + } else { + ret = -EINVAL; } - memcpy(addr, mac, ETH_ALEN); - kfree(mac); + kfree(buf); - return 0; + return ret; } /** diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a33247fdb8f5..e514a36bcffc 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -832,7 +832,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) ntohs(skb->protocol), skb->pkt_type, skb->skb_iif); if (dev) - printk("%sdev name=%s feat=0x%pNF\n", + printk("%sdev name=%s feat=%pNF\n", level, dev->name, &dev->features); if (sk) printk("%ssk family=%hu type=%u proto=%u\n", @@ -2024,6 +2024,30 @@ void *skb_pull(struct sk_buff *skb, unsigned int len) EXPORT_SYMBOL(skb_pull); /** + * skb_pull_data - remove data from the start of a buffer returning its + * original position. + * @skb: buffer to use + * @len: amount of data to remove + * + * This function removes data from the start of a buffer, returning + * the memory to the headroom. A pointer to the original data in the buffer + * is returned after checking if there is enough data to pull. Once the + * data has been pulled future pushes will overwrite the old data. 
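+ *
+ * Return: pointer to the original data in the buffer, or %NULL if there
+ * is not enough data to pull.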
+ */ +void *skb_pull_data(struct sk_buff *skb, size_t len) +{ + void *data = skb->data; + + if (skb->len < len) + return NULL; + + skb_pull(skb, len); + + return data; +} +EXPORT_SYMBOL(skb_pull_data); + +/** * skb_trim - remove end from a buffer * @skb: buffer to alter * @len: new length diff --git a/net/core/xdp.c b/net/core/xdp.c index 5ddc29f29bad..7fe1df85f505 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -159,6 +159,11 @@ static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq) int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, u32 queue_index, unsigned int napi_id) { + if (!dev) { + WARN(1, "Missing net_device from driver"); + return -ENODEV; + } + if (xdp_rxq->reg_state == REG_STATE_UNUSED) { WARN(1, "Driver promised not to register this"); return -EINVAL; @@ -169,11 +174,6 @@ int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, xdp_rxq_info_unreg(xdp_rxq); } - if (!dev) { - WARN(1, "Missing net_device from driver"); - return -ENODEV; - } - /* State either UNREGISTERED or NEW */ xdp_rxq_info_init(xdp_rxq); xdp_rxq->dev = dev; diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 4a4e3c17740c..ee73057529cf 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -101,10 +101,6 @@ errout: return err; } -static const struct nla_policy dn_fib_rule_policy[FRA_MAX+1] = { - FRA_GENERIC_POLICY, -}; - static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) { struct dn_fib_rule *r = (struct dn_fib_rule *)rule; @@ -235,7 +231,6 @@ static const struct fib_rules_ops __net_initconst dn_fib_rules_ops_template = { .fill = dn_fib_rule_fill, .flush_cache = dn_fib_rule_flush_cache, .nlgroup = RTNLGRP_DECnet_RULE, - .policy = dn_fib_rule_policy, .owner = THIS_MODULE, .fro_net = &init_net, }; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 8814fa0e44c8..c18b22c0bf55 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -822,7 +822,7 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds) int err; if (tag_ops->proto == dst->default_proto) - return 0; + goto connect; dsa_switch_for_each_cpu_port(cpu_dp, ds) { rtnl_lock(); @@ -836,7 +836,30 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds) } } +connect: + if (tag_ops->connect) { + err = tag_ops->connect(ds); + if (err) + return err; + } + + if (ds->ops->connect_tag_protocol) { + err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); + if (err) { + dev_err(ds->dev, + "Unable to connect to tag protocol \"%s\": %pe\n", + tag_ops->name, ERR_PTR(err)); + goto disconnect; + } + } + return 0; + +disconnect: + if (tag_ops->disconnect) + tag_ops->disconnect(ds); + + return err; } static int dsa_switch_setup(struct dsa_switch *ds) @@ -1136,6 +1159,37 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst) dst->setup = false; } +static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst, + const struct dsa_device_ops *tag_ops) +{ + const struct dsa_device_ops *old_tag_ops = dst->tag_ops; + struct dsa_notifier_tag_proto_info info; + int err; + + dst->tag_ops = tag_ops; + + /* Notify the switches from this tree about the connection + * to the new tagger + */ + info.tag_ops = tag_ops; + err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info); + if (err && err != -EOPNOTSUPP) + goto out_disconnect; + + /* Notify the old tagger about the disconnection from this tree */ + info.tag_ops = old_tag_ops; + dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info); + + return 0; + +out_disconnect: + info.tag_ops = tag_ops; + dsa_tree_notify(dst, 
DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info); + dst->tag_ops = old_tag_ops; + + return err; +} + /* Since the dsa/tagging sysfs device attribute is per master, the assumption * is that all DSA switches within a tree share the same tagger, otherwise * they would have formed disjoint trees (different "dsa,member" values). @@ -1168,12 +1222,15 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, goto out_unlock; } + /* Notify the tag protocol change */ info.tag_ops = tag_ops; err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info); if (err) - goto out_unwind_tagger; + return err; - dst->tag_ops = tag_ops; + err = dsa_tree_bind_tag_proto(dst, tag_ops); + if (err) + goto out_unwind_tagger; rtnl_unlock(); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 0194a969c9b5..b5ae21f172a8 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -38,6 +38,8 @@ enum { DSA_NOTIFIER_VLAN_DEL, DSA_NOTIFIER_MTU, DSA_NOTIFIER_TAG_PROTO, + DSA_NOTIFIER_TAG_PROTO_CONNECT, + DSA_NOTIFIER_TAG_PROTO_DISCONNECT, DSA_NOTIFIER_MRP_ADD, DSA_NOTIFIER_MRP_DEL, DSA_NOTIFIER_MRP_ADD_RING_ROLE, diff --git a/net/dsa/switch.c b/net/dsa/switch.c index 9c92edd96961..393f2d8a860a 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -647,6 +647,60 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds, return 0; } +/* We use the same cross-chip notifiers to inform both the tagger side, as well + * as the switch side, of connection and disconnection events. + * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the + * switch side doesn't support connecting to this tagger, and therefore, the + * fact that we don't disconnect the tagger side doesn't constitute a memory + * leak: the tagger will still operate with persistent per-switch memory, just + * with the switch side unconnected to it. What does constitute a hard error is + * when the switch side supports connecting but fails. 
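+ * If that happens, the tagger-side connection is rolled back before
+ * the error is returned.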
+ */ +static int +dsa_switch_connect_tag_proto(struct dsa_switch *ds, + struct dsa_notifier_tag_proto_info *info) +{ + const struct dsa_device_ops *tag_ops = info->tag_ops; + int err; + + /* Notify the new tagger about the connection to this switch */ + if (tag_ops->connect) { + err = tag_ops->connect(ds); + if (err) + return err; + } + + if (!ds->ops->connect_tag_protocol) + return -EOPNOTSUPP; + + /* Notify the switch about the connection to the new tagger */ + err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); + if (err) { + /* Revert the new tagger's connection to this tree */ + if (tag_ops->disconnect) + tag_ops->disconnect(ds); + return err; + } + + return 0; +} + +static int +dsa_switch_disconnect_tag_proto(struct dsa_switch *ds, + struct dsa_notifier_tag_proto_info *info) +{ + const struct dsa_device_ops *tag_ops = info->tag_ops; + + /* Notify the tagger about the disconnection from this switch */ + if (tag_ops->disconnect && ds->tagger_data) + tag_ops->disconnect(ds); + + /* No need to notify the switch, since it shouldn't have any + * resources to tear down + */ + return 0; +} + static int dsa_switch_mrp_add(struct dsa_switch *ds, struct dsa_notifier_mrp_info *info) { @@ -766,6 +820,12 @@ static int dsa_switch_event(struct notifier_block *nb, case DSA_NOTIFIER_TAG_PROTO: err = dsa_switch_change_tag_proto(ds, info); break; + case DSA_NOTIFIER_TAG_PROTO_CONNECT: + err = dsa_switch_connect_tag_proto(ds, info); + break; + case DSA_NOTIFIER_TAG_PROTO_DISCONNECT: + err = dsa_switch_disconnect_tag_proto(ds, info); + break; case DSA_NOTIFIER_MRP_ADD: err = dsa_switch_mrp_add(ds, info); break; diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c index 4ba460c5a880..0d81f172b7a6 100644 --- a/net/dsa/tag_ocelot.c +++ b/net/dsa/tag_ocelot.c @@ -47,9 +47,13 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev, void *injection; __be32 *prefix; u32 rew_op = 0; + u64 qos_class; ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type); + qos_class = netdev_get_num_tc(netdev) ? 
+ netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority; + injection = skb_push(skb, OCELOT_TAG_LEN); prefix = skb_push(skb, OCELOT_SHORT_PREFIX_LEN); @@ -57,7 +61,7 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev, memset(injection, 0, OCELOT_TAG_LEN); ocelot_ifh_set_bypass(injection, 1); ocelot_ifh_set_src(injection, ds->num_ports); - ocelot_ifh_set_qos_class(injection, skb->priority); + ocelot_ifh_set_qos_class(injection, qos_class); ocelot_ifh_set_vlan_tci(injection, vlan_tci); ocelot_ifh_set_tag_type(injection, tag_type); diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c index a1919ea5e828..68982b2789a5 100644 --- a/net/dsa/tag_ocelot_8021q.c +++ b/net/dsa/tag_ocelot_8021q.c @@ -12,25 +12,39 @@ #include <linux/dsa/ocelot.h> #include "dsa_priv.h" +struct ocelot_8021q_tagger_private { + struct ocelot_8021q_tagger_data data; /* Must be first */ + struct kthread_worker *xmit_worker; +}; + static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp, struct sk_buff *skb) { + struct ocelot_8021q_tagger_private *priv = dp->ds->tagger_data; + struct ocelot_8021q_tagger_data *data = &priv->data; + void (*xmit_work_fn)(struct kthread_work *work); struct felix_deferred_xmit_work *xmit_work; - struct felix_port *felix_port = dp->priv; + struct kthread_worker *xmit_worker; + + xmit_work_fn = data->xmit_work_fn; + xmit_worker = priv->xmit_worker; + + if (!xmit_work_fn || !xmit_worker) + return NULL; xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC); if (!xmit_work) return NULL; /* Calls felix_port_deferred_xmit in felix.c */ - kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn); + kthread_init_work(&xmit_work->work, xmit_work_fn); /* Increase refcount so the kfree_skb in dsa_slave_xmit * won't really free the packet. 
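 * The deferred worker releases this extra reference after it has
 * actually transmitted the frame.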
*/ xmit_work->dp = dp; xmit_work->skb = skb_get(skb); - kthread_queue_work(felix_port->xmit_worker, &xmit_work->work); + kthread_queue_work(xmit_worker, &xmit_work->work); return NULL; } @@ -67,11 +81,43 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb, return skb; } +static void ocelot_disconnect(struct dsa_switch *ds) +{ + struct ocelot_8021q_tagger_private *priv = ds->tagger_data; + + kthread_destroy_worker(priv->xmit_worker); + kfree(priv); + ds->tagger_data = NULL; +} + +static int ocelot_connect(struct dsa_switch *ds) +{ + struct ocelot_8021q_tagger_private *priv; + int err; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->xmit_worker = kthread_create_worker(0, "felix_xmit"); + if (IS_ERR(priv->xmit_worker)) { + err = PTR_ERR(priv->xmit_worker); + kfree(priv); + return err; + } + + ds->tagger_data = priv; + + return 0; +} + static const struct dsa_device_ops ocelot_8021q_netdev_ops = { .name = "ocelot-8021q", .proto = DSA_TAG_PROTO_OCELOT_8021Q, .xmit = ocelot_xmit, .rcv = ocelot_rcv, + .connect = ocelot_connect, + .disconnect = ocelot_disconnect, .needed_headroom = VLAN_HLEN, .promisc_on_master = true, }; diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index 6c293c2a3008..72d5e0ef8dcf 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -4,7 +4,6 @@ #include <linux/if_vlan.h> #include <linux/dsa/sja1105.h> #include <linux/dsa/8021q.h> -#include <linux/skbuff.h> #include <linux/packing.h> #include "dsa_priv.h" @@ -54,11 +53,25 @@ #define SJA1110_TX_TRAILER_LEN 4 #define SJA1110_MAX_PADDING_LEN 15 -enum sja1110_meta_tstamp { - SJA1110_META_TSTAMP_TX = 0, - SJA1110_META_TSTAMP_RX = 1, +#define SJA1105_HWTS_RX_EN 0 + +struct sja1105_tagger_private { + struct sja1105_tagger_data data; /* Must be first */ + unsigned long state; + /* Protects concurrent access to the meta state machine + * from taggers running on multiple ports on SMP systems + */ + spinlock_t meta_lock; + struct sk_buff *stampable_skb; + struct kthread_worker *xmit_worker; }; +static struct sja1105_tagger_private * +sja1105_tagger_private(struct dsa_switch *ds) +{ + return ds->tagger_data; +} + /* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */ static inline bool sja1105_is_link_local(const struct sk_buff *skb) { @@ -125,16 +138,30 @@ static inline bool sja1105_is_meta_frame(const struct sk_buff *skb) static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp, struct sk_buff *skb) { - struct sja1105_port *sp = dp->priv; + struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds); + struct sja1105_tagger_private *priv = sja1105_tagger_private(dp->ds); + void (*xmit_work_fn)(struct kthread_work *work); + struct sja1105_deferred_xmit_work *xmit_work; + struct kthread_worker *xmit_worker; - if (!dsa_port_is_sja1105(dp)) - return skb; + xmit_work_fn = tagger_data->xmit_work_fn; + xmit_worker = priv->xmit_worker; + + if (!xmit_work_fn || !xmit_worker) + return NULL; + + xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC); + if (!xmit_work) + return NULL; + kthread_init_work(&xmit_work->work, xmit_work_fn); /* Increase refcount so the kfree_skb in dsa_slave_xmit * won't really free the packet. 
*/ - skb_queue_tail(&sp->xmit_queue, skb_get(skb)); - kthread_queue_work(sp->xmit_worker, &sp->xmit_work); + xmit_work->dp = dp; + xmit_work->skb = skb_get(skb); + + kthread_queue_work(xmit_worker, &xmit_work->work); return NULL; } @@ -355,32 +382,32 @@ static struct sk_buff */ if (is_link_local) { struct dsa_port *dp = dsa_slave_to_port(skb->dev); - struct sja1105_port *sp = dp->priv; + struct sja1105_tagger_private *priv; + struct dsa_switch *ds = dp->ds; - if (unlikely(!dsa_port_is_sja1105(dp))) - return skb; + priv = sja1105_tagger_private(ds); - if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state)) + if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state)) /* Do normal processing. */ return skb; - spin_lock(&sp->data->meta_lock); + spin_lock(&priv->meta_lock); /* Was this a link-local frame instead of the meta * that we were expecting? */ - if (sp->data->stampable_skb) { - dev_err_ratelimited(dp->ds->dev, + if (priv->stampable_skb) { + dev_err_ratelimited(ds->dev, "Expected meta frame, is %12llx " "in the DSA master multicast filter?\n", SJA1105_META_DMAC); - kfree_skb(sp->data->stampable_skb); + kfree_skb(priv->stampable_skb); } /* Hold a reference to avoid dsa_switch_rcv * from freeing the skb. */ - sp->data->stampable_skb = skb_get(skb); - spin_unlock(&sp->data->meta_lock); + priv->stampable_skb = skb_get(skb); + spin_unlock(&priv->meta_lock); /* Tell DSA we got nothing */ return NULL; @@ -393,37 +420,37 @@ static struct sk_buff */ } else if (is_meta) { struct dsa_port *dp = dsa_slave_to_port(skb->dev); - struct sja1105_port *sp = dp->priv; + struct sja1105_tagger_private *priv; + struct dsa_switch *ds = dp->ds; struct sk_buff *stampable_skb; - if (unlikely(!dsa_port_is_sja1105(dp))) - return skb; + priv = sja1105_tagger_private(ds); /* Drop the meta frame if we're not in the right state * to process it. */ - if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state)) + if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state)) return NULL; - spin_lock(&sp->data->meta_lock); + spin_lock(&priv->meta_lock); - stampable_skb = sp->data->stampable_skb; - sp->data->stampable_skb = NULL; + stampable_skb = priv->stampable_skb; + priv->stampable_skb = NULL; /* Was this a meta frame instead of the link-local * that we were expecting? 
*/ if (!stampable_skb) { - dev_err_ratelimited(dp->ds->dev, + dev_err_ratelimited(ds->dev, "Unexpected meta frame\n"); - spin_unlock(&sp->data->meta_lock); + spin_unlock(&priv->meta_lock); return NULL; } if (stampable_skb->dev != skb->dev) { - dev_err_ratelimited(dp->ds->dev, + dev_err_ratelimited(ds->dev, "Meta frame on wrong port\n"); - spin_unlock(&sp->data->meta_lock); + spin_unlock(&priv->meta_lock); return NULL; } @@ -434,12 +461,36 @@ static struct sk_buff skb = stampable_skb; sja1105_transfer_meta(skb, meta); - spin_unlock(&sp->data->meta_lock); + spin_unlock(&priv->meta_lock); } return skb; } +static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds) +{ + struct sja1105_tagger_private *priv = sja1105_tagger_private(ds); + + return test_bit(SJA1105_HWTS_RX_EN, &priv->state); +} + +static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on) +{ + struct sja1105_tagger_private *priv = sja1105_tagger_private(ds); + + if (on) + set_bit(SJA1105_HWTS_RX_EN, &priv->state); + else + clear_bit(SJA1105_HWTS_RX_EN, &priv->state); + + /* Initialize the meta state machine to a known state */ + if (!priv->stampable_skb) + return; + + kfree_skb(priv->stampable_skb); + priv->stampable_skb = NULL; +} + static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb) { u16 tpid = ntohs(eth_hdr(skb)->h_proto); @@ -526,48 +577,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, is_meta); } -static void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, - u8 ts_id, enum sja1110_meta_tstamp dir, - u64 tstamp) -{ - struct sk_buff *skb, *skb_tmp, *skb_match = NULL; - struct dsa_port *dp = dsa_to_port(ds, port); - struct skb_shared_hwtstamps shwt = {0}; - struct sja1105_port *sp = dp->priv; - - if (!dsa_port_is_sja1105(dp)) - return; - - /* We don't care about RX timestamps on the CPU port */ - if (dir == SJA1110_META_TSTAMP_RX) - return; - - spin_lock(&sp->data->skb_txtstamp_queue.lock); - - skb_queue_walk_safe(&sp->data->skb_txtstamp_queue, skb, skb_tmp) { - if (SJA1105_SKB_CB(skb)->ts_id != ts_id) - continue; - - __skb_unlink(skb, &sp->data->skb_txtstamp_queue); - skb_match = skb; - - break; - } - - spin_unlock(&sp->data->skb_txtstamp_queue.lock); - - if (WARN_ON(!skb_match)) - return; - - shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp)); - skb_complete_tx_timestamp(skb_match, &shwt); -} - static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header) { u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN; int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header); int n_ts = SJA1110_RX_HEADER_N_TS(rx_header); + struct sja1105_tagger_data *tagger_data; struct net_device *master = skb->dev; struct dsa_port *cpu_dp; struct dsa_switch *ds; @@ -581,6 +596,10 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header) return NULL; } + tagger_data = sja1105_tagger_data(ds); + if (!tagger_data->meta_tstamp_handler) + return NULL; + for (i = 0; i <= n_ts; i++) { u8 ts_id, source_port, dir; u64 tstamp; @@ -590,8 +609,8 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header) dir = (buf[1] & BIT(3)) >> 3; tstamp = be64_to_cpu(*(__be64 *)(buf + 2)); - sja1110_process_meta_tstamp(ds, source_port, ts_id, dir, - tstamp); + tagger_data->meta_tstamp_handler(ds, source_port, ts_id, dir, + tstamp); buf += SJA1110_META_TSTAMP_SIZE; } @@ -722,11 +741,53 @@ static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto, *proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1]; } +static void sja1105_disconnect(struct 
dsa_switch *ds) +{ + struct sja1105_tagger_private *priv = ds->tagger_data; + + kthread_destroy_worker(priv->xmit_worker); + kfree(priv); + ds->tagger_data = NULL; +} + +static int sja1105_connect(struct dsa_switch *ds) +{ + struct sja1105_tagger_data *tagger_data; + struct sja1105_tagger_private *priv; + struct kthread_worker *xmit_worker; + int err; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spin_lock_init(&priv->meta_lock); + + xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit", + ds->dst->index, ds->index); + if (IS_ERR(xmit_worker)) { + err = PTR_ERR(xmit_worker); + kfree(priv); + return err; + } + + priv->xmit_worker = xmit_worker; + /* Export functions for switch driver use */ + tagger_data = &priv->data; + tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state; + tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state; + ds->tagger_data = priv; + + return 0; +} + static const struct dsa_device_ops sja1105_netdev_ops = { .name = "sja1105", .proto = DSA_TAG_PROTO_SJA1105, .xmit = sja1105_xmit, .rcv = sja1105_rcv, + .connect = sja1105_connect, + .disconnect = sja1105_disconnect, .needed_headroom = VLAN_HLEN, .flow_dissect = sja1105_flow_dissect, .promisc_on_master = true, @@ -740,6 +801,8 @@ static const struct dsa_device_ops sja1110_netdev_ops = { .proto = DSA_TAG_PROTO_SJA1110, .xmit = sja1110_xmit, .rcv = sja1110_rcv, + .connect = sja1105_connect, + .disconnect = sja1105_disconnect, .flow_dissect = sja1110_flow_dissect, .needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN, .needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN, diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c index 63560bbb7d1f..920aac02fe39 100644 --- a/net/ethtool/cabletest.c +++ b/net/ethtool/cabletest.c @@ -96,7 +96,7 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info) out_rtnl: rtnl_unlock(); out_dev_put: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } @@ -353,7 +353,7 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info) out_rtnl: rtnl_unlock(); out_dev_put: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c index 6a070dc8e4b0..403158862011 100644 --- a/net/ethtool/channels.c +++ b/net/ethtool/channels.c @@ -219,6 +219,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c index 46776ea42a92..487bdf345541 100644 --- a/net/ethtool/coalesce.c +++ b/net/ethtool/coalesce.c @@ -336,6 +336,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/debug.c b/net/ethtool/debug.c index f99912d7957e..d73888c7d19c 100644 --- a/net/ethtool/debug.c +++ b/net/ethtool/debug.c @@ -123,6 +123,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c index e10bfcc07853..45c42b2d5f17 100644 --- a/net/ethtool/eee.c +++ b/net/ethtool/eee.c @@ -185,6 +185,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/features.c b/net/ethtool/features.c index 2e7331b23996..55d449a2d3fc 100644 --- a/net/ethtool/features.c +++ b/net/ethtool/features.c @@ -283,6 +283,6 @@ int ethnl_set_features(struct sk_buff *skb, 
struct genl_info *info) out_rtnl: rtnl_unlock(); - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/fec.c b/net/ethtool/fec.c index 8738dafd5417..9f5a134e2e01 100644 --- a/net/ethtool/fec.c +++ b/net/ethtool/fec.c @@ -305,6 +305,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c index b91839870efc..efa0f7f48836 100644 --- a/net/ethtool/linkinfo.c +++ b/net/ethtool/linkinfo.c @@ -149,6 +149,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c index f9eda596f301..99b29b4fe947 100644 --- a/net/ethtool/linkmodes.c +++ b/net/ethtool/linkmodes.c @@ -358,6 +358,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/module.c b/net/ethtool/module.c index bc2cef11bbda..898ed436b9e4 100644 --- a/net/ethtool/module.c +++ b/net/ethtool/module.c @@ -175,6 +175,6 @@ out_ops: ethnl_ops_complete(dev); out_rtnl: rtnl_unlock(); - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 23f32a995099..ea23659fab28 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -142,7 +142,8 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info, } req_info->dev = dev; - netdev_tracker_alloc(dev, &req_info->dev_tracker, GFP_KERNEL); + if (dev) + netdev_tracker_alloc(dev, &req_info->dev_tracker, GFP_KERNEL); req_info->flags = flags; return 0; } @@ -637,7 +638,6 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd, if (ret < 0) goto err_cleanup; reply_len = ret + ethnl_reply_header_size(); - ret = -ENOMEM; skb = genlmsg_new(reply_len, GFP_KERNEL); if (!skb) goto err_cleanup; diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index a779bbb0c524..75856db299e9 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -235,6 +235,11 @@ struct ethnl_req_info { u32 flags; }; +static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info) +{ + dev_put_track(req_info->dev, &req_info->dev_tracker); +} + /** * struct ethnl_reply_data - base type of reply data for GET requests * @dev: device for current reply message; in single shot requests it is diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c index ee1e5806bc93..a8c113d244db 100644 --- a/net/ethtool/pause.c +++ b/net/ethtool/pause.c @@ -181,6 +181,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/privflags.c b/net/ethtool/privflags.c index fc9f3be23a19..4c7bfa81e4ab 100644 --- a/net/ethtool/privflags.c +++ b/net/ethtool/privflags.c @@ -196,6 +196,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c index 450b8866373d..c1d5f5e0fdc9 100644 --- a/net/ethtool/rings.c +++ b/net/ethtool/rings.c @@ -196,6 +196,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c index e7f2ee0d2471..efde33536687 100644 --- a/net/ethtool/tunnels.c +++ b/net/ethtool/tunnels.c @@ -195,7 +195,7 @@ int ethnl_tunnel_info_doit(struct 
sk_buff *skb, struct genl_info *info) if (ret) goto err_free_msg; rtnl_unlock(); - dev_put(req_info.dev); + ethnl_parse_header_dev_put(&req_info); genlmsg_end(rskb, reply_payload); return genlmsg_reply(rskb, info); @@ -204,7 +204,7 @@ err_free_msg: nlmsg_free(rskb); err_unlock_rtnl: rtnl_unlock(); - dev_put(req_info.dev); + ethnl_parse_header_dev_put(&req_info); return ret; } @@ -230,7 +230,7 @@ int ethnl_tunnel_info_start(struct netlink_callback *cb) sock_net(cb->skb->sk), cb->extack, false); if (ctx->req_info.dev) { - dev_put(ctx->req_info.dev); + ethnl_parse_header_dev_put(&ctx->req_info); ctx->req_info.dev = NULL; } diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c index ada7df2331d2..88f435e76481 100644 --- a/net/ethtool/wol.c +++ b/net/ethtool/wol.c @@ -165,6 +165,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 04067b249bf3..f53184767ee7 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -155,7 +155,7 @@ void inet_sock_destruct(struct sock *sk) kfree(rcu_dereference_protected(inet->inet_opt, 1)); dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1)); - dst_release(sk->sk_rx_dst); + dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1)); sk_refcnt_debug_dec(sk); } EXPORT_SYMBOL(inet_sock_destruct); @@ -1985,6 +1985,10 @@ static int __init inet_init(void) ip_init(); + /* Initialise per-cpu ipv4 mibs */ + if (init_ipv4_mibs()) + panic("%s: Cannot init ipv4 mibs\n", __func__); + /* Setup TCP slab cache for open requests. */ tcp_init(); @@ -2015,12 +2019,6 @@ static int __init inet_init(void) if (init_inet_pernet_ops()) pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__); - /* - * Initialise per-cpu ipv4 mibs - */ - - if (init_ipv4_mibs()) - pr_crit("%s: Cannot init ipv4 mibs\n", __func__); ipv4_proc_init(); diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index d279cb8ac158..e0b6c8b6de57 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -216,11 +216,6 @@ static struct fib_table *fib_empty_table(struct net *net) return NULL; } -static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = { - FRA_GENERIC_POLICY, - [FRA_FLOW] = { .type = NLA_U32 }, -}; - static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb, @@ -386,7 +381,6 @@ static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = { .nlmsg_payload = fib4_rule_nlmsg_payload, .flush_cache = fib4_rule_flush_cache, .nlgroup = RTNLGRP_IPV4_RULE, - .policy = fib4_rule_policy, .owner = THIS_MODULE, }; diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index c8fa6e7f7d12..581b5b2d72a5 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -261,6 +261,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, r->idiag_state = sk->sk_state; r->idiag_timer = 0; r->idiag_retrans = 0; + r->idiag_expires = 0; if (inet_diag_msg_attrs_fill(sk, skb, r, ext, sk_user_ns(NETLINK_CB(cb->skb).sk), @@ -314,9 +315,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, r->idiag_retrans = icsk->icsk_probes_out; r->idiag_expires = jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies); - } else { - r->idiag_timer = 0; - r->idiag_expires = 0; } if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) { diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 4c7aca884fa9..07274619b9ea 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ 
-195,10 +195,6 @@ static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) return 1; } -static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = { - FRA_GENERIC_POLICY, -}; - static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb, struct netlink_ext_ack *extack) @@ -231,7 +227,6 @@ static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = { .compare = ipmr_rule_compare, .fill = ipmr_rule_fill, .nlgroup = RTNLGRP_IPV4_RULE, - .policy = ipmr_rule_policy, .owner = THIS_MODULE, }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 20054618c87e..3b75836db19b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3042,8 +3042,7 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); __sk_dst_reset(sk); - dst_release(sk->sk_rx_dst); - sk->sk_rx_dst = NULL; + dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); tcp_saved_syn_free(tp); tp->compressed_ack = 0; tp->segs_in = 0; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3658b9c3dd2b..8010583f868b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5787,7 +5787,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) trace_tcp_probe(sk, skb); tcp_mstamp_refresh(tp); - if (unlikely(!sk->sk_rx_dst)) + if (unlikely(!rcu_access_pointer(sk->sk_rx_dst))) inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* * Header prediction. diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3dd19a2bf06c..ac10e4cdd8d0 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1701,7 +1701,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) struct sock *rsk; if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ - struct dst_entry *dst = sk->sk_rx_dst; + struct dst_entry *dst; + + dst = rcu_dereference_protected(sk->sk_rx_dst, + lockdep_sock_is_held(sk)); sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); @@ -1709,8 +1712,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) if (sk->sk_rx_dst_ifindex != skb->skb_iif || !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check, dst, 0)) { + RCU_INIT_POINTER(sk->sk_rx_dst, NULL); dst_release(dst); - sk->sk_rx_dst = NULL; } } tcp_rcv_established(sk, skb); @@ -1786,7 +1789,7 @@ int tcp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { - struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); + struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); @@ -2201,7 +2204,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) struct dst_entry *dst = skb_dst(skb); if (dst && dst_hold_safe(dst)) { - sk->sk_rx_dst = dst; + rcu_assign_pointer(sk->sk_rx_dst, dst); sk->sk_rx_dst_ifindex = skb->skb_iif; } } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 99536127650b..7b18a6f42f18 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2251,7 +2251,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old; if (dst_hold_safe(dst)) { - old = xchg(&sk->sk_rx_dst, dst); + old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst); dst_release(old); return old != dst; } @@ -2441,7 +2441,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, struct dst_entry *dst = skb_dst(skb); int ret; - if (unlikely(sk->sk_rx_dst != dst)) + if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) udp_sk_rx_dst_set(sk, dst); ret = 
udp_unicast_rcv_skb(sk, skb, uh); @@ -2600,7 +2600,7 @@ int udp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = READ_ONCE(sk->sk_rx_dst); + dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); @@ -3076,7 +3076,7 @@ int udp4_seq_show(struct seq_file *seq, void *v) { seq_setwidth(seq, 127); if (v == SEQ_START_TOKEN) - seq_puts(seq, " sl local_address rem_address st tx_queue " + seq_puts(seq, " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode ref pointer drops"); else { diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index dcedfe29d9d9..ec029c86ae06 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -340,10 +340,6 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_match(struct fib_rule *rule, return 1; } -static const struct nla_policy fib6_rule_policy[FRA_MAX+1] = { - FRA_GENERIC_POLICY, -}; - static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb, @@ -459,7 +455,6 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = { .fill = fib6_rule_fill, .nlmsg_payload = fib6_rule_nlmsg_payload, .nlgroup = RTNLGRP_IPV6_RULE, - .policy = fib6_rule_policy, .owner = THIS_MODULE, .fro_net = &init_net, }; diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index ed9b6d6ca65e..3a434d75925c 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -808,6 +808,8 @@ vti6_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data struct net *net = dev_net(dev); struct vti6_net *ip6n = net_generic(net, vti6_net_id); + memset(&p1, 0, sizeof(p1)); + switch (cmd) { case SIOCGETTUNNEL: if (dev == ip6n->fb_tnl_dev) { diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index a77a15a7f3dc..7cf73e60e619 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -182,10 +182,6 @@ static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags) return 1; } -static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = { - FRA_GENERIC_POLICY, -}; - static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb, struct netlink_ext_ack *extack) @@ -218,7 +214,6 @@ static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = { .compare = ip6mr_rule_compare, .fill = ip6mr_rule_fill, .nlgroup = RTNLGRP_IPV6_RULE, - .policy = ip6mr_rule_policy, .owner = THIS_MODULE, }; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 60f1e4f5be5a..c51d5ce3711c 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1020,6 +1020,9 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, struct raw6_sock *rp = raw6_sk(sk); int val; + if (optlen < sizeof(val)) + return -EINVAL; + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 4d02a329ab60..03be0e6b4826 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -658,7 +658,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh) } else { INIT_WORK(&work->work, rt6_probe_deferred); work->target = *nh_gw; - dev_hold_track(dev, &work->dev_tracker, GFP_KERNEL); + dev_hold_track(dev, &work->dev_tracker, GFP_ATOMIC); work->dev = dev; schedule_work(&work->work); } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 057c0f83c800..a618dce7e0bc 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1933,7 +1933,6 @@ static int __net_init sit_init_net(struct net *net) return 0; err_reg_dev: - 
ipip6_dev_free(sitn->fb_tunnel_dev); free_netdev(sitn->fb_tunnel_dev); err_alloc_dev: return err; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 3b7d6ede1364..1ac243d18c2b 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -107,7 +107,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) if (dst && dst_hold_safe(dst)) { const struct rt6_info *rt = (const struct rt6_info *)dst; - sk->sk_rx_dst = dst; + rcu_assign_pointer(sk->sk_rx_dst, dst); sk->sk_rx_dst_ifindex = skb->skb_iif; sk->sk_rx_dst_cookie = rt6_get_cookie(rt); } @@ -1506,7 +1506,10 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ - struct dst_entry *dst = sk->sk_rx_dst; + struct dst_entry *dst; + + dst = rcu_dereference_protected(sk->sk_rx_dst, + lockdep_sock_is_held(sk)); sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); @@ -1514,8 +1517,8 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) if (sk->sk_rx_dst_ifindex != skb->skb_iif || INDIRECT_CALL_1(dst->ops->check, ip6_dst_check, dst, sk->sk_rx_dst_cookie) == NULL) { + RCU_INIT_POINTER(sk->sk_rx_dst, NULL); dst_release(dst); - sk->sk_rx_dst = NULL; } } @@ -1876,7 +1879,7 @@ INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { - struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); + struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, sk->sk_rx_dst_cookie); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index ba8986d12413..1accc06abc54 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -957,7 +957,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, struct dst_entry *dst = skb_dst(skb); int ret; - if (unlikely(sk->sk_rx_dst != dst)) + if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) udp6_sk_rx_dst_set(sk, dst); if (!uh->check && !udp_sk(sk)->no_check6_rx) { @@ -1071,7 +1071,7 @@ INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = READ_ONCE(sk->sk_rx_dst); + dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, sk->sk_rx_dst_cookie); @@ -1205,7 +1205,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, kfree_skb(skb); return -EINVAL; } - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { + if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) { kfree_skb(skb); return -EINVAL; } diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 470ff0ce3dc7..7d2925bb966e 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -9,7 +9,7 @@ * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2007-2010, Intel Corporation * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ /** @@ -191,7 +191,8 @@ static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata, sband = ieee80211_get_sband(sdata); if (!sband) return; - he_cap = ieee80211_get_he_iftype_cap(sband, sdata->vif.type); + he_cap = ieee80211_get_he_iftype_cap(sband, + ieee80211_vif_type_p2p(&sdata->vif)); if (!he_cap) return; diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 430a58587538..74a878f213d3 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -9,7 +9,7 @@ * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2007-2010, Intel Corporation * 
Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2020 Intel Corporation + * Copyright (C) 2018 - 2021 Intel Corporation */ #include <linux/ieee80211.h> @@ -106,7 +106,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, mgmt->u.action.u.addba_req.start_seq_num = cpu_to_le16(start_seq_num << 4); - ieee80211_tx_skb(sdata, skb); + ieee80211_tx_skb_tid(sdata, skb, tid); } void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn) @@ -213,6 +213,8 @@ ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable) struct ieee80211_txq *txq = sta->sta.txq[tid]; struct txq_info *txqi; + lockdep_assert_held(&sta->ampdu_mlme.mtx); + if (!txq) return; @@ -290,7 +292,6 @@ static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid) ieee80211_assign_tid_tx(sta, tid, NULL); ieee80211_agg_splice_finish(sta->sdata, tid); - ieee80211_agg_start_txq(sta, tid, false); kfree_rcu(tid_tx, rcu_head); } @@ -480,8 +481,7 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta, /* send AddBA request */ ieee80211_send_addba_request(sdata, sta->sta.addr, tid, - tid_tx->dialog_token, - sta->tid_seq[tid] >> 4, + tid_tx->dialog_token, tid_tx->ssn, buf_size, tid_tx->timeout); WARN_ON(test_and_set_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state)); @@ -523,6 +523,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) params.ssn = sta->tid_seq[tid] >> 4; ret = drv_ampdu_action(local, sdata, &params); + tid_tx->ssn = params.ssn; if (ret == IEEE80211_AMPDU_TX_START_DELAY_ADDBA) { return; } else if (ret == IEEE80211_AMPDU_TX_START_IMMEDIATE) { @@ -889,6 +890,7 @@ void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid, { struct ieee80211_sub_if_data *sdata = sta->sdata; bool send_delba = false; + bool start_txq = false; ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", sta->sta.addr, tid); @@ -906,10 +908,14 @@ void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid, send_delba = true; ieee80211_remove_tid_tx(sta, tid); + start_txq = true; unlock_sta: spin_unlock_bh(&sta->lock); + if (start_txq) + ieee80211_agg_start_txq(sta, tid, false); + if (send_delba) ieee80211_send_delba(sdata, sta->sta.addr, tid, WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index bd3d3195097f..87a208089caf 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -5,7 +5,7 @@ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include <linux/ieee80211.h> @@ -1264,7 +1264,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, return 0; error: + mutex_lock(&local->mtx); ieee80211_vif_release_channel(sdata); + mutex_unlock(&local->mtx); + return err; } @@ -3198,6 +3201,18 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif) } EXPORT_SYMBOL(ieee80211_csa_finish); +void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_tx) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; + + sdata->csa_block_tx = block_tx; + sdata_info(sdata, "channel switch failed, disconnecting\n"); + ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); +} +EXPORT_SYMBOL(ieee80211_channel_switch_disconnect); + static int
ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata, u32 *changed) { @@ -4268,6 +4283,21 @@ ieee80211_color_change_bss_config_notify(struct ieee80211_sub_if_data *sdata, changed |= BSS_CHANGED_HE_BSS_COLOR; ieee80211_bss_info_change_notify(sdata, changed); + + if (!sdata->vif.bss_conf.nontransmitted && sdata->vif.mbssid_tx_vif) { + struct ieee80211_sub_if_data *child; + + mutex_lock(&sdata->local->iflist_mtx); + list_for_each_entry(child, &sdata->local->interfaces, list) { + if (child != sdata && child->vif.mbssid_tx_vif == &sdata->vif) { + child->vif.bss_conf.he_bss_color.color = color; + child->vif.bss_conf.he_bss_color.enabled = enable; + ieee80211_bss_info_change_notify(child, + BSS_CHANGED_HE_BSS_COLOR); + } + } + mutex_unlock(&sdata->local->iflist_mtx); + } } static int ieee80211_color_change_finalize(struct ieee80211_sub_if_data *sdata) @@ -4352,6 +4382,9 @@ ieee80211_color_change(struct wiphy *wiphy, struct net_device *dev, sdata_assert_lock(sdata); + if (sdata->vif.bss_conf.nontransmitted) + return -EINVAL; + mutex_lock(&local->mtx); /* don't allow another color change if one is already active or if csa @@ -4383,6 +4416,18 @@ out: return err; } +static int +ieee80211_set_radar_background(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + if (!local->ops->set_radar_background) + return -EOPNOTSUPP; + + return local->ops->set_radar_background(&local->hw, chandef); +} + const struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, @@ -4487,4 +4532,5 @@ const struct cfg80211_ops mac80211_config_ops = { .reset_tid_config = ieee80211_reset_tid_config, .set_sar_specs = ieee80211_set_sar_specs, .color_change = ieee80211_color_change, + .set_radar_background = ieee80211_set_radar_background, }; diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 481f01b0f65c..9479f2787ea7 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -936,14 +936,15 @@ static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf, PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB, "RX-FULL-BW-SU-USING-MU-WITH-NON-COMP-SIGB"); - switch (cap[9] & IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) { - case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US: + switch (u8_get_bits(cap[9], + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) { + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: PRINT("NOMINAL-PACKET-PADDING-0US"); break; - case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US: + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: PRINT("NOMINAL-PACKET-PADDING-8US"); break; - case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US: + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: PRINT("NOMINAL-PACKET-PADDING-16US"); break; } diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index cd3731cbf6c6..4e2fc1a08681 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -1219,8 +1219,11 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local, { struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); - if (local->in_reconfig) + /* In reconfig don't transmit now, but mark for waking later */ + if (local->in_reconfig) { + set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags); return; + } if (!check_sdata_in_driver(sdata)) return; @@ -1483,4 +1486,26 @@ static inline void drv_twt_teardown_request(struct ieee80211_local *local, trace_drv_return_void(local); } 
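The driver-ops.h changes here follow mac80211's usual idiom for optional driver callbacks: the wrapper probes the function pointer in local->ops, falls back to -EOPNOTSUPP when the driver did not implement the hook (or -EIO when the interface is not registered with the driver), and brackets the call with trace points. A minimal standalone sketch of that guard pattern follows; the struct and names are simplified stand-ins for illustration, not kernel API.

	/* Optional-callback guard, in the style of ieee80211_set_radar_background
	 * above and drv_net_fill_forward_path below: probe the op, then forward.
	 */
	#include <errno.h>

	struct driver_ops {
		int (*set_feature)(int arg);	/* NULL if the driver opted out */
	};

	static int call_optional_op(const struct driver_ops *ops, int arg)
	{
		if (!ops->set_feature)
			return -EOPNOTSUPP;	/* unsupported, caller may ignore */
		return ops->set_feature(arg);	/* forward to the driver */
	}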
+static inline int drv_net_fill_forward_path(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct net_device_path_ctx *ctx, + struct net_device_path *path) +{ + int ret = -EOPNOTSUPP; + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_net_fill_forward_path(local, sdata, sta); + if (local->ops->net_fill_forward_path) + ret = local->ops->net_fill_forward_path(&local->hw, + &sdata->vif, sta, + ctx, path); + trace_drv_return_int(local, ret); + + return ret; +} + #endif /* __MAC80211_DRIVER_OPS */ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 5666bbb8860b..08c0542c93a3 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1463,7 +1463,7 @@ struct ieee80211_local { }; static inline struct ieee80211_sub_if_data * -IEEE80211_DEV_TO_SUB_IF(struct net_device *dev) +IEEE80211_DEV_TO_SUB_IF(const struct net_device *dev) { return netdev_priv(dev); } diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 20aa5cc31f77..41531478437c 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -789,6 +789,64 @@ static const struct net_device_ops ieee80211_monitorif_ops = { .ndo_get_stats64 = ieee80211_get_stats64, }; +static int ieee80211_netdev_fill_forward_path(struct net_device_path_ctx *ctx, + struct net_device_path *path) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_local *local; + struct sta_info *sta; + int ret = -ENOENT; + + sdata = IEEE80211_DEV_TO_SUB_IF(ctx->dev); + local = sdata->local; + + if (!local->ops->net_fill_forward_path) + return -EOPNOTSUPP; + + rcu_read_lock(); + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + sta = rcu_dereference(sdata->u.vlan.sta); + if (sta) + break; + if (sdata->wdev.use_4addr) + goto out; + if (is_multicast_ether_addr(ctx->daddr)) + goto out; + sta = sta_info_get_bss(sdata, ctx->daddr); + break; + case NL80211_IFTYPE_AP: + if (is_multicast_ether_addr(ctx->daddr)) + goto out; + sta = sta_info_get(sdata, ctx->daddr); + break; + case NL80211_IFTYPE_STATION: + if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) { + sta = sta_info_get(sdata, ctx->daddr); + if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { + if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) + goto out; + + break; + } + } + + sta = sta_info_get(sdata, sdata->u.mgd.bssid); + break; + default: + goto out; + } + + if (!sta) + goto out; + + ret = drv_net_fill_forward_path(local, sdata, &sta->sta, ctx, path); +out: + rcu_read_unlock(); + + return ret; +} + static const struct net_device_ops ieee80211_dataif_8023_ops = { .ndo_open = ieee80211_open, .ndo_stop = ieee80211_stop, @@ -798,6 +856,7 @@ static const struct net_device_ops ieee80211_dataif_8023_ops = { .ndo_set_mac_address = ieee80211_change_mac, .ndo_select_queue = ieee80211_netdev_select_queue, .ndo_get_stats64 = ieee80211_get_stats64, + .ndo_fill_forward_path = ieee80211_netdev_fill_forward_path, }; static bool ieee80211_iftype_supports_hdr_offload(enum nl80211_iftype iftype) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 45fb517591ee..5311c3cd3050 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -1131,17 +1131,14 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->scan_ies_len += 2 + sizeof(struct ieee80211_vht_cap); - /* HE cap element is variable in size - set len to allow max size */ /* - * TODO: 1 is added at the end of the calculation to accommodate for - * the temporary placing of the HE capabilities IE 
under EXT. - * Remove it once it is placed in the final place. - */ - if (supp_he) + * HE cap element is variable in size - set len to allow max size */ + if (supp_he) { local->scan_ies_len += - 2 + sizeof(struct ieee80211_he_cap_elem) + + 3 + sizeof(struct ieee80211_he_cap_elem) + sizeof(struct ieee80211_he_mcs_nss_supp) + - IEEE80211_HE_PPE_THRES_MAX_LEN + 1; + IEEE80211_HE_PPE_THRES_MAX_LEN; + } if (!local->ops->hw_scan) { /* For hw_scan, driver needs to set these up. */ diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 54ab0e1ef6ca..51f55c4ee3c6 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -164,12 +164,15 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, chandef->freq1_offset = channel->freq_offset; if (channel->band == NL80211_BAND_6GHZ) { - if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, chandef)) + if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, chandef)) { + mlme_dbg(sdata, + "bad 6 GHz operation, disabling HT/VHT/HE\n"); ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT | IEEE80211_STA_DISABLE_HE; - else + } else { ret = 0; + } vht_chandef = *chandef; goto out; } else if (sband->band == NL80211_BAND_S1GHZ) { @@ -190,6 +193,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap); if (!ht_oper || !sta_ht_cap.ht_supported) { + mlme_dbg(sdata, "HT operation missing / HT not supported\n"); ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT | IEEE80211_STA_DISABLE_HE; @@ -223,6 +227,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { ieee80211_chandef_ht_oper(ht_oper, chandef); } else { + mlme_dbg(sdata, "40 MHz not supported\n"); /* 40 MHz (and 80 MHz) must be supported for VHT */ ret = IEEE80211_STA_DISABLE_VHT; /* also mark 40 MHz disabled */ @@ -231,6 +236,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, } if (!vht_oper || !sband->vht_cap.vht_supported) { + mlme_dbg(sdata, "VHT operation missing / VHT not supported\n"); ret = IEEE80211_STA_DISABLE_VHT; goto out; } @@ -253,7 +259,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) sdata_info(sdata, - "HE AP VHT information is invalid, disable HE\n"); + "HE AP VHT information is invalid, disabling HE\n"); ret = IEEE80211_STA_DISABLE_HE; goto out; } @@ -263,7 +269,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, - "AP VHT information is invalid, disable VHT\n"); + "AP VHT information is invalid, disabling VHT\n"); ret = IEEE80211_STA_DISABLE_VHT; goto out; } @@ -271,7 +277,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, if (!cfg80211_chandef_valid(&vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, - "AP VHT information is invalid, disable VHT\n"); + "AP VHT information is invalid, disabling VHT\n"); ret = IEEE80211_STA_DISABLE_VHT; goto out; } @@ -284,7 +290,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) sdata_info(sdata, - "AP VHT information doesn't match HT, disable VHT\n"); + "AP VHT information doesn't match HT, disabling VHT\n"); ret = IEEE80211_STA_DISABLE_VHT; goto out; } @@ -649,10 +655,6 @@ static void 
ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata, if (!he_cap || !reg_cap) return; - /* - * TODO: the 1 added is because this temporarily is under the EXTENSION - * IE. Get rid of it when it moves. - */ he_cap_size = 2 + 1 + sizeof(he_cap->he_cap_elem) + ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) + @@ -2452,11 +2454,18 @@ static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata, u16 tx_time) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - u16 tid = ieee80211_get_tid(hdr); - int ac = ieee80211_ac_from_tid(tid); - struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac]; + u16 tid; + int ac; + struct ieee80211_sta_tx_tspec *tx_tspec; unsigned long now = jiffies; + if (!ieee80211_is_data_qos(hdr->frame_control)) + return; + + tid = ieee80211_get_tid(hdr); + ac = ieee80211_ac_from_tid(tid); + tx_tspec = &ifmgd->tx_tspec[ac]; + if (likely(!tx_tspec->admitted_time)) return; @@ -3734,6 +3743,10 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, elems->timeout_int && elems->timeout_int->type == WLAN_TIMEOUT_ASSOC_COMEBACK) { u32 tu, ms; + + cfg80211_assoc_comeback(sdata->dev, assoc_data->bss, + le32_to_cpu(elems->timeout_int->value)); + tu = le32_to_cpu(elems->timeout_int->value); ms = tu * 1024 / 1000; sdata_info(sdata, @@ -5036,19 +5049,23 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, /* disable HT/VHT/HE if we don't support them */ if (!sband->ht_cap.ht_supported && !is_6ghz) { + mlme_dbg(sdata, "HT not supported, disabling HT/VHT/HE\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_HT; ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; ifmgd->flags |= IEEE80211_STA_DISABLE_HE; } if (!sband->vht_cap.vht_supported && is_5ghz) { + mlme_dbg(sdata, "VHT not supported, disabling VHT/HE\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; ifmgd->flags |= IEEE80211_STA_DISABLE_HE; } if (!ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif))) + ieee80211_vif_type_p2p(&sdata->vif))) { + mlme_dbg(sdata, "HE not supported, disabling it\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_HE; + } if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && !is_6ghz) { ht_oper = elems->ht_operation; @@ -5072,6 +5089,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, } if (!elems->vht_cap_elem) { + sdata_info(sdata, + "bad VHT capabilities, disabling VHT\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; vht_oper = NULL; } @@ -5119,8 +5138,10 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, break; } - if (!have_80mhz) + if (!have_80mhz) { + sdata_info(sdata, "80 MHz not supported, disabling VHT\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; + } if (sband->band == NL80211_BAND_S1GHZ) { s1g_oper = elems->s1g_oper; @@ -5684,12 +5705,14 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, else if (!is_6ghz) ifmgd->flags |= IEEE80211_STA_DISABLE_HT; vht_elem = ieee80211_bss_get_elem(req->bss, WLAN_EID_VHT_CAPABILITY); - if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap)) + if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap)) { memcpy(&assoc_data->ap_vht_cap, vht_elem->data, sizeof(struct ieee80211_vht_cap)); - else if (is_5ghz) + } else if (is_5ghz) { + sdata_info(sdata, "VHT capa missing/short, disabling VHT/HE\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_VHT | IEEE80211_STA_DISABLE_HE; + } rcu_read_unlock(); if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) && @@ -5763,16 +5786,21 @@ int ieee80211_mgd_assoc(struct 
ieee80211_sub_if_data *sdata, } if (req->flags & ASSOC_REQ_DISABLE_HT) { + mlme_dbg(sdata, "HT disabled by flag, disabling HT/VHT/HE\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_HT; ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; ifmgd->flags |= IEEE80211_STA_DISABLE_HE; } - if (req->flags & ASSOC_REQ_DISABLE_VHT) + if (req->flags & ASSOC_REQ_DISABLE_VHT) { + mlme_dbg(sdata, "VHT disabled by flag, disabling VHT\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; + } - if (req->flags & ASSOC_REQ_DISABLE_HE) + if (req->flags & ASSOC_REQ_DISABLE_HE) { + mlme_dbg(sdata, "HE disabled by flag, disabling VHT\n"); ifmgd->flags |= IEEE80211_STA_DISABLE_HE; + } err = ieee80211_prep_connection(sdata, req->bss, true, override); if (err) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 72b44d4c42d0..9c3b7fc377c1 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -18,8 +18,6 @@ #define AVG_AMPDU_SIZE 16 #define AVG_PKT_SIZE 1200 -#define SAMPLE_SWITCH_THR 100 - /* Number of bits for an average sized packet */ #define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 9541a4c30aca..fec82f7c2fa6 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -465,7 +465,12 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, unsigned int stbc; rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS)); - *pos++ = local->hw.radiotap_mcs_details; + *pos = local->hw.radiotap_mcs_details; + if (status->enc_flags & RX_ENC_FLAG_HT_GF) + *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FMT; + if (status->enc_flags & RX_ENC_FLAG_LDPC) + *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FEC; + pos++; *pos = 0; if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) *pos |= IEEE80211_RADIOTAP_MCS_SGI; @@ -2944,6 +2949,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) if (!fwd_skb) goto out; + fwd_skb->dev = sdata->dev; fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY); info = IEEE80211_SKB_CB(fwd_skb); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 51b49f0d3ad4..537535a88990 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -644,13 +644,13 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) /* check if STA exists already */ if (sta_info_get_bss(sdata, sta->sta.addr)) { err = -EEXIST; - goto out_err; + goto out_cleanup; } sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL); if (!sinfo) { err = -ENOMEM; - goto out_err; + goto out_cleanup; } local->num_sta++; @@ -667,6 +667,15 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) list_add_tail_rcu(&sta->list, &local->sta_list); + /* update channel context before notifying the driver about state + * change, this enables driver using the updated channel context right away. 
+ */ + if (sta->sta_state >= IEEE80211_STA_ASSOC) { + ieee80211_recalc_min_chandef(sta->sdata); + if (!sta->sta.support_p2p_ps) + ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); + } + /* notify driver */ err = sta_info_insert_drv_state(local, sdata, sta); if (err) @@ -674,12 +683,6 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) set_sta_flag(sta, WLAN_STA_INSERTED); - if (sta->sta_state >= IEEE80211_STA_ASSOC) { - ieee80211_recalc_min_chandef(sta->sdata); - if (!sta->sta.support_p2p_ps) - ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); - } - /* accept BA sessions now */ clear_sta_flag(sta, WLAN_STA_BLOCK_BA); @@ -706,8 +709,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) out_drop_sta: local->num_sta--; synchronize_net(); + out_cleanup: cleanup_single_sta(sta); - out_err: mutex_unlock(&local->sta_mtx); kfree(sinfo); rcu_read_lock(); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index ba2796782008..379fd367197f 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -176,6 +176,7 @@ struct sta_info; * @failed_bar_ssn: ssn of the last failed BAR tx attempt * @bar_pending: BAR needs to be re-sent * @amsdu: support A-MSDU within A-MPDU + * @ssn: starting sequence number of the session * * This structure's lifetime is managed by RCU, assignments to * the array holding it must hold the aggregation mutex. @@ -199,6 +200,7 @@ struct tid_ampdu_tx { u8 stop_initiator; bool tx_stop; u16 buf_size; + u16 ssn; u16 failed_bar_ssn; bool bar_pending; diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 9e8381bef7ed..d91498f77796 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -2892,6 +2892,13 @@ TRACE_EVENT(drv_twt_teardown_request, ) ); +DEFINE_EVENT(sta_event, drv_net_fill_forward_path, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + TP_ARGS(local, sdata, sta) +); + #endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 278945e3e08a..6d054fed062f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1822,15 +1822,15 @@ static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx) struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); ieee80211_tx_result res = TX_CONTINUE; + if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL)) + CALL_TXH(ieee80211_tx_h_rate_ctrl); + if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) { __skb_queue_tail(&tx->skbs, tx->skb); tx->skb = NULL; goto txh_done; } - if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL)) - CALL_TXH(ieee80211_tx_h_rate_ctrl); - CALL_TXH(ieee80211_tx_h_michael_mic_add); CALL_TXH(ieee80211_tx_h_sequence); CALL_TXH(ieee80211_tx_h_fragment); @@ -3821,7 +3821,7 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) { struct ieee80211_local *local = hw_to_local(hw); struct airtime_sched_info *air_sched; - u64 now = ktime_get_boottime_ns(); + u64 now = ktime_get_coarse_boottime_ns(); struct ieee80211_txq *ret = NULL; struct airtime_info *air_info; struct txq_info *txqi = NULL; @@ -3948,7 +3948,7 @@ void ieee80211_update_airtime_weight(struct ieee80211_local *local, u64 weight_sum = 0; if (unlikely(!now)) - now = ktime_get_boottime_ns(); + now = ktime_get_coarse_boottime_ns(); lockdep_assert_held(&air_sched->lock); @@ -3974,7 +3974,7 @@ void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *txqi = to_txq_info(txq); struct airtime_sched_info *air_sched; - u64 now = ktime_get_boottime_ns(); + u64 now = ktime_get_coarse_boottime_ns(); struct airtime_info *air_info; u8 ac = txq->ac; bool was_active; @@ -4032,7 +4032,7 @@ static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw, if (!purge) airtime_set_active(air_sched, air_info, - ktime_get_boottime_ns()); + ktime_get_coarse_boottime_ns()); rb_erase_cached(&txqi->schedule_order, &air_sched->active_txqs); @@ -4120,7 +4120,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, if (RB_EMPTY_NODE(&txqi->schedule_order)) goto out; - now = ktime_get_boottime_ns(); + now = ktime_get_coarse_boottime_ns(); /* Like in ieee80211_next_txq(), make sure the first station in the * scheduling order is eligible for transmission to avoid starvation. @@ -4191,11 +4191,11 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, ieee80211_aggr_check(sdata, sta, skb); + sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift); + if (sta) { struct ieee80211_fast_tx *fast_tx; - sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift); - fast_tx = rcu_dereference(sta->fast_tx); if (fast_tx && diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 43df2f0c5db9..0e4e1956bcea 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -943,7 +943,12 @@ static void ieee80211_parse_extension_element(u32 *crc, struct ieee802_11_elems *elems) { const void *data = elem->data + 1; - u8 len = elem->datalen - 1; + u8 len; + + if (!elem->datalen) + return; + + len = elem->datalen - 1; switch (elem->data[0]) { case WLAN_EID_EXT_HE_MU_EDCA: @@ -2063,7 +2068,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, chandef.chan = chan; skb = ieee80211_probereq_get(&local->hw, src, ssid, ssid_len, - 100 + ie_len); + local->scan_ies_len + ie_len); if (!skb) return NULL; @@ -2646,6 +2651,13 @@ int ieee80211_reconfig(struct ieee80211_local *local) mutex_unlock(&local->sta_mtx); } + /* + * If this is for hw restart things are still running. + * We may want to change that later, however. + */ + if (local->open_count && (!suspended || reconfig_due_to_wowlan)) + drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART); + if (local->in_reconfig) { local->in_reconfig = false; barrier(); @@ -2664,13 +2676,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) IEEE80211_QUEUE_STOP_REASON_SUSPEND, false); - /* - * If this is for hw restart things are still running. - * We may want to change that later, however. 
- */ - if (local->open_count && (!suspended || reconfig_due_to_wowlan)) - drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART); - if (!suspended) return 0; diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 4eed23e27610..7ed0d268aff2 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -449,7 +449,6 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb, (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return 0; - hdr = (struct ieee80211_hdr *) pos; pos += hdrlen; pn64 = atomic64_inc_return(&key->conf.tx_pn); @@ -686,7 +685,6 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return 0; - hdr = (struct ieee80211_hdr *)pos; pos += hdrlen; pn64 = atomic64_inc_return(&key->conf.tx_pn); @@ -881,8 +879,6 @@ ieee80211_crypto_cs_decrypt(struct ieee80211_rx_data *rx) if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; - hdr = (struct ieee80211_hdr *)rx->skb->data; - rx_pn = key->u.gen.rx_pn[qos_tid]; skb_pn = rx->skb->data + hdrlen + cs->pn_off; diff --git a/net/mctp/device.c b/net/mctp/device.c index 8799ee77e7b7..ef2755f82f87 100644 --- a/net/mctp/device.c +++ b/net/mctp/device.c @@ -35,14 +35,24 @@ struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev) return rtnl_dereference(dev->mctp_ptr); } -static int mctp_fill_addrinfo(struct sk_buff *skb, struct netlink_callback *cb, - struct mctp_dev *mdev, mctp_eid_t eid) +static int mctp_addrinfo_size(void) +{ + return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + + nla_total_size(1) // IFA_LOCAL + + nla_total_size(1) // IFA_ADDRESS + ; +} + +/* flag should be NLM_F_MULTI for dump calls */ +static int mctp_fill_addrinfo(struct sk_buff *skb, + struct mctp_dev *mdev, mctp_eid_t eid, + int msg_type, u32 portid, u32 seq, int flag) { struct ifaddrmsg *hdr; struct nlmsghdr *nlh; - nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - RTM_NEWADDR, sizeof(*hdr), NLM_F_MULTI); + nlh = nlmsg_put(skb, portid, seq, + msg_type, sizeof(*hdr), flag); if (!nlh) return -EMSGSIZE; @@ -72,10 +82,14 @@ static int mctp_dump_dev_addrinfo(struct mctp_dev *mdev, struct sk_buff *skb, struct netlink_callback *cb) { struct mctp_dump_cb *mcb = (void *)cb->ctx; + u32 portid, seq; int rc = 0; + portid = NETLINK_CB(cb->skb).portid; + seq = cb->nlh->nlmsg_seq; for (; mcb->a_idx < mdev->num_addrs; mcb->a_idx++) { - rc = mctp_fill_addrinfo(skb, cb, mdev, mdev->addrs[mcb->a_idx]); + rc = mctp_fill_addrinfo(skb, mdev, mdev->addrs[mcb->a_idx], + RTM_NEWADDR, portid, seq, NLM_F_MULTI); if (rc < 0) break; } @@ -127,6 +141,32 @@ out: return skb->len; } +static void mctp_addr_notify(struct mctp_dev *mdev, mctp_eid_t eid, int msg_type, + struct sk_buff *req_skb, struct nlmsghdr *req_nlh) +{ + u32 portid = NETLINK_CB(req_skb).portid; + struct net *net = dev_net(mdev->dev); + struct sk_buff *skb; + int rc = -ENOBUFS; + + skb = nlmsg_new(mctp_addrinfo_size(), GFP_KERNEL); + if (!skb) + goto out; + + rc = mctp_fill_addrinfo(skb, mdev, eid, msg_type, + portid, req_nlh->nlmsg_seq, 0); + if (rc < 0) { + WARN_ON_ONCE(rc == -EMSGSIZE); + goto out; + } + + rtnl_notify(skb, net, portid, RTNLGRP_MCTP_IFADDR, req_nlh, GFP_KERNEL); + return; +out: + kfree_skb(skb); + rtnl_set_sk_err(net, RTNLGRP_MCTP_IFADDR, rc); +} + static const struct nla_policy ifa_mctp_policy[IFA_MAX + 1] = { [IFA_ADDRESS] = { .type = NLA_U8 }, [IFA_LOCAL] = { .type = NLA_U8 }, @@ -189,6 +229,7 @@ static int mctp_rtm_newaddr(struct sk_buff *skb, struct 
nlmsghdr *nlh, kfree(tmp_addrs); + mctp_addr_notify(mdev, addr->s_addr, RTM_NEWADDR, skb, nlh); mctp_route_add_local(mdev, addr->s_addr); return 0; @@ -244,6 +285,8 @@ static int mctp_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, mdev->num_addrs--; spin_unlock_irqrestore(&mdev->addrs_lock, flags); + mctp_addr_notify(mdev, addr->s_addr, RTM_DELADDR, skb, nlh); + return 0; } diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 4ff8d55cbe82..6cde58c259a8 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -38,7 +38,8 @@ struct mptcp_pm_add_entry { u8 retrans_times; }; -#define MAX_ADDR_ID 255 +/* max value of mptcp_addr_info.id */ +#define MAX_ADDR_ID U8_MAX #define BITMAP_SZ DIV_ROUND_UP(MAX_ADDR_ID + 1, BITS_PER_LONG) struct pm_nl_pernet { @@ -700,6 +701,9 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, msk_owned_by_me(msk); + if (sk->sk_state == TCP_LISTEN) + return; + if (!rm_list->nr) return; @@ -822,14 +826,13 @@ find_next: entry->addr.id = find_next_zero_bit(pernet->id_bitmap, MAX_ADDR_ID + 1, pernet->next_id); - if ((!entry->addr.id || entry->addr.id > MAX_ADDR_ID) && - pernet->next_id != 1) { + if (!entry->addr.id && pernet->next_id != 1) { pernet->next_id = 1; goto find_next; } } - if (!entry->addr.id || entry->addr.id > MAX_ADDR_ID) + if (!entry->addr.id) goto out; __set_bit(entry->addr.id, pernet->id_bitmap); diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index f124cca125d2..df5a0cf431c1 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1372,7 +1372,7 @@ out: struct subflow_send_info { struct sock *ssk; - u64 ratio; + u64 linger_time; }; void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow) @@ -1397,20 +1397,24 @@ bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) return __mptcp_subflow_active(subflow); } +#define SSK_MODE_ACTIVE 0 +#define SSK_MODE_BACKUP 1 +#define SSK_MODE_MAX 2 + /* implement the mptcp packet scheduler; * returns the subflow that will transmit the next DSS * additionally updates the rtx timeout */ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) { - struct subflow_send_info send_info[2]; + struct subflow_send_info send_info[SSK_MODE_MAX]; struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; + u32 pace, burst, wmem; int i, nr_active = 0; struct sock *ssk; + u64 linger_time; long tout = 0; - u64 ratio; - u32 pace; sock_owned_by_me(sk); @@ -1429,10 +1433,11 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) } /* pick the subflow with the lower wmem/wspace ratio */ - for (i = 0; i < 2; ++i) { + for (i = 0; i < SSK_MODE_MAX; ++i) { send_info[i].ssk = NULL; - send_info[i].ratio = -1; + send_info[i].linger_time = -1; } + mptcp_for_each_subflow(msk, subflow) { trace_mptcp_subflow_get_send(subflow); ssk = mptcp_subflow_tcp_sock(subflow); @@ -1441,34 +1446,51 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) tout = max(tout, mptcp_timeout_from_subflow(subflow)); nr_active += !subflow->backup; - if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd) - continue; - - pace = READ_ONCE(ssk->sk_pacing_rate); - if (!pace) - continue; + pace = subflow->avg_pacing_rate; + if (unlikely(!pace)) { + /* init pacing rate from socket */ + subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate); + pace = subflow->avg_pacing_rate; + if (!pace) + continue; + } - ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, - pace); - if (ratio < send_info[subflow->backup].ratio) 
{ + linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace); + if (linger_time < send_info[subflow->backup].linger_time) { + send_info[subflow->backup].ssk = ssk; - send_info[subflow->backup].ratio = ratio; + send_info[subflow->backup].linger_time = linger_time; } } __mptcp_set_timeout(sk, tout); /* pick the best backup if no other subflow is active */ if (!nr_active) - send_info[0].ssk = send_info[1].ssk; - - if (send_info[0].ssk) { - msk->last_snd = send_info[0].ssk; - msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE, - tcp_sk(msk->last_snd)->snd_wnd); - return msk->last_snd; - } + send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk; + + /* According to the blest algorithm, to avoid HoL blocking for the + * faster flow, we need to: + * - estimate the faster flow linger time + * - use the above to estimate the amount of bytes transferred + * by the faster flow + * - check that the amount of queued data is greater than the above, + * otherwise do not use the picked, slower, subflow + * We select the subflow with the shorter estimated time to flush + * the queued mem, which basically ensures the above. We just need + * to check that the subflow has a non-empty cwin. + */ + ssk = send_info[SSK_MODE_ACTIVE].ssk; + if (!ssk || !sk_stream_memory_free(ssk) || !tcp_sk(ssk)->snd_wnd) + return NULL; - return NULL; + burst = min_t(int, MPTCP_SEND_BURST_SIZE, tcp_sk(ssk)->snd_wnd); + wmem = READ_ONCE(ssk->sk_wmem_queued); + subflow = mptcp_subflow_ctx(ssk); + subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem + + READ_ONCE(ssk->sk_pacing_rate) * burst, + burst + wmem); + msk->last_snd = ssk; + msk->snd_burst = burst; + return ssk; } static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info) @@ -1527,7 +1549,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags) int ret = 0; prev_ssk = ssk; - mptcp_flush_join_list(msk); + __mptcp_flush_join_list(msk); ssk = mptcp_subflow_get_send(msk); /* First check. If the ssk has changed since @@ -2914,7 +2936,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, */ if (WARN_ON_ONCE(!new_mptcp_sock)) { tcp_sk(newsk)->is_mptcp = 0; - return newsk; + goto out; } /* acquire the 2nd reference for the owning socket */ @@ -2926,6 +2948,8 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK); } +out: + newsk->sk_kern_sock = kern; return newsk; } diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index e1469155fb15..0486c9f5b38b 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -395,6 +395,7 @@ DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions); /* MPTCP subflow context */ struct mptcp_subflow_context { struct list_head node;/* conn_list of subflows */ + unsigned long avg_pacing_rate; /* protected by msk socket lock */ u64 local_key; u64 remote_key; u64 idsn; diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index 3c3db22fd36a..aa3fcd86dbe2 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -543,7 +543,6 @@ static bool mptcp_supported_sockopt(int level, int optname) case TCP_NODELAY: case TCP_THIN_LINEAR_TIMEOUTS: case TCP_CONGESTION: - case TCP_ULP: case TCP_CORK: case TCP_KEEPIDLE: case TCP_KEEPINTVL: diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index b8dd3441f7d0..24bc9d5e87be 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1534,7 +1534,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock) * needs it.
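The scheduler hunk above replaces the opaque wmem/pace "ratio" with an explicit linger time and keeps a per-subflow weighted-average pacing rate. A userspace sketch of the two formulas (illustrative values; the kernel performs the same 32.32 fixed-point math via div_u64):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t pace  = 125000;	/* current sk_pacing_rate, bytes/s */
		uint64_t avg   = 100000;	/* subflow->avg_pacing_rate so far */
		uint64_t wmem  = 64 * 1024;	/* bytes already queued on the subflow */
		uint64_t burst = 32 * 1024;	/* bytes about to be queued */

		/* linger_time: drain time of the queued data, scaled by 2^32
		 * so it stays in integer arithmetic */
		uint64_t linger_time = (wmem << 32) / pace;

		/* new average: old rate weighted by the queued bytes, current
		 * rate weighted by the new burst */
		uint64_t new_avg = (avg * wmem + pace * burst) / (wmem + burst);

		printf("linger=%llu new_avg=%llu\n",
		       (unsigned long long)linger_time,
		       (unsigned long long)new_avg);
		return 0;
	}
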
*/ sf->sk->sk_net_refcnt = 1; - get_net(net); + get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL); sock_inuse_add(net, 1); err = tcp_set_ulp(sf->sk, "mptcp"); release_sock(sf->sk); diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c index bb5f1650f11c..c189b4c8a182 100644 --- a/net/ncsi/ncsi-netlink.c +++ b/net/ncsi/ncsi-netlink.c @@ -112,7 +112,11 @@ static int ncsi_write_package_info(struct sk_buff *skb, pnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR); if (!pnest) return -ENOMEM; - nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id); + rc = nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id); + if (rc) { + nla_nest_cancel(skb, pnest); + return rc; + } if ((0x1 << np->id) == ndp->package_whitelist) nla_put_flag(skb, NCSI_PKG_ATTR_FORCED); cnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR_CHANNEL_LIST); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 39c523bd775c..7f645328b47f 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -960,8 +960,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, * Create a destination for the given service */ static int -ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, - struct ip_vs_dest **dest_p) +ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) { struct ip_vs_dest *dest; unsigned int atype, i; @@ -1021,8 +1020,6 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, spin_lock_init(&dest->stats.lock); __ip_vs_update_dest(svc, dest, udest, 1); - *dest_p = dest; - LeaveFunction(2); return 0; @@ -1096,7 +1093,7 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) /* * Allocate and initialize the dest structure */ - ret = ip_vs_new_dest(svc, udest, &dest); + ret = ip_vs_new_dest(svc, udest); } LeaveFunction(2); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 9fbce31baf75..d7e313548066 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1562,9 +1562,7 @@ __nf_conntrack_alloc(struct net *net, ct->status = 0; WRITE_ONCE(ct->timeout, 0); write_pnet(&ct->ct_net, net); - memset(&ct->__nfct_init_offset, 0, - offsetof(struct nf_conn, proto) - - offsetof(struct nf_conn, __nfct_init_offset)); + memset_after(ct, 0, __nfct_init_offset); nf_ct_zone_add(ct, zone); @@ -2590,7 +2588,6 @@ int nf_conntrack_hash_resize(unsigned int hashsize) hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); } } - old_size = nf_conntrack_htable_size; old_hash = nf_conntrack_hash; nf_conntrack_hash = hash; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 47c72f28995a..0be2a1ae5c17 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1195,8 +1195,6 @@ restart: } hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]], hnnode) { - if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) - continue; ct = nf_ct_tuplehash_to_ctrack(h); if (nf_ct_is_expired(ct)) { if (i < ARRAY_SIZE(nf_ct_evict) && @@ -1208,6 +1206,9 @@ restart: if (!net_eq(net, nf_ct_net(ct))) continue; + if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) + continue; + if (cb->args[1]) { if (ct != last) continue; @@ -1746,7 +1747,7 @@ restart: res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFNL_MSG_TYPE(cb->nlh->nlmsg_type), - ct, dying ? 
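The conntrack hunk above collapses an open-coded pair of offsetof() subtractions into memset_after(). A simplified userspace sketch of what that helper does (the real macro lives in include/linux/string.h; zero-size marker members and typeof are GNU C extensions, as in the kernel):

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	/* Zero everything in *obj located after `member` */
	#define memset_after(obj, v, member)					\
		memset((char *)(obj) + offsetof(typeof(*(obj)), member) +	\
			       sizeof((obj)->member),				\
		       (v),							\
		       sizeof(*(obj)) - offsetof(typeof(*(obj)), member) -	\
			       sizeof((obj)->member))

	struct demo {
		int keep;
		char marker[0];	/* like __nfct_init_offset: a zero-size landmark */
		int a, b;
	};

	int main(void)
	{
		struct demo d = { .keep = 7, .a = 1, .b = 2 };

		memset_after(&d, 0, marker);		/* clears a and b only */
		printf("%d %d %d\n", d.keep, d.a, d.b);	/* prints: 7 0 0 */
		return 0;
	}
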
true : false, 0); + ct, dying, 0); if (res < 0) { if (!atomic_inc_not_zero(&ct->ct_general.use)) continue; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index c0851fec11d4..c20772822637 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4481,9 +4481,9 @@ struct nft_set_elem_catchall { static void nft_set_catchall_destroy(const struct nft_ctx *ctx, struct nft_set *set) { - struct nft_set_elem_catchall *catchall; + struct nft_set_elem_catchall *next, *catchall; - list_for_each_entry_rcu(catchall, &set->catchall_list, list) { + list_for_each_entry_safe(catchall, next, &set->catchall_list, list) { list_del_rcu(&catchall->list); nft_set_elem_destroy(set, catchall->elem, true); kfree_rcu(catchall); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 691ef4cffdd9..7f83f9697fc1 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -556,7 +556,8 @@ __build_packet_message(struct nfnl_log_net *log, goto nla_put_failure; if (indev && skb->dev && - skb->mac_header != skb->network_header) { + skb_mac_header_was_set(skb) && + skb_mac_header_len(skb) != 0) { struct nfulnl_msg_packet_hw phw; int len; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 5837e8efc9c2..44c3de176d18 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -560,7 +560,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, goto nla_put_failure; if (indev && entskb->dev && - skb_mac_header_was_set(entskb)) { + skb_mac_header_was_set(entskb) && + skb_mac_header_len(entskb) != 0) { struct nfqnl_msg_packet_hw phw; int len; @@ -1527,15 +1528,9 @@ static void __net_exit nfnl_queue_net_exit(struct net *net) WARN_ON_ONCE(!hlist_empty(&q->instance_table[i])); } -static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list) -{ - synchronize_rcu(); -} - static struct pernet_operations nfnl_queue_net_ops = { .init = nfnl_queue_net_init, .exit = nfnl_queue_net_exit, - .exit_batch = nfnl_queue_net_exit_batch, .id = &nfnl_queue_net_id, .size = sizeof(struct nfnl_queue_net), }; diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c index cd59afde5b2f..fa9301ca6033 100644 --- a/net/netfilter/nft_fwd_netdev.c +++ b/net/netfilter/nft_fwd_netdev.c @@ -27,9 +27,11 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr, { struct nft_fwd_netdev *priv = nft_expr_priv(expr); int oif = regs->data[priv->sreg_dev]; + struct sk_buff *skb = pkt->skb; /* This is used by ifb only. 
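The nft_set_catchall_destroy() hunk above switches to list_for_each_entry_safe() because every entry is unlinked and freed inside the loop body. A hand-rolled userspace sketch of why the _safe variant, which caches the successor before the current node dies, is required:

	#include <stdlib.h>

	struct node { struct node *next; };

	static void destroy_all(struct node *head)
	{
		struct node *cur = head, *next;

		while (cur) {
			next = cur->next;	/* grab the successor first ... */
			free(cur);		/* ... so freeing cur is safe */
			cur = next;		/* `cur = cur->next` here would be
						 * a use-after-free, exactly what
						 * the non-safe iterator does */
		}
	}

	int main(void)
	{
		struct node *a = malloc(sizeof(*a));
		struct node *b = malloc(sizeof(*b));

		a->next = b;
		b->next = NULL;
		destroy_all(a);
		return 0;
	}
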
*/ - skb_set_redirected(pkt->skb, true); + skb->skb_iif = skb->dev->ifindex; + skb_set_redirected(skb, nft_hook(pkt) == NF_NETDEV_INGRESS); nf_fwd_netdev_egress(pkt, oif); regs->verdict.code = NF_STOLEN; @@ -198,7 +200,8 @@ static int nft_fwd_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nft_data **data) { - return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS)); + return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS) | + (1 << NF_NETDEV_EGRESS)); } static struct nft_expr_type nft_fwd_netdev_type; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 9713035b89e3..6d262d9aa10e 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -34,6 +34,7 @@ #include <net/mpls.h> #include <net/ndisc.h> #include <net/nsh.h> +#include <net/netfilter/nf_conntrack_zones.h> #include "conntrack.h" #include "datapath.h" @@ -860,6 +861,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, #endif bool post_ct = false; int res, err; + u16 zone = 0; /* Extract metadata from packet. */ if (tun_info) { @@ -898,6 +900,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, key->recirc_id = tc_ext ? tc_ext->chain : 0; OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0; post_ct = tc_ext ? tc_ext->post_ct : false; + zone = post_ct ? tc_ext->zone : 0; } else { key->recirc_id = 0; } @@ -906,8 +909,11 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, #endif err = key_extract(skb, key); - if (!err) + if (!err) { ovs_ct_fill_key(skb, key, post_ct); /* Must be after key_extract(). */ + if (post_ct && !skb_get_nfct(skb)) + key->ct_zone = zone; + } return err; } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 3ca4f890371a..9bbe7282efb6 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3110,7 +3110,7 @@ static int packet_release(struct socket *sock) packet_cached_dev_reset(po); if (po->prot_hook.dev) { - dev_put(po->prot_hook.dev); + dev_put_track(po->prot_hook.dev, &po->prot_hook.dev_tracker); po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); @@ -3218,18 +3218,25 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, WRITE_ONCE(po->num, proto); po->prot_hook.type = proto; + dev_put_track(dev_curr, &po->prot_hook.dev_tracker); + dev_curr = NULL; + if (unlikely(unlisted)) { dev_put(dev); po->prot_hook.dev = NULL; WRITE_ONCE(po->ifindex, -1); packet_cached_dev_reset(po); } else { + if (dev) + netdev_tracker_alloc(dev, + &po->prot_hook.dev_tracker, + GFP_ATOMIC); po->prot_hook.dev = dev; WRITE_ONCE(po->ifindex, dev ? 
dev->ifindex : 0); packet_cached_dev_assign(po, dev); } } - dev_put(dev_curr); + dev_put_track(dev_curr, &po->prot_hook.dev_tracker); if (proto == 0 || !need_rehook) goto out_unlock; @@ -4139,7 +4146,8 @@ static int packet_notifier(struct notifier_block *this, if (msg == NETDEV_UNREGISTER) { packet_cached_dev_reset(po); WRITE_ONCE(po->ifindex, -1); - dev_put(po->prot_hook.dev); + dev_put_track(po->prot_hook.dev, + &po->prot_hook.dev_tracker); po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); @@ -4489,9 +4497,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, } out_free_pg_vec: - bitmap_free(rx_owner_map); - if (pg_vec) + if (pg_vec) { + bitmap_free(rx_owner_map); free_pg_vec(pg_vec, order, req->tp_block_nr); + } out: return err; } diff --git a/net/phonet/pep.c b/net/phonet/pep.c index a1525916885a..65d463ad8770 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -868,6 +868,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp, err = pep_accept_conn(newsk, skb); if (err) { + __sock_put(sk); sock_put(newsk); newsk = NULL; goto drop; @@ -946,6 +947,8 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg) ret = -EBUSY; else if (sk->sk_state == TCP_ESTABLISHED) ret = -EISCONN; + else if (!pn->pn_sk.sobject) + ret = -EADDRNOTAVAIL; else ret = pep_sock_enable(sk, NULL, 0); release_sock(sk); diff --git a/net/rds/connection.c b/net/rds/connection.c index a3bc4b54d491..b4cc699c5fad 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -253,6 +253,7 @@ static struct rds_connection *__rds_conn_create(struct net *net, * should end up here, but if it * does, reset/destroy the connection. */ + kfree(conn->c_path); kmem_cache_free(rds_conn_slab, conn); conn = ERR_PTR(-EOPNOTSUPP); goto out; diff --git a/net/rfkill/core.c b/net/rfkill/core.c index ac15a944573f..5b1927d66f0d 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -946,6 +946,18 @@ bool rfkill_blocked(struct rfkill *rfkill) } EXPORT_SYMBOL(rfkill_blocked); +bool rfkill_soft_blocked(struct rfkill *rfkill) +{ + unsigned long flags; + u32 state; + + spin_lock_irqsave(&rfkill->lock, flags); + state = rfkill->state; + spin_unlock_irqrestore(&rfkill->lock, flags); + + return !!(state & RFKILL_BLOCK_SW); +} +EXPORT_SYMBOL(rfkill_soft_blocked); struct rfkill * __must_check rfkill_alloc(const char *name, struct device *parent, diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 3258da3d5bed..32563cef85bf 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -19,8 +19,10 @@ #include <net/sock.h> #include <net/sch_generic.h> #include <net/pkt_cls.h> +#include <net/tc_act/tc_pedit.h> #include <net/act_api.h> #include <net/netlink.h> +#include <net/flow_offload.h> #ifdef CONFIG_INET DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count); @@ -129,8 +131,244 @@ static void free_tcf(struct tc_action *p) kfree(p); } +static void offload_action_hw_count_set(struct tc_action *act, + u32 hw_count) +{ + act->in_hw_count = hw_count; +} + +static void offload_action_hw_count_inc(struct tc_action *act, + u32 hw_count) +{ + act->in_hw_count += hw_count; +} + +static void offload_action_hw_count_dec(struct tc_action *act, + u32 hw_count) +{ + act->in_hw_count = act->in_hw_count > hw_count ? + act->in_hw_count - hw_count : 0; +} + +static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act) +{ + if (is_tcf_pedit(act)) + return tcf_pedit_nkeys(act); + else + return 1; +} + +static bool tc_act_skip_hw(u32 flags) +{ + return (flags & TCA_ACT_FLAGS_SKIP_HW) ? 
true : false; +} + +static bool tc_act_skip_sw(u32 flags) +{ + return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false; +} + +static bool tc_act_in_hw(struct tc_action *act) +{ + return !!act->in_hw_count; +} + +/* SKIP_HW and SKIP_SW are mutually exclusive flags. */ +static bool tc_act_flags_valid(u32 flags) +{ + flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW; + + return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW); +} + +static int offload_action_init(struct flow_offload_action *fl_action, + struct tc_action *act, + enum offload_act_command cmd, + struct netlink_ext_ack *extack) +{ + int err; + + fl_action->extack = extack; + fl_action->command = cmd; + fl_action->index = act->tcfa_index; + + if (act->ops->offload_act_setup) { + spin_lock_bh(&act->tcfa_lock); + err = act->ops->offload_act_setup(act, fl_action, NULL, + false); + spin_unlock_bh(&act->tcfa_lock); + return err; + } + + return -EOPNOTSUPP; +} + +static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act, + u32 *hw_count) +{ + int err; + + err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT, + fl_act, NULL, NULL); + if (err < 0) + return err; + + if (hw_count) + *hw_count = err; + + return 0; +} + +static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act, + u32 *hw_count, + flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + int err; + + err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL); + if (err < 0) + return err; + + if (hw_count) + *hw_count = 1; + + return 0; +} + +static int tcf_action_offload_cmd(struct flow_offload_action *fl_act, + u32 *hw_count, + flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count, + cb, cb_priv) : + tcf_action_offload_cmd_ex(fl_act, hw_count); +} + +static int tcf_action_offload_add_ex(struct tc_action *action, + struct netlink_ext_ack *extack, + flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + bool skip_sw = tc_act_skip_sw(action->tcfa_flags); + struct tc_action *actions[TCA_ACT_MAX_PRIO] = { + [0] = action, + }; + struct flow_offload_action *fl_action; + u32 in_hw_count = 0; + int num, err = 0; + + if (tc_act_skip_hw(action->tcfa_flags)) + return 0; + + num = tcf_offload_act_num_actions_single(action); + fl_action = offload_action_alloc(num); + if (!fl_action) + return -ENOMEM; + + err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack); + if (err) + goto fl_err; + + err = tc_setup_action(&fl_action->action, actions); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to setup tc actions for offload\n"); + goto fl_err; + } + + err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv); + if (!err) + cb ? 
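tc_act_flags_valid() above encodes "SKIP_HW and SKIP_SW are mutually exclusive" as a mask-and-XOR: the result is non-zero (valid) unless both bits are set. A userspace sketch of the truth table, with placeholder bit values standing in for the real TCA_ACT_FLAGS_* constants:

	#include <stdio.h>

	#define SKIP_HW 0x1	/* placeholder for TCA_ACT_FLAGS_SKIP_HW */
	#define SKIP_SW 0x2	/* placeholder for TCA_ACT_FLAGS_SKIP_SW */

	/* Mirrors tc_act_flags_valid(): non-zero unless BOTH bits are set */
	static unsigned int flags_valid(unsigned int flags)
	{
		flags &= SKIP_HW | SKIP_SW;
		return flags ^ (SKIP_HW | SKIP_SW);
	}

	int main(void)
	{
		for (unsigned int f = 0; f < 4; f++)
			printf("flags=%u valid=%d\n", f, !!flags_valid(f));
		/* prints: 0->1, 1->1, 2->1, 3->0; only SKIP_HW|SKIP_SW fails */
		return 0;
	}
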
offload_action_hw_count_inc(action, in_hw_count) : + offload_action_hw_count_set(action, in_hw_count); + + if (skip_sw && !tc_act_in_hw(action)) + err = -EINVAL; + + tc_cleanup_offload_action(&fl_action->action); + +fl_err: + kfree(fl_action); + + return err; +} + +/* offload the tc action after it is inserted */ +static int tcf_action_offload_add(struct tc_action *action, + struct netlink_ext_ack *extack) +{ + return tcf_action_offload_add_ex(action, extack, NULL, NULL); +} + +int tcf_action_update_hw_stats(struct tc_action *action) +{ + struct flow_offload_action fl_act = {}; + int err; + + if (!tc_act_in_hw(action)) + return -EOPNOTSUPP; + + err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL); + if (err) + return err; + + err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL); + if (!err) { + preempt_disable(); + tcf_action_stats_update(action, fl_act.stats.bytes, + fl_act.stats.pkts, + fl_act.stats.drops, + fl_act.stats.lastused, + true); + preempt_enable(); + action->used_hw_stats = fl_act.stats.used_hw_stats; + action->used_hw_stats_valid = true; + } else { + return -EOPNOTSUPP; + } + + return 0; +} +EXPORT_SYMBOL(tcf_action_update_hw_stats); + +static int tcf_action_offload_del_ex(struct tc_action *action, + flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + struct flow_offload_action fl_act = {}; + u32 in_hw_count = 0; + int err = 0; + + if (!tc_act_in_hw(action)) + return 0; + + err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL); + if (err) + return err; + + err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv); + if (err < 0) + return err; + + if (!cb && action->in_hw_count != in_hw_count) + return -EINVAL; + + /* do not need to update hw state when deleting action */ + if (cb && in_hw_count) + offload_action_hw_count_dec(action, in_hw_count); + + return 0; +} + +static int tcf_action_offload_del(struct tc_action *action) +{ + return tcf_action_offload_del_ex(action, NULL, NULL); +} + static void tcf_action_cleanup(struct tc_action *p) { + tcf_action_offload_del(p); if (p->ops->cleanup) p->ops->cleanup(p); @@ -497,7 +735,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, p->tcfa_tm.install = jiffies; p->tcfa_tm.lastuse = jiffies; p->tcfa_tm.firstuse = 0; - p->tcfa_flags = flags & TCA_ACT_FLAGS_USER_MASK; + p->tcfa_flags = flags; if (est) { err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats, &p->tcfa_rate_est, @@ -622,6 +860,59 @@ EXPORT_SYMBOL(tcf_idrinfo_destroy); static LIST_HEAD(act_base); static DEFINE_RWLOCK(act_mod_lock); +/* since act ops id is stored in pernet subsystem list, + * then there is no way to walk through only all the action + * subsystem, so we keep tc action pernet ops id for + * reoffload to walk through. 
+ */ +static LIST_HEAD(act_pernet_id_list); +static DEFINE_MUTEX(act_id_mutex); +struct tc_act_pernet_id { + struct list_head list; + unsigned int id; +}; + +static int tcf_pernet_add_id_list(unsigned int id) +{ + struct tc_act_pernet_id *id_ptr; + int ret = 0; + + mutex_lock(&act_id_mutex); + list_for_each_entry(id_ptr, &act_pernet_id_list, list) { + if (id_ptr->id == id) { + ret = -EEXIST; + goto err_out; + } + } + + id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL); + if (!id_ptr) { + ret = -ENOMEM; + goto err_out; + } + id_ptr->id = id; + + list_add_tail(&id_ptr->list, &act_pernet_id_list); + +err_out: + mutex_unlock(&act_id_mutex); + return ret; +} + +static void tcf_pernet_del_id_list(unsigned int id) +{ + struct tc_act_pernet_id *id_ptr; + + mutex_lock(&act_id_mutex); + list_for_each_entry(id_ptr, &act_pernet_id_list, list) { + if (id_ptr->id == id) { + list_del(&id_ptr->list); + kfree(id_ptr); + break; + } + } + mutex_unlock(&act_id_mutex); +} int tcf_register_action(struct tc_action_ops *act, struct pernet_operations *ops) @@ -640,18 +931,31 @@ int tcf_register_action(struct tc_action_ops *act, if (ret) return ret; + if (ops->id) { + ret = tcf_pernet_add_id_list(*ops->id); + if (ret) + goto err_id; + } + write_lock(&act_mod_lock); list_for_each_entry(a, &act_base, head) { if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) { - write_unlock(&act_mod_lock); - unregister_pernet_subsys(ops); - return -EEXIST; + ret = -EEXIST; + goto err_out; } } list_add_tail(&act->head, &act_base); write_unlock(&act_mod_lock); return 0; + +err_out: + write_unlock(&act_mod_lock); + if (ops->id) + tcf_pernet_del_id_list(*ops->id); +err_id: + unregister_pernet_subsys(ops); + return ret; } EXPORT_SYMBOL(tcf_register_action); @@ -670,8 +974,11 @@ int tcf_unregister_action(struct tc_action_ops *act, } } write_unlock(&act_mod_lock); - if (!err) + if (!err) { unregister_pernet_subsys(ops); + if (ops->id) + tcf_pernet_del_id_list(*ops->id); + } return err; } EXPORT_SYMBOL(tcf_unregister_action); @@ -735,6 +1042,9 @@ restart_act_graph: jmp_prgcnt -= 1; continue; } + + if (tc_act_skip_sw(a->tcfa_flags)) + continue; repeat: ret = a->ops->act(skb, a, res); if (ret == TC_ACT_REPEAT) @@ -821,6 +1131,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) int err = -EINVAL; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; + u32 flags; if (tcf_action_dump_terse(skb, a, false)) goto nla_put_failure; @@ -835,9 +1146,13 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) a->used_hw_stats, TCA_ACT_HW_STATS_ANY)) goto nla_put_failure; - if (a->tcfa_flags && + flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK; + if (flags && nla_put_bitfield32(skb, TCA_ACT_FLAGS, - a->tcfa_flags, a->tcfa_flags)) + flags, flags)) + goto nla_put_failure; + + if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count)) goto nla_put_failure; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); @@ -919,7 +1234,9 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = { [TCA_ACT_COOKIE] = { .type = NLA_BINARY, .len = TC_COOKIE_MAX_SIZE }, [TCA_ACT_OPTIONS] = { .type = NLA_NESTED }, - [TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS), + [TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS | + TCA_ACT_FLAGS_SKIP_HW | + TCA_ACT_FLAGS_SKIP_SW), [TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY), }; @@ -1032,8 +1349,13 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, } } hw_stats = 
tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]); - if (tb[TCA_ACT_FLAGS]) + if (tb[TCA_ACT_FLAGS]) { userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]); + if (!tc_act_flags_valid(userflags.value)) { + err = -EINVAL; + goto err_out; + } + } err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp, userflags.value | flags, extack); @@ -1061,11 +1383,17 @@ err_out: return ERR_PTR(err); } +static bool tc_act_bind(u32 flags) +{ + return !!(flags & TCA_ACT_FLAGS_BIND); +} + /* Returns numbers of initialized actions or negative error. */ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, struct tc_action *actions[], - int init_res[], size_t *attr_size, u32 flags, + int init_res[], size_t *attr_size, + u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {}; @@ -1103,6 +1431,22 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, sz += tcf_action_fill_size(act); /* Start from index 0 */ actions[i - 1] = act; + if (tc_act_bind(flags)) { + bool skip_sw = tc_skip_sw(fl_flags); + bool skip_hw = tc_skip_hw(fl_flags); + + if (tc_act_bind(act->tcfa_flags)) + continue; + if (skip_sw != tc_act_skip_sw(act->tcfa_flags) || + skip_hw != tc_act_skip_hw(act->tcfa_flags)) { + err = -EINVAL; + goto err; + } + } else { + err = tcf_action_offload_add(act, extack); + if (tc_act_skip_sw(act->tcfa_flags) && err) + goto err; + } } /* We have to commit them all together, because if any error happened in @@ -1154,6 +1498,9 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p, if (p == NULL) goto errout; + /* update hw stats for this action */ + tcf_action_update_hw_stats(p); + /* compat_mode being true specifies a call that is supposed * to add additional backward compatibility statistic TLVs. */ @@ -1396,6 +1743,96 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[]) } static int +tcf_reoffload_del_notify(struct net *net, struct tc_action *action) +{ + size_t attr_size = tcf_action_fill_size(action); + struct tc_action *actions[TCA_ACT_MAX_PRIO] = { + [0] = action, + }; + const struct tc_action_ops *ops = action->ops; + struct sk_buff *skb; + int ret; + + skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? 
NLMSG_GOODSIZE : attr_size, + GFP_KERNEL); + if (!skb) + return -ENOBUFS; + + if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1) <= 0) { + kfree_skb(skb); + return -EINVAL; + } + + ret = tcf_idr_release_unsafe(action); + if (ret == ACT_P_DELETED) { + module_put(ops->owner); + ret = rtnetlink_send(skb, net, 0, RTNLGRP_TC, 0); + } else { + kfree_skb(skb); + } + + return ret; +} + +int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb, + void *cb_priv, bool add) +{ + struct tc_act_pernet_id *id_ptr; + struct tcf_idrinfo *idrinfo; + struct tc_action_net *tn; + struct tc_action *p; + unsigned int act_id; + unsigned long tmp; + unsigned long id; + struct idr *idr; + struct net *net; + int ret; + + if (!cb) + return -EINVAL; + + down_read(&net_rwsem); + mutex_lock(&act_id_mutex); + + for_each_net(net) { + list_for_each_entry(id_ptr, &act_pernet_id_list, list) { + act_id = id_ptr->id; + tn = net_generic(net, act_id); + if (!tn) + continue; + idrinfo = tn->idrinfo; + if (!idrinfo) + continue; + + mutex_lock(&idrinfo->lock); + idr = &idrinfo->action_idr; + idr_for_each_entry_ul(idr, p, tmp, id) { + if (IS_ERR(p) || tc_act_bind(p->tcfa_flags)) + continue; + if (add) { + tcf_action_offload_add_ex(p, NULL, cb, + cb_priv); + continue; + } + + /* cb unregister to update hw count */ + ret = tcf_action_offload_del_ex(p, cb, cb_priv); + if (ret < 0) + continue; + if (tc_act_skip_sw(p->tcfa_flags) && + !tc_act_in_hw(p)) + tcf_reoffload_del_notify(net, p); + } + mutex_unlock(&idrinfo->lock); + } + } + mutex_unlock(&act_id_mutex); + up_read(&net_rwsem); + + return 0; +} + +static int tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], u32 portid, size_t attr_size, struct netlink_ext_ack *extack) { @@ -1508,7 +1945,7 @@ static int tcf_action_add(struct net *net, struct nlattr *nla, for (loop = 0; loop < 10; loop++) { ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res, - &attr_size, flags, extack); + &attr_size, flags, 0, extack); if (ret != -EAGAIN) break; } diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index f2bf896331a5..a77d8908e737 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -305,7 +305,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, ret = tcf_idr_check_alloc(tn, &index, act, bind); if (!ret) { ret = tcf_idr_create(tn, index, est, act, - &act_bpf_ops, bind, true, 0); + &act_bpf_ops, bind, true, flags); if (ret < 0) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 94e78ac7a748..09e2aafc8943 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -124,7 +124,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, ret = tcf_idr_check_alloc(tn, &index, a, bind); if (!ret) { ret = tcf_idr_create(tn, index, est, a, - &act_connmark_ops, bind, false, 0); + &act_connmark_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index a15ec95e69c3..e0f515b774ca 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -695,6 +695,24 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_csum)); } +static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_CSUM; + entry->csum_flags = tcf_csum_update_flags(act); + *index_inc = 1; + } else { + struct 
flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_CSUM; + } + + return 0; +} + static struct tc_action_ops act_csum_ops = { .kind = "csum", .id = TCA_ID_CSUM, @@ -706,6 +724,7 @@ static struct tc_action_ops act_csum_ops = { .walk = tcf_csum_walker, .lookup = tcf_csum_search, .get_fill_size = tcf_csum_get_fill_size, + .offload_act_setup = tcf_csum_offload_act_setup, .size = sizeof(struct tcf_csum), }; diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index ab1810f2e660..f9afb5abff21 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -691,10 +691,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, u8 family, u16 zone, bool *defrag) { enum ip_conntrack_info ctinfo; - struct qdisc_skb_cb cb; struct nf_conn *ct; int err = 0; bool frag; + u16 mru; /* Previously seen (loopback)? Ignore. */ ct = nf_ct_get(skb, &ctinfo); @@ -709,7 +709,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, return err; skb_get(skb); - cb = *qdisc_skb_cb(skb); + mru = tc_skb_cb(skb)->mru; if (family == NFPROTO_IPV4) { enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone; @@ -723,7 +723,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, if (!err) { *defrag = true; - cb.mru = IPCB(skb)->frag_max_size; + mru = IPCB(skb)->frag_max_size; } } else { /* NFPROTO_IPV6 */ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) @@ -736,7 +736,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, if (!err) { *defrag = true; - cb.mru = IP6CB(skb)->frag_max_size; + mru = IP6CB(skb)->frag_max_size; } #else err = -EOPNOTSUPP; @@ -745,7 +745,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, } if (err != -EINPROGRESS) - *qdisc_skb_cb(skb) = cb; + tc_skb_cb(skb)->mru = mru; skb_clear_hash(skb); skb->ignore_df = 1; return err; @@ -964,7 +964,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a, tcf_action_update_bstats(&c->common, skb); if (clear) { - qdisc_skb_cb(skb)->post_ct = false; + tc_skb_cb(skb)->post_ct = false; ct = nf_ct_get(skb, &ctinfo); if (ct) { nf_conntrack_put(&ct->ct_general); @@ -1049,7 +1049,8 @@ do_nat: out_push: skb_push_rcsum(skb, nh_ofs); - qdisc_skb_cb(skb)->post_ct = true; + tc_skb_cb(skb)->post_ct = true; + tc_skb_cb(skb)->zone = p->zone; out_clear: if (defrag) qdisc_skb_cb(skb)->pkt_len = skb->len; @@ -1493,6 +1494,26 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets, c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse); } +static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_CT; + entry->ct.action = tcf_ct_action(act); + entry->ct.zone = tcf_ct_zone(act); + entry->ct.flow_table = tcf_ct_ft(act); + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_CT; + } + + return 0; +} + static struct tc_action_ops act_ct_ops = { .kind = "ct", .id = TCA_ID_CT, @@ -1504,6 +1525,7 @@ static struct tc_action_ops act_ct_ops = { .walk = tcf_ct_walker, .lookup = tcf_ct_search, .stats_update = tcf_stats_update, + .offload_act_setup = tcf_ct_offload_act_setup, .size = sizeof(struct tcf_ct), }; diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c index 549374a2d008..0281e45987a4 100644 --- a/net/sched/act_ctinfo.c +++ b/net/sched/act_ctinfo.c @@ -212,7 +212,7 @@ static int tcf_ctinfo_init(struct net *net, struct 
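Each action's new ->offload_act_setup() callback in this series follows the same two-mode shape: when `bind` is true the action is being offloaded as part of a classifier, so a full flow_action_entry is filled in and *index_inc reports how many entry slots were consumed; otherwise only a flow_offload_action id is tagged for a standalone stats/destroy command. A skeleton of the pattern (a sketch against the in-tree types, not compilable outside the kernel; FLOW_ACTION_DROP is just a placeholder id):

	static int my_act_offload_act_setup(struct tc_action *act, void *entry_data,
					    u32 *index_inc, bool bind)
	{
		if (bind) {
			/* offloading a filter: fill the full entry in place */
			struct flow_action_entry *entry = entry_data;

			entry->id = FLOW_ACTION_DROP;	/* placeholder */
			*index_inc = 1;			/* entry slots consumed */
		} else {
			/* standalone FLOW_ACT_STATS / FLOW_ACT_DESTROY command:
			 * only the action id is needed */
			struct flow_offload_action *fl_action = entry_data;

			fl_action->id = FLOW_ACTION_DROP;	/* placeholder */
		}

		return 0;
	}
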
nlattr *nla, err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_ctinfo_ops, bind, false, 0); + &act_ctinfo_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index d8dce173df37..bde6a6c01e64 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -252,6 +252,43 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act) return sz; } +static int tcf_gact_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + if (is_tcf_gact_ok(act)) { + entry->id = FLOW_ACTION_ACCEPT; + } else if (is_tcf_gact_shot(act)) { + entry->id = FLOW_ACTION_DROP; + } else if (is_tcf_gact_trap(act)) { + entry->id = FLOW_ACTION_TRAP; + } else if (is_tcf_gact_goto_chain(act)) { + entry->id = FLOW_ACTION_GOTO; + entry->chain_index = tcf_gact_goto_chain_index(act); + } else { + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + if (is_tcf_gact_ok(act)) + fl_action->id = FLOW_ACTION_ACCEPT; + else if (is_tcf_gact_shot(act)) + fl_action->id = FLOW_ACTION_DROP; + else if (is_tcf_gact_trap(act)) + fl_action->id = FLOW_ACTION_TRAP; + else if (is_tcf_gact_goto_chain(act)) + fl_action->id = FLOW_ACTION_GOTO; + else + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_gact_ops = { .kind = "gact", .id = TCA_ID_GACT, @@ -263,6 +300,7 @@ static struct tc_action_ops act_gact_ops = { .walk = tcf_gact_walker, .lookup = tcf_gact_search, .get_fill_size = tcf_gact_get_fill_size, + .offload_act_setup = tcf_gact_offload_act_setup, .size = sizeof(struct tcf_gact), }; diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c index 7df72a4197a3..d56e73843a4b 100644 --- a/net/sched/act_gate.c +++ b/net/sched/act_gate.c @@ -357,7 +357,7 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla, if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_gate_ops, bind, false, 0); + &act_gate_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -597,6 +597,54 @@ static size_t tcf_gate_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_gate)); } +static void tcf_gate_entry_destructor(void *priv) +{ + struct action_gate_entry *oe = priv; + + kfree(oe); +} + +static int tcf_gate_get_entries(struct flow_action_entry *entry, + const struct tc_action *act) +{ + entry->gate.entries = tcf_gate_get_list(act); + + if (!entry->gate.entries) + return -EINVAL; + + entry->destructor = tcf_gate_entry_destructor; + entry->destructor_priv = entry->gate.entries; + + return 0; +} + +static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + int err; + + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_GATE; + entry->gate.prio = tcf_gate_prio(act); + entry->gate.basetime = tcf_gate_basetime(act); + entry->gate.cycletime = tcf_gate_cycletime(act); + entry->gate.cycletimeext = tcf_gate_cycletimeext(act); + entry->gate.num_entries = tcf_gate_num_entries(act); + err = tcf_gate_get_entries(entry, act); + if (err) + return err; + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_GATE; + } + + return 0; +} + static struct tc_action_ops act_gate_ops = { .kind = "gate", .id = TCA_ID_GATE, @@ -609,6 +657,7 @@ static struct 
tc_action_ops act_gate_ops = { .stats_update = tcf_gate_stats_update, .get_fill_size = tcf_gate_get_fill_size, .lookup = tcf_gate_search, + .offload_act_setup = tcf_gate_offload_act_setup, .size = sizeof(struct tcf_gate), }; diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index b757f90a2d58..41ba55e60b1b 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -553,7 +553,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, &act_ife_ops, - bind, true, 0); + bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); kfree(p); diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 265b1443e252..2f3d507c24a1 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -145,7 +145,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, ops, bind, - false, 0); + false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 952416bd65e6..39acd1d18609 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -450,6 +450,55 @@ static size_t tcf_mirred_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_mirred)); } +static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry, + const struct tc_action *act) +{ + entry->dev = act->ops->get_dev(act, &entry->destructor); + if (!entry->dev) + return; + entry->destructor_priv = entry->dev; +} + +static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + if (is_tcf_mirred_egress_redirect(act)) { + entry->id = FLOW_ACTION_REDIRECT; + tcf_offload_mirred_get_dev(entry, act); + } else if (is_tcf_mirred_egress_mirror(act)) { + entry->id = FLOW_ACTION_MIRRED; + tcf_offload_mirred_get_dev(entry, act); + } else if (is_tcf_mirred_ingress_redirect(act)) { + entry->id = FLOW_ACTION_REDIRECT_INGRESS; + tcf_offload_mirred_get_dev(entry, act); + } else if (is_tcf_mirred_ingress_mirror(act)) { + entry->id = FLOW_ACTION_MIRRED_INGRESS; + tcf_offload_mirred_get_dev(entry, act); + } else { + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + if (is_tcf_mirred_egress_redirect(act)) + fl_action->id = FLOW_ACTION_REDIRECT; + else if (is_tcf_mirred_egress_mirror(act)) + fl_action->id = FLOW_ACTION_MIRRED; + else if (is_tcf_mirred_ingress_redirect(act)) + fl_action->id = FLOW_ACTION_REDIRECT_INGRESS; + else if (is_tcf_mirred_ingress_mirror(act)) + fl_action->id = FLOW_ACTION_MIRRED_INGRESS; + else + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_mirred_ops = { .kind = "mirred", .id = TCA_ID_MIRRED, @@ -462,6 +511,7 @@ static struct tc_action_ops act_mirred_ops = { .walk = tcf_mirred_walker, .lookup = tcf_mirred_search, .get_fill_size = tcf_mirred_get_fill_size, + .offload_act_setup = tcf_mirred_offload_act_setup, .size = sizeof(struct tcf_mirred), .get_dev = tcf_mirred_get_dev, }; diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index 8faa4c58305e..b9ff3459fdab 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -248,7 +248,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_mpls_ops, bind, true, 0); + &act_mpls_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ 
-384,6 +384,57 @@ static int tcf_mpls_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + switch (tcf_mpls_action(act)) { + case TCA_MPLS_ACT_PUSH: + entry->id = FLOW_ACTION_MPLS_PUSH; + entry->mpls_push.proto = tcf_mpls_proto(act); + entry->mpls_push.label = tcf_mpls_label(act); + entry->mpls_push.tc = tcf_mpls_tc(act); + entry->mpls_push.bos = tcf_mpls_bos(act); + entry->mpls_push.ttl = tcf_mpls_ttl(act); + break; + case TCA_MPLS_ACT_POP: + entry->id = FLOW_ACTION_MPLS_POP; + entry->mpls_pop.proto = tcf_mpls_proto(act); + break; + case TCA_MPLS_ACT_MODIFY: + entry->id = FLOW_ACTION_MPLS_MANGLE; + entry->mpls_mangle.label = tcf_mpls_label(act); + entry->mpls_mangle.tc = tcf_mpls_tc(act); + entry->mpls_mangle.bos = tcf_mpls_bos(act); + entry->mpls_mangle.ttl = tcf_mpls_ttl(act); + break; + default: + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + switch (tcf_mpls_action(act)) { + case TCA_MPLS_ACT_PUSH: + fl_action->id = FLOW_ACTION_MPLS_PUSH; + break; + case TCA_MPLS_ACT_POP: + fl_action->id = FLOW_ACTION_MPLS_POP; + break; + case TCA_MPLS_ACT_MODIFY: + fl_action->id = FLOW_ACTION_MPLS_MANGLE; + break; + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + static struct tc_action_ops act_mpls_ops = { .kind = "mpls", .id = TCA_ID_MPLS, @@ -394,6 +445,7 @@ static struct tc_action_ops act_mpls_ops = { .cleanup = tcf_mpls_cleanup, .walk = tcf_mpls_walker, .lookup = tcf_mpls_search, + .offload_act_setup = tcf_mpls_offload_act_setup, .size = sizeof(struct tcf_mpls), }; diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 7dd6b586ba7f..2a39b3729e84 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -61,7 +61,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_nat_ops, bind, false, 0); + &act_nat_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index c6c862c459cc..31fcd279c177 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -189,7 +189,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_pedit_ops, bind, false, 0); + &act_pedit_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); goto out_free; @@ -487,6 +487,39 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + int k; + + for (k = 0; k < tcf_pedit_nkeys(act); k++) { + switch (tcf_pedit_cmd(act, k)) { + case TCA_PEDIT_KEY_EX_CMD_SET: + entry->id = FLOW_ACTION_MANGLE; + break; + case TCA_PEDIT_KEY_EX_CMD_ADD: + entry->id = FLOW_ACTION_ADD; + break; + default: + return -EOPNOTSUPP; + } + entry->mangle.htype = tcf_pedit_htype(act, k); + entry->mangle.mask = tcf_pedit_mask(act, k); + entry->mangle.val = tcf_pedit_val(act, k); + entry->mangle.offset = tcf_pedit_offset(act, k); + entry->hw_stats = tc_act_hw_stats(act->hw_stats); + 
entry++; + } + *index_inc = k; + } else { + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_pedit_ops = { .kind = "pedit", .id = TCA_ID_PEDIT, @@ -498,6 +531,7 @@ static struct tc_action_ops act_pedit_ops = { .init = tcf_pedit_init, .walk = tcf_pedit_walker, .lookup = tcf_pedit_search, + .offload_act_setup = tcf_pedit_offload_act_setup, .size = sizeof(struct tcf_pedit), }; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 9e77ba8401e5..0923aa2b8f8a 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -90,7 +90,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, NULL, a, - &act_police_ops, bind, true, 0); + &act_police_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -405,6 +405,30 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_POLICE; + entry->police.burst = tcf_police_burst(act); + entry->police.rate_bytes_ps = + tcf_police_rate_bytes_ps(act); + entry->police.burst_pkt = tcf_police_burst_pkt(act); + entry->police.rate_pkt_ps = + tcf_police_rate_pkt_ps(act); + entry->police.mtu = tcf_police_tcfp_mtu(act); + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_POLICE; + } + + return 0; +} + MODULE_AUTHOR("Alexey Kuznetsov"); MODULE_DESCRIPTION("Policing actions"); MODULE_LICENSE("GPL"); @@ -420,6 +444,7 @@ static struct tc_action_ops act_police_ops = { .walk = tcf_police_walker, .lookup = tcf_police_search, .cleanup = tcf_police_cleanup, + .offload_act_setup = tcf_police_offload_act_setup, .size = sizeof(struct tcf_police), }; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index ce859b0e0deb..9a22cdda6bbd 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -70,7 +70,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_sample_ops, bind, true, 0); + &act_sample_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -282,6 +282,35 @@ tcf_sample_get_group(const struct tc_action *a, return group; } +static void tcf_offload_sample_get_group(struct flow_action_entry *entry, + const struct tc_action *act) +{ + entry->sample.psample_group = + act->ops->get_psample_group(act, &entry->destructor); + entry->destructor_priv = entry->sample.psample_group; +} + +static int tcf_sample_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + entry->id = FLOW_ACTION_SAMPLE; + entry->sample.trunc_size = tcf_sample_trunc_size(act); + entry->sample.truncate = tcf_sample_truncate(act); + entry->sample.rate = tcf_sample_rate(act); + tcf_offload_sample_get_group(entry, act); + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + fl_action->id = FLOW_ACTION_SAMPLE; + } + + return 0; +} + static struct tc_action_ops act_sample_ops = { .kind = "sample", .id = TCA_ID_SAMPLE, @@ -294,6 +323,7 @@ static struct tc_action_ops act_sample_ops = { .walk = tcf_sample_walker, .lookup = tcf_sample_search, .get_psample_group = tcf_sample_get_group, + .offload_act_setup = 
tcf_sample_offload_act_setup, .size = sizeof(struct tcf_sample), }; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index e617ab4505ca..8c1d60bde93e 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -129,7 +129,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_simp_ops, bind, false, 0); + &act_simp_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index d30ecbfc8f84..ceba11b198bb 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -176,7 +176,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_skbedit_ops, bind, true, 0); + &act_skbedit_ops, bind, true, act_flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -327,6 +327,41 @@ static size_t tcf_skbedit_get_fill_size(const struct tc_action *act) + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */ } +static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + if (is_tcf_skbedit_mark(act)) { + entry->id = FLOW_ACTION_MARK; + entry->mark = tcf_skbedit_mark(act); + } else if (is_tcf_skbedit_ptype(act)) { + entry->id = FLOW_ACTION_PTYPE; + entry->ptype = tcf_skbedit_ptype(act); + } else if (is_tcf_skbedit_priority(act)) { + entry->id = FLOW_ACTION_PRIORITY; + entry->priority = tcf_skbedit_priority(act); + } else { + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + if (is_tcf_skbedit_mark(act)) + fl_action->id = FLOW_ACTION_MARK; + else if (is_tcf_skbedit_ptype(act)) + fl_action->id = FLOW_ACTION_PTYPE; + else if (is_tcf_skbedit_priority(act)) + fl_action->id = FLOW_ACTION_PRIORITY; + else + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_skbedit_ops = { .kind = "skbedit", .id = TCA_ID_SKBEDIT, @@ -339,6 +374,7 @@ static struct tc_action_ops act_skbedit_ops = { .walk = tcf_skbedit_walker, .get_fill_size = tcf_skbedit_get_fill_size, .lookup = tcf_skbedit_search, + .offload_act_setup = tcf_skbedit_offload_act_setup, .size = sizeof(struct tcf_skbedit), }; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 9b6b52c5e24e..2083612d8780 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -168,7 +168,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_skbmod_ops, bind, true, 0); + &act_skbmod_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index d9cd174eecb7..23aba03d26a8 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -787,6 +787,59 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static void tcf_tunnel_encap_put_tunnel(void *priv) +{ + struct ip_tunnel_info *tunnel = priv; + + kfree(tunnel); +} + +static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry, + const struct tc_action *act) +{ + entry->tunnel = tcf_tunnel_info_copy(act); + if (!entry->tunnel) + return -ENOMEM; + entry->destructor = tcf_tunnel_encap_put_tunnel; + entry->destructor_priv = entry->tunnel; + return 0; +} + +static int 
tcf_tunnel_key_offload_act_setup(struct tc_action *act, + void *entry_data, + u32 *index_inc, + bool bind) +{ + int err; + + if (bind) { + struct flow_action_entry *entry = entry_data; + + if (is_tcf_tunnel_set(act)) { + entry->id = FLOW_ACTION_TUNNEL_ENCAP; + err = tcf_tunnel_encap_get_tunnel(entry, act); + if (err) + return err; + } else if (is_tcf_tunnel_release(act)) { + entry->id = FLOW_ACTION_TUNNEL_DECAP; + } else { + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + if (is_tcf_tunnel_set(act)) + fl_action->id = FLOW_ACTION_TUNNEL_ENCAP; + else if (is_tcf_tunnel_release(act)) + fl_action->id = FLOW_ACTION_TUNNEL_DECAP; + else + return -EOPNOTSUPP; + } + + return 0; +} + static struct tc_action_ops act_tunnel_key_ops = { .kind = "tunnel_key", .id = TCA_ID_TUNNEL_KEY, @@ -797,6 +850,7 @@ static struct tc_action_ops act_tunnel_key_ops = { .cleanup = tunnel_key_release, .walk = tunnel_key_walker, .lookup = tunnel_key_search, + .offload_act_setup = tcf_tunnel_key_offload_act_setup, .size = sizeof(struct tcf_tunnel_key), }; diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index e4dc5a555bd8..756e2dcde1cd 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -368,6 +368,53 @@ static size_t tcf_vlan_get_fill_size(const struct tc_action *act) + nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */ } +static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data, + u32 *index_inc, bool bind) +{ + if (bind) { + struct flow_action_entry *entry = entry_data; + + switch (tcf_vlan_action(act)) { + case TCA_VLAN_ACT_PUSH: + entry->id = FLOW_ACTION_VLAN_PUSH; + entry->vlan.vid = tcf_vlan_push_vid(act); + entry->vlan.proto = tcf_vlan_push_proto(act); + entry->vlan.prio = tcf_vlan_push_prio(act); + break; + case TCA_VLAN_ACT_POP: + entry->id = FLOW_ACTION_VLAN_POP; + break; + case TCA_VLAN_ACT_MODIFY: + entry->id = FLOW_ACTION_VLAN_MANGLE; + entry->vlan.vid = tcf_vlan_push_vid(act); + entry->vlan.proto = tcf_vlan_push_proto(act); + entry->vlan.prio = tcf_vlan_push_prio(act); + break; + default: + return -EOPNOTSUPP; + } + *index_inc = 1; + } else { + struct flow_offload_action *fl_action = entry_data; + + switch (tcf_vlan_action(act)) { + case TCA_VLAN_ACT_PUSH: + fl_action->id = FLOW_ACTION_VLAN_PUSH; + break; + case TCA_VLAN_ACT_POP: + fl_action->id = FLOW_ACTION_VLAN_POP; + break; + case TCA_VLAN_ACT_MODIFY: + fl_action->id = FLOW_ACTION_VLAN_MANGLE; + break; + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + static struct tc_action_ops act_vlan_ops = { .kind = "vlan", .id = TCA_ID_VLAN, @@ -380,6 +427,7 @@ static struct tc_action_ops act_vlan_ops = { .stats_update = tcf_vlan_stats_update, .get_fill_size = tcf_vlan_get_fill_size, .lookup = tcf_vlan_search, + .offload_act_setup = tcf_vlan_offload_act_setup, .size = sizeof(struct tcf_vlan), }; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 2ef8f5a6205a..a53c72e6d944 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1617,12 +1617,15 @@ int tcf_classify(struct sk_buff *skb, /* If we missed on some chain */ if (ret == TC_ACT_UNSPEC && last_executed_chain) { + struct tc_skb_cb *cb = tc_skb_cb(skb); + ext = tc_skb_ext_alloc(skb); if (WARN_ON_ONCE(!ext)) return TC_ACT_SHOT; ext->chain = last_executed_chain; - ext->mru = qdisc_skb_cb(skb)->mru; - ext->post_ct = qdisc_skb_cb(skb)->post_ct; + ext->mru = cb->mru; + ext->post_ct = cb->post_ct; + ext->zone = cb->zone; } return ret; @@ -3025,9 +3028,9 @@ void 
tcf_exts_destroy(struct tcf_exts *exts) } EXPORT_SYMBOL(tcf_exts_destroy); -int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, - struct nlattr *rate_tlv, struct tcf_exts *exts, - u32 flags, struct netlink_ext_ack *extack) +int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb, + struct nlattr *rate_tlv, struct tcf_exts *exts, + u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { #ifdef CONFIG_NET_CLS_ACT { @@ -3061,7 +3064,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, flags |= TCA_ACT_FLAGS_BIND; err = tcf_action_init(net, tp, tb[exts->action], rate_tlv, exts->actions, init_res, - &attr_size, flags, extack); + &attr_size, flags, fl_flags, + extack); if (err < 0) return err; exts->nr_actions = err; @@ -3077,6 +3081,15 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, return 0; } +EXPORT_SYMBOL(tcf_exts_validate_ex); + +int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, + struct nlattr *rate_tlv, struct tcf_exts *exts, + u32 flags, struct netlink_ext_ack *extack) +{ + return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts, + flags, 0, extack); +} EXPORT_SYMBOL(tcf_exts_validate); void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) @@ -3320,7 +3333,7 @@ err_unlock: up_read(&block->cb_lock); if (take_rtnl) rtnl_unlock(); - return ok_count < 0 ? ok_count : 0; + return min(ok_count, 0); } EXPORT_SYMBOL(tc_setup_cb_add); @@ -3376,7 +3389,7 @@ err_unlock: up_read(&block->cb_lock); if (take_rtnl) rtnl_unlock(); - return ok_count < 0 ? ok_count : 0; + return min(ok_count, 0); } EXPORT_SYMBOL(tc_setup_cb_replace); @@ -3414,7 +3427,7 @@ retry: up_read(&block->cb_lock); if (take_rtnl) rtnl_unlock(); - return ok_count < 0 ? 
ok_count : 0; + return min(ok_count, 0); } EXPORT_SYMBOL(tc_setup_cb_destroy); @@ -3461,7 +3474,7 @@ static void tcf_act_put_cookie(struct flow_action_entry *entry) flow_action_cookie_destroy(entry->cookie); } -void tc_cleanup_flow_action(struct flow_action *flow_action) +void tc_cleanup_offload_action(struct flow_action *flow_action) { struct flow_action_entry *entry; int i; @@ -3472,93 +3485,37 @@ void tc_cleanup_flow_action(struct flow_action *flow_action) entry->destructor(entry->destructor_priv); } } -EXPORT_SYMBOL(tc_cleanup_flow_action); +EXPORT_SYMBOL(tc_cleanup_offload_action); -static void tcf_mirred_get_dev(struct flow_action_entry *entry, - const struct tc_action *act) +static int tc_setup_offload_act(struct tc_action *act, + struct flow_action_entry *entry, + u32 *index_inc) { #ifdef CONFIG_NET_CLS_ACT - entry->dev = act->ops->get_dev(act, &entry->destructor); - if (!entry->dev) - return; - entry->destructor_priv = entry->dev; -#endif -} - -static void tcf_tunnel_encap_put_tunnel(void *priv) -{ - struct ip_tunnel_info *tunnel = priv; - - kfree(tunnel); -} - -static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry, - const struct tc_action *act) -{ - entry->tunnel = tcf_tunnel_info_copy(act); - if (!entry->tunnel) - return -ENOMEM; - entry->destructor = tcf_tunnel_encap_put_tunnel; - entry->destructor_priv = entry->tunnel; + if (act->ops->offload_act_setup) + return act->ops->offload_act_setup(act, entry, index_inc, true); + else + return -EOPNOTSUPP; +#else return 0; -} - -static void tcf_sample_get_group(struct flow_action_entry *entry, - const struct tc_action *act) -{ -#ifdef CONFIG_NET_CLS_ACT - entry->sample.psample_group = - act->ops->get_psample_group(act, &entry->destructor); - entry->destructor_priv = entry->sample.psample_group; #endif } -static void tcf_gate_entry_destructor(void *priv) -{ - struct action_gate_entry *oe = priv; - - kfree(oe); -} - -static int tcf_gate_get_entries(struct flow_action_entry *entry, - const struct tc_action *act) -{ - entry->gate.entries = tcf_gate_get_list(act); - - if (!entry->gate.entries) - return -EINVAL; - - entry->destructor = tcf_gate_entry_destructor; - entry->destructor_priv = entry->gate.entries; - - return 0; -} - -static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats) -{ - if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY)) - return FLOW_ACTION_HW_STATS_DONT_CARE; - else if (!hw_stats) - return FLOW_ACTION_HW_STATS_DISABLED; - - return hw_stats; -} - -int tc_setup_flow_action(struct flow_action *flow_action, - const struct tcf_exts *exts) +int tc_setup_action(struct flow_action *flow_action, + struct tc_action *actions[]) { + int i, j, index, err = 0; struct tc_action *act; - int i, j, k, err = 0; BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY); BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE); BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED); - if (!exts) + if (!actions) return 0; j = 0; - tcf_exts_for_each_action(i, act, exts) { + tcf_act_for_each_action(i, act, actions) { struct flow_action_entry *entry; entry = &flow_action->entries[j]; @@ -3568,164 +3525,39 @@ int tc_setup_flow_action(struct flow_action *flow_action, goto err_out_locked; entry->hw_stats = tc_act_hw_stats(act->hw_stats); - - if (is_tcf_gact_ok(act)) { - entry->id = FLOW_ACTION_ACCEPT; - } else if (is_tcf_gact_shot(act)) { - entry->id = FLOW_ACTION_DROP; - } else if (is_tcf_gact_trap(act)) { - entry->id = FLOW_ACTION_TRAP; - } else if (is_tcf_gact_goto_chain(act)) { - 
entry->id = FLOW_ACTION_GOTO; - entry->chain_index = tcf_gact_goto_chain_index(act); - } else if (is_tcf_mirred_egress_redirect(act)) { - entry->id = FLOW_ACTION_REDIRECT; - tcf_mirred_get_dev(entry, act); - } else if (is_tcf_mirred_egress_mirror(act)) { - entry->id = FLOW_ACTION_MIRRED; - tcf_mirred_get_dev(entry, act); - } else if (is_tcf_mirred_ingress_redirect(act)) { - entry->id = FLOW_ACTION_REDIRECT_INGRESS; - tcf_mirred_get_dev(entry, act); - } else if (is_tcf_mirred_ingress_mirror(act)) { - entry->id = FLOW_ACTION_MIRRED_INGRESS; - tcf_mirred_get_dev(entry, act); - } else if (is_tcf_vlan(act)) { - switch (tcf_vlan_action(act)) { - case TCA_VLAN_ACT_PUSH: - entry->id = FLOW_ACTION_VLAN_PUSH; - entry->vlan.vid = tcf_vlan_push_vid(act); - entry->vlan.proto = tcf_vlan_push_proto(act); - entry->vlan.prio = tcf_vlan_push_prio(act); - break; - case TCA_VLAN_ACT_POP: - entry->id = FLOW_ACTION_VLAN_POP; - break; - case TCA_VLAN_ACT_MODIFY: - entry->id = FLOW_ACTION_VLAN_MANGLE; - entry->vlan.vid = tcf_vlan_push_vid(act); - entry->vlan.proto = tcf_vlan_push_proto(act); - entry->vlan.prio = tcf_vlan_push_prio(act); - break; - default: - err = -EOPNOTSUPP; - goto err_out_locked; - } - } else if (is_tcf_tunnel_set(act)) { - entry->id = FLOW_ACTION_TUNNEL_ENCAP; - err = tcf_tunnel_encap_get_tunnel(entry, act); - if (err) - goto err_out_locked; - } else if (is_tcf_tunnel_release(act)) { - entry->id = FLOW_ACTION_TUNNEL_DECAP; - } else if (is_tcf_pedit(act)) { - for (k = 0; k < tcf_pedit_nkeys(act); k++) { - switch (tcf_pedit_cmd(act, k)) { - case TCA_PEDIT_KEY_EX_CMD_SET: - entry->id = FLOW_ACTION_MANGLE; - break; - case TCA_PEDIT_KEY_EX_CMD_ADD: - entry->id = FLOW_ACTION_ADD; - break; - default: - err = -EOPNOTSUPP; - goto err_out_locked; - } - entry->mangle.htype = tcf_pedit_htype(act, k); - entry->mangle.mask = tcf_pedit_mask(act, k); - entry->mangle.val = tcf_pedit_val(act, k); - entry->mangle.offset = tcf_pedit_offset(act, k); - entry->hw_stats = tc_act_hw_stats(act->hw_stats); - entry = &flow_action->entries[++j]; - } - } else if (is_tcf_csum(act)) { - entry->id = FLOW_ACTION_CSUM; - entry->csum_flags = tcf_csum_update_flags(act); - } else if (is_tcf_skbedit_mark(act)) { - entry->id = FLOW_ACTION_MARK; - entry->mark = tcf_skbedit_mark(act); - } else if (is_tcf_sample(act)) { - entry->id = FLOW_ACTION_SAMPLE; - entry->sample.trunc_size = tcf_sample_trunc_size(act); - entry->sample.truncate = tcf_sample_truncate(act); - entry->sample.rate = tcf_sample_rate(act); - tcf_sample_get_group(entry, act); - } else if (is_tcf_police(act)) { - entry->id = FLOW_ACTION_POLICE; - entry->police.burst = tcf_police_burst(act); - entry->police.rate_bytes_ps = - tcf_police_rate_bytes_ps(act); - entry->police.burst_pkt = tcf_police_burst_pkt(act); - entry->police.rate_pkt_ps = - tcf_police_rate_pkt_ps(act); - entry->police.mtu = tcf_police_tcfp_mtu(act); - entry->police.index = act->tcfa_index; - } else if (is_tcf_ct(act)) { - entry->id = FLOW_ACTION_CT; - entry->ct.action = tcf_ct_action(act); - entry->ct.zone = tcf_ct_zone(act); - entry->ct.flow_table = tcf_ct_ft(act); - } else if (is_tcf_mpls(act)) { - switch (tcf_mpls_action(act)) { - case TCA_MPLS_ACT_PUSH: - entry->id = FLOW_ACTION_MPLS_PUSH; - entry->mpls_push.proto = tcf_mpls_proto(act); - entry->mpls_push.label = tcf_mpls_label(act); - entry->mpls_push.tc = tcf_mpls_tc(act); - entry->mpls_push.bos = tcf_mpls_bos(act); - entry->mpls_push.ttl = tcf_mpls_ttl(act); - break; - case TCA_MPLS_ACT_POP: - entry->id = FLOW_ACTION_MPLS_POP; - 
entry->mpls_pop.proto = tcf_mpls_proto(act); - break; - case TCA_MPLS_ACT_MODIFY: - entry->id = FLOW_ACTION_MPLS_MANGLE; - entry->mpls_mangle.label = tcf_mpls_label(act); - entry->mpls_mangle.tc = tcf_mpls_tc(act); - entry->mpls_mangle.bos = tcf_mpls_bos(act); - entry->mpls_mangle.ttl = tcf_mpls_ttl(act); - break; - default: - goto err_out_locked; - } - } else if (is_tcf_skbedit_ptype(act)) { - entry->id = FLOW_ACTION_PTYPE; - entry->ptype = tcf_skbedit_ptype(act); - } else if (is_tcf_skbedit_priority(act)) { - entry->id = FLOW_ACTION_PRIORITY; - entry->priority = tcf_skbedit_priority(act); - } else if (is_tcf_gate(act)) { - entry->id = FLOW_ACTION_GATE; - entry->gate.index = tcf_gate_index(act); - entry->gate.prio = tcf_gate_prio(act); - entry->gate.basetime = tcf_gate_basetime(act); - entry->gate.cycletime = tcf_gate_cycletime(act); - entry->gate.cycletimeext = tcf_gate_cycletimeext(act); - entry->gate.num_entries = tcf_gate_num_entries(act); - err = tcf_gate_get_entries(entry, act); - if (err) - goto err_out_locked; - } else { - err = -EOPNOTSUPP; + entry->hw_index = act->tcfa_index; + index = 0; + err = tc_setup_offload_act(act, entry, &index); + if (!err) + j += index; + else goto err_out_locked; - } spin_unlock_bh(&act->tcfa_lock); - - if (!is_tcf_pedit(act)) - j++; } err_out: if (err) - tc_cleanup_flow_action(flow_action); + tc_cleanup_offload_action(flow_action); return err; err_out_locked: spin_unlock_bh(&act->tcfa_lock); goto err_out; } -EXPORT_SYMBOL(tc_setup_flow_action); + +int tc_setup_offload_action(struct flow_action *flow_action, + const struct tcf_exts *exts) +{ +#ifdef CONFIG_NET_CLS_ACT + if (!exts) + return 0; + + return tc_setup_action(flow_action, exts->actions); +#else + return 0; +#endif +} +EXPORT_SYMBOL(tc_setup_offload_action); unsigned int tcf_exts_num_actions(struct tcf_exts *exts) { diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index aab13ba11767..1a9b1f140f9e 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -19,6 +19,7 @@ #include <net/sch_generic.h> #include <net/pkt_cls.h> +#include <net/pkt_sched.h> #include <net/ip.h> #include <net/flow_dissector.h> #include <net/geneve.h> @@ -309,7 +310,8 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { struct cls_fl_head *head = rcu_dereference_bh(tp->root); - bool post_ct = qdisc_skb_cb(skb)->post_ct; + bool post_ct = tc_skb_cb(skb)->post_ct; + u16 zone = tc_skb_cb(skb)->zone; struct fl_flow_key skb_key; struct fl_flow_mask *mask; struct cls_fl_filter *f; @@ -327,7 +329,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, skb_flow_dissect_ct(skb, &mask->dissector, &skb_key, fl_ct_info_to_flower_map, ARRAY_SIZE(fl_ct_info_to_flower_map), - post_ct); + post_ct, zone); skb_flow_dissect_hash(skb, &mask->dissector, &skb_key); skb_flow_dissect(skb, &mask->dissector, &skb_key, FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP); @@ -461,7 +463,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, cls_flower.rule->match.key = &f->mkey; cls_flower.classid = f->res.classid; - err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); + err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts); if (err) { kfree(cls_flower.rule); if (skip_sw) { @@ -473,7 +475,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw, &f->flags, &f->in_hw_count, rtnl_held); - tc_cleanup_flow_action(&cls_flower.rule->action); + 
tc_cleanup_offload_action(&cls_flower.rule->action); kfree(cls_flower.rule); if (err) { @@ -501,12 +503,12 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, rtnl_held); - tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes, - cls_flower.stats.pkts, - cls_flower.stats.drops, - cls_flower.stats.lastused, - cls_flower.stats.used_hw_stats, - cls_flower.stats.used_hw_stats_valid); + tcf_exts_hw_stats_update(&f->exts, cls_flower.stats.bytes, + cls_flower.stats.pkts, + cls_flower.stats.drops, + cls_flower.stats.lastused, + cls_flower.stats.used_hw_stats, + cls_flower.stats.used_hw_stats_valid); } static void __fl_put(struct cls_fl_filter *f) @@ -1917,12 +1919,14 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp, struct cls_fl_filter *f, struct fl_flow_mask *mask, unsigned long base, struct nlattr **tb, struct nlattr *est, - struct fl_flow_tmplt *tmplt, u32 flags, + struct fl_flow_tmplt *tmplt, + u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { int err; - err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack); + err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags, + fl_flags, extack); if (err < 0) return err; @@ -2036,7 +2040,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, } err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], - tp->chain->tmplt_priv, flags, extack); + tp->chain->tmplt_priv, flags, fnew->flags, + extack); if (err) goto errout; @@ -2266,7 +2271,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, cls_flower.rule->match.mask = &f->mask->key; cls_flower.rule->match.key = &f->mkey; - err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); + err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts); if (err) { kfree(cls_flower.rule); if (tc_skip_sw(f->flags)) { @@ -2283,7 +2288,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, TC_SETUP_CLSFLOWER, &cls_flower, cb_priv, &f->flags, &f->in_hw_count); - tc_cleanup_flow_action(&cls_flower.rule->action); + tc_cleanup_offload_action(&cls_flower.rule->action); kfree(cls_flower.rule); if (err) { diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 24f0046ce0b3..ca5670fd5228 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -97,7 +97,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, cls_mall.command = TC_CLSMATCHALL_REPLACE; cls_mall.cookie = cookie; - err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts); + err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts); if (err) { kfree(cls_mall.rule); mall_destroy_hw_filter(tp, head, cookie, NULL); @@ -111,7 +111,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw, &head->flags, &head->in_hw_count, true); - tc_cleanup_flow_action(&cls_mall.rule->action); + tc_cleanup_offload_action(&cls_mall.rule->action); kfree(cls_mall.rule); if (err) { @@ -163,12 +163,13 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { static int mall_set_parms(struct net *net, struct tcf_proto *tp, struct cls_mall_head *head, unsigned long base, struct nlattr **tb, - struct nlattr *est, u32 flags, + struct nlattr *est, u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { int err; - err = tcf_exts_validate(net, tp, tb, est, &head->exts, flags, extack); + err = tcf_exts_validate_ex(net, tp, tb, est, 
&head->exts, flags, + fl_flags, extack); if (err < 0) return err; @@ -226,8 +227,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, goto err_alloc_percpu; } - err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], flags, - extack); + err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], + flags, new->flags, extack); if (err) goto err_set_parms; @@ -301,7 +302,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY; cls_mall.cookie = (unsigned long)head; - err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts); + err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts); if (err) { kfree(cls_mall.rule); if (add && tc_skip_sw(head->flags)) { @@ -314,7 +315,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv, &head->flags, &head->in_hw_count); - tc_cleanup_flow_action(&cls_mall.rule->action); + tc_cleanup_offload_action(&cls_mall.rule->action); kfree(cls_mall.rule); if (err) @@ -336,11 +337,11 @@ static void mall_stats_hw_filter(struct tcf_proto *tp, tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true); - tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes, - cls_mall.stats.pkts, cls_mall.stats.drops, - cls_mall.stats.lastused, - cls_mall.stats.used_hw_stats, - cls_mall.stats.used_hw_stats_valid); + tcf_exts_hw_stats_update(&head->exts, cls_mall.stats.bytes, + cls_mall.stats.pkts, cls_mall.stats.drops, + cls_mall.stats.lastused, + cls_mall.stats.used_hw_stats, + cls_mall.stats.used_hw_stats_valid); } static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh, diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 4272814487f0..cf5649292ee0 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -709,12 +709,13 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { static int u32_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct tc_u_knode *n, struct nlattr **tb, - struct nlattr *est, u32 flags, + struct nlattr *est, u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { int err; - err = tcf_exts_validate(net, tp, tb, est, &n->exts, flags, extack); + err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags, + fl_flags, extack); if (err < 0) return err; @@ -895,7 +896,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -ENOMEM; err = u32_set_parms(net, tp, base, new, tb, - tca[TCA_RATE], flags, extack); + tca[TCA_RATE], flags, new->flags, + extack); if (err) { u32_destroy_key(new, false); @@ -1060,8 +1062,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, } #endif - err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], flags, - extack); + err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], + flags, n->flags, extack); if (err == 0) { struct tc_u_knode __rcu **ins; struct tc_u_knode *pins; diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 3c2300d14468..857aaebd49f4 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -2736,7 +2736,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt, q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data), GFP_KERNEL); if (!q->tins) - goto nomem; + return -ENOMEM; for (i = 0; i < CAKE_MAX_TINS; i++) { struct cake_tin_data *b = q->tins + i; @@ -2766,10 +2766,6 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt, q->min_netlen = ~0; 
q->min_adjlen = ~0; return 0; - -nomem: - cake_destroy(sch); - return -ENOMEM; } static int cake_dump(struct Qdisc *sch, struct sk_buff *skb) diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index e007fc75ef2f..d73393493553 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -666,9 +666,9 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, } } for (i = q->nbands; i < oldbands; i++) { - qdisc_tree_flush_backlog(q->classes[i].qdisc); - if (i >= q->nstrict) + if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) list_del(&q->classes[i].alist); + qdisc_tree_flush_backlog(q->classes[i].qdisc); } q->nstrict = nstrict; memcpy(q->prio2band, priomap, sizeof(priomap)); diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c index cd85a69820b1..a9bd0a235890 100644 --- a/net/sched/sch_frag.c +++ b/net/sched/sch_frag.c @@ -2,6 +2,7 @@ #include <linux/if_vlan.h> #include <net/netlink.h> #include <net/sch_generic.h> +#include <net/pkt_sched.h> #include <net/dst.h> #include <net/ip.h> #include <net/ip6_fib.h> @@ -138,7 +139,7 @@ err: int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb)) { - u16 mru = qdisc_skb_cb(skb)->mru; + u16 mru = tc_skb_cb(skb)->mru; int err; if (mru && skb->len > mru + skb->dev->hard_header_len) diff --git a/net/sctp/diag.c b/net/sctp/diag.c index 760b367644c1..a7d623171501 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@ -290,9 +290,8 @@ out: return err; } -static int sctp_sock_dump(struct sctp_transport *tsp, void *p) +static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p) { - struct sctp_endpoint *ep = tsp->asoc->ep; struct sctp_comm_param *commp = p; struct sock *sk = ep->base.sk; struct sk_buff *skb = commp->skb; @@ -302,6 +301,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p) int err = 0; lock_sock(sk); + if (ep != tsp->asoc->ep) + goto release; list_for_each_entry(assoc, &ep->asocs, asocs) { if (cb->args[4] < cb->args[1]) goto next; @@ -344,9 +345,8 @@ release: return err; } -static int sctp_sock_filter(struct sctp_transport *tsp, void *p) +static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p) { - struct sctp_endpoint *ep = tsp->asoc->ep; struct sctp_comm_param *commp = p; struct sock *sk = ep->base.sk; const struct inet_diag_req_v2 *r = commp->r; @@ -505,8 +505,8 @@ skip: if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE))) goto done; - sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump, - net, &pos, &commp); + sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump, + net, &pos, &commp); cb->args[2] = pos; done: diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 48c9c2c7602f..efffde7f2328 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -184,6 +184,18 @@ void sctp_endpoint_free(struct sctp_endpoint *ep) } /* Final destructor for endpoint. 
*/ +static void sctp_endpoint_destroy_rcu(struct rcu_head *head) +{ + struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu); + struct sock *sk = ep->base.sk; + + sctp_sk(sk)->ep = NULL; + sock_put(sk); + + kfree(ep); + SCTP_DBG_OBJCNT_DEC(ep); +} + static void sctp_endpoint_destroy(struct sctp_endpoint *ep) { struct sock *sk; @@ -213,18 +225,13 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) if (sctp_sk(sk)->bind_hash) sctp_put_port(sk); - sctp_sk(sk)->ep = NULL; - /* Give up our hold on the sock */ - sock_put(sk); - - kfree(ep); - SCTP_DBG_OBJCNT_DEC(ep); + call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu); } /* Hold a reference to an endpoint. */ -void sctp_endpoint_hold(struct sctp_endpoint *ep) +int sctp_endpoint_hold(struct sctp_endpoint *ep) { - refcount_inc(&ep->base.refcnt); + return refcount_inc_not_zero(&ep->base.refcnt); } /* Release a reference to an endpoint and clean up if there are diff --git a/net/sctp/input.c b/net/sctp/input.c index 1f1786021d9c..90e12bafdd48 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -746,23 +746,21 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep) struct sock *sk = ep->base.sk; struct net *net = sock_net(sk); struct sctp_hashbucket *head; - struct sctp_ep_common *epb; - epb = &ep->base; - epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port); - head = &sctp_ep_hashtable[epb->hashent]; + ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port); + head = &sctp_ep_hashtable[ep->hashent]; if (sk->sk_reuseport) { bool any = sctp_is_ep_boundall(sk); - struct sctp_ep_common *epb2; + struct sctp_endpoint *ep2; struct list_head *list; int cnt = 0, err = 1; list_for_each(list, &ep->base.bind_addr.address_list) cnt++; - sctp_for_each_hentry(epb2, &head->chain) { - struct sock *sk2 = epb2->sk; + sctp_for_each_hentry(ep2, &head->chain) { + struct sock *sk2 = ep2->base.sk; if (!net_eq(sock_net(sk2), net) || sk2 == sk || !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) || @@ -789,7 +787,7 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep) } write_lock(&head->lock); - hlist_add_head(&epb->node, &head->chain); + hlist_add_head(&ep->node, &head->chain); write_unlock(&head->lock); return 0; } @@ -811,19 +809,16 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep) { struct sock *sk = ep->base.sk; struct sctp_hashbucket *head; - struct sctp_ep_common *epb; - epb = &ep->base; + ep->hashent = sctp_ep_hashfn(sock_net(sk), ep->base.bind_addr.port); - epb->hashent = sctp_ep_hashfn(sock_net(sk), epb->bind_addr.port); - - head = &sctp_ep_hashtable[epb->hashent]; + head = &sctp_ep_hashtable[ep->hashent]; if (rcu_access_pointer(sk->sk_reuseport_cb)) reuseport_detach_sock(sk); write_lock(&head->lock); - hlist_del_init(&epb->node); + hlist_del_init(&ep->node); write_unlock(&head->lock); } @@ -856,7 +851,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint( const union sctp_addr *paddr) { struct sctp_hashbucket *head; - struct sctp_ep_common *epb; struct sctp_endpoint *ep; struct sock *sk; __be16 lport; @@ -866,8 +860,7 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint( hash = sctp_ep_hashfn(net, ntohs(lport)); head = &sctp_ep_hashtable[hash]; read_lock(&head->lock); - sctp_for_each_hentry(epb, &head->chain) { - ep = sctp_ep(epb); + sctp_for_each_hentry(ep, &head->chain) { if (sctp_endpoint_is_match(ep, net, laddr)) goto hit; } diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 982a87b3e11f..f13d6a34f32f 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -161,7 +161,6 @@ static void 
*sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos) static int sctp_eps_seq_show(struct seq_file *seq, void *v) { struct sctp_hashbucket *head; - struct sctp_ep_common *epb; struct sctp_endpoint *ep; struct sock *sk; int hash = *(loff_t *)v; @@ -171,18 +170,17 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v) head = &sctp_ep_hashtable[hash]; read_lock_bh(&head->lock); - sctp_for_each_hentry(epb, &head->chain) { - ep = sctp_ep(epb); - sk = epb->sk; + sctp_for_each_hentry(ep, &head->chain) { + sk = ep->base.sk; if (!net_eq(sock_net(sk), seq_file_net(seq))) continue; seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk, sctp_sk(sk)->type, sk->sk_state, hash, - epb->bind_addr.port, + ep->base.bind_addr.port, from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), sock_i_ino(sk)); - sctp_seq_dump_local_addrs(seq, epb); + sctp_seq_dump_local_addrs(seq, &ep->base); seq_printf(seq, "\n"); } read_unlock_bh(&head->lock); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 055a6d3ec6e2..f548c67c7cff 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5294,14 +5294,14 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p) { int err = 0; int hash = 0; - struct sctp_ep_common *epb; + struct sctp_endpoint *ep; struct sctp_hashbucket *head; for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; hash++, head++) { read_lock_bh(&head->lock); - sctp_for_each_hentry(epb, &head->chain) { - err = cb(sctp_ep(epb), p); + sctp_for_each_hentry(ep, &head->chain) { + err = cb(ep, p); if (err) break; } @@ -5333,11 +5333,12 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), } EXPORT_SYMBOL_GPL(sctp_transport_lookup_process); -int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), - int (*cb_done)(struct sctp_transport *, void *), - struct net *net, int *pos, void *p) { +int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done, + struct net *net, int *pos, void *p) +{ struct rhashtable_iter hti; struct sctp_transport *tsp; + struct sctp_endpoint *ep; int ret; again: @@ -5346,26 +5347,32 @@ again: tsp = sctp_transport_get_idx(net, &hti, *pos + 1); for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { - ret = cb(tsp, p); - if (ret) - break; + ep = tsp->asoc->ep; + if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */ + ret = cb(ep, tsp, p); + if (ret) + break; + sctp_endpoint_put(ep); + } (*pos)++; sctp_transport_put(tsp); } sctp_transport_walk_stop(&hti); if (ret) { - if (cb_done && !cb_done(tsp, p)) { + if (cb_done && !cb_done(ep, tsp, p)) { (*pos)++; + sctp_endpoint_put(ep); sctp_transport_put(tsp); goto again; } + sctp_endpoint_put(ep); sctp_transport_put(tsp); } return ret; } -EXPORT_SYMBOL_GPL(sctp_for_each_transport); +EXPORT_SYMBOL_GPL(sctp_transport_traverse_process); /* 7.2.1 Association Status (SCTP_STATUS) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 67a78bbf305f..ba9d1a8ebb4a 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -194,7 +194,9 @@ static int smc_release(struct socket *sock) /* cleanup for a dangling non-blocking connect */ if (smc->connect_nonblock && sk->sk_state == SMC_INIT) tcp_abort(smc->clcsock->sk, ECONNABORTED); - flush_work(&smc->connect_work); + + if (cancel_work_sync(&smc->connect_work)) + sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */ if (sk->sk_state == SMC_LISTEN) /* smc_close_non_accepted() is called and acquires diff --git a/net/smc/smc.h b/net/smc/smc.h index 
f4286ca1f228..1a4fc1c6c4ab 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -180,6 +180,11 @@ struct smc_connection { u16 tx_cdc_seq; /* sequence # for CDC send */ u16 tx_cdc_seq_fin; /* sequence # - tx completed */ spinlock_t send_lock; /* protect wr_sends */ + atomic_t cdc_pend_tx_wr; /* number of pending tx CDC wqe + * - inc when post wqe, + * - dec on polled tx cqe + */ + wait_queue_head_t cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr*/ struct delayed_work tx_work; /* retry of smc_cdc_msg_send */ u32 tx_off; /* base offset in peer rmb */ diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index 99acd337ba90..84c8a4374fdd 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -31,10 +31,6 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, struct smc_sock *smc; int diff; - if (!conn) - /* already dismissed */ - return; - smc = container_of(conn, struct smc_sock, conn); bh_lock_sock(&smc->sk); if (!wc_status) { @@ -51,6 +47,12 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, conn); conn->tx_cdc_seq_fin = cdcpend->ctrl_seq; } + + if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) && + unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq))) + wake_up(&conn->cdc_pend_tx_wq); + WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0); + smc_tx_sndbuf_nonfull(smc); bh_unlock_sock(&smc->sk); } @@ -107,6 +109,10 @@ int smc_cdc_msg_send(struct smc_connection *conn, conn->tx_cdc_seq++; conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed); + + atomic_inc(&conn->cdc_pend_tx_wr); + smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */ + rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); if (!rc) { smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn); @@ -114,6 +120,7 @@ int smc_cdc_msg_send(struct smc_connection *conn, } else { conn->tx_cdc_seq--; conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; + atomic_dec(&conn->cdc_pend_tx_wr); } return rc; @@ -136,7 +143,18 @@ int smcr_cdc_msg_send_validation(struct smc_connection *conn, peer->token = htonl(local->token); peer->prod_flags.failover_validation = 1; + /* We need to set pend->conn here to make sure smc_cdc_tx_handler() + * can handle properly + */ + smc_cdc_add_pending_send(conn, pend); + + atomic_inc(&conn->cdc_pend_tx_wr); + smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */ + rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); + if (unlikely(rc)) + atomic_dec(&conn->cdc_pend_tx_wr); + return rc; } @@ -193,31 +211,9 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) return rc; } -static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend, - unsigned long data) +void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn) { - struct smc_connection *conn = (struct smc_connection *)data; - struct smc_cdc_tx_pend *cdc_pend = - (struct smc_cdc_tx_pend *)tx_pend; - - return cdc_pend->conn == conn; -} - -static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend) -{ - struct smc_cdc_tx_pend *cdc_pend = - (struct smc_cdc_tx_pend *)tx_pend; - - cdc_pend->conn = NULL; -} - -void smc_cdc_tx_dismiss_slots(struct smc_connection *conn) -{ - struct smc_link *link = conn->lnk; - - smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE, - smc_cdc_tx_filter, smc_cdc_tx_dismisser, - (unsigned long)conn); + wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr)); } /* Send a SMC-D CDC header. 
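The smc.h/smc_cdc.c hunks above replace the old filter/dismisser walk over pending send slots with a per-connection atomic counter (cdc_pend_tx_wr) plus a waitqueue (cdc_pend_tx_wq): senders increment before posting a work request (and undo on failure), the completion handler decrements and wakes any sleeper, and teardown simply waits for the count to reach zero. Below is a minimal user-space model of that accounting, assuming simplified stand-in types — pthreads and C11 atomics in place of the kernel's atomic_t, wait_event() and wake_up() — not the SMC API itself.

/*
 * Toy model of the cdc_pend_tx_wr accounting introduced above.
 * Assumptions: pthread mutex/cond stand in for the kernel waitqueue,
 * and "post"/"completion" are stubs for the real verbs path.
 * Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct conn {
	atomic_int cdc_pend_tx_wr;	/* pending tx CDC work requests */
	pthread_mutex_t lock;
	pthread_cond_t cdc_pend_tx_wq;	/* signalled when count drops to 0 */
};

static void post_cdc_send(struct conn *c)
{
	/* inc before posting, so a completion can never be seen first */
	atomic_fetch_add(&c->cdc_pend_tx_wr, 1);
	/* ... post the work request; on failure, atomic_fetch_sub() ... */
}

static void cdc_tx_completion(struct conn *c)
{
	/* dec on polled tx CQE; the last completion wakes the waiter */
	if (atomic_fetch_sub(&c->cdc_pend_tx_wr, 1) == 1) {
		pthread_mutex_lock(&c->lock);
		pthread_cond_broadcast(&c->cdc_pend_tx_wq);
		pthread_mutex_unlock(&c->lock);
	}
}

static void wait_pend_tx_wr(struct conn *c)
{
	/* mirrors smc_cdc_wait_pend_tx_wr(): sleep until nothing pends */
	pthread_mutex_lock(&c->lock);
	while (atomic_load(&c->cdc_pend_tx_wr))
		pthread_cond_wait(&c->cdc_pend_tx_wq, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	static struct conn c = { .lock = PTHREAD_MUTEX_INITIALIZER,
				 .cdc_pend_tx_wq = PTHREAD_COND_INITIALIZER };

	post_cdc_send(&c);
	cdc_tx_completion(&c);
	wait_pend_tx_wr(&c);	/* returns at once: count is back to 0 */
	printf("no pending CDC sends\n");
	return 0;
}

The ordering is the point: because the increment happens before the post (the kernel inserts an smp_mb__after_atomic() there), the completion side can never observe a zero counter while a work request is still in flight, which is what lets smc_conn_free() and smcr_link_clear() wait for completions instead of dismissing slots under a dead connection.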
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h index 0a0a89abd38b..696cc11f2303 100644 --- a/net/smc/smc_cdc.h +++ b/net/smc/smc_cdc.h @@ -291,7 +291,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn, struct smc_wr_buf **wr_buf, struct smc_rdma_wr **wr_rdma_buf, struct smc_cdc_tx_pend **pend); -void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); +void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn); int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, struct smc_cdc_tx_pend *pend); int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn); diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 85be94cabb01..20d1417f0a68 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -647,7 +647,7 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr) for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *lnk = &lgr->lnk[i]; - if (smc_link_usable(lnk)) + if (smc_link_sendable(lnk)) lnk->state = SMC_LNK_INACTIVE; } wake_up_all(&lgr->llc_msg_waiter); @@ -1133,7 +1133,7 @@ void smc_conn_free(struct smc_connection *conn) smc_ism_unset_conn(conn); tasklet_kill(&conn->rx_tsklet); } else { - smc_cdc_tx_dismiss_slots(conn); + smc_cdc_wait_pend_tx_wr(conn); if (current_work() != &conn->abort_work) cancel_work_sync(&conn->abort_work); } @@ -1210,7 +1210,7 @@ void smcr_link_clear(struct smc_link *lnk, bool log) smc_llc_link_clear(lnk, log); smcr_buf_unmap_lgr(lnk); smcr_rtoken_clear_link(lnk); - smc_ib_modify_qp_reset(lnk); + smc_ib_modify_qp_error(lnk); smc_wr_free_link(lnk); smc_ib_destroy_queue_pair(lnk); smc_ib_dealloc_protection_domain(lnk); @@ -1342,7 +1342,7 @@ static void smc_conn_kill(struct smc_connection *conn, bool soft) else tasklet_unlock_wait(&conn->rx_tsklet); } else { - smc_cdc_tx_dismiss_slots(conn); + smc_cdc_wait_pend_tx_wr(conn); } smc_lgr_unregister_conn(conn); smc_close_active_abort(smc); @@ -1465,11 +1465,16 @@ void smc_smcd_terminate_all(struct smcd_dev *smcd) /* Called when an SMCR device is removed or the smc module is unloaded. * If smcibdev is given, all SMCR link groups using this device are terminated. * If smcibdev is NULL, all SMCR link groups are terminated. + * + * We must wait here for QPs been destroyed before we destroy the CQs, + * or we won't received any CQEs and cdc_pend_tx_wr cannot reach 0 thus + * smc_sock cannot be released. 
*/ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev) { struct smc_link_group *lgr, *lg; LIST_HEAD(lgr_free_list); + LIST_HEAD(lgr_linkdown_list); int i; spin_lock_bh(&smc_lgr_list.lock); @@ -1481,7 +1486,7 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev) list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) { for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (lgr->lnk[i].smcibdev == smcibdev) - smcr_link_down_cond_sched(&lgr->lnk[i]); + list_move_tail(&lgr->list, &lgr_linkdown_list); } } } @@ -1493,6 +1498,16 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev) __smc_lgr_terminate(lgr, false); } + list_for_each_entry_safe(lgr, lg, &lgr_linkdown_list, list) { + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { + if (lgr->lnk[i].smcibdev == smcibdev) { + mutex_lock(&lgr->llc_conf_mutex); + smcr_link_down_cond(&lgr->lnk[i]); + mutex_unlock(&lgr->llc_conf_mutex); + } + } + } + if (smcibdev) { if (atomic_read(&smcibdev->lnk_cnt)) wait_event(smcibdev->lnks_deleted, @@ -1592,7 +1607,6 @@ static void smcr_link_down(struct smc_link *lnk) if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list)) return; - smc_ib_modify_qp_reset(lnk); to_lnk = smc_switch_conns(lgr, lnk, true); if (!to_lnk) { /* no backup link available */ smcr_link_clear(lnk, true); @@ -1830,6 +1844,7 @@ create: conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; conn->urg_state = SMC_URG_READ; + init_waitqueue_head(&conn->cdc_pend_tx_wq); INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work); if (ini->is_smcd) { conn->rx_off = sizeof(struct smcd_cdc_msg); diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 59cef3b830d8..d63b08274197 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -415,6 +415,12 @@ static inline bool smc_link_usable(struct smc_link *lnk) return true; } +static inline bool smc_link_sendable(struct smc_link *lnk) +{ + return smc_link_usable(lnk) && + lnk->qp_attr.cur_qp_state == IB_QPS_RTS; +} + static inline bool smc_link_active(struct smc_link *lnk) { return lnk->state == SMC_LNK_ACTIVE; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 905604c378ad..a3e2d3b89568 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -111,12 +111,12 @@ int smc_ib_modify_qp_rts(struct smc_link *lnk) IB_QP_MAX_QP_RD_ATOMIC); } -int smc_ib_modify_qp_reset(struct smc_link *lnk) +int smc_ib_modify_qp_error(struct smc_link *lnk) { struct ib_qp_attr qp_attr; memset(&qp_attr, 0, sizeof(qp_attr)); - qp_attr.qp_state = IB_QPS_RESET; + qp_attr.qp_state = IB_QPS_ERR; return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE); } diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index 07585937370e..bfa1c6bf6313 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -90,6 +90,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk); int smc_ib_ready_link(struct smc_link *lnk); int smc_ib_modify_qp_rts(struct smc_link *lnk); int smc_ib_modify_qp_reset(struct smc_link *lnk); +int smc_ib_modify_qp_error(struct smc_link *lnk); long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev); int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, struct smc_buf_desc *buf_slot, u8 link_idx); diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index b102680296b8..3e9fd8a3124c 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -1630,7 +1630,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn) delllc.reason = htonl(rsn); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (!smc_link_usable(&lgr->lnk[i])) + 
if (!smc_link_sendable(&lgr->lnk[i])) continue; if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc)) break; diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index 600ab5889227..24be1d03fef9 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -54,21 +54,13 @@ struct smc_wr_tx_pend { /* control data for a pending send request */ /* returns true if at least one tx work request is pending on the given link */ static inline bool smc_wr_is_tx_pend(struct smc_link *link) { - if (find_first_bit(link->wr_tx_mask, link->wr_tx_cnt) != - link->wr_tx_cnt) { - return true; - } - return false; + return !bitmap_empty(link->wr_tx_mask, link->wr_tx_cnt); } /* wait till all pending tx work requests on the given link are completed */ -int smc_wr_tx_wait_no_pending_sends(struct smc_link *link) +void smc_wr_tx_wait_no_pending_sends(struct smc_link *link) { - if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link), - SMC_WR_TX_WAIT_PENDING_TIME)) - return 0; - else /* timeout */ - return -EPIPE; + wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link)); } static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id) @@ -87,7 +79,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc) struct smc_wr_tx_pend pnd_snd; struct smc_link *link; u32 pnd_snd_idx; - int i; link = wc->qp->qp_context; @@ -128,14 +119,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc) } if (wc->status) { - for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) { - /* clear full struct smc_wr_tx_pend including .priv */ - memset(&link->wr_tx_pends[i], 0, - sizeof(link->wr_tx_pends[i])); - memset(&link->wr_tx_bufs[i], 0, - sizeof(link->wr_tx_bufs[i])); - clear_bit(i, link->wr_tx_mask); - } if (link->lgr->smc_version == SMC_V2) { memset(link->wr_tx_v2_pend, 0, sizeof(*link->wr_tx_v2_pend)); @@ -188,7 +171,7 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context) static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) { *idx = link->wr_tx_cnt; - if (!smc_link_usable(link)) + if (!smc_link_sendable(link)) return -ENOLINK; for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) { if (!test_and_set_bit(*idx, link->wr_tx_mask)) @@ -231,7 +214,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link, } else { rc = wait_event_interruptible_timeout( link->wr_tx_wait, - !smc_link_usable(link) || + !smc_link_sendable(link) || lgr->terminating || (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY), SMC_WR_TX_WAIT_FREE_SLOT_TIME); @@ -358,18 +341,20 @@ int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv, unsigned long timeout) { struct smc_wr_tx_pend *pend; + u32 pnd_idx; int rc; pend = container_of(priv, struct smc_wr_tx_pend, priv); pend->compl_requested = 1; - init_completion(&link->wr_tx_compl[pend->idx]); + pnd_idx = pend->idx; + init_completion(&link->wr_tx_compl[pnd_idx]); rc = smc_wr_tx_send(link, priv); if (rc) return rc; /* wait for completion by smc_wr_tx_process_cqe() */ rc = wait_for_completion_interruptible_timeout( - &link->wr_tx_compl[pend->idx], timeout); + &link->wr_tx_compl[pnd_idx], timeout); if (rc <= 0) rc = -ENODATA; if (rc > 0) @@ -419,25 +404,6 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr) return rc; } -void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type, - smc_wr_tx_filter filter, - smc_wr_tx_dismisser dismisser, - unsigned long data) -{ - struct smc_wr_tx_pend_priv *tx_pend; - struct smc_wr_rx_hdr *wr_tx; - int i; - - for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) { 
- wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i]; - if (wr_tx->type != wr_tx_hdr_type) - continue; - tx_pend = &link->wr_tx_pends[i].priv; - if (filter(tx_pend, data)) - dismisser(tx_pend); - } -} - /****************************** receive queue ********************************/ int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler) @@ -673,10 +639,7 @@ void smc_wr_free_link(struct smc_link *lnk) smc_wr_wakeup_reg_wait(lnk); smc_wr_wakeup_tx_wait(lnk); - if (smc_wr_tx_wait_no_pending_sends(lnk)) - memset(lnk->wr_tx_mask, 0, - BITS_TO_LONGS(SMC_WR_BUF_CNT) * - sizeof(*lnk->wr_tx_mask)); + smc_wr_tx_wait_no_pending_sends(lnk); wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt))); wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt))); @@ -729,7 +692,7 @@ void smc_wr_free_link_mem(struct smc_link *lnk) lnk->wr_tx_compl = NULL; kfree(lnk->wr_tx_pends); lnk->wr_tx_pends = NULL; - kfree(lnk->wr_tx_mask); + bitmap_free(lnk->wr_tx_mask); lnk->wr_tx_mask = NULL; kfree(lnk->wr_tx_sges); lnk->wr_tx_sges = NULL; @@ -805,9 +768,7 @@ int smc_wr_alloc_link_mem(struct smc_link *link) GFP_KERNEL); if (!link->wr_rx_sges) goto no_mem_wr_tx_sges; - link->wr_tx_mask = kcalloc(BITS_TO_LONGS(SMC_WR_BUF_CNT), - sizeof(*link->wr_tx_mask), - GFP_KERNEL); + link->wr_tx_mask = bitmap_zalloc(SMC_WR_BUF_CNT, GFP_KERNEL); if (!link->wr_tx_mask) goto no_mem_wr_rx_sges; link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT, @@ -920,8 +881,7 @@ int smc_wr_create_link(struct smc_link *lnk) goto dma_unmap; } smc_wr_init_sge(lnk); - memset(lnk->wr_tx_mask, 0, - BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask)); + bitmap_zero(lnk->wr_tx_mask, SMC_WR_BUF_CNT); init_waitqueue_head(&lnk->wr_tx_wait); atomic_set(&lnk->wr_tx_refcnt, 0); init_waitqueue_head(&lnk->wr_reg_wait); diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index f353311e6f84..47512ccce5ef 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h @@ -22,7 +22,6 @@ #define SMC_WR_BUF_CNT 16 /* # of ctrl buffers per link */ #define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ) -#define SMC_WR_TX_WAIT_PENDING_TIME (5 * HZ) #define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */ @@ -62,7 +61,7 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val) static inline bool smc_wr_tx_link_hold(struct smc_link *link) { - if (!smc_link_usable(link)) + if (!smc_link_sendable(link)) return false; atomic_inc(&link->wr_tx_refcnt); return true; @@ -130,7 +129,7 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type, smc_wr_tx_filter filter, smc_wr_tx_dismisser dismisser, unsigned long data); -int smc_wr_tx_wait_no_pending_sends(struct smc_link *link); +void smc_wr_tx_wait_no_pending_sends(struct smc_link *link); int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler); int smc_wr_rx_post_init(struct smc_link *link); diff --git a/net/socket.c b/net/socket.c index 721a5a1b1106..4b8bb20d5e9a 100644 --- a/net/socket.c +++ b/net/socket.c @@ -3234,21 +3234,6 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd, return dev_ioctl(net, cmd, &ifreq, data, NULL); } -/* Since old style bridge ioctl's endup using SIOCDEVPRIVATE - * for some operations; this forces use of the newer bridge-utils that - * use compatible ioctls - */ -static int old_bridge_ioctl(compat_ulong_t __user *argp) -{ - compat_ulong_t tmp; - - if (get_user(tmp, argp)) - return -EFAULT; - if (tmp == BRCTL_GET_VERSION) - return BRCTL_VERSION + 1; - return -EINVAL; -} - static int compat_sock_ioctl_trans(struct file 
*file, struct socket *sock, unsigned int cmd, unsigned long arg) { @@ -3260,9 +3245,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, return sock_ioctl(file, cmd, (unsigned long)argp); switch (cmd) { - case SIOCSIFBR: - case SIOCGIFBR: - return old_bridge_ioctl(argp); case SIOCWANDEV: return compat_siocwandev(net, argp); case SIOCGSTAMP_OLD: @@ -3291,6 +3273,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, case SIOCGSTAMP_NEW: case SIOCGSTAMPNS_NEW: case SIOCGIFCONF: + case SIOCSIFBR: + case SIOCGIFBR: return sock_ioctl(file, cmd, arg); case SIOCGIFFLAGS: diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 60bc74b76adc..473a790f5894 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -787,7 +787,7 @@ int tipc_attach_loopback(struct net *net) if (!dev) return -ENODEV; - dev_hold(dev); + dev_hold_track(dev, &tn->loopback_pt.dev_tracker, GFP_KERNEL); tn->loopback_pt.dev = dev; tn->loopback_pt.type = htons(ETH_P_TIPC); tn->loopback_pt.func = tipc_loopback_rcv_pkt; @@ -800,7 +800,7 @@ void tipc_detach_loopback(struct net *net) struct tipc_net *tn = tipc_net(net); dev_remove_pack(&tn->loopback_pt); - dev_put(net->loopback_dev); + dev_put_track(net->loopback_dev, &tn->loopback_pt.dev_tracker); } /* Caller should hold rtnl_lock to protect the bearer */ diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index 81116312b753..9325479295b8 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -524,7 +524,7 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey, return -EEXIST; /* Allocate a new AEAD */ - tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC); if (unlikely(!tmp)) return -ENOMEM; @@ -1463,7 +1463,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net, return -EEXIST; /* Allocate crypto */ - c = kzalloc(sizeof(*c), GFP_KERNEL); + c = kzalloc(sizeof(*c), GFP_ATOMIC); if (!c) return -ENOMEM; @@ -1477,7 +1477,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net, } /* Allocate statistic structure */ - c->stats = alloc_percpu(struct tipc_crypto_stats); + c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC); if (!c->stats) { if (c->wq) destroy_workqueue(c->wq); @@ -2450,7 +2450,7 @@ static void tipc_crypto_work_tx(struct work_struct *work) } /* Lets duplicate it first */ - skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_KERNEL); + skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC); rcu_read_unlock(); /* Now, generate new key, initiate & distribute it */ diff --git a/net/tipc/link.c b/net/tipc/link.c index 09ae8448f394..8d9e09f48f4c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1298,7 +1298,8 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, return false; #ifdef CONFIG_TIPC_CRYPTO case MSG_CRYPTO: - if (TIPC_SKB_CB(skb)->decrypted) { + if (sysctl_tipc_key_exchange_enabled && + TIPC_SKB_CB(skb)->decrypted) { tipc_crypto_msg_rcv(l->net, skb); return true; } diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c index c09bea89151b..01d44e2598e2 100644 --- a/net/unix/sysctl_net_unix.c +++ b/net/unix/sysctl_net_unix.c @@ -30,10 +30,6 @@ int __net_init unix_sysctl_register(struct net *net) if (table == NULL) goto err_alloc; - /* Don't export sysctls to unprivileged users */ - if (net->user_ns != &init_user_ns) - table[0].procname = NULL; - table[0].data = &net->unx.sysctl_max_dgram_qlen; net->unx.ctl = register_net_sysctl(net, "net/unix", table); if 
(net->unx.ctl == NULL) diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 59ee1be5a6dd..ec2c2afbf0d0 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -1299,7 +1299,8 @@ void virtio_transport_recv_pkt(struct virtio_transport *t, space_available = virtio_transport_space_update(sk, pkt); /* Update CID in case it has changed after a transport reset event */ - vsk->local_addr.svm_cid = dst.svm_cid; + if (vsk->local_addr.svm_cid != VMADDR_CID_ANY) + vsk->local_addr.svm_cid = dst.svm_cid; if (space_available) sk->sk_write_space(sk); diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 869c43d4414c..eb822052d344 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -245,19 +245,7 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) oper_freq - MHZ_TO_KHZ(oper_width) / 2) return false; break; - case NL80211_CHAN_WIDTH_40: - if (chandef->center_freq1 != control_freq + 10 && - chandef->center_freq1 != control_freq - 10) - return false; - if (chandef->center_freq2) - return false; - break; case NL80211_CHAN_WIDTH_80P80: - if (chandef->center_freq1 != control_freq + 30 && - chandef->center_freq1 != control_freq + 10 && - chandef->center_freq1 != control_freq - 10 && - chandef->center_freq1 != control_freq - 30) - return false; if (!chandef->center_freq2) return false; /* adjacent is not allowed -- that's a 160 MHz channel */ @@ -265,28 +253,42 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) chandef->center_freq2 - chandef->center_freq1 == 80) return false; break; - case NL80211_CHAN_WIDTH_80: - if (chandef->center_freq1 != control_freq + 30 && - chandef->center_freq1 != control_freq + 10 && - chandef->center_freq1 != control_freq - 10 && - chandef->center_freq1 != control_freq - 30) - return false; + default: if (chandef->center_freq2) return false; break; - case NL80211_CHAN_WIDTH_160: - if (chandef->center_freq1 != control_freq + 70 && - chandef->center_freq1 != control_freq + 50 && - chandef->center_freq1 != control_freq + 30 && - chandef->center_freq1 != control_freq + 10 && - chandef->center_freq1 != control_freq - 10 && - chandef->center_freq1 != control_freq - 30 && - chandef->center_freq1 != control_freq - 50 && - chandef->center_freq1 != control_freq - 70) - return false; - if (chandef->center_freq2) - return false; + } + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_1: + case NL80211_CHAN_WIDTH_2: + case NL80211_CHAN_WIDTH_4: + case NL80211_CHAN_WIDTH_8: + case NL80211_CHAN_WIDTH_16: + /* all checked above */ break; + case NL80211_CHAN_WIDTH_160: + if (chandef->center_freq1 == control_freq + 70 || + chandef->center_freq1 == control_freq + 50 || + chandef->center_freq1 == control_freq - 50 || + chandef->center_freq1 == control_freq - 70) + break; + fallthrough; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_80: + if (chandef->center_freq1 == control_freq + 30 || + chandef->center_freq1 == control_freq - 30) + break; + fallthrough; + case NL80211_CHAN_WIDTH_40: + if (chandef->center_freq1 == control_freq + 10 || + chandef->center_freq1 == control_freq - 10) + break; + fallthrough; default: return false; } @@ -712,6 +714,19 @@ static bool cfg80211_is_wiphy_oper_chan(struct wiphy *wiphy, return false; } +static bool +cfg80211_offchan_chain_is_active(struct cfg80211_registered_device *rdev, + 
struct ieee80211_channel *channel) +{ + if (!rdev->background_radar_wdev) + return false; + + if (!cfg80211_chandef_valid(&rdev->background_radar_chandef)) + return false; + + return cfg80211_is_sub_chan(&rdev->background_radar_chandef, channel); +} + bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy, struct ieee80211_channel *chan) { @@ -728,6 +743,9 @@ bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy, if (cfg80211_is_wiphy_oper_chan(&rdev->wiphy, chan)) return true; + + if (cfg80211_offchan_chain_is_active(rdev, chan)) + return true; } return false; diff --git a/net/wireless/core.c b/net/wireless/core.c index eb297e1015e0..3a54c8e6b6c6 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -545,6 +545,10 @@ use_default_name: INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work); INIT_WORK(&rdev->conn_work, cfg80211_conn_work); INIT_WORK(&rdev->event_work, cfg80211_event_work); + INIT_WORK(&rdev->background_cac_abort_wk, + cfg80211_background_cac_abort_wk); + INIT_DELAYED_WORK(&rdev->background_cac_done_wk, + cfg80211_background_cac_done_wk); init_waitqueue_head(&rdev->dev_wait); @@ -733,6 +737,7 @@ int wiphy_register(struct wiphy *wiphy) if (wiphy->interface_modes & ~(BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_P2P_DEVICE) | @@ -1054,11 +1059,13 @@ void wiphy_unregister(struct wiphy *wiphy) cancel_work_sync(&rdev->conn_work); flush_work(&rdev->event_work); cancel_delayed_work_sync(&rdev->dfs_update_channels_wk); + cancel_delayed_work_sync(&rdev->background_cac_done_wk); flush_work(&rdev->destroy_work); flush_work(&rdev->sched_scan_stop_wk); flush_work(&rdev->propagate_radar_detect_wk); flush_work(&rdev->propagate_cac_done_wk); flush_work(&rdev->mgmt_registrations_update_wk); + flush_work(&rdev->background_cac_abort_wk); #ifdef CONFIG_PM if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) @@ -1207,6 +1214,8 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev, cfg80211_pmsr_wdev_down(wdev); + cfg80211_stop_background_radar_detection(wdev); + switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: __cfg80211_leave_ibss(rdev, dev, true); diff --git a/net/wireless/core.h b/net/wireless/core.h index 1720abf36f92..3a7dbd63d8c6 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -84,6 +84,11 @@ struct cfg80211_registered_device { struct delayed_work dfs_update_channels_wk; + struct wireless_dev *background_radar_wdev; + struct cfg80211_chan_def background_radar_chandef; + struct delayed_work background_cac_done_wk; + struct work_struct background_cac_abort_wk; + /* netlink port which started critical protocol (0 means not started) */ u32 crit_proto_nlportid; @@ -491,6 +496,17 @@ cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy, void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev); +int +cfg80211_start_background_radar_detection(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev, + struct cfg80211_chan_def *chandef); + +void cfg80211_stop_background_radar_detection(struct wireless_dev *wdev); + +void cfg80211_background_cac_done_wk(struct work_struct *work); + +void cfg80211_background_cac_abort_wk(struct work_struct *work); + bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy, struct ieee80211_channel *chan); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 783acd2c4211..c8155a483ec2 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ 
-905,13 +905,13 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work) } -void cfg80211_radar_event(struct wiphy *wiphy, - struct cfg80211_chan_def *chandef, - gfp_t gfp) +void __cfg80211_radar_event(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + bool offchan, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); - trace_cfg80211_radar_event(wiphy, chandef); + trace_cfg80211_radar_event(wiphy, chandef, offchan); /* only set the chandef supplied channel to unavailable, in * case the radar is detected on only one of multiple channels @@ -919,6 +919,9 @@ void cfg80211_radar_event(struct wiphy *wiphy, */ cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_UNAVAILABLE); + if (offchan) + queue_work(cfg80211_wq, &rdev->background_cac_abort_wk); + cfg80211_sched_dfs_chan_update(rdev); nl80211_radar_notify(rdev, chandef, NL80211_RADAR_DETECTED, NULL, gfp); @@ -926,7 +929,7 @@ void cfg80211_radar_event(struct wiphy *wiphy, memcpy(&rdev->radar_chandef, chandef, sizeof(struct cfg80211_chan_def)); queue_work(cfg80211_wq, &rdev->propagate_radar_detect_wk); } -EXPORT_SYMBOL(cfg80211_radar_event); +EXPORT_SYMBOL(__cfg80211_radar_event); void cfg80211_cac_event(struct net_device *netdev, const struct cfg80211_chan_def *chandef, @@ -970,3 +973,143 @@ void cfg80211_cac_event(struct net_device *netdev, nl80211_radar_notify(rdev, chandef, event, netdev, gfp); } EXPORT_SYMBOL(cfg80211_cac_event); + +static void +__cfg80211_background_cac_event(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev, + const struct cfg80211_chan_def *chandef, + enum nl80211_radar_event event) +{ + struct wiphy *wiphy = &rdev->wiphy; + struct net_device *netdev; + + lockdep_assert_wiphy(&rdev->wiphy); + + if (!cfg80211_chandef_valid(chandef)) + return; + + if (!rdev->background_radar_wdev) + return; + + switch (event) { + case NL80211_RADAR_CAC_FINISHED: + cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE); + memcpy(&rdev->cac_done_chandef, chandef, sizeof(*chandef)); + queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk); + cfg80211_sched_dfs_chan_update(rdev); + wdev = rdev->background_radar_wdev; + break; + case NL80211_RADAR_CAC_ABORTED: + if (!cancel_delayed_work(&rdev->background_cac_done_wk)) + return; + wdev = rdev->background_radar_wdev; + break; + case NL80211_RADAR_CAC_STARTED: + break; + default: + return; + } + + netdev = wdev ? 
+    nl80211_radar_notify(rdev, chandef, event, netdev, GFP_KERNEL);
+}
+
+static void
+cfg80211_background_cac_event(struct cfg80211_registered_device *rdev,
+                              const struct cfg80211_chan_def *chandef,
+                              enum nl80211_radar_event event)
+{
+    wiphy_lock(&rdev->wiphy);
+    __cfg80211_background_cac_event(rdev, rdev->background_radar_wdev,
+                                    chandef, event);
+    wiphy_unlock(&rdev->wiphy);
+}
+
+void cfg80211_background_cac_done_wk(struct work_struct *work)
+{
+    struct delayed_work *delayed_work = to_delayed_work(work);
+    struct cfg80211_registered_device *rdev;
+
+    rdev = container_of(delayed_work, struct cfg80211_registered_device,
+                        background_cac_done_wk);
+    cfg80211_background_cac_event(rdev, &rdev->background_radar_chandef,
+                                  NL80211_RADAR_CAC_FINISHED);
+}
+
+void cfg80211_background_cac_abort_wk(struct work_struct *work)
+{
+    struct cfg80211_registered_device *rdev;
+
+    rdev = container_of(work, struct cfg80211_registered_device,
+                        background_cac_abort_wk);
+    cfg80211_background_cac_event(rdev, &rdev->background_radar_chandef,
+                                  NL80211_RADAR_CAC_ABORTED);
+}
+
+void cfg80211_background_cac_abort(struct wiphy *wiphy)
+{
+    struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+    queue_work(cfg80211_wq, &rdev->background_cac_abort_wk);
+}
+EXPORT_SYMBOL(cfg80211_background_cac_abort);
+
+int
+cfg80211_start_background_radar_detection(struct cfg80211_registered_device *rdev,
+                                          struct wireless_dev *wdev,
+                                          struct cfg80211_chan_def *chandef)
+{
+    unsigned int cac_time_ms;
+    int err;
+
+    lockdep_assert_wiphy(&rdev->wiphy);
+
+    if (!wiphy_ext_feature_isset(&rdev->wiphy,
+                                 NL80211_EXT_FEATURE_RADAR_BACKGROUND))
+        return -EOPNOTSUPP;
+
+    /* Offchannel chain already locked by another wdev */
+    if (rdev->background_radar_wdev && rdev->background_radar_wdev != wdev)
+        return -EBUSY;
+
+    /* CAC already in progress on the offchannel chain */
+    if (rdev->background_radar_wdev == wdev &&
+        delayed_work_pending(&rdev->background_cac_done_wk))
+        return -EBUSY;
+
+    err = rdev_set_radar_background(rdev, chandef);
+    if (err)
+        return err;
+
+    cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, chandef);
+    if (!cac_time_ms)
+        cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+
+    rdev->background_radar_chandef = *chandef;
+    rdev->background_radar_wdev = wdev; /* Get offchain ownership */
+
+    __cfg80211_background_cac_event(rdev, wdev, chandef,
+                                    NL80211_RADAR_CAC_STARTED);
+    queue_delayed_work(cfg80211_wq, &rdev->background_cac_done_wk,
+                       msecs_to_jiffies(cac_time_ms));
+
+    return 0;
+}
+
+void cfg80211_stop_background_radar_detection(struct wireless_dev *wdev)
+{
+    struct wiphy *wiphy = wdev->wiphy;
+    struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+    lockdep_assert_wiphy(wiphy);
+
+    if (wdev != rdev->background_radar_wdev)
+        return;
+
+    rdev_set_radar_background(rdev, NULL);
+    rdev->background_radar_wdev = NULL; /* Release offchain ownership */
+
+    __cfg80211_background_cac_event(rdev, wdev,
+                                    &rdev->background_radar_chandef,
+                                    NL80211_RADAR_CAC_ABORTED);
+}
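With the mlme.c plumbing above in place, a driver that detects radar on its dedicated offchannel chain only has to call the exported helper; the abort itself is deferred to a workqueue, so the call is cheap. A sketch with a hypothetical driver hook:

    /* Hypothetical driver event path: radar hit on the background chain.
     * cfg80211_background_cac_abort() merely queues background_cac_abort_wk. */
    static void example_drv_background_radar_detected(struct wiphy *wiphy)
    {
        cfg80211_background_cac_abort(wiphy);
    }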
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a27b3b5fa210..578bff9c378b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -776,6 +776,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
     [NL80211_ATTR_MBSSID_CONFIG] =
             NLA_POLICY_NESTED(nl80211_mbssid_config_policy),
     [NL80211_ATTR_MBSSID_ELEMS] = { .type = NLA_NESTED },
+    [NL80211_ATTR_RADAR_BACKGROUND] = { .type = NLA_FLAG },
+    [NL80211_ATTR_AP_SETTINGS_FLAGS] = { .type = NLA_U32 },
 };

 /* policy for the key attributes */
@@ -3669,14 +3671,16 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
     case NL80211_IFTYPE_STATION:
     case NL80211_IFTYPE_P2P_CLIENT:
     case NL80211_IFTYPE_ADHOC: {
-        const u8 *ssid_ie;
+        const struct element *ssid_elem;
+
         if (!wdev->current_bss)
             break;
         rcu_read_lock();
-        ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
-                                       WLAN_EID_SSID);
-        if (ssid_ie &&
-            nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
+        ssid_elem = ieee80211_bss_get_elem(&wdev->current_bss->pub,
+                                           WLAN_EID_SSID);
+        if (ssid_elem &&
+            nla_put(msg, NL80211_ATTR_SSID, ssid_elem->datalen,
+                    ssid_elem->data))
             goto nla_put_failure_rcu_locked;
         rcu_read_unlock();
         break;
@@ -5711,8 +5715,11 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)

     nl80211_calculate_ap_params(params);

-    if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
-        params->flags |= AP_SETTINGS_EXTERNAL_AUTH_SUPPORT;
+    if (info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS])
+        params->flags = nla_get_u32(
+            info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS]);
+    else if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
+        params->flags |= NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT;

     wdev_lock(wdev);
     err = rdev_start_ap(rdev, dev, params);
@@ -9274,38 +9281,60 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
     struct cfg80211_chan_def chandef;
     enum nl80211_dfs_regions dfs_region;
     unsigned int cac_time_ms;
-    int err;
+    int err = -EINVAL;
+
+    flush_delayed_work(&rdev->dfs_update_channels_wk);
+
+    wiphy_lock(wiphy);

     dfs_region = reg_get_dfs_region(wiphy);
     if (dfs_region == NL80211_DFS_UNSET)
-        return -EINVAL;
+        goto unlock;

     err = nl80211_parse_chandef(rdev, info, &chandef);
     if (err)
-        return err;
-
-    if (netif_carrier_ok(dev))
-        return -EBUSY;
-
-    if (wdev->cac_started)
-        return -EBUSY;
+        goto unlock;

     err = cfg80211_chandef_dfs_required(wiphy, &chandef, wdev->iftype);
     if (err < 0)
-        return err;
+        goto unlock;

-    if (err == 0)
-        return -EINVAL;
+    if (err == 0) {
+        err = -EINVAL;
+        goto unlock;
+    }

-    if (!cfg80211_chandef_dfs_usable(wiphy, &chandef))
-        return -EINVAL;
+    if (!cfg80211_chandef_dfs_usable(wiphy, &chandef)) {
+        err = -EINVAL;
+        goto unlock;
+    }
+
+    if (nla_get_flag(info->attrs[NL80211_ATTR_RADAR_BACKGROUND])) {
+        err = cfg80211_start_background_radar_detection(rdev, wdev,
+                                                        &chandef);
+        goto unlock;
+    }
+
+    if (netif_carrier_ok(dev)) {
+        err = -EBUSY;
+        goto unlock;
+    }
+
+    if (wdev->cac_started) {
+        err = -EBUSY;
+        goto unlock;
+    }

     /* CAC start is offloaded to HW and can't be started manually */
-    if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD))
-        return -EOPNOTSUPP;
+    if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD)) {
+        err = -EOPNOTSUPP;
+        goto unlock;
+    }

-    if (!rdev->ops->start_radar_detection)
-        return -EOPNOTSUPP;
+    if (!rdev->ops->start_radar_detection) {
+        err = -EOPNOTSUPP;
+        goto unlock;
+    }

     cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef);
     if (WARN_ON(!cac_time_ms))
@@ -9318,6 +9347,9 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
         wdev->cac_start_time = jiffies;
         wdev->cac_time_ms = cac_time_ms;
     }
+unlock:
+    wiphy_unlock(wiphy);
+
     return err;
 }
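From userspace, the background CAC is requested with the existing NL80211_CMD_RADAR_DETECT command plus the new NL80211_ATTR_RADAR_BACKGROUND flag handled above. A trimmed libnl-3 sketch, assuming a 20 MHz chandef expressed via NL80211_ATTR_WIPHY_FREQ alone and eliding error handling; a real caller also needs admin privileges and, for wider channels, the channel-width attributes:

    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/nl80211.h>

    static int example_start_background_cac(int ifindex, uint32_t freq_mhz)
    {
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *msg = nlmsg_alloc();
        int family, err;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, "nl80211");

        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    NL80211_CMD_RADAR_DETECT, 0);
        nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
        nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq_mhz);
        nla_put_flag(msg, NL80211_ATTR_RADAR_BACKGROUND); /* new attribute */

        err = nl_send_auto(sk, msg);
        nlmsg_free(msg);
        nl_socket_free(sk);
        return err < 0 ? err : 0;
    }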
@@ -15954,7 +15986,8 @@ static const struct genl_small_ops nl80211_small_ops[] = {
         .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
         .doit = nl80211_start_radar_detection,
         .flags = GENL_UNS_ADMIN_PERM,
-        .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+        .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+                          NL80211_FLAG_NO_WIPHY_MTX,
     },
     {
         .cmd = NL80211_CMD_GET_PROTOCOL_FEATURES,
@@ -17035,6 +17068,44 @@ static void nl80211_send_remain_on_chan_event(
     nlmsg_free(msg);
 }

+void cfg80211_assoc_comeback(struct net_device *netdev,
+                             struct cfg80211_bss *bss, u32 timeout)
+{
+    struct wireless_dev *wdev = netdev->ieee80211_ptr;
+    struct wiphy *wiphy = wdev->wiphy;
+    struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+    struct sk_buff *msg;
+    void *hdr;
+
+    trace_cfg80211_assoc_comeback(wdev, bss->bssid, timeout);
+
+    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+    if (!msg)
+        return;
+
+    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ASSOC_COMEBACK);
+    if (!hdr) {
+        nlmsg_free(msg);
+        return;
+    }
+
+    if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+        nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+        nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bss->bssid) ||
+        nla_put_u32(msg, NL80211_ATTR_TIMEOUT, timeout))
+        goto nla_put_failure;
+
+    genlmsg_end(msg, hdr);
+
+    genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
+                            NL80211_MCGRP_MLME, GFP_KERNEL);
+    return;
+
+nla_put_failure:
+    nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_assoc_comeback);
+
 void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
                                struct ieee80211_channel *chan,
                                unsigned int duration, gfp_t gfp)
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index cc1efec4b27b..439bcf52369c 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1395,4 +1395,21 @@ rdev_set_fils_aad(struct cfg80211_registered_device *rdev,
     return ret;
 }

+static inline int
+rdev_set_radar_background(struct cfg80211_registered_device *rdev,
+                          struct cfg80211_chan_def *chandef)
+{
+    struct wiphy *wiphy = &rdev->wiphy;
+    int ret;
+
+    if (!rdev->ops->set_radar_background)
+        return -EOPNOTSUPP;
+
+    trace_rdev_set_radar_background(wiphy, chandef);
+    ret = rdev->ops->set_radar_background(wiphy, chandef);
+    trace_rdev_return_int(wiphy, ret);
+
+    return ret;
+}
+
 #endif /* __CFG80211_RDEV_OPS */
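The rdev_set_radar_background() wrapper above defines the driver contract: the callback is optional, and a NULL chandef releases the offchannel chain (cfg80211_stop_background_radar_detection() passes NULL). A sketch of a driver-side implementation; both example_hw_* helpers are hypothetical:

    static int example_set_radar_background(struct wiphy *wiphy,
                                            struct cfg80211_chan_def *chandef)
    {
        if (!chandef)
            /* hypothetical: power down the dedicated chain */
            return example_hw_stop_offchan_chain(wiphy);

        /* hypothetical: tune the chain and start the hardware CAC */
        return example_hw_start_offchan_cac(wiphy, chandef);
    }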
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index df87c7f3a049..ec25924a1c26 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -133,6 +133,7 @@ static u32 reg_is_indoor_portid;

 static void restore_regulatory_settings(bool reset_user, bool cached);
 static void print_regdomain(const struct ieee80211_regdomain *rd);
+static void reg_process_hint(struct regulatory_request *reg_request);

 static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
 {
@@ -1098,6 +1099,8 @@ int reg_reload_regdb(void)
     const struct firmware *fw;
     void *db;
     int err;
+    const struct ieee80211_regdomain *current_regdomain;
+    struct regulatory_request *request;

     err = request_firmware(&fw, "regulatory.db", &reg_pdev->dev);
     if (err)
@@ -1118,8 +1121,26 @@ int reg_reload_regdb(void)
     if (!IS_ERR_OR_NULL(regdb))
         kfree(regdb);
     regdb = db;
-    rtnl_unlock();

+    /* reset regulatory domain */
+    current_regdomain = get_cfg80211_regdom();
+
+    request = kzalloc(sizeof(*request), GFP_KERNEL);
+    if (!request) {
+        err = -ENOMEM;
+        goto out_unlock;
+    }
+
+    request->wiphy_idx = WIPHY_IDX_INVALID;
+    request->alpha2[0] = current_regdomain->alpha2[0];
+    request->alpha2[1] = current_regdomain->alpha2[1];
+    request->initiator = NL80211_REGDOM_SET_BY_CORE;
+    request->user_reg_hint_type = NL80211_USER_REG_HINT_USER;
+
+    reg_process_hint(request);
+
+out_unlock:
+    rtnl_unlock();
 out:
     release_firmware(fw);
     return err;
@@ -2338,6 +2359,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
     struct cfg80211_chan_def chandef = {};
     struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
     enum nl80211_iftype iftype;
+    bool ret;

     wdev_lock(wdev);
     iftype = wdev->iftype;
@@ -2349,6 +2371,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
     switch (iftype) {
     case NL80211_IFTYPE_AP:
     case NL80211_IFTYPE_P2P_GO:
+    case NL80211_IFTYPE_MESH_POINT:
         if (!wdev->beacon_interval)
             goto wdev_inactive_unlock;
         chandef = wdev->chandef;
@@ -2387,7 +2410,12 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
     case NL80211_IFTYPE_AP:
     case NL80211_IFTYPE_P2P_GO:
     case NL80211_IFTYPE_ADHOC:
-        return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
+    case NL80211_IFTYPE_MESH_POINT:
+        wiphy_lock(wiphy);
+        ret = cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
+        wiphy_unlock(wiphy);
+
+        return ret;
     case NL80211_IFTYPE_STATION:
     case NL80211_IFTYPE_P2P_CLIENT:
         return cfg80211_chandef_usable(wiphy, &chandef,
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 22e92be61938..b888522f133b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -406,22 +406,20 @@ static int
 cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss,
                            struct cfg80211_bss *nontrans_bss)
 {
-    const u8 *ssid;
-    size_t ssid_len;
+    const struct element *ssid_elem;
     struct cfg80211_bss *bss = NULL;

     rcu_read_lock();
-    ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
-    if (!ssid) {
+    ssid_elem = ieee80211_bss_get_elem(nontrans_bss, WLAN_EID_SSID);
+    if (!ssid_elem) {
         rcu_read_unlock();
         return -EINVAL;
     }
-    ssid_len = ssid[1];
-    ssid = ssid + 2;

     /* check if nontrans_bss is in the list */
     list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
-        if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) {
+        if (is_bss(bss, nontrans_bss->bssid, ssid_elem->data,
+                   ssid_elem->datalen)) {
             rcu_read_unlock();
             return 0;
         }
@@ -1795,33 +1793,52 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
 }

 int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
-                                    enum nl80211_band band)
+                                    enum nl80211_band band,
+                                    enum cfg80211_bss_frame_type ftype)
 {
-    const u8 *tmp;
-    int channel_number = -1;
+    const struct element *tmp;
+
+    if (band == NL80211_BAND_6GHZ) {
+        struct ieee80211_he_operation *he_oper;

-    if (band == NL80211_BAND_S1GHZ) {
-        tmp = cfg80211_find_ie(WLAN_EID_S1G_OPERATION, ie, ielen);
-        if (tmp && tmp[1] >= sizeof(struct ieee80211_s1g_oper_ie)) {
-            struct ieee80211_s1g_oper_ie *s1gop = (void *)(tmp + 2);
+        tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie,
+                                     ielen);
+        if (tmp && tmp->datalen >= sizeof(*he_oper) &&
+            tmp->datalen >= ieee80211_he_oper_size(&tmp->data[1])) {
+            const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
+
+            he_oper = (void *)&tmp->data[1];
+
+            he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
+            if (!he_6ghz_oper)
+                return -1;

-            channel_number = s1gop->primary_ch;
+            if (ftype != CFG80211_BSS_FTYPE_BEACON ||
+                he_6ghz_oper->control & IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON)
+                return he_6ghz_oper->primary;
+        }
+    } else if (band == NL80211_BAND_S1GHZ) {
+        tmp = cfg80211_find_elem(WLAN_EID_S1G_OPERATION, ie, ielen);
+        if (tmp && tmp->datalen >= sizeof(struct ieee80211_s1g_oper_ie)) {
+            struct ieee80211_s1g_oper_ie *s1gop = (void *)tmp->data;
+
+            return s1gop->primary_ch;
         }
     } else {
-        tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
-        if (tmp && tmp[1] == 1) {
-            channel_number = tmp[2];
-        } else {
-            tmp = cfg80211_find_ie(WLAN_EID_HT_OPERATION, ie, ielen);
-            if (tmp && tmp[1] >= sizeof(struct ieee80211_ht_operation)) {
-                struct ieee80211_ht_operation *htop = (void *)(tmp + 2);
+        tmp = cfg80211_find_elem(WLAN_EID_DS_PARAMS, ie, ielen);
+        if (tmp && tmp->datalen == 1)
+            return tmp->data[0];

-                channel_number = htop->primary_chan;
-            }
+        tmp = cfg80211_find_elem(WLAN_EID_HT_OPERATION, ie, ielen);
+        if (tmp &&
+            tmp->datalen >= sizeof(struct ieee80211_ht_operation)) {
+            struct ieee80211_ht_operation *htop = (void *)tmp->data;
+
+            return htop->primary_chan;
         }
     }

-    return channel_number;
+    return -1;
 }
 EXPORT_SYMBOL(cfg80211_get_ies_channel_number);
@@ -1831,18 +1848,20 @@ EXPORT_SYMBOL(cfg80211_get_ies_channel_number);
  * from neighboring channels and the Beacon frames use the DSSS Parameter Set
  * element to indicate the current (transmitting) channel, but this might also
  * be needed on other bands if RX frequency does not match with the actual
- * operating channel of a BSS.
+ * operating channel of a BSS, or if the AP reports a different primary channel.
  */
 static struct ieee80211_channel *
 cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
                          struct ieee80211_channel *channel,
-                         enum nl80211_bss_scan_width scan_width)
+                         enum nl80211_bss_scan_width scan_width,
+                         enum cfg80211_bss_frame_type ftype)
 {
     u32 freq;
     int channel_number;
     struct ieee80211_channel *alt_channel;

-    channel_number = cfg80211_get_ies_channel_number(ie, ielen, channel->band);
+    channel_number = cfg80211_get_ies_channel_number(ie, ielen,
+                                                     channel->band, ftype);

     if (channel_number < 0) {
         /* No channel information in frame payload */
@@ -1850,6 +1869,16 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
     }

     freq = ieee80211_channel_to_freq_khz(channel_number, channel->band);
+
+    /*
+     * In 6GHz, duplicated beacon indication is relevant for
+     * beacons only.
+     */
+    if (channel->band == NL80211_BAND_6GHZ &&
+        (freq == channel->center_freq ||
+         abs(freq - channel->center_freq) > 80))
+        return channel;
+
     alt_channel = ieee80211_get_channel_khz(wiphy, freq);
     if (!alt_channel) {
         if (channel->band == NL80211_BAND_2GHZ) {
@@ -1911,7 +1940,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
         return NULL;

     channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
-                                       data->scan_width);
+                                       data->scan_width, ftype);
     if (!channel)
         return NULL;

@@ -2234,7 +2263,8 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
                                    struct ieee80211_mgmt *mgmt, size_t len)
 {
     u8 *ie, *new_ie, *pos;
-    const u8 *nontrans_ssid, *trans_ssid, *mbssid;
+    const struct element *nontrans_ssid;
+    const u8 *trans_ssid, *mbssid;
     size_t ielen = len - offsetof(struct ieee80211_mgmt,
                                   u.probe_resp.variable);
     size_t new_ie_len;
@@ -2261,11 +2291,11 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
         return;
     new_ie_len -= mbssid[1];

-    nontrans_ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
+    nontrans_ssid = ieee80211_bss_get_elem(nontrans_bss, WLAN_EID_SSID);
     if (!nontrans_ssid)
         return;

-    new_ie_len += nontrans_ssid[1];
+    new_ie_len += nontrans_ssid->datalen;

     /* generate new ie for nontrans BSS
      * 1. replace SSID with nontrans BSS' SSID
@@ -2282,7 +2312,7 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
     pos = new_ie;

     /* copy the nontransmitted SSID */
-    cpy_len = nontrans_ssid[1] + 2;
+    cpy_len = nontrans_ssid->datalen + 2;
     memcpy(pos, nontrans_ssid, cpy_len);
     pos += cpy_len;

     /* copy the IEs between SSID and MBSSID */
@@ -2333,6 +2363,7 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
     size_t ielen, min_hdr_len = offsetof(struct ieee80211_mgmt,
                                          u.probe_resp.variable);
     int bss_type;
+    enum cfg80211_bss_frame_type ftype;

     BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
                  offsetof(struct ieee80211_mgmt, u.beacon.variable));
@@ -2369,8 +2400,16 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
         variable = ext->u.s1g_beacon.variable;
     }

+    if (ieee80211_is_beacon(mgmt->frame_control))
+        ftype = CFG80211_BSS_FTYPE_BEACON;
+    else if (ieee80211_is_probe_resp(mgmt->frame_control))
+        ftype = CFG80211_BSS_FTYPE_PRESP;
+    else
+        ftype = CFG80211_BSS_FTYPE_UNKNOWN;
+
     channel = cfg80211_get_bss_channel(wiphy, variable,
-                                       ielen, data->chan, data->scan_width);
+                                       ielen, data->chan, data->scan_width,
+                                       ftype);
     if (!channel)
         return NULL;

@@ -2687,7 +2726,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
     struct cfg80211_registered_device *rdev;
     struct wiphy *wiphy;
     struct iw_scan_req *wreq = NULL;
-    struct cfg80211_scan_request *creq = NULL;
+    struct cfg80211_scan_request *creq;
     int i, err, n_channels = 0;
     enum nl80211_band band;

@@ -2702,10 +2741,8 @@ int cfg80211_wext_siwscan(struct net_device *dev,
     if (IS_ERR(rdev))
         return PTR_ERR(rdev);

-    if (rdev->scan_req || rdev->scan_msg) {
-        err = -EBUSY;
-        goto out;
-    }
+    if (rdev->scan_req || rdev->scan_msg)
+        return -EBUSY;

     wiphy = &rdev->wiphy;

@@ -2718,10 +2755,8 @@ int cfg80211_wext_siwscan(struct net_device *dev,

     creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
                    n_channels * sizeof(void *),
                    GFP_ATOMIC);
-    if (!creq) {
-        err = -ENOMEM;
-        goto out;
-    }
+    if (!creq)
+        return -ENOMEM;

     creq->wiphy = wiphy;
     creq->wdev = dev->ieee80211_ptr;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 08a70b4f090c..ff4d48fcbfb2 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -680,7 +680,9 @@ void __cfg80211_connect_result(struct net_device *dev,
                                bool wextev)
 {
     struct wireless_dev *wdev = dev->ieee80211_ptr;
-    const u8 *country_ie;
+    const struct element *country_elem;
+    const u8 *country_data;
+    u8 country_datalen;
 #ifdef CONFIG_CFG80211_WEXT
     union iwreq_data wrqu;
 #endif
@@ -762,26 +764,22 @@ void __cfg80211_connect_result(struct net_device *dev,
     cfg80211_upload_connect_keys(wdev);

     rcu_read_lock();
-    country_ie = ieee80211_bss_get_ie(cr->bss, WLAN_EID_COUNTRY);
-    if (!country_ie) {
+    country_elem = ieee80211_bss_get_elem(cr->bss, WLAN_EID_COUNTRY);
+    if (!country_elem) {
         rcu_read_unlock();
         return;
     }

-    country_ie = kmemdup(country_ie, 2 + country_ie[1], GFP_ATOMIC);
+    country_datalen = country_elem->datalen;
+    country_data = kmemdup(country_elem->data, country_datalen, GFP_ATOMIC);
     rcu_read_unlock();

-    if (!country_ie)
+    if (!country_data)
         return;

-    /*
-     * ieee80211_bss_get_ie() ensures we can access:
-     *  - country_ie + 2, the start of the country ie data, and
-     *  - and country_ie[1] which is the IE length
-     */
     regulatory_hint_country_ie(wdev->wiphy, cr->bss->channel->band,
-                               country_ie + 2, country_ie[1]);
-    kfree(country_ie);
+                               country_data, country_datalen);
+    kfree(country_data);
 }

 /* Consumes bss object one way or another */
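The conversions in nl80211.c, scan.c, sme.c and wext-sme.c all follow the same pattern: ieee80211_bss_get_elem() returns a struct element whose data/datalen fields replace the raw ie + 2/ie[1] offsets of ieee80211_bss_get_ie(). A minimal sketch of the pattern, assuming an RCU-protected bss:

    const struct element *elem;
    u8 buf[IEEE80211_MAX_SSID_LEN] = {};

    rcu_read_lock();
    elem = ieee80211_bss_get_elem(bss, WLAN_EID_SSID);
    if (elem && elem->datalen <= sizeof(buf))
        memcpy(buf, elem->data, elem->datalen);
    rcu_read_unlock();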
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index ad6c16a06bcb..228079d7690a 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -3053,18 +3053,21 @@ TRACE_EVENT(cfg80211_ch_switch_started_notify,
 );

 TRACE_EVENT(cfg80211_radar_event,
-    TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
-    TP_ARGS(wiphy, chandef),
+    TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
+             bool offchan),
+    TP_ARGS(wiphy, chandef, offchan),
     TP_STRUCT__entry(
         WIPHY_ENTRY
         CHAN_DEF_ENTRY
+        __field(bool, offchan)
     ),
     TP_fast_assign(
         WIPHY_ASSIGN;
         CHAN_DEF_ASSIGN(chandef);
+        __entry->offchan = offchan;
     ),
-    TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
-              WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+    TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", offchan %d",
+              WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->offchan)
 );

 TRACE_EVENT(cfg80211_cac_event,
@@ -3674,6 +3677,42 @@ TRACE_EVENT(cfg80211_bss_color_notify,
               __entry->color_bitmap)
 );

+TRACE_EVENT(rdev_set_radar_background,
+    TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
+
+    TP_ARGS(wiphy, chandef),
+
+    TP_STRUCT__entry(
+        WIPHY_ENTRY
+        CHAN_DEF_ENTRY
+    ),
+
+    TP_fast_assign(
+        WIPHY_ASSIGN;
+        CHAN_DEF_ASSIGN(chandef)
+    ),
+
+    TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
+              WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+);
+
+TRACE_EVENT(cfg80211_assoc_comeback,
+    TP_PROTO(struct wireless_dev *wdev, const u8 *bssid, u32 timeout),
+    TP_ARGS(wdev, bssid, timeout),
+    TP_STRUCT__entry(
+        WDEV_ENTRY
+        MAC_ENTRY(bssid)
+        __field(u32, timeout)
+    ),
+    TP_fast_assign(
+        WDEV_ASSIGN;
+        MAC_ASSIGN(bssid, bssid);
+        __entry->timeout = timeout;
+    ),
+    TP_printk(WDEV_PR_FMT ", " MAC_PR_FMT ", timeout: %u TUs",
+              WDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->timeout)
+);
+
 #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */

 #undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 193a18a53142..cd09a9042261 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -212,18 +212,18 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,

     wdev_lock(wdev);
     if (wdev->current_bss) {
-        const u8 *ie;
+        const struct element *ssid_elem;

         rcu_read_lock();
-        ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
-                                  WLAN_EID_SSID);
-        if (ie) {
+        ssid_elem = ieee80211_bss_get_elem(&wdev->current_bss->pub,
+                                           WLAN_EID_SSID);
+        if (ssid_elem) {
             data->flags = 1;
-            data->length = ie[1];
+            data->length = ssid_elem->datalen;
             if (data->length > IW_ESSID_MAX_SIZE)
                 ret = -EINVAL;
             else
-                memcpy(ssid, ie + 2, data->length);
+                memcpy(ssid, ssid_elem->data, data->length);
         }
         rcu_read_unlock();
     } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 28ef3f4465ae..e3d35850fdea 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -677,8 +677,6 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
     struct xdp_sock *xs = xdp_sk(sk);
     struct xsk_buff_pool *pool;

-    sock_poll_wait(file, sock, wait);
-
     if (unlikely(!xsk_is_bound(xs)))
         return mask;

@@ -690,6 +688,8 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
         else
             /* Poll needs to drive Tx also in copy mode */
             __xsk_sendmsg(sk);
+    } else {
+        sock_poll_wait(file, sock, wait);
     }

     if (xs->rx && !xskq_prod_is_empty(xs->rx))
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index bc4ad48ea4f0..fd39bb660ebc 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -83,6 +83,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
         xskb = &pool->heads[i];
         xskb->pool = pool;
         xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
+        INIT_LIST_HEAD(&xskb->free_list_node);
         if (pool->unaligned)
             pool->free_heads[i] = xskb;
         else
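The one-line xsk_buff_pool.c change initialises each xskb's free_list_node when the pool is created. An initialised list_head is what makes the usual kernel idiom below safe for buffers that were never added to pool->free_list; this is a generic sketch of the idiom, not the exact call site:

    /* Safe only because free_list_node was INIT_LIST_HEAD()'ed at setup. */
    if (!list_empty(&xskb->free_list_node))
        list_del_init(&xskb->free_list_node);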