Merge tag 'for-net-2025-04-25' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth

Luiz Augusto von Dentz says:

====================
bluetooth pull request for net:

 - btmtksdio: Check function enabled before doing close
 - btmtksdio: Do close if SDIO card removed without close
 - btusb: avoid NULL pointer dereference in skb_dequeue()
 - btintel_pcie: Avoid redundant buffer allocation
 - btintel_pcie: Add additional checks to clear TX/RX paths
 - hci_conn: Fix not setting conn_timeout for Broadcast Receiver
 - hci_conn: Fix not setting timeout for BIG Create Sync

* tag 'for-net-2025-04-25' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth:
  Bluetooth: L2CAP: copy RX timestamp to new fragments
  Bluetooth: btintel_pcie: Add additional to checks to clear TX/RX paths
  Bluetooth: btmtksdio: Do close if SDIO card removed without close
  Bluetooth: btmtksdio: Check function enabled before doing close
  Bluetooth: btusb: avoid NULL pointer dereference in skb_dequeue()
  Bluetooth: btintel_pcie: Avoid redundant buffer allocation
  Bluetooth: hci_conn: Fix not setting timeout for BIG Create Sync
  Bluetooth: hci_conn: Fix not setting conn_timeout for Broadcast Receiver
====================

Link: https://patch.msgid.link/20250425192412.1578759-1-luiz.dentz@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-04-28 15:51:44 -07:00
11 changed files with 300 additions and 272 deletions
+31 -26
View File
@@ -957,8 +957,10 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
/* This is a debug event that comes from IML and OP image when it
* starts execution. There is no need to pass this event to the stack.
*/
if (skb->data[2] == 0x97)
if (skb->data[2] == 0x97) {
hci_recv_diag(hdev, skb);
return 0;
}
}
return hci_recv_frame(hdev, skb);
@@ -974,7 +976,6 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
u8 pkt_type;
u16 plen;
u32 pcie_pkt_type;
struct sk_buff *new_skb;
void *pdata;
struct hci_dev *hdev = data->hdev;
@@ -1051,24 +1052,20 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
if (!new_skb) {
bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
skb->len);
ret = -ENOMEM;
goto exit_error;
}
hci_skb_pkt_type(new_skb) = pkt_type;
skb_put_data(new_skb, skb->data, plen);
hci_skb_pkt_type(skb) = pkt_type;
hdev->stat.byte_rx += plen;
skb_trim(skb, plen);
if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
ret = btintel_pcie_recv_event(hdev, new_skb);
ret = btintel_pcie_recv_event(hdev, skb);
else
ret = hci_recv_frame(hdev, new_skb);
ret = hci_recv_frame(hdev, skb);
skb = NULL; /* skb is freed in the callee */
exit_error:
if (skb)
kfree_skb(skb);
if (ret)
hdev->stat.err_rx++;
@@ -1202,8 +1199,6 @@ static void btintel_pcie_rx_work(struct work_struct *work)
struct btintel_pcie_data *data = container_of(work,
struct btintel_pcie_data, rx_work);
struct sk_buff *skb;
int err;
struct hci_dev *hdev = data->hdev;
if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
/* Unlike usb products, controller will not send hardware
@@ -1224,11 +1219,7 @@ static void btintel_pcie_rx_work(struct work_struct *work)
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
err = btintel_pcie_recv_frame(data, skb);
if (err)
bt_dev_err(hdev, "Failed to send received frame: %d",
err);
kfree_skb(skb);
btintel_pcie_recv_frame(data, skb);
}
}
@@ -1281,10 +1272,8 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
/* Check CR_TIA and CR_HIA for change */
if (cr_tia == cr_hia) {
bt_dev_warn(hdev, "RXQ: no new CD found");
if (cr_tia == cr_hia)
return;
}
rxq = &data->rxq;
@@ -1320,6 +1309,16 @@ static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
return IRQ_WAKE_THREAD;
}
/* Return true when the RX queue has no unprocessed completion descriptors,
 * i.e. the hardware head index (cr_hia) has caught up with the driver tail
 * index (cr_tia) for the RX queue.
 */
static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
{
return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
}
/* Return true when the TX ack queue has no pending acknowledgements,
 * i.e. the driver tail index (cr_tia) equals the hardware head index
 * (cr_hia) for the TX queue.
 */
static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
{
return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
}
static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
{
struct msix_entry *entry = dev_id;
@@ -1351,12 +1350,18 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_gp0_handler(data);
/* For TX */
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
btintel_pcie_msix_tx_handle(data);
if (!btintel_pcie_is_rxq_empty(data))
btintel_pcie_msix_rx_handle(data);
}
/* For RX */
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
btintel_pcie_msix_rx_handle(data);
if (!btintel_pcie_is_txackq_empty(data))
btintel_pcie_msix_tx_handle(data);
}
/*
* Before sending the interrupt the HW disables it to prevent a nested
+10 -2
View File
@@ -723,6 +723,10 @@ static int btmtksdio_close(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
/* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */
if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
return 0;
sdio_claim_host(bdev->func);
/* Disable interrupt */
@@ -1443,11 +1447,15 @@ static void btmtksdio_remove(struct sdio_func *func)
if (!bdev)
return;
hdev = bdev->hdev;
/* Make sure to call btmtksdio_close before removing sdio card */
if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
btmtksdio_close(hdev);
/* Be consistent the state in btmtksdio_probe */
pm_runtime_get_noresume(bdev->dev);
hdev = bdev->hdev;
sdio_set_drvdata(func, NULL);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
+73 -28
View File
@@ -3010,22 +3010,16 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
}
/*
* ==0: not a dump pkt.
* < 0: fails to handle a dump pkt
* > 0: otherwise.
*/
/* Return: 0 on success, negative errno on failure. */
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
int ret = 1;
int ret = 0;
u8 pkt_type;
u8 *sk_ptr;
unsigned int sk_len;
u16 seqno;
u32 dump_size;
struct hci_event_hdr *event_hdr;
struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
struct btusb_data *btdata = hci_get_drvdata(hdev);
struct usb_device *udev = btdata->udev;
@@ -3035,30 +3029,14 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
sk_len = skb->len;
if (pkt_type == HCI_ACLDATA_PKT) {
acl_hdr = hci_acl_hdr(skb);
if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
return 0;
sk_ptr += HCI_ACL_HDR_SIZE;
sk_len -= HCI_ACL_HDR_SIZE;
event_hdr = (struct hci_event_hdr *)sk_ptr;
} else {
event_hdr = hci_event_hdr(skb);
}
if ((event_hdr->evt != HCI_VENDOR_PKT)
|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
return 0;
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data))
|| (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
|| (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return 0;
/*it is dump pkt now*/
seqno = le16_to_cpu(dump_hdr->seqno);
if (seqno == 0) {
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@@ -3132,17 +3110,84 @@ out:
return ret;
}
/* Return: true if the ACL packet is a dump packet, false otherwise.
 *
 * A QCA memdump packet arrives as an ACL frame on the reserved memdump
 * handle, carrying a vendor HCI event that in turn carries a QCA dump
 * header. Each layer is validated in order; any mismatch means this is
 * ordinary ACL traffic.
 */
static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 *sk_ptr;
unsigned int sk_len;
struct hci_event_hdr *event_hdr;
struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
sk_ptr = skb->data;
sk_len = skb->len;
/* Dump packets are sent on a dedicated, reserved ACL handle */
acl_hdr = hci_acl_hdr(skb);
if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
return false;
/* Step past the ACL header; the payload must be a vendor event
 * whose declared length matches the remaining bytes exactly.
 */
sk_ptr += HCI_ACL_HDR_SIZE;
sk_len -= HCI_ACL_HDR_SIZE;
event_hdr = (struct hci_event_hdr *)sk_ptr;
if ((event_hdr->evt != HCI_VENDOR_PKT) ||
(event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
return false;
/* Finally validate the QCA dump header: enough bytes for the fixed
 * part, and the expected vendor-specific class/type values.
 */
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
(dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return false;
return true;
}
/* Return: true if the event packet is a dump packet, false otherwise.
 *
 * Same validation as acl_pkt_is_dump_qca() but for dump data delivered
 * directly as an HCI event: a vendor event with a consistent length,
 * wrapping a QCA dump header with the expected class and message type.
 */
static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 *sk_ptr;
unsigned int sk_len;
struct hci_event_hdr *event_hdr;
struct qca_dump_hdr *dump_hdr;
sk_ptr = skb->data;
sk_len = skb->len;
/* Must be a vendor event whose declared payload length matches the
 * bytes actually present after the event header.
 */
event_hdr = hci_event_hdr(skb);
if ((event_hdr->evt != HCI_VENDOR_PKT)
|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
return false;
/* Validate the embedded QCA dump header before claiming it's a dump */
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
(dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return false;
return true;
}
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
if (handle_dump_pkt_qca(hdev, skb))
return 0;
if (acl_pkt_is_dump_qca(hdev, skb))
return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
if (handle_dump_pkt_qca(hdev, skb))
return 0;
if (evt_pkt_is_dump_qca(hdev, skb))
return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
+3 -1
View File
@@ -1931,6 +1931,8 @@ struct hci_cp_le_pa_create_sync {
__u8 sync_cte_type;
} __packed;
#define HCI_OP_LE_PA_CREATE_SYNC_CANCEL 0x2045
#define HCI_OP_LE_PA_TERM_SYNC 0x2046
struct hci_cp_le_pa_term_sync {
__le16 handle;
@@ -2830,7 +2832,7 @@ struct hci_evt_le_create_big_complete {
__le16 bis_handle[];
} __packed;
#define HCI_EVT_LE_BIG_SYNC_ESTABILISHED 0x1d
#define HCI_EVT_LE_BIG_SYNC_ESTABLISHED 0x1d
struct hci_evt_le_big_sync_estabilished {
__u8 status;
__u8 handle;
+9 -11
View File
@@ -1113,10 +1113,8 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
return NULL;
}
static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev,
__u8 sid,
bdaddr_t *dst,
__u8 dst_type)
static inline struct hci_conn *
hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -1124,8 +1122,10 @@ static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
if (c->type != ISO_LINK || bacmp(&c->dst, dst) ||
c->dst_type != dst_type || c->sid != sid)
if (c->type != ISO_LINK)
continue;
if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags))
continue;
rcu_read_unlock();
@@ -1524,8 +1524,6 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
bool hci_iso_setup_path(struct hci_conn *conn);
int hci_le_create_cis_pending(struct hci_dev *hdev);
int hci_pa_create_sync_pending(struct hci_dev *hdev);
int hci_le_big_create_sync_pending(struct hci_dev *hdev);
int hci_conn_check_create_cis(struct hci_conn *conn);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
@@ -1566,9 +1564,9 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 data_len, __u8 *data);
struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid, struct bt_iso_qos *qos);
int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
struct bt_iso_qos *qos,
__u16 sync_handle, __u8 num_bis, __u8 bis[]);
int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
struct bt_iso_qos *qos, __u16 sync_handle,
__u8 num_bis, __u8 bis[]);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
+3
View File
@@ -185,3 +185,6 @@ int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
struct hci_conn_params *params);
int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn);
+7 -174
View File
@@ -2064,95 +2064,6 @@ static int create_big_sync(struct hci_dev *hdev, void *data)
return hci_le_create_big(conn, &conn->iso_qos);
}
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
bt_dev_dbg(hdev, "");
if (err)
bt_dev_err(hdev, "Unable to create PA: %d", err);
}
static bool hci_conn_check_create_pa_sync(struct hci_conn *conn)
{
if (conn->type != ISO_LINK || conn->sid == HCI_SID_INVALID)
return false;
return true;
}
static int create_pa_sync(struct hci_dev *hdev, void *data)
{
struct hci_cp_le_pa_create_sync cp = {0};
struct hci_conn *conn;
int err = 0;
hci_dev_lock(hdev);
rcu_read_lock();
/* The spec allows only one pending LE Periodic Advertising Create
* Sync command at a time. If the command is pending now, don't do
* anything. We check for pending connections after each PA Sync
* Established event.
*
* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
* page 2493:
*
* If the Host issues this command when another HCI_LE_Periodic_
* Advertising_Create_Sync command is pending, the Controller shall
* return the error code Command Disallowed (0x0C).
*/
list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
if (test_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags))
goto unlock;
}
list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
if (hci_conn_check_create_pa_sync(conn)) {
struct bt_iso_qos *qos = &conn->iso_qos;
cp.options = qos->bcast.options;
cp.sid = conn->sid;
cp.addr_type = conn->dst_type;
bacpy(&cp.addr, &conn->dst);
cp.skip = cpu_to_le16(qos->bcast.skip);
cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
cp.sync_cte_type = qos->bcast.sync_cte_type;
break;
}
}
unlock:
rcu_read_unlock();
hci_dev_unlock(hdev);
if (bacmp(&cp.addr, BDADDR_ANY)) {
hci_dev_set_flag(hdev, HCI_PA_SYNC);
set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
if (!err)
err = hci_update_passive_scan_sync(hdev);
if (err) {
hci_dev_clear_flag(hdev, HCI_PA_SYNC);
clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
}
}
return err;
}
int hci_pa_create_sync_pending(struct hci_dev *hdev)
{
/* Queue start pa_create_sync and scan */
return hci_cmd_sync_queue(hdev, create_pa_sync,
NULL, create_pa_complete);
}
struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid,
struct bt_iso_qos *qos)
@@ -2167,97 +2078,18 @@ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
conn->dst_type = dst_type;
conn->sid = sid;
conn->state = BT_LISTEN;
conn->conn_timeout = msecs_to_jiffies(qos->bcast.sync_timeout * 10);
hci_conn_hold(conn);
hci_pa_create_sync_pending(hdev);
hci_connect_pa_sync(hdev, conn);
return conn;
}
static bool hci_conn_check_create_big_sync(struct hci_conn *conn)
{
if (!conn->num_bis)
return false;
return true;
}
static void big_create_sync_complete(struct hci_dev *hdev, void *data, int err)
{
bt_dev_dbg(hdev, "");
if (err)
bt_dev_err(hdev, "Unable to create BIG sync: %d", err);
}
static int big_create_sync(struct hci_dev *hdev, void *data)
{
DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11);
struct hci_conn *conn;
rcu_read_lock();
pdu->num_bis = 0;
/* The spec allows only one pending LE BIG Create Sync command at
* a time. If the command is pending now, don't do anything. We
* check for pending connections after each BIG Sync Established
* event.
*
* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
* page 2586:
*
* If the Host sends this command when the Controller is in the
* process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
* Established event has not been generated, the Controller shall
* return the error code Command Disallowed (0x0C).
*/
list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
if (test_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags))
goto unlock;
}
list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
if (hci_conn_check_create_big_sync(conn)) {
struct bt_iso_qos *qos = &conn->iso_qos;
set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
pdu->handle = qos->bcast.big;
pdu->sync_handle = cpu_to_le16(conn->sync_handle);
pdu->encryption = qos->bcast.encryption;
memcpy(pdu->bcode, qos->bcast.bcode,
sizeof(pdu->bcode));
pdu->mse = qos->bcast.mse;
pdu->timeout = cpu_to_le16(qos->bcast.timeout);
pdu->num_bis = conn->num_bis;
memcpy(pdu->bis, conn->bis, conn->num_bis);
break;
}
}
unlock:
rcu_read_unlock();
if (!pdu->num_bis)
return 0;
return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
struct_size(pdu, bis, pdu->num_bis), pdu);
}
int hci_le_big_create_sync_pending(struct hci_dev *hdev)
{
/* Queue big_create_sync */
return hci_cmd_sync_queue_once(hdev, big_create_sync,
NULL, big_create_sync_complete);
}
int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
struct bt_iso_qos *qos,
__u16 sync_handle, __u8 num_bis, __u8 bis[])
int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
struct bt_iso_qos *qos, __u16 sync_handle,
__u8 num_bis, __u8 bis[])
{
int err;
@@ -2274,9 +2106,10 @@ int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
hcon->num_bis = num_bis;
memcpy(hcon->bis, bis, num_bis);
hcon->conn_timeout = msecs_to_jiffies(qos->bcast.timeout * 10);
}
return hci_le_big_create_sync_pending(hdev);
return hci_connect_big_sync(hdev, hcon);
}
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
+4 -11
View File
@@ -6378,8 +6378,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
hci_dev_clear_flag(hdev, HCI_PA_SYNC);
conn = hci_conn_hash_lookup_sid(hdev, ev->sid, &ev->bdaddr,
ev->bdaddr_type);
conn = hci_conn_hash_lookup_create_pa_sync(hdev);
if (!conn) {
bt_dev_err(hdev,
"Unable to find connection for dst %pMR sid 0x%2.2x",
@@ -6418,9 +6417,6 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
}
unlock:
/* Handle any other pending PA sync command */
hci_pa_create_sync_pending(hdev);
hci_dev_unlock(hdev);
}
@@ -6932,7 +6928,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
flex_array_size(ev, bis, ev->num_bis)))
return;
@@ -7003,9 +6999,6 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
}
unlock:
/* Handle any other pending BIG sync command */
hci_le_big_create_sync_pending(hdev);
hci_dev_unlock(hdev);
}
@@ -7127,8 +7120,8 @@ static const struct hci_le_ev {
hci_le_create_big_complete_evt,
sizeof(struct hci_evt_le_create_big_complete),
HCI_MAX_EVENT_SIZE),
/* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */
HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
/* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABLISHED] */
HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
hci_le_big_sync_established_evt,
sizeof(struct hci_evt_le_big_sync_estabilished),
HCI_MAX_EVENT_SIZE),
+145 -5
View File
@@ -2693,16 +2693,16 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
/* Force address filtering if PA Sync is in progress */
if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
struct hci_cp_le_pa_create_sync *sent;
struct hci_conn *conn;
sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
if (sent) {
conn = hci_conn_hash_lookup_create_pa_sync(hdev);
if (conn) {
struct conn_params pa;
memset(&pa, 0, sizeof(pa));
bacpy(&pa.addr, &sent->addr);
pa.addr_type = sent->addr_type;
bacpy(&pa.addr, &conn->dst);
pa.addr_type = conn->dst_type;
/* Clear first since there could be addresses left
* behind.
@@ -6895,3 +6895,143 @@ int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
/* Completion callback for hci_le_pa_create_sync(): on failure, drop the
 * HCI_PA_SYNC flag and restore passive scanning so the controller is not
 * left in PA-sync-pending state. A -ECANCELED error means the connection
 * went away before the command ran, so no scan update is needed.
 */
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
bt_dev_dbg(hdev, "err %d", err);
if (!err)
return;
hci_dev_clear_flag(hdev, HCI_PA_SYNC);
if (err == -ECANCELED)
return;
hci_dev_lock(hdev);
hci_update_passive_scan_sync(hdev);
hci_dev_unlock(hdev);
}
/* cmd_sync work function: issue HCI_LE_Periodic_Advertising_Create_Sync for
 * the connection passed in @data and wait for the PA Sync Established event.
 *
 * Return: 0 on success, -ECANCELED if the connection is gone, -EBUSY if a
 * PA sync is already in progress, or a negative errno from the command.
 */
static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
{
struct hci_cp_le_pa_create_sync cp;
struct hci_conn *conn = data;
struct bt_iso_qos *qos = &conn->iso_qos;
int err;
/* The conn may have been freed/closed while queued */
if (!hci_conn_valid(hdev, conn))
return -ECANCELED;
/* Only one PA create sync may be outstanding at a time */
if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
return -EBUSY;
/* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can
 * program the address in the allow list so PA advertisements can be
 * received.
 */
set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
hci_update_passive_scan_sync(hdev);
/* Build the command parameters from the connection's broadcast QoS */
memset(&cp, 0, sizeof(cp));
cp.options = qos->bcast.options;
cp.sid = conn->sid;
cp.addr_type = conn->dst_type;
bacpy(&cp.addr, &conn->dst);
cp.skip = cpu_to_le16(qos->bcast.skip);
cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
cp.sync_cte_type = qos->bcast.sync_cte_type;
/* The spec allows only one pending LE Periodic Advertising Create
 * Sync command at a time so we forcefully wait for PA Sync Established
 * event since cmd_work can only schedule one command at a time.
 *
 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
 * page 2493:
 *
 * If the Host issues this command when another HCI_LE_Periodic_
 * Advertising_Create_Sync command is pending, the Controller shall
 * return the error code Command Disallowed (0x0C).
 */
err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_PA_CREATE_SYNC,
sizeof(cp), &cp,
HCI_EV_LE_PA_SYNC_ESTABLISHED,
conn->conn_timeout, NULL);
/* On timeout, cancel the pending create sync so the controller does
 * not stay stuck waiting for the periodic train.
 */
if (err == -ETIMEDOUT)
__hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
0, NULL, HCI_CMD_TIMEOUT);
return err;
}
/* Queue a PA create sync for @conn on the cmd_sync workqueue.
 * hci_cmd_sync_queue_once ensures the same work is not queued twice.
 * Return: 0 on success or a negative errno from queueing.
 */
int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
return hci_cmd_sync_queue_once(hdev, hci_le_pa_create_sync, conn,
create_pa_complete);
}
/* Completion callback for hci_le_big_create_sync(): clear the
 * in-progress flag on the connection unless the work was cancelled
 * (-ECANCELED) or the connection no longer exists.
 */
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
struct hci_conn *conn = data;
bt_dev_dbg(hdev, "err %d", err);
if (err == -ECANCELED)
return;
if (hci_conn_valid(hdev, conn))
clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
}
/* cmd_sync work function: issue HCI_LE_BIG_Create_Sync for the connection
 * passed in @data and wait for the BIG Sync Established event.
 *
 * Return: 0 on success, -ECANCELED if the connection is gone, or a
 * negative errno from the command.
 */
static int hci_le_big_create_sync(struct hci_dev *hdev, void *data)
{
/* Flexible-array command buffer sized for up to 0x11 BIS entries */
DEFINE_FLEX(struct hci_cp_le_big_create_sync, cp, bis, num_bis, 0x11);
struct hci_conn *conn = data;
struct bt_iso_qos *qos = &conn->iso_qos;
int err;
/* The conn may have been freed/closed while queued */
if (!hci_conn_valid(hdev, conn))
return -ECANCELED;
set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
/* Build the command parameters from the connection's broadcast QoS */
memset(cp, 0, sizeof(*cp));
cp->handle = qos->bcast.big;
cp->sync_handle = cpu_to_le16(conn->sync_handle);
cp->encryption = qos->bcast.encryption;
memcpy(cp->bcode, qos->bcast.bcode, sizeof(cp->bcode));
cp->mse = qos->bcast.mse;
cp->timeout = cpu_to_le16(qos->bcast.timeout);
cp->num_bis = conn->num_bis;
memcpy(cp->bis, conn->bis, conn->num_bis);
/* The spec allows only one pending LE BIG Create Sync command at
 * a time, so we forcefully wait for BIG Sync Established event since
 * cmd_work can only schedule one command at a time.
 *
 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
 * page 2586:
 *
 * If the Host sends this command when the Controller is in the
 * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
 * Established event has not been generated, the Controller shall
 * return the error code Command Disallowed (0x0C).
 */
err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
struct_size(cp, bis, cp->num_bis), cp,
HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
conn->conn_timeout, NULL);
/* On timeout, terminate the BIG sync so the controller does not stay
 * stuck in the synchronizing state.
 */
if (err == -ETIMEDOUT)
hci_le_big_terminate_sync(hdev, cp->handle);
return err;
}
/* Queue a BIG create sync for @conn on the cmd_sync workqueue.
 * hci_cmd_sync_queue_once ensures the same work is not queued twice.
 * Return: 0 on success or a negative errno from queueing.
 */
int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
return hci_cmd_sync_queue_once(hdev, hci_le_big_create_sync, conn,
create_big_complete);
}
+12 -14
View File
@@ -1462,14 +1462,13 @@ static void iso_conn_big_sync(struct sock *sk)
lock_sock(sk);
if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
&iso_pi(sk)->qos,
iso_pi(sk)->sync_handle,
iso_pi(sk)->bc_num_bis,
iso_pi(sk)->bc_bis);
err = hci_conn_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
&iso_pi(sk)->qos,
iso_pi(sk)->sync_handle,
iso_pi(sk)->bc_num_bis,
iso_pi(sk)->bc_bis);
if (err)
bt_dev_err(hdev, "hci_le_big_create_sync: %d",
err);
bt_dev_err(hdev, "hci_big_create_sync: %d", err);
}
release_sock(sk);
@@ -1922,7 +1921,7 @@ static void iso_conn_ready(struct iso_conn *conn)
hcon);
} else if (test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
ev = hci_recv_event_data(hcon->hdev,
HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
HCI_EVT_LE_BIG_SYNC_ESTABLISHED);
/* Get reference to PA sync parent socket, if it exists */
parent = iso_get_sock(&hcon->src, &hcon->dst,
@@ -2113,12 +2112,11 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) &&
!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
err = hci_le_big_create_sync(hdev,
hcon,
&iso_pi(sk)->qos,
iso_pi(sk)->sync_handle,
iso_pi(sk)->bc_num_bis,
iso_pi(sk)->bc_bis);
err = hci_conn_big_create_sync(hdev, hcon,
&iso_pi(sk)->qos,
iso_pi(sk)->sync_handle,
iso_pi(sk)->bc_num_bis,
iso_pi(sk)->bc_bis);
if (err) {
bt_dev_err(hdev, "hci_le_big_create_sync: %d",
err);
+3
View File
@@ -7415,6 +7415,9 @@ static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
return -ENOMEM;
/* Init rx_len */
conn->rx_len = len;
skb_set_delivery_time(conn->rx_skb, skb->tstamp,
skb->tstamp_type);
}
/* Copy as much as the rx_skb can hold */