/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

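/* Each hci_cc_* handler below services one Command Complete event.
 * hci_cc_inquiry_cancel() additionally hands its (possibly rewritten)
 * status back to the event dispatcher through @new_status.
 */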
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive an Inquiry Complete event right
	 * before we receive the Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

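/* Most of the Command Complete handlers below follow the same shape:
 * validate the status in the return parameters, look up the affected
 * hci_conn by its connection handle under hci_dev_lock(), and mirror
 * the controller's reply into the kernel's connection state.
 */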
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

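/* Write-type commands only echo a status (plus the handle), so the value
 * that was written is recovered from the command we sent, via
 * hci_sent_cmd_data(); the get_unaligned_le16(sent + 2) below skips the
 * 2-byte connection handle at the start of the command parameters.
 */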
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}

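/* A successful HCI_Reset puts the controller back into its power-on
 * state, so drop every volatile flag and cached value that no longer
 * reflects what the hardware believes.
 */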
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}

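/* The local-name and class-of-device handlers notify the management
 * interface even on failure (the mgmt_*_complete() helpers take the
 * status), so pending mgmt commands complete with the proper error code.
 */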
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

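/* The local feature bits reported by the controller decide which ACL
 * and (e)SCO packet types the host may use; the mapping below mirrors
 * the LMP feature definitions from the Bluetooth Core specification.
 */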
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->err_data_reporting = rp->err_data_reporting;
}

static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_write_def_err_data_reporting *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return;

	hdev->err_data_reporting = cp->err_data_reporting;
}

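/* PIN-code, user-confirmation and passkey replies are forwarded to the
 * management interface so that userspace pairing agents see the outcome
 * of the responses they issued.
 */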
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv_instance;

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 (Set adv and Directed advs) */
		bacpy(&hdev->random_addr, &cp->bdaddr);
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&adv_instance->random_addr, &cp->bdaddr);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}

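/* During active scanning an advertising report is buffered in the
 * discovery state so it can be merged with the scan response that
 * usually follows it before being reported to the management layer.
 */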
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

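/* Common completion logic shared by the legacy and the extended
 * scan-enable handlers below.
 */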
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Therefore,
		 * mark discovery as stopped. If this was not because of a
		 * connect request, advertising might have been disabled
		 * because of active scanning, so re-enable it again if
		 * necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}

static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_accept_list_size = rp->size;
}

static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
}

static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}

static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
}

static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
}

static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}

static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}

static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}

static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
	hci_dev_unlock(hdev);
}

static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}

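/* Command Status handlers: unlike the hci_cc_* functions above, the
 * hci_cs_* functions run when the controller only acknowledges that a
 * command is in progress. On failure they undo the optimistic
 * connection state that was set up when the command was issued.
 */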
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

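/* Remote names discovered during inquiry are resolved one at a time:
 * hci_resolve_name() issues the Remote Name Request for one cache
 * entry and hci_resolve_next_name() walks the list of entries still
 * marked NAME_NEEDED.
 */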
static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending resolution, there is no need to continue resolving
	 * the next name, as that will be done upon receiving another
	 * Remote Name Request Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful, wait for the name req complete event before
	 * checking for the need to do authentication. */
2080 if (!status)
2081 return;
2082
2083 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2084 if (!cp)
2085 return;
2086
2087 hci_dev_lock(hdev);
2088
2089 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2090
2091 if (hci_dev_test_flag(hdev, HCI_MGMT))
2092 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2093
2094 if (!conn)
2095 goto unlock;
2096
2097 if (!hci_outgoing_auth_needed(hdev, conn))
2098 goto unlock;
2099
2100 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2101 struct hci_cp_auth_requested auth_cp;
2102
2103 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2104
2105 auth_cp.handle = __cpu_to_le16(conn->handle);
2106 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2107 sizeof(auth_cp), &auth_cp);
2108 }
2109
2110unlock:
2111 hci_dev_unlock(hdev);
2112}
2113
2114static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2115{
2116 struct hci_cp_read_remote_features *cp;
2117 struct hci_conn *conn;
2118
2119 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2120
2121 if (!status)
2122 return;
2123
2124 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2125 if (!cp)
2126 return;
2127
2128 hci_dev_lock(hdev);
2129
2130 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2131 if (conn) {
2132 if (conn->state == BT_CONFIG) {
2133 hci_connect_cfm(conn, status);
2134 hci_conn_drop(conn);
2135 }
2136 }
2137
2138 hci_dev_unlock(hdev);
2139}
2140
2141static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2142{
2143 struct hci_cp_read_remote_ext_features *cp;
2144 struct hci_conn *conn;
2145
2146 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2147
2148 if (!status)
2149 return;
2150
2151 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2152 if (!cp)
2153 return;
2154
2155 hci_dev_lock(hdev);
2156
2157 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2158 if (conn) {
2159 if (conn->state == BT_CONFIG) {
2160 hci_connect_cfm(conn, status);
2161 hci_conn_drop(conn);
2162 }
2163 }
2164
2165 hci_dev_unlock(hdev);
2166}
2167
2168static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2169{
2170 struct hci_cp_setup_sync_conn *cp;
2171 struct hci_conn *acl, *sco;
2172 __u16 handle;
2173
2174 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2175
2176 if (!status)
2177 return;
2178
2179 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2180 if (!cp)
2181 return;
2182
2183 handle = __le16_to_cpu(cp->handle);
2184
2185 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2186
2187 hci_dev_lock(hdev);
2188
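	/* The Setup Synchronous Connection command failed outright, so
	 * find the ACL it was issued on and tear down the SCO channel
	 * linked to it.
	 */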
2189 acl = hci_conn_hash_lookup_handle(hdev, handle);
2190 if (acl) {
2191 sco = acl->link;
2192 if (sco) {
2193 sco->state = BT_CLOSED;
2194
2195 hci_connect_cfm(sco, status);
2196 hci_conn_del(sco);
2197 }
2198 }
2199
2200 hci_dev_unlock(hdev);
2201}
2202
2203static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2204{
2205 struct hci_cp_sniff_mode *cp;
2206 struct hci_conn *conn;
2207
2208 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2209
2210 if (!status)
2211 return;
2212
2213 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2214 if (!cp)
2215 return;
2216
2217 hci_dev_lock(hdev);
2218
2219 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2220 if (conn) {
2221 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2222
2223 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2224 hci_sco_setup(conn, status);
2225 }
2226
2227 hci_dev_unlock(hdev);
2228}
2229
2230static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2231{
2232 struct hci_cp_exit_sniff_mode *cp;
2233 struct hci_conn *conn;
2234
2235 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2236
2237 if (!status)
2238 return;
2239
2240 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2241 if (!cp)
2242 return;
2243
2244 hci_dev_lock(hdev);
2245
2246 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2247 if (conn) {
2248 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2249
2250 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2251 hci_sco_setup(conn, status);
2252 }
2253
2254 hci_dev_unlock(hdev);
2255}
2256
2257static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2258{
2259 struct hci_cp_disconnect *cp;
2260 struct hci_conn *conn;
2261
2262 if (!status)
2263 return;
2264
2265 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2266 if (!cp)
2267 return;
2268
2269 hci_dev_lock(hdev);
2270
2271 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2272	if (conn) {
2273 u8 type = conn->type;
2274
2275		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2276 conn->dst_type, status);
2277
2278		/* If the disconnection failed for any reason, the upper layer
2279		 * does not retry the disconnection in the current implementation.
2280		 * Hence, we need to do some basic cleanup here and re-enable
2281		 * advertising if necessary.
2282 */
2283 hci_conn_del(conn);
2284 if (type == LE_LINK)
2285 hci_req_reenable_advertising(hdev);
2286 }
2287
2288	hci_dev_unlock(hdev);
2289}
2290
2291static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2292 u8 peer_addr_type, u8 own_address_type,
2293 u8 filter_policy)
2294{
2295 struct hci_conn *conn;
2296
2297 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2298 peer_addr_type);
2299 if (!conn)
2300 return;
2301
2302	/* When controller-based address resolution is in use, the new
2303	 * address types 0x02 and 0x03 are used. These types need to be
2304	 * converted back into either the public or random address type.
2305	 */
2306 if (use_ll_privacy(hdev) &&
2307 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2308 switch (own_address_type) {
2309 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2310 own_address_type = ADDR_LE_DEV_PUBLIC;
2311 break;
2312 case ADDR_LE_DEV_RANDOM_RESOLVED:
2313 own_address_type = ADDR_LE_DEV_RANDOM;
2314 break;
2315 }
2316 }
2317
2318	/* Store the initiator and responder address information which
2319 * is needed for SMP. These values will not change during the
2320 * lifetime of the connection.
2321 */
2322 conn->init_addr_type = own_address_type;
2323 if (own_address_type == ADDR_LE_DEV_RANDOM)
2324 bacpy(&conn->init_addr, &hdev->random_addr);
2325 else
2326 bacpy(&conn->init_addr, &hdev->bdaddr);
2327
2328 conn->resp_addr_type = peer_addr_type;
2329 bacpy(&conn->resp_addr, peer_addr);
2330
2331 /* We don't want the connection attempt to stick around
2332 * indefinitely since LE doesn't have a page timeout concept
2333 * like BR/EDR. Set a timer for any connection that doesn't use
2334	 * the accept list for connecting.
2335	 */
2336 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2337 queue_delayed_work(conn->hdev->workqueue,
2338 &conn->le_conn_timeout,
2339 conn->conn_timeout);
2340}
2341
2342static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2343{
2344 struct hci_cp_le_create_conn *cp;
2345
2346 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2347
2348 /* All connection failure handling is taken care of by the
2349 * hci_le_conn_failed function which is triggered by the HCI
2350 * request completion callbacks used for connecting.
2351 */
2352 if (status)
2353 return;
2354
2355 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2356 if (!cp)
2357 return;
2358
2359 hci_dev_lock(hdev);
2360
2361 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2362 cp->own_address_type, cp->filter_policy);
2363
2364 hci_dev_unlock(hdev);
2365}
2366
2367static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2368{
2369 struct hci_cp_le_ext_create_conn *cp;
2370
2371 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2372
2373 /* All connection failure handling is taken care of by the
2374 * hci_le_conn_failed function which is triggered by the HCI
2375 * request completion callbacks used for connecting.
2376 */
2377 if (status)
2378 return;
2379
2380 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2381 if (!cp)
2382 return;
2383
2384 hci_dev_lock(hdev);
2385
2386 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2387 cp->own_addr_type, cp->filter_policy);
2388
2389 hci_dev_unlock(hdev);
2390}
2391
2392static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2393{
2394 struct hci_cp_le_read_remote_features *cp;
2395 struct hci_conn *conn;
2396
2397 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2398
2399 if (!status)
2400 return;
2401
2402 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2403 if (!cp)
2404 return;
2405
2406 hci_dev_lock(hdev);
2407
2408 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2409 if (conn) {
2410 if (conn->state == BT_CONFIG) {
2411 hci_connect_cfm(conn, status);
2412 hci_conn_drop(conn);
2413 }
2414 }
2415
2416 hci_dev_unlock(hdev);
2417}
2418
2419static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2420{
2421 struct hci_cp_le_start_enc *cp;
2422 struct hci_conn *conn;
2423
2424 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2425
2426 if (!status)
2427 return;
2428
2429 hci_dev_lock(hdev);
2430
2431 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2432 if (!cp)
2433 goto unlock;
2434
2435 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2436 if (!conn)
2437 goto unlock;
2438
2439 if (conn->state != BT_CONNECTED)
2440 goto unlock;
2441
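	/* A failed LE Start Encryption on a live connection leaves the
	 * link in an untrusted state, so force a disconnect.
	 */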
2442 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2443 hci_conn_drop(conn);
2444
2445unlock:
2446 hci_dev_unlock(hdev);
2447}
2448
2449static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2450{
2451 struct hci_cp_switch_role *cp;
2452 struct hci_conn *conn;
2453
2454 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2455
2456 if (!status)
2457 return;
2458
2459 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2460 if (!cp)
2461 return;
2462
2463 hci_dev_lock(hdev);
2464
2465 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2466 if (conn)
2467 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2468
2469 hci_dev_unlock(hdev);
2470}
2471
2472static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2473{
2474 __u8 status = *((__u8 *) skb->data);
2475 struct discovery_state *discov = &hdev->discovery;
2476 struct inquiry_entry *e;
2477
2478 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2479
2480 hci_conn_check_pending(hdev);
2481
2482 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2483 return;
2484
2485 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2486 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2487
2488 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2489 return;
2490
2491 hci_dev_lock(hdev);
2492
2493 if (discov->state != DISCOVERY_FINDING)
2494 goto unlock;
2495
2496 if (list_empty(&discov->resolve)) {
2497 /* When BR/EDR inquiry is active and no LE scanning is in
2498 * progress, then change discovery state to indicate completion.
2499 *
2500 * When running LE scanning and BR/EDR inquiry simultaneously
2501 * and the LE scan already finished, then change the discovery
2502 * state to indicate completion.
2503 */
2504 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2505 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2506 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2507 goto unlock;
2508 }
2509
2510 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2511 if (e && hci_resolve_name(hdev, e) == 0) {
2512 e->name_state = NAME_PENDING;
2513 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2514 } else {
2515 /* When BR/EDR inquiry is active and no LE scanning is in
2516 * progress, then change discovery state to indicate completion.
2517 *
2518 * When running LE scanning and BR/EDR inquiry simultaneously
2519 * and the LE scan already finished, then change the discovery
2520 * state to indicate completion.
2521 */
2522 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2523 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2524 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2525 }
2526
2527unlock:
2528 hci_dev_unlock(hdev);
2529}
2530
2531static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2532{
2533 struct inquiry_data data;
2534 struct inquiry_info *info = (void *) (skb->data + 1);
2535 int num_rsp = *((__u8 *) skb->data);
2536
2537 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2538
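	/* Guard against truncated events: the payload must hold the
	 * leading count byte plus num_rsp inquiry_info records.
	 */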
2539	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2540		return;
2541
2542 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2543 return;
2544
2545 hci_dev_lock(hdev);
2546
2547 for (; num_rsp; num_rsp--, info++) {
2548 u32 flags;
2549
2550 bacpy(&data.bdaddr, &info->bdaddr);
2551 data.pscan_rep_mode = info->pscan_rep_mode;
2552 data.pscan_period_mode = info->pscan_period_mode;
2553 data.pscan_mode = info->pscan_mode;
2554 memcpy(data.dev_class, info->dev_class, 3);
2555 data.clock_offset = info->clock_offset;
2556 data.rssi = HCI_RSSI_INVALID;
2557 data.ssp_mode = 0x00;
2558
2559 flags = hci_inquiry_cache_update(hdev, &data, false);
2560
2561 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2562 info->dev_class, HCI_RSSI_INVALID,
2563 flags, NULL, 0, NULL, 0);
2564 }
2565
2566 hci_dev_unlock(hdev);
2567}
2568
2569static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2570{
2571 struct hci_ev_conn_complete *ev = (void *) skb->data;
2572 struct hci_conn *conn;
2573
2574 BT_DBG("%s", hdev->name);
2575
2576 hci_dev_lock(hdev);
2577
2578 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2579 if (!conn) {
2580		/* Connection may not exist if auto-connected. Check the bredr
2581 * allowlist to see if this device is allowed to auto connect.
2582 * If link is an ACL type, create a connection class
2583 * automatically.
2584 *
2585 * Auto-connect will only occur if the event filter is
2586 * programmed with a given address. Right now, event filter is
2587 * only used during suspend.
2588 */
2589 if (ev->link_type == ACL_LINK &&
2590		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2591						      &ev->bdaddr,
2592 BDADDR_BREDR)) {
2593 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2594 HCI_ROLE_SLAVE);
2595 if (!conn) {
2596 bt_dev_err(hdev, "no memory for new conn");
2597 goto unlock;
2598 }
2599 } else {
2600 if (ev->link_type != SCO_LINK)
2601 goto unlock;
2602
2603			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2604						       &ev->bdaddr);
2605			if (!conn)
2606				goto unlock;
2607
2608			conn->type = SCO_LINK;
2609		}
2610	}
2611
2612 if (!ev->status) {
2613 conn->handle = __le16_to_cpu(ev->handle);
2614
2615 if (conn->type == ACL_LINK) {
2616 conn->state = BT_CONFIG;
2617 hci_conn_hold(conn);
2618
2619 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2620 !hci_find_link_key(hdev, &ev->bdaddr))
2621 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2622 else
2623 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2624 } else
2625 conn->state = BT_CONNECTED;
2626
2627 hci_debugfs_create_conn(conn);
2628 hci_conn_add_sysfs(conn);
2629
2630 if (test_bit(HCI_AUTH, &hdev->flags))
2631 set_bit(HCI_CONN_AUTH, &conn->flags);
2632
2633 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2634 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2635
2636 /* Get remote features */
2637 if (conn->type == ACL_LINK) {
2638 struct hci_cp_read_remote_features cp;
2639 cp.handle = ev->handle;
2640 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2641 sizeof(cp), &cp);
2642
2643 hci_req_update_scan(hdev);
2644 }
2645
2646 /* Set packet type for incoming connection */
2647 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2648 struct hci_cp_change_conn_ptype cp;
2649 cp.handle = ev->handle;
2650 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2651 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2652 &cp);
2653 }
2654 } else {
2655 conn->state = BT_CLOSED;
2656 if (conn->type == ACL_LINK)
2657 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2658 conn->dst_type, ev->status);
2659 }
2660
2661 if (conn->type == ACL_LINK)
2662 hci_sco_setup(conn, ev->status);
2663
2664 if (ev->status) {
2665 hci_connect_cfm(conn, ev->status);
2666 hci_conn_del(conn);
2667	} else if (ev->link_type == SCO_LINK) {
2668 switch (conn->setting & SCO_AIRMODE_MASK) {
2669 case SCO_AIRMODE_CVSD:
2670 if (hdev->notify)
2671 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2672 break;
2673 }
2674
2675		hci_connect_cfm(conn, ev->status);
2676	}
2677
2678unlock:
2679 hci_dev_unlock(hdev);
2680
2681 hci_conn_check_pending(hdev);
2682}
2683
2684static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2685{
2686 struct hci_cp_reject_conn_req cp;
2687
2688 bacpy(&cp.bdaddr, bdaddr);
2689 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2690 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2691}
2692
2693static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2694{
2695 struct hci_ev_conn_request *ev = (void *) skb->data;
2696 int mask = hdev->link_mode;
2697 struct inquiry_entry *ie;
2698 struct hci_conn *conn;
2699 __u8 flags = 0;
2700
2701 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2702 ev->link_type);
2703
2704 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2705 &flags);
2706
2707 if (!(mask & HCI_LM_ACCEPT)) {
2708 hci_reject_conn(hdev, &ev->bdaddr);
2709 return;
2710 }
2711
2712	hci_dev_lock(hdev);
2713
2714 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2715				   BDADDR_BREDR)) {
2716 hci_reject_conn(hdev, &ev->bdaddr);
2717		goto unlock;
2718	}
2719
2720	/* Require HCI_CONNECTABLE or an accept list entry to accept the
2721	 * connection. These features are only touched through mgmt so
2722 * only do the checks if HCI_MGMT is set.
2723 */
2724 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2725 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2726	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2727					       BDADDR_BREDR)) {
2728 hci_reject_conn(hdev, &ev->bdaddr);
2729		goto unlock;
2730	}
2731
2732 /* Connection accepted */
2733
2734	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2735 if (ie)
2736 memcpy(ie->data.dev_class, ev->dev_class, 3);
2737
2738 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2739 &ev->bdaddr);
2740 if (!conn) {
2741 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2742 HCI_ROLE_SLAVE);
2743 if (!conn) {
2744 bt_dev_err(hdev, "no memory for new connection");
2745			goto unlock;
2746		}
2747 }
2748
2749 memcpy(conn->dev_class, ev->dev_class, 3);
2750
2751 hci_dev_unlock(hdev);
2752
2753 if (ev->link_type == ACL_LINK ||
2754 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2755 struct hci_cp_accept_conn_req cp;
2756 conn->state = BT_CONNECT;
2757
2758 bacpy(&cp.bdaddr, &ev->bdaddr);
2759
2760 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2761			cp.role = 0x00; /* Become central */
2762		else
2763			cp.role = 0x01; /* Remain peripheral */
2764
2765 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2766 } else if (!(flags & HCI_PROTO_DEFER)) {
2767 struct hci_cp_accept_sync_conn_req cp;
2768 conn->state = BT_CONNECT;
2769
2770 bacpy(&cp.bdaddr, &ev->bdaddr);
2771 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2772
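		/* Default eSCO parameters: 64 kbit/s (8000 octets/s) in
		 * each direction, no latency limit (0xffff) and
		 * retransmission effort left to the controller (0xff).
		 */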
2773 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2774 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2775 cp.max_latency = cpu_to_le16(0xffff);
2776 cp.content_format = cpu_to_le16(hdev->voice_setting);
2777 cp.retrans_effort = 0xff;
2778
2779 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2780 &cp);
2781 } else {
2782 conn->state = BT_CONNECT2;
2783 hci_connect_cfm(conn, 0);
2784 }
2785
2786 return;
2787unlock:
2788 hci_dev_unlock(hdev);
2789}
2790
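/* Map an HCI disconnect reason code onto the reason codes reported
 * to user space through the mgmt interface.
 */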
2791static u8 hci_to_mgmt_reason(u8 err)
2792{
2793 switch (err) {
2794 case HCI_ERROR_CONNECTION_TIMEOUT:
2795 return MGMT_DEV_DISCONN_TIMEOUT;
2796 case HCI_ERROR_REMOTE_USER_TERM:
2797 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2798 case HCI_ERROR_REMOTE_POWER_OFF:
2799 return MGMT_DEV_DISCONN_REMOTE;
2800 case HCI_ERROR_LOCAL_HOST_TERM:
2801 return MGMT_DEV_DISCONN_LOCAL_HOST;
2802 default:
2803 return MGMT_DEV_DISCONN_UNKNOWN;
2804 }
2805}
2806
2807static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2808{
2809 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2810 u8 reason;
2811 struct hci_conn_params *params;
2812 struct hci_conn *conn;
2813 bool mgmt_connected;
2814 u8 type;
2815
2816 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2817
2818 hci_dev_lock(hdev);
2819
2820 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2821 if (!conn)
2822 goto unlock;
2823
2824 if (ev->status) {
2825 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2826 conn->dst_type, ev->status);
2827 goto unlock;
2828 }
2829
2830 conn->state = BT_CLOSED;
2831
2832 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2833
2834 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2835 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2836 else
2837 reason = hci_to_mgmt_reason(ev->reason);
2838
2839 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2840 reason, mgmt_connected);
2841
2842 if (conn->type == ACL_LINK) {
2843 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2844 hci_remove_link_key(hdev, &conn->dst);
2845
2846 hci_req_update_scan(hdev);
2847 }
2848
2849 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2850 if (params) {
2851 switch (params->auto_connect) {
2852 case HCI_AUTO_CONN_LINK_LOSS:
2853 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2854 break;
2855			fallthrough;
2856
2857 case HCI_AUTO_CONN_DIRECT:
2858 case HCI_AUTO_CONN_ALWAYS:
2859 list_del_init(&params->action);
2860 list_add(&params->action, &hdev->pend_le_conns);
2861 hci_update_background_scan(hdev);
2862 break;
2863
2864 default:
2865 break;
2866 }
2867 }
2868
2869 type = conn->type;
2870
2871 hci_disconn_cfm(conn, ev->reason);
2872 hci_conn_del(conn);
2873
2874	/* The suspend notifier is waiting for all devices to disconnect so
2875 * clear the bit from pending tasks and inform the wait queue.
2876 */
2877 if (list_empty(&hdev->conn_hash.list) &&
2878 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
2879 wake_up(&hdev->suspend_wait_q);
2880 }
2881
2882	/* Re-enable advertising if necessary, since it might
2883 * have been disabled by the connection. From the
2884 * HCI_LE_Set_Advertise_Enable command description in
2885 * the core specification (v4.0):
2886 * "The Controller shall continue advertising until the Host
2887 * issues an LE_Set_Advertise_Enable command with
2888 * Advertising_Enable set to 0x00 (Advertising is disabled)
2889 * or until a connection is created or until the Advertising
2890 * is timed out due to Directed Advertising."
2891 */
2892 if (type == LE_LINK)
2893 hci_req_reenable_advertising(hdev);
2894
2895unlock:
2896 hci_dev_unlock(hdev);
2897}
2898
2899static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2900{
2901 struct hci_ev_auth_complete *ev = (void *) skb->data;
2902 struct hci_conn *conn;
2903
2904 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2905
2906 hci_dev_lock(hdev);
2907
2908 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2909 if (!conn)
2910 goto unlock;
2911
2912 if (!ev->status) {
2913 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2914
2915 if (!hci_conn_ssp_enabled(conn) &&
2916 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2917 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2918 } else {
2919 set_bit(HCI_CONN_AUTH, &conn->flags);
2920 conn->sec_level = conn->pending_sec_level;
2921 }
2922 } else {
2923 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2924 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2925
2926 mgmt_auth_failed(conn, ev->status);
2927 }
2928
2929 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2930 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2931
2932 if (conn->state == BT_CONFIG) {
2933 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2934 struct hci_cp_set_conn_encrypt cp;
2935 cp.handle = ev->handle;
2936 cp.encrypt = 0x01;
2937 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2938 &cp);
2939 } else {
2940 conn->state = BT_CONNECTED;
2941 hci_connect_cfm(conn, ev->status);
2942 hci_conn_drop(conn);
2943 }
2944 } else {
2945 hci_auth_cfm(conn, ev->status);
2946
2947 hci_conn_hold(conn);
2948 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2949 hci_conn_drop(conn);
2950 }
2951
2952 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2953 if (!ev->status) {
2954 struct hci_cp_set_conn_encrypt cp;
2955 cp.handle = ev->handle;
2956 cp.encrypt = 0x01;
2957 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2958 &cp);
2959 } else {
2960 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2961			hci_encrypt_cfm(conn, ev->status);
2962		}
2963 }
2964
2965unlock:
2966 hci_dev_unlock(hdev);
2967}
2968
2969static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2970{
2971 struct hci_ev_remote_name *ev = (void *) skb->data;
2972 struct hci_conn *conn;
2973
2974 BT_DBG("%s", hdev->name);
2975
2976 hci_conn_check_pending(hdev);
2977
2978 hci_dev_lock(hdev);
2979
2980 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2981
2982 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2983 goto check_auth;
2984
2985 if (ev->status == 0)
2986 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2987 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2988 else
2989 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2990
2991check_auth:
2992 if (!conn)
2993 goto unlock;
2994
2995 if (!hci_outgoing_auth_needed(hdev, conn))
2996 goto unlock;
2997
2998 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2999 struct hci_cp_auth_requested cp;
3000
3001 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3002
3003 cp.handle = __cpu_to_le16(conn->handle);
3004 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3005 }
3006
3007unlock:
3008 hci_dev_unlock(hdev);
3009}
3010
3011static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3012 u16 opcode, struct sk_buff *skb)
3013{
3014 const struct hci_rp_read_enc_key_size *rp;
3015 struct hci_conn *conn;
3016 u16 handle;
3017
3018 BT_DBG("%s status 0x%02x", hdev->name, status);
3019
3020 if (!skb || skb->len < sizeof(*rp)) {
3021 bt_dev_err(hdev, "invalid read key size response");
3022 return;
3023 }
3024
3025 rp = (void *)skb->data;
3026 handle = le16_to_cpu(rp->handle);
3027
3028 hci_dev_lock(hdev);
3029
3030 conn = hci_conn_hash_lookup_handle(hdev, handle);
3031 if (!conn)
3032 goto unlock;
3033
3034	/* While unexpected, the read_enc_key_size command may fail. The most
3035	 * secure approach is to then assume the key size is 0 to force a
3036	 * disconnection.
3037	 */
3038 if (rp->status) {
3039 bt_dev_err(hdev, "failed to read key size for handle %u",
3040 handle);
3041		conn->enc_key_size = 0;
3042	} else {
3043 conn->enc_key_size = rp->key_size;
3044 }
3045
3046	hci_encrypt_cfm(conn, 0);
3047
3048unlock:
3049 hci_dev_unlock(hdev);
3050}
3051
3052static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3053{
3054 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3055 struct hci_conn *conn;
3056
3057 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3058
3059 hci_dev_lock(hdev);
3060
3061 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3062 if (!conn)
3063 goto unlock;
3064
3065 if (!ev->status) {
3066 if (ev->encrypt) {
3067 /* Encryption implies authentication */
3068 set_bit(HCI_CONN_AUTH, &conn->flags);
3069 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3070 conn->sec_level = conn->pending_sec_level;
3071
3072 /* P-256 authentication key implies FIPS */
3073 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3074 set_bit(HCI_CONN_FIPS, &conn->flags);
3075
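			/* BR/EDR encryption mode 0x02 means AES-CCM, and
			 * encrypted LE links always use AES-CCM.
			 */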
3076 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3077 conn->type == LE_LINK)
3078 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3079 } else {
3080 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3081 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3082 }
3083 }
3084
3085 /* We should disregard the current RPA and generate a new one
3086 * whenever the encryption procedure fails.
3087 */
3088 if (ev->status && conn->type == LE_LINK) {
3089 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3090 hci_adv_instances_set_rpa_expired(hdev, true);
3091 }
3092
3093 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3094
3095	/* Check link security requirements are met */
3096 if (!hci_conn_check_link_mode(conn))
3097 ev->status = HCI_ERROR_AUTH_FAILURE;
3098
3099	if (ev->status && conn->state == BT_CONNECTED) {
3100 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3101 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3102
3103		/* Notify upper layers so they can clean up before
3104 * disconnecting.
3105 */
3106 hci_encrypt_cfm(conn, ev->status);
3107		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3108 hci_conn_drop(conn);
3109 goto unlock;
3110 }
3111
3112	/* Try reading the encryption key size for encrypted ACL links */
3113 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3114 struct hci_cp_read_enc_key_size cp;
3115 struct hci_request req;
3116
3117 /* Only send HCI_Read_Encryption_Key_Size if the
3118 * controller really supports it. If it doesn't, assume
3119 * the default size (16).
3120 */
3121 if (!(hdev->commands[20] & 0x10)) {
3122 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3123 goto notify;
3124 }
3125
3126 hci_req_init(&req, hdev);
3127
3128 cp.handle = cpu_to_le16(conn->handle);
3129 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3130
3131 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3132 bt_dev_err(hdev, "sending read key size failed");
3133 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3134 goto notify;
3135 }
3136
3137 goto unlock;
3138 }
3139
3140	/* Set the default Authenticated Payload Timeout after
3141	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3142	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3143	 * sent when the link is active and encryption is enabled; the conn
3144	 * type can be either LE or ACL, and the controller must support
3145	 * LMP Ping. Require AES-CCM encryption as well.
3146 */
3147 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3148 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3149 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3150 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3151 struct hci_cp_write_auth_payload_to cp;
3152
3153 cp.handle = cpu_to_le16(conn->handle);
3154 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3155 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3156 sizeof(cp), &cp);
3157 }
3158
3159notify:
3160	hci_encrypt_cfm(conn, ev->status);
3161
3162unlock:
3163 hci_dev_unlock(hdev);
3164}
3165
3166static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3167 struct sk_buff *skb)
3168{
3169 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3170 struct hci_conn *conn;
3171
3172 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3173
3174 hci_dev_lock(hdev);
3175
3176 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3177 if (conn) {
3178 if (!ev->status)
3179 set_bit(HCI_CONN_SECURE, &conn->flags);
3180
3181 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3182
3183 hci_key_change_cfm(conn, ev->status);
3184 }
3185
3186 hci_dev_unlock(hdev);
3187}
3188
3189static void hci_remote_features_evt(struct hci_dev *hdev,
3190 struct sk_buff *skb)
3191{
3192 struct hci_ev_remote_features *ev = (void *) skb->data;
3193 struct hci_conn *conn;
3194
3195 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3196
3197 hci_dev_lock(hdev);
3198
3199 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3200 if (!conn)
3201 goto unlock;
3202
3203 if (!ev->status)
3204 memcpy(conn->features[0], ev->features, 8);
3205
3206 if (conn->state != BT_CONFIG)
3207 goto unlock;
3208
3209 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3210 lmp_ext_feat_capable(conn)) {
3211 struct hci_cp_read_remote_ext_features cp;
3212 cp.handle = ev->handle;
3213 cp.page = 0x01;
3214 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3215 sizeof(cp), &cp);
3216 goto unlock;
3217 }
3218
3219 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3220 struct hci_cp_remote_name_req cp;
3221 memset(&cp, 0, sizeof(cp));
3222 bacpy(&cp.bdaddr, &conn->dst);
3223 cp.pscan_rep_mode = 0x02;
3224 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3225 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3226 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3227
3228 if (!hci_outgoing_auth_needed(hdev, conn)) {
3229 conn->state = BT_CONNECTED;
3230 hci_connect_cfm(conn, ev->status);
3231 hci_conn_drop(conn);
3232 }
3233
3234unlock:
3235 hci_dev_unlock(hdev);
3236}
3237
3238static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3239 u16 *opcode, u8 *status,
3240 hci_req_complete_t *req_complete,
3241 hci_req_complete_skb_t *req_complete_skb)
3242{
3243 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3244
3245 *opcode = __le16_to_cpu(ev->opcode);
3246 *status = skb->data[sizeof(*ev)];
3247
3248 skb_pull(skb, sizeof(*ev));
3249
3250 switch (*opcode) {
3251 case HCI_OP_INQUIRY_CANCEL:
3252		hci_cc_inquiry_cancel(hdev, skb, status);
3253		break;
3254
3255 case HCI_OP_PERIODIC_INQ:
3256 hci_cc_periodic_inq(hdev, skb);
3257 break;
3258
3259 case HCI_OP_EXIT_PERIODIC_INQ:
3260 hci_cc_exit_periodic_inq(hdev, skb);
3261 break;
3262
3263 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3264 hci_cc_remote_name_req_cancel(hdev, skb);
3265 break;
3266
3267 case HCI_OP_ROLE_DISCOVERY:
3268 hci_cc_role_discovery(hdev, skb);
3269 break;
3270
3271 case HCI_OP_READ_LINK_POLICY:
3272 hci_cc_read_link_policy(hdev, skb);
3273 break;
3274
3275 case HCI_OP_WRITE_LINK_POLICY:
3276 hci_cc_write_link_policy(hdev, skb);
3277 break;
3278
3279 case HCI_OP_READ_DEF_LINK_POLICY:
3280 hci_cc_read_def_link_policy(hdev, skb);
3281 break;
3282
3283 case HCI_OP_WRITE_DEF_LINK_POLICY:
3284 hci_cc_write_def_link_policy(hdev, skb);
3285 break;
3286
3287 case HCI_OP_RESET:
3288 hci_cc_reset(hdev, skb);
3289 break;
3290
3291 case HCI_OP_READ_STORED_LINK_KEY:
3292 hci_cc_read_stored_link_key(hdev, skb);
3293 break;
3294
3295 case HCI_OP_DELETE_STORED_LINK_KEY:
3296 hci_cc_delete_stored_link_key(hdev, skb);
3297 break;
3298
3299 case HCI_OP_WRITE_LOCAL_NAME:
3300 hci_cc_write_local_name(hdev, skb);
3301 break;
3302
3303 case HCI_OP_READ_LOCAL_NAME:
3304 hci_cc_read_local_name(hdev, skb);
3305 break;
3306
3307 case HCI_OP_WRITE_AUTH_ENABLE:
3308 hci_cc_write_auth_enable(hdev, skb);
3309 break;
3310
3311 case HCI_OP_WRITE_ENCRYPT_MODE:
3312 hci_cc_write_encrypt_mode(hdev, skb);
3313 break;
3314
3315 case HCI_OP_WRITE_SCAN_ENABLE:
3316 hci_cc_write_scan_enable(hdev, skb);
3317 break;
3318
3319 case HCI_OP_READ_CLASS_OF_DEV:
3320 hci_cc_read_class_of_dev(hdev, skb);
3321 break;
3322
3323 case HCI_OP_WRITE_CLASS_OF_DEV:
3324 hci_cc_write_class_of_dev(hdev, skb);
3325 break;
3326
3327 case HCI_OP_READ_VOICE_SETTING:
3328 hci_cc_read_voice_setting(hdev, skb);
3329 break;
3330
3331 case HCI_OP_WRITE_VOICE_SETTING:
3332 hci_cc_write_voice_setting(hdev, skb);
3333 break;
3334
3335 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3336 hci_cc_read_num_supported_iac(hdev, skb);
3337 break;
3338
3339 case HCI_OP_WRITE_SSP_MODE:
3340 hci_cc_write_ssp_mode(hdev, skb);
3341 break;
3342
3343 case HCI_OP_WRITE_SC_SUPPORT:
3344 hci_cc_write_sc_support(hdev, skb);
3345 break;
3346
3347	case HCI_OP_READ_AUTH_PAYLOAD_TO:
3348 hci_cc_read_auth_payload_timeout(hdev, skb);
3349 break;
3350
3351 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3352 hci_cc_write_auth_payload_timeout(hdev, skb);
3353 break;
3354
3355	case HCI_OP_READ_LOCAL_VERSION:
3356 hci_cc_read_local_version(hdev, skb);
3357 break;
3358
3359 case HCI_OP_READ_LOCAL_COMMANDS:
3360 hci_cc_read_local_commands(hdev, skb);
3361 break;
3362
3363 case HCI_OP_READ_LOCAL_FEATURES:
3364 hci_cc_read_local_features(hdev, skb);
3365 break;
3366
3367 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3368 hci_cc_read_local_ext_features(hdev, skb);
3369 break;
3370
3371 case HCI_OP_READ_BUFFER_SIZE:
3372 hci_cc_read_buffer_size(hdev, skb);
3373 break;
3374
3375 case HCI_OP_READ_BD_ADDR:
3376 hci_cc_read_bd_addr(hdev, skb);
3377 break;
3378
3379	case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3380 hci_cc_read_local_pairing_opts(hdev, skb);
3381 break;
3382
3383	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3384 hci_cc_read_page_scan_activity(hdev, skb);
3385 break;
3386
3387 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3388 hci_cc_write_page_scan_activity(hdev, skb);
3389 break;
3390
3391 case HCI_OP_READ_PAGE_SCAN_TYPE:
3392 hci_cc_read_page_scan_type(hdev, skb);
3393 break;
3394
3395 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3396 hci_cc_write_page_scan_type(hdev, skb);
3397 break;
3398
3399 case HCI_OP_READ_DATA_BLOCK_SIZE:
3400 hci_cc_read_data_block_size(hdev, skb);
3401 break;
3402
3403 case HCI_OP_READ_FLOW_CONTROL_MODE:
3404 hci_cc_read_flow_control_mode(hdev, skb);
3405 break;
3406
3407 case HCI_OP_READ_LOCAL_AMP_INFO:
3408 hci_cc_read_local_amp_info(hdev, skb);
3409 break;
3410
3411 case HCI_OP_READ_CLOCK:
3412 hci_cc_read_clock(hdev, skb);
3413 break;
3414
3415 case HCI_OP_READ_INQ_RSP_TX_POWER:
3416 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3417 break;
3418
3419	case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3420 hci_cc_read_def_err_data_reporting(hdev, skb);
3421 break;
3422
3423 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3424 hci_cc_write_def_err_data_reporting(hdev, skb);
3425 break;
3426
3427	case HCI_OP_PIN_CODE_REPLY:
3428 hci_cc_pin_code_reply(hdev, skb);
3429 break;
3430
3431 case HCI_OP_PIN_CODE_NEG_REPLY:
3432 hci_cc_pin_code_neg_reply(hdev, skb);
3433 break;
3434
3435 case HCI_OP_READ_LOCAL_OOB_DATA:
3436 hci_cc_read_local_oob_data(hdev, skb);
3437 break;
3438
3439 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3440 hci_cc_read_local_oob_ext_data(hdev, skb);
3441 break;
3442
3443 case HCI_OP_LE_READ_BUFFER_SIZE:
3444 hci_cc_le_read_buffer_size(hdev, skb);
3445 break;
3446
3447 case HCI_OP_LE_READ_LOCAL_FEATURES:
3448 hci_cc_le_read_local_features(hdev, skb);
3449 break;
3450
3451 case HCI_OP_LE_READ_ADV_TX_POWER:
3452 hci_cc_le_read_adv_tx_power(hdev, skb);
3453 break;
3454
3455 case HCI_OP_USER_CONFIRM_REPLY:
3456 hci_cc_user_confirm_reply(hdev, skb);
3457 break;
3458
3459 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3460 hci_cc_user_confirm_neg_reply(hdev, skb);
3461 break;
3462
3463 case HCI_OP_USER_PASSKEY_REPLY:
3464 hci_cc_user_passkey_reply(hdev, skb);
3465 break;
3466
3467 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3468 hci_cc_user_passkey_neg_reply(hdev, skb);
3469 break;
3470
3471 case HCI_OP_LE_SET_RANDOM_ADDR:
3472 hci_cc_le_set_random_addr(hdev, skb);
3473 break;
3474
3475 case HCI_OP_LE_SET_ADV_ENABLE:
3476 hci_cc_le_set_adv_enable(hdev, skb);
3477 break;
3478
3479 case HCI_OP_LE_SET_SCAN_PARAM:
3480 hci_cc_le_set_scan_param(hdev, skb);
3481 break;
3482
3483 case HCI_OP_LE_SET_SCAN_ENABLE:
3484 hci_cc_le_set_scan_enable(hdev, skb);
3485 break;
3486
3487	case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3488		hci_cc_le_read_accept_list_size(hdev, skb);
3489		break;
3490
3491	case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3492		hci_cc_le_clear_accept_list(hdev, skb);
3493		break;
3494
3495	case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3496		hci_cc_le_add_to_accept_list(hdev, skb);
3497		break;
3498
3499	case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3500		hci_cc_le_del_from_accept_list(hdev, skb);
3501		break;
3502
3503 case HCI_OP_LE_READ_SUPPORTED_STATES:
3504 hci_cc_le_read_supported_states(hdev, skb);
3505 break;
3506
3507 case HCI_OP_LE_READ_DEF_DATA_LEN:
3508 hci_cc_le_read_def_data_len(hdev, skb);
3509 break;
3510
3511 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3512 hci_cc_le_write_def_data_len(hdev, skb);
3513 break;
3514
3515	case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3516 hci_cc_le_add_to_resolv_list(hdev, skb);
3517 break;
3518
3519 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3520 hci_cc_le_del_from_resolv_list(hdev, skb);
3521 break;
3522
3523	case HCI_OP_LE_CLEAR_RESOLV_LIST:
3524 hci_cc_le_clear_resolv_list(hdev, skb);
3525 break;
3526
3527 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3528 hci_cc_le_read_resolv_list_size(hdev, skb);
3529 break;
3530
3531 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3532 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3533 break;
3534
3535 case HCI_OP_LE_READ_MAX_DATA_LEN:
3536 hci_cc_le_read_max_data_len(hdev, skb);
3537 break;
3538
3539 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3540 hci_cc_write_le_host_supported(hdev, skb);
3541 break;
3542
3543 case HCI_OP_LE_SET_ADV_PARAM:
3544 hci_cc_set_adv_param(hdev, skb);
3545 break;
3546
3547 case HCI_OP_READ_RSSI:
3548 hci_cc_read_rssi(hdev, skb);
3549 break;
3550
3551 case HCI_OP_READ_TX_POWER:
3552 hci_cc_read_tx_power(hdev, skb);
3553 break;
3554
3555 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3556 hci_cc_write_ssp_debug_mode(hdev, skb);
3557 break;
3558
3559 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3560 hci_cc_le_set_ext_scan_param(hdev, skb);
3561 break;
3562
3563 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3564 hci_cc_le_set_ext_scan_enable(hdev, skb);
3565 break;
3566
3567 case HCI_OP_LE_SET_DEFAULT_PHY:
3568 hci_cc_le_set_default_phy(hdev, skb);
3569 break;
3570
3571 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3572 hci_cc_le_read_num_adv_sets(hdev, skb);
3573 break;
3574
3575 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3576 hci_cc_set_ext_adv_param(hdev, skb);
3577 break;
3578
3579 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3580 hci_cc_le_set_ext_adv_enable(hdev, skb);
3581 break;
3582
3583 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3584 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3585 break;
3586
3587 default:
3588 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3589 break;
3590 }
3591
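	/* A non-NOP command completed, so stop the command timeout; a
	 * non-zero ncmd from the controller means it can accept another
	 * command, so re-arm the command credit unless a reset is pending.
	 */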
3592 if (*opcode != HCI_OP_NOP)
3593 cancel_delayed_work(&hdev->cmd_timer);
3594
3595 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3596 atomic_set(&hdev->cmd_cnt, 1);
3597
3598 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3599 req_complete_skb);
3600
3601	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3602 bt_dev_err(hdev,
3603 "unexpected event for opcode 0x%4.4x", *opcode);
3604 return;
3605 }
3606
3607	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3608 queue_work(hdev->workqueue, &hdev->cmd_work);
3609}
3610
3611static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3612 u16 *opcode, u8 *status,
3613 hci_req_complete_t *req_complete,
3614 hci_req_complete_skb_t *req_complete_skb)
3615{
3616 struct hci_ev_cmd_status *ev = (void *) skb->data;
3617
3618 skb_pull(skb, sizeof(*ev));
3619
3620 *opcode = __le16_to_cpu(ev->opcode);
3621 *status = ev->status;
3622
3623 switch (*opcode) {
3624 case HCI_OP_INQUIRY:
3625 hci_cs_inquiry(hdev, ev->status);
3626 break;
3627
3628 case HCI_OP_CREATE_CONN:
3629 hci_cs_create_conn(hdev, ev->status);
3630 break;
3631
3632 case HCI_OP_DISCONNECT:
3633 hci_cs_disconnect(hdev, ev->status);
3634 break;
3635
3636 case HCI_OP_ADD_SCO:
3637 hci_cs_add_sco(hdev, ev->status);
3638 break;
3639
3640 case HCI_OP_AUTH_REQUESTED:
3641 hci_cs_auth_requested(hdev, ev->status);
3642 break;
3643
3644 case HCI_OP_SET_CONN_ENCRYPT:
3645 hci_cs_set_conn_encrypt(hdev, ev->status);
3646 break;
3647
3648 case HCI_OP_REMOTE_NAME_REQ:
3649 hci_cs_remote_name_req(hdev, ev->status);
3650 break;
3651
3652 case HCI_OP_READ_REMOTE_FEATURES:
3653 hci_cs_read_remote_features(hdev, ev->status);
3654 break;
3655
3656 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3657 hci_cs_read_remote_ext_features(hdev, ev->status);
3658 break;
3659
3660 case HCI_OP_SETUP_SYNC_CONN:
3661 hci_cs_setup_sync_conn(hdev, ev->status);
3662 break;
3663
3664 case HCI_OP_SNIFF_MODE:
3665 hci_cs_sniff_mode(hdev, ev->status);
3666 break;
3667
3668 case HCI_OP_EXIT_SNIFF_MODE:
3669 hci_cs_exit_sniff_mode(hdev, ev->status);
3670 break;
3671
3672 case HCI_OP_SWITCH_ROLE:
3673 hci_cs_switch_role(hdev, ev->status);
3674 break;
3675
3676 case HCI_OP_LE_CREATE_CONN:
3677 hci_cs_le_create_conn(hdev, ev->status);
3678 break;
3679
3680 case HCI_OP_LE_READ_REMOTE_FEATURES:
3681 hci_cs_le_read_remote_features(hdev, ev->status);
3682 break;
3683
3684 case HCI_OP_LE_START_ENC:
3685 hci_cs_le_start_enc(hdev, ev->status);
3686 break;
3687
3688 case HCI_OP_LE_EXT_CREATE_CONN:
3689 hci_cs_le_ext_create_conn(hdev, ev->status);
3690 break;
3691
3692 default:
3693 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3694 break;
3695 }
3696
3697 if (*opcode != HCI_OP_NOP)
3698 cancel_delayed_work(&hdev->cmd_timer);
3699
3700 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3701 atomic_set(&hdev->cmd_cnt, 1);
3702
3703 /* Indicate request completion if the command failed. Also, if
3704 * we're not waiting for a special event and we get a success
3705 * command status we should try to flag the request as completed
3706	 * (since for this kind of command there will not be a command
3707	 * complete event).
3708 */
3709 if (ev->status ||
3710 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3711 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3712 req_complete_skb);
3713
3714	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3715 bt_dev_err(hdev,
3716 "unexpected event for opcode 0x%4.4x", *opcode);
3717 return;
3718 }
3719
3720	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3721 queue_work(hdev->workqueue, &hdev->cmd_work);
3722}
3723
3724static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3725{
3726 struct hci_ev_hardware_error *ev = (void *) skb->data;
3727
3728 hdev->hw_error_code = ev->code;
3729
3730 queue_work(hdev->req_workqueue, &hdev->error_reset);
3731}
3732
3733static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3734{
3735 struct hci_ev_role_change *ev = (void *) skb->data;
3736 struct hci_conn *conn;
3737
3738 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3739
3740 hci_dev_lock(hdev);
3741
3742 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3743 if (conn) {
3744 if (!ev->status)
3745 conn->role = ev->role;
3746
3747 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3748
3749 hci_role_switch_cfm(conn, ev->status, ev->role);
3750 }
3751
3752 hci_dev_unlock(hdev);
3753}
3754
3755static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3756{
3757 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3758 int i;
3759
3760 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3761 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3762 return;
3763 }
3764
3765	if (skb->len < sizeof(*ev) ||
3766 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3767		BT_DBG("%s bad parameters", hdev->name);
3768 return;
3769 }
3770
3771 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3772
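	/* Each entry returns transmit credits for one connection handle;
	 * credit the matching per-link-type counter, clamped to the
	 * controller's advertised budget.
	 */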
3773 for (i = 0; i < ev->num_hndl; i++) {
3774 struct hci_comp_pkts_info *info = &ev->handles[i];
3775 struct hci_conn *conn;
3776 __u16 handle, count;
3777
3778 handle = __le16_to_cpu(info->handle);
3779 count = __le16_to_cpu(info->count);
3780
3781 conn = hci_conn_hash_lookup_handle(hdev, handle);
3782 if (!conn)
3783 continue;
3784
3785 conn->sent -= count;
3786
3787 switch (conn->type) {
3788 case ACL_LINK:
3789 hdev->acl_cnt += count;
3790 if (hdev->acl_cnt > hdev->acl_pkts)
3791 hdev->acl_cnt = hdev->acl_pkts;
3792 break;
3793
3794 case LE_LINK:
3795 if (hdev->le_pkts) {
3796 hdev->le_cnt += count;
3797 if (hdev->le_cnt > hdev->le_pkts)
3798 hdev->le_cnt = hdev->le_pkts;
3799 } else {
3800 hdev->acl_cnt += count;
3801 if (hdev->acl_cnt > hdev->acl_pkts)
3802 hdev->acl_cnt = hdev->acl_pkts;
3803 }
3804 break;
3805
3806 case SCO_LINK:
3807 hdev->sco_cnt += count;
3808 if (hdev->sco_cnt > hdev->sco_pkts)
3809 hdev->sco_cnt = hdev->sco_pkts;
3810 break;
3811
3812 default:
3813 bt_dev_err(hdev, "unknown type %d conn %p",
3814 conn->type, conn);
3815 break;
3816 }
3817 }
3818
3819 queue_work(hdev->workqueue, &hdev->tx_work);
3820}
3821
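/* On AMP controllers the handle identifies a logical channel rather
 * than a connection, so the lookup goes through the channel table.
 */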
3822static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3823 __u16 handle)
3824{
3825 struct hci_chan *chan;
3826
3827 switch (hdev->dev_type) {
3828 case HCI_PRIMARY:
3829 return hci_conn_hash_lookup_handle(hdev, handle);
3830 case HCI_AMP:
3831 chan = hci_chan_lookup_handle(hdev, handle);
3832 if (chan)
3833 return chan->conn;
3834 break;
3835 default:
3836 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3837 break;
3838 }
3839
3840 return NULL;
3841}
3842
3843static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3844{
3845 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3846 int i;
3847
3848 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3849 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3850 return;
3851 }
3852
3853	if (skb->len < sizeof(*ev) ||
3854 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3855		BT_DBG("%s bad parameters", hdev->name);
3856 return;
3857 }
3858
3859 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3860 ev->num_hndl);
3861
3862 for (i = 0; i < ev->num_hndl; i++) {
3863 struct hci_comp_blocks_info *info = &ev->handles[i];
3864 struct hci_conn *conn = NULL;
3865 __u16 handle, block_count;
3866
3867 handle = __le16_to_cpu(info->handle);
3868 block_count = __le16_to_cpu(info->blocks);
3869
3870 conn = __hci_conn_lookup_handle(hdev, handle);
3871 if (!conn)
3872 continue;
3873
3874 conn->sent -= block_count;
3875
3876 switch (conn->type) {
3877 case ACL_LINK:
3878 case AMP_LINK:
3879 hdev->block_cnt += block_count;
3880 if (hdev->block_cnt > hdev->num_blocks)
3881 hdev->block_cnt = hdev->num_blocks;
3882 break;
3883
3884 default:
3885 bt_dev_err(hdev, "unknown type %d conn %p",
3886 conn->type, conn);
3887 break;
3888 }
3889 }
3890
3891 queue_work(hdev->workqueue, &hdev->tx_work);
3892}
3893
3894static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3895{
3896 struct hci_ev_mode_change *ev = (void *) skb->data;
3897 struct hci_conn *conn;
3898
3899 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3900
3901 hci_dev_lock(hdev);
3902
3903 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3904 if (conn) {
3905 conn->mode = ev->mode;
3906
3907 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3908 &conn->flags)) {
3909 if (conn->mode == HCI_CM_ACTIVE)
3910 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3911 else
3912 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3913 }
3914
3915 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3916 hci_sco_setup(conn, ev->status);
3917 }
3918
3919 hci_dev_unlock(hdev);
3920}
3921
3922static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3923{
3924 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3925 struct hci_conn *conn;
3926
3927 BT_DBG("%s", hdev->name);
3928
3929 hci_dev_lock(hdev);
3930
3931 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3932 if (!conn)
3933 goto unlock;
3934
3935 if (conn->state == BT_CONNECTED) {
3936 hci_conn_hold(conn);
3937 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3938 hci_conn_drop(conn);
3939 }
3940
3941 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3942 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3943 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3944 sizeof(ev->bdaddr), &ev->bdaddr);
3945 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3946 u8 secure;
3947
3948 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3949 secure = 1;
3950 else
3951 secure = 0;
3952
3953 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3954 }
3955
3956unlock:
3957 hci_dev_unlock(hdev);
3958}
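/* Derive the pending security level from the link key type: combination
 * keys with a 16-digit PIN or authenticated (MITM-protected) keys map to
 * the higher security levels.
 */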
3959
3960static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3961{
3962 if (key_type == HCI_LK_CHANGED_COMBINATION)
3963 return;
3964
3965 conn->pin_length = pin_len;
3966 conn->key_type = key_type;
3967
3968 switch (key_type) {
3969 case HCI_LK_LOCAL_UNIT:
3970 case HCI_LK_REMOTE_UNIT:
3971 case HCI_LK_DEBUG_COMBINATION:
3972 return;
3973 case HCI_LK_COMBINATION:
3974 if (pin_len == 16)
3975 conn->pending_sec_level = BT_SECURITY_HIGH;
3976 else
3977 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3978 break;
3979 case HCI_LK_UNAUTH_COMBINATION_P192:
3980 case HCI_LK_UNAUTH_COMBINATION_P256:
3981 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3982 break;
3983 case HCI_LK_AUTH_COMBINATION_P192:
3984 conn->pending_sec_level = BT_SECURITY_HIGH;
3985 break;
3986 case HCI_LK_AUTH_COMBINATION_P256:
3987 conn->pending_sec_level = BT_SECURITY_FIPS;
3988 break;
3989 }
3990}
3991
3992static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3993{
3994 struct hci_ev_link_key_req *ev = (void *) skb->data;
3995 struct hci_cp_link_key_reply cp;
3996 struct hci_conn *conn;
3997 struct link_key *key;
3998
3999 BT_DBG("%s", hdev->name);
4000
4001 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4002 return;
4003
4004 hci_dev_lock(hdev);
4005
4006 key = hci_find_link_key(hdev, &ev->bdaddr);
4007 if (!key) {
4008 BT_DBG("%s link key not found for %pMR", hdev->name,
4009 &ev->bdaddr);
4010 goto not_found;
4011 }
4012
4013 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4014 &ev->bdaddr);
4015
4016 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4017 if (conn) {
4018 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4019
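		/* An unauthenticated key must not be used when the
		 * connection requires MITM protection.
		 */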
4020 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4021 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4022 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4023 BT_DBG("%s ignoring unauthenticated key", hdev->name);
4024 goto not_found;
4025 }
4026
4027 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4028 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4029 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4030 BT_DBG("%s ignoring key unauthenticated for high security",
4031 hdev->name);
4032 goto not_found;
4033 }
4034
4035 conn_set_key(conn, key->type, key->pin_len);
4036 }
4037
4038 bacpy(&cp.bdaddr, &ev->bdaddr);
4039 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4040
4041 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4042
4043 hci_dev_unlock(hdev);
4044
4045 return;
4046
4047not_found:
4048 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4049 hci_dev_unlock(hdev);
4050}
4051
4052static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4053{
4054 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4055 struct hci_conn *conn;
4056 struct link_key *key;
4057 bool persistent;
4058 u8 pin_len = 0;
4059
4060 BT_DBG("%s", hdev->name);
4061
4062 hci_dev_lock(hdev);
4063
4064 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4065 if (!conn)
4066 goto unlock;
4067
4068 hci_conn_hold(conn);
4069 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4070 hci_conn_drop(conn);
4071
4072 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4073 conn_set_key(conn, ev->key_type, conn->pin_length);
4074
4075 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4076 goto unlock;
4077
4078 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4079 ev->key_type, pin_len, &persistent);
4080 if (!key)
4081 goto unlock;
4082
4083 /* Update connection information since adding the key will have
4084 * fixed up the type in the case of changed combination keys.
4085 */
4086 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4087 conn_set_key(conn, key->type, key->pin_len);
4088
4089 mgmt_new_link_key(hdev, key, persistent);
4090
4091 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4092 * is set. If it's not set simply remove the key from the kernel
4093 * list (we've still notified user space about it but with
4094 * store_hint being 0).
4095 */
4096 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4097 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4098 list_del_rcu(&key->list);
4099 kfree_rcu(key, rcu);
4100 goto unlock;
4101 }
4102
4103 if (persistent)
4104 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4105 else
4106 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4107
4108unlock:
4109 hci_dev_unlock(hdev);
4110}
4111
4112static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4113{
4114 struct hci_ev_clock_offset *ev = (void *) skb->data;
4115 struct hci_conn *conn;
4116
4117 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4118
4119 hci_dev_lock(hdev);
4120
4121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4122 if (conn && !ev->status) {
4123 struct inquiry_entry *ie;
4124
4125 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4126 if (ie) {
4127 ie->data.clock_offset = ev->clock_offset;
4128 ie->timestamp = jiffies;
4129 }
4130 }
4131
4132 hci_dev_unlock(hdev);
4133}
4134
4135static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4136{
4137 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4138 struct hci_conn *conn;
4139
4140 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4141
4142 hci_dev_lock(hdev);
4143
4144 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4145 if (conn && !ev->status)
4146 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4147
4148 hci_dev_unlock(hdev);
4149}
4150
4151static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4152{
4153 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4154 struct inquiry_entry *ie;
4155
4156 BT_DBG("%s", hdev->name);
4157
4158 hci_dev_lock(hdev);
4159
4160 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4161 if (ie) {
4162 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4163 ie->timestamp = jiffies;
4164 }
4165
4166 hci_dev_unlock(hdev);
4167}
4168
4169static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4170 struct sk_buff *skb)
4171{
4172 struct inquiry_data data;
4173 int num_rsp = *((__u8 *) skb->data);
4174
4175 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4176
4177 if (!num_rsp)
4178 return;
4179
4180 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4181 return;
4182
4183 hci_dev_lock(hdev);
4184
4185 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4186 struct inquiry_info_with_rssi_and_pscan_mode *info;
4187 info = (void *) (skb->data + 1);
4188
4189 if (skb->len < num_rsp * sizeof(*info) + 1)
4190 goto unlock;
4191
4192 for (; num_rsp; num_rsp--, info++) {
4193 u32 flags;
4194
4195 bacpy(&data.bdaddr, &info->bdaddr);
4196 data.pscan_rep_mode = info->pscan_rep_mode;
4197 data.pscan_period_mode = info->pscan_period_mode;
4198 data.pscan_mode = info->pscan_mode;
4199 memcpy(data.dev_class, info->dev_class, 3);
4200 data.clock_offset = info->clock_offset;
4201 data.rssi = info->rssi;
4202 data.ssp_mode = 0x00;
4203
4204 flags = hci_inquiry_cache_update(hdev, &data, false);
4205
4206 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4207 info->dev_class, info->rssi,
4208 flags, NULL, 0, NULL, 0);
4209 }
4210 } else {
4211 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4212
4213 if (skb->len < num_rsp * sizeof(*info) + 1)
4214 goto unlock;
4215
4216 for (; num_rsp; num_rsp--, info++) {
4217 u32 flags;
4218
4219 bacpy(&data.bdaddr, &info->bdaddr);
4220 data.pscan_rep_mode = info->pscan_rep_mode;
4221 data.pscan_period_mode = info->pscan_period_mode;
4222 data.pscan_mode = 0x00;
4223 memcpy(data.dev_class, info->dev_class, 3);
4224 data.clock_offset = info->clock_offset;
4225 data.rssi = info->rssi;
4226 data.ssp_mode = 0x00;
4227
4228 flags = hci_inquiry_cache_update(hdev, &data, false);
4229
4230 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4231 info->dev_class, info->rssi,
4232 flags, NULL, 0, NULL, 0);
4233 }
4234 }
4235
4236unlock:
4237 hci_dev_unlock(hdev);
4238}
4239
4240static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4241 struct sk_buff *skb)
4242{
4243 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4244 struct hci_conn *conn;
4245
4246 BT_DBG("%s", hdev->name);
4247
4248 hci_dev_lock(hdev);
4249
4250 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4251 if (!conn)
4252 goto unlock;
4253
4254 if (ev->page < HCI_MAX_PAGES)
4255 memcpy(conn->features[ev->page], ev->features, 8);
4256
4257 if (!ev->status && ev->page == 0x01) {
4258 struct inquiry_entry *ie;
4259
4260 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4261 if (ie)
4262 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4263
4264 if (ev->features[0] & LMP_HOST_SSP) {
4265 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4266 } else {
4267 /* The Bluetooth specification mandates that
4268 * Extended Inquiry Results are only used when Secure
4269 * Simple Pairing is enabled, but some devices violate
4270 * this.
4271 *
4272 * To make these devices work, the internal SSP
4273 * enabled flag needs to be cleared if the remote host
4274 * features do not indicate SSP support */
4275 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4276 }
4277
4278 if (ev->features[0] & LMP_HOST_SC)
4279 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4280 }
4281
4282 if (conn->state != BT_CONFIG)
4283 goto unlock;
4284
4285 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4286 struct hci_cp_remote_name_req cp;
4287 memset(&cp, 0, sizeof(cp));
4288 bacpy(&cp.bdaddr, &conn->dst);
4289 cp.pscan_rep_mode = 0x02;
4290 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4291 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4292 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4293
4294 if (!hci_outgoing_auth_needed(hdev, conn)) {
4295 conn->state = BT_CONNECTED;
4296 hci_connect_cfm(conn, ev->status);
4297 hci_conn_drop(conn);
4298 }
4299
4300unlock:
4301 hci_dev_unlock(hdev);
4302}
4303
4304static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4305 struct sk_buff *skb)
4306{
4307 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4308 struct hci_conn *conn;
4309
4310 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4311
4312 hci_dev_lock(hdev);
4313
4314 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4315 if (!conn) {
4316 if (ev->link_type == ESCO_LINK)
4317 goto unlock;
4318
4319 /* When the link type in the event indicates SCO connection
4320 * and lookup of the connection object fails, then check
4321 * if an eSCO connection object exists.
4322 *
4323 * The core limits the synchronous connections to either
4324 * SCO or eSCO. The eSCO connection is preferred, so it is
4325 * attempted first; until it is successfully established,
4326 * the link type will be hinted as eSCO.
4327 */
4328 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4329 if (!conn)
4330 goto unlock;
4331 }
4332
4333 switch (ev->status) {
4334 case 0x00:
4335 /* The synchronous connection complete event should only be
4336 * sent once per new connection. Receiving a successful
4337 * complete event when the connection status is already
4338 * BT_CONNECTED means that the device is misbehaving and sent
4339 * multiple complete event packets for the same new connection.
4340 *
4341 * Registering the device more than once can corrupt kernel
4342 * memory, hence upon detecting this invalid event, we report
4343 * an error and ignore the packet.
4344 */
4345 if (conn->state == BT_CONNECTED) {
4346 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4347 goto unlock;
4348 }
4349
4350 conn->handle = __le16_to_cpu(ev->handle);
4351 conn->state = BT_CONNECTED;
4352 conn->type = ev->link_type;
4353
4354 hci_debugfs_create_conn(conn);
4355 hci_conn_add_sysfs(conn);
4356 break;
4357
4358 case 0x10: /* Connection Accept Timeout */
4359 case 0x0d: /* Connection Rejected due to Limited Resources */
4360 case 0x11: /* Unsupported Feature or Parameter Value */
4361 case 0x1c: /* SCO interval rejected */
4362 case 0x1a: /* Unsupported Remote Feature */
4363 case 0x1e: /* Invalid LMP Parameters */
4364 case 0x1f: /* Unspecified error */
4365 case 0x20: /* Unsupported LMP Parameter value */
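 /* Retry the setup: restrict the packet type to what the local
  * controller supports; hci_setup_sync() is expected to fall
  * back towards plain SCO parameters on subsequent attempts.
  */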
4366 if (conn->out) {
4367 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4368 (hdev->esco_type & EDR_ESCO_MASK);
4369 if (hci_setup_sync(conn, conn->link->handle))
4370 goto unlock;
4371 }
4372 fallthrough;
4373
4374 default:
4375 conn->state = BT_CLOSED;
4376 break;
4377 }
4378
4379 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
4380
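 /* Air mode 0x02 is CVSD and 0x03 is transparent data (typically
  * used for wideband speech such as mSBC); notify the driver so
  * it can route SCO audio accordingly.
  */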
4381 switch (ev->air_mode) {
4382 case 0x02:
4383 if (hdev->notify)
4384 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4385 break;
4386 case 0x03:
4387 if (hdev->notify)
4388 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4389 break;
4390 }
4391
4392 hci_connect_cfm(conn, ev->status);
4393 if (ev->status)
4394 hci_conn_del(conn);
4395
4396unlock:
4397 hci_dev_unlock(hdev);
4398}
4399
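/* Walk the EIR TLV stream: each field starts with a length octet
 * followed by that many octets (AD type plus payload), and a zero
 * length octet terminates the list early.
 */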
4400static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4401{
4402 size_t parsed = 0;
4403
4404 while (parsed < eir_len) {
4405 u8 field_len = eir[0];
4406
4407 if (field_len == 0)
4408 return parsed;
4409
4410 parsed += field_len + 1;
4411 eir += field_len + 1;
4412 }
4413
4414 return eir_len;
4415}
4416
4417static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4418 struct sk_buff *skb)
4419{
4420 struct inquiry_data data;
4421 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4422 int num_rsp = *((__u8 *) skb->data);
4423 size_t eir_len;
4424
4425 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4426
4427 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4428 return;
4429
4430 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4431 return;
4432
4433 hci_dev_lock(hdev);
4434
4435 for (; num_rsp; num_rsp--, info++) {
4436 u32 flags;
4437 bool name_known;
4438
4439 bacpy(&data.bdaddr, &info->bdaddr);
4440 data.pscan_rep_mode = info->pscan_rep_mode;
4441 data.pscan_period_mode = info->pscan_period_mode;
4442 data.pscan_mode = 0x00;
4443 memcpy(data.dev_class, info->dev_class, 3);
4444 data.clock_offset = info->clock_offset;
4445 data.rssi = info->rssi;
4446 data.ssp_mode = 0x01;
4447
4448 if (hci_dev_test_flag(hdev, HCI_MGMT))
4449 name_known = eir_get_data(info->data,
4450 sizeof(info->data),
4451 EIR_NAME_COMPLETE, NULL);
4452 else
4453 name_known = true;
4454
4455 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4456
4457 eir_len = eir_get_length(info->data, sizeof(info->data));
4458
4459 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4460 info->dev_class, info->rssi,
4461 flags, info->data, eir_len, NULL, 0);
4462 }
4463
4464 hci_dev_unlock(hdev);
4465}
4466
4467static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4468 struct sk_buff *skb)
4469{
4470 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4471 struct hci_conn *conn;
4472
4473 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4474 __le16_to_cpu(ev->handle));
4475
4476 hci_dev_lock(hdev);
4477
4478 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4479 if (!conn)
4480 goto unlock;
4481
4482 /* For BR/EDR the necessary steps are taken through the
4483 * auth_complete event.
4484 */
4485 if (conn->type != LE_LINK)
4486 goto unlock;
4487
4488 if (!ev->status)
4489 conn->sec_level = conn->pending_sec_level;
4490
4491 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4492
4493 if (ev->status && conn->state == BT_CONNECTED) {
4494 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4495 hci_conn_drop(conn);
4496 goto unlock;
4497 }
4498
4499 if (conn->state == BT_CONFIG) {
4500 if (!ev->status)
4501 conn->state = BT_CONNECTED;
4502
4503 hci_connect_cfm(conn, ev->status);
4504 hci_conn_drop(conn);
4505 } else {
4506 hci_auth_cfm(conn, ev->status);
4507
4508 hci_conn_hold(conn);
4509 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4510 hci_conn_drop(conn);
4511 }
4512
4513unlock:
4514 hci_dev_unlock(hdev);
4515}
4516
4517static u8 hci_get_auth_req(struct hci_conn *conn)
4518{
4519 /* If remote requests no-bonding follow that lead */
4520 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4521 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4522 return conn->remote_auth | (conn->auth_type & 0x01);
4523
4524 /* If both remote and local have enough IO capabilities, require
4525 * MITM protection
4526 */
4527 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4528 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4529 return conn->remote_auth | 0x01;
4530
4531 /* No MITM protection possible so ignore remote requirement */
4532 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4533}
4534
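/* Compute the OOB Data Present value for the IO Capability Reply:
 * 0x00 when no usable OOB data is stored, 0x01 when only the
 * P-192 values apply, and 0x02 when the P-256 values are usable
 * (Secure Connections). With SC enabled outside SC-only mode, the
 * value stored with the OOB data is returned as-is.
 */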
4535static u8 bredr_oob_data_present(struct hci_conn *conn)
4536{
4537 struct hci_dev *hdev = conn->hdev;
4538 struct oob_data *data;
4539
4540 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4541 if (!data)
4542 return 0x00;
4543
4544 if (bredr_sc_enabled(hdev)) {
4545 /* When Secure Connections is enabled, then just
4546 * return the present value stored with the OOB
4547 * data. The stored value contains the right present
4548 * information. However it can only be trusted when
4549 * not in Secure Connection Only mode.
4550 */
4551 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4552 return data->present;
4553
4554 /* When Secure Connections Only mode is enabled, then
4555 * the P-256 values are required. If they are not
4556 * available, then do not declare that OOB data is
4557 * present.
4558 */
4559 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4560 !memcmp(data->hash256, ZERO_KEY, 16))
4561 return 0x00;
4562
4563 return 0x02;
4564 }
4565
4566 /* When Secure Connections is not enabled or not actually
4567 * supported by the hardware, check whether the P-192 data
4568 * values are present.
4569 */
4570 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4571 !memcmp(data->hash192, ZERO_KEY, 16))
4572 return 0x00;
4573
4574 return 0x01;
4575}
4576
4577static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4578{
4579 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4580 struct hci_conn *conn;
4581
4582 BT_DBG("%s", hdev->name);
4583
4584 hci_dev_lock(hdev);
4585
4586 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4587 if (!conn)
4588 goto unlock;
4589
4590 hci_conn_hold(conn);
4591
4592 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4593 goto unlock;
4594
4595 /* Allow pairing if we're bondable, if we're the initiator of
4596 * the pairing, or if the remote is not requesting bonding.
4597 */
4598 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4599 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4600 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4601 struct hci_cp_io_capability_reply cp;
4602
4603 bacpy(&cp.bdaddr, &ev->bdaddr);
4604 /* Change the IO capability from KeyboardDisplay to DisplayYesNo,
4605 * as KeyboardDisplay is not supported by the BT spec here. */
4606 cp.capability = (conn->io_capability == 0x04) ?
4607 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4608
4609 /* If we are initiators, there is no remote information yet */
4610 if (conn->remote_auth == 0xff) {
4611 /* Request MITM protection if our IO caps allow it
4612 * except for the no-bonding case.
4613 */
4614 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4615 conn->auth_type != HCI_AT_NO_BONDING)
4616 conn->auth_type |= 0x01;
4617 } else {
4618 conn->auth_type = hci_get_auth_req(conn);
4619 }
4620
4621 /* If we're not bondable, force one of the non-bondable
4622 * authentication requirement values.
4623 */
4624 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4625 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4626
4627 cp.authentication = conn->auth_type;
4628 cp.oob_data = bredr_oob_data_present(conn);
4629
4630 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4631 sizeof(cp), &cp);
4632 } else {
4633 struct hci_cp_io_capability_neg_reply cp;
4634
4635 bacpy(&cp.bdaddr, &ev->bdaddr);
4636 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4637
4638 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4639 sizeof(cp), &cp);
4640 }
4641
4642unlock:
4643 hci_dev_unlock(hdev);
4644}
4645
4646static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4647{
4648 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4649 struct hci_conn *conn;
4650
4651 BT_DBG("%s", hdev->name);
4652
4653 hci_dev_lock(hdev);
4654
4655 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4656 if (!conn)
4657 goto unlock;
4658
4659 conn->remote_cap = ev->capability;
4660 conn->remote_auth = ev->authentication;
4661
4662unlock:
4663 hci_dev_unlock(hdev);
4664}
4665
4666static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4667 struct sk_buff *skb)
4668{
4669 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4670 int loc_mitm, rem_mitm, confirm_hint = 0;
4671 struct hci_conn *conn;
4672
4673 BT_DBG("%s", hdev->name);
4674
4675 hci_dev_lock(hdev);
4676
4677 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4678 goto unlock;
4679
4680 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4681 if (!conn)
4682 goto unlock;
4683
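 /* Bit 0 of the authentication requirement encodes whether each
  * side demands MITM protection.
  */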
4684 loc_mitm = (conn->auth_type & 0x01);
4685 rem_mitm = (conn->remote_auth & 0x01);
4686
4687 /* If we require MITM but the remote device can't provide that
4688 * (it has NoInputNoOutput) then reject the confirmation
4689 * request. We check the security level here since it doesn't
4690 * necessarily match conn->auth_type.
4691 */
4692 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4693 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4694 BT_DBG("Rejecting request: remote device can't provide MITM");
4695 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4696 sizeof(ev->bdaddr), &ev->bdaddr);
4697 goto unlock;
4698 }
4699
4700 /* If no side requires MITM protection; auto-accept */
4701 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4702 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4703
4704 /* If we're not the initiator, request authorization to
4705 * proceed from user space (mgmt_user_confirm with
4706 * confirm_hint set to 1). The exception is if neither
4707 * side had MITM or if the local IO capability is
4708 * NoInputNoOutput, in which case we do auto-accept
4709 */
4710 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4711 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4712 (loc_mitm || rem_mitm)) {
4713 BT_DBG("Confirming auto-accept as acceptor");
4714 confirm_hint = 1;
4715 goto confirm;
4716 }
4717
4718 /* If a link key already exists in the local host, leave the
4719 * decision to user space since the remote device could be
4720 * legitimate or malicious.
4721 */
4722 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4723 bt_dev_dbg(hdev, "Local host already has link key");
4724 confirm_hint = 1;
4725 goto confirm;
4726 }
4727
4728 BT_DBG("Auto-accept of user confirmation with %ums delay",
4729 hdev->auto_accept_delay);
4730
4731 if (hdev->auto_accept_delay > 0) {
4732 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4733 queue_delayed_work(conn->hdev->workqueue,
4734 &conn->auto_accept_work, delay);
4735 goto unlock;
4736 }
4737
4738 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4739 sizeof(ev->bdaddr), &ev->bdaddr);
4740 goto unlock;
4741 }
4742
4743confirm:
4744 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4745 le32_to_cpu(ev->passkey), confirm_hint);
4746
4747unlock:
4748 hci_dev_unlock(hdev);
4749}
4750
4751static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4752 struct sk_buff *skb)
4753{
4754 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4755
4756 BT_DBG("%s", hdev->name);
4757
4758 if (hci_dev_test_flag(hdev, HCI_MGMT))
4759 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4760}
4761
4762static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4763 struct sk_buff *skb)
4764{
4765 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4766 struct hci_conn *conn;
4767
4768 BT_DBG("%s", hdev->name);
4769
4770 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4771 if (!conn)
4772 return;
4773
4774 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4775 conn->passkey_entered = 0;
4776
4777 if (hci_dev_test_flag(hdev, HCI_MGMT))
4778 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4779 conn->dst_type, conn->passkey_notify,
4780 conn->passkey_entered);
4781}
4782
4783static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4784{
4785 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4786 struct hci_conn *conn;
4787
4788 BT_DBG("%s", hdev->name);
4789
4790 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4791 if (!conn)
4792 return;
4793
4794 switch (ev->type) {
4795 case HCI_KEYPRESS_STARTED:
4796 conn->passkey_entered = 0;
4797 return;
4798
4799 case HCI_KEYPRESS_ENTERED:
4800 conn->passkey_entered++;
4801 break;
4802
4803 case HCI_KEYPRESS_ERASED:
4804 conn->passkey_entered--;
4805 break;
4806
4807 case HCI_KEYPRESS_CLEARED:
4808 conn->passkey_entered = 0;
4809 break;
4810
4811 case HCI_KEYPRESS_COMPLETED:
4812 return;
4813 }
4814
4815 if (hci_dev_test_flag(hdev, HCI_MGMT))
4816 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4817 conn->dst_type, conn->passkey_notify,
4818 conn->passkey_entered);
4819}
4820
4821static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4822 struct sk_buff *skb)
4823{
4824 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4825 struct hci_conn *conn;
4826
4827 BT_DBG("%s", hdev->name);
4828
4829 hci_dev_lock(hdev);
4830
4831 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4832 if (!conn)
4833 goto unlock;
4834
4835 /* Reset the authentication requirement to unknown */
4836 conn->remote_auth = 0xff;
4837
4838 /* To avoid duplicate auth_failed events to user space we check
4839 * the HCI_CONN_AUTH_PEND flag which will be set if we
4840 * initiated the authentication. A traditional auth_complete
4841 * event is always produced as initiator and is also mapped to
4842 * the mgmt_auth_failed event */
4843 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4844 mgmt_auth_failed(conn, ev->status);
4845
4846 hci_conn_drop(conn);
4847
4848unlock:
4849 hci_dev_unlock(hdev);
4850}
4851
4852static void hci_remote_host_features_evt(struct hci_dev *hdev,
4853 struct sk_buff *skb)
4854{
4855 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4856 struct inquiry_entry *ie;
4857 struct hci_conn *conn;
4858
4859 BT_DBG("%s", hdev->name);
4860
4861 hci_dev_lock(hdev);
4862
4863 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4864 if (conn)
4865 memcpy(conn->features[1], ev->features, 8);
4866
4867 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4868 if (ie)
4869 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4870
4871 hci_dev_unlock(hdev);
4872}
4873
4874static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4875 struct sk_buff *skb)
4876{
4877 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4878 struct oob_data *data;
4879
4880 BT_DBG("%s", hdev->name);
4881
4882 hci_dev_lock(hdev);
4883
4884 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4885 goto unlock;
4886
4887 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4888 if (!data) {
4889 struct hci_cp_remote_oob_data_neg_reply cp;
4890
4891 bacpy(&cp.bdaddr, &ev->bdaddr);
4892 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4893 sizeof(cp), &cp);
4894 goto unlock;
4895 }
4896
4897 if (bredr_sc_enabled(hdev)) {
4898 struct hci_cp_remote_oob_ext_data_reply cp;
4899
4900 bacpy(&cp.bdaddr, &ev->bdaddr);
4901 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4902 memset(cp.hash192, 0, sizeof(cp.hash192));
4903 memset(cp.rand192, 0, sizeof(cp.rand192));
4904 } else {
4905 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4906 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4907 }
4908 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4909 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4910
4911 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4912 sizeof(cp), &cp);
4913 } else {
4914 struct hci_cp_remote_oob_data_reply cp;
4915
4916 bacpy(&cp.bdaddr, &ev->bdaddr);
4917 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4918 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4919
4920 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4921 sizeof(cp), &cp);
4922 }
4923
4924unlock:
4925 hci_dev_unlock(hdev);
4926}
4927
4928#if IS_ENABLED(CONFIG_BT_HS)
4929static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4930{
4931 struct hci_ev_channel_selected *ev = (void *)skb->data;
4932 struct hci_conn *hcon;
4933
4934 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4935
4936 skb_pull(skb, sizeof(*ev));
4937
4938 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4939 if (!hcon)
4940 return;
4941
4942 amp_read_loc_assoc_final_data(hdev, hcon);
4943}
4944
4945static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4946 struct sk_buff *skb)
4947{
4948 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4949 struct hci_conn *hcon, *bredr_hcon;
4950
4951 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4952 ev->status);
4953
4954 hci_dev_lock(hdev);
4955
4956 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4957 if (!hcon) {
4958 hci_dev_unlock(hdev);
4959 return;
4960 }
4961
4962 if (!hcon->amp_mgr) {
4963 hci_dev_unlock(hdev);
4964 return;
4965 }
4966
4967 if (ev->status) {
4968 hci_conn_del(hcon);
4969 hci_dev_unlock(hdev);
4970 return;
4971 }
4972
4973 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4974
4975 hcon->state = BT_CONNECTED;
4976 bacpy(&hcon->dst, &bredr_hcon->dst);
4977
4978 hci_conn_hold(hcon);
4979 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4980 hci_conn_drop(hcon);
4981
4982 hci_debugfs_create_conn(hcon);
4983 hci_conn_add_sysfs(hcon);
4984
4985 amp_physical_cfm(bredr_hcon, hcon);
4986
4987 hci_dev_unlock(hdev);
4988}
4989
4990static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4991{
4992 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4993 struct hci_conn *hcon;
4994 struct hci_chan *hchan;
4995 struct amp_mgr *mgr;
4996
4997 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4998 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4999 ev->status);
5000
5001 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5002 if (!hcon)
5003 return;
5004
5005 /* Create AMP hchan */
5006 hchan = hci_chan_create(hcon);
5007 if (!hchan)
5008 return;
5009
5010 hchan->handle = le16_to_cpu(ev->handle);
5011 hchan->amp = true;
5012
5013 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5014
5015 mgr = hcon->amp_mgr;
5016 if (mgr && mgr->bredr_chan) {
5017 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5018
5019 l2cap_chan_lock(bredr_chan);
5020
5021 bredr_chan->conn->mtu = hdev->block_mtu;
5022 l2cap_logical_cfm(bredr_chan, hchan, 0);
5023 hci_conn_hold(hcon);
5024
5025 l2cap_chan_unlock(bredr_chan);
5026 }
5027}
5028
5029static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5030 struct sk_buff *skb)
5031{
5032 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5033 struct hci_chan *hchan;
5034
5035 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5036 le16_to_cpu(ev->handle), ev->status);
5037
5038 if (ev->status)
5039 return;
5040
5041 hci_dev_lock(hdev);
5042
5043 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5044 if (!hchan || !hchan->amp)
5045 goto unlock;
5046
5047 amp_destroy_logical_link(hchan, ev->reason);
5048
5049unlock:
5050 hci_dev_unlock(hdev);
5051}
5052
5053static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5054 struct sk_buff *skb)
5055{
5056 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5057 struct hci_conn *hcon;
5058
5059 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5060
5061 if (ev->status)
5062 return;
5063
5064 hci_dev_lock(hdev);
5065
5066 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5067 if (hcon && hcon->type == AMP_LINK) {
5068 hcon->state = BT_CLOSED;
5069 hci_disconn_cfm(hcon, ev->reason);
5070 hci_conn_del(hcon);
5071 }
5072
5073 hci_dev_unlock(hdev);
5074}
5075#endif
5076
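/* Fill in the initiator/responder address pair for a new LE
 * connection: for outgoing connections we are the initiator and
 * the peer the responder, for incoming ones the roles are
 * reversed. A controller-provided Local RPA, when set, takes
 * precedence over hdev->rpa.
 */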
5077static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5078 u8 bdaddr_type, bdaddr_t *local_rpa)
5079{
5080 if (conn->out) {
5081 conn->dst_type = bdaddr_type;
5082 conn->resp_addr_type = bdaddr_type;
5083 bacpy(&conn->resp_addr, bdaddr);
5084
5085 /* If the controller has set a Local RPA, it must be used
5086 * instead of hdev->rpa.
5087 */
5088 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5089 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5090 bacpy(&conn->init_addr, local_rpa);
5091 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5092 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5093 bacpy(&conn->init_addr, &conn->hdev->rpa);
5094 } else {
5095 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5096 &conn->init_addr_type);
5097 }
5098 } else {
5099 conn->resp_addr_type = conn->hdev->adv_addr_type;
5100 /* If the controller has set a Local RPA, it must be used
5101 * instead of hdev->rpa.
5102 */
5103 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5104 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5105 bacpy(&conn->resp_addr, local_rpa);
5106 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5107 /* In case of ext adv, resp_addr will be updated in
5108 * Adv Terminated event.
5109 */
5110 if (!ext_adv_capable(conn->hdev))
5111 bacpy(&conn->resp_addr,
5112 &conn->hdev->random_addr);
5113 } else {
5114 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5115 }
5116
5117 conn->init_addr_type = bdaddr_type;
5118 bacpy(&conn->init_addr, bdaddr);
5119
5120 /* For incoming connections, set the default minimum
5121 * and maximum connection interval. They will be used
5122 * to check if the parameters are in range and if not
5123 * trigger the connection update procedure.
5124 */
5125 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5126 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5127 }
5128}
5129
5130static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5131 bdaddr_t *bdaddr, u8 bdaddr_type,
5132 bdaddr_t *local_rpa, u8 role, u16 handle,
5133 u16 interval, u16 latency,
5134 u16 supervision_timeout)
5135{
5136 struct hci_conn_params *params;
5137 struct hci_conn *conn;
5138 struct smp_irk *irk;
5139 u8 addr_type;
5140
5141 hci_dev_lock(hdev);
5142
5143 /* All controllers implicitly stop advertising in the event of a
5144 * connection, so ensure that the state bit is cleared.
5145 */
5146 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5147
5148 conn = hci_lookup_le_connect(hdev);
5149 if (!conn) {
5150 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5151 if (!conn) {
5152 bt_dev_err(hdev, "no memory for new connection");
5153 goto unlock;
5154 }
5155
5156 conn->dst_type = bdaddr_type;
5157
5158 /* If we didn't have a hci_conn object previously
5159 * but we're in the central role, this must be something
5160 * initiated using an accept list. Since accept list based
5161 * connections are not "first class citizens" we don't
5162 * have full tracking of them. Therefore, we go ahead
5163 * with a "best effort" approach of determining the
5164 * initiator address based on the HCI_PRIVACY flag.
5165 */
5166 if (conn->out) {
5167 conn->resp_addr_type = bdaddr_type;
5168 bacpy(&conn->resp_addr, bdaddr);
5169 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5170 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5171 bacpy(&conn->init_addr, &hdev->rpa);
5172 } else {
5173 hci_copy_identity_address(hdev,
5174 &conn->init_addr,
5175 &conn->init_addr_type);
5176 }
5177 }
5178 } else {
5179 cancel_delayed_work(&conn->le_conn_timeout);
5180 }
5181
5182 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5183
5184 /* Lookup the identity address from the stored connection
5185 * address and address type.
5186 *
5187 * When establishing connections to an identity address, the
5188 * connection procedure will store the resolvable random
5189 * address first. Now if it can be converted back into the
5190 * identity address, start using the identity address from
5191 * now on.
5192 */
5193 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5194 if (irk) {
5195 bacpy(&conn->dst, &irk->bdaddr);
5196 conn->dst_type = irk->addr_type;
5197 }
5198
5199 if (status) {
5200 hci_le_conn_failed(conn, status);
5201 goto unlock;
5202 }
5203
5204 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5205 addr_type = BDADDR_LE_PUBLIC;
5206 else
5207 addr_type = BDADDR_LE_RANDOM;
5208
5209 /* Drop the connection if the device is blocked */
5210 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5211 hci_conn_drop(conn);
5212 goto unlock;
5213 }
5214
5215 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5216 mgmt_device_connected(hdev, conn, 0, NULL, 0);
5217
5218 conn->sec_level = BT_SECURITY_LOW;
5219 conn->handle = handle;
5220 conn->state = BT_CONFIG;
5221
5222 conn->le_conn_interval = interval;
5223 conn->le_conn_latency = latency;
5224 conn->le_supv_timeout = supervision_timeout;
5225
5226 hci_debugfs_create_conn(conn);
5227 hci_conn_add_sysfs(conn);
5228
5229 /* The remote features procedure is defined for the master
5230 * role only, so the remote features are only requested for
5231 * connections that we ourselves initiated.
5232 *
5233 * If the local controller supports slave-initiated features
5234 * exchange, then requesting the remote features in slave
5235 * role is possible. Otherwise just transition into the
5236 * connected state without requesting the remote features.
5237 */
5238 if (conn->out ||
5239 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
5240 struct hci_cp_le_read_remote_features cp;
5241
5242 cp.handle = __cpu_to_le16(conn->handle);
5243
5244 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5245 sizeof(cp), &cp);
5246
5247 hci_conn_hold(conn);
5248 } else {
5249 conn->state = BT_CONNECTED;
5250 hci_connect_cfm(conn, status);
5251 }
5252
5253 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5254 conn->dst_type);
5255 if (params) {
5256 list_del_init(&params->action);
5257 if (params->conn) {
5258 hci_conn_drop(params->conn);
5259 hci_conn_put(params->conn);
5260 params->conn = NULL;
5261 }
5262 }
5263
5264unlock:
5265 hci_update_background_scan(hdev);
5266 hci_dev_unlock(hdev);
5267}
5268
5269static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5270{
5271 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5272
5273 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5274
5275 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5276 NULL, ev->role, le16_to_cpu(ev->handle),
5277 le16_to_cpu(ev->interval),
5278 le16_to_cpu(ev->latency),
5279 le16_to_cpu(ev->supervision_timeout));
5280}
5281
5282static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5283 struct sk_buff *skb)
5284{
5285 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5286
5287 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5288
5289 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5290 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5291 le16_to_cpu(ev->interval),
5292 le16_to_cpu(ev->latency),
5293 le16_to_cpu(ev->supervision_timeout));
5294
5295 if (use_ll_privacy(hdev) &&
5296 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5297 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5298 hci_req_disable_address_resolution(hdev);
5299}
5300
5301static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5302{
5303 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5304 struct hci_conn *conn;
5305
5306 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5307
5308 if (ev->status) {
5309 struct adv_info *adv;
5310
5311 adv = hci_find_adv_instance(hdev, ev->handle);
5312 if (!adv)
5313 return;
5314
5315 /* Remove advertising as it has been terminated */
5316 hci_remove_adv_instance(hdev, ev->handle);
5317 mgmt_advertising_removed(NULL, hdev, ev->handle);
5318
5319 return;
5320 }
5321
5322 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5323 if (conn) {
5324 struct adv_info *adv_instance;
5325
5326 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5327 bacmp(&conn->resp_addr, BDADDR_ANY))
5328 return;
5329
5330 if (!hdev->cur_adv_instance) {
5331 bacpy(&conn->resp_addr, &hdev->random_addr);
5332 return;
5333 }
5334
5335 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5336 if (adv_instance)
5337 bacpy(&conn->resp_addr, &adv_instance->random_addr);
5338 }
5339}
5340
5341static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5342 struct sk_buff *skb)
5343{
5344 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5345 struct hci_conn *conn;
5346
5347 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5348
5349 if (ev->status)
5350 return;
5351
5352 hci_dev_lock(hdev);
5353
5354 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5355 if (conn) {
5356 conn->le_conn_interval = le16_to_cpu(ev->interval);
5357 conn->le_conn_latency = le16_to_cpu(ev->latency);
5358 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5359 }
5360
5361 hci_dev_unlock(hdev);
5362}
5363
5364/* This function requires the caller holds hdev->lock */
5365static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5366 bdaddr_t *addr,
5367 u8 addr_type, u8 adv_type,
5368 bdaddr_t *direct_rpa)
5369{
5370 struct hci_conn *conn;
5371 struct hci_conn_params *params;
5372
5373 /* If the event is not connectable don't proceed further */
5374 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5375 return NULL;
5376
5377 /* Ignore if the device is blocked */
5378 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5379 return NULL;
5380
5381 /* Most controllers will fail if we try to create new connections
5382 * while we have an existing one in slave role.
5383 */
5384 if (hdev->conn_hash.le_num_slave > 0 &&
5385 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5386 !(hdev->le_states[3] & 0x10)))
5387 return NULL;
5388
5389 /* If we're not connectable only connect devices that we have in
5390 * our pend_le_conns list.
5391 */
5392 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5393 addr_type);
5394 if (!params)
5395 return NULL;
5396
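 /* Explicitly requested connections are managed by the higher
  * layer; the auto-connect policy below only filters background
  * connection attempts.
  */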
5397 if (!params->explicit_connect) {
5398 switch (params->auto_connect) {
5399 case HCI_AUTO_CONN_DIRECT:
5400 /* Only devices advertising with ADV_DIRECT_IND trigger a
5401 * connection attempt. This allows incoming connections
5402 * from slave devices.
5403 */
5404 if (adv_type != LE_ADV_DIRECT_IND)
5405 return NULL;
5406 break;
5407 case HCI_AUTO_CONN_ALWAYS:
5408 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5409 * trigger a connection attempt. This means that
5410 * incoming connections from slave devices are
5411 * accepted and also outgoing connections to slave
5412 * devices are established when found.
5413 */
5414 break;
5415 default:
5416 return NULL;
5417 }
5418 }
5419
5420 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5421 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5422 direct_rpa);
5423 if (!IS_ERR(conn)) {
5424 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5425 * by the higher layer that tried to connect; if not,
5426 * store the pointer since we don't really have any
5427 * other owner of the object besides the params that
5428 * triggered it. This way we can abort the connection if
5429 * the parameters get removed and keep the reference
5430 * count consistent once the connection is established.
5431 */
5432
5433 if (!params->explicit_connect)
5434 params->conn = hci_conn_get(conn);
5435
5436 return conn;
5437 }
5438
5439 switch (PTR_ERR(conn)) {
5440 case -EBUSY:
5441 /* If hci_connect() returns -EBUSY it means there is already
5442 * an LE connection attempt going on. Since controllers don't
5443 * support more than one connection attempt at the time, we
5444 * don't consider this an error case.
5445 */
5446 break;
5447 default:
5448 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5449 return NULL;
5450 }
5451
5452 return NULL;
5453}
5454
5455static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5456 u8 bdaddr_type, bdaddr_t *direct_addr,
5457 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5458 bool ext_adv)
5459{
5460 struct discovery_state *d = &hdev->discovery;
5461 struct smp_irk *irk;
5462 struct hci_conn *conn;
5463 bool match;
5464 u32 flags;
5465 u8 *ptr;
5466
5467 switch (type) {
5468 case LE_ADV_IND:
5469 case LE_ADV_DIRECT_IND:
5470 case LE_ADV_SCAN_IND:
5471 case LE_ADV_NONCONN_IND:
5472 case LE_ADV_SCAN_RSP:
5473 break;
5474 default:
5475 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5476 "type: 0x%02x", type);
5477 return;
5478 }
5479
5480 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5481 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5482 return;
5483 }
5484
5485 /* Find the end of the data in case the report contains padded zero
5486 * bytes at the end causing an invalid length value.
5487 *
5488 * When data is NULL, len is 0 so there is no need for extra ptr
5489 * check as 'ptr < data + 0' is already false in such case.
5490 */
5491 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5492 if (ptr + 1 + *ptr > data + len)
5493 break;
5494 }
5495
5496 /* Adjust for the actual length. This handles the case when the
5497 * remote device advertises with an incorrect data length.
5498 */
5499 len = ptr - data;
5500
5501 /* If the direct address is present, then this report is from
5502 * a LE Direct Advertising Report event. In that case it is
5503 * important to see if the address is matching the local
5504 * controller address.
5505 */
5506 if (direct_addr) {
5507 /* Only resolvable random addresses are valid for these
5508 * kinds of reports; others can be ignored.
5509 */
5510 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5511 return;
5512
5513 /* If the controller is not using resolvable random
5514 * addresses, then this report can be ignored.
5515 */
5516 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5517 return;
5518
5519 /* If the local IRK of the controller does not match
5520 * with the resolvable random address provided, then
5521 * this report can be ignored.
5522 */
5523 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5524 return;
5525 }
5526
5527 /* Check if we need to convert to identity address */
5528 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5529 if (irk) {
5530 bdaddr = &irk->bdaddr;
5531 bdaddr_type = irk->addr_type;
5532 }
5533
5534 /* Check if we have been requested to connect to this device.
5535 *
5536 * direct_addr is set only for directed advertising reports (it is NULL
5537 * for advertising reports) and is already verified to be RPA above.
5538 */
5539 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5540 direct_addr);
5541 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5542 /* Store report for later inclusion by
5543 * mgmt_device_connected
5544 */
5545 memcpy(conn->le_adv_data, data, len);
5546 conn->le_adv_data_len = len;
5547 }
5548
5549 /* Passive scanning shouldn't trigger any device found events,
5550 * except for devices marked as CONN_REPORT for which we do send
5551 * device found events, or when advertisement monitoring was requested.
5552 */
5553 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5554 if (type == LE_ADV_DIRECT_IND)
5555 return;
5556
5557 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5558 bdaddr, bdaddr_type) &&
5559 idr_is_empty(&hdev->adv_monitors_idr))
5560 return;
5561
5562 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5563 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5564 else
5565 flags = 0;
5566 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5567 rssi, flags, data, len, NULL, 0);
5568 return;
5569 }
5570
5571 /* When receiving non-connectable or scannable undirected
5572 * advertising reports, this means that the remote device is
5573 * not connectable, so clearly indicate this in the
5574 * device found event.
5575 *
5576 * When receiving a scan response, then there is no way to
5577 * know if the remote device is connectable or not. However
5578 * since scan responses are merged with a previously seen
5579 * advertising report, the flags field from that report
5580 * will be used.
5581 *
5582 * In the really unlikely case that a controller gets confused
5583 * and just sends a scan response event, then it is marked as
5584 * not connectable as well.
5585 */
5586 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5587 type == LE_ADV_SCAN_RSP)
5588 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5589 else
5590 flags = 0;
5591
5592 /* If there's nothing pending either store the data from this
5593 * event or send an immediate device found event if the data
5594 * should not be stored for later.
5595 */
5596 if (!ext_adv && !has_pending_adv_report(hdev)) {
5597 /* If the report will trigger a SCAN_REQ store it for
5598 * later merging.
5599 */
5600 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5601 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5602 rssi, flags, data, len);
5603 return;
5604 }
5605
5606 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5607 rssi, flags, data, len, NULL, 0);
5608 return;
5609 }
5610
5611 /* Check if the pending report is for the same device as the new one */
5612 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5613 bdaddr_type == d->last_adv_addr_type);
5614
5615 /* If the pending data doesn't match this report or this isn't a
5616 * scan response (e.g. we got a duplicate ADV_IND) then force
5617 * sending of the pending data.
5618 */
5619 if (type != LE_ADV_SCAN_RSP || !match) {
5620 /* Send out whatever is in the cache, but skip duplicates */
5621 if (!match)
5622 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5623 d->last_adv_addr_type, NULL,
5624 d->last_adv_rssi, d->last_adv_flags,
5625 d->last_adv_data,
5626 d->last_adv_data_len, NULL, 0);
5627
5628 /* If the new report will trigger a SCAN_REQ store it for
5629 * later merging.
5630 */
5631 if (!ext_adv && (type == LE_ADV_IND ||
5632 type == LE_ADV_SCAN_IND)) {
5633 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5634 rssi, flags, data, len);
5635 return;
5636 }
5637
5638 /* The advertising reports cannot be merged, so clear
5639 * the pending report and send out a device found event.
5640 */
5641 clear_pending_adv_report(hdev);
5642 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5643 rssi, flags, data, len, NULL, 0);
5644 return;
5645 }
5646
5647 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5648 * the new event is a SCAN_RSP. We can therefore proceed with
5649 * sending a merged device found event.
5650 */
5651 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5652 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5653 d->last_adv_data, d->last_adv_data_len, data, len);
5654 clear_pending_adv_report(hdev);
5655}
5656
5657static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5658{
5659 u8 num_reports = skb->data[0];
5660 void *ptr = &skb->data[1];
5661
5662 hci_dev_lock(hdev);
5663
5664 while (num_reports--) {
5665 struct hci_ev_le_advertising_info *ev = ptr;
5666 s8 rssi;
5667
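 /* Each report carries a trailing RSSI octet after the variable
  * length data, hence the sizeof(*ev) + length + 1 advance at
  * the end of the loop.
  */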
5668 if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
5669 bt_dev_err(hdev, "Malicious advertising data.");
5670 break;
5671 }
5672
5673 if (ev->length <= HCI_MAX_AD_LENGTH &&
5674 ev->data + ev->length <= skb_tail_pointer(skb)) {
5675 rssi = ev->data[ev->length];
5676 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5677 ev->bdaddr_type, NULL, 0, rssi,
5678 ev->data, ev->length, false);
5679 } else {
5680 bt_dev_err(hdev, "Dropping invalid advertising data");
5681 }
5682
5683 ptr += sizeof(*ev) + ev->length + 1;
5684 }
5685
5686 hci_dev_unlock(hdev);
5687}
5688
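/* Map an extended advertising report's event type onto the legacy
 * PDU types so the rest of the stack can treat both report
 * flavours the same way.
 */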
5689static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5690{
5691 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5692 switch (evt_type) {
5693 case LE_LEGACY_ADV_IND:
5694 return LE_ADV_IND;
5695 case LE_LEGACY_ADV_DIRECT_IND:
5696 return LE_ADV_DIRECT_IND;
5697 case LE_LEGACY_ADV_SCAN_IND:
5698 return LE_ADV_SCAN_IND;
5699 case LE_LEGACY_NONCONN_IND:
5700 return LE_ADV_NONCONN_IND;
5701 case LE_LEGACY_SCAN_RSP_ADV:
5702 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5703 return LE_ADV_SCAN_RSP;
5704 }
5705
5706 goto invalid;
5707 }
5708
5709 if (evt_type & LE_EXT_ADV_CONN_IND) {
5710 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5711 return LE_ADV_DIRECT_IND;
5712
5713 return LE_ADV_IND;
5714 }
5715
5716 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5717 return LE_ADV_SCAN_RSP;
5718
5719 if (evt_type & LE_EXT_ADV_SCAN_IND)
5720 return LE_ADV_SCAN_IND;
5721
5722 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5723 evt_type & LE_EXT_ADV_DIRECT_IND)
5724 return LE_ADV_NONCONN_IND;
5725
5726invalid:
5727 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5728 evt_type);
5729
5730 return LE_ADV_INVALID;
5731}
5732
5733static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5734{
5735 u8 num_reports = skb->data[0];
5736 void *ptr = &skb->data[1];
5737
5738 hci_dev_lock(hdev);
5739
5740 while (num_reports--) {
5741 struct hci_ev_le_ext_adv_report *ev = ptr;
5742 u8 legacy_evt_type;
5743 u16 evt_type;
5744
5745 evt_type = __le16_to_cpu(ev->evt_type);
5746 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5747 if (legacy_evt_type != LE_ADV_INVALID) {
5748 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5749 ev->bdaddr_type, NULL, 0, ev->rssi,
5750 ev->data, ev->length,
5751 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5752 }
5753
5754 ptr += sizeof(*ev) + ev->length;
5755 }
5756
5757 hci_dev_unlock(hdev);
5758}
5759
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports slave-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
			    !conn->out && ev->status == 0x1a)
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

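/* LE Long Term Key Request: the controller needs the LTK to start or
 * resume encryption on the connection. Look the key up by peer address,
 * address type and role, validate EDiv/Rand (both must be zero for
 * Secure Connections keys), and answer with HCI_OP_LE_LTK_REPLY,
 * zero-padding the key to the full 16 bytes; on any failure send a
 * negative reply so the controller can reject the encryption request
 * cleanly.
 */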
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

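/* LE Remote Connection Parameter Request: the peer proposed new
 * connection parameters. Send a negative reply if the handle does not
 * map to a connected link or if the proposal fails
 * hci_check_conn_params(); otherwise accept the parameters unchanged
 * and, when we are master, also forward them to the management
 * interface so userspace can decide whether to store them.
 */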
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

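/* Directed advertising reports carry no advertising data, so every
 * entry has the same size and the whole event can be validated up
 * front: num_reports entries of sizeof(*ev) bytes plus the one-byte
 * report count must fit within skb->len before any entry is read.
 */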
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];

	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
		return;

	hci_dev_lock(hdev);

	for (; num_reports; num_reports--, ev++)
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0,
				   false);

	hci_dev_unlock(hdev);
}

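/* LE PHY Update Complete: on success, record the TX and RX PHYs now in
 * use on the connection so that later PHY queries reflect the actual
 * state of the link.
 */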
static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

unlock:
	hci_dev_unlock(hdev);
}

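/* All LE events arrive wrapped in HCI_EV_LE_META: strip the meta-event
 * header and dispatch on the subevent code. Unknown subevents are
 * silently ignored.
 */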
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}

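/* Decide whether the skb saved for a request-completion callback is
 * usable. It must either be the special event the request was waiting
 * for, or a Command Complete whose opcode matches the request; a
 * Command Status carries no return parameters, so it never qualifies.
 * On success the event header (and the Command Complete header) have
 * been pulled, leaving only the return parameters for the callback.
 */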
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

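/* If this is the first event seen while the controller is suspended,
 * record what woke us up: BR/EDR connection events and LE advertising
 * reports count as remote wake-ups and their bdaddr is saved, anything
 * else is flagged as unexpected. The stored reason and address are
 * later exposed to userspace through the mgmt interface.
 */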
static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_report *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * BlueZ mgmt API docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}

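/* Entry point for every HCI event received from the controller. Match
 * the event against the currently pending command, keep a pristine
 * clone of the skb in case a request-completion callback needs the
 * unmodified payload (the handlers below may consume the original via
 * skb_pull()), dispatch to the per-event handler, and finally invoke
 * the completion callback, if any.
 */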
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}