/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"

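/* An all-zero key value; used elsewhere in this file when validating
 * link keys and LTKs, so that an obviously invalid all-zero key can be
 * rejected.
 */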
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

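/* Two handler families follow. The hci_cc_* functions service HCI
 * Command Complete events: the command's return parameters arrive in
 * skb->data, so a handler typically starts by casting that buffer to
 * the matching hci_rp_* structure, e.g.
 *
 *	struct hci_rp_role_discovery *rp = (void *) skb->data;
 *
 * and returns early when rp->status reports an error. The hci_cs_*
 * functions further below service HCI Command Status events, which
 * carry only a status byte.
 */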
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive an Inquiry Complete event right
	 * before the Inquiry Cancel Command Complete event, in which
	 * case the latter event carries a status of Command Disallowed
	 * (0x0c). This should not be treated as an error, since we have
	 * actually achieved what Inquiry Cancel set out to do, which is
	 * to end the last inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

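	/* The Write Link Policy command parameters are a __le16 handle
	 * followed by the __le16 policy, so the policy value sits at
	 * offset 2 of the sent buffer.
	 */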
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

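	/* As with Write Link Policy above, the command parameters are a
	 * __le16 handle followed by the __le16 timeout, hence the offset
	 * of 2 into the sent buffer.
	 */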
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to the features supported
	 * by the device.
	 */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

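	/* Seed the flow-control credit counters; these are decremented
	 * as packets are queued to the controller and replenished by
	 * Number of Completed Packets events.
	 */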
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

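	/* Which_Clock 0x00 requests the local Bluetooth clock; 0x01
	 * requests the piconet clock of the connection identified by
	 * the handle.
	 */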
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv_instance;

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 (Set adv and Directed advs) */
		bacpy(&hdev->random_addr, &cp->bdaddr);
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&adv_instance->random_addr, &cp->bdaddr);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as a peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

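	/* Per-PHY scan parameters follow the fixed command header; only
	 * the first entry is recorded here.
	 */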
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

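	/* Legacy advertising reports carry at most 31 bytes of AD data;
	 * drop anything larger rather than overflow last_adv_data.
	 */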
	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request, so in
		 * that case mark discovery as stopped. If this was not
		 * because of a connect request, advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}

static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}

static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}

static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}

static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
}

static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
}

static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}

static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}

static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}

static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
	hci_dev_unlock(hdev);
}

static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}

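/* Command Status handlers. A Command Status event carries no return
 * parameters, only a status byte, so the hci_cs_* handlers below
 * recover the parameters of the command they acknowledge with
 * hci_sent_cmd_data() and mostly only need to act when the controller
 * rejected the command.
 */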
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
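			/* 0x0c (Command Disallowed) is retried by moving
			 * the connection to BT_CONNECT2 for up to two
			 * attempts; any other failure tears it down.
			 */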
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
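	/* Bit 0 of auth_type encodes the MITM protection requirement. */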
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending resolution, there is no need to continue with the
	 * next name, as that will be done upon receiving another Remote
	 * Name Request Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful, wait for the name request complete event before
	 * checking whether authentication is needed.
	 */
2031 if (!status)
2032 return;
2033
2034 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2035 if (!cp)
2036 return;
2037
2038 hci_dev_lock(hdev);
2039
2040 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2041
2042 if (hci_dev_test_flag(hdev, HCI_MGMT))
2043 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2044
2045 if (!conn)
2046 goto unlock;
2047
2048 if (!hci_outgoing_auth_needed(hdev, conn))
2049 goto unlock;
2050
2051 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2052 struct hci_cp_auth_requested auth_cp;
2053
2054 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2055
2056 auth_cp.handle = __cpu_to_le16(conn->handle);
2057 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2058 sizeof(auth_cp), &auth_cp);
2059 }
2060
2061unlock:
2062 hci_dev_unlock(hdev);
2063}
2064
2065static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2066{
2067 struct hci_cp_read_remote_features *cp;
2068 struct hci_conn *conn;
2069
2070 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2071
2072 if (!status)
2073 return;
2074
2075 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2076 if (!cp)
2077 return;
2078
2079 hci_dev_lock(hdev);
2080
2081 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2082 if (conn) {
2083 if (conn->state == BT_CONFIG) {
2084 hci_connect_cfm(conn, status);
2085 hci_conn_drop(conn);
2086 }
2087 }
2088
2089 hci_dev_unlock(hdev);
2090}
2091
2092static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2093{
2094 struct hci_cp_read_remote_ext_features *cp;
2095 struct hci_conn *conn;
2096
2097 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2098
2099 if (!status)
2100 return;
2101
2102 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2103 if (!cp)
2104 return;
2105
2106 hci_dev_lock(hdev);
2107
2108 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2109 if (conn) {
2110 if (conn->state == BT_CONFIG) {
2111 hci_connect_cfm(conn, status);
2112 hci_conn_drop(conn);
2113 }
2114 }
2115
2116 hci_dev_unlock(hdev);
2117}
2118
2119static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2120{
2121 struct hci_cp_setup_sync_conn *cp;
2122 struct hci_conn *acl, *sco;
2123 __u16 handle;
2124
2125 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2126
2127 if (!status)
2128 return;
2129
2130 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2131 if (!cp)
2132 return;
2133
2134 handle = __le16_to_cpu(cp->handle);
2135
2136 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2137
2138 hci_dev_lock(hdev);
2139
2140 acl = hci_conn_hash_lookup_handle(hdev, handle);
2141 if (acl) {
2142 sco = acl->link;
2143 if (sco) {
2144 sco->state = BT_CLOSED;
2145
2146 hci_connect_cfm(sco, status);
2147 hci_conn_del(sco);
2148 }
2149 }
2150
2151 hci_dev_unlock(hdev);
2152}
2153
2154static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2155{
2156 struct hci_cp_sniff_mode *cp;
2157 struct hci_conn *conn;
2158
2159 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2160
2161 if (!status)
2162 return;
2163
2164 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2165 if (!cp)
2166 return;
2167
2168 hci_dev_lock(hdev);
2169
2170 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2171 if (conn) {
2172 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2173
2174 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2175 hci_sco_setup(conn, status);
2176 }
2177
2178 hci_dev_unlock(hdev);
2179}
2180
2181static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2182{
2183 struct hci_cp_exit_sniff_mode *cp;
2184 struct hci_conn *conn;
2185
2186 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2187
2188 if (!status)
2189 return;
2190
2191 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2192 if (!cp)
2193 return;
2194
2195 hci_dev_lock(hdev);
2196
2197 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2198 if (conn) {
2199 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2200
2201 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2202 hci_sco_setup(conn, status);
2203 }
2204
2205 hci_dev_unlock(hdev);
2206}
2207
2208static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2209{
2210 struct hci_cp_disconnect *cp;
2211 struct hci_conn *conn;
2212
2213 if (!status)
2214 return;
2215
2216 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2217 if (!cp)
2218 return;
2219
2220 hci_dev_lock(hdev);
2221
2222 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2223 if (conn)
2224 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2225 conn->dst_type, status);
2226
2227 hci_dev_unlock(hdev);
2228}
2229
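/* Common Command Status helper for LE_Create_Connection and
 * LE_Extended_Create_Connection. Records the initiator/responder
 * address information needed by SMP and arms the connection timeout
 * for attempts that do not go through the white list.
 */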
2230static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2231 u8 peer_addr_type, u8 own_address_type,
2232 u8 filter_policy)
2233{
2234 struct hci_conn *conn;
2235
2236 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2237 peer_addr_type);
2238 if (!conn)
2239 return;
2240
2241 /* Store the initiator and responder address information which
2242 * is needed for SMP. These values will not change during the
2243 * lifetime of the connection.
2244 */
2245 conn->init_addr_type = own_address_type;
2246 if (own_address_type == ADDR_LE_DEV_RANDOM)
2247 bacpy(&conn->init_addr, &hdev->random_addr);
2248 else
2249 bacpy(&conn->init_addr, &hdev->bdaddr);
2250
2251 conn->resp_addr_type = peer_addr_type;
2252 bacpy(&conn->resp_addr, peer_addr);
2253
2254 /* We don't want the connection attempt to stick around
2255 * indefinitely since LE doesn't have a page timeout concept
2256 * like BR/EDR. Set a timer for any connection that doesn't use
2257 * the white list for connecting.
2258 */
2259 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2260 queue_delayed_work(conn->hdev->workqueue,
2261 &conn->le_conn_timeout,
2262 conn->conn_timeout);
2263}
2264
2265static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2266{
2267 struct hci_cp_le_create_conn *cp;
2268
2269 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2270
2271 /* All connection failure handling is taken care of by the
2272 * hci_le_conn_failed function which is triggered by the HCI
2273 * request completion callbacks used for connecting.
2274 */
2275 if (status)
2276 return;
2277
2278 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2279 if (!cp)
2280 return;
2281
2282 hci_dev_lock(hdev);
2283
2284 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2285 cp->own_address_type, cp->filter_policy);
2286
2287 hci_dev_unlock(hdev);
2288}
2289
2290static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2291{
2292 struct hci_cp_le_ext_create_conn *cp;
2293
2294 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2295
2296 /* All connection failure handling is taken care of by the
2297 * hci_le_conn_failed function which is triggered by the HCI
2298 * request completion callbacks used for connecting.
2299 */
2300 if (status)
2301 return;
2302
2303 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2304 if (!cp)
2305 return;
2306
2307 hci_dev_lock(hdev);
2308
2309 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2310 cp->own_addr_type, cp->filter_policy);
2311
2312 hci_dev_unlock(hdev);
2313}
2314
2315static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2316{
2317 struct hci_cp_le_read_remote_features *cp;
2318 struct hci_conn *conn;
2319
2320 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2321
2322 if (!status)
2323 return;
2324
2325 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2326 if (!cp)
2327 return;
2328
2329 hci_dev_lock(hdev);
2330
2331 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2332 if (conn) {
2333 if (conn->state == BT_CONFIG) {
2334 hci_connect_cfm(conn, status);
2335 hci_conn_drop(conn);
2336 }
2337 }
2338
2339 hci_dev_unlock(hdev);
2340}
2341
2342static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2343{
2344 struct hci_cp_le_start_enc *cp;
2345 struct hci_conn *conn;
2346
2347 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2348
2349 if (!status)
2350 return;
2351
2352 hci_dev_lock(hdev);
2353
2354 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2355 if (!cp)
2356 goto unlock;
2357
2358 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2359 if (!conn)
2360 goto unlock;
2361
2362 if (conn->state != BT_CONNECTED)
2363 goto unlock;
2364
2365 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2366 hci_conn_drop(conn);
2367
2368unlock:
2369 hci_dev_unlock(hdev);
2370}
2371
2372static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2373{
2374 struct hci_cp_switch_role *cp;
2375 struct hci_conn *conn;
2376
2377 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2378
2379 if (!status)
2380 return;
2381
2382 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2383 if (!cp)
2384 return;
2385
2386 hci_dev_lock(hdev);
2387
2388 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2389 if (conn)
2390 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2391
2392 hci_dev_unlock(hdev);
2393}
2394
2395static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2396{
2397 __u8 status = *((__u8 *) skb->data);
2398 struct discovery_state *discov = &hdev->discovery;
2399 struct inquiry_entry *e;
2400
2401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2402
2403 hci_conn_check_pending(hdev);
2404
2405 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2406 return;
2407
2408 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2409 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2410
2411 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2412 return;
2413
2414 hci_dev_lock(hdev);
2415
2416 if (discov->state != DISCOVERY_FINDING)
2417 goto unlock;
2418
2419 if (list_empty(&discov->resolve)) {
2420 /* When BR/EDR inquiry is active and no LE scanning is in
2421 * progress, then change discovery state to indicate completion.
2422 *
2423 * When running LE scanning and BR/EDR inquiry simultaneously
2424 * and the LE scan already finished, then change the discovery
2425 * state to indicate completion.
2426 */
2427 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2428 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2429 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2430 goto unlock;
2431 }
2432
2433 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2434 if (e && hci_resolve_name(hdev, e) == 0) {
2435 e->name_state = NAME_PENDING;
2436 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2437 } else {
2438 /* When BR/EDR inquiry is active and no LE scanning is in
2439 * progress, then change discovery state to indicate completion.
2440 *
2441 * When running LE scanning and BR/EDR inquiry simultaneously
2442 * and the LE scan already finished, then change the discovery
2443 * state to indicate completion.
2444 */
2445 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2446 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2447 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2448 }
2449
2450unlock:
2451 hci_dev_unlock(hdev);
2452}
2453
2454static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2455{
2456 struct inquiry_data data;
2457 struct inquiry_info *info = (void *) (skb->data + 1);
2458 int num_rsp = *((__u8 *) skb->data);
2459
2460 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2461
2462	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2463		return;
2464
2465 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2466 return;
2467
2468 hci_dev_lock(hdev);
2469
2470 for (; num_rsp; num_rsp--, info++) {
2471 u32 flags;
2472
2473 bacpy(&data.bdaddr, &info->bdaddr);
2474 data.pscan_rep_mode = info->pscan_rep_mode;
2475 data.pscan_period_mode = info->pscan_period_mode;
2476 data.pscan_mode = info->pscan_mode;
2477 memcpy(data.dev_class, info->dev_class, 3);
2478 data.clock_offset = info->clock_offset;
2479 data.rssi = HCI_RSSI_INVALID;
2480 data.ssp_mode = 0x00;
2481
2482 flags = hci_inquiry_cache_update(hdev, &data, false);
2483
2484 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2485 info->dev_class, HCI_RSSI_INVALID,
2486 flags, NULL, 0, NULL, 0);
2487 }
2488
2489 hci_dev_unlock(hdev);
2490}
2491
2492static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2493{
2494 struct hci_ev_conn_complete *ev = (void *) skb->data;
2495 struct hci_conn *conn;
2496
2497 BT_DBG("%s", hdev->name);
2498
2499 hci_dev_lock(hdev);
2500
2501 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2502 if (!conn) {
2503 if (ev->link_type != SCO_LINK)
2504 goto unlock;
2505
2506 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2507 if (!conn)
2508 goto unlock;
2509
2510 conn->type = SCO_LINK;
2511 }
2512
2513 if (!ev->status) {
2514 conn->handle = __le16_to_cpu(ev->handle);
2515
2516 if (conn->type == ACL_LINK) {
2517 conn->state = BT_CONFIG;
2518 hci_conn_hold(conn);
2519
2520 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2521 !hci_find_link_key(hdev, &ev->bdaddr))
2522 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2523 else
2524 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2525 } else
2526 conn->state = BT_CONNECTED;
2527
2528 hci_debugfs_create_conn(conn);
2529 hci_conn_add_sysfs(conn);
2530
2531 if (test_bit(HCI_AUTH, &hdev->flags))
2532 set_bit(HCI_CONN_AUTH, &conn->flags);
2533
2534 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2535 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2536
2537 /* Get remote features */
2538 if (conn->type == ACL_LINK) {
2539 struct hci_cp_read_remote_features cp;
2540 cp.handle = ev->handle;
2541 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2542 sizeof(cp), &cp);
2543
2544 hci_req_update_scan(hdev);
2545 }
2546
2547 /* Set packet type for incoming connection */
2548 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2549 struct hci_cp_change_conn_ptype cp;
2550 cp.handle = ev->handle;
2551 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2552 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2553 &cp);
2554 }
2555 } else {
2556 conn->state = BT_CLOSED;
2557 if (conn->type == ACL_LINK)
2558 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2559 conn->dst_type, ev->status);
2560 }
2561
2562 if (conn->type == ACL_LINK)
2563 hci_sco_setup(conn, ev->status);
2564
2565 if (ev->status) {
2566 hci_connect_cfm(conn, ev->status);
2567 hci_conn_del(conn);
2568 } else if (ev->link_type != ACL_LINK)
2569 hci_connect_cfm(conn, ev->status);
2570
2571unlock:
2572 hci_dev_unlock(hdev);
2573
2574 hci_conn_check_pending(hdev);
2575}
2576
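/* Refuse an incoming connection request by sending
 * HCI_Reject_Connection_Request with an unacceptable BD_ADDR reason.
 */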
2577static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2578{
2579 struct hci_cp_reject_conn_req cp;
2580
2581 bacpy(&cp.bdaddr, bdaddr);
2582 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2583 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2584}
2585
2586static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2587{
2588 struct hci_ev_conn_request *ev = (void *) skb->data;
2589 int mask = hdev->link_mode;
2590 struct inquiry_entry *ie;
2591 struct hci_conn *conn;
2592 __u8 flags = 0;
2593
2594 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2595 ev->link_type);
2596
2597 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2598 &flags);
2599
2600 if (!(mask & HCI_LM_ACCEPT)) {
2601 hci_reject_conn(hdev, &ev->bdaddr);
2602 return;
2603 }
2604
2605 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2606 BDADDR_BREDR)) {
2607 hci_reject_conn(hdev, &ev->bdaddr);
2608 return;
2609 }
2610
2611 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2612 * connection. These features are only touched through mgmt so
2613 * only do the checks if HCI_MGMT is set.
2614 */
2615 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2616 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2617 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2618 BDADDR_BREDR)) {
2619 hci_reject_conn(hdev, &ev->bdaddr);
2620 return;
2621 }
2622
2623 /* Connection accepted */
2624
2625 hci_dev_lock(hdev);
2626
2627 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2628 if (ie)
2629 memcpy(ie->data.dev_class, ev->dev_class, 3);
2630
2631 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2632 &ev->bdaddr);
2633 if (!conn) {
2634 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2635 HCI_ROLE_SLAVE);
2636 if (!conn) {
2637 bt_dev_err(hdev, "no memory for new connection");
2638 hci_dev_unlock(hdev);
2639 return;
2640 }
2641 }
2642
2643 memcpy(conn->dev_class, ev->dev_class, 3);
2644
2645 hci_dev_unlock(hdev);
2646
2647 if (ev->link_type == ACL_LINK ||
2648 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2649 struct hci_cp_accept_conn_req cp;
2650 conn->state = BT_CONNECT;
2651
2652 bacpy(&cp.bdaddr, &ev->bdaddr);
2653
2654 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2655 cp.role = 0x00; /* Become master */
2656 else
2657 cp.role = 0x01; /* Remain slave */
2658
2659 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2660 } else if (!(flags & HCI_PROTO_DEFER)) {
2661 struct hci_cp_accept_sync_conn_req cp;
2662 conn->state = BT_CONNECT;
2663
2664 bacpy(&cp.bdaddr, &ev->bdaddr);
2665 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2666
2667 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2668 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2669 cp.max_latency = cpu_to_le16(0xffff);
2670 cp.content_format = cpu_to_le16(hdev->voice_setting);
2671 cp.retrans_effort = 0xff;
2672
2673 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2674 &cp);
2675 } else {
2676 conn->state = BT_CONNECT2;
2677 hci_connect_cfm(conn, 0);
2678 }
2679}
2680
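/* Map an HCI disconnect reason code to the MGMT_DEV_DISCONN_*
 * value reported to user space in the Device Disconnected event.
 */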
2681static u8 hci_to_mgmt_reason(u8 err)
2682{
2683 switch (err) {
2684 case HCI_ERROR_CONNECTION_TIMEOUT:
2685 return MGMT_DEV_DISCONN_TIMEOUT;
2686 case HCI_ERROR_REMOTE_USER_TERM:
2687 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2688 case HCI_ERROR_REMOTE_POWER_OFF:
2689 return MGMT_DEV_DISCONN_REMOTE;
2690 case HCI_ERROR_LOCAL_HOST_TERM:
2691 return MGMT_DEV_DISCONN_LOCAL_HOST;
2692 default:
2693 return MGMT_DEV_DISCONN_UNKNOWN;
2694 }
2695}
2696
2697static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2698{
2699 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2700 u8 reason;
2701 struct hci_conn_params *params;
2702 struct hci_conn *conn;
2703 bool mgmt_connected;
2704 u8 type;
2705
2706 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2707
2708 hci_dev_lock(hdev);
2709
2710 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2711 if (!conn)
2712 goto unlock;
2713
2714 if (ev->status) {
2715 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2716 conn->dst_type, ev->status);
2717 goto unlock;
2718 }
2719
2720 conn->state = BT_CLOSED;
2721
2722 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2723
2724 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2725 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2726 else
2727 reason = hci_to_mgmt_reason(ev->reason);
2728
2729 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2730 reason, mgmt_connected);
2731
2732 if (conn->type == ACL_LINK) {
2733 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2734 hci_remove_link_key(hdev, &conn->dst);
2735
2736 hci_req_update_scan(hdev);
2737 }
2738
2739 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2740 if (params) {
2741 switch (params->auto_connect) {
2742 case HCI_AUTO_CONN_LINK_LOSS:
2743 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2744 break;
2745 /* Fall through */
2746
2747 case HCI_AUTO_CONN_DIRECT:
2748 case HCI_AUTO_CONN_ALWAYS:
2749 list_del_init(&params->action);
2750 list_add(&params->action, &hdev->pend_le_conns);
2751 hci_update_background_scan(hdev);
2752 break;
2753
2754 default:
2755 break;
2756 }
2757 }
2758
2759 type = conn->type;
2760
2761 hci_disconn_cfm(conn, ev->reason);
2762 hci_conn_del(conn);
2763
2764 /* Re-enable advertising if necessary, since it might
2765 * have been disabled by the connection. From the
2766 * HCI_LE_Set_Advertise_Enable command description in
2767 * the core specification (v4.0):
2768 * "The Controller shall continue advertising until the Host
2769 * issues an LE_Set_Advertise_Enable command with
2770 * Advertising_Enable set to 0x00 (Advertising is disabled)
2771 * or until a connection is created or until the Advertising
2772 * is timed out due to Directed Advertising."
2773 */
2774 if (type == LE_LINK)
2775 hci_req_reenable_advertising(hdev);
2776
2777unlock:
2778 hci_dev_unlock(hdev);
2779}
2780
2781static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2782{
2783 struct hci_ev_auth_complete *ev = (void *) skb->data;
2784 struct hci_conn *conn;
2785
2786 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2787
2788 hci_dev_lock(hdev);
2789
2790 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2791 if (!conn)
2792 goto unlock;
2793
2794 if (!ev->status) {
2795 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2796
2797 if (!hci_conn_ssp_enabled(conn) &&
2798 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2799 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2800 } else {
2801 set_bit(HCI_CONN_AUTH, &conn->flags);
2802 conn->sec_level = conn->pending_sec_level;
2803 }
2804 } else {
2805 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2806 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2807
2808 mgmt_auth_failed(conn, ev->status);
2809 }
2810
2811 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2812 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2813
2814 if (conn->state == BT_CONFIG) {
2815 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2816 struct hci_cp_set_conn_encrypt cp;
2817 cp.handle = ev->handle;
2818 cp.encrypt = 0x01;
2819 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2820 &cp);
2821 } else {
2822 conn->state = BT_CONNECTED;
2823 hci_connect_cfm(conn, ev->status);
2824 hci_conn_drop(conn);
2825 }
2826 } else {
2827 hci_auth_cfm(conn, ev->status);
2828
2829 hci_conn_hold(conn);
2830 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2831 hci_conn_drop(conn);
2832 }
2833
2834 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2835 if (!ev->status) {
2836 struct hci_cp_set_conn_encrypt cp;
2837 cp.handle = ev->handle;
2838 cp.encrypt = 0x01;
2839 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2840 &cp);
2841 } else {
2842 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2843			hci_encrypt_cfm(conn, ev->status);
2844		}
2845 }
2846
2847unlock:
2848 hci_dev_unlock(hdev);
2849}
2850
2851static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2852{
2853 struct hci_ev_remote_name *ev = (void *) skb->data;
2854 struct hci_conn *conn;
2855
2856 BT_DBG("%s", hdev->name);
2857
2858 hci_conn_check_pending(hdev);
2859
2860 hci_dev_lock(hdev);
2861
2862 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2863
2864 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2865 goto check_auth;
2866
2867 if (ev->status == 0)
2868 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2869 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2870 else
2871 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2872
2873check_auth:
2874 if (!conn)
2875 goto unlock;
2876
2877 if (!hci_outgoing_auth_needed(hdev, conn))
2878 goto unlock;
2879
2880 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2881 struct hci_cp_auth_requested cp;
2882
2883 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2884
2885 cp.handle = __cpu_to_le16(conn->handle);
2886 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2887 }
2888
2889unlock:
2890 hci_dev_unlock(hdev);
2891}
2892
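/* Completion callback for HCI_Read_Encryption_Key_Size. If the
 * controller reports an error, the key size is assumed to be the
 * maximum (HCI_LINK_KEY_SIZE), mirroring what is done when the
 * command is not supported at all, and the encryption change is
 * then confirmed to the upper layers.
 */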
2893static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2894 u16 opcode, struct sk_buff *skb)
2895{
2896 const struct hci_rp_read_enc_key_size *rp;
2897 struct hci_conn *conn;
2898 u16 handle;
2899
2900 BT_DBG("%s status 0x%02x", hdev->name, status);
2901
2902 if (!skb || skb->len < sizeof(*rp)) {
2903 bt_dev_err(hdev, "invalid read key size response");
2904 return;
2905 }
2906
2907 rp = (void *)skb->data;
2908 handle = le16_to_cpu(rp->handle);
2909
2910 hci_dev_lock(hdev);
2911
2912 conn = hci_conn_hash_lookup_handle(hdev, handle);
2913 if (!conn)
2914 goto unlock;
2915
2916	/* If we fail to read the encryption key size, assume maximum
2917	 * (which is the same thing we do when this HCI command isn't
2918	 * supported).
2919	 */
2920 if (rp->status) {
2921 bt_dev_err(hdev, "failed to read key size for handle %u",
2922 handle);
2923 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2924 } else {
2925 conn->enc_key_size = rp->key_size;
2926 }
2927
2928	hci_encrypt_cfm(conn, 0);
2929
2930unlock:
2931 hci_dev_unlock(hdev);
2932}
2933
2934static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2935{
2936 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2937 struct hci_conn *conn;
2938
2939 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2940
2941 hci_dev_lock(hdev);
2942
2943 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2944 if (!conn)
2945 goto unlock;
2946
2947 if (!ev->status) {
2948 if (ev->encrypt) {
2949 /* Encryption implies authentication */
2950 set_bit(HCI_CONN_AUTH, &conn->flags);
2951 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2952 conn->sec_level = conn->pending_sec_level;
2953
2954 /* P-256 authentication key implies FIPS */
2955 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2956 set_bit(HCI_CONN_FIPS, &conn->flags);
2957
2958 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2959 conn->type == LE_LINK)
2960 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2961 } else {
2962 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2963 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2964 }
2965 }
2966
2967 /* We should disregard the current RPA and generate a new one
2968 * whenever the encryption procedure fails.
2969 */
2970 if (ev->status && conn->type == LE_LINK) {
2971 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2972 hci_adv_instances_set_rpa_expired(hdev, true);
2973 }
2974
2975 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2976
2977	/* Check that the link security requirements are met */
2978 if (!hci_conn_check_link_mode(conn))
2979 ev->status = HCI_ERROR_AUTH_FAILURE;
2980
2981	if (ev->status && conn->state == BT_CONNECTED) {
2982 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2983 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2984
2985		/* Notify upper layers so they can clean up before
2986		 * disconnecting.
2987		 */
2988 hci_encrypt_cfm(conn, ev->status);
2989		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2990 hci_conn_drop(conn);
2991 goto unlock;
2992 }
2993
2994	/* Try reading the encryption key size for encrypted ACL links */
2995 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2996 struct hci_cp_read_enc_key_size cp;
2997 struct hci_request req;
2998
2999 /* Only send HCI_Read_Encryption_Key_Size if the
3000 * controller really supports it. If it doesn't, assume
3001 * the default size (16).
3002 */
3003 if (!(hdev->commands[20] & 0x10)) {
3004 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3005 goto notify;
3006 }
3007
3008 hci_req_init(&req, hdev);
3009
3010 cp.handle = cpu_to_le16(conn->handle);
3011 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3012
3013 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3014 bt_dev_err(hdev, "sending read key size failed");
3015 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3016 goto notify;
3017 }
3018
3019 goto unlock;
3020 }
3021
3022	/* Set the default Authenticated Payload Timeout after
3023	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3024	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3025	 * sent when the link is active and encryption is enabled. The conn
3026	 * type can be either LE or ACL, and the controller must support
3027	 * LMP Ping. The link must also be encrypted with AES-CCM.
3028	 */
3029 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3030 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3031 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3032 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3033 struct hci_cp_write_auth_payload_to cp;
3034
3035 cp.handle = cpu_to_le16(conn->handle);
3036 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3037 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3038 sizeof(cp), &cp);
3039 }
3040
3041notify:
3042	hci_encrypt_cfm(conn, ev->status);
3043
3044unlock:
3045 hci_dev_unlock(hdev);
3046}
3047
3048static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3049 struct sk_buff *skb)
3050{
3051 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3052 struct hci_conn *conn;
3053
3054 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3055
3056 hci_dev_lock(hdev);
3057
3058 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3059 if (conn) {
3060 if (!ev->status)
3061 set_bit(HCI_CONN_SECURE, &conn->flags);
3062
3063 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3064
3065 hci_key_change_cfm(conn, ev->status);
3066 }
3067
3068 hci_dev_unlock(hdev);
3069}
3070
3071static void hci_remote_features_evt(struct hci_dev *hdev,
3072 struct sk_buff *skb)
3073{
3074 struct hci_ev_remote_features *ev = (void *) skb->data;
3075 struct hci_conn *conn;
3076
3077 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3078
3079 hci_dev_lock(hdev);
3080
3081 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3082 if (!conn)
3083 goto unlock;
3084
3085 if (!ev->status)
3086 memcpy(conn->features[0], ev->features, 8);
3087
3088 if (conn->state != BT_CONFIG)
3089 goto unlock;
3090
3091 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3092 lmp_ext_feat_capable(conn)) {
3093 struct hci_cp_read_remote_ext_features cp;
3094 cp.handle = ev->handle;
3095 cp.page = 0x01;
3096 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3097 sizeof(cp), &cp);
3098 goto unlock;
3099 }
3100
3101 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3102 struct hci_cp_remote_name_req cp;
3103 memset(&cp, 0, sizeof(cp));
3104 bacpy(&cp.bdaddr, &conn->dst);
3105 cp.pscan_rep_mode = 0x02;
3106 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3107 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3108 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3109
3110 if (!hci_outgoing_auth_needed(hdev, conn)) {
3111 conn->state = BT_CONNECTED;
3112 hci_connect_cfm(conn, ev->status);
3113 hci_conn_drop(conn);
3114 }
3115
3116unlock:
3117 hci_dev_unlock(hdev);
3118}
3119
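/* Dispatch an HCI Command Complete event to the hci_cc_* handler
 * matching its opcode. A non-zero Num_HCI_Command_Packets field
 * re-opens the command queue, and hci_req_cmd_complete() hands back
 * the completion callbacks of the request the command belonged to.
 */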
3120static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3121 u16 *opcode, u8 *status,
3122 hci_req_complete_t *req_complete,
3123 hci_req_complete_skb_t *req_complete_skb)
3124{
3125 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3126
3127 *opcode = __le16_to_cpu(ev->opcode);
3128 *status = skb->data[sizeof(*ev)];
3129
3130 skb_pull(skb, sizeof(*ev));
3131
3132 switch (*opcode) {
3133 case HCI_OP_INQUIRY_CANCEL:
3134		hci_cc_inquiry_cancel(hdev, skb, status);
3135		break;
3136
3137 case HCI_OP_PERIODIC_INQ:
3138 hci_cc_periodic_inq(hdev, skb);
3139 break;
3140
3141 case HCI_OP_EXIT_PERIODIC_INQ:
3142 hci_cc_exit_periodic_inq(hdev, skb);
3143 break;
3144
3145 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3146 hci_cc_remote_name_req_cancel(hdev, skb);
3147 break;
3148
3149 case HCI_OP_ROLE_DISCOVERY:
3150 hci_cc_role_discovery(hdev, skb);
3151 break;
3152
3153 case HCI_OP_READ_LINK_POLICY:
3154 hci_cc_read_link_policy(hdev, skb);
3155 break;
3156
3157 case HCI_OP_WRITE_LINK_POLICY:
3158 hci_cc_write_link_policy(hdev, skb);
3159 break;
3160
3161 case HCI_OP_READ_DEF_LINK_POLICY:
3162 hci_cc_read_def_link_policy(hdev, skb);
3163 break;
3164
3165 case HCI_OP_WRITE_DEF_LINK_POLICY:
3166 hci_cc_write_def_link_policy(hdev, skb);
3167 break;
3168
3169 case HCI_OP_RESET:
3170 hci_cc_reset(hdev, skb);
3171 break;
3172
3173 case HCI_OP_READ_STORED_LINK_KEY:
3174 hci_cc_read_stored_link_key(hdev, skb);
3175 break;
3176
3177 case HCI_OP_DELETE_STORED_LINK_KEY:
3178 hci_cc_delete_stored_link_key(hdev, skb);
3179 break;
3180
3181 case HCI_OP_WRITE_LOCAL_NAME:
3182 hci_cc_write_local_name(hdev, skb);
3183 break;
3184
3185 case HCI_OP_READ_LOCAL_NAME:
3186 hci_cc_read_local_name(hdev, skb);
3187 break;
3188
3189 case HCI_OP_WRITE_AUTH_ENABLE:
3190 hci_cc_write_auth_enable(hdev, skb);
3191 break;
3192
3193 case HCI_OP_WRITE_ENCRYPT_MODE:
3194 hci_cc_write_encrypt_mode(hdev, skb);
3195 break;
3196
3197 case HCI_OP_WRITE_SCAN_ENABLE:
3198 hci_cc_write_scan_enable(hdev, skb);
3199 break;
3200
3201 case HCI_OP_READ_CLASS_OF_DEV:
3202 hci_cc_read_class_of_dev(hdev, skb);
3203 break;
3204
3205 case HCI_OP_WRITE_CLASS_OF_DEV:
3206 hci_cc_write_class_of_dev(hdev, skb);
3207 break;
3208
3209 case HCI_OP_READ_VOICE_SETTING:
3210 hci_cc_read_voice_setting(hdev, skb);
3211 break;
3212
3213 case HCI_OP_WRITE_VOICE_SETTING:
3214 hci_cc_write_voice_setting(hdev, skb);
3215 break;
3216
3217 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3218 hci_cc_read_num_supported_iac(hdev, skb);
3219 break;
3220
3221 case HCI_OP_WRITE_SSP_MODE:
3222 hci_cc_write_ssp_mode(hdev, skb);
3223 break;
3224
3225 case HCI_OP_WRITE_SC_SUPPORT:
3226 hci_cc_write_sc_support(hdev, skb);
3227 break;
3228
3229	case HCI_OP_READ_AUTH_PAYLOAD_TO:
3230 hci_cc_read_auth_payload_timeout(hdev, skb);
3231 break;
3232
3233 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3234 hci_cc_write_auth_payload_timeout(hdev, skb);
3235 break;
3236
3237	case HCI_OP_READ_LOCAL_VERSION:
3238 hci_cc_read_local_version(hdev, skb);
3239 break;
3240
3241 case HCI_OP_READ_LOCAL_COMMANDS:
3242 hci_cc_read_local_commands(hdev, skb);
3243 break;
3244
3245 case HCI_OP_READ_LOCAL_FEATURES:
3246 hci_cc_read_local_features(hdev, skb);
3247 break;
3248
3249 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3250 hci_cc_read_local_ext_features(hdev, skb);
3251 break;
3252
3253 case HCI_OP_READ_BUFFER_SIZE:
3254 hci_cc_read_buffer_size(hdev, skb);
3255 break;
3256
3257 case HCI_OP_READ_BD_ADDR:
3258 hci_cc_read_bd_addr(hdev, skb);
3259 break;
3260
3261 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3262 hci_cc_read_page_scan_activity(hdev, skb);
3263 break;
3264
3265 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3266 hci_cc_write_page_scan_activity(hdev, skb);
3267 break;
3268
3269 case HCI_OP_READ_PAGE_SCAN_TYPE:
3270 hci_cc_read_page_scan_type(hdev, skb);
3271 break;
3272
3273 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3274 hci_cc_write_page_scan_type(hdev, skb);
3275 break;
3276
3277 case HCI_OP_READ_DATA_BLOCK_SIZE:
3278 hci_cc_read_data_block_size(hdev, skb);
3279 break;
3280
3281 case HCI_OP_READ_FLOW_CONTROL_MODE:
3282 hci_cc_read_flow_control_mode(hdev, skb);
3283 break;
3284
3285 case HCI_OP_READ_LOCAL_AMP_INFO:
3286 hci_cc_read_local_amp_info(hdev, skb);
3287 break;
3288
3289 case HCI_OP_READ_CLOCK:
3290 hci_cc_read_clock(hdev, skb);
3291 break;
3292
3293 case HCI_OP_READ_INQ_RSP_TX_POWER:
3294 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3295 break;
3296
3297 case HCI_OP_PIN_CODE_REPLY:
3298 hci_cc_pin_code_reply(hdev, skb);
3299 break;
3300
3301 case HCI_OP_PIN_CODE_NEG_REPLY:
3302 hci_cc_pin_code_neg_reply(hdev, skb);
3303 break;
3304
3305 case HCI_OP_READ_LOCAL_OOB_DATA:
3306 hci_cc_read_local_oob_data(hdev, skb);
3307 break;
3308
3309 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3310 hci_cc_read_local_oob_ext_data(hdev, skb);
3311 break;
3312
3313 case HCI_OP_LE_READ_BUFFER_SIZE:
3314 hci_cc_le_read_buffer_size(hdev, skb);
3315 break;
3316
3317 case HCI_OP_LE_READ_LOCAL_FEATURES:
3318 hci_cc_le_read_local_features(hdev, skb);
3319 break;
3320
3321 case HCI_OP_LE_READ_ADV_TX_POWER:
3322 hci_cc_le_read_adv_tx_power(hdev, skb);
3323 break;
3324
3325 case HCI_OP_USER_CONFIRM_REPLY:
3326 hci_cc_user_confirm_reply(hdev, skb);
3327 break;
3328
3329 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3330 hci_cc_user_confirm_neg_reply(hdev, skb);
3331 break;
3332
3333 case HCI_OP_USER_PASSKEY_REPLY:
3334 hci_cc_user_passkey_reply(hdev, skb);
3335 break;
3336
3337 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3338 hci_cc_user_passkey_neg_reply(hdev, skb);
3339 break;
3340
3341 case HCI_OP_LE_SET_RANDOM_ADDR:
3342 hci_cc_le_set_random_addr(hdev, skb);
3343 break;
3344
3345 case HCI_OP_LE_SET_ADV_ENABLE:
3346 hci_cc_le_set_adv_enable(hdev, skb);
3347 break;
3348
3349 case HCI_OP_LE_SET_SCAN_PARAM:
3350 hci_cc_le_set_scan_param(hdev, skb);
3351 break;
3352
3353 case HCI_OP_LE_SET_SCAN_ENABLE:
3354 hci_cc_le_set_scan_enable(hdev, skb);
3355 break;
3356
3357 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3358 hci_cc_le_read_white_list_size(hdev, skb);
3359 break;
3360
3361 case HCI_OP_LE_CLEAR_WHITE_LIST:
3362 hci_cc_le_clear_white_list(hdev, skb);
3363 break;
3364
3365 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3366 hci_cc_le_add_to_white_list(hdev, skb);
3367 break;
3368
3369 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3370 hci_cc_le_del_from_white_list(hdev, skb);
3371 break;
3372
3373 case HCI_OP_LE_READ_SUPPORTED_STATES:
3374 hci_cc_le_read_supported_states(hdev, skb);
3375 break;
3376
3377 case HCI_OP_LE_READ_DEF_DATA_LEN:
3378 hci_cc_le_read_def_data_len(hdev, skb);
3379 break;
3380
3381 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3382 hci_cc_le_write_def_data_len(hdev, skb);
3383 break;
3384
3385	case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3386 hci_cc_le_add_to_resolv_list(hdev, skb);
3387 break;
3388
3389 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3390 hci_cc_le_del_from_resolv_list(hdev, skb);
3391 break;
3392
3393	case HCI_OP_LE_CLEAR_RESOLV_LIST:
3394 hci_cc_le_clear_resolv_list(hdev, skb);
3395 break;
3396
3397 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3398 hci_cc_le_read_resolv_list_size(hdev, skb);
3399 break;
3400
3401 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3402 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3403 break;
3404
3405 case HCI_OP_LE_READ_MAX_DATA_LEN:
3406 hci_cc_le_read_max_data_len(hdev, skb);
3407 break;
3408
3409 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3410 hci_cc_write_le_host_supported(hdev, skb);
3411 break;
3412
3413 case HCI_OP_LE_SET_ADV_PARAM:
3414 hci_cc_set_adv_param(hdev, skb);
3415 break;
3416
3417 case HCI_OP_READ_RSSI:
3418 hci_cc_read_rssi(hdev, skb);
3419 break;
3420
3421 case HCI_OP_READ_TX_POWER:
3422 hci_cc_read_tx_power(hdev, skb);
3423 break;
3424
3425 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3426 hci_cc_write_ssp_debug_mode(hdev, skb);
3427 break;
3428
3429 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3430 hci_cc_le_set_ext_scan_param(hdev, skb);
3431 break;
3432
3433 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3434 hci_cc_le_set_ext_scan_enable(hdev, skb);
3435 break;
3436
3437 case HCI_OP_LE_SET_DEFAULT_PHY:
3438 hci_cc_le_set_default_phy(hdev, skb);
3439 break;
3440
3441 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3442 hci_cc_le_read_num_adv_sets(hdev, skb);
3443 break;
3444
3445 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3446 hci_cc_set_ext_adv_param(hdev, skb);
3447 break;
3448
3449 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3450 hci_cc_le_set_ext_adv_enable(hdev, skb);
3451 break;
3452
3453 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3454 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3455 break;
3456
3457 default:
3458 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3459 break;
3460 }
3461
3462 if (*opcode != HCI_OP_NOP)
3463 cancel_delayed_work(&hdev->cmd_timer);
3464
3465 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3466 atomic_set(&hdev->cmd_cnt, 1);
3467
3468 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3469 req_complete_skb);
3470
3471	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3472 bt_dev_err(hdev,
3473 "unexpected event for opcode 0x%4.4x", *opcode);
3474 return;
3475 }
3476
3477	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3478 queue_work(hdev->workqueue, &hdev->cmd_work);
3479}
3480
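/* Dispatch an HCI Command Status event to the hci_cs_* handler
 * matching its opcode. Unlike Command Complete, this event only
 * acknowledges that the controller accepted (or refused) the
 * command; the actual result arrives in a later event.
 */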
3481static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3482 u16 *opcode, u8 *status,
3483 hci_req_complete_t *req_complete,
3484 hci_req_complete_skb_t *req_complete_skb)
3485{
3486 struct hci_ev_cmd_status *ev = (void *) skb->data;
3487
3488 skb_pull(skb, sizeof(*ev));
3489
3490 *opcode = __le16_to_cpu(ev->opcode);
3491 *status = ev->status;
3492
3493 switch (*opcode) {
3494 case HCI_OP_INQUIRY:
3495 hci_cs_inquiry(hdev, ev->status);
3496 break;
3497
3498 case HCI_OP_CREATE_CONN:
3499 hci_cs_create_conn(hdev, ev->status);
3500 break;
3501
3502 case HCI_OP_DISCONNECT:
3503 hci_cs_disconnect(hdev, ev->status);
3504 break;
3505
3506 case HCI_OP_ADD_SCO:
3507 hci_cs_add_sco(hdev, ev->status);
3508 break;
3509
3510 case HCI_OP_AUTH_REQUESTED:
3511 hci_cs_auth_requested(hdev, ev->status);
3512 break;
3513
3514 case HCI_OP_SET_CONN_ENCRYPT:
3515 hci_cs_set_conn_encrypt(hdev, ev->status);
3516 break;
3517
3518 case HCI_OP_REMOTE_NAME_REQ:
3519 hci_cs_remote_name_req(hdev, ev->status);
3520 break;
3521
3522 case HCI_OP_READ_REMOTE_FEATURES:
3523 hci_cs_read_remote_features(hdev, ev->status);
3524 break;
3525
3526 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3527 hci_cs_read_remote_ext_features(hdev, ev->status);
3528 break;
3529
3530 case HCI_OP_SETUP_SYNC_CONN:
3531 hci_cs_setup_sync_conn(hdev, ev->status);
3532 break;
3533
3534 case HCI_OP_SNIFF_MODE:
3535 hci_cs_sniff_mode(hdev, ev->status);
3536 break;
3537
3538 case HCI_OP_EXIT_SNIFF_MODE:
3539 hci_cs_exit_sniff_mode(hdev, ev->status);
3540 break;
3541
3542 case HCI_OP_SWITCH_ROLE:
3543 hci_cs_switch_role(hdev, ev->status);
3544 break;
3545
3546 case HCI_OP_LE_CREATE_CONN:
3547 hci_cs_le_create_conn(hdev, ev->status);
3548 break;
3549
3550 case HCI_OP_LE_READ_REMOTE_FEATURES:
3551 hci_cs_le_read_remote_features(hdev, ev->status);
3552 break;
3553
3554 case HCI_OP_LE_START_ENC:
3555 hci_cs_le_start_enc(hdev, ev->status);
3556 break;
3557
3558 case HCI_OP_LE_EXT_CREATE_CONN:
3559 hci_cs_le_ext_create_conn(hdev, ev->status);
3560 break;
3561
3562 default:
3563 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3564 break;
3565 }
3566
3567 if (*opcode != HCI_OP_NOP)
3568 cancel_delayed_work(&hdev->cmd_timer);
3569
3570 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3571 atomic_set(&hdev->cmd_cnt, 1);
3572
3573 /* Indicate request completion if the command failed. Also, if
3574 * we're not waiting for a special event and we get a success
3575 * command status we should try to flag the request as completed
3576	 * (since for this kind of command there will not be a command
3577 * complete event).
3578 */
3579 if (ev->status ||
3580 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3581 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3582 req_complete_skb);
3583
3584	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3585 bt_dev_err(hdev,
3586 "unexpected event for opcode 0x%4.4x", *opcode);
3587 return;
3588 }
3589
3590	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3591 queue_work(hdev->workqueue, &hdev->cmd_work);
3592}
3593
3594static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3595{
3596 struct hci_ev_hardware_error *ev = (void *) skb->data;
3597
3598 hdev->hw_error_code = ev->code;
3599
3600 queue_work(hdev->req_workqueue, &hdev->error_reset);
3601}
3602
3603static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3604{
3605 struct hci_ev_role_change *ev = (void *) skb->data;
3606 struct hci_conn *conn;
3607
3608 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3609
3610 hci_dev_lock(hdev);
3611
3612 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3613 if (conn) {
3614 if (!ev->status)
3615 conn->role = ev->role;
3616
3617 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3618
3619 hci_role_switch_cfm(conn, ev->status, ev->role);
3620 }
3621
3622 hci_dev_unlock(hdev);
3623}
3624
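/* Number Of Completed Packets event: return transmit credits to
 * the per-type counters (ACL, LE or SCO), capped at the controller
 * buffer sizes, and subtract them from each connection's sent
 * count. LE traffic shares the ACL pool when the controller
 * reports no dedicated LE buffers.
 */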
3625static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3626{
3627 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3628 int i;
3629
3630 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3631 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3632 return;
3633 }
3634
3635	if (skb->len < sizeof(*ev) ||
3636 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3637		BT_DBG("%s bad parameters", hdev->name);
3638 return;
3639 }
3640
3641 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3642
3643 for (i = 0; i < ev->num_hndl; i++) {
3644 struct hci_comp_pkts_info *info = &ev->handles[i];
3645 struct hci_conn *conn;
3646 __u16 handle, count;
3647
3648 handle = __le16_to_cpu(info->handle);
3649 count = __le16_to_cpu(info->count);
3650
3651 conn = hci_conn_hash_lookup_handle(hdev, handle);
3652 if (!conn)
3653 continue;
3654
3655 conn->sent -= count;
3656
3657 switch (conn->type) {
3658 case ACL_LINK:
3659 hdev->acl_cnt += count;
3660 if (hdev->acl_cnt > hdev->acl_pkts)
3661 hdev->acl_cnt = hdev->acl_pkts;
3662 break;
3663
3664 case LE_LINK:
3665 if (hdev->le_pkts) {
3666 hdev->le_cnt += count;
3667 if (hdev->le_cnt > hdev->le_pkts)
3668 hdev->le_cnt = hdev->le_pkts;
3669 } else {
3670 hdev->acl_cnt += count;
3671 if (hdev->acl_cnt > hdev->acl_pkts)
3672 hdev->acl_cnt = hdev->acl_pkts;
3673 }
3674 break;
3675
3676 case SCO_LINK:
3677 hdev->sco_cnt += count;
3678 if (hdev->sco_cnt > hdev->sco_pkts)
3679 hdev->sco_cnt = hdev->sco_pkts;
3680 break;
3681
3682 default:
3683 bt_dev_err(hdev, "unknown type %d conn %p",
3684 conn->type, conn);
3685 break;
3686 }
3687 }
3688
3689 queue_work(hdev->workqueue, &hdev->tx_work);
3690}
3691
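/* Resolve a handle for block-based flow control. On a primary
 * controller the handle identifies a connection directly; on an
 * AMP controller it identifies a logical channel, so the lookup
 * goes through the channel to its owning connection.
 */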
3692static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3693 __u16 handle)
3694{
3695 struct hci_chan *chan;
3696
3697 switch (hdev->dev_type) {
3698 case HCI_PRIMARY:
3699 return hci_conn_hash_lookup_handle(hdev, handle);
3700 case HCI_AMP:
3701 chan = hci_chan_lookup_handle(hdev, handle);
3702 if (chan)
3703 return chan->conn;
3704 break;
3705 default:
3706 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3707 break;
3708 }
3709
3710 return NULL;
3711}
3712
3713static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3714{
3715 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3716 int i;
3717
3718 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3719 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3720 return;
3721 }
3722
3723	if (skb->len < sizeof(*ev) ||
3724 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3725		BT_DBG("%s bad parameters", hdev->name);
3726 return;
3727 }
3728
3729 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3730 ev->num_hndl);
3731
3732 for (i = 0; i < ev->num_hndl; i++) {
3733 struct hci_comp_blocks_info *info = &ev->handles[i];
3734 struct hci_conn *conn = NULL;
3735 __u16 handle, block_count;
3736
3737 handle = __le16_to_cpu(info->handle);
3738 block_count = __le16_to_cpu(info->blocks);
3739
3740 conn = __hci_conn_lookup_handle(hdev, handle);
3741 if (!conn)
3742 continue;
3743
3744 conn->sent -= block_count;
3745
3746 switch (conn->type) {
3747 case ACL_LINK:
3748 case AMP_LINK:
3749 hdev->block_cnt += block_count;
3750 if (hdev->block_cnt > hdev->num_blocks)
3751 hdev->block_cnt = hdev->num_blocks;
3752 break;
3753
3754 default:
3755 bt_dev_err(hdev, "unknown type %d conn %p",
3756 conn->type, conn);
3757 break;
3758 }
3759 }
3760
3761 queue_work(hdev->workqueue, &hdev->tx_work);
3762}
3763
3764static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3765{
3766 struct hci_ev_mode_change *ev = (void *) skb->data;
3767 struct hci_conn *conn;
3768
3769 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3770
3771 hci_dev_lock(hdev);
3772
3773 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3774 if (conn) {
3775 conn->mode = ev->mode;
3776
3777 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3778 &conn->flags)) {
3779 if (conn->mode == HCI_CM_ACTIVE)
3780 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3781 else
3782 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3783 }
3784
3785 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3786 hci_sco_setup(conn, ev->status);
3787 }
3788
3789 hci_dev_unlock(hdev);
3790}
3791
3792static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3793{
3794 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3795 struct hci_conn *conn;
3796
3797 BT_DBG("%s", hdev->name);
3798
3799 hci_dev_lock(hdev);
3800
3801 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3802 if (!conn)
3803 goto unlock;
3804
3805 if (conn->state == BT_CONNECTED) {
3806 hci_conn_hold(conn);
3807 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3808 hci_conn_drop(conn);
3809 }
3810
3811 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3812 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3813 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3814 sizeof(ev->bdaddr), &ev->bdaddr);
3815 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3816 u8 secure;
3817
3818 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3819 secure = 1;
3820 else
3821 secure = 0;
3822
3823 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3824 }
3825
3826unlock:
3827 hci_dev_unlock(hdev);
3828}
3829
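/* Derive the pending security level from the link key type: debug
 * and unit keys leave it untouched, unauthenticated keys cap it at
 * medium, an authenticated P-192 key gives high and an
 * authenticated P-256 key gives FIPS. A legacy combination key
 * only counts as high security with a 16 digit PIN.
 */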
3830static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3831{
3832 if (key_type == HCI_LK_CHANGED_COMBINATION)
3833 return;
3834
3835 conn->pin_length = pin_len;
3836 conn->key_type = key_type;
3837
3838 switch (key_type) {
3839 case HCI_LK_LOCAL_UNIT:
3840 case HCI_LK_REMOTE_UNIT:
3841 case HCI_LK_DEBUG_COMBINATION:
3842 return;
3843 case HCI_LK_COMBINATION:
3844 if (pin_len == 16)
3845 conn->pending_sec_level = BT_SECURITY_HIGH;
3846 else
3847 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3848 break;
3849 case HCI_LK_UNAUTH_COMBINATION_P192:
3850 case HCI_LK_UNAUTH_COMBINATION_P256:
3851 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3852 break;
3853 case HCI_LK_AUTH_COMBINATION_P192:
3854 conn->pending_sec_level = BT_SECURITY_HIGH;
3855 break;
3856 case HCI_LK_AUTH_COMBINATION_P256:
3857 conn->pending_sec_level = BT_SECURITY_FIPS;
3858 break;
3859 }
3860}
3861
3862static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3863{
3864 struct hci_ev_link_key_req *ev = (void *) skb->data;
3865 struct hci_cp_link_key_reply cp;
3866 struct hci_conn *conn;
3867 struct link_key *key;
3868
3869 BT_DBG("%s", hdev->name);
3870
3871 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3872 return;
3873
3874 hci_dev_lock(hdev);
3875
3876 key = hci_find_link_key(hdev, &ev->bdaddr);
3877 if (!key) {
3878 BT_DBG("%s link key not found for %pMR", hdev->name,
3879 &ev->bdaddr);
3880 goto not_found;
3881 }
3882
3883 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3884 &ev->bdaddr);
3885
3886 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3887 if (conn) {
3888 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3889
3890 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3891 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3892 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3893 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3894 goto not_found;
3895 }
3896
3897 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3898 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3899 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3900 BT_DBG("%s ignoring key unauthenticated for high security",
3901 hdev->name);
3902 goto not_found;
3903 }
3904
3905 conn_set_key(conn, key->type, key->pin_len);
3906 }
3907
3908 bacpy(&cp.bdaddr, &ev->bdaddr);
3909 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3910
3911 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3912
3913 hci_dev_unlock(hdev);
3914
3915 return;
3916
3917not_found:
3918 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3919 hci_dev_unlock(hdev);
3920}
3921
3922static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3923{
3924 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3925 struct hci_conn *conn;
3926 struct link_key *key;
3927 bool persistent;
3928 u8 pin_len = 0;
3929
3930 BT_DBG("%s", hdev->name);
3931
3932 hci_dev_lock(hdev);
3933
3934 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3935 if (!conn)
3936 goto unlock;
3937
3938 hci_conn_hold(conn);
3939 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3940 hci_conn_drop(conn);
3941
3942 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3943 conn_set_key(conn, ev->key_type, conn->pin_length);
3944
3945 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3946 goto unlock;
3947
3948 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3949 ev->key_type, pin_len, &persistent);
3950 if (!key)
3951 goto unlock;
3952
3953 /* Update connection information since adding the key will have
3954 * fixed up the type in the case of changed combination keys.
3955 */
3956 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3957 conn_set_key(conn, key->type, key->pin_len);
3958
3959 mgmt_new_link_key(hdev, key, persistent);
3960
3961 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3962	 * is set. If it's not set, simply remove the key from the kernel
3963 * list (we've still notified user space about it but with
3964 * store_hint being 0).
3965 */
3966 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3967 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3968 list_del_rcu(&key->list);
3969 kfree_rcu(key, rcu);
3970 goto unlock;
3971 }
3972
3973 if (persistent)
3974 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3975 else
3976 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3977
3978unlock:
3979 hci_dev_unlock(hdev);
3980}
3981
3982static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3983{
3984 struct hci_ev_clock_offset *ev = (void *) skb->data;
3985 struct hci_conn *conn;
3986
3987 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3988
3989 hci_dev_lock(hdev);
3990
3991 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3992 if (conn && !ev->status) {
3993 struct inquiry_entry *ie;
3994
3995 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3996 if (ie) {
3997 ie->data.clock_offset = ev->clock_offset;
3998 ie->timestamp = jiffies;
3999 }
4000 }
4001
4002 hci_dev_unlock(hdev);
4003}
4004
4005static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4006{
4007 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4008 struct hci_conn *conn;
4009
4010 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4011
4012 hci_dev_lock(hdev);
4013
4014 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4015 if (conn && !ev->status)
4016 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4017
4018 hci_dev_unlock(hdev);
4019}
4020
4021static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4022{
4023 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4024 struct inquiry_entry *ie;
4025
4026 BT_DBG("%s", hdev->name);
4027
4028 hci_dev_lock(hdev);
4029
4030 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4031 if (ie) {
4032 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4033 ie->timestamp = jiffies;
4034 }
4035
4036 hci_dev_unlock(hdev);
4037}
4038
4039static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4040 struct sk_buff *skb)
4041{
4042 struct inquiry_data data;
4043 int num_rsp = *((__u8 *) skb->data);
4044
4045 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4046
4047 if (!num_rsp)
4048 return;
4049
4050 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4051 return;
4052
4053 hci_dev_lock(hdev);
4054
4055 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4056 struct inquiry_info_with_rssi_and_pscan_mode *info;
4057 info = (void *) (skb->data + 1);
4058
4059		if (skb->len < num_rsp * sizeof(*info) + 1)
4060 goto unlock;
4061
4062		for (; num_rsp; num_rsp--, info++) {
4063 u32 flags;
4064
4065 bacpy(&data.bdaddr, &info->bdaddr);
4066 data.pscan_rep_mode = info->pscan_rep_mode;
4067 data.pscan_period_mode = info->pscan_period_mode;
4068 data.pscan_mode = info->pscan_mode;
4069 memcpy(data.dev_class, info->dev_class, 3);
4070 data.clock_offset = info->clock_offset;
4071 data.rssi = info->rssi;
4072 data.ssp_mode = 0x00;
4073
4074 flags = hci_inquiry_cache_update(hdev, &data, false);
4075
4076 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4077 info->dev_class, info->rssi,
4078 flags, NULL, 0, NULL, 0);
4079 }
4080 } else {
4081 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4082
4083		if (skb->len < num_rsp * sizeof(*info) + 1)
4084 goto unlock;
4085
4086		for (; num_rsp; num_rsp--, info++) {
4087 u32 flags;
4088
4089 bacpy(&data.bdaddr, &info->bdaddr);
4090 data.pscan_rep_mode = info->pscan_rep_mode;
4091 data.pscan_period_mode = info->pscan_period_mode;
4092 data.pscan_mode = 0x00;
4093 memcpy(data.dev_class, info->dev_class, 3);
4094 data.clock_offset = info->clock_offset;
4095 data.rssi = info->rssi;
4096 data.ssp_mode = 0x00;
4097
4098 flags = hci_inquiry_cache_update(hdev, &data, false);
4099
4100 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4101 info->dev_class, info->rssi,
4102 flags, NULL, 0, NULL, 0);
4103 }
4104 }
4105
4106unlock:
4107	hci_dev_unlock(hdev);
4108}
4109
4110static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4111 struct sk_buff *skb)
4112{
4113 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4114 struct hci_conn *conn;
4115
4116 BT_DBG("%s", hdev->name);
4117
4118 hci_dev_lock(hdev);
4119
4120 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4121 if (!conn)
4122 goto unlock;
4123
4124 if (ev->page < HCI_MAX_PAGES)
4125 memcpy(conn->features[ev->page], ev->features, 8);
4126
4127 if (!ev->status && ev->page == 0x01) {
4128 struct inquiry_entry *ie;
4129
4130 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4131 if (ie)
4132 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4133
4134 if (ev->features[0] & LMP_HOST_SSP) {
4135 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4136 } else {
4137 /* It is mandatory by the Bluetooth specification that
4138 * Extended Inquiry Results are only used when Secure
4139 * Simple Pairing is enabled, but some devices violate
4140 * this.
4141 *
4142 * To make these devices work, the internal SSP
4143 * enabled flag needs to be cleared if the remote host
4144 * features do not indicate SSP support */
4145 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4146 }
4147
4148 if (ev->features[0] & LMP_HOST_SC)
4149 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4150 }
4151
4152 if (conn->state != BT_CONFIG)
4153 goto unlock;
4154
4155 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4156 struct hci_cp_remote_name_req cp;
4157 memset(&cp, 0, sizeof(cp));
4158 bacpy(&cp.bdaddr, &conn->dst);
4159 cp.pscan_rep_mode = 0x02;
4160 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4161 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4162 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4163
4164 if (!hci_outgoing_auth_needed(hdev, conn)) {
4165 conn->state = BT_CONNECTED;
4166 hci_connect_cfm(conn, ev->status);
4167 hci_conn_drop(conn);
4168 }
4169
4170unlock:
4171 hci_dev_unlock(hdev);
4172}
4173
4174static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4175 struct sk_buff *skb)
4176{
4177 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4178 struct hci_conn *conn;
4179
4180 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4181
4182 hci_dev_lock(hdev);
4183
4184 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4185 if (!conn) {
4186 if (ev->link_type == ESCO_LINK)
4187 goto unlock;
4188
4189 /* When the link type in the event indicates SCO connection
4190 * and lookup of the connection object fails, then check
4191 * if an eSCO connection object exists.
4192 *
4193 * The core limits the synchronous connections to either
4194 * SCO or eSCO. The eSCO connection is preferred and tried
4195 * to be setup first and until successfully established,
4196 * the link type will be hinted as eSCO.
4197 */
4198 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4199 if (!conn)
4200 goto unlock;
4201 }
4202
4203 switch (ev->status) {
4204 case 0x00:
Olivier Deprez0e641232021-09-23 10:07:05 +02004205 /* The synchronous connection complete event should only be
4206 * sent once per new connection. Receiving a successful
4207 * complete event when the connection status is already
4208 * BT_CONNECTED means that the device is misbehaving and sent
4209 * multiple complete event packets for the same new connection.
4210 *
4211 * Registering the device more than once can corrupt kernel
4212 * memory, hence upon detecting this invalid event, we report
4213 * an error and ignore the packet.
4214 */
4215 if (conn->state == BT_CONNECTED) {
4216 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4217 goto unlock;
4218 }
4219
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004220 conn->handle = __le16_to_cpu(ev->handle);
4221 conn->state = BT_CONNECTED;
4222 conn->type = ev->link_type;
4223
4224 hci_debugfs_create_conn(conn);
4225 hci_conn_add_sysfs(conn);
4226 break;
4227
4228 case 0x10: /* Connection Accept Timeout */
4229 case 0x0d: /* Connection Rejected due to Limited Resources */
4230 case 0x11: /* Unsupported Feature or Parameter Value */
4231 case 0x1c: /* SCO interval rejected */
4232 case 0x1a: /* Unsupported Remote Feature */
Olivier Deprez0e641232021-09-23 10:07:05 +02004233 case 0x1e: /* Invalid LMP Parameters */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004234 case 0x1f: /* Unspecified error */
4235 case 0x20: /* Unsupported LMP Parameter value */
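		/* For these failure reasons, an outgoing attempt is retried
		 * with a downgraded packet type selection: the eSCO and EDR
		 * packet bits are re-derived from hdev->esco_type so that
		 * the setup below can fall back to parameters the remote
		 * side accepts.
		 */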
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

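/* EIR data is a sequence of length-prefixed fields: each field starts
 * with a length byte that counts the field type and payload but not
 * itself, and a zero length byte terminates the data. This helper
 * returns the number of meaningful bytes so trailing padding can be
 * ignored.
 */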
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];

		if (field_len == 0)
			return parsed;

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return eir_len;
}

static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}

static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

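/* Bit 0 of the authentication requirement encodes the MITM protection
 * flag, while the remaining bits select the bonding type. The helper
 * below combines the local and remote requirements into the value used
 * for the IO Capability Reply.
 */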
static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == HCI_AT_NO_BONDING ||
	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
		return conn->remote_auth | (conn->auth_type & 0x01);

	/* If both remote and local have enough IO capabilities, require
	 * MITM protection
	 */
	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
		return conn->remote_auth | 0x01;

	/* No MITM protection possible so ignore remote requirement */
	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}

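/* The return value feeds the OOB data present field of the IO
 * Capability Reply: 0x00 means no OOB data, 0x01 signals P-192 data
 * and 0x02 signals P-256 data, matching the encoding defined by the
 * HCI specification for this parameter.
 */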
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
		    !memcmp(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or not supported
	 * by the hardware, then check if the P-192 data values are
	 * present.
	 */
	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
	    !memcmp(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}

static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're bondable, if we are the initiators
	 * of the pairing, or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo, since KeyboardDisplay is not
		 * defined for BR/EDR by the BT spec.
		 */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_auth = ev->authentication;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection, auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators, request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_user_passkey_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_passkey_req *ev = (void *) skb->data;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
}

static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	conn->passkey_notify = __le32_to_cpu(ev->passkey);
	conn->passkey_entered = 0;

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

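	/* Track how many passkey digits the remote user has entered so
	 * far, so user space can mirror the typing progress. Started
	 * and completed keypresses only reset or finish the count.
	 */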
	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event is always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event.
	 */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}

static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

#if IS_ENABLED(CONFIG_BT_HS)
static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_channel_selected *ev = (void *)skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);

	skb_pull(skb, sizeof(*ev));

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	amp_read_loc_assoc_final_data(hdev, hcon);
}

static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (!hcon->amp_mgr) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}

static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}

static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
	struct hci_chan *hchan;

	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
	       le16_to_cpu(ev->handle), ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
	if (!hchan || !hchan->amp)
		goto unlock;

	amp_destroy_logical_link(hchan, ev->reason);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (hcon) {
		hcon->state = BT_CLOSED;
		hci_conn_del(hcon);
	}

	hci_dev_unlock(hdev);
}
#endif

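/* Fill in the initiator and responder address information for a new
 * LE connection. The addresses recorded here are later consumed by
 * the mgmt and SMP layers, and a controller-provided Local RPA takes
 * precedence over the locally cached hdev->rpa when present.
 */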
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* If the controller has set a Local RPA, then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* If the controller has set a Local RPA, then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * the Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}

static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for master
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports slave-initiated features
	 * exchange, then requesting the remote features in slave
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}

static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     NULL, ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));
}

static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));
}

static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status) {
		struct adv_info *adv;

		adv = hci_find_adv_instance(hdev, ev->handle);
		if (!adv)
			return;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		return;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		struct adv_info *adv_instance;

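		/* The responder address only needs rewriting here when
		 * we advertise with a random address; with extended
		 * advertising it comes from the advertising set that
		 * produced this connection.
		 */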
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			return;

		if (!hdev->cur_adv_instance) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			return;
		}

		adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&conn->resp_addr, &adv_instance->random_addr);
	}
}

static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->le_conn_interval = le16_to_cpu(ev->interval);
		conn->le_conn_latency = le16_to_cpu(ev->latency);
		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
	}

	hci_dev_unlock(hdev);
}

/* This function requires that the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controllers will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave devices are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by a higher layer that tried to connect. If not, store
		 * the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at a time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}

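/* Common entry point for legacy, extended and direct advertising
 * reports: sanitize the report, resolve the advertiser address, honor
 * pending connection requests and finally merge ADV_IND/ADV_SCAN_IND
 * data with a following SCAN_RSP before reporting the device.
 */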
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;
	u8 *ptr, real_len;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	real_len = ptr - data;

	/* Adjust for actual length */
	if (len != real_len) {
		bt_dev_err_ratelimited(hdev, "advertising data len corrected");
		len = real_len;
	}

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kinds of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
				     direct_addr);
	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable, which is then clearly indicated in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller gets confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}

static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;
		s8 rssi;

		if (ev->length <= HCI_MAX_AD_LENGTH) {
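			/* The RSSI byte immediately follows the
			 * advertising data in each report.
			 */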
			rssi = ev->data[ev->length];
			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, rssi,
					   ev->data, ev->length, false);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}

		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}

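/* Extended advertising reports carry a 16-bit event type bitfield.
 * Map it back onto the legacy PDU types so the rest of the report
 * processing can stay common between legacy and extended scanning.
 */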
static u8 ext_evt_type_to_legacy(u16 evt_type)
{
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		switch (evt_type) {
		case LE_LEGACY_ADV_IND:
			return LE_ADV_IND;
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;
		}

		BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
				   evt_type);

		return LE_ADV_INVALID;
	}

	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

		return LE_ADV_IND;
	}

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

	BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
			   evt_type);

	return LE_ADV_INVALID;
}

static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_ext_adv_report *ev = ptr;
		u8 legacy_evt_type;
		u16 evt_type;

		evt_type = __le16_to_cpu(ev->evt_type);
		legacy_evt_type = ext_evt_type_to_legacy(evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, ev->rssi,
					   ev->data, ev->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));
		}

		ptr += sizeof(*ev) + ev->length;
	}

	hci_dev_unlock(hdev);
}

static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports slave-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
			    !conn->out && ev->status == 0x1a)
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];

	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
		return;

	hci_dev_lock(hdev);

	for (; num_reports; num_reports--, ev++)
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0,
				   false);

	hci_dev_unlock(hdev);
}

static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}

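/* Check whether the received event concludes the given request: a
 * matching req_event, or a Command Complete carrying the expected
 * opcode. Command Status never carries extra return parameters, so it
 * can never satisfy a request that expects a response skb.
 */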
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}