/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};

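/* Return the hdev bound to this socket. Yields -EBADFD as an ERR_PTR
 * when the socket is not bound to any device and -EPIPE when the
 * bound device is in the process of being unregistered.
 */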
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

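/* Assign a monitor cookie to the socket on first use and remember the
 * name of the owning task. Returns true only when a new cookie was
 * generated.
 */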
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

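/* Apply the per-socket HCI filter to a packet. Returns true when the
 * packet has to be dropped, either because its type is not in the
 * type mask or, for event packets, because the event or the command
 * opcode does not match the filter.
 */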
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

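/* Build the monitor message that mirrors a device lifecycle event
 * (register, unregister, setup, open, close, up) so that it can be
 * forwarded to HCI_CHANNEL_MONITOR listeners.
 */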
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

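/* Build the monitor notification that announces a socket, identified
 * by its cookie, opening the raw, user or control channel.
 */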
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

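/* Replay the state of all registered controllers to a newly bound
 * monitor socket so that it starts out with a consistent view.
 */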
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

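/* Forward a device event to the monitor channel and to raw sockets.
 * On HCI_DEV_UNREG also wake up all sockets still bound to the dead
 * device so they can either close or re-bind.
 */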
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

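/* Release an HCI socket: inform the monitor, free the cookie and, for
 * user channel sockets, close the controller and hand it back to the
 * management interface.
 */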
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	release_sock(sk);
	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will only ever be generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

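/* Bind a socket to one of the HCI channels. Raw and user channel
 * bindings attach the socket to a controller; the capability checks
 * are channel specific and the monitor is informed about each raw,
 * user and control channel binding.
 */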
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been
			 * assigned, then there has already been an ioctl
			 * issued against an unbound socket and with that
			 * triggered an open notification. Send a close
			 * notification first to allow the state transition
			 * to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}

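/* Dispatch a single management command from a control channel socket:
 * validate the header, mirror the command to the monitor, enforce the
 * trust and index restrictions and finally call the registered
 * handler.
 */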
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

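/* Validate a logging channel frame (priority byte, ident string and
 * NUL terminated message) and forward it to the monitor channel as an
 * HCI_MON_USER_LOGGING message.
 */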
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}

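/* Write path of an HCI socket. Raw and user channel frames are queued
 * towards the controller, with the security filter applied to raw
 * sockets lacking CAP_NET_RAW; logging and management channels are
 * handed to their respective handlers.
 */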
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}