// SPDX-License-Identifier: GPL-2.0-only
/*
   Copyright (c) 2013-2014 Intel Corp.

*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/debugfs.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/pkt_sched.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include <net/6lowpan.h> /* for the compression support */

#define VERSION "0.1"

static struct dentry *lowpan_enable_debugfs;
static struct dentry *lowpan_control_debugfs;

#define IFACE_NAME_TEMPLATE "bt%d"

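/* Per-packet state kept in skb->cb: gw is the next-hop/gateway address
 * remembered by peer_lookup_dst() for later use in bt_xmit(), and chan is
 * the L2CAP channel selected for the destination by setup_header().
 */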
struct skb_cb {
	struct in6_addr addr;
	struct in6_addr gw;
	struct l2cap_chan *chan;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))

/* The devices list contains those devices for which we are acting
 * as a proxy. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to the
 * BT device is made via the l2cap layer. There is one virtual
 * device per BT 6LoWPAN network (=hciX device).
 * The list contains struct lowpan_btle_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_SPINLOCK(devices_lock);

static bool enable_6lowpan;

/* We listen for incoming connections via this channel
 */
static struct l2cap_chan *listen_chan;
static DEFINE_MUTEX(set_lock);

struct lowpan_peer {
	struct list_head list;
	struct rcu_head rcu;
	struct l2cap_chan *chan;

	/* peer addresses in various formats */
	unsigned char lladdr[ETH_ALEN];
	struct in6_addr peer_addr;
};

struct lowpan_btle_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count;	/* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};

static inline struct lowpan_btle_dev *
lowpan_btle_dev(const struct net_device *netdev)
{
	return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
}

static inline void peer_add(struct lowpan_btle_dev *dev,
			    struct lowpan_peer *peer)
{
	list_add_rcu(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_btle_dev *dev,
			    struct lowpan_peer *peer)
{
	list_del_rcu(&peer->list);
	kfree_rcu(peer, rcu);

	module_put(THIS_MODULE);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}

static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_btle_dev *dev,
						 bdaddr_t *ba, __u8 type)
{
	struct lowpan_peer *peer;

	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
	       ba, type);

	rcu_read_lock();

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %d",
		       &peer->chan->dst, peer->chan->dst_type);

		if (bacmp(&peer->chan->dst, ba))
			continue;

		if (type == peer->chan->dst_type) {
			rcu_read_unlock();
			return peer;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct lowpan_peer *
__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
{
	struct lowpan_peer *peer;

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		if (peer->chan == chan)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *
__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn)
{
	struct lowpan_peer *peer;

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		if (peer->chan->conn == conn)
			return peer;
	}

	return NULL;
}

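/* Find the peer that an outgoing packet should be sent to: resolve the
 * next hop (routing next hop, remembered gateway, or the destination
 * itself), match it against the peers' IPv6 addresses, and finally fall
 * back to the neighbour cache so that addresses configured via SLAAC can
 * be matched by their link-layer address.
 */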
static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
						  struct in6_addr *daddr,
						  struct sk_buff *skb)
{
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int count = atomic_read(&dev->peer_count);
	const struct in6_addr *nexthop;
	struct lowpan_peer *peer;
	struct neighbour *neigh;

	BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);

	if (!rt) {
		if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
			/* There is neither route nor gateway,
			 * probably the destination is a direct peer.
			 */
			nexthop = daddr;
		} else {
			/* There is a known gateway
			 */
			nexthop = &lowpan_cb(skb)->gw;
		}
	} else {
		nexthop = rt6_nexthop(rt, daddr);

		/* We need to remember the address because it is needed
		 * by bt_xmit() when sending the packet. In bt_xmit(), the
		 * destination routing info is not set.
		 */
		memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
	}

	BT_DBG("gw %pI6c", nexthop);

	rcu_read_lock();

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %d ip %pI6c",
		       &peer->chan->dst, peer->chan->dst_type,
		       &peer->peer_addr);

		if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
			rcu_read_unlock();
			return peer;
		}
	}

	/* use the neighbour cache for matching addresses assigned by SLAAC
	 */
	neigh = __ipv6_neigh_lookup(dev->netdev, nexthop);
	if (neigh) {
		list_for_each_entry_rcu(peer, &dev->peers, list) {
			if (!memcmp(neigh->ha, peer->lladdr, ETH_ALEN)) {
				neigh_release(neigh);
				rcu_read_unlock();
				return peer;
			}
		}
		neigh_release(neigh);
	}

	rcu_read_unlock();

	return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		peer = __peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	rcu_read_unlock();

	return peer;
}

static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_btle_dev *dev = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	rcu_read_unlock();

	return dev;
}

static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return NET_RX_DROP;

	return netif_rx_ni(skb_cp);
}

static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
			   struct lowpan_peer *peer)
{
	const u8 *saddr;

	saddr = peer->lladdr;

	return lowpan_header_decompress(skb, netdev, netdev->dev_addr, saddr);
}

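/* Hand a packet received from a peer to the network stack. Only two
 * 6LoWPAN dispatch types are accepted: uncompressed IPv6 and IPHC
 * compressed headers; anything else is dropped.
 */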
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct lowpan_peer *peer)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN || !skb->len)
		goto drop;

	skb_reset_network_header(skb);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto drop;

	/* check that it's our buffer */
	if (lowpan_is_ipv6(*skb_network_header(skb))) {
		/* Pull off the 1-byte 6lowpan header. */
		skb_pull(skb, 1);

		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;
		local_skb->dev = dev;

		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else if (lowpan_is_iphc(*skb_network_header(skb))) {
		local_skb = skb_clone(skb, GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->dev = dev;

		ret = iphc_decompress(local_skb, dev, peer);
		if (ret < 0) {
			BT_DBG("iphc_decompress failed: %d", ret);
			kfree_skb(local_skb);
			goto drop;
		}

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		if (give_skb_to_upper(local_skb, dev)
				!= NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else {
		BT_DBG("unknown packet type");
		goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	dev->stats.rx_dropped++;
	return NET_RX_DROP;
}

/* Packet from BT LE device */
static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct lowpan_btle_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(chan->conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, peer);
	if (err) {
		BT_DBG("recv pkt %d", err);
		err = -EAGAIN;
	}

	return err;
}

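/* Compress the header and pick the destination peer for an outgoing
 * packet. Returns a negative error if the peer is unknown, 0 for a
 * multicast destination and 1 for a unicast destination (see bt_xmit()).
 */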
static int setup_header(struct sk_buff *skb, struct net_device *netdev,
			bdaddr_t *peer_addr, u8 *peer_addr_type)
{
	struct in6_addr ipv6_daddr;
	struct ipv6hdr *hdr;
	struct lowpan_btle_dev *dev;
	struct lowpan_peer *peer;
	u8 *daddr;
	int err, status = 0;

	hdr = ipv6_hdr(skb);

	dev = lowpan_btle_dev(netdev);

	memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));

	if (ipv6_addr_is_multicast(&ipv6_daddr)) {
		lowpan_cb(skb)->chan = NULL;
		daddr = NULL;
	} else {
		BT_DBG("dest IP %pI6c", &ipv6_daddr);

		/* The packet might be sent to the 6lowpan interface
		 * because of routing (either via a default route or a
		 * user-set route), so look up the peer by the
		 * destination address.
		 */
		peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
		if (!peer) {
			BT_DBG("no such peer");
			return -ENOENT;
		}

		daddr = peer->lladdr;
		*peer_addr = peer->chan->dst;
		*peer_addr_type = peer->chan->dst_type;
		lowpan_cb(skb)->chan = peer->chan;

		status = 1;
	}

	lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);

	err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
	if (err < 0)
		return err;

	return status;
}

static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	if (type != ETH_P_IPV6)
		return -EINVAL;

	return 0;
}

/* Packet to BT LE device */
static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
		    struct net_device *netdev)
{
	struct msghdr msg;
	struct kvec iv;
	int err;

	/* Remember the skb so that we can send EAGAIN to the caller if
	 * we run out of credits.
	 */
	chan->data = skb;

	iv.iov_base = skb->data;
	iv.iov_len = skb->len;

	memset(&msg, 0, sizeof(msg));
	iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len);

	err = l2cap_chan_send(chan, &msg, skb->len);
	if (err > 0) {
		netdev->stats.tx_bytes += err;
		netdev->stats.tx_packets++;
		return 0;
	}

	if (err < 0)
		netdev->stats.tx_errors++;

	return err;
}

static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
	struct sk_buff *local_skb;
	struct lowpan_btle_dev *entry;
	int err = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		struct lowpan_peer *pentry;
		struct lowpan_btle_dev *dev;

		if (entry->netdev != netdev)
			continue;

		dev = lowpan_btle_dev(entry->netdev);

		list_for_each_entry_rcu(pentry, &dev->peers, list) {
			int ret;

			local_skb = skb_clone(skb, GFP_ATOMIC);

			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
			       netdev->name,
			       &pentry->chan->dst, pentry->chan->dst_type,
			       &pentry->peer_addr, pentry->chan);
			ret = send_pkt(pentry->chan, local_skb, netdev);
			if (ret < 0)
				err = ret;

			kfree_skb(local_skb);
		}
	}

	rcu_read_unlock();

	return err;
}

static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	bdaddr_t addr;
	u8 addr_type;

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NET_XMIT_DROP;

	/* Return values from setup_header()
	 * <0 - error, packet is dropped
	 *  0 - this is a multicast packet
	 *  1 - this is unicast packet
	 */
	err = setup_header(skb, netdev, &addr, &addr_type);
	if (err < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (err) {
		if (lowpan_cb(skb)->chan) {
			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
			       netdev->name, &addr, addr_type,
			       &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
			err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
		} else {
			err = -ENOENT;
		}
	} else {
		/* We need to send the packet to every device behind this
		 * interface.
		 */
		err = send_mcast_pkt(skb, netdev);
	}

	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return err < 0 ? NET_XMIT_DROP : err;
}

static const struct net_device_ops netdev_ops = {
	.ndo_start_xmit		= bt_xmit,
};

static const struct header_ops header_ops = {
	.create	= header_create,
};

static void netdev_setup(struct net_device *dev)
{
	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->flags		= IFF_RUNNING | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;
	dev->tx_queue_len	= DEFAULT_TX_QUEUE_LEN;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->needs_free_netdev	= true;
}

static struct device_type bt_type = {
	.name	= "bluetooth",
};

static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev, NULL);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}

static void ifdown(struct net_device *netdev)
{
	rtnl_lock();
	dev_close(netdev);
	rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
						   notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	if (!enable_6lowpan)
		return false;

	return true;
}

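/* Allocate an LE connection-oriented channel with the defaults used here
 * for IPSP: LE flow control mode and an incoming MTU of 1280 bytes
 * (presumably chosen to match the IPv6 minimum link MTU).
 */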
static struct l2cap_chan *chan_create(void)
{
	struct l2cap_chan *chan;

	chan = l2cap_chan_create();
	if (!chan)
		return NULL;

	l2cap_chan_set_defaults(chan);

	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
	chan->mode = L2CAP_MODE_LE_FLOWCTL;
	chan->imtu = 1280;

	return chan;
}

static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
					struct lowpan_btle_dev *dev,
					bool new_netdev)
{
	struct lowpan_peer *peer;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return NULL;

	peer->chan = chan;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	baswap((void *)peer->lladdr, &chan->dst);

	lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	spin_unlock(&devices_lock);

	/* Notifying peers about us needs to be done without locks held */
	if (new_netdev)
		INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return peer->chan;
}

static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
{
	struct net_device *netdev;
	int err = 0;

	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
			      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
			      netdev_setup);
	if (!netdev)
		return -ENOMEM;

	netdev->addr_assign_type = NET_ADDR_PERM;
	baswap((void *)netdev->dev_addr, &chan->src);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	*dev = lowpan_btle_dev(netdev);
	(*dev)->netdev = netdev;
	(*dev)->hdev = chan->conn->hcon->hdev;
	INIT_LIST_HEAD(&(*dev)->peers);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&(*dev)->list);
	list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
	spin_unlock(&devices_lock);

	err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		spin_lock(&devices_lock);
		list_del_rcu(&(*dev)->list);
		spin_unlock(&devices_lock);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
	       netdev->ifindex, &chan->dst, chan->dst_type,
	       &chan->src, chan->src_type);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	return 0;

out:
	return err;
}

static inline void chan_ready_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;
	bool new_netdev = false;

	dev = lookup_dev(chan->conn);

	BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);

	if (!dev) {
		if (setup_netdev(chan, &dev) < 0) {
			l2cap_chan_del(chan, -ENOENT);
			return;
		}
		new_netdev = true;
	}

	if (!try_module_get(THIS_MODULE))
		return;

	add_peer_chan(chan, dev, new_netdev);
	ifup(dev->netdev);
}

static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
{
	struct l2cap_chan *chan;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = pchan->ops;

	BT_DBG("chan %p pchan %p", chan, pchan);

	return chan;
}

static void delete_netdev(struct work_struct *work)
{
	struct lowpan_btle_dev *entry = container_of(work,
						     struct lowpan_btle_dev,
						     delete_netdev);

	lowpan_unregister_netdev(entry->netdev);

	/* The entry pointer is deleted by the netdev destructor. */
}

static void chan_close_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_btle_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	bool last = false, remove = true;

	BT_DBG("chan %p conn %p", chan, chan->conn);

	if (chan->conn && chan->conn->hcon) {
		if (!is_bt_6lowpan(chan->conn->hcon))
			return;

		/* If conn is set, then the netdev is also there and we should
		 * not remove it.
		 */
		remove = false;
	}

	spin_lock(&devices_lock);

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		dev = lowpan_btle_dev(entry->netdev);
		peer = __peer_lookup_chan(dev, chan);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;

			BT_DBG("dev %p removing %speer %p", dev,
			       last ? "last " : "1 ", peer);
			BT_DBG("chan %p orig refcnt %d", chan,
			       kref_read(&chan->kref));

			l2cap_chan_put(chan);
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		spin_unlock(&devices_lock);

		cancel_delayed_work_sync(&dev->notify_peers);

		ifdown(dev->netdev);

		if (remove) {
			INIT_WORK(&entry->delete_netdev, delete_netdev);
			schedule_work(&entry->delete_netdev);
		}
	} else {
		spin_unlock(&devices_lock);
	}

	return;
}

static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
{
	BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
	       state_to_string(state), err);
}

static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
					 unsigned long hdr_len,
					 unsigned long len, int nb)
{
	/* Note that we must allocate using GFP_ATOMIC here as
	 * this function is called originally from netdev hard xmit
	 * function in atomic context.
	 */
	return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
}

static void chan_suspend_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;

	BT_DBG("chan %p suspend", chan);

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return;

	netif_stop_queue(dev->netdev);
}

static void chan_resume_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;

	BT_DBG("chan %p resume", chan);

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return;

	netif_wake_queue(dev->netdev);
}

static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
	return L2CAP_CONN_TIMEOUT;
}

static const struct l2cap_ops bt_6lowpan_chan_ops = {
	.name			= "L2CAP 6LoWPAN channel",
	.new_connection		= chan_new_conn_cb,
	.recv			= chan_recv_cb,
	.close			= chan_close_cb,
	.state_change		= chan_state_change_cb,
	.ready			= chan_ready_cb,
	.resume			= chan_resume_cb,
	.suspend		= chan_suspend_cb,
	.get_sndtimeo		= chan_get_sndtimeo_cb,
	.alloc_skb		= chan_alloc_skb_cb,

	.teardown		= l2cap_chan_no_teardown,
	.defer			= l2cap_chan_no_defer,
	.set_shutdown		= l2cap_chan_no_set_shutdown,
};

static inline __u8 bdaddr_type(__u8 type)
{
	if (type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;
	else
		return BDADDR_LE_RANDOM;
}

static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
	struct l2cap_chan *chan;
	int err;

	chan = chan_create();
	if (!chan)
		return -EINVAL;

	chan->ops = &bt_6lowpan_chan_ops;

	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
				 addr, dst_type);

	BT_DBG("chan %p err %d", chan, err);
	if (err < 0)
		l2cap_chan_put(chan);

	return err;
}

static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
{
	struct lowpan_peer *peer;

	BT_DBG("conn %p dst type %d", conn, dst_type);

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	BT_DBG("peer %p chan %p", peer, peer->chan);

	l2cap_chan_close(peer->chan, ENOENT);

	return 0;
}

static struct l2cap_chan *bt_6lowpan_listen(void)
{
	bdaddr_t *addr = BDADDR_ANY;
	struct l2cap_chan *chan;
	int err;

	if (!enable_6lowpan)
		return NULL;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = &bt_6lowpan_chan_ops;
	chan->state = BT_LISTEN;
	chan->src_type = BDADDR_LE_PUBLIC;

	atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);

	BT_DBG("chan %p src type %d", chan, chan->src_type);

	err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
	if (err) {
		l2cap_chan_put(chan);
		BT_ERR("psm cannot be added err %d", err);
		return NULL;
	}

	return chan;
}

static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
			  struct l2cap_conn **conn)
{
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int n;

	n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
		   &addr->b[5], &addr->b[4], &addr->b[3],
		   &addr->b[2], &addr->b[1], &addr->b[0],
		   addr_type);

	if (n < 7)
		return -EINVAL;

	/* The LE_PUBLIC address type is ignored because of BDADDR_ANY */
	hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC);
	if (!hdev)
		return -ENOENT;

	hci_dev_lock(hdev);
	hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
	hci_dev_unlock(hdev);

	if (!hcon)
		return -ENOENT;

	*conn = (struct l2cap_conn *)hcon->l2cap_data;

	BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);

	return 0;
}

static void disconnect_all_peers(void)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer, *tmp_peer, *new_peer;
	struct list_head peers;

	INIT_LIST_HEAD(&peers);

	/* We make a separate list of peers as the close_cb() will
	 * modify the device peers list so it is better not to mess
	 * with the same list at the same time.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry_rcu(peer, &entry->peers, list) {
			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
			if (!new_peer)
				break;

			new_peer->chan = peer->chan;
			INIT_LIST_HEAD(&new_peer->list);

			list_add(&new_peer->list, &peers);
		}
	}

	rcu_read_unlock();

	spin_lock(&devices_lock);
	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
		l2cap_chan_close(peer->chan, ENOENT);

		list_del_rcu(&peer->list);
		kfree_rcu(peer, rcu);
	}
	spin_unlock(&devices_lock);
}

struct set_enable {
	struct work_struct work;
	bool flag;
};

static void do_enable_set(struct work_struct *work)
{
	struct set_enable *set_enable = container_of(work,
						     struct set_enable, work);

	if (!set_enable->flag || enable_6lowpan != set_enable->flag)
		/* Disconnect existing connections if 6lowpan is
		 * disabled
		 */
		disconnect_all_peers();

	enable_6lowpan = set_enable->flag;

	mutex_lock(&set_lock);
	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	listen_chan = bt_6lowpan_listen();
	mutex_unlock(&set_lock);

	kfree(set_enable);
}

static int lowpan_enable_set(void *data, u64 val)
{
	struct set_enable *set_enable;

	set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
	if (!set_enable)
		return -ENOMEM;

	set_enable->flag = !!val;
	INIT_WORK(&set_enable->work, do_enable_set);

	schedule_work(&set_enable->work);

	return 0;
}

static int lowpan_enable_get(void *data, u64 *val)
{
	*val = enable_6lowpan;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
			 lowpan_enable_set, "%llu\n");

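/* debugfs 6lowpan_control write handler. The accepted commands are
 * "connect <bdaddr> <addr_type>" and "disconnect <bdaddr> <addr_type>",
 * with the address given as xx:xx:xx:xx:xx:xx.
 */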
static ssize_t lowpan_control_write(struct file *fp,
				    const char __user *user_buffer,
				    size_t count,
				    loff_t *position)
{
	char buf[32];
	size_t buf_size = min(count, sizeof(buf) - 1);
	int ret;
	bdaddr_t addr;
	u8 addr_type;
	struct l2cap_conn *conn = NULL;

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (memcmp(buf, "connect ", 8) == 0) {
		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
		if (ret == -EINVAL)
			return ret;

		mutex_lock(&set_lock);
		if (listen_chan) {
			l2cap_chan_close(listen_chan, 0);
			l2cap_chan_put(listen_chan);
			listen_chan = NULL;
		}
		mutex_unlock(&set_lock);

		if (conn) {
			struct lowpan_peer *peer;

			if (!is_bt_6lowpan(conn->hcon))
				return -EINVAL;

			peer = lookup_peer(conn);
			if (peer) {
				BT_DBG("6LoWPAN connection already exists");
				return -EALREADY;
			}

			BT_DBG("conn %p dst %pMR type %d user %d", conn,
			       &conn->hcon->dst, conn->hcon->dst_type,
			       addr_type);
		}

		ret = bt_6lowpan_connect(&addr, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	if (memcmp(buf, "disconnect ", 11) == 0) {
		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
		if (ret < 0)
			return ret;

		ret = bt_6lowpan_disconnect(conn, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	return count;
}

static int lowpan_control_show(struct seq_file *f, void *ptr)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer;

	spin_lock(&devices_lock);

	list_for_each_entry(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry(peer, &entry->peers, list)
			seq_printf(f, "%pMR (type %u)\n",
				   &peer->chan->dst, peer->chan->dst_type);
	}

	spin_unlock(&devices_lock);

	return 0;
}

static int lowpan_control_open(struct inode *inode, struct file *file)
{
	return single_open(file, lowpan_control_show, inode->i_private);
}

static const struct file_operations lowpan_control_fops = {
	.open		= lowpan_control_open,
	.read		= seq_read,
	.write		= lowpan_control_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void disconnect_devices(void)
{
	struct lowpan_btle_dev *entry, *tmp, *new_dev;
	struct list_head devices;

	INIT_LIST_HEAD(&devices);

	/* We make a separate list of devices because the unregister_netdev()
	 * will call device_event() which will also want to modify the same
	 * devices list.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
		if (!new_dev)
			break;

		new_dev->netdev = entry->netdev;
		INIT_LIST_HEAD(&new_dev->list);

		list_add_rcu(&new_dev->list, &devices);
	}

	rcu_read_unlock();

	list_for_each_entry_safe(entry, tmp, &devices, list) {
		ifdown(entry->netdev);
		BT_DBG("Unregistering netdev %s %p",
		       entry->netdev->name, entry->netdev);
		lowpan_unregister_netdev(entry->netdev);
		kfree(entry);
	}
}

static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_btle_dev *entry;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		spin_lock(&devices_lock);
		list_for_each_entry(entry, &bt_6lowpan_devices, list) {
			if (entry->netdev == netdev) {
				BT_DBG("Unregistered netdev %s %p",
				       netdev->name, netdev);
				list_del(&entry->list);
				break;
			}
		}
		spin_unlock(&devices_lock);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

static int __init bt_6lowpan_init(void)
{
	lowpan_enable_debugfs = debugfs_create_file_unsafe("6lowpan_enable",
							   0644, bt_debugfs,
							   NULL,
							   &lowpan_enable_fops);
	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
						     bt_debugfs, NULL,
						     &lowpan_control_fops);

	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

static void __exit bt_6lowpan_exit(void)
{
	debugfs_remove(lowpan_enable_debugfs);
	debugfs_remove(lowpan_control_debugfs);

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	disconnect_devices();

	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

module_init(bt_6lowpan_init);
module_exit(bt_6lowpan_exit);

MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");