Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame^] | 1 | /* |
| 2 | * net/dsa/dsa.c - Hardware switch handling |
| 3 | * Copyright (c) 2008-2009 Marvell Semiconductor |
| 4 | * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org> |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by |
| 8 | * the Free Software Foundation; either version 2 of the License, or |
| 9 | * (at your option) any later version. |
| 10 | */ |
| 11 | |
| 12 | #include <linux/device.h> |
| 13 | #include <linux/list.h> |
| 14 | #include <linux/platform_device.h> |
| 15 | #include <linux/slab.h> |
| 16 | #include <linux/module.h> |
| 17 | #include <linux/notifier.h> |
| 18 | #include <linux/of.h> |
| 19 | #include <linux/of_mdio.h> |
| 20 | #include <linux/of_platform.h> |
| 21 | #include <linux/of_net.h> |
| 22 | #include <linux/netdevice.h> |
| 23 | #include <linux/sysfs.h> |
| 24 | #include <linux/phy_fixed.h> |
| 25 | #include <linux/ptp_classify.h> |
| 26 | #include <linux/etherdevice.h> |
| 27 | |
| 28 | #include "dsa_priv.h" |
| 29 | |
/* xmit hook for DSA_TAG_PROTO_NONE: no tag to insert, transmit the SKB
 * untouched.
 */
static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	return skb;
}
| 36 | |
/* Tagger ops for switches that do not tag frames at all: transmit is a
 * passthrough and there is no receive hook (frames cannot be demuxed to
 * a port without a tag).
 */
static const struct dsa_device_ops none_ops = {
	.xmit	= dsa_slave_notag_xmit,
	.rcv	= NULL,
};
| 41 | |
/* Table mapping each DSA_TAG_PROTO_* value to its tagging operations.
 * Entries for taggers that were not compiled in stay NULL, which
 * dsa_resolve_tag_protocol() reports as -ENOPROTOOPT.
 */
const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
#ifdef CONFIG_NET_DSA_TAG_BRCM
	[DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_BRCM_PREPEND
	[DSA_TAG_PROTO_BRCM_PREPEND] = &brcm_prepend_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_DSA
	[DSA_TAG_PROTO_DSA] = &dsa_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_EDSA
	[DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_KSZ
	[DSA_TAG_PROTO_KSZ] = &ksz_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_LAN9303
	[DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_MTK
	[DSA_TAG_PROTO_MTK] = &mtk_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_QCA
	[DSA_TAG_PROTO_QCA] = &qca_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	[DSA_TAG_PROTO_TRAILER] = &trailer_netdev_ops,
#endif
	[DSA_TAG_PROTO_NONE] = &none_ops,
};
| 72 | |
| 73 | const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol) |
| 74 | { |
| 75 | const struct dsa_device_ops *ops; |
| 76 | |
| 77 | if (tag_protocol >= DSA_TAG_LAST) |
| 78 | return ERR_PTR(-EINVAL); |
| 79 | ops = dsa_device_ops[tag_protocol]; |
| 80 | |
| 81 | if (!ops) |
| 82 | return ERR_PTR(-ENOPROTOOPT); |
| 83 | |
| 84 | return ops; |
| 85 | } |
| 86 | |
| 87 | static int dev_is_class(struct device *dev, void *class) |
| 88 | { |
| 89 | if (dev->class != NULL && !strcmp(dev->class->name, class)) |
| 90 | return 1; |
| 91 | |
| 92 | return 0; |
| 93 | } |
| 94 | |
/* Find @parent itself or one of its children belonging to class @class.
 * The returned device has its reference count raised (caller must
 * put_device()); NULL when nothing matches.
 */
static struct device *dev_find_class(struct device *parent, char *class)
{
	if (!dev_is_class(parent, class))
		return device_find_child(parent, class, dev_is_class);

	get_device(parent);
	return parent;
}
| 104 | |
| 105 | struct net_device *dsa_dev_to_net_device(struct device *dev) |
| 106 | { |
| 107 | struct device *d; |
| 108 | |
| 109 | d = dev_find_class(dev, "net"); |
| 110 | if (d != NULL) { |
| 111 | struct net_device *nd; |
| 112 | |
| 113 | nd = to_net_dev(d); |
| 114 | dev_hold(nd); |
| 115 | put_device(d); |
| 116 | |
| 117 | return nd; |
| 118 | } |
| 119 | |
| 120 | return NULL; |
| 121 | } |
| 122 | EXPORT_SYMBOL_GPL(dsa_dev_to_net_device); |
| 123 | |
| 124 | /* Determine if we should defer delivery of skb until we have a rx timestamp. |
| 125 | * |
| 126 | * Called from dsa_switch_rcv. For now, this will only work if tagging is |
| 127 | * enabled on the switch. Normally the MAC driver would retrieve the hardware |
| 128 | * timestamp when it reads the packet out of the hardware. However in a DSA |
| 129 | * switch, the DSA driver owning the interface to which the packet is |
| 130 | * delivered is never notified unless we do so here. |
| 131 | */ |
| 132 | static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p, |
| 133 | struct sk_buff *skb) |
| 134 | { |
| 135 | struct dsa_switch *ds = p->dp->ds; |
| 136 | unsigned int type; |
| 137 | |
| 138 | if (skb_headroom(skb) < ETH_HLEN) |
| 139 | return false; |
| 140 | |
| 141 | __skb_push(skb, ETH_HLEN); |
| 142 | |
| 143 | type = ptp_classify_raw(skb); |
| 144 | |
| 145 | __skb_pull(skb, ETH_HLEN); |
| 146 | |
| 147 | if (type == PTP_CLASS_NONE) |
| 148 | return false; |
| 149 | |
| 150 | if (likely(ds->ops->port_rxtstamp)) |
| 151 | return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type); |
| 152 | |
| 153 | return false; |
| 154 | } |
| 155 | |
/* Packet-type handler for ETH_P_XDSA frames arriving on a DSA master
 * device.  Decodes the switch tag via the CPU port's rcv() hook, which
 * also redirects skb->dev to the matching slave interface, accounts the
 * packet, and delivers it to the stack unless it is deferred for RX
 * timestamping.  Always returns 0; the skb is consumed on every path.
 */
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct pcpu_sw_netstats *s;
	struct dsa_slave_priv *p;

	/* Master not (or no longer) wired to a switch tree: drop. */
	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	/* The tagger may modify the skb in place; make sure we own it. */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	/* NULL from rcv() means the frame was not for us (or was
	 * consumed by the tagger); free our copy.
	 */
	nskb = cpu_dp->rcv(skb, dev, pt);
	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	p = netdev_priv(skb->dev);
	/* Re-expose the Ethernet header so eth_type_trans() can parse it
	 * against the slave device (it pulls the header again itself).
	 */
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	/* Per-CPU software RX stats for the slave interface. */
	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	/* If the switch driver took the skb for RX timestamping, it now
	 * owns it and will deliver it later.
	 */
	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	netif_receive_skb(skb);

	return 0;
}
| 198 | |
| 199 | #ifdef CONFIG_PM_SLEEP |
| 200 | static bool dsa_is_port_initialized(struct dsa_switch *ds, int p) |
| 201 | { |
| 202 | return dsa_is_user_port(ds, p) && ds->ports[p].slave; |
| 203 | } |
| 204 | |
| 205 | int dsa_switch_suspend(struct dsa_switch *ds) |
| 206 | { |
| 207 | int i, ret = 0; |
| 208 | |
| 209 | /* Suspend slave network devices */ |
| 210 | for (i = 0; i < ds->num_ports; i++) { |
| 211 | if (!dsa_is_port_initialized(ds, i)) |
| 212 | continue; |
| 213 | |
| 214 | ret = dsa_slave_suspend(ds->ports[i].slave); |
| 215 | if (ret) |
| 216 | return ret; |
| 217 | } |
| 218 | |
| 219 | if (ds->ops->suspend) |
| 220 | ret = ds->ops->suspend(ds); |
| 221 | |
| 222 | return ret; |
| 223 | } |
| 224 | EXPORT_SYMBOL_GPL(dsa_switch_suspend); |
| 225 | |
| 226 | int dsa_switch_resume(struct dsa_switch *ds) |
| 227 | { |
| 228 | int i, ret = 0; |
| 229 | |
| 230 | if (ds->ops->resume) |
| 231 | ret = ds->ops->resume(ds); |
| 232 | |
| 233 | if (ret) |
| 234 | return ret; |
| 235 | |
| 236 | /* Resume slave network devices */ |
| 237 | for (i = 0; i < ds->num_ports; i++) { |
| 238 | if (!dsa_is_port_initialized(ds, i)) |
| 239 | continue; |
| 240 | |
| 241 | ret = dsa_slave_resume(ds->ports[i].slave); |
| 242 | if (ret) |
| 243 | return ret; |
| 244 | } |
| 245 | |
| 246 | return 0; |
| 247 | } |
| 248 | EXPORT_SYMBOL_GPL(dsa_switch_resume); |
| 249 | #endif |
| 250 | |
/* Hooks dsa_switch_rcv() into the RX path for frames the master driver
 * has marked with the pseudo ethertype ETH_P_XDSA.
 */
static struct packet_type dsa_pack_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_XDSA),
	.func	= dsa_switch_rcv,
};
| 255 | |
| 256 | static struct workqueue_struct *dsa_owq; |
| 257 | |
/* Queue @work on the DSA ordered workqueue.  Returns false if the work
 * was already pending, true otherwise (queue_work semantics).
 */
bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}
| 262 | |
| 263 | static ATOMIC_NOTIFIER_HEAD(dsa_notif_chain); |
| 264 | |
/* Subscribe @nb to DSA events emitted via call_dsa_notifiers(). */
int register_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_dsa_notifier);
| 270 | |
/* Remove @nb from the DSA notifier chain. */
int unregister_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_dsa_notifier);
| 276 | |
/* Notify all registered listeners of DSA event @val concerning @dev.
 * @info is filled with @dev before being passed to the chain.
 */
int call_dsa_notifiers(unsigned long val, struct net_device *dev,
		       struct dsa_notifier_info *info)
{
	info->dev = dev;
	return atomic_notifier_call_chain(&dsa_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_dsa_notifiers);
| 284 | |
| 285 | static int __init dsa_init_module(void) |
| 286 | { |
| 287 | int rc; |
| 288 | |
| 289 | dsa_owq = alloc_ordered_workqueue("dsa_ordered", |
| 290 | WQ_MEM_RECLAIM); |
| 291 | if (!dsa_owq) |
| 292 | return -ENOMEM; |
| 293 | |
| 294 | rc = dsa_slave_register_notifier(); |
| 295 | if (rc) |
| 296 | return rc; |
| 297 | |
| 298 | rc = dsa_legacy_register(); |
| 299 | if (rc) |
| 300 | return rc; |
| 301 | |
| 302 | dev_add_pack(&dsa_pack_type); |
| 303 | |
| 304 | return 0; |
| 305 | } |
| 306 | module_init(dsa_init_module); |
| 307 | |
/* Module exit: undo dsa_init_module().  The packet handler is removed
 * before the legacy layer and workqueue are torn down so no new RX work
 * arrives while they are being destroyed.
 */
static void __exit dsa_cleanup_module(void)
{
	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	dsa_legacy_unregister();
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);
| 316 | |
| 317 | MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); |
| 318 | MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips"); |
| 319 | MODULE_LICENSE("GPL"); |
| 320 | MODULE_ALIAS("platform:dsa"); |