// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <linux/platform_data/b53.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"

static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allowing each priority to map
	 * to a different queue number
	 */
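	/* The value programmed below is an identity map: TC i -> queue i */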
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	b53_brcm_hdr_setup(ds, port);

	if (port == 8) {
		if (priv->type == BCM7445_DEVICE_ID)
			offset = CORE_STS_OVERRIDE_IMP;
		else
			offset = CORE_STS_OVERRIDE_IMP2;

		/* Force link status for IMP port */
		reg = core_readl(priv, offset);
		reg |= (MII_SW_OR | LINK_STS);
		reg &= ~GMII_SPEED_UP_2G;
		core_writel(priv, reg, offset);

		/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
		reg = core_readl(priv, CORE_IMP_CTL);
		reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
		reg &= ~(RX_DIS | TX_DIS);
		core_writel(priv, reg, CORE_IMP_CTL);
	} else {
		reg = core_readl(priv, CORE_G_PCTL_PORT(port));
		reg &= ~(RX_DIS | TX_DIS);
		core_writel(priv, reg, CORE_G_PCTL_PORT(port));
	}
}

static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}

static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}

static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}

static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	if (!dsa_is_user_port(ds, port))
		return 0;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		b53_brcm_hdr_setup(ds, port);

	/* Configure Traffic Class to QoS mapping, allowing each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* If phy_stop() has been called before, the PHY
			 * will be in a halted state, and phy_start()
			 * will call resume.
			 *
			 * The resume path does not restore the autoneg
			 * settings, and since we hard reset the PHY
			 * manually here, we need to reset the state
			 * machine as well.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}

static void bcm_sf2_port_disable(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Disable learning while in WoL mode */
	if (priv->wol_ports_mask & (1 << port)) {
		reg = core_readl(priv, CORE_DIS_LEARN);
		reg |= BIT(port);
		core_writel(priv, reg, CORE_DIS_LEARN);
		return;
	}

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	b53_disable_port(ds, port);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}


static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

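	/* Reads return the low 16 bits of the register; writes return 0 */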
	return ret & 0xffff;
}

static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept reads from the Broadcom pseudo-PHY address; otherwise
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
	else
		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}

static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept writes to the Broadcom pseudo-PHY address; otherwise
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
	else
		return mdiobus_write_nested(priv->master_mii_bus, addr,
					    regnum, val);
}

static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
		priv->port_sts[7].link = true;
		dsa_port_phylink_mac_change(ds, 7, true);
	}
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
		priv->port_sts[7].link = false;
		dsa_port_phylink_mac_change(ds, 7, false);
	}

	return IRQ_HANDLED;
}

static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			break;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}

static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	int mode;
	unsigned int port_num;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0)
			continue;

		if (mode == PHY_INTERFACE_MODE_INTERNAL)
			priv->int_phy_mask |= 1 << port_num;

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;

		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
			priv->brcm_tag_mask |= 1 << port_num;
	}
}

static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus) {
		of_node_put(dn);
		return -EPROBE_DEFER;
	}

	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus) {
		of_node_put(dn);
		return -ENOMEM;
	}

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo-PHY as needing special treatment; all
	 * other PHY reads/writes go to the master MDIO bus controller that
	 * comes with this switch, backed by the "mdio-unimac" driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	err = mdiobus_register(priv->slave_mii_bus);
	if (err && dn)
		of_node_put(dn);

	return err;
}

static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
	mdiobus_unregister(priv->slave_mii_bus);
	of_node_put(priv->master_mii_dn);
}

static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */
	if (priv->int_phy_mask & BIT(port))
		return priv->hw_params.gphy_rev;
	else
		return 0;
}

static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
				unsigned long *supported,
				struct phylink_link_state *state)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (!phy_interface_mode_is_rgmii(state->interface) &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
	    state->interface != PHY_INTERFACE_MODE_MOCA) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		if (port != core_readl(priv, CORE_IMP0_PRT_ID))
			dev_err(ds->dev,
				"Unsupported interface: %d for port %d\n",
				state->interface, port);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  const struct phylink_link_state *state)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	u32 reg, offset;

	if (port == core_readl(priv, CORE_IMP0_PRT_ID))
		return;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		id_mode_dis = 1;
		/* fallthrough */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		port_mode = EXT_REVMII;
		break;
	default:
		/* all other PHYs: internal and MoCA */
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, let
	 * RGMII_MODE_EN be set by mac_link_{up,down}
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (state->pause & MLO_PAUSE_TXRX_MASK) {
		if (state->pause & MLO_PAUSE_TX)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (state->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (state->link)
		reg |= LINK_STS;
	if (state->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, offset);
}

static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
				    phy_interface_t interface, bool link)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	if (!phy_interface_mode_is_rgmii(interface) &&
	    interface != PHY_INTERFACE_MODE_MII &&
	    interface != PHY_INTERFACE_MODE_REVMII)
		return;

	/* If the link is down, just disable the interface to conserve power */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	if (link)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
}

static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     phy_interface_t interface)
{
	bcm_sf2_sw_mac_link_set(ds, port, interface, false);
}

static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
				   unsigned int mode,
				   phy_interface_t interface,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->dev->ports[port].eee;

	bcm_sf2_sw_mac_link_set(ds, port, interface, true);

	if (mode == MLO_AN_PHY && phydev)
		p->eee_enabled = b53_eee_init(ds, port, phydev);
}

static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
				   struct phylink_link_state *status)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	status->link = false;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We use what the interrupt handler
	 * determined earlier.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some versions of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and makes it go into the PHY_FORCING state
		 * instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].slave);
		status->duplex = DUPLEX_FULL;
	} else {
		status->link = true;
	}
}

static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Enable ACB globally */
	reg = acb_readl(priv, ACB_CONTROL);
	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	acb_writel(priv, reg, ACB_CONTROL);
	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	reg |= ACB_EN | ACB_ALGORITHM;
	acb_writel(priv, reg, ACB_CONTROL);
}

static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all physically present ports, including the IMP port;
	 * the other ones have already been disabled during
	 * bcm_sf2_sw_setup()
	 */
	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port);
	}

	return 0;
}

static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	ret = bcm_sf2_cfp_resume(ds);
	if (ret)
		return ret;

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	ds->ops->setup(ds);

	return 0;
}

static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_wolinfo pwol = { };

	/* Get the parent device WoL settings */
	if (p->ethtool_ops->get_wol)
		p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}

static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->ports[port].cpu_dp->index;
	struct ethtool_wolinfo pwol = { };

	if (p->ethtool_ops->get_wol)
		p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}

static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if (dsa_is_user_port(ds, port))
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port);
	}

	b53_configure_vlan(ds);
	bcm_sf2_enable_acb(ds);

	return 0;
}

/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands.
 */
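/* e.g. page 0x28, reg 0x04 -> (0x28 << 10) | (0x04 << 2) == 0xa010 */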
#define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)

static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
			      u8 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
			       u16 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
			       u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
			       u64 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
			       u8 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
				u16 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
				u32 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
				u64 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static const struct b53_io_ops bcm_sf2_io_ops = {
	.read8	= bcm_sf2_core_read8,
	.read16	= bcm_sf2_core_read16,
	.read32	= bcm_sf2_core_read32,
	.read48	= bcm_sf2_core_read64,
	.read64	= bcm_sf2_core_read64,
	.write8	= bcm_sf2_core_write8,
	.write16 = bcm_sf2_core_write16,
	.write32 = bcm_sf2_core_write32,
	.write48 = bcm_sf2_core_write64,
	.write64 = bcm_sf2_core_write64,
};

static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port,
				   u32 stringset, uint8_t *data)
{
	int cnt = b53_get_sset_count(ds, port, stringset);

	b53_get_strings(ds, port, stringset, data);
	bcm_sf2_cfp_get_strings(ds, port, stringset,
				data + cnt * ETH_GSTRING_LEN);
}

static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port,
					 uint64_t *data)
{
	int cnt = b53_get_sset_count(ds, port, ETH_SS_STATS);

	b53_get_ethtool_stats(ds, port, data);
	bcm_sf2_cfp_get_ethtool_stats(ds, port, data + cnt);
}

static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
				     int sset)
{
	int cnt = b53_get_sset_count(ds, port, sset);

	if (cnt < 0)
		return cnt;

	cnt += bcm_sf2_cfp_get_sset_count(ds, port, sset);

	return cnt;
}

static const struct dsa_switch_ops bcm_sf2_ops = {
	.get_tag_protocol	= b53_get_tag_protocol,
	.setup			= bcm_sf2_sw_setup,
	.get_strings		= bcm_sf2_sw_get_strings,
	.get_ethtool_stats	= bcm_sf2_sw_get_ethtool_stats,
	.get_sset_count		= bcm_sf2_sw_get_sset_count,
	.get_ethtool_phy_stats	= b53_get_ethtool_phy_stats,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.phylink_validate	= bcm_sf2_sw_validate,
	.phylink_mac_config	= bcm_sf2_sw_mac_config,
	.phylink_mac_link_down	= bcm_sf2_sw_mac_link_down,
	.phylink_mac_link_up	= bcm_sf2_sw_mac_link_up,
	.phylink_fixed_state	= bcm_sf2_sw_fixed_state,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_mac_eee		= b53_get_mac_eee,
	.set_mac_eee		= b53_set_mac_eee,
	.port_bridge_join	= b53_br_join,
	.port_bridge_leave	= b53_br_leave,
	.port_stp_state_set	= b53_br_set_stp_state,
	.port_fast_age		= b53_br_fast_age,
	.port_vlan_filtering	= b53_vlan_filtering,
	.port_vlan_prepare	= b53_vlan_prepare,
	.port_vlan_add		= b53_vlan_add,
	.port_vlan_del		= b53_vlan_del,
	.port_fdb_dump		= b53_fdb_dump,
	.port_fdb_add		= b53_fdb_add,
	.port_fdb_del		= b53_fdb_del,
	.get_rxnfc		= bcm_sf2_get_rxnfc,
	.set_rxnfc		= bcm_sf2_set_rxnfc,
	.port_mirror_add	= b53_mirror_add,
	.port_mirror_del	= b53_mirror_del,
};

struct bcm_sf2_of_data {
	u32 type;
	const u16 *reg_offsets;
	unsigned int core_reg_align;
	unsigned int num_cfp_rules;
};

/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0C,
	[REG_SWITCH_REVISION]	= 0x18,
	[REG_PHY_REVISION]	= 0x1C,
	[REG_SPHY_CNTRL]	= 0x2C,
	[REG_RGMII_0_CNTRL]	= 0x34,
	[REG_RGMII_1_CNTRL]	= 0x40,
	[REG_RGMII_2_CNTRL]	= 0x4c,
	[REG_LED_0_CNTRL]	= 0x90,
	[REG_LED_1_CNTRL]	= 0x94,
	[REG_LED_2_CNTRL]	= 0x98,
};

static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
	.type		= BCM7445_DEVICE_ID,
	.core_reg_align	= 0,
	.reg_offsets	= bcm_sf2_7445_reg_offsets,
	.num_cfp_rules	= 256,
};

static const u16 bcm_sf2_7278_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0c,
	[REG_SWITCH_REVISION]	= 0x10,
	[REG_PHY_REVISION]	= 0x14,
	[REG_SPHY_CNTRL]	= 0x24,
	[REG_RGMII_0_CNTRL]	= 0xe0,
	[REG_RGMII_1_CNTRL]	= 0xec,
	[REG_RGMII_2_CNTRL]	= 0xf8,
	[REG_LED_0_CNTRL]	= 0x40,
	[REG_LED_1_CNTRL]	= 0x4c,
	[REG_LED_2_CNTRL]	= 0x58,
};

static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
	.type		= BCM7278_DEVICE_ID,
	.core_reg_align	= 1,
	.reg_offsets	= bcm_sf2_7278_reg_offsets,
	.num_cfp_rules	= 128,
};

static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm7445-switch-v4.0",
	  .data = &bcm_sf2_7445_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.0",
	  .data = &bcm_sf2_7278_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.8",
	  .data = &bcm_sf2_7278_data
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);

static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	const struct bcm_sf2_of_data *data;
	struct b53_platform_data *pdata;
	struct dsa_switch_ops *ops;
	struct device_node *ports;
	struct bcm_sf2_priv *priv;
	struct b53_device *dev;
	struct dsa_switch *ds;
	void __iomem **base;
	unsigned int i;
	u32 reg, rev;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
	if (!dev)
		return -ENOMEM;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	of_id = of_match_node(bcm_sf2_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	data = of_id->data;

	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
	priv->type = data->type;
	priv->reg_offsets = data->reg_offsets;
	priv->core_reg_align = data->core_reg_align;
	priv->num_cfp_rules = data->num_cfp_rules;

	/* Auto-detection using standard registers will not work, so
	 * provide an indication of what kind of device we are for
	 * b53_common to work with
	 */
	pdata->chip_id = priv->type;
	dev->pdata = pdata;

	priv->dev = dev;
	ds = dev->ds;
	ds->ops = &bcm_sf2_ops;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;

	dev_set_drvdata(&pdev->dev, priv);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->cfp.lock);
	INIT_LIST_HEAD(&priv->cfp.rules_list);

	/* CFP rule #0 cannot be used for specific classifications; flag it as
	 * permanently used
	 */
	set_bit(0, priv->cfp.used);
	set_bit(0, priv->cfp.unique);

	/* Balance of_node_put() done by of_find_node_by_name() */
	of_node_get(dn);
	ports = of_find_node_by_name(dn, "ports");
	if (ports) {
		bcm_sf2_identify_ports(priv, ports);
		of_node_put(ports);
	}

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		*base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		return ret;
	}

	bcm_sf2_gphy_enable_set(priv->dev->ds, true);

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		return ret;
	}

	bcm_sf2_gphy_enable_set(priv->dev->ds, false);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret) {
		pr_err("failed to reset CFP\n");
		goto out_mdio;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", ds);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", ds);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	ret = b53_switch_register(dev);
	if (ret)
		goto out_mdio;

	dev_info(&pdev->dev,
		 "Starfighter 2 top: %x.%02x, core: %x.%02x, IRQs: %d, %d\n",
		 priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		 priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		 priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
	return ret;
}

static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	priv->wol_ports_mask = 0;
	/* Disable interrupts */
	bcm_sf2_intr_disable(priv);
	dsa_unregister_switch(priv->dev->ds);
	bcm_sf2_cfp_exit(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}

static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
	 * successful MDIO bus scan to occur. If we did turn off the GPHY
	 * before (e.g: port_disable), this will also power it back on.
	 *
	 * Do not rely on kexec_in_progress, just power the PHY on.
	 */
	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
	struct bcm_sf2_priv *priv = dev_get_drvdata(dev);

	return dsa_switch_suspend(priv->dev->ds);
}

static int bcm_sf2_resume(struct device *dev)
{
	struct bcm_sf2_priv *priv = dev_get_drvdata(dev);

	return dsa_switch_resume(priv->dev->ds);
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);


static struct platform_driver bcm_sf2_driver = {
	.probe	= bcm_sf2_sw_probe,
	.remove	= bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");