blob: 382a45d84cc326ae6016dc41882e0a2ce4e7557a [file] [log] [blame]
David Brazdil0f672f62019-12-10 10:32:29 +00001// SPDX-License-Identifier: GPL-2.0-or-later
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002/* drivers/net/ethernet/freescale/gianfar.c
3 *
4 * Gianfar Ethernet Driver
5 * This driver is designed for the non-CPM ethernet controllers
6 * on the 85xx and 83xx family of integrated processors
7 * Based on 8260_io/fcc_enet.c
8 *
9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12 *
13 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
14 * Copyright 2007 MontaVista Software, Inc.
15 *
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000016 * Gianfar: AKA Lambda Draconis, "Dragon"
17 * RA 11 31 24.2
18 * Dec +69 19 52
19 * V 3.84
20 * B-V +1.62
21 *
22 * Theory of operation
23 *
24 * The driver is initialized through of_device. Configuration information
25 * is therefore conveyed through an OF-style device tree.
26 *
27 * The Gianfar Ethernet Controller uses a ring of buffer
28 * descriptors. The beginning is indicated by a register
29 * pointing to the physical address of the start of the ring.
30 * The end is determined by a "wrap" bit being set in the
31 * last descriptor of the ring.
32 *
33 * When a packet is received, the RXF bit in the
34 * IEVENT register is set, triggering an interrupt when the
35 * corresponding bit in the IMASK register is also set (if
36 * interrupt coalescing is active, then the interrupt may not
37 * happen immediately, but will wait until either a set number
38 * of frames or amount of time have passed). In NAPI, the
39 * interrupt handler will signal there is work to be done, and
40 * exit. This method will start at the last known empty
41 * descriptor, and process every subsequent descriptor until there
42 * are none left with data (NAPI will stop after a set number of
43 * packets to give time to other tasks, but will eventually
44 * process all the packets). The data arrives inside a
45 * pre-allocated skb, and so after the skb is passed up to the
46 * stack, a new skb must be allocated, and the address field in
47 * the buffer descriptor must be updated to indicate this new
48 * skb.
49 *
50 * When the kernel requests that a packet be transmitted, the
51 * driver starts where it left off last time, and points the
52 * descriptor at the buffer which was passed in. The driver
53 * then informs the DMA engine that there are packets ready to
54 * be transmitted. Once the controller is finished transmitting
55 * the packet, an interrupt may be triggered (under the same
56 * conditions as for reception, but depending on the TXF bit).
57 * The driver then cleans up the buffer.
58 */
59
60#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
61#define DEBUG
62
63#include <linux/kernel.h>
64#include <linux/string.h>
65#include <linux/errno.h>
66#include <linux/unistd.h>
67#include <linux/slab.h>
68#include <linux/interrupt.h>
69#include <linux/delay.h>
70#include <linux/netdevice.h>
71#include <linux/etherdevice.h>
72#include <linux/skbuff.h>
73#include <linux/if_vlan.h>
74#include <linux/spinlock.h>
75#include <linux/mm.h>
76#include <linux/of_address.h>
77#include <linux/of_irq.h>
78#include <linux/of_mdio.h>
79#include <linux/of_platform.h>
80#include <linux/ip.h>
81#include <linux/tcp.h>
82#include <linux/udp.h>
83#include <linux/in.h>
84#include <linux/net_tstamp.h>
85
86#include <asm/io.h>
87#ifdef CONFIG_PPC
88#include <asm/reg.h>
89#include <asm/mpc85xx.h>
90#endif
91#include <asm/irq.h>
92#include <linux/uaccess.h>
93#include <linux/module.h>
94#include <linux/dma-mapping.h>
95#include <linux/crc32.h>
96#include <linux/mii.h>
97#include <linux/phy.h>
98#include <linux/phy_fixed.h>
99#include <linux/of.h>
100#include <linux/of_net.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000101
102#include "gianfar.h"
103
104#define TX_TIMEOUT (5*HZ)
105
106const char gfar_driver_version[] = "2.0";
107
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000108MODULE_AUTHOR("Freescale Semiconductor, Inc");
109MODULE_DESCRIPTION("Gianfar Ethernet Driver");
110MODULE_LICENSE("GPL");
111
112static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
113 dma_addr_t buf)
114{
115 u32 lstatus;
116
117 bdp->bufPtr = cpu_to_be32(buf);
118
119 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
120 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
121 lstatus |= BD_LFLAG(RXBD_WRAP);
122
123 gfar_wmb();
124
125 bdp->lstatus = cpu_to_be32(lstatus);
126}
127
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000128static void gfar_init_tx_rx_base(struct gfar_private *priv)
129{
130 struct gfar __iomem *regs = priv->gfargrp[0].regs;
131 u32 __iomem *baddr;
132 int i;
133
134 baddr = &regs->tbase0;
135 for (i = 0; i < priv->num_tx_queues; i++) {
136 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
137 baddr += 2;
138 }
139
140 baddr = &regs->rbase0;
141 for (i = 0; i < priv->num_rx_queues; i++) {
142 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
143 baddr += 2;
144 }
145}
146
147static void gfar_init_rqprm(struct gfar_private *priv)
148{
149 struct gfar __iomem *regs = priv->gfargrp[0].regs;
150 u32 __iomem *baddr;
151 int i;
152
153 baddr = &regs->rqprm0;
154 for (i = 0; i < priv->num_rx_queues; i++) {
155 gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
156 (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
157 baddr++;
158 }
159}
160
161static void gfar_rx_offload_en(struct gfar_private *priv)
162{
163 /* set this when rx hw offload (TOE) functions are being used */
164 priv->uses_rxfcb = 0;
165
166 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
167 priv->uses_rxfcb = 1;
168
169 if (priv->hwts_rx_en || priv->rx_filer_enable)
170 priv->uses_rxfcb = 1;
171}
172
173static void gfar_mac_rx_config(struct gfar_private *priv)
174{
175 struct gfar __iomem *regs = priv->gfargrp[0].regs;
176 u32 rctrl = 0;
177
178 if (priv->rx_filer_enable) {
179 rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
180 /* Program the RIR0 reg with the required distribution */
181 if (priv->poll_mode == GFAR_SQ_POLLING)
182 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
183 else /* GFAR_MQ_POLLING */
184 gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
185 }
186
187 /* Restore PROMISC mode */
188 if (priv->ndev->flags & IFF_PROMISC)
189 rctrl |= RCTRL_PROM;
190
191 if (priv->ndev->features & NETIF_F_RXCSUM)
192 rctrl |= RCTRL_CHECKSUMMING;
193
194 if (priv->extended_hash)
195 rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
196
197 if (priv->padding) {
198 rctrl &= ~RCTRL_PAL_MASK;
199 rctrl |= RCTRL_PADDING(priv->padding);
200 }
201
202 /* Enable HW time stamping if requested from user space */
203 if (priv->hwts_rx_en)
204 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
205
206 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
207 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
208
209 /* Clear the LFC bit */
210 gfar_write(&regs->rctrl, rctrl);
211 /* Init flow control threshold values */
212 gfar_init_rqprm(priv);
213 gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
214 rctrl |= RCTRL_LFC;
215
216 /* Init rctrl based on our settings */
217 gfar_write(&regs->rctrl, rctrl);
218}
219
220static void gfar_mac_tx_config(struct gfar_private *priv)
221{
222 struct gfar __iomem *regs = priv->gfargrp[0].regs;
223 u32 tctrl = 0;
224
225 if (priv->ndev->features & NETIF_F_IP_CSUM)
226 tctrl |= TCTRL_INIT_CSUM;
227
228 if (priv->prio_sched_en)
229 tctrl |= TCTRL_TXSCHED_PRIO;
230 else {
231 tctrl |= TCTRL_TXSCHED_WRRS;
232 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
233 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
234 }
235
236 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
237 tctrl |= TCTRL_VLINS;
238
239 gfar_write(&regs->tctrl, tctrl);
240}
241
242static void gfar_configure_coalescing(struct gfar_private *priv,
243 unsigned long tx_mask, unsigned long rx_mask)
244{
245 struct gfar __iomem *regs = priv->gfargrp[0].regs;
246 u32 __iomem *baddr;
247
248 if (priv->mode == MQ_MG_MODE) {
249 int i = 0;
250
251 baddr = &regs->txic0;
252 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
253 gfar_write(baddr + i, 0);
254 if (likely(priv->tx_queue[i]->txcoalescing))
255 gfar_write(baddr + i, priv->tx_queue[i]->txic);
256 }
257
258 baddr = &regs->rxic0;
259 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
260 gfar_write(baddr + i, 0);
261 if (likely(priv->rx_queue[i]->rxcoalescing))
262 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
263 }
264 } else {
265 /* Backward compatible case -- even if we enable
266 * multiple queues, there's only single reg to program
267 */
268 gfar_write(&regs->txic, 0);
269 if (likely(priv->tx_queue[0]->txcoalescing))
270 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
271
272 gfar_write(&regs->rxic, 0);
273 if (unlikely(priv->rx_queue[0]->rxcoalescing))
274 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
275 }
276}
277
David Brazdil0f672f62019-12-10 10:32:29 +0000278static void gfar_configure_coalescing_all(struct gfar_private *priv)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000279{
280 gfar_configure_coalescing(priv, 0xFF, 0xFF);
281}
282
283static struct net_device_stats *gfar_get_stats(struct net_device *dev)
284{
285 struct gfar_private *priv = netdev_priv(dev);
286 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
287 unsigned long tx_packets = 0, tx_bytes = 0;
288 int i;
289
290 for (i = 0; i < priv->num_rx_queues; i++) {
291 rx_packets += priv->rx_queue[i]->stats.rx_packets;
292 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
293 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
294 }
295
296 dev->stats.rx_packets = rx_packets;
297 dev->stats.rx_bytes = rx_bytes;
298 dev->stats.rx_dropped = rx_dropped;
299
300 for (i = 0; i < priv->num_tx_queues; i++) {
301 tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
302 tx_packets += priv->tx_queue[i]->stats.tx_packets;
303 }
304
305 dev->stats.tx_bytes = tx_bytes;
306 dev->stats.tx_packets = tx_packets;
307
308 return &dev->stats;
309}
310
David Brazdil0f672f62019-12-10 10:32:29 +0000311/* Set the appropriate hash bit for the given addr */
312/* The algorithm works like so:
313 * 1) Take the Destination Address (ie the multicast address), and
314 * do a CRC on it (little endian), and reverse the bits of the
315 * result.
316 * 2) Use the 8 most significant bits as a hash into a 256-entry
317 * table. The table is controlled through 8 32-bit registers:
318 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
319 * gaddr7. This means that the 3 most significant bits in the
320 * hash index which gaddr register to use, and the 5 other bits
321 * indicate which bit (assuming an IBM numbering scheme, which
322 * for PowerPC (tm) is usually the case) in the register holds
323 * the entry.
324 */
325static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
326{
327 u32 tempval;
328 struct gfar_private *priv = netdev_priv(dev);
329 u32 result = ether_crc(ETH_ALEN, addr);
330 int width = priv->hash_width;
331 u8 whichbit = (result >> (32 - width)) & 0x1f;
332 u8 whichreg = result >> (32 - width + 5);
333 u32 value = (1 << (31-whichbit));
334
335 tempval = gfar_read(priv->hash_regs[whichreg]);
336 tempval |= value;
337 gfar_write(priv->hash_regs[whichreg], tempval);
338}
339
340/* There are multiple MAC Address register pairs on some controllers
341 * This function sets the numth pair to a given address
342 */
343static void gfar_set_mac_for_addr(struct net_device *dev, int num,
344 const u8 *addr)
345{
346 struct gfar_private *priv = netdev_priv(dev);
347 struct gfar __iomem *regs = priv->gfargrp[0].regs;
348 u32 tempval;
349 u32 __iomem *macptr = &regs->macstnaddr1;
350
351 macptr += num*2;
352
353 /* For a station address of 0x12345678ABCD in transmission
354 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
355 * MACnADDR2 is set to 0x34120000.
356 */
357 tempval = (addr[5] << 24) | (addr[4] << 16) |
358 (addr[3] << 8) | addr[2];
359
360 gfar_write(macptr, tempval);
361
362 tempval = (addr[1] << 24) | (addr[0] << 16);
363
364 gfar_write(macptr+1, tempval);
365}
366
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000367static int gfar_set_mac_addr(struct net_device *dev, void *p)
368{
Olivier Deprez0e641232021-09-23 10:07:05 +0200369 int ret;
370
371 ret = eth_mac_addr(dev, p);
372 if (ret)
373 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000374
375 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
376
377 return 0;
378}
379
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000380static void gfar_ints_disable(struct gfar_private *priv)
381{
382 int i;
383 for (i = 0; i < priv->num_grps; i++) {
384 struct gfar __iomem *regs = priv->gfargrp[i].regs;
385 /* Clear IEVENT */
386 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
387
388 /* Initialize IMASK */
389 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
390 }
391}
392
393static void gfar_ints_enable(struct gfar_private *priv)
394{
395 int i;
396 for (i = 0; i < priv->num_grps; i++) {
397 struct gfar __iomem *regs = priv->gfargrp[i].regs;
398 /* Unmask the interrupts we look for */
399 gfar_write(&regs->imask, IMASK_DEFAULT);
400 }
401}
402
403static int gfar_alloc_tx_queues(struct gfar_private *priv)
404{
405 int i;
406
407 for (i = 0; i < priv->num_tx_queues; i++) {
408 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
409 GFP_KERNEL);
410 if (!priv->tx_queue[i])
411 return -ENOMEM;
412
413 priv->tx_queue[i]->tx_skbuff = NULL;
414 priv->tx_queue[i]->qindex = i;
415 priv->tx_queue[i]->dev = priv->ndev;
416 spin_lock_init(&(priv->tx_queue[i]->txlock));
417 }
418 return 0;
419}
420
421static int gfar_alloc_rx_queues(struct gfar_private *priv)
422{
423 int i;
424
425 for (i = 0; i < priv->num_rx_queues; i++) {
426 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
427 GFP_KERNEL);
428 if (!priv->rx_queue[i])
429 return -ENOMEM;
430
431 priv->rx_queue[i]->qindex = i;
432 priv->rx_queue[i]->ndev = priv->ndev;
433 }
434 return 0;
435}
436
437static void gfar_free_tx_queues(struct gfar_private *priv)
438{
439 int i;
440
441 for (i = 0; i < priv->num_tx_queues; i++)
442 kfree(priv->tx_queue[i]);
443}
444
445static void gfar_free_rx_queues(struct gfar_private *priv)
446{
447 int i;
448
449 for (i = 0; i < priv->num_rx_queues; i++)
450 kfree(priv->rx_queue[i]);
451}
452
453static void unmap_group_regs(struct gfar_private *priv)
454{
455 int i;
456
457 for (i = 0; i < MAXGROUPS; i++)
458 if (priv->gfargrp[i].regs)
459 iounmap(priv->gfargrp[i].regs);
460}
461
462static void free_gfar_dev(struct gfar_private *priv)
463{
464 int i, j;
465
466 for (i = 0; i < priv->num_grps; i++)
467 for (j = 0; j < GFAR_NUM_IRQS; j++) {
468 kfree(priv->gfargrp[i].irqinfo[j]);
469 priv->gfargrp[i].irqinfo[j] = NULL;
470 }
471
472 free_netdev(priv->ndev);
473}
474
475static void disable_napi(struct gfar_private *priv)
476{
477 int i;
478
479 for (i = 0; i < priv->num_grps; i++) {
480 napi_disable(&priv->gfargrp[i].napi_rx);
481 napi_disable(&priv->gfargrp[i].napi_tx);
482 }
483}
484
485static void enable_napi(struct gfar_private *priv)
486{
487 int i;
488
489 for (i = 0; i < priv->num_grps; i++) {
490 napi_enable(&priv->gfargrp[i].napi_rx);
491 napi_enable(&priv->gfargrp[i].napi_tx);
492 }
493}
494
495static int gfar_parse_group(struct device_node *np,
496 struct gfar_private *priv, const char *model)
497{
498 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
499 int i;
500
501 for (i = 0; i < GFAR_NUM_IRQS; i++) {
502 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
503 GFP_KERNEL);
504 if (!grp->irqinfo[i])
505 return -ENOMEM;
506 }
507
508 grp->regs = of_iomap(np, 0);
509 if (!grp->regs)
510 return -ENOMEM;
511
512 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
513
514 /* If we aren't the FEC we have multiple interrupts */
515 if (model && strcasecmp(model, "FEC")) {
516 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
517 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
518 if (!gfar_irq(grp, TX)->irq ||
519 !gfar_irq(grp, RX)->irq ||
520 !gfar_irq(grp, ER)->irq)
521 return -EINVAL;
522 }
523
524 grp->priv = priv;
525 spin_lock_init(&grp->grplock);
526 if (priv->mode == MQ_MG_MODE) {
527 u32 rxq_mask, txq_mask;
528 int ret;
529
530 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
531 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
532
533 ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
534 if (!ret) {
535 grp->rx_bit_map = rxq_mask ?
536 rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
537 }
538
539 ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
540 if (!ret) {
541 grp->tx_bit_map = txq_mask ?
542 txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
543 }
544
545 if (priv->poll_mode == GFAR_SQ_POLLING) {
546 /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
547 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
548 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
549 }
550 } else {
551 grp->rx_bit_map = 0xFF;
552 grp->tx_bit_map = 0xFF;
553 }
554
555 /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
556 * right to left, so we need to revert the 8 bits to get the q index
557 */
558 grp->rx_bit_map = bitrev8(grp->rx_bit_map);
559 grp->tx_bit_map = bitrev8(grp->tx_bit_map);
560
561 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
562 * also assign queues to groups
563 */
564 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
565 if (!grp->rx_queue)
566 grp->rx_queue = priv->rx_queue[i];
567 grp->num_rx_queues++;
568 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
569 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
570 priv->rx_queue[i]->grp = grp;
571 }
572
573 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
574 if (!grp->tx_queue)
575 grp->tx_queue = priv->tx_queue[i];
576 grp->num_tx_queues++;
577 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
578 priv->tqueue |= (TQUEUE_EN0 >> i);
579 priv->tx_queue[i]->grp = grp;
580 }
581
582 priv->num_grps++;
583
584 return 0;
585}
586
587static int gfar_of_group_count(struct device_node *np)
588{
589 struct device_node *child;
590 int num = 0;
591
592 for_each_available_child_of_node(np, child)
David Brazdil0f672f62019-12-10 10:32:29 +0000593 if (of_node_name_eq(child, "queue-group"))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000594 num++;
595
596 return num;
597}
598
David Brazdil0f672f62019-12-10 10:32:29 +0000599/* Reads the controller's registers to determine what interface
600 * connects it to the PHY.
601 */
602static phy_interface_t gfar_get_interface(struct net_device *dev)
603{
604 struct gfar_private *priv = netdev_priv(dev);
605 struct gfar __iomem *regs = priv->gfargrp[0].regs;
606 u32 ecntrl;
607
608 ecntrl = gfar_read(&regs->ecntrl);
609
610 if (ecntrl & ECNTRL_SGMII_MODE)
611 return PHY_INTERFACE_MODE_SGMII;
612
613 if (ecntrl & ECNTRL_TBI_MODE) {
614 if (ecntrl & ECNTRL_REDUCED_MODE)
615 return PHY_INTERFACE_MODE_RTBI;
616 else
617 return PHY_INTERFACE_MODE_TBI;
618 }
619
620 if (ecntrl & ECNTRL_REDUCED_MODE) {
621 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
622 return PHY_INTERFACE_MODE_RMII;
623 }
624 else {
625 phy_interface_t interface = priv->interface;
626
627 /* This isn't autodetected right now, so it must
628 * be set by the device tree or platform code.
629 */
630 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
631 return PHY_INTERFACE_MODE_RGMII_ID;
632
633 return PHY_INTERFACE_MODE_RGMII;
634 }
635 }
636
637 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
638 return PHY_INTERFACE_MODE_GMII;
639
640 return PHY_INTERFACE_MODE_MII;
641}
642
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000643static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
644{
645 const char *model;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000646 const void *mac_addr;
647 int err = 0, i;
648 struct net_device *dev = NULL;
649 struct gfar_private *priv = NULL;
650 struct device_node *np = ofdev->dev.of_node;
651 struct device_node *child = NULL;
652 u32 stash_len = 0;
653 u32 stash_idx = 0;
654 unsigned int num_tx_qs, num_rx_qs;
655 unsigned short mode, poll_mode;
656
657 if (!np)
658 return -ENODEV;
659
660 if (of_device_is_compatible(np, "fsl,etsec2")) {
661 mode = MQ_MG_MODE;
662 poll_mode = GFAR_SQ_POLLING;
663 } else {
664 mode = SQ_SG_MODE;
665 poll_mode = GFAR_SQ_POLLING;
666 }
667
668 if (mode == SQ_SG_MODE) {
669 num_tx_qs = 1;
670 num_rx_qs = 1;
671 } else { /* MQ_MG_MODE */
672 /* get the actual number of supported groups */
673 unsigned int num_grps = gfar_of_group_count(np);
674
675 if (num_grps == 0 || num_grps > MAXGROUPS) {
676 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
677 num_grps);
678 pr_err("Cannot do alloc_etherdev, aborting\n");
679 return -EINVAL;
680 }
681
682 if (poll_mode == GFAR_SQ_POLLING) {
683 num_tx_qs = num_grps; /* one txq per int group */
684 num_rx_qs = num_grps; /* one rxq per int group */
685 } else { /* GFAR_MQ_POLLING */
686 u32 tx_queues, rx_queues;
687 int ret;
688
689 /* parse the num of HW tx and rx queues */
690 ret = of_property_read_u32(np, "fsl,num_tx_queues",
691 &tx_queues);
692 num_tx_qs = ret ? 1 : tx_queues;
693
694 ret = of_property_read_u32(np, "fsl,num_rx_queues",
695 &rx_queues);
696 num_rx_qs = ret ? 1 : rx_queues;
697 }
698 }
699
700 if (num_tx_qs > MAX_TX_QS) {
701 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
702 num_tx_qs, MAX_TX_QS);
703 pr_err("Cannot do alloc_etherdev, aborting\n");
704 return -EINVAL;
705 }
706
707 if (num_rx_qs > MAX_RX_QS) {
708 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
709 num_rx_qs, MAX_RX_QS);
710 pr_err("Cannot do alloc_etherdev, aborting\n");
711 return -EINVAL;
712 }
713
714 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
715 dev = *pdev;
716 if (NULL == dev)
717 return -ENOMEM;
718
719 priv = netdev_priv(dev);
720 priv->ndev = dev;
721
722 priv->mode = mode;
723 priv->poll_mode = poll_mode;
724
725 priv->num_tx_queues = num_tx_qs;
726 netif_set_real_num_rx_queues(dev, num_rx_qs);
727 priv->num_rx_queues = num_rx_qs;
728
729 err = gfar_alloc_tx_queues(priv);
730 if (err)
731 goto tx_alloc_failed;
732
733 err = gfar_alloc_rx_queues(priv);
734 if (err)
735 goto rx_alloc_failed;
736
737 err = of_property_read_string(np, "model", &model);
738 if (err) {
739 pr_err("Device model property missing, aborting\n");
740 goto rx_alloc_failed;
741 }
742
743 /* Init Rx queue filer rule set linked list */
744 INIT_LIST_HEAD(&priv->rx_list.list);
745 priv->rx_list.count = 0;
746 mutex_init(&priv->rx_queue_access);
747
748 for (i = 0; i < MAXGROUPS; i++)
749 priv->gfargrp[i].regs = NULL;
750
751 /* Parse and initialize group specific information */
752 if (priv->mode == MQ_MG_MODE) {
753 for_each_available_child_of_node(np, child) {
David Brazdil0f672f62019-12-10 10:32:29 +0000754 if (!of_node_name_eq(child, "queue-group"))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000755 continue;
756
757 err = gfar_parse_group(child, priv, model);
Olivier Deprez0e641232021-09-23 10:07:05 +0200758 if (err) {
759 of_node_put(child);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000760 goto err_grp_init;
Olivier Deprez0e641232021-09-23 10:07:05 +0200761 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000762 }
763 } else { /* SQ_SG_MODE */
764 err = gfar_parse_group(np, priv, model);
765 if (err)
766 goto err_grp_init;
767 }
768
769 if (of_property_read_bool(np, "bd-stash")) {
770 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
771 priv->bd_stash_en = 1;
772 }
773
774 err = of_property_read_u32(np, "rx-stash-len", &stash_len);
775
776 if (err == 0)
777 priv->rx_stash_size = stash_len;
778
779 err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
780
781 if (err == 0)
782 priv->rx_stash_index = stash_idx;
783
784 if (stash_len || stash_idx)
785 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
786
787 mac_addr = of_get_mac_address(np);
788
David Brazdil0f672f62019-12-10 10:32:29 +0000789 if (!IS_ERR(mac_addr))
790 ether_addr_copy(dev->dev_addr, mac_addr);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000791
792 if (model && !strcasecmp(model, "TSEC"))
793 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
794 FSL_GIANFAR_DEV_HAS_COALESCE |
795 FSL_GIANFAR_DEV_HAS_RMON |
796 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
797
798 if (model && !strcasecmp(model, "eTSEC"))
799 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
800 FSL_GIANFAR_DEV_HAS_COALESCE |
801 FSL_GIANFAR_DEV_HAS_RMON |
802 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
803 FSL_GIANFAR_DEV_HAS_CSUM |
804 FSL_GIANFAR_DEV_HAS_VLAN |
805 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
806 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
807 FSL_GIANFAR_DEV_HAS_TIMER |
808 FSL_GIANFAR_DEV_HAS_RX_FILER;
809
David Brazdil0f672f62019-12-10 10:32:29 +0000810 /* Use PHY connection type from the DT node if one is specified there.
811 * rgmii-id really needs to be specified. Other types can be
812 * detected by hardware
813 */
814 err = of_get_phy_mode(np);
815 if (err >= 0)
816 priv->interface = err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000817 else
David Brazdil0f672f62019-12-10 10:32:29 +0000818 priv->interface = gfar_get_interface(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000819
820 if (of_find_property(np, "fsl,magic-packet", NULL))
821 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
822
823 if (of_get_property(np, "fsl,wake-on-filer", NULL))
824 priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
825
826 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
827
828 /* In the case of a fixed PHY, the DT node associated
829 * to the PHY is the Ethernet MAC DT node.
830 */
831 if (!priv->phy_node && of_phy_is_fixed_link(np)) {
832 err = of_phy_register_fixed_link(np);
833 if (err)
834 goto err_grp_init;
835
836 priv->phy_node = of_node_get(np);
837 }
838
839 /* Find the TBI PHY. If it's not there, we don't support SGMII */
840 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
841
842 return 0;
843
844err_grp_init:
845 unmap_group_regs(priv);
846rx_alloc_failed:
847 gfar_free_rx_queues(priv);
848tx_alloc_failed:
849 gfar_free_tx_queues(priv);
850 free_gfar_dev(priv);
851 return err;
852}
853
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000854static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
855 u32 class)
856{
857 u32 rqfpr = FPR_FILER_MASK;
858 u32 rqfcr = 0x0;
859
860 rqfar--;
861 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
862 priv->ftp_rqfpr[rqfar] = rqfpr;
863 priv->ftp_rqfcr[rqfar] = rqfcr;
864 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
865
866 rqfar--;
867 rqfcr = RQFCR_CMP_NOMATCH;
868 priv->ftp_rqfpr[rqfar] = rqfpr;
869 priv->ftp_rqfcr[rqfar] = rqfcr;
870 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
871
872 rqfar--;
873 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
874 rqfpr = class;
875 priv->ftp_rqfcr[rqfar] = rqfcr;
876 priv->ftp_rqfpr[rqfar] = rqfpr;
877 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
878
879 rqfar--;
880 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
881 rqfpr = class;
882 priv->ftp_rqfcr[rqfar] = rqfcr;
883 priv->ftp_rqfpr[rqfar] = rqfpr;
884 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
885
886 return rqfar;
887}
888
889static void gfar_init_filer_table(struct gfar_private *priv)
890{
891 int i = 0x0;
892 u32 rqfar = MAX_FILER_IDX;
893 u32 rqfcr = 0x0;
894 u32 rqfpr = FPR_FILER_MASK;
895
896 /* Default rule */
897 rqfcr = RQFCR_CMP_MATCH;
898 priv->ftp_rqfcr[rqfar] = rqfcr;
899 priv->ftp_rqfpr[rqfar] = rqfpr;
900 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
901
902 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
903 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
904 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
905 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
906 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
907 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
908
909 /* cur_filer_idx indicated the first non-masked rule */
910 priv->cur_filer_idx = rqfar;
911
912 /* Rest are masked rules */
913 rqfcr = RQFCR_CMP_NOMATCH;
914 for (i = 0; i < rqfar; i++) {
915 priv->ftp_rqfcr[i] = rqfcr;
916 priv->ftp_rqfpr[i] = rqfpr;
917 gfar_write_filer(priv, i, rqfcr, rqfpr);
918 }
919}
920
921#ifdef CONFIG_PPC
922static void __gfar_detect_errata_83xx(struct gfar_private *priv)
923{
924 unsigned int pvr = mfspr(SPRN_PVR);
925 unsigned int svr = mfspr(SPRN_SVR);
926 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
927 unsigned int rev = svr & 0xffff;
928
929 /* MPC8313 Rev 2.0 and higher; All MPC837x */
930 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
931 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
932 priv->errata |= GFAR_ERRATA_74;
933
934 /* MPC8313 and MPC837x all rev */
935 if ((pvr == 0x80850010 && mod == 0x80b0) ||
936 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
937 priv->errata |= GFAR_ERRATA_76;
938
939 /* MPC8313 Rev < 2.0 */
940 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
941 priv->errata |= GFAR_ERRATA_12;
942}
943
944static void __gfar_detect_errata_85xx(struct gfar_private *priv)
945{
946 unsigned int svr = mfspr(SPRN_SVR);
947
948 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
949 priv->errata |= GFAR_ERRATA_12;
950 /* P2020/P1010 Rev 1; MPC8548 Rev 2 */
951 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
952 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
953 ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
954 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
955}
956#endif
957
958static void gfar_detect_errata(struct gfar_private *priv)
959{
960 struct device *dev = &priv->ofdev->dev;
961
962 /* no plans to fix */
963 priv->errata |= GFAR_ERRATA_A002;
964
965#ifdef CONFIG_PPC
966 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
967 __gfar_detect_errata_85xx(priv);
968 else /* non-mpc85xx parts, i.e. e300 core based */
969 __gfar_detect_errata_83xx(priv);
970#endif
971
972 if (priv->errata)
973 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
974 priv->errata);
975}
976
David Brazdil0f672f62019-12-10 10:32:29 +0000977static void gfar_init_addr_hash_table(struct gfar_private *priv)
978{
979 struct gfar __iomem *regs = priv->gfargrp[0].regs;
980
981 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
982 priv->extended_hash = 1;
983 priv->hash_width = 9;
984
985 priv->hash_regs[0] = &regs->igaddr0;
986 priv->hash_regs[1] = &regs->igaddr1;
987 priv->hash_regs[2] = &regs->igaddr2;
988 priv->hash_regs[3] = &regs->igaddr3;
989 priv->hash_regs[4] = &regs->igaddr4;
990 priv->hash_regs[5] = &regs->igaddr5;
991 priv->hash_regs[6] = &regs->igaddr6;
992 priv->hash_regs[7] = &regs->igaddr7;
993 priv->hash_regs[8] = &regs->gaddr0;
994 priv->hash_regs[9] = &regs->gaddr1;
995 priv->hash_regs[10] = &regs->gaddr2;
996 priv->hash_regs[11] = &regs->gaddr3;
997 priv->hash_regs[12] = &regs->gaddr4;
998 priv->hash_regs[13] = &regs->gaddr5;
999 priv->hash_regs[14] = &regs->gaddr6;
1000 priv->hash_regs[15] = &regs->gaddr7;
1001
1002 } else {
1003 priv->extended_hash = 0;
1004 priv->hash_width = 8;
1005
1006 priv->hash_regs[0] = &regs->gaddr0;
1007 priv->hash_regs[1] = &regs->gaddr1;
1008 priv->hash_regs[2] = &regs->gaddr2;
1009 priv->hash_regs[3] = &regs->gaddr3;
1010 priv->hash_regs[4] = &regs->gaddr4;
1011 priv->hash_regs[5] = &regs->gaddr5;
1012 priv->hash_regs[6] = &regs->gaddr6;
1013 priv->hash_regs[7] = &regs->gaddr7;
1014 }
1015}
1016
1017static int __gfar_is_rx_idle(struct gfar_private *priv)
1018{
1019 u32 res;
1020
1021 /* Normaly TSEC should not hang on GRS commands, so we should
1022 * actually wait for IEVENT_GRSC flag.
1023 */
1024 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1025 return 0;
1026
1027 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1028 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1029 * and the Rx can be safely reset.
1030 */
1031 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1032 res &= 0x7f807f80;
1033 if ((res & 0xffff) == (res >> 16))
1034 return 1;
1035
1036 return 0;
1037}
1038
1039/* Halt the receive and transmit queues */
1040static void gfar_halt_nodisable(struct gfar_private *priv)
1041{
1042 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1043 u32 tempval;
1044 unsigned int timeout;
1045 int stopped;
1046
1047 gfar_ints_disable(priv);
1048
1049 if (gfar_is_dma_stopped(priv))
1050 return;
1051
1052 /* Stop the DMA, and wait for it to stop */
1053 tempval = gfar_read(&regs->dmactrl);
1054 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1055 gfar_write(&regs->dmactrl, tempval);
1056
1057retry:
1058 timeout = 1000;
1059 while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1060 cpu_relax();
1061 timeout--;
1062 }
1063
1064 if (!timeout)
1065 stopped = gfar_is_dma_stopped(priv);
1066
1067 if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1068 !__gfar_is_rx_idle(priv))
1069 goto retry;
1070}
1071
1072/* Halt the receive and transmit queues */
1073static void gfar_halt(struct gfar_private *priv)
1074{
1075 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1076 u32 tempval;
1077
1078 /* Dissable the Rx/Tx hw queues */
1079 gfar_write(&regs->rqueue, 0);
1080 gfar_write(&regs->tqueue, 0);
1081
1082 mdelay(10);
1083
1084 gfar_halt_nodisable(priv);
1085
1086 /* Disable Rx/Tx DMA */
1087 tempval = gfar_read(&regs->maccfg1);
1088 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1089 gfar_write(&regs->maccfg1, tempval);
1090}
1091
1092static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1093{
1094 struct txbd8 *txbdp;
1095 struct gfar_private *priv = netdev_priv(tx_queue->dev);
1096 int i, j;
1097
1098 txbdp = tx_queue->tx_bd_base;
1099
1100 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1101 if (!tx_queue->tx_skbuff[i])
1102 continue;
1103
1104 dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1105 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
1106 txbdp->lstatus = 0;
1107 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1108 j++) {
1109 txbdp++;
1110 dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1111 be16_to_cpu(txbdp->length),
1112 DMA_TO_DEVICE);
1113 }
1114 txbdp++;
1115 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1116 tx_queue->tx_skbuff[i] = NULL;
1117 }
1118 kfree(tx_queue->tx_skbuff);
1119 tx_queue->tx_skbuff = NULL;
1120}
1121
1122static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1123{
1124 int i;
1125
1126 struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
1127
1128 dev_kfree_skb(rx_queue->skb);
1129
1130 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1131 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
1132
1133 rxbdp->lstatus = 0;
1134 rxbdp->bufPtr = 0;
1135 rxbdp++;
1136
1137 if (!rxb->page)
1138 continue;
1139
1140 dma_unmap_page(rx_queue->dev, rxb->dma,
1141 PAGE_SIZE, DMA_FROM_DEVICE);
1142 __free_page(rxb->page);
1143
1144 rxb->page = NULL;
1145 }
1146
1147 kfree(rx_queue->rx_buff);
1148 rx_queue->rx_buff = NULL;
1149}
1150
1151/* If there are any tx skbs or rx skbs still around, free them.
1152 * Then free tx_skbuff and rx_skbuff
1153 */
1154static void free_skb_resources(struct gfar_private *priv)
1155{
1156 struct gfar_priv_tx_q *tx_queue = NULL;
1157 struct gfar_priv_rx_q *rx_queue = NULL;
1158 int i;
1159
1160 /* Go through all the buffer descriptors and free their data buffers */
1161 for (i = 0; i < priv->num_tx_queues; i++) {
1162 struct netdev_queue *txq;
1163
1164 tx_queue = priv->tx_queue[i];
1165 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1166 if (tx_queue->tx_skbuff)
1167 free_skb_tx_queue(tx_queue);
1168 netdev_tx_reset_queue(txq);
1169 }
1170
1171 for (i = 0; i < priv->num_rx_queues; i++) {
1172 rx_queue = priv->rx_queue[i];
1173 if (rx_queue->rx_buff)
1174 free_skb_rx_queue(rx_queue);
1175 }
1176
1177 dma_free_coherent(priv->dev,
1178 sizeof(struct txbd8) * priv->total_tx_ring_size +
1179 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1180 priv->tx_queue[0]->tx_bd_base,
1181 priv->tx_queue[0]->tx_bd_dma_base);
1182}
1183
1184void stop_gfar(struct net_device *dev)
1185{
1186 struct gfar_private *priv = netdev_priv(dev);
1187
1188 netif_tx_stop_all_queues(dev);
1189
1190 smp_mb__before_atomic();
1191 set_bit(GFAR_DOWN, &priv->state);
1192 smp_mb__after_atomic();
1193
1194 disable_napi(priv);
1195
1196 /* disable ints and gracefully shut down Rx/Tx DMA */
1197 gfar_halt(priv);
1198
1199 phy_stop(dev->phydev);
1200
1201 free_skb_resources(priv);
1202}
1203
1204static void gfar_start(struct gfar_private *priv)
1205{
1206 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1207 u32 tempval;
1208 int i = 0;
1209
1210 /* Enable Rx/Tx hw queues */
1211 gfar_write(&regs->rqueue, priv->rqueue);
1212 gfar_write(&regs->tqueue, priv->tqueue);
1213
1214 /* Initialize DMACTRL to have WWR and WOP */
1215 tempval = gfar_read(&regs->dmactrl);
1216 tempval |= DMACTRL_INIT_SETTINGS;
1217 gfar_write(&regs->dmactrl, tempval);
1218
1219 /* Make sure we aren't stopped */
1220 tempval = gfar_read(&regs->dmactrl);
1221 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1222 gfar_write(&regs->dmactrl, tempval);
1223
1224 for (i = 0; i < priv->num_grps; i++) {
1225 regs = priv->gfargrp[i].regs;
1226 /* Clear THLT/RHLT, so that the DMA starts polling now */
1227 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1228 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1229 }
1230
1231 /* Enable Rx/Tx DMA */
1232 tempval = gfar_read(&regs->maccfg1);
1233 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1234 gfar_write(&regs->maccfg1, tempval);
1235
1236 gfar_ints_enable(priv);
1237
1238 netif_trans_update(priv->ndev); /* prevent tx timeout */
1239}
1240
1241static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
1242{
1243 struct page *page;
1244 dma_addr_t addr;
1245
1246 page = dev_alloc_page();
1247 if (unlikely(!page))
1248 return false;
1249
1250 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
1251 if (unlikely(dma_mapping_error(rxq->dev, addr))) {
1252 __free_page(page);
1253
1254 return false;
1255 }
1256
1257 rxb->dma = addr;
1258 rxb->page = page;
1259 rxb->page_offset = 0;
1260
1261 return true;
1262}
1263
1264static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1265{
1266 struct gfar_private *priv = netdev_priv(rx_queue->ndev);
1267 struct gfar_extra_stats *estats = &priv->extra_stats;
1268
1269 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
1270 atomic64_inc(&estats->rx_alloc_err);
1271}
1272
1273static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
1274 int alloc_cnt)
1275{
1276 struct rxbd8 *bdp;
1277 struct gfar_rx_buff *rxb;
1278 int i;
1279
1280 i = rx_queue->next_to_use;
1281 bdp = &rx_queue->rx_bd_base[i];
1282 rxb = &rx_queue->rx_buff[i];
1283
1284 while (alloc_cnt--) {
1285 /* try reuse page */
1286 if (unlikely(!rxb->page)) {
1287 if (unlikely(!gfar_new_page(rx_queue, rxb))) {
1288 gfar_rx_alloc_err(rx_queue);
1289 break;
1290 }
1291 }
1292
1293 /* Setup the new RxBD */
1294 gfar_init_rxbdp(rx_queue, bdp,
1295 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
1296
1297 /* Update to the next pointer */
1298 bdp++;
1299 rxb++;
1300
1301 if (unlikely(++i == rx_queue->rx_ring_size)) {
1302 i = 0;
1303 bdp = rx_queue->rx_bd_base;
1304 rxb = rx_queue->rx_buff;
1305 }
1306 }
1307
1308 rx_queue->next_to_use = i;
1309 rx_queue->next_to_alloc = i;
1310}
1311
1312static void gfar_init_bds(struct net_device *ndev)
1313{
1314 struct gfar_private *priv = netdev_priv(ndev);
1315 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1316 struct gfar_priv_tx_q *tx_queue = NULL;
1317 struct gfar_priv_rx_q *rx_queue = NULL;
1318 struct txbd8 *txbdp;
1319 u32 __iomem *rfbptr;
1320 int i, j;
1321
1322 for (i = 0; i < priv->num_tx_queues; i++) {
1323 tx_queue = priv->tx_queue[i];
1324 /* Initialize some variables in our dev structure */
1325 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
1326 tx_queue->dirty_tx = tx_queue->tx_bd_base;
1327 tx_queue->cur_tx = tx_queue->tx_bd_base;
1328 tx_queue->skb_curtx = 0;
1329 tx_queue->skb_dirtytx = 0;
1330
1331 /* Initialize Transmit Descriptor Ring */
1332 txbdp = tx_queue->tx_bd_base;
1333 for (j = 0; j < tx_queue->tx_ring_size; j++) {
1334 txbdp->lstatus = 0;
1335 txbdp->bufPtr = 0;
1336 txbdp++;
1337 }
1338
1339 /* Set the last descriptor in the ring to indicate wrap */
1340 txbdp--;
1341 txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
1342 TXBD_WRAP);
1343 }
1344
1345 rfbptr = &regs->rfbptr0;
1346 for (i = 0; i < priv->num_rx_queues; i++) {
1347 rx_queue = priv->rx_queue[i];
1348
1349 rx_queue->next_to_clean = 0;
1350 rx_queue->next_to_use = 0;
1351 rx_queue->next_to_alloc = 0;
1352
1353 /* make sure next_to_clean != next_to_use after this
1354 * by leaving at least 1 unused descriptor
1355 */
1356 gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
1357
1358 rx_queue->rfbptr = rfbptr;
1359 rfbptr += 2;
1360 }
1361}
1362
1363static int gfar_alloc_skb_resources(struct net_device *ndev)
1364{
1365 void *vaddr;
1366 dma_addr_t addr;
1367 int i, j;
1368 struct gfar_private *priv = netdev_priv(ndev);
1369 struct device *dev = priv->dev;
1370 struct gfar_priv_tx_q *tx_queue = NULL;
1371 struct gfar_priv_rx_q *rx_queue = NULL;
1372
1373 priv->total_tx_ring_size = 0;
1374 for (i = 0; i < priv->num_tx_queues; i++)
1375 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
1376
1377 priv->total_rx_ring_size = 0;
1378 for (i = 0; i < priv->num_rx_queues; i++)
1379 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
1380
1381 /* Allocate memory for the buffer descriptors */
1382 vaddr = dma_alloc_coherent(dev,
1383 (priv->total_tx_ring_size *
1384 sizeof(struct txbd8)) +
1385 (priv->total_rx_ring_size *
1386 sizeof(struct rxbd8)),
1387 &addr, GFP_KERNEL);
1388 if (!vaddr)
1389 return -ENOMEM;
1390
1391 for (i = 0; i < priv->num_tx_queues; i++) {
1392 tx_queue = priv->tx_queue[i];
1393 tx_queue->tx_bd_base = vaddr;
1394 tx_queue->tx_bd_dma_base = addr;
1395 tx_queue->dev = ndev;
1396 /* enet DMA only understands physical addresses */
1397 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
1398 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
1399 }
1400
1401 /* Start the rx descriptor ring where the tx ring leaves off */
1402 for (i = 0; i < priv->num_rx_queues; i++) {
1403 rx_queue = priv->rx_queue[i];
1404 rx_queue->rx_bd_base = vaddr;
1405 rx_queue->rx_bd_dma_base = addr;
1406 rx_queue->ndev = ndev;
1407 rx_queue->dev = dev;
1408 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1409 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
1410 }
1411
1412 /* Setup the skbuff rings */
1413 for (i = 0; i < priv->num_tx_queues; i++) {
1414 tx_queue = priv->tx_queue[i];
1415 tx_queue->tx_skbuff =
1416 kmalloc_array(tx_queue->tx_ring_size,
1417 sizeof(*tx_queue->tx_skbuff),
1418 GFP_KERNEL);
1419 if (!tx_queue->tx_skbuff)
1420 goto cleanup;
1421
1422 for (j = 0; j < tx_queue->tx_ring_size; j++)
1423 tx_queue->tx_skbuff[j] = NULL;
1424 }
1425
1426 for (i = 0; i < priv->num_rx_queues; i++) {
1427 rx_queue = priv->rx_queue[i];
1428 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
1429 sizeof(*rx_queue->rx_buff),
1430 GFP_KERNEL);
1431 if (!rx_queue->rx_buff)
1432 goto cleanup;
1433 }
1434
1435 gfar_init_bds(ndev);
1436
1437 return 0;
1438
1439cleanup:
1440 free_skb_resources(priv);
1441 return -ENOMEM;
1442}
1443
1444/* Bring the controller up and running */
1445int startup_gfar(struct net_device *ndev)
1446{
1447 struct gfar_private *priv = netdev_priv(ndev);
1448 int err;
1449
1450 gfar_mac_reset(priv);
1451
1452 err = gfar_alloc_skb_resources(ndev);
1453 if (err)
1454 return err;
1455
1456 gfar_init_tx_rx_base(priv);
1457
1458 smp_mb__before_atomic();
1459 clear_bit(GFAR_DOWN, &priv->state);
1460 smp_mb__after_atomic();
1461
1462 /* Start Rx/Tx DMA and enable the interrupts */
1463 gfar_start(priv);
1464
1465 /* force link state update after mac reset */
1466 priv->oldlink = 0;
1467 priv->oldspeed = 0;
1468 priv->oldduplex = -1;
1469
1470 phy_start(ndev->phydev);
1471
1472 enable_napi(priv);
1473
1474 netif_tx_wake_all_queues(ndev);
1475
1476 return 0;
1477}
1478
1479static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
1480{
1481 struct net_device *ndev = priv->ndev;
1482 struct phy_device *phydev = ndev->phydev;
1483 u32 val = 0;
1484
1485 if (!phydev->duplex)
1486 return val;
1487
1488 if (!priv->pause_aneg_en) {
1489 if (priv->tx_pause_en)
1490 val |= MACCFG1_TX_FLOW;
1491 if (priv->rx_pause_en)
1492 val |= MACCFG1_RX_FLOW;
1493 } else {
1494 u16 lcl_adv, rmt_adv;
1495 u8 flowctrl;
1496 /* get link partner capabilities */
1497 rmt_adv = 0;
1498 if (phydev->pause)
1499 rmt_adv = LPA_PAUSE_CAP;
1500 if (phydev->asym_pause)
1501 rmt_adv |= LPA_PAUSE_ASYM;
1502
1503 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1504 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1505 if (flowctrl & FLOW_CTRL_TX)
1506 val |= MACCFG1_TX_FLOW;
1507 if (flowctrl & FLOW_CTRL_RX)
1508 val |= MACCFG1_RX_FLOW;
1509 }
1510
1511 return val;
1512}
1513
1514static noinline void gfar_update_link_state(struct gfar_private *priv)
1515{
1516 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1517 struct net_device *ndev = priv->ndev;
1518 struct phy_device *phydev = ndev->phydev;
1519 struct gfar_priv_rx_q *rx_queue = NULL;
1520 int i;
1521
1522 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
1523 return;
1524
1525 if (phydev->link) {
1526 u32 tempval1 = gfar_read(&regs->maccfg1);
1527 u32 tempval = gfar_read(&regs->maccfg2);
1528 u32 ecntrl = gfar_read(&regs->ecntrl);
1529 u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
1530
1531 if (phydev->duplex != priv->oldduplex) {
1532 if (!(phydev->duplex))
1533 tempval &= ~(MACCFG2_FULL_DUPLEX);
1534 else
1535 tempval |= MACCFG2_FULL_DUPLEX;
1536
1537 priv->oldduplex = phydev->duplex;
1538 }
1539
1540 if (phydev->speed != priv->oldspeed) {
1541 switch (phydev->speed) {
1542 case 1000:
1543 tempval =
1544 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
1545
1546 ecntrl &= ~(ECNTRL_R100);
1547 break;
1548 case 100:
1549 case 10:
1550 tempval =
1551 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1552
1553 /* Reduced mode distinguishes
1554 * between 10 and 100
1555 */
1556 if (phydev->speed == SPEED_100)
1557 ecntrl |= ECNTRL_R100;
1558 else
1559 ecntrl &= ~(ECNTRL_R100);
1560 break;
1561 default:
1562 netif_warn(priv, link, priv->ndev,
1563 "Ack! Speed (%d) is not 10/100/1000!\n",
1564 phydev->speed);
1565 break;
1566 }
1567
1568 priv->oldspeed = phydev->speed;
1569 }
1570
1571 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
1572 tempval1 |= gfar_get_flowctrl_cfg(priv);
1573
1574 /* Turn last free buffer recording on */
1575 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
1576 for (i = 0; i < priv->num_rx_queues; i++) {
1577 u32 bdp_dma;
1578
1579 rx_queue = priv->rx_queue[i];
1580 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
1581 gfar_write(rx_queue->rfbptr, bdp_dma);
1582 }
1583
1584 priv->tx_actual_en = 1;
1585 }
1586
1587 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
1588 priv->tx_actual_en = 0;
1589
1590 gfar_write(&regs->maccfg1, tempval1);
1591 gfar_write(&regs->maccfg2, tempval);
1592 gfar_write(&regs->ecntrl, ecntrl);
1593
1594 if (!priv->oldlink)
1595 priv->oldlink = 1;
1596
1597 } else if (priv->oldlink) {
1598 priv->oldlink = 0;
1599 priv->oldspeed = 0;
1600 priv->oldduplex = -1;
1601 }
1602
1603 if (netif_msg_link(priv))
1604 phy_print_status(phydev);
1605}
1606
1607/* Called every time the controller might need to be made
1608 * aware of new link state. The PHY code conveys this
1609 * information through variables in the phydev structure, and this
1610 * function converts those variables into the appropriate
1611 * register values, and can bring down the device if needed.
1612 */
1613static void adjust_link(struct net_device *dev)
1614{
1615 struct gfar_private *priv = netdev_priv(dev);
1616 struct phy_device *phydev = dev->phydev;
1617
1618 if (unlikely(phydev->link != priv->oldlink ||
1619 (phydev->link && (phydev->duplex != priv->oldduplex ||
1620 phydev->speed != priv->oldspeed))))
1621 gfar_update_link_state(priv);
1622}
1623
1624/* Initialize TBI PHY interface for communicating with the
1625 * SERDES lynx PHY on the chip. We communicate with this PHY
1626 * through the MDIO bus on each controller, treating it as a
1627 * "normal" PHY at the address found in the TBIPA register. We assume
1628 * that the TBIPA register is valid. Either the MDIO bus code will set
1629 * it to a value that doesn't conflict with other PHYs on the bus, or the
1630 * value doesn't matter, as there are no other PHYs on the bus.
1631 */
1632static void gfar_configure_serdes(struct net_device *dev)
1633{
1634 struct gfar_private *priv = netdev_priv(dev);
1635 struct phy_device *tbiphy;
1636
1637 if (!priv->tbi_node) {
1638 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1639 "device tree specify a tbi-handle\n");
1640 return;
1641 }
1642
1643 tbiphy = of_phy_find_device(priv->tbi_node);
1644 if (!tbiphy) {
1645 dev_err(&dev->dev, "error: Could not get TBI device\n");
1646 return;
1647 }
1648
1649 /* If the link is already up, we must already be ok, and don't need to
1650 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1651 * everything for us? Resetting it takes the link down and requires
1652 * several seconds for it to come back.
1653 */
1654 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
1655 put_device(&tbiphy->mdio.dev);
1656 return;
1657 }
1658
1659 /* Single clk mode, mii mode off(for serdes communication) */
1660 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1661
1662 phy_write(tbiphy, MII_ADVERTISE,
1663 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1664 ADVERTISE_1000XPSE_ASYM);
1665
1666 phy_write(tbiphy, MII_BMCR,
1667 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1668 BMCR_SPEED1000);
1669
1670 put_device(&tbiphy->mdio.dev);
1671}
1672
1673/* Initializes driver's PHY state, and attaches to the PHY.
1674 * Returns 0 on success.
1675 */
1676static int init_phy(struct net_device *dev)
1677{
1678 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1679 struct gfar_private *priv = netdev_priv(dev);
1680 phy_interface_t interface = priv->interface;
1681 struct phy_device *phydev;
1682 struct ethtool_eee edata;
1683
1684 linkmode_set_bit_array(phy_10_100_features_array,
1685 ARRAY_SIZE(phy_10_100_features_array),
1686 mask);
1687 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
1688 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
1689 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1690 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
1691
1692 priv->oldlink = 0;
1693 priv->oldspeed = 0;
1694 priv->oldduplex = -1;
1695
1696 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1697 interface);
1698 if (!phydev) {
1699 dev_err(&dev->dev, "could not attach to PHY\n");
1700 return -ENODEV;
1701 }
1702
1703 if (interface == PHY_INTERFACE_MODE_SGMII)
1704 gfar_configure_serdes(dev);
1705
1706 /* Remove any features not supported by the controller */
1707 linkmode_and(phydev->supported, phydev->supported, mask);
1708 linkmode_copy(phydev->advertising, phydev->supported);
1709
1710 /* Add support for flow control */
1711 phy_support_asym_pause(phydev);
1712
1713 /* disable EEE autoneg, EEE not supported by eTSEC */
1714 memset(&edata, 0, sizeof(struct ethtool_eee));
1715 phy_ethtool_set_eee(phydev, &edata);
1716
1717 return 0;
1718}
1719
1720static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1721{
1722 struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
1723
1724 memset(fcb, 0, GMAC_FCB_LEN);
1725
1726 return fcb;
1727}
1728
1729static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1730 int fcb_length)
1731{
1732 /* If we're here, it's a IP packet with a TCP or UDP
1733 * payload. We set it to checksum, using a pseudo-header
1734 * we provide
1735 */
1736 u8 flags = TXFCB_DEFAULT;
1737
1738 /* Tell the controller what the protocol is
1739 * And provide the already calculated phcs
1740 */
1741 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1742 flags |= TXFCB_UDP;
1743 fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
1744 } else
1745 fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
1746
1747 /* l3os is the distance between the start of the
1748 * frame (skb->data) and the start of the IP hdr.
1749 * l4os is the distance between the start of the
1750 * l3 hdr and the l4 hdr
1751 */
1752 fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
1753 fcb->l4os = skb_network_header_len(skb);
1754
1755 fcb->flags = flags;
1756}
1757
1758static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1759{
1760 fcb->flags |= TXFCB_VLN;
1761 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
1762}
1763
1764static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1765 struct txbd8 *base, int ring_size)
1766{
1767 struct txbd8 *new_bd = bdp + stride;
1768
1769 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1770}
1771
1772static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1773 int ring_size)
1774{
1775 return skip_txbd(bdp, 1, base, ring_size);
1776}
1777
1778/* eTSEC12: csum generation not supported for some fcb offsets */
1779static inline bool gfar_csum_errata_12(struct gfar_private *priv,
1780 unsigned long fcb_addr)
1781{
1782 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
1783 (fcb_addr % 0x20) > 0x18);
1784}
1785
1786/* eTSEC76: csum generation for frames larger than 2500 may
1787 * cause excess delays before start of transmission
1788 */
1789static inline bool gfar_csum_errata_76(struct gfar_private *priv,
1790 unsigned int len)
1791{
1792 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
1793 (len > 2500));
1794}
1795
1796/* This is called by the kernel when a frame is ready for transmission.
1797 * It is pointed to by the dev->hard_start_xmit function pointer
1798 */
1799static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1800{
1801 struct gfar_private *priv = netdev_priv(dev);
1802 struct gfar_priv_tx_q *tx_queue = NULL;
1803 struct netdev_queue *txq;
1804 struct gfar __iomem *regs = NULL;
1805 struct txfcb *fcb = NULL;
1806 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1807 u32 lstatus;
1808 skb_frag_t *frag;
1809 int i, rq = 0;
1810 int do_tstamp, do_csum, do_vlan;
1811 u32 bufaddr;
1812 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
1813
1814 rq = skb->queue_mapping;
1815 tx_queue = priv->tx_queue[rq];
1816 txq = netdev_get_tx_queue(dev, rq);
1817 base = tx_queue->tx_bd_base;
1818 regs = tx_queue->grp->regs;
1819
1820 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
1821 do_vlan = skb_vlan_tag_present(skb);
1822 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1823 priv->hwts_tx_en;
1824
1825 if (do_csum || do_vlan)
1826 fcb_len = GMAC_FCB_LEN;
1827
1828 /* check if time stamp should be generated */
1829 if (unlikely(do_tstamp))
1830 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
1831
1832 /* make space for additional header when fcb is needed */
Olivier Deprez0e641232021-09-23 10:07:05 +02001833 if (fcb_len) {
1834 if (unlikely(skb_cow_head(skb, fcb_len))) {
David Brazdil0f672f62019-12-10 10:32:29 +00001835 dev->stats.tx_errors++;
1836 dev_kfree_skb_any(skb);
1837 return NETDEV_TX_OK;
1838 }
David Brazdil0f672f62019-12-10 10:32:29 +00001839 }
1840
1841 /* total number of fragments in the SKB */
1842 nr_frags = skb_shinfo(skb)->nr_frags;
1843
1844 /* calculate the required number of TxBDs for this skb */
1845 if (unlikely(do_tstamp))
1846 nr_txbds = nr_frags + 2;
1847 else
1848 nr_txbds = nr_frags + 1;
1849
1850 /* check if there is space to queue this packet */
1851 if (nr_txbds > tx_queue->num_txbdfree) {
1852 /* no space, stop the queue */
1853 netif_tx_stop_queue(txq);
1854 dev->stats.tx_fifo_errors++;
1855 return NETDEV_TX_BUSY;
1856 }
1857
1858 /* Update transmit stats */
1859 bytes_sent = skb->len;
1860 tx_queue->stats.tx_bytes += bytes_sent;
1861 /* keep Tx bytes on wire for BQL accounting */
1862 GFAR_CB(skb)->bytes_sent = bytes_sent;
1863 tx_queue->stats.tx_packets++;
1864
1865 txbdp = txbdp_start = tx_queue->cur_tx;
1866 lstatus = be32_to_cpu(txbdp->lstatus);
1867
1868 /* Add TxPAL between FCB and frame if required */
1869 if (unlikely(do_tstamp)) {
1870 skb_push(skb, GMAC_TXPAL_LEN);
1871 memset(skb->data, 0, GMAC_TXPAL_LEN);
1872 }
1873
1874 /* Add TxFCB if required */
1875 if (fcb_len) {
1876 fcb = gfar_add_fcb(skb);
1877 lstatus |= BD_LFLAG(TXBD_TOE);
1878 }
1879
1880 /* Set up checksumming */
1881 if (do_csum) {
1882 gfar_tx_checksum(skb, fcb, fcb_len);
1883
1884 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
1885 unlikely(gfar_csum_errata_76(priv, skb->len))) {
1886 __skb_pull(skb, GMAC_FCB_LEN);
1887 skb_checksum_help(skb);
1888 if (do_vlan || do_tstamp) {
1889 /* put back a new fcb for vlan/tstamp TOE */
1890 fcb = gfar_add_fcb(skb);
1891 } else {
1892 /* Tx TOE not used */
1893 lstatus &= ~(BD_LFLAG(TXBD_TOE));
1894 fcb = NULL;
1895 }
1896 }
1897 }
1898
1899 if (do_vlan)
1900 gfar_tx_vlan(skb, fcb);
1901
1902 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
1903 DMA_TO_DEVICE);
1904 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1905 goto dma_map_err;
1906
1907 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
1908
1909 /* Time stamp insertion requires one additional TxBD */
1910 if (unlikely(do_tstamp))
1911 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
1912 tx_queue->tx_ring_size);
1913
1914 if (likely(!nr_frags)) {
1915 if (likely(!do_tstamp))
1916 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1917 } else {
1918 u32 lstatus_start = lstatus;
1919
1920 /* Place the fragment addresses and lengths into the TxBDs */
1921 frag = &skb_shinfo(skb)->frags[0];
1922 for (i = 0; i < nr_frags; i++, frag++) {
1923 unsigned int size;
1924
1925 /* Point at the next BD, wrapping as needed */
1926 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1927
1928 size = skb_frag_size(frag);
1929
1930 lstatus = be32_to_cpu(txbdp->lstatus) | size |
1931 BD_LFLAG(TXBD_READY);
1932
1933 /* Handle the last BD specially */
1934 if (i == nr_frags - 1)
1935 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1936
1937 bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
1938 size, DMA_TO_DEVICE);
1939 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1940 goto dma_map_err;
1941
1942 /* set the TxBD length and buffer pointer */
1943 txbdp->bufPtr = cpu_to_be32(bufaddr);
1944 txbdp->lstatus = cpu_to_be32(lstatus);
1945 }
1946
1947 lstatus = lstatus_start;
1948 }
1949
1950 /* If time stamping is requested, one additional TxBD must be set up. The
1951 * first TxBD points to the FCB and must have a data length of
1952 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
1953 * the full frame length.
1954 */
1955 if (unlikely(do_tstamp)) {
1956 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
1957
1958 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
1959 bufaddr += fcb_len;
1960
1961 lstatus_ts |= BD_LFLAG(TXBD_READY) |
1962 (skb_headlen(skb) - fcb_len);
1963 if (!nr_frags)
1964 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1965
1966 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
1967 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
1968 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
1969
1970 /* Setup tx hardware time stamping */
1971 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1972 fcb->ptp = 1;
1973 } else {
1974 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1975 }
1976
1977 netdev_tx_sent_queue(txq, bytes_sent);
1978
1979 gfar_wmb();
1980
1981 txbdp_start->lstatus = cpu_to_be32(lstatus);
1982
1983 gfar_wmb(); /* force lstatus write before tx_skbuff */
1984
1985 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1986
1987 /* Update the current skb pointer to the next entry we will use
1988 * (wrapping if necessary)
1989 */
1990 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1991 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1992
1993 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1994
1995 /* We can work in parallel with gfar_clean_tx_ring(), except
1996 * when modifying num_txbdfree. Note that we didn't grab the lock
1997 * while reading num_txbdfree and checking for available space;
1998 * that's safe because outside of this function the count can only grow.
1999 */
2000 spin_lock_bh(&tx_queue->txlock);
2001 /* reduce TxBD free count */
2002 tx_queue->num_txbdfree -= (nr_txbds);
2003 spin_unlock_bh(&tx_queue->txlock);
2004
2005 /* If the next BD still needs to be cleaned up, then the bds
2006 * are full. We need to tell the kernel to stop sending us stuff.
2007 */
2008 if (!tx_queue->num_txbdfree) {
2009 netif_tx_stop_queue(txq);
2010
2011 dev->stats.tx_fifo_errors++;
2012 }
2013
2014 /* Tell the DMA to go go go */
2015 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2016
2017 return NETDEV_TX_OK;
2018
2019dma_map_err:
2020 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
2021 if (do_tstamp)
2022 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2023 for (i = 0; i < nr_frags; i++) {
2024 lstatus = be32_to_cpu(txbdp->lstatus);
2025 if (!(lstatus & BD_LFLAG(TXBD_READY)))
2026 break;
2027
2028 lstatus &= ~BD_LFLAG(TXBD_READY);
2029 txbdp->lstatus = cpu_to_be32(lstatus);
2030 bufaddr = be32_to_cpu(txbdp->bufPtr);
2031 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
2032 DMA_TO_DEVICE);
2033 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2034 }
2035 gfar_wmb();
2036 dev_kfree_skb_any(skb);
2037 return NETDEV_TX_OK;
2038}
2039
2040/* Changes the mac address if the controller is not running. */
2041static int gfar_set_mac_address(struct net_device *dev)
2042{
2043 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2044
2045 return 0;
2046}
2047
2048static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2049{
2050 struct gfar_private *priv = netdev_priv(dev);
2051
2052 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2053 cpu_relax();
2054
2055 if (dev->flags & IFF_UP)
2056 stop_gfar(dev);
2057
2058 dev->mtu = new_mtu;
2059
2060 if (dev->flags & IFF_UP)
2061 startup_gfar(dev);
2062
2063 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2064
2065 return 0;
2066}
2067
2068static void reset_gfar(struct net_device *ndev)
2069{
2070 struct gfar_private *priv = netdev_priv(ndev);
2071
2072 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2073 cpu_relax();
2074
2075 stop_gfar(ndev);
2076 startup_gfar(ndev);
2077
2078 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2079}
2080
2081/* gfar_reset_task gets scheduled when a packet has not been
2082 * transmitted after a set amount of time.
2083 * For now, assume that clearing out all the structures, and
2084 * starting over will fix the problem.
2085 */
2086static void gfar_reset_task(struct work_struct *work)
2087{
2088 struct gfar_private *priv = container_of(work, struct gfar_private,
2089 reset_task);
2090 reset_gfar(priv->ndev);
2091}
2092
2093static void gfar_timeout(struct net_device *dev)
2094{
2095 struct gfar_private *priv = netdev_priv(dev);
2096
2097 dev->stats.tx_errors++;
2098 schedule_work(&priv->reset_task);
2099}
2100
2101static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
2102{
2103 struct hwtstamp_config config;
2104 struct gfar_private *priv = netdev_priv(netdev);
2105
2106 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2107 return -EFAULT;
2108
2109 /* reserved for future extensions */
2110 if (config.flags)
2111 return -EINVAL;
2112
2113 switch (config.tx_type) {
2114 case HWTSTAMP_TX_OFF:
2115 priv->hwts_tx_en = 0;
2116 break;
2117 case HWTSTAMP_TX_ON:
2118 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2119 return -ERANGE;
2120 priv->hwts_tx_en = 1;
2121 break;
2122 default:
2123 return -ERANGE;
2124 }
2125
2126 switch (config.rx_filter) {
2127 case HWTSTAMP_FILTER_NONE:
2128 if (priv->hwts_rx_en) {
2129 priv->hwts_rx_en = 0;
2130 reset_gfar(netdev);
2131 }
2132 break;
2133 default:
2134 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2135 return -ERANGE;
2136 if (!priv->hwts_rx_en) {
2137 priv->hwts_rx_en = 1;
2138 reset_gfar(netdev);
2139 }
2140 config.rx_filter = HWTSTAMP_FILTER_ALL;
2141 break;
2142 }
2143
2144 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2145 -EFAULT : 0;
2146}
2147
2148static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
2149{
2150 struct hwtstamp_config config;
2151 struct gfar_private *priv = netdev_priv(netdev);
2152
2153 config.flags = 0;
2154 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2155 config.rx_filter = (priv->hwts_rx_en ?
2156 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
2157
2158 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2159 -EFAULT : 0;
2160}
2161
2162static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2163{
2164 struct phy_device *phydev = dev->phydev;
2165
2166 if (!netif_running(dev))
2167 return -EINVAL;
2168
2169 if (cmd == SIOCSHWTSTAMP)
2170 return gfar_hwtstamp_set(dev, rq);
2171 if (cmd == SIOCGHWTSTAMP)
2172 return gfar_hwtstamp_get(dev, rq);
2173
2174 if (!phydev)
2175 return -ENODEV;
2176
2177 return phy_mii_ioctl(phydev, rq, cmd);
2178}
2179
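/* Illustrative userspace sketch (not part of the driver): how the hwtstamp
 * handlers above are typically exercised through the SIOCSHWTSTAMP ioctl
 * dispatched by gfar_ioctl(). The interface name "eth0" is an assumption
 * made purely for the example.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	int main(void)
 *	{
 *		struct hwtstamp_config cfg = {
 *			.tx_type = HWTSTAMP_TX_ON,
 *			.rx_filter = HWTSTAMP_FILTER_ALL,
 *		};
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)&cfg;
 *		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *	}
 *
 * gfar_hwtstamp_set() rejects HWTSTAMP_TX_ON and any non-NONE rx filter
 * with -ERANGE when the eTSEC timer block is not present.
 */
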
2180/* Reclaim completed Tx descriptors (runs from NAPI poll context) */
2181static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2182{
2183 struct net_device *dev = tx_queue->dev;
2184 struct netdev_queue *txq;
2185 struct gfar_private *priv = netdev_priv(dev);
2186 struct txbd8 *bdp, *next = NULL;
2187 struct txbd8 *lbdp = NULL;
2188 struct txbd8 *base = tx_queue->tx_bd_base;
2189 struct sk_buff *skb;
2190 int skb_dirtytx;
2191 int tx_ring_size = tx_queue->tx_ring_size;
2192 int frags = 0, nr_txbds = 0;
2193 int i;
2194 int howmany = 0;
2195 int tqi = tx_queue->qindex;
2196 unsigned int bytes_sent = 0;
2197 u32 lstatus;
2198 size_t buflen;
2199
2200 txq = netdev_get_tx_queue(dev, tqi);
2201 bdp = tx_queue->dirty_tx;
2202 skb_dirtytx = tx_queue->skb_dirtytx;
2203
2204 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2205 bool do_tstamp;
2206
2207 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2208 priv->hwts_tx_en;
2209
2210 frags = skb_shinfo(skb)->nr_frags;
2211
2212 /* When time stamping, one additional TxBD must be freed.
2213 * Also, we need to dma_unmap_single() the TxPAL.
2214 */
2215 if (unlikely(do_tstamp))
2216 nr_txbds = frags + 2;
2217 else
2218 nr_txbds = frags + 1;
2219
2220 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2221
2222 lstatus = be32_to_cpu(lbdp->lstatus);
2223
2224 /* Only clean completed frames */
2225 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2226 (lstatus & BD_LENGTH_MASK))
2227 break;
2228
2229 if (unlikely(do_tstamp)) {
2230 next = next_txbd(bdp, base, tx_ring_size);
2231 buflen = be16_to_cpu(next->length) +
2232 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2233 } else
2234 buflen = be16_to_cpu(bdp->length);
2235
2236 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2237 buflen, DMA_TO_DEVICE);
2238
2239 if (unlikely(do_tstamp)) {
2240 struct skb_shared_hwtstamps shhwtstamps;
2241 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2242 ~0x7UL);
2243
2244 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2245 shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2246 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2247 skb_tstamp_tx(skb, &shhwtstamps);
2248 gfar_clear_txbd_status(bdp);
2249 bdp = next;
2250 }
2251
2252 gfar_clear_txbd_status(bdp);
2253 bdp = next_txbd(bdp, base, tx_ring_size);
2254
2255 for (i = 0; i < frags; i++) {
2256 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2257 be16_to_cpu(bdp->length),
2258 DMA_TO_DEVICE);
2259 gfar_clear_txbd_status(bdp);
2260 bdp = next_txbd(bdp, base, tx_ring_size);
2261 }
2262
2263 bytes_sent += GFAR_CB(skb)->bytes_sent;
2264
2265 dev_kfree_skb_any(skb);
2266
2267 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2268
2269 skb_dirtytx = (skb_dirtytx + 1) &
2270 TX_RING_MOD_MASK(tx_ring_size);
2271
2272 howmany++;
2273 spin_lock(&tx_queue->txlock);
2274 tx_queue->num_txbdfree += nr_txbds;
2275 spin_unlock(&tx_queue->txlock);
2276 }
2277
2278 /* If we freed a buffer, we can restart transmission, if necessary */
2279 if (tx_queue->num_txbdfree &&
2280 netif_tx_queue_stopped(txq) &&
2281 !(test_bit(GFAR_DOWN, &priv->state)))
2282 netif_wake_subqueue(priv->ndev, tqi);
2283
2284 /* Update dirty indicators */
2285 tx_queue->skb_dirtytx = skb_dirtytx;
2286 tx_queue->dirty_tx = bdp;
2287
2288 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2289}
2290
2291static void count_errors(u32 lstatus, struct net_device *ndev)
2292{
2293 struct gfar_private *priv = netdev_priv(ndev);
2294 struct net_device_stats *stats = &ndev->stats;
2295 struct gfar_extra_stats *estats = &priv->extra_stats;
2296
2297 /* If the packet was truncated, none of the other errors matter */
2298 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2299 stats->rx_length_errors++;
2300
2301 atomic64_inc(&estats->rx_trunc);
2302
2303 return;
2304 }
2305 /* Count the errors, if there were any */
2306 if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2307 stats->rx_length_errors++;
2308
2309 if (lstatus & BD_LFLAG(RXBD_LARGE))
2310 atomic64_inc(&estats->rx_large);
2311 else
2312 atomic64_inc(&estats->rx_short);
2313 }
2314 if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2315 stats->rx_frame_errors++;
2316 atomic64_inc(&estats->rx_nonoctet);
2317 }
2318 if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2319 atomic64_inc(&estats->rx_crcerr);
2320 stats->rx_crc_errors++;
2321 }
2322 if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2323 atomic64_inc(&estats->rx_overrun);
2324 stats->rx_over_errors++;
2325 }
2326}
2327
2328static irqreturn_t gfar_receive(int irq, void *grp_id)
2329{
2330 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2331 unsigned long flags;
2332 u32 imask, ievent;
2333
2334 ievent = gfar_read(&grp->regs->ievent);
2335
2336 if (unlikely(ievent & IEVENT_FGPI)) {
2337 gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2338 return IRQ_HANDLED;
2339 }
2340
2341 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2342 spin_lock_irqsave(&grp->grplock, flags);
2343 imask = gfar_read(&grp->regs->imask);
2344 imask &= IMASK_RX_DISABLED;
2345 gfar_write(&grp->regs->imask, imask);
2346 spin_unlock_irqrestore(&grp->grplock, flags);
2347 __napi_schedule(&grp->napi_rx);
2348 } else {
2349 /* Clear IEVENT, so interrupts aren't called again
2350 * because of the packets that have already arrived.
2351 */
2352 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2353 }
2354
2355 return IRQ_HANDLED;
2356}
2357
2358/* Interrupt Handler for Transmit complete */
2359static irqreturn_t gfar_transmit(int irq, void *grp_id)
2360{
2361 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2362 unsigned long flags;
2363 u32 imask;
2364
2365 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2366 spin_lock_irqsave(&grp->grplock, flags);
2367 imask = gfar_read(&grp->regs->imask);
2368 imask &= IMASK_TX_DISABLED;
2369 gfar_write(&grp->regs->imask, imask);
2370 spin_unlock_irqrestore(&grp->grplock, flags);
2371 __napi_schedule(&grp->napi_tx);
2372 } else {
2373 /* Clear IEVENT, so interrupts aren't called again
2374 * because of the packets that have already arrived.
2375 */
2376 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2377 }
2378
2379 return IRQ_HANDLED;
2380}
2381
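/* Rx buffer recycling scheme: each Rx buffer occupies one half of a page
 * (GFAR_RXB_TRUESIZE). A buffer is recycled only if the page's other half
 * is no longer in use elsewhere (page_count() == 1) and the page is not
 * pfmemalloc-backed; in that case page_offset is flipped to the other half
 * and an extra page reference is taken, so the same page can back a later
 * descriptor without a fresh allocation.
 */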
2382static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2383 struct sk_buff *skb, bool first)
2384{
2385 int size = lstatus & BD_LENGTH_MASK;
2386 struct page *page = rxb->page;
2387
2388 if (likely(first)) {
2389 skb_put(skb, size);
2390 } else {
2391 /* the last fragment's length contains the full frame length */
2392 if (lstatus & BD_LFLAG(RXBD_LAST))
2393 size -= skb->len;
2394
2395 WARN(size < 0, "gianfar: rx fragment size underflow");
2396 if (size < 0)
2397 return false;
2398
2399 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2400 rxb->page_offset + RXBUF_ALIGNMENT,
2401 size, GFAR_RXB_TRUESIZE);
2402 }
2403
2404 /* try reuse page */
2405 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2406 return false;
2407
2408 /* change offset to the other half */
2409 rxb->page_offset ^= GFAR_RXB_TRUESIZE;
2410
2411 page_ref_inc(page);
2412
2413 return true;
2414}
2415
2416static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2417 struct gfar_rx_buff *old_rxb)
2418{
2419 struct gfar_rx_buff *new_rxb;
2420 u16 nta = rxq->next_to_alloc;
2421
2422 new_rxb = &rxq->rx_buff[nta];
2423
2424 /* find next buf that can reuse a page */
2425 nta++;
2426 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2427
2428 /* copy page reference */
2429 *new_rxb = *old_rxb;
2430
2431 /* sync for use by the device */
2432 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2433 old_rxb->page_offset,
2434 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2435}
2436
2437static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2438 u32 lstatus, struct sk_buff *skb)
2439{
2440 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2441 struct page *page = rxb->page;
2442 bool first = false;
2443
2444 if (likely(!skb)) {
2445 void *buff_addr = page_address(page) + rxb->page_offset;
2446
2447 skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2448 if (unlikely(!skb)) {
2449 gfar_rx_alloc_err(rx_queue);
2450 return NULL;
2451 }
2452 skb_reserve(skb, RXBUF_ALIGNMENT);
2453 first = true;
2454 }
2455
2456 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
2457 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2458
2459 if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
2460 /* reuse the free half of the page */
2461 gfar_reuse_rx_page(rx_queue, rxb);
2462 } else {
2463 /* page cannot be reused, unmap it */
2464 dma_unmap_page(rx_queue->dev, rxb->dma,
2465 PAGE_SIZE, DMA_FROM_DEVICE);
2466 }
2467
2468 /* clear rxb content */
2469 rxb->page = NULL;
2470
2471 return skb;
2472}
2473
2474static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2475{
2476 /* If valid headers were found, and valid sums
2477 * were verified, then we tell the kernel that no
2478 * checksumming is necessary. Otherwise, the checksum is left unverified.
2479 */
2480 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
2481 (RXFCB_CIP | RXFCB_CTU))
2482 skb->ip_summed = CHECKSUM_UNNECESSARY;
2483 else
2484 skb_checksum_none_assert(skb);
2485}
2486
2487/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2488static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2489{
2490 struct gfar_private *priv = netdev_priv(ndev);
2491 struct rxfcb *fcb = NULL;
2492
2493 /* fcb is at the beginning if exists */
2494 fcb = (struct rxfcb *)skb->data;
2495
2496 /* Remove the FCB from the skb
2497 * Remove the padded bytes, if there are any
2498 */
2499 if (priv->uses_rxfcb)
2500 skb_pull(skb, GMAC_FCB_LEN);
2501
2502 /* Get receive timestamp from the skb */
2503 if (priv->hwts_rx_en) {
2504 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2505 u64 *ns = (u64 *) skb->data;
2506
2507 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2508 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2509 }
2510
2511 if (priv->padding)
2512 skb_pull(skb, priv->padding);
2513
2514 /* Trim off the FCS */
2515 pskb_trim(skb, skb->len - ETH_FCS_LEN);
2516
2517 if (ndev->features & NETIF_F_RXCSUM)
2518 gfar_rx_checksum(skb, fcb);
2519
2520 /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2521 * Even if vlan rx accel is disabled, on some chips
2522 * RXFCB_VLN is pseudo randomly set.
2523 */
2524 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2525 be16_to_cpu(fcb->flags) & RXFCB_VLN)
2526 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2527 be16_to_cpu(fcb->vlctl));
2528}
2529
2530/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2531 * until the budget/quota has been reached. Returns the number
2532 * of frames handled
2533 */
2534static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
2535 int rx_work_limit)
2536{
2537 struct net_device *ndev = rx_queue->ndev;
2538 struct gfar_private *priv = netdev_priv(ndev);
2539 struct rxbd8 *bdp;
2540 int i, howmany = 0;
2541 struct sk_buff *skb = rx_queue->skb;
2542 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
2543 unsigned int total_bytes = 0, total_pkts = 0;
2544
2545 /* Get the first full descriptor */
2546 i = rx_queue->next_to_clean;
2547
2548 while (rx_work_limit--) {
2549 u32 lstatus;
2550
2551 if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
2552 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2553 cleaned_cnt = 0;
2554 }
2555
2556 bdp = &rx_queue->rx_bd_base[i];
2557 lstatus = be32_to_cpu(bdp->lstatus);
2558 if (lstatus & BD_LFLAG(RXBD_EMPTY))
2559 break;
2560
2561 /* lost RXBD_LAST descriptor due to overrun */
2562 if (skb &&
2563 (lstatus & BD_LFLAG(RXBD_FIRST))) {
2564 /* discard faulty buffer */
2565 dev_kfree_skb(skb);
2566 skb = NULL;
2567 rx_queue->stats.rx_dropped++;
2568
2569 /* can continue normally */
2570 }
2571
2572 /* order rx buffer descriptor reads */
2573 rmb();
2574
2575 /* fetch next to clean buffer from the ring */
2576 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
2577 if (unlikely(!skb))
2578 break;
2579
2580 cleaned_cnt++;
2581 howmany++;
2582
2583 if (unlikely(++i == rx_queue->rx_ring_size))
2584 i = 0;
2585
2586 rx_queue->next_to_clean = i;
2587
2588 /* fetch next buffer if not the last in frame */
2589 if (!(lstatus & BD_LFLAG(RXBD_LAST)))
2590 continue;
2591
2592 if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
2593 count_errors(lstatus, ndev);
2594
2595 /* discard faulty buffer */
2596 dev_kfree_skb(skb);
2597 skb = NULL;
2598 rx_queue->stats.rx_dropped++;
2599 continue;
2600 }
2601
2602 gfar_process_frame(ndev, skb);
2603
2604 /* Increment the number of packets */
2605 total_pkts++;
2606 total_bytes += skb->len;
2607
2608 skb_record_rx_queue(skb, rx_queue->qindex);
2609
2610 skb->protocol = eth_type_trans(skb, ndev);
2611
2612 /* Send the packet up the stack */
2613 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2614
2615 skb = NULL;
2616 }
2617
2618 /* Store incomplete frames for completion */
2619 rx_queue->skb = skb;
2620
2621 rx_queue->stats.rx_packets += total_pkts;
2622 rx_queue->stats.rx_bytes += total_bytes;
2623
2624 if (cleaned_cnt)
2625 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2626
2627 /* Update Last Free RxBD pointer for LFC */
2628 if (unlikely(priv->tx_actual_en)) {
2629 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2630
2631 gfar_write(rx_queue->rfbptr, bdp_dma);
2632 }
2633
2634 return howmany;
2635}
2636
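/* NAPI Rx poll for single-queue (GFAR_SQ_POLLING) groups: each interrupt
 * group services exactly one Rx queue, so the whole budget goes to it.
 * If less than the budget was consumed, polling is completed and the Rx
 * interrupts are re-enabled; the multi-queue variant, gfar_poll_rx(),
 * splits the budget across the active queues instead.
 */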
2637static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2638{
2639 struct gfar_priv_grp *gfargrp =
2640 container_of(napi, struct gfar_priv_grp, napi_rx);
2641 struct gfar __iomem *regs = gfargrp->regs;
2642 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2643 int work_done = 0;
2644
2645 /* Clear IEVENT, so interrupts aren't called again
2646 * because of the packets that have already arrived
2647 */
2648 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2649
2650 work_done = gfar_clean_rx_ring(rx_queue, budget);
2651
2652 if (work_done < budget) {
2653 u32 imask;
2654 napi_complete_done(napi, work_done);
2655 /* Clear the halt bit in RSTAT */
2656 gfar_write(&regs->rstat, gfargrp->rstat);
2657
2658 spin_lock_irq(&gfargrp->grplock);
2659 imask = gfar_read(&regs->imask);
2660 imask |= IMASK_RX_DEFAULT;
2661 gfar_write(&regs->imask, imask);
2662 spin_unlock_irq(&gfargrp->grplock);
2663 }
2664
2665 return work_done;
2666}
2667
2668static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2669{
2670 struct gfar_priv_grp *gfargrp =
2671 container_of(napi, struct gfar_priv_grp, napi_tx);
2672 struct gfar __iomem *regs = gfargrp->regs;
2673 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2674 u32 imask;
2675
2676 /* Clear IEVENT, so interrupts aren't called again
2677 * because of the packets that have already arrived
2678 */
2679 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2680
2681 /* run Tx cleanup to completion */
2682 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2683 gfar_clean_tx_ring(tx_queue);
2684
2685 napi_complete(napi);
2686
2687 spin_lock_irq(&gfargrp->grplock);
2688 imask = gfar_read(&regs->imask);
2689 imask |= IMASK_TX_DEFAULT;
2690 gfar_write(&regs->imask, imask);
2691 spin_unlock_irq(&gfargrp->grplock);
2692
2693 return 0;
2694}
2695
2696static int gfar_poll_rx(struct napi_struct *napi, int budget)
2697{
2698 struct gfar_priv_grp *gfargrp =
2699 container_of(napi, struct gfar_priv_grp, napi_rx);
2700 struct gfar_private *priv = gfargrp->priv;
2701 struct gfar __iomem *regs = gfargrp->regs;
2702 struct gfar_priv_rx_q *rx_queue = NULL;
2703 int work_done = 0, work_done_per_q = 0;
2704 int i, budget_per_q = 0;
2705 unsigned long rstat_rxf;
2706 int num_act_queues;
2707
2708 /* Clear IEVENT, so interrupts aren't called again
2709 * because of the packets that have already arrived
2710 */
2711 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2712
2713 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2714
2715 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2716 if (num_act_queues)
2717 budget_per_q = budget/num_act_queues;
2718
2719 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2720 /* skip queue if not active */
2721 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2722 continue;
2723
2724 rx_queue = priv->rx_queue[i];
2725 work_done_per_q =
2726 gfar_clean_rx_ring(rx_queue, budget_per_q);
2727 work_done += work_done_per_q;
2728
2729 /* finished processing this queue */
2730 if (work_done_per_q < budget_per_q) {
2731 /* clear active queue hw indication */
2732 gfar_write(&regs->rstat,
2733 RSTAT_CLEAR_RXF0 >> i);
2734 num_act_queues--;
2735
2736 if (!num_act_queues)
2737 break;
2738 }
2739 }
2740
2741 if (!num_act_queues) {
2742 u32 imask;
2743 napi_complete_done(napi, work_done);
2744
2745 /* Clear the halt bit in RSTAT */
2746 gfar_write(&regs->rstat, gfargrp->rstat);
2747
2748 spin_lock_irq(&gfargrp->grplock);
2749 imask = gfar_read(&regs->imask);
2750 imask |= IMASK_RX_DEFAULT;
2751 gfar_write(&regs->imask, imask);
2752 spin_unlock_irq(&gfargrp->grplock);
2753 }
2754
2755 return work_done;
2756}
2757
2758static int gfar_poll_tx(struct napi_struct *napi, int budget)
2759{
2760 struct gfar_priv_grp *gfargrp =
2761 container_of(napi, struct gfar_priv_grp, napi_tx);
2762 struct gfar_private *priv = gfargrp->priv;
2763 struct gfar __iomem *regs = gfargrp->regs;
2764 struct gfar_priv_tx_q *tx_queue = NULL;
2765 int has_tx_work = 0;
2766 int i;
2767
2768 /* Clear IEVENT, so interrupts aren't called again
2769 * because of the packets that have already arrived
2770 */
2771 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2772
2773 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2774 tx_queue = priv->tx_queue[i];
2775 /* run Tx cleanup to completion */
2776 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
2777 gfar_clean_tx_ring(tx_queue);
2778 has_tx_work = 1;
2779 }
2780 }
2781
2782 if (!has_tx_work) {
2783 u32 imask;
2784 napi_complete(napi);
2785
2786 spin_lock_irq(&gfargrp->grplock);
2787 imask = gfar_read(&regs->imask);
2788 imask |= IMASK_TX_DEFAULT;
2789 gfar_write(&regs->imask, imask);
2790 spin_unlock_irq(&gfargrp->grplock);
2791 }
2792
2793 return 0;
2794}
2795
2796/* GFAR error interrupt handler */
2797static irqreturn_t gfar_error(int irq, void *grp_id)
2798{
2799 struct gfar_priv_grp *gfargrp = grp_id;
2800 struct gfar __iomem *regs = gfargrp->regs;
2801 struct gfar_private *priv = gfargrp->priv;
2802 struct net_device *dev = priv->ndev;
2803
2804 /* Save ievent for future reference */
2805 u32 events = gfar_read(&regs->ievent);
2806
2807 /* Clear IEVENT */
2808 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2809
2810 /* Magic Packet is not an error. */
2811 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2812 (events & IEVENT_MAG))
2813 events &= ~IEVENT_MAG;
2814
2815 /* Log the error interrupt if Rx/Tx error messages are enabled */
2816 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2817 netdev_dbg(dev,
2818 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
2819 events, gfar_read(&regs->imask));
2820
2821 /* Update the error counters */
2822 if (events & IEVENT_TXE) {
2823 dev->stats.tx_errors++;
2824
2825 if (events & IEVENT_LC)
2826 dev->stats.tx_window_errors++;
2827 if (events & IEVENT_CRL)
2828 dev->stats.tx_aborted_errors++;
2829 if (events & IEVENT_XFUN) {
2830 netif_dbg(priv, tx_err, dev,
2831 "TX FIFO underrun, packet dropped\n");
2832 dev->stats.tx_dropped++;
2833 atomic64_inc(&priv->extra_stats.tx_underrun);
2834
2835 schedule_work(&priv->reset_task);
2836 }
2837 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
2838 }
2839 if (events & IEVENT_BSY) {
2840 dev->stats.rx_over_errors++;
2841 atomic64_inc(&priv->extra_stats.rx_bsy);
2842
2843 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
2844 gfar_read(&regs->rstat));
2845 }
2846 if (events & IEVENT_BABR) {
2847 dev->stats.rx_errors++;
2848 atomic64_inc(&priv->extra_stats.rx_babr);
2849
2850 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
2851 }
2852 if (events & IEVENT_EBERR) {
2853 atomic64_inc(&priv->extra_stats.eberr);
2854 netif_dbg(priv, rx_err, dev, "bus error\n");
2855 }
2856 if (events & IEVENT_RXC)
2857 netif_dbg(priv, rx_status, dev, "control frame\n");
2858
2859 if (events & IEVENT_BABT) {
2860 atomic64_inc(&priv->extra_stats.tx_babt);
2861 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
2862 }
2863 return IRQ_HANDLED;
2864}
2865
2866/* The interrupt handler for devices with one interrupt */
2867static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2868{
2869 struct gfar_priv_grp *gfargrp = grp_id;
2870
2871 /* Save ievent for future reference */
2872 u32 events = gfar_read(&gfargrp->regs->ievent);
2873
2874 /* Check for reception */
2875 if (events & IEVENT_RX_MASK)
2876 gfar_receive(irq, grp_id);
2877
2878 /* Check for transmit completion */
2879 if (events & IEVENT_TX_MASK)
2880 gfar_transmit(irq, grp_id);
2881
2882 /* Check for errors */
2883 if (events & IEVENT_ERR_MASK)
2884 gfar_error(irq, grp_id);
2885
2886 return IRQ_HANDLED;
2887}
2888
2889#ifdef CONFIG_NET_POLL_CONTROLLER
2890/* Polling 'interrupt' - used by things like netconsole to send skbs
2891 * without having to re-enable interrupts. It's not called while
2892 * the interrupt routine is executing.
2893 */
2894static void gfar_netpoll(struct net_device *dev)
2895{
2896 struct gfar_private *priv = netdev_priv(dev);
2897 int i;
2898
2899 /* If the device has multiple interrupts, run tx/rx */
2900 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2901 for (i = 0; i < priv->num_grps; i++) {
2902 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2903
2904 disable_irq(gfar_irq(grp, TX)->irq);
2905 disable_irq(gfar_irq(grp, RX)->irq);
2906 disable_irq(gfar_irq(grp, ER)->irq);
2907 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2908 enable_irq(gfar_irq(grp, ER)->irq);
2909 enable_irq(gfar_irq(grp, RX)->irq);
2910 enable_irq(gfar_irq(grp, TX)->irq);
2911 }
2912 } else {
2913 for (i = 0; i < priv->num_grps; i++) {
2914 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2915
2916 disable_irq(gfar_irq(grp, TX)->irq);
2917 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2918 enable_irq(gfar_irq(grp, TX)->irq);
2919 }
2920 }
2921}
2922#endif
2923
2924static void free_grp_irqs(struct gfar_priv_grp *grp)
2925{
2926 free_irq(gfar_irq(grp, TX)->irq, grp);
2927 free_irq(gfar_irq(grp, RX)->irq, grp);
2928 free_irq(gfar_irq(grp, ER)->irq, grp);
2929}
2930
2931static int register_grp_irqs(struct gfar_priv_grp *grp)
2932{
2933 struct gfar_private *priv = grp->priv;
2934 struct net_device *dev = priv->ndev;
2935 int err;
2936
2937 /* If the device has multiple interrupts, register for
2938 * them. Otherwise, only register for the one
2939 */
2940 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2941 /* Install our interrupt handlers for Error,
2942 * Transmit, and Receive
2943 */
2944 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
2945 gfar_irq(grp, ER)->name, grp);
2946 if (err < 0) {
2947 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2948 gfar_irq(grp, ER)->irq);
2949
2950 goto err_irq_fail;
2951 }
2952 enable_irq_wake(gfar_irq(grp, ER)->irq);
2953
2954 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2955 gfar_irq(grp, TX)->name, grp);
2956 if (err < 0) {
2957 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2958 gfar_irq(grp, TX)->irq);
2959 goto tx_irq_fail;
2960 }
2961 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2962 gfar_irq(grp, RX)->name, grp);
2963 if (err < 0) {
2964 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2965 gfar_irq(grp, RX)->irq);
2966 goto rx_irq_fail;
2967 }
2968 enable_irq_wake(gfar_irq(grp, RX)->irq);
2969
2970 } else {
2971 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2972 gfar_irq(grp, TX)->name, grp);
2973 if (err < 0) {
2974 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2975 gfar_irq(grp, TX)->irq);
2976 goto err_irq_fail;
2977 }
2978 enable_irq_wake(gfar_irq(grp, TX)->irq);
2979 }
2980
2981 return 0;
2982
2983rx_irq_fail:
2984 free_irq(gfar_irq(grp, TX)->irq, grp);
2985tx_irq_fail:
2986 free_irq(gfar_irq(grp, ER)->irq, grp);
2987err_irq_fail:
2988 return err;
2989
2990}
2991
2992static void gfar_free_irq(struct gfar_private *priv)
2993{
2994 int i;
2995
2996 /* Free the IRQs */
2997 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2998 for (i = 0; i < priv->num_grps; i++)
2999 free_grp_irqs(&priv->gfargrp[i]);
3000 } else {
3001 for (i = 0; i < priv->num_grps; i++)
3002 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
3003 &priv->gfargrp[i]);
3004 }
3005}
3006
3007static int gfar_request_irq(struct gfar_private *priv)
3008{
3009 int err, i, j;
3010
3011 for (i = 0; i < priv->num_grps; i++) {
3012 err = register_grp_irqs(&priv->gfargrp[i]);
3013 if (err) {
3014 for (j = 0; j < i; j++)
3015 free_grp_irqs(&priv->gfargrp[j]);
3016 return err;
3017 }
3018 }
3019
3020 return 0;
3021}
3022
3023/* Called when something needs to use the ethernet device
3024 * Returns 0 for success.
3025 */
3026static int gfar_enet_open(struct net_device *dev)
3027{
3028 struct gfar_private *priv = netdev_priv(dev);
3029 int err;
3030
3031 err = init_phy(dev);
3032 if (err)
3033 return err;
3034
3035 err = gfar_request_irq(priv);
3036 if (err)
3037 return err;
3038
3039 err = startup_gfar(dev);
3040 if (err)
3041 return err;
3042
3043 return err;
3044}
3045
3046/* Stops the kernel queue, and halts the controller */
3047static int gfar_close(struct net_device *dev)
3048{
3049 struct gfar_private *priv = netdev_priv(dev);
3050
3051 cancel_work_sync(&priv->reset_task);
3052 stop_gfar(dev);
3053
3054 /* Disconnect from the PHY */
3055 phy_disconnect(dev->phydev);
3056
3057 gfar_free_irq(priv);
3058
3059 return 0;
3060}
3061
3062/* Clears each of the exact match registers to zero, so they
3063 * don't interfere with normal reception
3064 */
3065static void gfar_clear_exact_match(struct net_device *dev)
3066{
3067 int idx;
3068 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3069
3070 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3071 gfar_set_mac_for_addr(dev, idx, zero_arr);
3072}
3073
3074/* Update the hash table based on the current list of multicast
3075 * addresses we subscribe to. Also, change the promiscuity of
3076 * the device based on the flags (this function is called
3077 * whenever dev->flags is changed)
3078 */
3079static void gfar_set_multi(struct net_device *dev)
3080{
3081 struct netdev_hw_addr *ha;
3082 struct gfar_private *priv = netdev_priv(dev);
3083 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3084 u32 tempval;
3085
3086 if (dev->flags & IFF_PROMISC) {
3087 /* Set RCTRL to PROM */
3088 tempval = gfar_read(&regs->rctrl);
3089 tempval |= RCTRL_PROM;
3090 gfar_write(&regs->rctrl, tempval);
3091 } else {
3092 /* Set RCTRL to not PROM */
3093 tempval = gfar_read(&regs->rctrl);
3094 tempval &= ~(RCTRL_PROM);
3095 gfar_write(&regs->rctrl, tempval);
3096 }
3097
3098 if (dev->flags & IFF_ALLMULTI) {
3099 /* Set the hash to rx all multicast frames */
3100 gfar_write(&regs->igaddr0, 0xffffffff);
3101 gfar_write(&regs->igaddr1, 0xffffffff);
3102 gfar_write(&regs->igaddr2, 0xffffffff);
3103 gfar_write(&regs->igaddr3, 0xffffffff);
3104 gfar_write(&regs->igaddr4, 0xffffffff);
3105 gfar_write(&regs->igaddr5, 0xffffffff);
3106 gfar_write(&regs->igaddr6, 0xffffffff);
3107 gfar_write(&regs->igaddr7, 0xffffffff);
3108 gfar_write(&regs->gaddr0, 0xffffffff);
3109 gfar_write(&regs->gaddr1, 0xffffffff);
3110 gfar_write(&regs->gaddr2, 0xffffffff);
3111 gfar_write(&regs->gaddr3, 0xffffffff);
3112 gfar_write(&regs->gaddr4, 0xffffffff);
3113 gfar_write(&regs->gaddr5, 0xffffffff);
3114 gfar_write(&regs->gaddr6, 0xffffffff);
3115 gfar_write(&regs->gaddr7, 0xffffffff);
3116 } else {
3117 int em_num;
3118 int idx;
3119
3120 /* zero out the hash */
3121 gfar_write(&regs->igaddr0, 0x0);
3122 gfar_write(&regs->igaddr1, 0x0);
3123 gfar_write(&regs->igaddr2, 0x0);
3124 gfar_write(&regs->igaddr3, 0x0);
3125 gfar_write(&regs->igaddr4, 0x0);
3126 gfar_write(&regs->igaddr5, 0x0);
3127 gfar_write(&regs->igaddr6, 0x0);
3128 gfar_write(&regs->igaddr7, 0x0);
3129 gfar_write(&regs->gaddr0, 0x0);
3130 gfar_write(&regs->gaddr1, 0x0);
3131 gfar_write(&regs->gaddr2, 0x0);
3132 gfar_write(&regs->gaddr3, 0x0);
3133 gfar_write(&regs->gaddr4, 0x0);
3134 gfar_write(&regs->gaddr5, 0x0);
3135 gfar_write(&regs->gaddr6, 0x0);
3136 gfar_write(&regs->gaddr7, 0x0);
3137
3138 /* If we have extended hash tables, we need to
3139 * clear the exact match registers to prepare for
3140 * setting them
3141 */
3142 if (priv->extended_hash) {
3143 em_num = GFAR_EM_NUM + 1;
3144 gfar_clear_exact_match(dev);
3145 idx = 1;
3146 } else {
3147 idx = 0;
3148 em_num = 0;
3149 }
3150
3151 if (netdev_mc_empty(dev))
3152 return;
3153
3154 /* Parse the list, and set the appropriate bits */
3155 netdev_for_each_mc_addr(ha, dev) {
3156 if (idx < em_num) {
3157 gfar_set_mac_for_addr(dev, idx, ha->addr);
3158 idx++;
3159 } else
3160 gfar_set_hash_for_addr(dev, ha->addr);
3161 }
3162 }
3163}
3164
3165void gfar_mac_reset(struct gfar_private *priv)
3166{
3167 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3168 u32 tempval;
3169
3170 /* Reset MAC layer */
3171 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
3172
3173 /* We need to delay at least 3 TX clocks */
3174 udelay(3);
3175
3176 /* the soft reset bit is not self-resetting, so we need to
3177 * clear it before resuming normal operation
3178 */
3179 gfar_write(&regs->maccfg1, 0);
3180
3181 udelay(3);
3182
3183 gfar_rx_offload_en(priv);
3184
3185 /* Initialize the max receive frame/buffer lengths */
3186 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
3187 gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3188
3189 /* Initialize the Minimum Frame Length Register */
3190 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
3191
3192 /* Initialize MACCFG2. */
3193 tempval = MACCFG2_INIT_SETTINGS;
3194
3195 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
3196 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
3197 * and by checking RxBD[LG] and discarding larger than MAXFRM.
3198 */
3199 if (gfar_has_errata(priv, GFAR_ERRATA_74))
3200 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
3201
3202 gfar_write(&regs->maccfg2, tempval);
3203
3204 /* Clear mac addr hash registers */
3205 gfar_write(&regs->igaddr0, 0);
3206 gfar_write(&regs->igaddr1, 0);
3207 gfar_write(&regs->igaddr2, 0);
3208 gfar_write(&regs->igaddr3, 0);
3209 gfar_write(&regs->igaddr4, 0);
3210 gfar_write(&regs->igaddr5, 0);
3211 gfar_write(&regs->igaddr6, 0);
3212 gfar_write(&regs->igaddr7, 0);
3213
3214 gfar_write(&regs->gaddr0, 0);
3215 gfar_write(&regs->gaddr1, 0);
3216 gfar_write(&regs->gaddr2, 0);
3217 gfar_write(&regs->gaddr3, 0);
3218 gfar_write(&regs->gaddr4, 0);
3219 gfar_write(&regs->gaddr5, 0);
3220 gfar_write(&regs->gaddr6, 0);
3221 gfar_write(&regs->gaddr7, 0);
3222
3223 if (priv->extended_hash)
3224 gfar_clear_exact_match(priv->ndev);
3225
3226 gfar_mac_rx_config(priv);
3227
3228 gfar_mac_tx_config(priv);
3229
3230 gfar_set_mac_address(priv->ndev);
3231
3232 gfar_set_multi(priv->ndev);
3233
3234 /* clear ievent and imask before configuring coalescing */
3235 gfar_ints_disable(priv);
3236
3237 /* Configure the coalescing support */
3238 gfar_configure_coalescing_all(priv);
3239}
3240
3241static void gfar_hw_init(struct gfar_private *priv)
3242{
3243 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3244 u32 attrs;
3245
3246 /* Stop the DMA engine now, in case it was running before
3247 * (The firmware could have used it, and left it running).
3248 */
3249 gfar_halt(priv);
3250
3251 gfar_mac_reset(priv);
3252
3253 /* Zero out the rmon mib registers if it has them */
3254 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
3255 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
3256
3257 /* Mask off the CAM interrupts */
3258 gfar_write(&regs->rmon.cam1, 0xffffffff);
3259 gfar_write(&regs->rmon.cam2, 0xffffffff);
3260 }
3261
3262 /* Initialize ECNTRL */
3263 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
3264
3265 /* Set the extraction length and index */
3266 attrs = ATTRELI_EL(priv->rx_stash_size) |
3267 ATTRELI_EI(priv->rx_stash_index);
3268
3269 gfar_write(&regs->attreli, attrs);
3270
3271 /* Start with defaults, and add stashing
3272 * depending on driver parameters
3273 */
3274 attrs = ATTR_INIT_SETTINGS;
3275
3276 if (priv->bd_stash_en)
3277 attrs |= ATTR_BDSTASH;
3278
3279 if (priv->rx_stash_size != 0)
3280 attrs |= ATTR_BUFSTASH;
3281
3282 gfar_write(&regs->attr, attrs);
3283
3284 /* FIFO configs */
3285 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
3286 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
3287 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
3288
3289 /* Program the interrupt steering regs, only for MG devices */
3290 if (priv->num_grps > 1)
3291 gfar_write_isrg(priv);
3292}
3293
3294static const struct net_device_ops gfar_netdev_ops = {
3295 .ndo_open = gfar_enet_open,
3296 .ndo_start_xmit = gfar_start_xmit,
3297 .ndo_stop = gfar_close,
3298 .ndo_change_mtu = gfar_change_mtu,
3299 .ndo_set_features = gfar_set_features,
3300 .ndo_set_rx_mode = gfar_set_multi,
3301 .ndo_tx_timeout = gfar_timeout,
3302 .ndo_do_ioctl = gfar_ioctl,
3303 .ndo_get_stats = gfar_get_stats,
3304 .ndo_change_carrier = fixed_phy_change_carrier,
3305 .ndo_set_mac_address = gfar_set_mac_addr,
3306 .ndo_validate_addr = eth_validate_addr,
3307#ifdef CONFIG_NET_POLL_CONTROLLER
3308 .ndo_poll_controller = gfar_netpoll,
3309#endif
3310};
3311
3312/* Set up the ethernet device structure, private data,
3313 * and anything else we need before we start
3314 */
3315static int gfar_probe(struct platform_device *ofdev)
3316{
3317 struct device_node *np = ofdev->dev.of_node;
3318 struct net_device *dev = NULL;
3319 struct gfar_private *priv = NULL;
3320 int err = 0, i;
3321
3322 err = gfar_of_init(ofdev, &dev);
3323
3324 if (err)
3325 return err;
3326
3327 priv = netdev_priv(dev);
3328 priv->ndev = dev;
3329 priv->ofdev = ofdev;
3330 priv->dev = &ofdev->dev;
3331 SET_NETDEV_DEV(dev, &ofdev->dev);
3332
3333 INIT_WORK(&priv->reset_task, gfar_reset_task);
3334
3335 platform_set_drvdata(ofdev, priv);
3336
3337 gfar_detect_errata(priv);
3338
3339 /* Set the dev->base_addr to the gfar reg region */
3340 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
3341
3342 /* Fill in the dev structure */
3343 dev->watchdog_timeo = TX_TIMEOUT;
3344 /* MTU range: 50 - 9586 */
3345 dev->mtu = 1500;
3346 dev->min_mtu = 50;
3347 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
3348 dev->netdev_ops = &gfar_netdev_ops;
3349 dev->ethtool_ops = &gfar_ethtool_ops;
3350
3351 /* Register NAPI handlers for each interrupt group */
3352 for (i = 0; i < priv->num_grps; i++) {
3353 if (priv->poll_mode == GFAR_SQ_POLLING) {
3354 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3355 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
3356 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
3357 gfar_poll_tx_sq, 2);
3358 } else {
3359 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3360 gfar_poll_rx, GFAR_DEV_WEIGHT);
3361 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
3362 gfar_poll_tx, 2);
3363 }
3364 }
3365
3366 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
3367 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
3368 NETIF_F_RXCSUM;
3369 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
3370 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
3371 }
3372
3373 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
3374 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
3375 NETIF_F_HW_VLAN_CTAG_RX;
3376 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3377 }
3378
3379 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3380
3381 gfar_init_addr_hash_table(priv);
3382
3383 /* Insert receive time stamps into the padding alignment bytes, plus
3384 * 2 bytes of padding to ensure proper cpu alignment.
3385 */
3386 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3387 priv->padding = 8 + DEFAULT_PADDING;
3388
3389 if (dev->features & NETIF_F_IP_CSUM ||
3390 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3391 dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
3392
3393 /* Initializing some of the rx/tx queue level parameters */
3394 for (i = 0; i < priv->num_tx_queues; i++) {
3395 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
3396 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
3397 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
3398 priv->tx_queue[i]->txic = DEFAULT_TXIC;
3399 }
3400
3401 for (i = 0; i < priv->num_rx_queues; i++) {
3402 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
3403 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
3404 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
3405 }
3406
3407 /* Always enable rx filer if available */
3408 priv->rx_filer_enable =
3409 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
3410 /* Enable most messages by default */
3411 priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
3412 /* use priority h/w tx queue scheduling for single queue devices */
3413 if (priv->num_tx_queues == 1)
3414 priv->prio_sched_en = 1;
3415
3416 set_bit(GFAR_DOWN, &priv->state);
3417
3418 gfar_hw_init(priv);
3419
3420 /* Carrier starts down, phylib will bring it up */
3421 netif_carrier_off(dev);
3422
3423 err = register_netdev(dev);
3424
3425 if (err) {
3426 pr_err("%s: Cannot register net device, aborting\n", dev->name);
3427 goto register_fail;
3428 }
3429
3430 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
3431 priv->wol_supported |= GFAR_WOL_MAGIC;
3432
3433 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
3434 priv->rx_filer_enable)
3435 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
3436
3437 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
3438
3439 /* fill out IRQ number and name fields */
3440 for (i = 0; i < priv->num_grps; i++) {
3441 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3442 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3443 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
3444 dev->name, "_g", '0' + i, "_tx");
3445 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
3446 dev->name, "_g", '0' + i, "_rx");
3447 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
3448 dev->name, "_g", '0' + i, "_er");
3449 } else
3450 strcpy(gfar_irq(grp, TX)->name, dev->name);
3451 }
3452
3453 /* Initialize the filer table */
3454 gfar_init_filer_table(priv);
3455
3456 /* Print out the device info */
3457 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
3458
3459 /* Even more device info helps when determining which kernel
3460 * provided which set of benchmarks.
3461 */
3462 netdev_info(dev, "Running with NAPI enabled\n");
3463 for (i = 0; i < priv->num_rx_queues; i++)
3464 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
3465 i, priv->rx_queue[i]->rx_ring_size);
3466 for (i = 0; i < priv->num_tx_queues; i++)
3467 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
3468 i, priv->tx_queue[i]->tx_ring_size);
3469
3470 return 0;
3471
3472register_fail:
3473 if (of_phy_is_fixed_link(np))
3474 of_phy_deregister_fixed_link(np);
3475 unmap_group_regs(priv);
3476 gfar_free_rx_queues(priv);
3477 gfar_free_tx_queues(priv);
3478 of_node_put(priv->phy_node);
3479 of_node_put(priv->tbi_node);
3480 free_gfar_dev(priv);
3481 return err;
3482}
3483
3484static int gfar_remove(struct platform_device *ofdev)
3485{
3486 struct gfar_private *priv = platform_get_drvdata(ofdev);
3487 struct device_node *np = ofdev->dev.of_node;
3488
3489 of_node_put(priv->phy_node);
3490 of_node_put(priv->tbi_node);
3491
3492 unregister_netdev(priv->ndev);
3493
3494 if (of_phy_is_fixed_link(np))
3495 of_phy_deregister_fixed_link(np);
3496
3497 unmap_group_regs(priv);
3498 gfar_free_rx_queues(priv);
3499 gfar_free_tx_queues(priv);
3500 free_gfar_dev(priv);
3501
3502 return 0;
3503}
3504
3505#ifdef CONFIG_PM
3506
3507static void __gfar_filer_disable(struct gfar_private *priv)
3508{
3509 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3510 u32 temp;
3511
3512 temp = gfar_read(&regs->rctrl);
3513 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
3514 gfar_write(&regs->rctrl, temp);
3515}
3516
3517static void __gfar_filer_enable(struct gfar_private *priv)
3518{
3519 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3520 u32 temp;
3521
3522 temp = gfar_read(&regs->rctrl);
3523 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
3524 gfar_write(&regs->rctrl, temp);
3525}
3526
3527/* Filer rules implementing wol capabilities */
3528static void gfar_filer_config_wol(struct gfar_private *priv)
3529{
3530 unsigned int i;
3531 u32 rqfcr;
3532
3533 __gfar_filer_disable(priv);
3534
3535 /* clear the filer table, reject any packet by default */
3536 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
3537 for (i = 0; i <= MAX_FILER_IDX; i++)
3538 gfar_write_filer(priv, i, rqfcr, 0);
3539
3540 i = 0;
3541 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
3542 /* unicast packet, accept it */
3543 struct net_device *ndev = priv->ndev;
3544 /* get the default rx queue index */
3545 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
3546 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
3547 (ndev->dev_addr[1] << 8) |
3548 ndev->dev_addr[2];
3549
3550 rqfcr = (qindex << 10) | RQFCR_AND |
3551 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
3552
3553 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3554
3555 dest_mac_addr = (ndev->dev_addr[3] << 16) |
3556 (ndev->dev_addr[4] << 8) |
3557 ndev->dev_addr[5];
3558 rqfcr = (qindex << 10) | RQFCR_GPI |
3559 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
3560 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3561 }
3562
3563 __gfar_filer_enable(priv);
3564}
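
/* For illustration: wake-on-LAN is selected from userspace via ethtool,
 * e.g. "ethtool -s <iface> wol g" for magic-packet wake (GFAR_WOL_MAGIC)
 * or "ethtool -s <iface> wol u" for the unicast filer wake implemented
 * above (GFAR_WOL_FILER_UCAST); <iface> is a placeholder interface name.
 */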
3565
3566static void gfar_filer_restore_table(struct gfar_private *priv)
3567{
3568 u32 rqfcr, rqfpr;
3569 unsigned int i;
3570
3571 __gfar_filer_disable(priv);
3572
3573 for (i = 0; i <= MAX_FILER_IDX; i++) {
3574 rqfcr = priv->ftp_rqfcr[i];
3575 rqfpr = priv->ftp_rqfpr[i];
3576 gfar_write_filer(priv, i, rqfcr, rqfpr);
3577 }
3578
3579 __gfar_filer_enable(priv);
3580}
3581
3582/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
3583static void gfar_start_wol_filer(struct gfar_private *priv)
3584{
3585 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3586 u32 tempval;
3587 int i = 0;
3588
3589 /* Enable Rx hw queues */
3590 gfar_write(&regs->rqueue, priv->rqueue);
3591
3592 /* Initialize DMACTRL to have WWR and WOP */
3593 tempval = gfar_read(&regs->dmactrl);
3594 tempval |= DMACTRL_INIT_SETTINGS;
3595 gfar_write(&regs->dmactrl, tempval);
3596
3597 /* Make sure we aren't stopped */
3598 tempval = gfar_read(&regs->dmactrl);
3599 tempval &= ~DMACTRL_GRS;
3600 gfar_write(&regs->dmactrl, tempval);
3601
3602 for (i = 0; i < priv->num_grps; i++) {
3603 regs = priv->gfargrp[i].regs;
3604 /* Clear RHLT, so that the DMA starts polling now */
3605 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
3606 /* enable the Filer General Purpose Interrupt */
3607 gfar_write(&regs->imask, IMASK_FGPI);
3608 }
3609
3610 /* Enable Rx DMA */
3611 tempval = gfar_read(&regs->maccfg1);
3612 tempval |= MACCFG1_RX_EN;
3613 gfar_write(&regs->maccfg1, tempval);
3614}
3615
3616static int gfar_suspend(struct device *dev)
3617{
3618 struct gfar_private *priv = dev_get_drvdata(dev);
3619 struct net_device *ndev = priv->ndev;
3620 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3621 u32 tempval;
3622 u16 wol = priv->wol_opts;
3623
3624 if (!netif_running(ndev))
3625 return 0;
3626
3627 disable_napi(priv);
3628 netif_tx_lock(ndev);
3629 netif_device_detach(ndev);
3630 netif_tx_unlock(ndev);
3631
3632 gfar_halt(priv);
3633
3634 if (wol & GFAR_WOL_MAGIC) {
3635 /* Enable interrupt on Magic Packet */
3636 gfar_write(&regs->imask, IMASK_MAG);
3637
3638 /* Enable Magic Packet mode */
3639 tempval = gfar_read(&regs->maccfg2);
3640 tempval |= MACCFG2_MPEN;
3641 gfar_write(&regs->maccfg2, tempval);
3642
3643 /* re-enable the Rx block */
3644 tempval = gfar_read(&regs->maccfg1);
3645 tempval |= MACCFG1_RX_EN;
3646 gfar_write(&regs->maccfg1, tempval);
3647
3648 } else if (wol & GFAR_WOL_FILER_UCAST) {
3649 gfar_filer_config_wol(priv);
3650 gfar_start_wol_filer(priv);
3651
3652 } else {
3653 phy_stop(ndev->phydev);
3654 }
3655
3656 return 0;
3657}
3658
3659static int gfar_resume(struct device *dev)
3660{
3661 struct gfar_private *priv = dev_get_drvdata(dev);
3662 struct net_device *ndev = priv->ndev;
3663 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3664 u32 tempval;
3665 u16 wol = priv->wol_opts;
3666
3667 if (!netif_running(ndev))
3668 return 0;
3669
3670 if (wol & GFAR_WOL_MAGIC) {
3671 /* Disable Magic Packet mode */
3672 tempval = gfar_read(&regs->maccfg2);
3673 tempval &= ~MACCFG2_MPEN;
3674 gfar_write(&regs->maccfg2, tempval);
3675
3676 } else if (wol & GFAR_WOL_FILER_UCAST) {
3677 /* need to stop rx only, tx is already down */
3678 gfar_halt(priv);
3679 gfar_filer_restore_table(priv);
3680
3681 } else {
3682 phy_start(ndev->phydev);
3683 }
3684
3685 gfar_start(priv);
3686
3687 netif_device_attach(ndev);
3688 enable_napi(priv);
3689
3690 return 0;
3691}
3692
3693static int gfar_restore(struct device *dev)
3694{
3695 struct gfar_private *priv = dev_get_drvdata(dev);
3696 struct net_device *ndev = priv->ndev;
3697
3698 if (!netif_running(ndev)) {
3699 netif_device_attach(ndev);
3700
3701 return 0;
3702 }
3703
3704 gfar_init_bds(ndev);
3705
3706 gfar_mac_reset(priv);
3707
3708 gfar_init_tx_rx_base(priv);
3709
3710 gfar_start(priv);
3711
3712 priv->oldlink = 0;
3713 priv->oldspeed = 0;
3714 priv->oldduplex = -1;
3715
3716 if (ndev->phydev)
3717 phy_start(ndev->phydev);
3718
3719 netif_device_attach(ndev);
3720 enable_napi(priv);
3721
3722 return 0;
3723}
3724
3725static const struct dev_pm_ops gfar_pm_ops = {
3726 .suspend = gfar_suspend,
3727 .resume = gfar_resume,
3728 .freeze = gfar_suspend,
3729 .thaw = gfar_resume,
3730 .restore = gfar_restore,
3731};
3732
3733#define GFAR_PM_OPS (&gfar_pm_ops)
3734
3735#else
3736
3737#define GFAR_PM_OPS NULL
3738
3739#endif
3740
3741static const struct of_device_id gfar_match[] =
3742{
3743 {
3744 .type = "network",
3745 .compatible = "gianfar",
3746 },
3747 {
3748 .compatible = "fsl,etsec2",
3749 },
3750 {},
3751};
3752MODULE_DEVICE_TABLE(of, gfar_match);
3753
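/* A minimal, illustrative device tree node matched by the table above
 * (an abridged sketch only; the authoritative format is the board dts and
 * the gianfar binding, and the addresses/interrupts below are placeholder
 * values):
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		model = "eTSEC";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		interrupts = <29 2 30 2 34 2>;
 *		phy-handle = <&phy0>;
 *	};
 */
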
3754/* Structure for a device driver */
3755static struct platform_driver gfar_driver = {
3756 .driver = {
3757 .name = "fsl-gianfar",
3758 .pm = GFAR_PM_OPS,
3759 .of_match_table = gfar_match,
3760 },
3761 .probe = gfar_probe,
3762 .remove = gfar_remove,
3763};
3764
3765module_platform_driver(gfar_driver);