// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <asm/unaligned.h>	/* for get_unaligned_le32() */
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES	(5 * HZ)
#define THROTTLE_JIFFIES	(HZ / 8)
#define UNLINK_TIMEOUT_MS	3

#define RX_MAX_QUEUE_MEMORY	(60 * 1518)

#define SS_USB_PKT_SIZE		(1024)
#define HS_USB_PKT_SIZE		(512)
#define FS_USB_PKT_SIZE		(64)

#define MAX_RX_FIFO_SIZE	(12 * 1024)
#define MAX_TX_FIFO_SIZE	(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE	(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY	(0x0800)
#define MAX_SINGLE_PACKET_SIZE	(9000)
#define DEFAULT_TX_CSUM_ENABLE	(true)
#define DEFAULT_RX_CSUM_ENABLE	(true)
#define DEFAULT_TSO_CSUM_ENABLE	(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD	(true)
#define TX_OVERHEAD		(8)
#define RXW_PADDING		2

#define LAN78XX_USB_VENDOR_ID	(0x0424)
#define LAN7800_USB_PRODUCT_ID	(0x7800)
#define LAN7850_USB_PRODUCT_ID	(0x7850)
#define LAN7801_USB_PRODUCT_ID	(0x7801)
#define LAN78XX_EEPROM_MAGIC	(0x78A5)
#define LAN78XX_OTP_MAGIC	(0x78F3)

#define MII_READ		1
#define MII_WRITE		0

#define EEPROM_INDICATOR	(0xA5)
#define EEPROM_MAC_OFFSET	(0x01)
#define MAX_EEPROM_SIZE		512
#define OTP_INDICATOR_1		(0xF3)
#define OTP_INDICATOR_2		(0xF7)

#define WAKE_ALL		(WAKE_PHY | WAKE_UCAST | \
				 WAKE_MCAST | WAKE_BCAST | \
				 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE		1
#define BULK_OUT_PIPE		2

/* default autosuspend delay (mSec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER	(1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP		(32)
#define INT_EP_INTEP		(31)
#define INT_EP_OTP_WR_DONE	(28)
#define INT_EP_EEE_TX_LPI_START	(26)
#define INT_EP_EEE_TX_LPI_STOP	(25)
#define INT_EP_EEE_RX_LPI	(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO		(22)
#define INT_EP_TXE		(21)
#define INT_EP_USB_STATUS	(20)
#define INT_EP_TX_DIS		(19)
#define INT_EP_RX_DIS		(18)
#define INT_EP_PHY		(17)
#define INT_EP_DP		(16)
#define INT_EP_MAC_ERR		(15)
#define INT_EP_TDFU		(14)
#define INT_EP_TDFO		(13)
#define INT_EP_UTX		(12)
#define INT_EP_GPIO_11		(11)
#define INT_EP_GPIO_10		(10)
#define INT_EP_GPIO_9		(9)
#define INT_EP_GPIO_8		(8)
#define INT_EP_GPIO_7		(7)
#define INT_EP_GPIO_6		(6)
#define INT_EP_GPIO_5		(5)
#define INT_EP_GPIO_4		(4)
#define INT_EP_GPIO_3		(3)
#define INT_EP_GPIO_2		(2)
#define INT_EP_GPIO_1		(1)
#define INT_EP_GPIO_0		(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data { /* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT		0
#define EVENT_RX_HALT		1
#define EVENT_RX_MEMORY		2
#define EVENT_STS_SPLIT		3
#define EVENT_LINK_RESET	4
#define EVENT_RX_PAUSED		5
#define EVENT_DEV_WAKING	6
#define EVENT_DEV_ASLEEP	7
#define EVENT_DEV_OPEN		8
#define EVENT_STAT_UPDATE	9

struct statstage {
	struct mutex access_lock; /* for stats access */
	struct lan78xx_statstage saved;
	struct lan78xx_statstage rollover_count;
	struct lan78xx_statstage rollover_max;
	struct lan78xx_statstage64 curr_stat;
};

struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;
	struct mutex irq_lock; /* for irq bus access */
};

struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;

	int rx_qlen;
	int tx_qlen;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;
	struct delayed_work wq;

	int msg_enable;

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu; /* count any extra framing */
	size_t rx_urb_size; /* size for rx urbs */

	unsigned long flags;

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;
	struct timer_list stat_monitor;

	unsigned long data[5];

	int link_on;
	u8 mdix_ctrl;

	u32 chipid;
	u32 chiprev;
	struct mii_bus *mdiobus;
	phy_interface_t interface;

	int fc_autoneg;
	u8 fc_request_control;

	int delta;
	struct statstage stats;

	struct irq_domain_data domain_data;
};

/* define external phy id */
#define PHY_LAN8835	(0x0007C130)
#define PHY_KSZ9031RNX	(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

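/* Device registers are accessed with USB vendor control transfers. The
 * payload is a single little-endian u32; a heap bounce buffer is used
 * because usb_control_msg() needs DMA-able memory (stack buffers are
 * not), and the value is byte-swapped to/from CPU order around the
 * transfer.
 */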
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

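/* The hardware statistics counters are 32 bits wide and wrap silently.
 * Each snapshot is compared against the previously saved one: a counter
 * that appears to have gone backwards must have wrapped, so its
 * rollover count is bumped and later used to reconstruct a monotonic
 * 64-bit value in lan78xx_update_stats().
 */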
#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

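	/* 64-bit value = latest 32-bit reading plus one full counter span
	 * (rollover_max + 1) for every wrap observed so far.
	 */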
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the MII read completes or times out; called with phy_mutex held */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

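/* Compose an MII_ACC register value: PHY address, register index, the
 * read/write opcode, and the BUSY bit that kicks off the transaction.
 */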
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function: disable and restore the LED function around the access.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		return retval;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function: disable and restore the LED function around the access.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

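/* OTP access: the OTP block must first be powered up (clear PWRDN and
 * poll until it deasserts), then each byte is transferred by
 * programming the address registers, issuing a command via OTP_CMD_GO
 * and polling OTP_STATUS until the controller is no longer busy.
 */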
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}

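/* Write a block of words into one of the chip-internal RAMs (e.g. the
 * VLAN/multicast hash filter) through the indirect dataport interface:
 * select the RAM via DP_SEL, then issue address/data/WRITE commands,
 * waiting for DPRDY between each one.
 */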
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

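/* Rebuild the receive filter state. This runs from the set-RX-mode
 * path and may be called in atomic context, so the new state is only
 * staged under the spinlock here; the USB register writes (which
 * sleep) are deferred to the set_multicast work item above.
 */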
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
						(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

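/* Program MAC flow control from the resolved pause configuration:
 * either the autoneg result (lcladv/rmtadv) or the user-forced request.
 * The FCT_FLOW FIFO thresholds are speed-dependent constants and must
 * be written before flow control is enabled.
 */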
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

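/* Handle a PHY link transition signalled through the interrupt
 * endpoint: on link down, reset the MAC and stop the stats timer; on
 * link up, pick the USB LPM (U1/U2) policy based on link speed,
 * re-resolve flow control from the advertisements and restart I/O.
 */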
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}

/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0) {
			local_irq_disable();
			generic_handle_irq(dev->domain_data.phyirq);
			local_irq_enable();
		}
	} else
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	/* Validate before taking the autopm reference so the error
	 * path does not leak it.
	 */
	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	u32 link;

	mutex_lock(&net->phydev->lock);
	phy_read_status(net->phydev);
	link = net->phydev->link;
	mutex_unlock(&net->phydev->lock);

	return link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}

static int lan78xx_get_regs_len(struct net_device *netdev)
{
	if (!netdev->phydev)
		return (sizeof(lan78xx_regs));
	else
		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
}

static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len	= lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count	= lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};

static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}

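/* Pick the interface MAC address, in order of preference: whatever the
 * chip already holds (e.g. set by firmware), a device-tree/platform
 * supplied address, an address stored in EEPROM/OTP, and finally a
 * random one as last resort.
 */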
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, the chip may fail to set the mode correctly
	 * when the cable is switched between a long (~50+ m) and a short one.
	 * As a workaround, set the speed to 10 before setting it to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}

static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}

static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};

static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}

static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* Do the register access here: irq_bus_lock and irq_bus_sync_unlock
	 * are the only two callbacks executed in a non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}

static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};

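/* The chip multiplexes several interrupt sources (PHY, GPIOs, ...) over
 * its single USB interrupt endpoint. An irq_domain exposes them as
 * regular Linux interrupts; the mask state is cached in irqenable under
 * irq_lock and flushed to INT_EP_CTL in irq_bus_sync_unlock.
 */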
1956static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1957{
1958 struct device_node *of_node;
1959 struct irq_domain *irqdomain;
1960 unsigned int irqmap = 0;
1961 u32 buf;
1962 int ret = 0;
1963
1964 of_node = dev->udev->dev.parent->of_node;
1965
1966 mutex_init(&dev->domain_data.irq_lock);
1967
1968 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1969 dev->domain_data.irqenable = buf;
1970
1971 dev->domain_data.irqchip = &lan78xx_irqchip;
1972 dev->domain_data.irq_handler = handle_simple_irq;
1973
1974 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1975 &chip_domain_ops, &dev->domain_data);
1976 if (irqdomain) {
1977 /* create mapping for PHY interrupt */
1978 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1979 if (!irqmap) {
1980 irq_domain_remove(irqdomain);
1981
1982 irqdomain = NULL;
1983 ret = -EINVAL;
1984 }
1985 } else {
1986 ret = -EINVAL;
1987 }
1988
1989 dev->domain_data.irqdomain = irqdomain;
1990 dev->domain_data.phyirq = irqmap;
1991
1992 return ret;
1993}
1994
1995static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1996{
1997 if (dev->domain_data.phyirq > 0) {
1998 irq_dispose_mapping(dev->domain_data.phyirq);
1999
2000 if (dev->domain_data.irqdomain)
2001 irq_domain_remove(dev->domain_data.irqdomain);
2002 }
2003 dev->domain_data.phyirq = 0;
2004 dev->domain_data.irqdomain = NULL;
2005}
2006
2007static int lan8835_fixup(struct phy_device *phydev)
2008{
2009 int buf;
2010 int ret;
2011 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2012
2013 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2014 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2015 buf &= ~0x1800;
2016 buf |= 0x0800;
2017 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2018
2019 /* RGMII MAC TXC Delay Enable */
2020 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2021 MAC_RGMII_ID_TXC_DELAY_EN_);
2022
2023 /* RGMII TX DLL Tune Adjust */
2024 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2025
2026 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2027
2028 return 1;
2029}
2030
2031static int ksz9031rnx_fixup(struct phy_device *phydev)
2032{
2033 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2034
2035	/* Micrel KSZ9031RNX PHY configuration */
2036 /* RGMII Control Signal Pad Skew */
2037 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2038 /* RGMII RX Data Pad Skew */
2039 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2040 /* RGMII RX Clock Pad Skew */
2041 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2042
2043 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2044
2045 return 1;
2046}
2047
2048static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2049{
2050 u32 buf;
2051 int ret;
2052 struct fixed_phy_status fphy_status = {
2053 .link = 1,
2054 .speed = SPEED_1000,
2055 .duplex = DUPLEX_FULL,
2056 };
2057 struct phy_device *phydev;
2058
2059 phydev = phy_find_first(dev->mdiobus);
2060 if (!phydev) {
2061 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2062		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2063		if (IS_ERR(phydev)) {
2064 netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2065 return NULL;
2066 }
2067 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2068 dev->interface = PHY_INTERFACE_MODE_RGMII;
2069 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2070 MAC_RGMII_ID_TXC_DELAY_EN_);
2071 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2072 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2073 buf |= HW_CFG_CLK125_EN_;
2074 buf |= HW_CFG_REFCLK25_EN_;
2075 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2076 } else {
2077 if (!phydev->drv) {
2078 netdev_err(dev->net, "no PHY driver found\n");
2079 return NULL;
2080 }
2081 dev->interface = PHY_INTERFACE_MODE_RGMII;
2082 /* external PHY fixup for KSZ9031RNX */
2083 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2084 ksz9031rnx_fixup);
2085 if (ret < 0) {
2086 netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2087 return NULL;
2088 }
2089 /* external PHY fixup for LAN8835 */
2090 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2091 lan8835_fixup);
2092 if (ret < 0) {
2093 netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2094 return NULL;
2095 }
2096 /* add more external PHY fixup here if needed */
2097
2098 phydev->is_internal = false;
2099 }
2100 return phydev;
2101}
2102
2103static int lan78xx_phy_init(struct lan78xx_net *dev)
2104{
2105	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2106	int ret;
2107 u32 mii_adv;
2108 struct phy_device *phydev;
2109
2110 switch (dev->chipid) {
2111 case ID_REV_CHIP_ID_7801_:
2112 phydev = lan7801_phy_init(dev);
2113 if (!phydev) {
2114 netdev_err(dev->net, "lan7801: PHY Init Failed");
2115 return -EIO;
2116 }
2117 break;
2118
2119 case ID_REV_CHIP_ID_7800_:
2120 case ID_REV_CHIP_ID_7850_:
2121 phydev = phy_find_first(dev->mdiobus);
2122 if (!phydev) {
2123 netdev_err(dev->net, "no PHY found\n");
2124 return -EIO;
2125 }
2126 phydev->is_internal = true;
2127 dev->interface = PHY_INTERFACE_MODE_GMII;
2128 break;
2129
2130 default:
2131 netdev_err(dev->net, "Unknown CHIP ID found\n");
2132 return -EIO;
2133 }
2134
2135 /* if phyirq is not set, use polling mode in phylib */
2136 if (dev->domain_data.phyirq > 0)
2137 phydev->irq = dev->domain_data.phyirq;
2138 else
2139 phydev->irq = 0;
2140 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2141
2142 /* set to AUTOMDIX */
2143	/* enable automatic MDI/MDI-X crossover */
2144
2145 ret = phy_connect_direct(dev->net, phydev,
2146 lan78xx_link_status_change,
2147 dev->interface);
2148 if (ret) {
2149 netdev_err(dev->net, "can't attach PHY to %s\n",
2150 dev->mdiobus->id);
2151 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2152 if (phy_is_pseudo_fixed_link(phydev)) {
2153 fixed_phy_unregister(phydev);
2154 } else {
2155 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2156 0xfffffff0);
2157 phy_unregister_fixup_for_uid(PHY_LAN8835,
2158 0xfffffff0);
2159 }
2160 }
2161 return -EIO;
2162 }
2163
2164 /* MAC doesn't support 1000T Half */
2165	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2166
2167 /* support both flow controls */
2168 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2169	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2170 phydev->advertising);
2171 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2172 phydev->advertising);
2173	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2174	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2175 linkmode_or(phydev->advertising, fc, phydev->advertising);
2176
2177 if (phydev->mdio.dev.of_node) {
2178 u32 reg;
2179 int len;
2180
2181 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2182 "microchip,led-modes",
2183 sizeof(u32));
2184 if (len >= 0) {
2185 /* Ensure the appropriate LEDs are enabled */
2186 lan78xx_read_reg(dev, HW_CFG, &reg);
2187 reg &= ~(HW_CFG_LED0_EN_ |
2188 HW_CFG_LED1_EN_ |
2189 HW_CFG_LED2_EN_ |
2190 HW_CFG_LED3_EN_);
2191 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2192 (len > 1) * HW_CFG_LED1_EN_ |
2193 (len > 2) * HW_CFG_LED2_EN_ |
2194 (len > 3) * HW_CFG_LED3_EN_;
2195 lan78xx_write_reg(dev, HW_CFG, reg);
2196 }
2197 }
2198
2199 genphy_config_aneg(phydev);
2200
2201 dev->fc_autoneg = phydev->autoneg;
2202
2203 return 0;
2204}
2205
2206static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2207{
2208 int ret = 0;
2209 u32 buf;
2210 bool rxenabled;
2211
2212 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2213
2214 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2215
2216 if (rxenabled) {
2217 buf &= ~MAC_RX_RXEN_;
2218 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2219 }
2220
2221 /* add 4 to size for FCS */
2222 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2223 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2224
2225 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2226
2227 if (rxenabled) {
2228 buf |= MAC_RX_RXEN_;
2229 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2230 }
2231
2232 return 0;
2233}
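
/* Editorial note: a minimal worked example of the max-frame arithmetic
 * used above; the helper itself is illustrative, not part of the driver.
 * For MTU 1500 the callers pass size = 1500 + VLAN_ETH_HLEN (18), and the
 * register is programmed with size + 4 = 1522 so the MAC-appended FCS
 * still fits.
 */
static u32 __maybe_unused lan78xx_example_max_frame(u32 mtu)
{
	return mtu + VLAN_ETH_HLEN + 4;	/* 1500 -> 1522 */
}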
2234
2235static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2236{
2237 struct sk_buff *skb;
2238 unsigned long flags;
2239 int count = 0;
2240
2241 spin_lock_irqsave(&q->lock, flags);
2242 while (!skb_queue_empty(q)) {
2243 struct skb_data *entry;
2244 struct urb *urb;
2245 int ret;
2246
2247 skb_queue_walk(q, skb) {
2248 entry = (struct skb_data *)skb->cb;
2249 if (entry->state != unlink_start)
2250 goto found;
2251 }
2252 break;
2253found:
2254 entry->state = unlink_start;
2255 urb = entry->urb;
2256
2257		/* Take a reference on the URB so it cannot be freed while
2258		 * usb_unlink_urb() is running; usb_unlink_urb() always races
2259		 * with the .complete handler (including defer_bh), which
2260		 * could otherwise free the URB and trigger a use-after-free
2261		 * inside usb_unlink_urb().
2262		 */
2263 usb_get_urb(urb);
2264 spin_unlock_irqrestore(&q->lock, flags);
2265 /* during some PM-driven resume scenarios,
2266 * these (async) unlinks complete immediately
2267 */
2268 ret = usb_unlink_urb(urb);
2269 if (ret != -EINPROGRESS && ret != 0)
2270 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2271 else
2272 count++;
2273 usb_put_urb(urb);
2274 spin_lock_irqsave(&q->lock, flags);
2275 }
2276 spin_unlock_irqrestore(&q->lock, flags);
2277 return count;
2278}
2279
2280static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2281{
2282 struct lan78xx_net *dev = netdev_priv(netdev);
2283 int ll_mtu = new_mtu + netdev->hard_header_len;
2284 int old_hard_mtu = dev->hard_mtu;
2285 int old_rx_urb_size = dev->rx_urb_size;
2286 int ret;
2287
2288 /* no second zero-length packet read wanted after mtu-sized packets */
2289 if ((ll_mtu % dev->maxpacket) == 0)
2290 return -EDOM;
2291
2292 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2293
2294 netdev->mtu = new_mtu;
2295
2296 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2297 if (dev->rx_urb_size == old_hard_mtu) {
2298 dev->rx_urb_size = dev->hard_mtu;
2299 if (dev->rx_urb_size > old_rx_urb_size) {
2300 if (netif_running(dev->net)) {
2301 unlink_urbs(dev, &dev->rxq);
2302 tasklet_schedule(&dev->bh);
2303 }
2304 }
2305 }
2306
2307 return 0;
2308}
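
/* Editorial note: illustrative helper (not driver code) mirroring the
 * -EDOM test above.  A bulk-in read whose length is an exact multiple of
 * the endpoint max packet size would need a zero-length packet to
 * terminate the transfer; e.g. with a 512-byte high-speed endpoint,
 * ll_mtu = 1536 (MTU 1514 + 22 bytes of hard_header_len) is rejected.
 */
static bool __maybe_unused lan78xx_example_mtu_needs_zlp(int ll_mtu,
							 int maxpacket)
{
	return (ll_mtu % maxpacket) == 0;	/* 1536 % 512 == 0 -> true */
}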
2309
2310static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2311{
2312 struct lan78xx_net *dev = netdev_priv(netdev);
2313 struct sockaddr *addr = p;
2314 u32 addr_lo, addr_hi;
2315 int ret;
2316
2317 if (netif_running(netdev))
2318 return -EBUSY;
2319
2320 if (!is_valid_ether_addr(addr->sa_data))
2321 return -EADDRNOTAVAIL;
2322
2323 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2324
2325 addr_lo = netdev->dev_addr[0] |
2326 netdev->dev_addr[1] << 8 |
2327 netdev->dev_addr[2] << 16 |
2328 netdev->dev_addr[3] << 24;
2329 addr_hi = netdev->dev_addr[4] |
2330 netdev->dev_addr[5] << 8;
2331
2332 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2333 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2334
2335	/* Added to support MAC address changes */
2336	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2337	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2338
2339	return 0;
2340}
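
/* Editorial note: worked example of the little-endian address packing
 * above (helper is illustrative only).  MAC 00:11:22:33:44:55 packs to
 * addr_lo = 0x33221100 and addr_hi = 0x00005544, matching the
 * RX_ADDRL/RX_ADDRH and MAF_LO/MAF_HI register layout.
 */
static void __maybe_unused lan78xx_example_pack_mac(const u8 mac[ETH_ALEN],
						    u32 *lo, u32 *hi)
{
	*lo = mac[0] | mac[1] << 8 | mac[2] << 16 | mac[3] << 24;
	*hi = mac[4] | mac[5] << 8;
}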
2341
2342/* Enable or disable Rx checksum offload engine */
2343static int lan78xx_set_features(struct net_device *netdev,
2344 netdev_features_t features)
2345{
2346 struct lan78xx_net *dev = netdev_priv(netdev);
2347 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2348 unsigned long flags;
2349 int ret;
2350
2351 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2352
2353 if (features & NETIF_F_RXCSUM) {
2354 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2355 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2356 } else {
2357 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2358 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2359 }
2360
2361 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2362 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2363 else
2364 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2365
2366 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2367 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2368 else
2369 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2370
2371 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2372
2373 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2374
2375 return 0;
2376}
2377
2378static void lan78xx_deferred_vlan_write(struct work_struct *param)
2379{
2380 struct lan78xx_priv *pdata =
2381 container_of(param, struct lan78xx_priv, set_vlan);
2382 struct lan78xx_net *dev = pdata->dev;
2383
2384 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2385 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2386}
2387
2388static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2389 __be16 proto, u16 vid)
2390{
2391 struct lan78xx_net *dev = netdev_priv(netdev);
2392 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2393 u16 vid_bit_index;
2394 u16 vid_dword_index;
2395
2396 vid_dword_index = (vid >> 5) & 0x7F;
2397 vid_bit_index = vid & 0x1F;
2398
2399 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2400
2401 /* defer register writes to a sleepable context */
2402 schedule_work(&pdata->set_vlan);
2403
2404 return 0;
2405}
2406
2407static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2408 __be16 proto, u16 vid)
2409{
2410 struct lan78xx_net *dev = netdev_priv(netdev);
2411 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2412 u16 vid_bit_index;
2413 u16 vid_dword_index;
2414
2415 vid_dword_index = (vid >> 5) & 0x7F;
2416 vid_bit_index = vid & 0x1F;
2417
2418 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2419
2420 /* defer register writes to a sleepable context */
2421 schedule_work(&pdata->set_vlan);
2422
2423 return 0;
2424}
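
/* Editorial note: one bit per VID is kept across the DP_SEL_VHF_VLAN_LEN
 * u32 entries of vlan_table[] (vid >> 5, masked to 0x7F, selects the
 * word).  Worked example, illustrative helper only: VID 100 lands in
 * dword 100 >> 5 = 3 at bit 100 & 0x1F = 4.
 */
static void __maybe_unused lan78xx_example_vid_slot(u16 vid, u16 *dword,
						    u16 *bit)
{
	*dword = (vid >> 5) & 0x7F;	/* which u32 in the table */
	*bit = vid & 0x1F;		/* which bit inside that u32 */
}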
2425
2426static void lan78xx_init_ltm(struct lan78xx_net *dev)
2427{
2428 int ret;
2429 u32 buf;
2430 u32 regs[6] = { 0 };
2431
2432 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2433 if (buf & USB_CFG1_LTM_ENABLE_) {
2434 u8 temp[2];
2435 /* Get values from EEPROM first */
2436 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2437 if (temp[0] == 24) {
2438 ret = lan78xx_read_raw_eeprom(dev,
2439 temp[1] * 2,
2440 24,
2441 (u8 *)regs);
2442 if (ret < 0)
2443 return;
2444 }
2445 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2446 if (temp[0] == 24) {
2447 ret = lan78xx_read_raw_otp(dev,
2448 temp[1] * 2,
2449 24,
2450 (u8 *)regs);
2451 if (ret < 0)
2452 return;
2453 }
2454 }
2455 }
2456
2457 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2458 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2459 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2460 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2461 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2462 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2463}
2464
2465static int lan78xx_reset(struct lan78xx_net *dev)
2466{
2467 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2468 u32 buf;
2469 int ret = 0;
2470 unsigned long timeout;
2471 u8 sig;
2472
2473 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2474 buf |= HW_CFG_LRST_;
2475 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2476
2477 timeout = jiffies + HZ;
2478 do {
2479 mdelay(1);
2480 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2481 if (time_after(jiffies, timeout)) {
2482 netdev_warn(dev->net,
2483 "timeout on completion of LiteReset");
2484 return -EIO;
2485 }
2486 } while (buf & HW_CFG_LRST_);
2487
2488 lan78xx_init_mac_address(dev);
2489
2490	/* save the DEVID for later use */
2491 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2492 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2493 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2494
2495 /* Respond to the IN token with a NAK */
2496 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2497 buf |= USB_CFG_BIR_;
2498 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2499
2500 /* Init LTM */
2501 lan78xx_init_ltm(dev);
2502
2503 if (dev->udev->speed == USB_SPEED_SUPER) {
2504 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2505 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2506 dev->rx_qlen = 4;
2507 dev->tx_qlen = 4;
2508 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2509 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2510 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2511 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2512 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2513 } else {
2514 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2515 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2516 dev->rx_qlen = 4;
2517 dev->tx_qlen = 4;
2518 }
2519
2520 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2521 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2522
2523 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2524 buf |= HW_CFG_MEF_;
2525 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2526
2527 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2528 buf |= USB_CFG_BCE_;
2529 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2530
2531 /* set FIFO sizes */
2532 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2533 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2534
2535 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2536 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2537
2538 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2539 ret = lan78xx_write_reg(dev, FLOW, 0);
2540 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2541
2542 /* Don't need rfe_ctl_lock during initialisation */
2543 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2544 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2545 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2546
2547 /* Enable or disable checksum offload engines */
2548 lan78xx_set_features(dev->net, dev->net->features);
2549
2550 lan78xx_set_multicast(dev->net);
2551
2552 /* reset PHY */
2553 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2554 buf |= PMT_CTL_PHY_RST_;
2555 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2556
2557 timeout = jiffies + HZ;
2558 do {
2559 mdelay(1);
2560 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2561 if (time_after(jiffies, timeout)) {
2562 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2563 return -EIO;
2564 }
2565 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2566
2567 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2568 /* LAN7801 only has RGMII mode */
2569 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2570 buf &= ~MAC_CR_GMII_EN_;
2571
2572 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2573 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2574 if (!ret && sig != EEPROM_INDICATOR) {
2575			/* no external EEPROM; enable automatic MAC speed/duplex */
2576 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2577 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2578 }
2579 }
2580 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2581
2582 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2583 buf |= MAC_TX_TXEN_;
2584 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2585
2586 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2587 buf |= FCT_TX_CTL_EN_;
2588 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2589
2590 ret = lan78xx_set_rx_max_frame_length(dev,
2591 dev->net->mtu + VLAN_ETH_HLEN);
2592
2593 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2594 buf |= MAC_RX_RXEN_;
2595 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2596
2597 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2598 buf |= FCT_RX_CTL_EN_;
2599 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2600
2601 return 0;
2602}
2603
2604static void lan78xx_init_stats(struct lan78xx_net *dev)
2605{
2606 u32 *p;
2607 int i;
2608
2609	/* initialize rollover limits for the stats update;
2610	 * some counters are 20 bits wide and some are 32 bits
2611	 */
2612 p = (u32 *)&dev->stats.rollover_max;
2613 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2614 p[i] = 0xFFFFF;
2615
2616 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2617 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2618 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2619 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2620 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2621 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2622 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2623 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2624 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2625 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2626
2627 set_bit(EVENT_STAT_UPDATE, &dev->flags);
2628}
2629
2630static int lan78xx_open(struct net_device *net)
2631{
2632 struct lan78xx_net *dev = netdev_priv(net);
2633 int ret;
2634
2635 ret = usb_autopm_get_interface(dev->intf);
2636 if (ret < 0)
2637 goto out;
2638
2639 phy_start(net->phydev);
2640
2641 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2642
2643 /* for Link Check */
2644 if (dev->urb_intr) {
2645 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2646 if (ret < 0) {
2647 netif_err(dev, ifup, dev->net,
2648 "intr submit %d\n", ret);
2649 goto done;
2650 }
2651 }
2652
2653 lan78xx_init_stats(dev);
2654
2655 set_bit(EVENT_DEV_OPEN, &dev->flags);
2656
2657 netif_start_queue(net);
2658
2659 dev->link_on = false;
2660
2661 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2662done:
2663 usb_autopm_put_interface(dev->intf);
2664
2665out:
2666 return ret;
2667}
2668
2669static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2670{
2671 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2672 DECLARE_WAITQUEUE(wait, current);
2673 int temp;
2674
2675 /* ensure there are no more active urbs */
2676 add_wait_queue(&unlink_wakeup, &wait);
2677 set_current_state(TASK_UNINTERRUPTIBLE);
2678 dev->wait = &unlink_wakeup;
2679 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2680
2681 /* maybe wait for deletions to finish. */
2682 while (!skb_queue_empty(&dev->rxq) &&
2683 !skb_queue_empty(&dev->txq) &&
2684 !skb_queue_empty(&dev->done)) {
2685 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2686 set_current_state(TASK_UNINTERRUPTIBLE);
2687 netif_dbg(dev, ifdown, dev->net,
2688 "waited for %d urb completions\n", temp);
2689 }
2690 set_current_state(TASK_RUNNING);
2691 dev->wait = NULL;
2692 remove_wait_queue(&unlink_wakeup, &wait);
2693}
2694
2695static int lan78xx_stop(struct net_device *net)
2696{
2697	struct lan78xx_net *dev = netdev_priv(net);
2698
2699 if (timer_pending(&dev->stat_monitor))
2700 del_timer_sync(&dev->stat_monitor);
2701
2702 if (net->phydev)
2703 phy_stop(net->phydev);
2704
2705 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2706 netif_stop_queue(net);
2707
2708 netif_info(dev, ifdown, dev->net,
2709 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2710 net->stats.rx_packets, net->stats.tx_packets,
2711 net->stats.rx_errors, net->stats.tx_errors);
2712
2713 lan78xx_terminate_urbs(dev);
2714
2715 usb_kill_urb(dev->urb_intr);
2716
2717 skb_queue_purge(&dev->rxq_pause);
2718
2719 /* deferred work (task, timer, softirq) must also stop.
2720 * can't flush_scheduled_work() until we drop rtnl (later),
2721 * else workers could deadlock; so make workers a NOP.
2722 */
2723 dev->flags = 0;
2724 cancel_delayed_work_sync(&dev->wq);
2725 tasklet_kill(&dev->bh);
2726
2727 usb_autopm_put_interface(dev->intf);
2728
2729 return 0;
2730}
2731
2732static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2733 struct sk_buff *skb, gfp_t flags)
2734{
2735 u32 tx_cmd_a, tx_cmd_b;
2736	void *ptr;
2737
2738 if (skb_cow_head(skb, TX_OVERHEAD)) {
2739 dev_kfree_skb_any(skb);
2740 return NULL;
2741 }
2742
2743	if (skb_linearize(skb)) {
2744		dev_kfree_skb_any(skb);
2745		return NULL;
2746	}
2747
2748 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2749
2750 if (skb->ip_summed == CHECKSUM_PARTIAL)
2751 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2752
2753 tx_cmd_b = 0;
2754 if (skb_is_gso(skb)) {
2755 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2756
2757 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2758
2759 tx_cmd_a |= TX_CMD_A_LSO_;
2760 }
2761
2762 if (skb_vlan_tag_present(skb)) {
2763 tx_cmd_a |= TX_CMD_A_IVTG_;
2764 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2765 }
2766
2767	ptr = skb_push(skb, 8);
2768	put_unaligned_le32(tx_cmd_a, ptr);
2769	put_unaligned_le32(tx_cmd_b, ptr + 4);
2770
2771 return skb;
2772}
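
/* Editorial note: a sketch of the 8-byte TX command header that
 * lan78xx_tx_prep() prepends (values illustrative).  For a 1514-byte
 * frame with partial checksum offload, no VLAN tag and no LSO:
 *
 *	tx_cmd_a = 1514 | TX_CMD_A_FCS_ | TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
 *	tx_cmd_b = 0;
 *
 * Both words are stored little-endian via put_unaligned_le32(), so the
 * bulk-out payload is [tx_cmd_a][tx_cmd_b][frame data].
 */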
2773
2774static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2775 struct sk_buff_head *list, enum skb_state state)
2776{
2777 unsigned long flags;
2778 enum skb_state old_state;
2779 struct skb_data *entry = (struct skb_data *)skb->cb;
2780
2781 spin_lock_irqsave(&list->lock, flags);
2782 old_state = entry->state;
2783 entry->state = state;
2784
2785 __skb_unlink(skb, list);
2786 spin_unlock(&list->lock);
2787 spin_lock(&dev->done.lock);
2788
2789 __skb_queue_tail(&dev->done, skb);
2790 if (skb_queue_len(&dev->done) == 1)
2791 tasklet_schedule(&dev->bh);
2792 spin_unlock_irqrestore(&dev->done.lock, flags);
2793
2794 return old_state;
2795}
2796
2797static void tx_complete(struct urb *urb)
2798{
2799 struct sk_buff *skb = (struct sk_buff *)urb->context;
2800 struct skb_data *entry = (struct skb_data *)skb->cb;
2801 struct lan78xx_net *dev = entry->dev;
2802
2803 if (urb->status == 0) {
2804 dev->net->stats.tx_packets += entry->num_of_packet;
2805 dev->net->stats.tx_bytes += entry->length;
2806 } else {
2807 dev->net->stats.tx_errors++;
2808
2809 switch (urb->status) {
2810 case -EPIPE:
2811 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2812 break;
2813
2814 /* software-driven interface shutdown */
2815 case -ECONNRESET:
2816 case -ESHUTDOWN:
2817 break;
2818
2819 case -EPROTO:
2820 case -ETIME:
2821 case -EILSEQ:
2822 netif_stop_queue(dev->net);
2823 break;
2824 default:
2825 netif_dbg(dev, tx_err, dev->net,
2826 "tx err %d\n", entry->urb->status);
2827 break;
2828 }
2829 }
2830
2831 usb_autopm_put_interface_async(dev->intf);
2832
2833 defer_bh(dev, skb, &dev->txq, tx_done);
2834}
2835
2836static void lan78xx_queue_skb(struct sk_buff_head *list,
2837 struct sk_buff *newsk, enum skb_state state)
2838{
2839 struct skb_data *entry = (struct skb_data *)newsk->cb;
2840
2841 __skb_queue_tail(list, newsk);
2842 entry->state = state;
2843}
2844
2845static netdev_tx_t
2846lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2847{
2848 struct lan78xx_net *dev = netdev_priv(net);
2849 struct sk_buff *skb2 = NULL;
2850
2851 if (skb) {
2852 skb_tx_timestamp(skb);
2853 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2854 }
2855
2856 if (skb2) {
2857 skb_queue_tail(&dev->txq_pend, skb2);
2858
2859		/* throttle the TX path at speeds below SuperSpeed USB */
2860 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2861 (skb_queue_len(&dev->txq_pend) > 10))
2862 netif_stop_queue(net);
2863 } else {
2864 netif_dbg(dev, tx_err, dev->net,
2865 "lan78xx_tx_prep return NULL\n");
2866 dev->net->stats.tx_errors++;
2867 dev->net->stats.tx_dropped++;
2868 }
2869
2870 tasklet_schedule(&dev->bh);
2871
2872 return NETDEV_TX_OK;
2873}
2874
2875static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2876{
2877 struct lan78xx_priv *pdata = NULL;
2878 int ret;
2879 int i;
2880
2881	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2882
2883 pdata = (struct lan78xx_priv *)(dev->data[0]);
2884 if (!pdata) {
2885 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2886 return -ENOMEM;
2887 }
2888
2889 pdata->dev = dev;
2890
2891 spin_lock_init(&pdata->rfe_ctl_lock);
2892 mutex_init(&pdata->dataport_mutex);
2893
2894 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2895
2896 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2897 pdata->vlan_table[i] = 0;
2898
2899 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2900
2901 dev->net->features = 0;
2902
2903 if (DEFAULT_TX_CSUM_ENABLE)
2904 dev->net->features |= NETIF_F_HW_CSUM;
2905
2906 if (DEFAULT_RX_CSUM_ENABLE)
2907 dev->net->features |= NETIF_F_RXCSUM;
2908
2909 if (DEFAULT_TSO_CSUM_ENABLE)
2910 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2911
2912 if (DEFAULT_VLAN_RX_OFFLOAD)
2913 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2914
2915 if (DEFAULT_VLAN_FILTER_ENABLE)
2916 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2917
2918 dev->net->hw_features = dev->net->features;
2919
2920 ret = lan78xx_setup_irq_domain(dev);
2921 if (ret < 0) {
2922 netdev_warn(dev->net,
2923 "lan78xx_setup_irq_domain() failed : %d", ret);
2924 goto out1;
2925 }
2926
2927 dev->net->hard_header_len += TX_OVERHEAD;
2928 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2929
2930 /* Init all registers */
2931 ret = lan78xx_reset(dev);
2932 if (ret) {
2933 netdev_warn(dev->net, "Registers INIT FAILED....");
2934 goto out2;
2935 }
2936
2937 ret = lan78xx_mdio_init(dev);
2938 if (ret) {
2939 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2940 goto out2;
2941 }
2942
2943 dev->net->flags |= IFF_MULTICAST;
2944
2945 pdata->wol = WAKE_MAGIC;
2946
2947 return ret;
2948
2949out2:
2950 lan78xx_remove_irq_domain(dev);
2951
2952out1:
2953 netdev_warn(dev->net, "Bind routine FAILED");
2954 cancel_work_sync(&pdata->set_multicast);
2955 cancel_work_sync(&pdata->set_vlan);
2956 kfree(pdata);
2957 return ret;
2958}
2959
2960static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2961{
2962 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2963
2964 lan78xx_remove_irq_domain(dev);
2965
2966 lan78xx_remove_mdio(dev);
2967
2968 if (pdata) {
2969 cancel_work_sync(&pdata->set_multicast);
2970 cancel_work_sync(&pdata->set_vlan);
2971 netif_dbg(dev, ifdown, dev->net, "free pdata");
2972 kfree(pdata);
2973 pdata = NULL;
2974 dev->data[0] = 0;
2975 }
2976}
2977
2978static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2979 struct sk_buff *skb,
2980 u32 rx_cmd_a, u32 rx_cmd_b)
2981{
2982 /* HW Checksum offload appears to be flawed if used when not stripping
2983 * VLAN headers. Drop back to S/W checksums under these conditions.
2984 */
2985 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2986 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
2987 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
2988 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
2989 skb->ip_summed = CHECKSUM_NONE;
2990 } else {
2991 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2992 skb->ip_summed = CHECKSUM_COMPLETE;
2993 }
2994}
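
/* Editorial note: with CHECKSUM_COMPLETE the hardware hands over the raw
 * 16-bit checksum it computed across the frame (upper half of rx_cmd_b)
 * and the stack does the verification.  A minimal extraction sketch,
 * mirroring the assignment above (helper is illustrative only):
 */
static u16 __maybe_unused lan78xx_example_rx_csum(u32 rx_cmd_b)
{
	return ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
}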
2995
2996static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
2997 struct sk_buff *skb,
2998 u32 rx_cmd_a, u32 rx_cmd_b)
2999{
3000 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3001 (rx_cmd_a & RX_CMD_A_FVTG_))
3002 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3003 (rx_cmd_b & 0xffff));
3004}
3005
3006static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3007{
3008	int status;
3009
3010 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3011 skb_queue_tail(&dev->rxq_pause, skb);
3012 return;
3013 }
3014
3015 dev->net->stats.rx_packets++;
3016 dev->net->stats.rx_bytes += skb->len;
3017
3018 skb->protocol = eth_type_trans(skb, dev->net);
3019
3020 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3021 skb->len + sizeof(struct ethhdr), skb->protocol);
3022 memset(skb->cb, 0, sizeof(struct skb_data));
3023
3024 if (skb_defer_rx_timestamp(skb))
3025 return;
3026
3027 status = netif_rx(skb);
3028 if (status != NET_RX_SUCCESS)
3029 netif_dbg(dev, rx_err, dev->net,
3030 "netif_rx status %d\n", status);
3031}
3032
3033static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3034{
3035 if (skb->len < dev->net->hard_header_len)
3036 return 0;
3037
3038 while (skb->len > 0) {
3039 u32 rx_cmd_a, rx_cmd_b, align_count, size;
3040 u16 rx_cmd_c;
3041 struct sk_buff *skb2;
3042 unsigned char *packet;
3043
3044		rx_cmd_a = get_unaligned_le32(skb->data);
3045		skb_pull(skb, sizeof(rx_cmd_a));
3046
3047		rx_cmd_b = get_unaligned_le32(skb->data);
3048		skb_pull(skb, sizeof(rx_cmd_b));
3049
3050		rx_cmd_c = get_unaligned_le16(skb->data);
3051		skb_pull(skb, sizeof(rx_cmd_c));
3052
3053 packet = skb->data;
3054
3055 /* get the packet length */
3056 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3057 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3058
3059 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3060 netif_dbg(dev, rx_err, dev->net,
3061 "Error rx_cmd_a=0x%08x", rx_cmd_a);
3062 } else {
3063 /* last frame in this batch */
3064 if (skb->len == size) {
3065 lan78xx_rx_csum_offload(dev, skb,
3066 rx_cmd_a, rx_cmd_b);
3067 lan78xx_rx_vlan_offload(dev, skb,
3068 rx_cmd_a, rx_cmd_b);
3069
3070 skb_trim(skb, skb->len - 4); /* remove fcs */
3071 skb->truesize = size + sizeof(struct sk_buff);
3072
3073 return 1;
3074 }
3075
3076 skb2 = skb_clone(skb, GFP_ATOMIC);
3077 if (unlikely(!skb2)) {
3078 netdev_warn(dev->net, "Error allocating skb");
3079 return 0;
3080 }
3081
3082 skb2->len = size;
3083 skb2->data = packet;
3084 skb_set_tail_pointer(skb2, size);
3085
3086 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3087 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3088
3089 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3090 skb2->truesize = size + sizeof(struct sk_buff);
3091
3092 lan78xx_skb_return(dev, skb2);
3093 }
3094
3095 skb_pull(skb, size);
3096
3097 /* padding bytes before the next frame starts */
3098 if (skb->len)
3099 skb_pull(skb, align_count);
3100 }
3101
3102 return 1;
3103}
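
/* Editorial note: each bulk-in buffer carries one or more frames, each
 * preceded by a 10-byte metadata header (rx_cmd_a/b/c) and padded so the
 * next header starts 4-byte aligned.  Worked example (illustrative
 * helper): for a 60-byte frame, (4 - ((60 + RXW_PADDING) % 4)) % 4 = 2
 * padding bytes are skipped before the next rx_cmd_a.
 */
static u32 __maybe_unused lan78xx_example_rx_pad(u32 frame_size)
{
	return (4 - ((frame_size + RXW_PADDING) % 4)) % 4;
}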
3104
3105static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3106{
3107 if (!lan78xx_rx(dev, skb)) {
3108 dev->net->stats.rx_errors++;
3109 goto done;
3110 }
3111
3112 if (skb->len) {
3113 lan78xx_skb_return(dev, skb);
3114 return;
3115 }
3116
3117 netif_dbg(dev, rx_err, dev->net, "drop\n");
3118 dev->net->stats.rx_errors++;
3119done:
3120 skb_queue_tail(&dev->done, skb);
3121}
3122
3123static void rx_complete(struct urb *urb);
3124
3125static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3126{
3127 struct sk_buff *skb;
3128 struct skb_data *entry;
3129 unsigned long lockflags;
3130 size_t size = dev->rx_urb_size;
3131 int ret = 0;
3132
3133 skb = netdev_alloc_skb_ip_align(dev->net, size);
3134 if (!skb) {
3135 usb_free_urb(urb);
3136 return -ENOMEM;
3137 }
3138
3139 entry = (struct skb_data *)skb->cb;
3140 entry->urb = urb;
3141 entry->dev = dev;
3142 entry->length = 0;
3143
3144 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3145 skb->data, size, rx_complete, skb);
3146
3147 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3148
3149 if (netif_device_present(dev->net) &&
3150 netif_running(dev->net) &&
3151 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3152 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3153 ret = usb_submit_urb(urb, GFP_ATOMIC);
3154 switch (ret) {
3155 case 0:
3156 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3157 break;
3158 case -EPIPE:
3159 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3160 break;
3161 case -ENODEV:
3162 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3163 netif_device_detach(dev->net);
3164 break;
3165 case -EHOSTUNREACH:
3166 ret = -ENOLINK;
3167 break;
3168 default:
3169 netif_dbg(dev, rx_err, dev->net,
3170 "rx submit, %d\n", ret);
3171 tasklet_schedule(&dev->bh);
3172 }
3173 } else {
3174 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3175 ret = -ENOLINK;
3176 }
3177 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3178 if (ret) {
3179 dev_kfree_skb_any(skb);
3180 usb_free_urb(urb);
3181 }
3182 return ret;
3183}
3184
3185static void rx_complete(struct urb *urb)
3186{
3187 struct sk_buff *skb = (struct sk_buff *)urb->context;
3188 struct skb_data *entry = (struct skb_data *)skb->cb;
3189 struct lan78xx_net *dev = entry->dev;
3190 int urb_status = urb->status;
3191 enum skb_state state;
3192
3193 skb_put(skb, urb->actual_length);
3194 state = rx_done;
3195 entry->urb = NULL;
3196
3197 switch (urb_status) {
3198 case 0:
3199 if (skb->len < dev->net->hard_header_len) {
3200 state = rx_cleanup;
3201 dev->net->stats.rx_errors++;
3202 dev->net->stats.rx_length_errors++;
3203 netif_dbg(dev, rx_err, dev->net,
3204 "rx length %d\n", skb->len);
3205 }
3206 usb_mark_last_busy(dev->udev);
3207 break;
3208 case -EPIPE:
3209 dev->net->stats.rx_errors++;
3210 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3211 /* FALLTHROUGH */
3212 case -ECONNRESET: /* async unlink */
3213 case -ESHUTDOWN: /* hardware gone */
3214 netif_dbg(dev, ifdown, dev->net,
3215 "rx shutdown, code %d\n", urb_status);
3216 state = rx_cleanup;
3217 entry->urb = urb;
3218 urb = NULL;
3219 break;
3220 case -EPROTO:
3221 case -ETIME:
3222 case -EILSEQ:
3223 dev->net->stats.rx_errors++;
3224 state = rx_cleanup;
3225 entry->urb = urb;
3226 urb = NULL;
3227 break;
3228
3229 /* data overrun ... flush fifo? */
3230 case -EOVERFLOW:
3231 dev->net->stats.rx_over_errors++;
3232 /* FALLTHROUGH */
3233
3234 default:
3235 state = rx_cleanup;
3236 dev->net->stats.rx_errors++;
3237 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3238 break;
3239 }
3240
3241 state = defer_bh(dev, skb, &dev->rxq, state);
3242
3243 if (urb) {
3244 if (netif_running(dev->net) &&
3245 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3246 state != unlink_start) {
3247 rx_submit(dev, urb, GFP_ATOMIC);
3248 return;
3249 }
3250 usb_free_urb(urb);
3251 }
3252 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3253}
3254
3255static void lan78xx_tx_bh(struct lan78xx_net *dev)
3256{
3257 int length;
3258 struct urb *urb = NULL;
3259 struct skb_data *entry;
3260 unsigned long flags;
3261 struct sk_buff_head *tqp = &dev->txq_pend;
3262 struct sk_buff *skb, *skb2;
3263 int ret;
3264 int count, pos;
3265 int skb_totallen, pkt_cnt;
3266
3267 skb_totallen = 0;
3268 pkt_cnt = 0;
3269 count = 0;
3270 length = 0;
3271 spin_lock_irqsave(&tqp->lock, flags);
3272	skb_queue_walk(tqp, skb) {
3273		if (skb_is_gso(skb)) {
3274			if (!skb_queue_is_first(tqp, skb)) {
3275				/* handle previous packets first */
3276 break;
3277 }
3278 count = 1;
3279 length = skb->len - TX_OVERHEAD;
3280 __skb_unlink(skb, tqp);
3281 spin_unlock_irqrestore(&tqp->lock, flags);
3282 goto gso_skb;
3283 }
3284
3285 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3286 break;
3287 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3288 pkt_cnt++;
3289 }
3290 spin_unlock_irqrestore(&tqp->lock, flags);
3291
3292 /* copy to a single skb */
3293 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3294 if (!skb)
3295 goto drop;
3296
3297 skb_put(skb, skb_totallen);
3298
3299 for (count = pos = 0; count < pkt_cnt; count++) {
3300 skb2 = skb_dequeue(tqp);
3301 if (skb2) {
3302 length += (skb2->len - TX_OVERHEAD);
3303 memcpy(skb->data + pos, skb2->data, skb2->len);
3304 pos += roundup(skb2->len, sizeof(u32));
3305 dev_kfree_skb(skb2);
3306 }
3307 }
3308
3309gso_skb:
3310 urb = usb_alloc_urb(0, GFP_ATOMIC);
3311 if (!urb)
3312 goto drop;
3313
3314 entry = (struct skb_data *)skb->cb;
3315 entry->urb = urb;
3316 entry->dev = dev;
3317 entry->length = length;
3318 entry->num_of_packet = count;
3319
3320 spin_lock_irqsave(&dev->txq.lock, flags);
3321 ret = usb_autopm_get_interface_async(dev->intf);
3322 if (ret < 0) {
3323 spin_unlock_irqrestore(&dev->txq.lock, flags);
3324 goto drop;
3325 }
3326
3327 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3328 skb->data, skb->len, tx_complete, skb);
3329
3330 if (length % dev->maxpacket == 0) {
3331 /* send USB_ZERO_PACKET */
3332 urb->transfer_flags |= URB_ZERO_PACKET;
3333 }
3334
3335#ifdef CONFIG_PM
3336	/* if this triggers, the device is still asleep */
3337 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3338 /* transmission will be done in resume */
3339 usb_anchor_urb(urb, &dev->deferred);
3340		/* no point in processing more packets */
3341 netif_stop_queue(dev->net);
3342 usb_put_urb(urb);
3343 spin_unlock_irqrestore(&dev->txq.lock, flags);
3344 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3345 return;
3346 }
3347#endif
3348
3349 ret = usb_submit_urb(urb, GFP_ATOMIC);
3350 switch (ret) {
3351 case 0:
3352 netif_trans_update(dev->net);
3353 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3354 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3355 netif_stop_queue(dev->net);
3356 break;
3357 case -EPIPE:
3358 netif_stop_queue(dev->net);
3359 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3360 usb_autopm_put_interface_async(dev->intf);
3361 break;
3362 default:
3363 usb_autopm_put_interface_async(dev->intf);
3364 netif_dbg(dev, tx_err, dev->net,
3365 "tx: submit urb err %d\n", ret);
3366 break;
3367 }
3368
3369 spin_unlock_irqrestore(&dev->txq.lock, flags);
3370
3371 if (ret) {
3372 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3373drop:
3374 dev->net->stats.tx_dropped++;
3375 if (skb)
3376 dev_kfree_skb_any(skb);
3377 usb_free_urb(urb);
3378 } else
3379 netif_dbg(dev, tx_queued, dev->net,
3380 "> tx, len %d, type 0x%x\n", length, skb->protocol);
3381}
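
/* Editorial note: worked example of the batching arithmetic in
 * lan78xx_tx_bh() (helper is illustrative only).  Two pending skbs of
 * 71 and 130 bytes (TX command headers included) give
 * skb_totallen = 130 + roundup(71, 4) = 202; the second frame is copied
 * at offset 72 so every TX command header stays u32-aligned.
 */
static int __maybe_unused lan78xx_example_batched_len(int first, int second)
{
	return second + roundup(first, sizeof(u32));	/* 71, 130 -> 202 */
}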
3382
3383static void lan78xx_rx_bh(struct lan78xx_net *dev)
3384{
3385 struct urb *urb;
3386 int i;
3387
3388 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3389 for (i = 0; i < 10; i++) {
3390 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3391 break;
3392 urb = usb_alloc_urb(0, GFP_ATOMIC);
3393 if (urb)
3394 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3395 return;
3396 }
3397
3398 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3399 tasklet_schedule(&dev->bh);
3400 }
3401 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3402 netif_wake_queue(dev->net);
3403}
3404
3405static void lan78xx_bh(unsigned long param)
3406{
3407 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3408 struct sk_buff *skb;
3409 struct skb_data *entry;
3410
3411 while ((skb = skb_dequeue(&dev->done))) {
3412 entry = (struct skb_data *)(skb->cb);
3413 switch (entry->state) {
3414 case rx_done:
3415 entry->state = rx_cleanup;
3416 rx_process(dev, skb);
3417 continue;
3418 case tx_done:
3419 usb_free_urb(entry->urb);
3420 dev_kfree_skb(skb);
3421 continue;
3422 case rx_cleanup:
3423 usb_free_urb(entry->urb);
3424 dev_kfree_skb(skb);
3425 continue;
3426 default:
3427 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3428 return;
3429 }
3430 }
3431
3432 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3433 /* reset update timer delta */
3434 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3435 dev->delta = 1;
3436 mod_timer(&dev->stat_monitor,
3437 jiffies + STAT_UPDATE_TIMER);
3438 }
3439
3440 if (!skb_queue_empty(&dev->txq_pend))
3441 lan78xx_tx_bh(dev);
3442
3443 if (!timer_pending(&dev->delay) &&
3444 !test_bit(EVENT_RX_HALT, &dev->flags))
3445 lan78xx_rx_bh(dev);
3446 }
3447}
3448
3449static void lan78xx_delayedwork(struct work_struct *work)
3450{
3451 int status;
3452 struct lan78xx_net *dev;
3453
3454 dev = container_of(work, struct lan78xx_net, wq.work);
3455
3456 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3457 unlink_urbs(dev, &dev->txq);
3458 status = usb_autopm_get_interface(dev->intf);
3459 if (status < 0)
3460 goto fail_pipe;
3461 status = usb_clear_halt(dev->udev, dev->pipe_out);
3462 usb_autopm_put_interface(dev->intf);
3463 if (status < 0 &&
3464 status != -EPIPE &&
3465 status != -ESHUTDOWN) {
3466 if (netif_msg_tx_err(dev))
3467fail_pipe:
3468 netdev_err(dev->net,
3469 "can't clear tx halt, status %d\n",
3470 status);
3471 } else {
3472 clear_bit(EVENT_TX_HALT, &dev->flags);
3473 if (status != -ESHUTDOWN)
3474 netif_wake_queue(dev->net);
3475 }
3476 }
3477 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3478 unlink_urbs(dev, &dev->rxq);
3479 status = usb_autopm_get_interface(dev->intf);
3480 if (status < 0)
3481 goto fail_halt;
3482 status = usb_clear_halt(dev->udev, dev->pipe_in);
3483 usb_autopm_put_interface(dev->intf);
3484 if (status < 0 &&
3485 status != -EPIPE &&
3486 status != -ESHUTDOWN) {
3487 if (netif_msg_rx_err(dev))
3488fail_halt:
3489 netdev_err(dev->net,
3490 "can't clear rx halt, status %d\n",
3491 status);
3492 } else {
3493 clear_bit(EVENT_RX_HALT, &dev->flags);
3494 tasklet_schedule(&dev->bh);
3495 }
3496 }
3497
3498 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3499 int ret = 0;
3500
3501 clear_bit(EVENT_LINK_RESET, &dev->flags);
3502 status = usb_autopm_get_interface(dev->intf);
3503 if (status < 0)
3504 goto skip_reset;
3505 if (lan78xx_link_reset(dev) < 0) {
3506 usb_autopm_put_interface(dev->intf);
3507skip_reset:
3508 netdev_info(dev->net, "link reset failed (%d)\n",
3509 ret);
3510 } else {
3511 usb_autopm_put_interface(dev->intf);
3512 }
3513 }
3514
3515 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3516 lan78xx_update_stats(dev);
3517
3518 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3519
3520 mod_timer(&dev->stat_monitor,
3521 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3522
3523 dev->delta = min((dev->delta * 2), 50);
3524 }
3525}
3526
3527static void intr_complete(struct urb *urb)
3528{
3529 struct lan78xx_net *dev = urb->context;
3530 int status = urb->status;
3531
3532 switch (status) {
3533 /* success */
3534 case 0:
3535 lan78xx_status(dev, urb);
3536 break;
3537
3538 /* software-driven interface shutdown */
3539 case -ENOENT: /* urb killed */
3540 case -ESHUTDOWN: /* hardware gone */
3541 netif_dbg(dev, ifdown, dev->net,
3542 "intr shutdown, code %d\n", status);
3543 return;
3544
3545 /* NOTE: not throttling like RX/TX, since this endpoint
3546 * already polls infrequently
3547 */
3548 default:
3549 netdev_dbg(dev->net, "intr status %d\n", status);
3550 break;
3551 }
3552
3553 if (!netif_running(dev->net))
3554 return;
3555
3556 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3557 status = usb_submit_urb(urb, GFP_ATOMIC);
3558 if (status != 0)
3559 netif_err(dev, timer, dev->net,
3560 "intr resubmit --> %d\n", status);
3561}
3562
3563static void lan78xx_disconnect(struct usb_interface *intf)
3564{
3565	struct lan78xx_net *dev;
3566	struct usb_device *udev;
3567	struct net_device *net;
3568	struct phy_device *phydev;
3569
3570 dev = usb_get_intfdata(intf);
3571 usb_set_intfdata(intf, NULL);
3572 if (!dev)
3573 return;
3574
3575 udev = interface_to_usbdev(intf);
3576 net = dev->net;
3577 phydev = net->phydev;
3578
3579 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3580 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3581
3582 phy_disconnect(net->phydev);
3583
3584 if (phy_is_pseudo_fixed_link(phydev))
3585 fixed_phy_unregister(phydev);
3586
3587 unregister_netdev(net);
3588
3589 cancel_delayed_work_sync(&dev->wq);
3590
3591 usb_scuttle_anchored_urbs(&dev->deferred);
3592
3593 lan78xx_unbind(dev, intf);
3594
3595 usb_kill_urb(dev->urb_intr);
3596 usb_free_urb(dev->urb_intr);
3597
3598 free_netdev(net);
3599 usb_put_dev(udev);
3600}
3601
3602static void lan78xx_tx_timeout(struct net_device *net)
3603{
3604 struct lan78xx_net *dev = netdev_priv(net);
3605
3606 unlink_urbs(dev, &dev->txq);
3607 tasklet_schedule(&dev->bh);
3608}
3609
3610static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3611 struct net_device *netdev,
3612 netdev_features_t features)
3613{
3614 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3615 features &= ~NETIF_F_GSO_MASK;
3616
3617 features = vlan_features_check(skb, features);
3618 features = vxlan_features_check(skb, features);
3619
3620 return features;
3621}
3622
3623static const struct net_device_ops lan78xx_netdev_ops = {
3624 .ndo_open = lan78xx_open,
3625 .ndo_stop = lan78xx_stop,
3626 .ndo_start_xmit = lan78xx_start_xmit,
3627 .ndo_tx_timeout = lan78xx_tx_timeout,
3628 .ndo_change_mtu = lan78xx_change_mtu,
3629 .ndo_set_mac_address = lan78xx_set_mac_addr,
3630 .ndo_validate_addr = eth_validate_addr,
3631 .ndo_do_ioctl = lan78xx_ioctl,
3632 .ndo_set_rx_mode = lan78xx_set_multicast,
3633 .ndo_set_features = lan78xx_set_features,
3634 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3635 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3636	.ndo_features_check	= lan78xx_features_check,
3637};
3638
3639static void lan78xx_stat_monitor(struct timer_list *t)
3640{
3641 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3642
3643 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3644}
3645
3646static int lan78xx_probe(struct usb_interface *intf,
3647 const struct usb_device_id *id)
3648{
3649	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3650	struct lan78xx_net *dev;
3651 struct net_device *netdev;
3652 struct usb_device *udev;
3653 int ret;
3654 unsigned maxp;
3655 unsigned period;
3656 u8 *buf = NULL;
3657
3658 udev = interface_to_usbdev(intf);
3659 udev = usb_get_dev(udev);
3660
3661 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3662 if (!netdev) {
3663 dev_err(&intf->dev, "Error: OOM\n");
3664 ret = -ENOMEM;
3665 goto out1;
3666 }
3667
3668 /* netdev_printk() needs this */
3669 SET_NETDEV_DEV(netdev, &intf->dev);
3670
3671 dev = netdev_priv(netdev);
3672 dev->udev = udev;
3673 dev->intf = intf;
3674 dev->net = netdev;
3675 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3676 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3677
3678 skb_queue_head_init(&dev->rxq);
3679 skb_queue_head_init(&dev->txq);
3680 skb_queue_head_init(&dev->done);
3681 skb_queue_head_init(&dev->rxq_pause);
3682 skb_queue_head_init(&dev->txq_pend);
3683 mutex_init(&dev->phy_mutex);
3684
3685 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3686 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3687 init_usb_anchor(&dev->deferred);
3688
3689 netdev->netdev_ops = &lan78xx_netdev_ops;
3690 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3691 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3692
3693 dev->delta = 1;
3694 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3695
3696 mutex_init(&dev->stats.access_lock);
3697
3698	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3699 ret = -ENODEV;
3700 goto out2;
3701 }
3702
3703 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3704 ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3705 if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3706 ret = -ENODEV;
3707 goto out2;
3708 }
3709
3710 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3711 ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3712 if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3713 ret = -ENODEV;
3714 goto out2;
3715 }
3716
3717 ep_intr = &intf->cur_altsetting->endpoint[2];
3718 if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3719 ret = -ENODEV;
3720 goto out2;
3721 }
3722
3723 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3724 usb_endpoint_num(&ep_intr->desc));
3725
3726	ret = lan78xx_bind(dev, intf);
3727	if (ret < 0)
3728		goto out2;
3729
3730 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3731 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3732
3733 /* MTU range: 68 - 9000 */
3734 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3735	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3736
3737	period = ep_intr->desc.bInterval;
3738	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3739 buf = kmalloc(maxp, GFP_KERNEL);
3740 if (buf) {
3741 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3742 if (!dev->urb_intr) {
3743 ret = -ENOMEM;
3744 kfree(buf);
3745 goto out3;
3746 } else {
3747 usb_fill_int_urb(dev->urb_intr, dev->udev,
3748 dev->pipe_intr, buf, maxp,
3749 intr_complete, dev, period);
3750			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
3751		}
3752 }
3753
3754 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3755
3756 /* driver requires remote-wakeup capability during autosuspend. */
3757 intf->needs_remote_wakeup = 1;
3758
3759	ret = lan78xx_phy_init(dev);
3760 if (ret < 0)
3761 goto out4;
3762
3763	ret = register_netdev(netdev);
3764	if (ret != 0) {
3765		netif_err(dev, probe, netdev, "couldn't register the device\n");
3766		goto out5;
3767	}
3768
3769 usb_set_intfdata(intf, dev);
3770
3771 ret = device_set_wakeup_enable(&udev->dev, true);
3772
3773	/* The default autosuspend delay of 2 sec costs more in overhead
3774	 * than it saves, so use 10 sec as the default instead.
3775	 */
3776 pm_runtime_set_autosuspend_delay(&udev->dev,
3777 DEFAULT_AUTOSUSPEND_DELAY);
3778
3779	return 0;
3780
3781out5:
3782	phy_disconnect(netdev->phydev);
3783out4:
3784	usb_free_urb(dev->urb_intr);
3785out3:
3786 lan78xx_unbind(dev, intf);
3787out2:
3788 free_netdev(netdev);
3789out1:
3790 usb_put_dev(udev);
3791
3792 return ret;
3793}
3794
3795static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3796{
3797 const u16 crc16poly = 0x8005;
3798 int i;
3799 u16 bit, crc, msb;
3800 u8 data;
3801
3802 crc = 0xFFFF;
3803 for (i = 0; i < len; i++) {
3804 data = *buf++;
3805 for (bit = 0; bit < 8; bit++) {
3806 msb = crc >> 15;
3807 crc <<= 1;
3808
3809 if (msb ^ (u16)(data & 1)) {
3810 crc ^= crc16poly;
3811 crc |= (u16)0x0001U;
3812 }
3813 data >>= 1;
3814 }
3815 }
3816
3817 return crc;
3818}
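
/* Editorial note: usage sketch for the bit-serial wakeup-frame CRC-16
 * above (polynomial 0x8005, init 0xFFFF, data consumed LSB-first), as
 * programmed into the WUF_CFGX filters below.  Helper is illustrative
 * only:
 */
static u16 __maybe_unused lan78xx_example_arp_crc(void)
{
	const u8 arp_type[2] = { 0x08, 0x06 };	/* ARP EtherType 0x0806 */

	return lan78xx_wakeframe_crc16(arp_type, 2);
}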
3819
3820static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3821{
3822 u32 buf;
3823 int ret;
3824 int mask_index;
3825 u16 crc;
3826 u32 temp_wucsr;
3827 u32 temp_pmt_ctl;
3828 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3829 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3830 const u8 arp_type[2] = { 0x08, 0x06 };
3831
3832 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3833 buf &= ~MAC_TX_TXEN_;
3834 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3835 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3836 buf &= ~MAC_RX_RXEN_;
3837 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3838
3839 ret = lan78xx_write_reg(dev, WUCSR, 0);
3840 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3841 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3842
3843 temp_wucsr = 0;
3844
3845 temp_pmt_ctl = 0;
3846 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3847 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3848 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3849
3850 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3851 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3852
	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packet type (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set, fall back to SUSPEND mode 0 */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS (wake-up status) */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}

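/* USB suspend callback: stop traffic on the first suspend call, then arm
 * the wake-up hardware for selective (runtime) or system suspend.
 */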
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

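	/* only the first suspend call stops traffic and kills the URBs;
	 * later nested calls just reprogram the wake-up logic
	 */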
	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

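	/* with the device asleep, arm the wake-up sources: PHY events plus
	 * good received frames for autosuspend, the user's WoL settings for
	 * system suspend
	 */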
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set good-frame wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}

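/* USB resume callback: restart deferred transmissions once the last
 * suspend reference is dropped, then disarm the wake-up hardware and
 * re-enable the MAC transmitter.
 */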
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

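	/* only the last matching resume call restarts the interrupt URB and
	 * flushes the transmissions deferred while suspended
	 */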
	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

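	/* disarm the wake-up logic, acknowledge any recorded wake events,
	 * then re-enable the transmitter
	 */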
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}

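/* The chip may have lost its register state across a USB reset, so redo
 * the full hardware initialisation and restart the PHY before taking the
 * normal resume path.
 */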
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	phy_start(dev->net->phydev);

	return lan78xx_resume(intf);
}

static const struct usb_device_id products[] = {
	{
		/* LAN7800 USB Gigabit Ethernet Device */
		USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
		/* LAN7850 USB Gigabit Ethernet Device */
		USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
		/* LAN7801 USB Gigabit Ethernet Device */
		USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");