/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
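/* For example, tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags): a misspelled flag name
 * fails to compile against the TG3_FLAGS enum instead of silently testing
 * the wrong bit, while the bit itself is still tested atomically.
 */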

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
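/* Since TG3_TX_RING_SIZE is a power of two, NEXT_TX() wraps with a plain
 * mask: NEXT_TX(510) == 511 and NEXT_TX(511) == 0, with no divide or
 * modulo instruction generated.
 */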

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
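/* On architectures such as x86, which define NET_IP_ALIGN as 0 and select
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, the first branch applies and the
 * copy threshold collapses to the constant 256 bytes.
 */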

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
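/* With the default TG3_DEF_TX_RING_PENDING of 511 pending descriptors,
 * the queue is woken once at least 511 / 4 = 127 descriptors are free.
 */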
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
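/* Example: tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40) posts the write, waits
 * 40 usec, reads the register back to flush the posted write, then waits
 * another 40 usec so the full delay is honored on both the posted and
 * non-posted paths of _tw32_flush().
 */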

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* else: fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* else: fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if the heartbeat interval has elapsed */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
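/* __tg3_readphy() and __tg3_writephy() below poll MI_COM_BUSY once every
 * 10 usec, so PHY_BUSY_LOOPS bounds each MII transaction at roughly 50 msec.
 */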

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

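	/* The polling loop below delays 8 usec per iteration, so the
	 * remaining microseconds convert to iterations as delay_cnt / 8,
	 * rounded up.
	 */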
1664 for (i = 0; i < delay_cnt; i++) {
1665 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1666 break;
1667 if (pci_channel_offline(tp->pdev))
1668 break;
1669
1670 udelay(8);
1671 }
1672}
1673
1674/* tp->lock is held. */
1675static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1676{
1677 u32 reg, val;
1678
1679 val = 0;
1680 if (!tg3_readphy(tp, MII_BMCR, &reg))
1681 val = reg << 16;
1682 if (!tg3_readphy(tp, MII_BMSR, &reg))
1683 val |= (reg & 0xffff);
1684 *data++ = val;
1685
1686 val = 0;
1687 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1688 val = reg << 16;
1689 if (!tg3_readphy(tp, MII_LPA, &reg))
1690 val |= (reg & 0xffff);
1691 *data++ = val;
1692
1693 val = 0;
1694 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1695 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1696 val = reg << 16;
1697 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1698 val |= (reg & 0xffff);
1699 }
1700 *data++ = val;
1701
1702 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1703 val = reg << 16;
1704 else
1705 val = 0;
1706 *data++ = val;
1707}
1708
1709/* tp->lock is held. */
1710static void tg3_ump_link_report(struct tg3 *tp)
1711{
1712 u32 data[4];
1713
1714 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1715 return;
1716
1717 tg3_phy_gather_ump_data(tp, data);
1718
1719 tg3_wait_for_event_ack(tp);
1720
1721 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1722 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1723 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1724 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1725 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1726 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1727
1728 tg3_generate_fw_event(tp);
1729}
1730
1731/* tp->lock is held. */
1732static void tg3_stop_fw(struct tg3 *tp)
1733{
1734 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1735 /* Wait for RX cpu to ACK the previous event. */
1736 tg3_wait_for_event_ack(tp);
1737
1738 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1739
1740 tg3_generate_fw_event(tp);
1741
1742 /* Wait for RX cpu to ACK this event. */
1743 tg3_wait_for_event_ack(tp);
1744 }
1745}
1746
1747/* tp->lock is held. */
1748static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1749{
1750 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1751 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1752
1753 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1754 switch (kind) {
1755 case RESET_KIND_INIT:
1756 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1757 DRV_STATE_START);
1758 break;
1759
1760 case RESET_KIND_SHUTDOWN:
1761 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1762 DRV_STATE_UNLOAD);
1763 break;
1764
1765 case RESET_KIND_SUSPEND:
1766 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1767 DRV_STATE_SUSPEND);
1768 break;
1769
1770 default:
1771 break;
1772 }
1773 }
1774}
1775
1776/* tp->lock is held. */
1777static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1778{
1779 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1780 switch (kind) {
1781 case RESET_KIND_INIT:
1782 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783 DRV_STATE_START_DONE);
1784 break;
1785
1786 case RESET_KIND_SHUTDOWN:
1787 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 DRV_STATE_UNLOAD_DONE);
1789 break;
1790
1791 default:
1792 break;
1793 }
1794 }
1795}
1796
1797/* tp->lock is held. */
1798static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1799{
1800 if (tg3_flag(tp, ENABLE_ASF)) {
1801 switch (kind) {
1802 case RESET_KIND_INIT:
1803 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1804 DRV_STATE_START);
1805 break;
1806
1807 case RESET_KIND_SHUTDOWN:
1808 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1809 DRV_STATE_UNLOAD);
1810 break;
1811
1812 case RESET_KIND_SUSPEND:
1813 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1814 DRV_STATE_SUSPEND);
1815 break;
1816
1817 default:
1818 break;
1819 }
1820 }
1821}
1822
1823static int tg3_poll_fw(struct tg3 *tp)
1824{
1825 int i;
1826 u32 val;
1827
1828 if (tg3_flag(tp, NO_FWARE_REPORTED))
1829 return 0;
1830
1831 if (tg3_flag(tp, IS_SSB_CORE)) {
1832 /* We don't use firmware. */
1833 return 0;
1834 }
1835
1836 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1837 /* Wait up to 20ms for init done. */
1838 for (i = 0; i < 200; i++) {
1839 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1840 return 0;
1841 if (pci_channel_offline(tp->pdev))
1842 return -ENODEV;
1843
1844 udelay(100);
1845 }
1846 return -ENODEV;
1847 }
1848
1849 /* Wait for firmware initialization to complete. */
1850 for (i = 0; i < 100000; i++) {
1851 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1852 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1853 break;
1854 if (pci_channel_offline(tp->pdev)) {
1855 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1856 tg3_flag_set(tp, NO_FWARE_REPORTED);
1857 netdev_info(tp->dev, "No firmware running\n");
1858 }
1859
1860 break;
1861 }
1862
1863 udelay(10);
1864 }
1865
1866 /* Chip might not be fitted with firmware. Some Sun onboard
1867 * parts are configured like that. So don't signal the timeout
1868 * of the above loop as an error, but do report the lack of
1869 * running firmware once.
1870 */
1871 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1872 tg3_flag_set(tp, NO_FWARE_REPORTED);
1873
1874 netdev_info(tp->dev, "No firmware running\n");
1875 }
1876
1877 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1878 /* The 57765 A0 needs a little more
1879 * time to do some important work.
1880 */
1881 mdelay(10);
1882 }
1883
1884 return 0;
1885}
1886
1887static void tg3_link_report(struct tg3 *tp)
1888{
1889 if (!netif_carrier_ok(tp->dev)) {
1890 netif_info(tp, link, tp->dev, "Link is down\n");
1891 tg3_ump_link_report(tp);
1892 } else if (netif_msg_link(tp)) {
1893 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1894 (tp->link_config.active_speed == SPEED_1000 ?
1895 1000 :
1896 (tp->link_config.active_speed == SPEED_100 ?
1897 100 : 10)),
1898 (tp->link_config.active_duplex == DUPLEX_FULL ?
1899 "full" : "half"));
1900
1901 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1902 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1903 "on" : "off",
1904 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1905 "on" : "off");
1906
1907 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1908 netdev_info(tp->dev, "EEE is %s\n",
1909 tp->setlpicnt ? "enabled" : "disabled");
1910
1911 tg3_ump_link_report(tp);
1912 }
1913
1914 tp->link_up = netif_carrier_ok(tp->dev);
1915}
1916
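/* The helpers below translate between the MII pause advertisement
 * bits (PAUSE_CAP/PAUSE_ASYM and their 1000BASE-X equivalents) and
 * the driver's FLOW_CTRL_TX/FLOW_CTRL_RX flags.
 */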
1917static u32 tg3_decode_flowctrl_1000T(u32 adv)
1918{
1919 u32 flowctrl = 0;
1920
1921 if (adv & ADVERTISE_PAUSE_CAP) {
1922 flowctrl |= FLOW_CTRL_RX;
1923 if (!(adv & ADVERTISE_PAUSE_ASYM))
1924 flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM) {
		flowctrl |= FLOW_CTRL_TX;
	}
1927
1928 return flowctrl;
1929}
1930
1931static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1932{
1933 u16 miireg;
1934
1935 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1936 miireg = ADVERTISE_1000XPAUSE;
1937 else if (flow_ctrl & FLOW_CTRL_TX)
1938 miireg = ADVERTISE_1000XPSE_ASYM;
1939 else if (flow_ctrl & FLOW_CTRL_RX)
1940 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1941 else
1942 miireg = 0;
1943
1944 return miireg;
1945}
1946
1947static u32 tg3_decode_flowctrl_1000X(u32 adv)
1948{
1949 u32 flowctrl = 0;
1950
1951 if (adv & ADVERTISE_1000XPAUSE) {
1952 flowctrl |= FLOW_CTRL_RX;
1953 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1954 flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM) {
		flowctrl |= FLOW_CTRL_TX;
	}
1957
1958 return flowctrl;
1959}
1960
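/* Resolve the pause capability from the local and remote 1000BASE-X
 * advertisements, mirroring the resolution table of IEEE 802.3
 * Annex 28B:
 *
 *	lcl PAUSE && rmt PAUSE			-> TX and RX pause
 *	lcl ASYM && rmt ASYM && lcl PAUSE	-> RX pause only
 *	lcl ASYM && rmt ASYM && rmt PAUSE	-> TX pause only
 *	otherwise				-> no pause
 */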
1961static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1962{
1963 u8 cap = 0;
1964
1965 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1966 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1967 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1968 if (lcladv & ADVERTISE_1000XPAUSE)
1969 cap = FLOW_CTRL_RX;
1970 if (rmtadv & ADVERTISE_1000XPAUSE)
1971 cap = FLOW_CTRL_TX;
1972 }
1973
1974 return cap;
1975}
1976
1977static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1978{
1979 u8 autoneg;
1980 u8 flowctrl = 0;
1981 u32 old_rx_mode = tp->rx_mode;
1982 u32 old_tx_mode = tp->tx_mode;
1983
1984 if (tg3_flag(tp, USE_PHYLIB))
1985 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1986 else
1987 autoneg = tp->link_config.autoneg;
1988
1989 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1990 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1991 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1992 else
1993 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else {
		flowctrl = tp->link_config.flowctrl;
	}
1996
1997 tp->link_config.active_flowctrl = flowctrl;
1998
1999 if (flowctrl & FLOW_CTRL_RX)
2000 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
2001 else
2002 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
2003
2004 if (old_rx_mode != tp->rx_mode)
2005 tw32_f(MAC_RX_MODE, tp->rx_mode);
2006
2007 if (flowctrl & FLOW_CTRL_TX)
2008 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2009 else
2010 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2011
2012 if (old_tx_mode != tp->tx_mode)
2013 tw32_f(MAC_TX_MODE, tp->tx_mode);
2014}
2015
2016static void tg3_adjust_link(struct net_device *dev)
2017{
2018 u8 oldflowctrl, linkmesg = 0;
2019 u32 mac_mode, lcl_adv, rmt_adv;
2020 struct tg3 *tp = netdev_priv(dev);
2021 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2022
2023 spin_lock_bh(&tp->lock);
2024
2025 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2026 MAC_MODE_HALF_DUPLEX);
2027
2028 oldflowctrl = tp->link_config.active_flowctrl;
2029
2030 if (phydev->link) {
2031 lcl_adv = 0;
2032 rmt_adv = 0;
2033
2034 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2035 mac_mode |= MAC_MODE_PORT_MODE_MII;
2036 else if (phydev->speed == SPEED_1000 ||
2037 tg3_asic_rev(tp) != ASIC_REV_5785)
2038 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2039 else
2040 mac_mode |= MAC_MODE_PORT_MODE_MII;
2041
		if (phydev->duplex == DUPLEX_HALF) {
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		} else {
2045 lcl_adv = mii_advertise_flowctrl(
2046 tp->link_config.flowctrl);
2047
2048 if (phydev->pause)
2049 rmt_adv = LPA_PAUSE_CAP;
2050 if (phydev->asym_pause)
2051 rmt_adv |= LPA_PAUSE_ASYM;
2052 }
2053
2054 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else {
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	}
2057
2058 if (mac_mode != tp->mac_mode) {
2059 tp->mac_mode = mac_mode;
2060 tw32_f(MAC_MODE, tp->mac_mode);
2061 udelay(40);
2062 }
2063
2064 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2065 if (phydev->speed == SPEED_10)
2066 tw32(MAC_MI_STAT,
2067 MAC_MI_STAT_10MBPS_MODE |
2068 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2069 else
2070 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2071 }
2072
2073 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2074 tw32(MAC_TX_LENGTHS,
2075 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2076 (6 << TX_LENGTHS_IPG_SHIFT) |
2077 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2078 else
2079 tw32(MAC_TX_LENGTHS,
2080 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2081 (6 << TX_LENGTHS_IPG_SHIFT) |
2082 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2083
2084 if (phydev->link != tp->old_link ||
2085 phydev->speed != tp->link_config.active_speed ||
2086 phydev->duplex != tp->link_config.active_duplex ||
2087 oldflowctrl != tp->link_config.active_flowctrl)
2088 linkmesg = 1;
2089
2090 tp->old_link = phydev->link;
2091 tp->link_config.active_speed = phydev->speed;
2092 tp->link_config.active_duplex = phydev->duplex;
2093
2094 spin_unlock_bh(&tp->lock);
2095
2096 if (linkmesg)
2097 tg3_link_report(tp);
2098}
2099
2100static int tg3_phy_init(struct tg3 *tp)
2101{
2102 struct phy_device *phydev;
2103
2104 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2105 return 0;
2106
2107 /* Bring the PHY back to a known state. */
2108 tg3_bmcr_reset(tp);
2109
2110 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2111
2112 /* Attach the MAC to the PHY. */
2113 phydev = phy_connect(tp->dev, phydev_name(phydev),
2114 tg3_adjust_link, phydev->interface);
2115 if (IS_ERR(phydev)) {
2116 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2117 return PTR_ERR(phydev);
2118 }
2119
2120 /* Mask with MAC supported features. */
2121 switch (phydev->interface) {
2122 case PHY_INTERFACE_MODE_GMII:
2123 case PHY_INTERFACE_MODE_RGMII:
2124 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2125 phydev->supported &= (PHY_GBIT_FEATURES |
2126 SUPPORTED_Pause |
2127 SUPPORTED_Asym_Pause);
2128 break;
2129 }
		/* fall through */
2131 case PHY_INTERFACE_MODE_MII:
2132 phydev->supported &= (PHY_BASIC_FEATURES |
2133 SUPPORTED_Pause |
2134 SUPPORTED_Asym_Pause);
2135 break;
2136 default:
2137 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2138 return -EINVAL;
2139 }
2140
2141 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2142
2143 phydev->advertising = phydev->supported;
2144
2145 phy_attached_info(phydev);
2146
2147 return 0;
2148}
2149
2150static void tg3_phy_start(struct tg3 *tp)
2151{
2152 struct phy_device *phydev;
2153
2154 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2155 return;
2156
2157 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2158
2159 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2160 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2161 phydev->speed = tp->link_config.speed;
2162 phydev->duplex = tp->link_config.duplex;
2163 phydev->autoneg = tp->link_config.autoneg;
2164 phydev->advertising = tp->link_config.advertising;
2165 }
2166
2167 phy_start(phydev);
2168
2169 phy_start_aneg(phydev);
2170}
2171
2172static void tg3_phy_stop(struct tg3 *tp)
2173{
2174 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2175 return;
2176
2177 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2178}
2179
2180static void tg3_phy_fini(struct tg3 *tp)
2181{
2182 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2183 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2184 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2185 }
2186}
2187
2188static int tg3_phy_set_extloopbk(struct tg3 *tp)
2189{
2190 int err;
2191 u32 val;
2192
2193 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2194 return 0;
2195
2196 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2197 /* Cannot do read-modify-write on 5401 */
2198 err = tg3_phy_auxctl_write(tp,
2199 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2200 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2201 0x4c20);
2202 goto done;
2203 }
2204
2205 err = tg3_phy_auxctl_read(tp,
2206 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2207 if (err)
2208 return err;
2209
2210 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2211 err = tg3_phy_auxctl_write(tp,
2212 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2213
2214done:
2215 return err;
2216}
2217
2218static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2219{
2220 u32 phytest;
2221
2222 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2223 u32 phy;
2224
2225 tg3_writephy(tp, MII_TG3_FET_TEST,
2226 phytest | MII_TG3_FET_SHADOW_EN);
2227 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2228 if (enable)
2229 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2230 else
2231 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2232 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2233 }
2234 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2235 }
2236}
2237
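/* Toggle the PHY's auto power-down (APD) mode, which lets the PHY
 * power most of itself down while the link is down, waking on an
 * 84ms timer to check for a link partner.
 */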
2238static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2239{
2240 u32 reg;
2241
2242 if (!tg3_flag(tp, 5705_PLUS) ||
2243 (tg3_flag(tp, 5717_PLUS) &&
2244 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2245 return;
2246
2247 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2248 tg3_phy_fet_toggle_apd(tp, enable);
2249 return;
2250 }
2251
2252 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2253 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2254 MII_TG3_MISC_SHDW_SCR5_SDTL |
2255 MII_TG3_MISC_SHDW_SCR5_C125OE;
2256 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2257 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2258
2259 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2260
2262 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2263 if (enable)
2264 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2265
2266 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2267}
2268
2269static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2270{
2271 u32 phy;
2272
2273 if (!tg3_flag(tp, 5705_PLUS) ||
2274 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2275 return;
2276
2277 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2278 u32 ephy;
2279
2280 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2281 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2282
2283 tg3_writephy(tp, MII_TG3_FET_TEST,
2284 ephy | MII_TG3_FET_SHADOW_EN);
2285 if (!tg3_readphy(tp, reg, &phy)) {
2286 if (enable)
2287 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2288 else
2289 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2290 tg3_writephy(tp, reg, phy);
2291 }
2292 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2293 }
2294 } else {
2295 int ret;
2296
2297 ret = tg3_phy_auxctl_read(tp,
2298 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2299 if (!ret) {
2300 if (enable)
2301 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2302 else
2303 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2304 tg3_phy_auxctl_write(tp,
2305 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2306 }
2307 }
2308}
2309
2310static void tg3_phy_set_wirespeed(struct tg3 *tp)
2311{
2312 int ret;
2313 u32 val;
2314
2315 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2316 return;
2317
2318 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2319 if (!ret)
2320 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2321 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2322}
2323
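/* Unpack the factory-programmed OTP word cached in tp->phy_otp into
 * its bit-fields and write them out to the corresponding PHY DSP tap
 * and expansion registers.
 */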
2324static void tg3_phy_apply_otp(struct tg3 *tp)
2325{
2326 u32 otp, phy;
2327
2328 if (!tp->phy_otp)
2329 return;
2330
2331 otp = tp->phy_otp;
2332
2333 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2334 return;
2335
2336 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2337 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2338 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2339
2340 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2341 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2342 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2343
2344 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2345 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2346 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2347
2348 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2349 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2350
2351 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2352 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2353
2354 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2355 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2356 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2357
2358 tg3_phy_toggle_auxctl_smdsp(tp, false);
2359}
2360
2361static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2362{
2363 u32 val;
2364 struct ethtool_eee *dest = &tp->eee;
2365
2366 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2367 return;
2368
2369 if (eee)
2370 dest = eee;
2371
2372 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2373 return;
2374
2375 /* Pull eee_active */
2376 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2377 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2378 dest->eee_active = 1;
	} else {
		dest->eee_active = 0;
	}
2381
2382 /* Pull lp advertised settings */
2383 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2384 return;
2385 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2386
2387 /* Pull advertised and eee_enabled settings */
2388 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2389 return;
2390 dest->eee_enabled = !!val;
2391 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2392
2393 /* Pull tx_lpi_enabled */
2394 val = tr32(TG3_CPMU_EEE_MODE);
2395 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2396
2397 /* Pull lpi timer value */
2398 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2399}
2400
2401static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2402{
2403 u32 val;
2404
2405 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2406 return;
2407
2408 tp->setlpicnt = 0;
2409
2410 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2411 current_link_up &&
2412 tp->link_config.active_duplex == DUPLEX_FULL &&
2413 (tp->link_config.active_speed == SPEED_100 ||
2414 tp->link_config.active_speed == SPEED_1000)) {
2415 u32 eeectl;
2416
2417 if (tp->link_config.active_speed == SPEED_1000)
2418 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2419 else
2420 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2421
2422 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2423
2424 tg3_eee_pull_config(tp, NULL);
2425 if (tp->eee.eee_active)
2426 tp->setlpicnt = 2;
2427 }
2428
2429 if (!tp->setlpicnt) {
2430 if (current_link_up &&
2431 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2432 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2433 tg3_phy_toggle_auxctl_smdsp(tp, false);
2434 }
2435
2436 val = tr32(TG3_CPMU_EEE_MODE);
2437 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2438 }
2439}
2440
2441static void tg3_phy_eee_enable(struct tg3 *tp)
2442{
2443 u32 val;
2444
2445 if (tp->link_config.active_speed == SPEED_1000 &&
2446 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2447 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2448 tg3_flag(tp, 57765_CLASS)) &&
2449 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2450 val = MII_TG3_DSP_TAP26_ALNOKO |
2451 MII_TG3_DSP_TAP26_RMRXSTO;
2452 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2453 tg3_phy_toggle_auxctl_smdsp(tp, false);
2454 }
2455
2456 val = tr32(TG3_CPMU_EEE_MODE);
2457 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2458}
2459
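/* Poll the DSP control register until bit 0x1000 clears, which marks
 * completion of the previously issued DSP macro operation.
 */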
2460static int tg3_wait_macro_done(struct tg3 *tp)
2461{
2462 int limit = 100;
2463
2464 while (limit--) {
2465 u32 tmp32;
2466
2467 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2468 if ((tmp32 & 0x1000) == 0)
2469 break;
2470 }
2471 }
2472 if (limit < 0)
2473 return -EBUSY;
2474
2475 return 0;
2476}
2477
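/* Write a known test pattern into each of the four DSP channels and
 * read it back to verify. A macro timeout requests a fresh PHY reset
 * through *resetp; a readback mismatch just fails with -EBUSY.
 */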
2478static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2479{
2480 static const u32 test_pat[4][6] = {
2481 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2482 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2483 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2484 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2485 };
2486 int chan;
2487
2488 for (chan = 0; chan < 4; chan++) {
2489 int i;
2490
2491 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2492 (chan * 0x2000) | 0x0200);
2493 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2494
2495 for (i = 0; i < 6; i++)
2496 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2497 test_pat[chan][i]);
2498
2499 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2500 if (tg3_wait_macro_done(tp)) {
2501 *resetp = 1;
2502 return -EBUSY;
2503 }
2504
2505 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2506 (chan * 0x2000) | 0x0200);
2507 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2508 if (tg3_wait_macro_done(tp)) {
2509 *resetp = 1;
2510 return -EBUSY;
2511 }
2512
2513 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2514 if (tg3_wait_macro_done(tp)) {
2515 *resetp = 1;
2516 return -EBUSY;
2517 }
2518
2519 for (i = 0; i < 6; i += 2) {
2520 u32 low, high;
2521
2522 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2523 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2524 tg3_wait_macro_done(tp)) {
2525 *resetp = 1;
2526 return -EBUSY;
2527 }
2528 low &= 0x7fff;
2529 high &= 0x000f;
2530 if (low != test_pat[chan][i] ||
2531 high != test_pat[chan][i+1]) {
2532 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2533 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2534 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2535
2536 return -EBUSY;
2537 }
2538 }
2539 }
2540
2541 return 0;
2542}
2543
2544static int tg3_phy_reset_chanpat(struct tg3 *tp)
2545{
2546 int chan;
2547
2548 for (chan = 0; chan < 4; chan++) {
2549 int i;
2550
2551 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2552 (chan * 0x2000) | 0x0200);
2553 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2554 for (i = 0; i < 6; i++)
2555 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2556 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2557 if (tg3_wait_macro_done(tp))
2558 return -EBUSY;
2559 }
2560
2561 return 0;
2562}
2563
2564static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2565{
2566 u32 reg32, phy9_orig;
2567 int retries, do_phy_reset, err;
2568
2569 retries = 10;
2570 do_phy_reset = 1;
2571 do {
2572 if (do_phy_reset) {
2573 err = tg3_bmcr_reset(tp);
2574 if (err)
2575 return err;
2576 do_phy_reset = 0;
2577 }
2578
2579 /* Disable transmitter and interrupt. */
2580 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2581 continue;
2582
2583 reg32 |= 0x3000;
2584 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2585
		/* Set full-duplex, 1000 Mbps. */
2587 tg3_writephy(tp, MII_BMCR,
2588 BMCR_FULLDPLX | BMCR_SPEED1000);
2589
2590 /* Set to master mode. */
2591 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2592 continue;
2593
2594 tg3_writephy(tp, MII_CTRL1000,
2595 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2596
2597 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2598 if (err)
2599 return err;
2600
2601 /* Block the PHY control access. */
2602 tg3_phydsp_write(tp, 0x8005, 0x0800);
2603
2604 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2605 if (!err)
2606 break;
2607 } while (--retries);
2608
2609 err = tg3_phy_reset_chanpat(tp);
2610 if (err)
2611 return err;
2612
2613 tg3_phydsp_write(tp, 0x8005, 0x0000);
2614
2615 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2616 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2617
2618 tg3_phy_toggle_auxctl_smdsp(tp, false);
2619
2620 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2621
2622 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2623 if (err)
2624 return err;
2625
2626 reg32 &= ~0x3000;
2627 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2628
2629 return 0;
2630}
2631
2632static void tg3_carrier_off(struct tg3 *tp)
2633{
2634 netif_carrier_off(tp->dev);
2635 tp->link_up = false;
2636}
2637
2638static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2639{
2640 if (tg3_flag(tp, ENABLE_ASF))
2641 netdev_warn(tp->dev,
2642 "Management side-band traffic will be interrupted during phy settings change\n");
2643}
2644
/* Reset the tigon3 PHY and apply the chip-specific workarounds needed
 * to bring it back to a fully operational state.
 */
2648static int tg3_phy_reset(struct tg3 *tp)
2649{
2650 u32 val, cpmuctrl;
2651 int err;
2652
2653 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2654 val = tr32(GRC_MISC_CFG);
2655 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2656 udelay(40);
2657 }
2658 err = tg3_readphy(tp, MII_BMSR, &val);
2659 err |= tg3_readphy(tp, MII_BMSR, &val);
2660 if (err != 0)
2661 return -EBUSY;
2662
2663 if (netif_running(tp->dev) && tp->link_up) {
2664 netif_carrier_off(tp->dev);
2665 tg3_link_report(tp);
2666 }
2667
2668 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2669 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2670 tg3_asic_rev(tp) == ASIC_REV_5705) {
2671 err = tg3_phy_reset_5703_4_5(tp);
2672 if (err)
2673 return err;
2674 goto out;
2675 }
2676
2677 cpmuctrl = 0;
2678 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2679 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2680 cpmuctrl = tr32(TG3_CPMU_CTRL);
2681 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2682 tw32(TG3_CPMU_CTRL,
2683 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2684 }
2685
2686 err = tg3_bmcr_reset(tp);
2687 if (err)
2688 return err;
2689
2690 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2691 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2692 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2693
2694 tw32(TG3_CPMU_CTRL, cpmuctrl);
2695 }
2696
2697 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2698 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2699 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2700 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2701 CPMU_LSPD_1000MB_MACCLK_12_5) {
2702 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2703 udelay(40);
2704 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2705 }
2706 }
2707
2708 if (tg3_flag(tp, 5717_PLUS) &&
2709 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2710 return 0;
2711
2712 tg3_phy_apply_otp(tp);
2713
2714 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2715 tg3_phy_toggle_apd(tp, true);
2716 else
2717 tg3_phy_toggle_apd(tp, false);
2718
2719out:
2720 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2721 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2722 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2723 tg3_phydsp_write(tp, 0x000a, 0x0323);
2724 tg3_phy_toggle_auxctl_smdsp(tp, false);
2725 }
2726
2727 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2728 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2729 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2730 }
2731
2732 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2733 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2734 tg3_phydsp_write(tp, 0x000a, 0x310b);
2735 tg3_phydsp_write(tp, 0x201f, 0x9506);
2736 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2737 tg3_phy_toggle_auxctl_smdsp(tp, false);
2738 }
2739 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2740 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2741 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2742 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2743 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2744 tg3_writephy(tp, MII_TG3_TEST1,
2745 MII_TG3_TEST1_TRIM_EN | 0x4);
			} else {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
			}
2748
2749 tg3_phy_toggle_auxctl_smdsp(tp, false);
2750 }
2751 }
2752
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
2755 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2756 /* Cannot do read-modify-write on 5401 */
2757 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2758 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2759 /* Set bit 14 with read-modify-write to preserve other bits */
2760 err = tg3_phy_auxctl_read(tp,
2761 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2762 if (!err)
2763 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2764 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2765 }
2766
2767 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frame transmission.
2769 */
2770 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2771 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2772 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2773 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2774 }
2775
2776 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2777 /* adjust output voltage */
2778 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2779 }
2780
2781 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2782 tg3_phydsp_write(tp, 0xffb, 0x4000);
2783
2784 tg3_phy_toggle_automdix(tp, true);
2785 tg3_phy_set_wirespeed(tp);
2786 return 0;
2787}
2788
2789#define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2790#define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2791#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2792 TG3_GPIO_MSG_NEED_VAUX)
2793#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2794 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2795 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2796 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2797 (TG3_GPIO_MSG_DRVR_PRES << 12))
2798
2799#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2800 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2801 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2802 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2803 (TG3_GPIO_MSG_NEED_VAUX << 12))
2804
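/* Each PCI function owns a 4-bit field in a shared GPIO message word
 * (an APE register on 5717/5719, the CPMU driver-status register
 * otherwise). Update this function's field and return the whole word
 * so the caller can see what the other functions have requested.
 */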
2805static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2806{
2807 u32 status, shift;
2808
2809 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2810 tg3_asic_rev(tp) == ASIC_REV_5719)
2811 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2812 else
2813 status = tr32(TG3_CPMU_DRV_STATUS);
2814
2815 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2816 status &= ~(TG3_GPIO_MSG_MASK << shift);
2817 status |= (newstat << shift);
2818
2819 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2820 tg3_asic_rev(tp) == ASIC_REV_5719)
2821 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2822 else
2823 tw32(TG3_CPMU_DRV_STATUS, status);
2824
2825 return status >> TG3_APE_GPIO_MSG_SHIFT;
2826}
2827
2828static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2829{
2830 if (!tg3_flag(tp, IS_NIC))
2831 return 0;
2832
2833 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2834 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2835 tg3_asic_rev(tp) == ASIC_REV_5720) {
2836 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2837 return -EIO;
2838
2839 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2840
2841 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2842 TG3_GRC_LCLCTL_PWRSW_DELAY);
2843
2844 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2845 } else {
2846 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2847 TG3_GRC_LCLCTL_PWRSW_DELAY);
2848 }
2849
2850 return 0;
2851}
2852
2853static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2854{
2855 u32 grc_local_ctrl;
2856
2857 if (!tg3_flag(tp, IS_NIC) ||
2858 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2859 tg3_asic_rev(tp) == ASIC_REV_5701)
2860 return;
2861
2862 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2863
2864 tw32_wait_f(GRC_LOCAL_CTRL,
2865 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2866 TG3_GRC_LCLCTL_PWRSW_DELAY);
2867
2868 tw32_wait_f(GRC_LOCAL_CTRL,
2869 grc_local_ctrl,
2870 TG3_GRC_LCLCTL_PWRSW_DELAY);
2871
2872 tw32_wait_f(GRC_LOCAL_CTRL,
2873 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2874 TG3_GRC_LCLCTL_PWRSW_DELAY);
2875}
2876
2877static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2878{
2879 if (!tg3_flag(tp, IS_NIC))
2880 return;
2881
2882 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2883 tg3_asic_rev(tp) == ASIC_REV_5701) {
2884 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2885 (GRC_LCLCTRL_GPIO_OE0 |
2886 GRC_LCLCTRL_GPIO_OE1 |
2887 GRC_LCLCTRL_GPIO_OE2 |
2888 GRC_LCLCTRL_GPIO_OUTPUT0 |
2889 GRC_LCLCTRL_GPIO_OUTPUT1),
2890 TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2892 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2893 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2894 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2895 GRC_LCLCTRL_GPIO_OE1 |
2896 GRC_LCLCTRL_GPIO_OE2 |
2897 GRC_LCLCTRL_GPIO_OUTPUT0 |
2898 GRC_LCLCTRL_GPIO_OUTPUT1 |
2899 tp->grc_local_ctrl;
2900 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2901 TG3_GRC_LCLCTL_PWRSW_DELAY);
2902
2903 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2904 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2905 TG3_GRC_LCLCTL_PWRSW_DELAY);
2906
2907 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2908 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2909 TG3_GRC_LCLCTL_PWRSW_DELAY);
2910 } else {
2911 u32 no_gpio2;
2912 u32 grc_local_ctrl = 0;
2913
		/* Workaround to avoid drawing too much current. */
2915 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2916 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2917 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2918 grc_local_ctrl,
2919 TG3_GRC_LCLCTL_PWRSW_DELAY);
2920 }
2921
2922 /* On 5753 and variants, GPIO2 cannot be used. */
2923 no_gpio2 = tp->nic_sram_data_cfg &
2924 NIC_SRAM_DATA_CFG_NO_GPIO2;
2925
2926 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2927 GRC_LCLCTRL_GPIO_OE1 |
2928 GRC_LCLCTRL_GPIO_OE2 |
2929 GRC_LCLCTRL_GPIO_OUTPUT1 |
2930 GRC_LCLCTRL_GPIO_OUTPUT2;
2931 if (no_gpio2) {
2932 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2933 GRC_LCLCTRL_GPIO_OUTPUT2);
2934 }
2935 tw32_wait_f(GRC_LOCAL_CTRL,
2936 tp->grc_local_ctrl | grc_local_ctrl,
2937 TG3_GRC_LCLCTL_PWRSW_DELAY);
2938
2939 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2940
2941 tw32_wait_f(GRC_LOCAL_CTRL,
2942 tp->grc_local_ctrl | grc_local_ctrl,
2943 TG3_GRC_LCLCTL_PWRSW_DELAY);
2944
2945 if (!no_gpio2) {
2946 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2947 tw32_wait_f(GRC_LOCAL_CTRL,
2948 tp->grc_local_ctrl | grc_local_ctrl,
2949 TG3_GRC_LCLCTL_PWRSW_DELAY);
2950 }
2951 }
2952}
2953
2954static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2955{
2956 u32 msg = 0;
2957
2958 /* Serialize power state transitions */
2959 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2960 return;
2961
2962 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2963 msg = TG3_GPIO_MSG_NEED_VAUX;
2964
2965 msg = tg3_set_function_status(tp, msg);
2966
2967 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2968 goto done;
2969
2970 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2971 tg3_pwrsrc_switch_to_vaux(tp);
2972 else
2973 tg3_pwrsrc_die_with_vmain(tp);
2974
2975done:
2976 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2977}
2978
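/* Decide whether this device (and, on dual-port chips, its peer)
 * still needs auxiliary power (Vaux) for WOL or ASF and switch the
 * power source accordingly.
 */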
2979static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2980{
2981 bool need_vaux = false;
2982
2983 /* The GPIOs do something completely different on 57765. */
2984 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2985 return;
2986
2987 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2988 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2989 tg3_asic_rev(tp) == ASIC_REV_5720) {
2990 tg3_frob_aux_power_5717(tp, include_wol ?
2991 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2992 return;
2993 }
2994
2995 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2996 struct net_device *dev_peer;
2997
2998 dev_peer = pci_get_drvdata(tp->pdev_peer);
2999
3000 /* remove_one() may have been run on the peer. */
3001 if (dev_peer) {
3002 struct tg3 *tp_peer = netdev_priv(dev_peer);
3003
3004 if (tg3_flag(tp_peer, INIT_COMPLETE))
3005 return;
3006
3007 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3008 tg3_flag(tp_peer, ENABLE_ASF))
3009 need_vaux = true;
3010 }
3011 }
3012
3013 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3014 tg3_flag(tp, ENABLE_ASF))
3015 need_vaux = true;
3016
3017 if (need_vaux)
3018 tg3_pwrsrc_switch_to_vaux(tp);
3019 else
3020 tg3_pwrsrc_die_with_vmain(tp);
3021}
3022
3023static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3024{
3025 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3026 return 1;
3027 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3028 if (speed != SPEED_10)
3029 return 1;
3030 } else if (speed == SPEED_10)
3031 return 1;
3032
3033 return 0;
3034}
3035
3036static bool tg3_phy_power_bug(struct tg3 *tp)
3037{
3038 switch (tg3_asic_rev(tp)) {
3039 case ASIC_REV_5700:
3040 case ASIC_REV_5704:
3041 return true;
3042 case ASIC_REV_5780:
3043 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3044 return true;
3045 return false;
3046 case ASIC_REV_5717:
3047 if (!tp->pci_fn)
3048 return true;
3049 return false;
3050 case ASIC_REV_5719:
3051 case ASIC_REV_5720:
3052 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3053 !tp->pci_fn)
3054 return true;
3055 return false;
3056 }
3057
3058 return false;
3059}
3060
3061static bool tg3_phy_led_bug(struct tg3 *tp)
3062{
3063 switch (tg3_asic_rev(tp)) {
3064 case ASIC_REV_5719:
3065 case ASIC_REV_5720:
3066 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3067 !tp->pci_fn)
3068 return true;
3069 return false;
3070 }
3071
3072 return false;
3073}
3074
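/* Put the PHY into its lowest safe power state, honoring the various
 * chip-specific bugs that make a full power-down unsafe (see
 * tg3_phy_power_bug() above).
 */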
3075static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3076{
3077 u32 val;
3078
3079 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3080 return;
3081
3082 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3083 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3084 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3085 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3086
3087 sg_dig_ctrl |=
3088 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3089 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3090 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3091 }
3092 return;
3093 }
3094
3095 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3096 tg3_bmcr_reset(tp);
3097 val = tr32(GRC_MISC_CFG);
3098 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3099 udelay(40);
3100 return;
3101 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3102 u32 phytest;
3103 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3104 u32 phy;
3105
3106 tg3_writephy(tp, MII_ADVERTISE, 0);
3107 tg3_writephy(tp, MII_BMCR,
3108 BMCR_ANENABLE | BMCR_ANRESTART);
3109
3110 tg3_writephy(tp, MII_TG3_FET_TEST,
3111 phytest | MII_TG3_FET_SHADOW_EN);
3112 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3113 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3114 tg3_writephy(tp,
3115 MII_TG3_FET_SHDW_AUXMODE4,
3116 phy);
3117 }
3118 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3119 }
3120 return;
3121 } else if (do_low_power) {
3122 if (!tg3_phy_led_bug(tp))
3123 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3124 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3125
3126 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3127 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3128 MII_TG3_AUXCTL_PCTL_VREG_11V;
3129 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3130 }
3131
3132 /* The PHY should not be powered down on some chips because
3133 * of bugs.
3134 */
3135 if (tg3_phy_power_bug(tp))
3136 return;
3137
3138 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3139 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3140 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3141 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3142 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3143 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3144 }
3145
3146 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3147}
3148
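/* Acquire the chip's NVRAM software arbitration semaphore. The lock
 * is reference counted via tp->nvram_lock_cnt, so nested callers only
 * touch the hardware on the first lock and the last unlock.
 */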
3149/* tp->lock is held. */
3150static int tg3_nvram_lock(struct tg3 *tp)
3151{
3152 if (tg3_flag(tp, NVRAM)) {
3153 int i;
3154
3155 if (tp->nvram_lock_cnt == 0) {
3156 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3157 for (i = 0; i < 8000; i++) {
3158 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3159 break;
3160 udelay(20);
3161 }
3162 if (i == 8000) {
3163 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3164 return -ENODEV;
3165 }
3166 }
3167 tp->nvram_lock_cnt++;
3168 }
3169 return 0;
3170}
3171
3172/* tp->lock is held. */
3173static void tg3_nvram_unlock(struct tg3 *tp)
3174{
3175 if (tg3_flag(tp, NVRAM)) {
3176 if (tp->nvram_lock_cnt > 0)
3177 tp->nvram_lock_cnt--;
3178 if (tp->nvram_lock_cnt == 0)
3179 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3180 }
3181}
3182
3183/* tp->lock is held. */
3184static void tg3_enable_nvram_access(struct tg3 *tp)
3185{
3186 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3187 u32 nvaccess = tr32(NVRAM_ACCESS);
3188
3189 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3190 }
3191}
3192
3193/* tp->lock is held. */
3194static void tg3_disable_nvram_access(struct tg3 *tp)
3195{
3196 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3197 u32 nvaccess = tr32(NVRAM_ACCESS);
3198
3199 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3200 }
3201}
3202
3203static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3204 u32 offset, u32 *val)
3205{
3206 u32 tmp;
3207 int i;
3208
3209 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3210 return -EINVAL;
3211
3212 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3213 EEPROM_ADDR_DEVID_MASK |
3214 EEPROM_ADDR_READ);
3215 tw32(GRC_EEPROM_ADDR,
3216 tmp |
3217 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3218 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3219 EEPROM_ADDR_ADDR_MASK) |
3220 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3221
3222 for (i = 0; i < 1000; i++) {
3223 tmp = tr32(GRC_EEPROM_ADDR);
3224
3225 if (tmp & EEPROM_ADDR_COMPLETE)
3226 break;
3227 msleep(1);
3228 }
3229 if (!(tmp & EEPROM_ADDR_COMPLETE))
3230 return -EBUSY;
3231
3232 tmp = tr32(GRC_EEPROM_DATA);
3233
3234 /*
3235 * The data will always be opposite the native endian
3236 * format. Perform a blind byteswap to compensate.
3237 */
3238 *val = swab32(tmp);
3239
3240 return 0;
3241}
3242
3243#define NVRAM_CMD_TIMEOUT 10000
3244
3245static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3246{
3247 int i;
3248
3249 tw32(NVRAM_CMD, nvram_cmd);
3250 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3251 usleep_range(10, 40);
3252 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3253 udelay(10);
3254 break;
3255 }
3256 }
3257
3258 if (i == NVRAM_CMD_TIMEOUT)
3259 return -EBUSY;
3260
3261 return 0;
3262}
3263
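/* Translate a linear NVRAM offset into the page/byte "physical"
 * address used by Atmel AT45DB0X1B-style flashes, whose page number
 * starts at bit ATMEL_AT45DB0X1B_PAGE_POS instead of at a power-of-two
 * boundary. E.g. with this driver's usual 264-byte pages and a page
 * field at bit 9, offset 1000 is page 3, byte 208: (3 << 9) + 208.
 */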
3264static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3265{
3266 if (tg3_flag(tp, NVRAM) &&
3267 tg3_flag(tp, NVRAM_BUFFERED) &&
3268 tg3_flag(tp, FLASH) &&
3269 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3270 (tp->nvram_jedecnum == JEDEC_ATMEL))
3271
3272 addr = ((addr / tp->nvram_pagesize) <<
3273 ATMEL_AT45DB0X1B_PAGE_POS) +
3274 (addr % tp->nvram_pagesize);
3275
3276 return addr;
3277}
3278
3279static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3280{
3281 if (tg3_flag(tp, NVRAM) &&
3282 tg3_flag(tp, NVRAM_BUFFERED) &&
3283 tg3_flag(tp, FLASH) &&
3284 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3285 (tp->nvram_jedecnum == JEDEC_ATMEL))
3286
3287 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3288 tp->nvram_pagesize) +
3289 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3290
3291 return addr;
3292}
3293
3294/* NOTE: Data read in from NVRAM is byteswapped according to
3295 * the byteswapping settings for all other register accesses.
3296 * tg3 devices are BE devices, so on a BE machine, the data
3297 * returned will be exactly as it is seen in NVRAM. On a LE
3298 * machine, the 32-bit value will be byteswapped.
3299 */
3300static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3301{
3302 int ret;
3303
3304 if (!tg3_flag(tp, NVRAM))
3305 return tg3_nvram_read_using_eeprom(tp, offset, val);
3306
3307 offset = tg3_nvram_phys_addr(tp, offset);
3308
3309 if (offset > NVRAM_ADDR_MSK)
3310 return -EINVAL;
3311
3312 ret = tg3_nvram_lock(tp);
3313 if (ret)
3314 return ret;
3315
3316 tg3_enable_nvram_access(tp);
3317
3318 tw32(NVRAM_ADDR, offset);
3319 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3320 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3321
3322 if (ret == 0)
3323 *val = tr32(NVRAM_RDDATA);
3324
3325 tg3_disable_nvram_access(tp);
3326
3327 tg3_nvram_unlock(tp);
3328
3329 return ret;
3330}
3331
3332/* Ensures NVRAM data is in bytestream format. */
3333static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3334{
3335 u32 v;
3336 int res = tg3_nvram_read(tp, offset, &v);
3337 if (!res)
3338 *val = cpu_to_be32(v);
3339 return res;
3340}
3341
3342static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3343 u32 offset, u32 len, u8 *buf)
3344{
3345 int i, j, rc = 0;
3346 u32 val;
3347
3348 for (i = 0; i < len; i += 4) {
3349 u32 addr;
3350 __be32 data;
3351
3352 addr = offset + i;
3353
3354 memcpy(&data, buf + i, 4);
3355
3356 /*
3357 * The SEEPROM interface expects the data to always be opposite
3358 * the native endian format. We accomplish this by reversing
3359 * all the operations that would have been performed on the
3360 * data from a call to tg3_nvram_read_be32().
3361 */
3362 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3363
3364 val = tr32(GRC_EEPROM_ADDR);
3365 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3366
3367 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3368 EEPROM_ADDR_READ);
3369 tw32(GRC_EEPROM_ADDR, val |
3370 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3371 (addr & EEPROM_ADDR_ADDR_MASK) |
3372 EEPROM_ADDR_START |
3373 EEPROM_ADDR_WRITE);
3374
3375 for (j = 0; j < 1000; j++) {
3376 val = tr32(GRC_EEPROM_ADDR);
3377
3378 if (val & EEPROM_ADDR_COMPLETE)
3379 break;
3380 msleep(1);
3381 }
3382 if (!(val & EEPROM_ADDR_COMPLETE)) {
3383 rc = -EBUSY;
3384 break;
3385 }
3386 }
3387
3388 return rc;
3389}
3390
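/* Unbuffered flash parts can only be programmed a full page at a
 * time, so each pass reads the enclosing page, merges in the caller's
 * data, erases the page, and programs it back one word at a time.
 */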
3391/* offset and length are dword aligned */
3392static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3393 u8 *buf)
3394{
3395 int ret = 0;
3396 u32 pagesize = tp->nvram_pagesize;
3397 u32 pagemask = pagesize - 1;
3398 u32 nvram_cmd;
3399 u8 *tmp;
3400
3401 tmp = kmalloc(pagesize, GFP_KERNEL);
3402 if (tmp == NULL)
3403 return -ENOMEM;
3404
3405 while (len) {
3406 int j;
3407 u32 phy_addr, page_off, size;
3408
3409 phy_addr = offset & ~pagemask;
3410
3411 for (j = 0; j < pagesize; j += 4) {
3412 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3413 (__be32 *) (tmp + j));
3414 if (ret)
3415 break;
3416 }
3417 if (ret)
3418 break;
3419
3420 page_off = offset & pagemask;
3421 size = pagesize;
3422 if (len < size)
3423 size = len;
3424
3425 len -= size;
3426
3427 memcpy(tmp + page_off, buf, size);
3428
3429 offset = offset + (pagesize - page_off);
3430
3431 tg3_enable_nvram_access(tp);
3432
3433 /*
3434 * Before we can erase the flash page, we need
3435 * to issue a special "write enable" command.
3436 */
3437 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3438
3439 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3440 break;
3441
3442 /* Erase the target page */
3443 tw32(NVRAM_ADDR, phy_addr);
3444
3445 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3446 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3447
3448 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3449 break;
3450
3451 /* Issue another write enable to start the write. */
3452 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3453
3454 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3455 break;
3456
3457 for (j = 0; j < pagesize; j += 4) {
3458 __be32 data;
3459
3460 data = *((__be32 *) (tmp + j));
3461
3462 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3463
3464 tw32(NVRAM_ADDR, phy_addr + j);
3465
3466 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3467 NVRAM_CMD_WR;
3468
3469 if (j == 0)
3470 nvram_cmd |= NVRAM_CMD_FIRST;
3471 else if (j == (pagesize - 4))
3472 nvram_cmd |= NVRAM_CMD_LAST;
3473
3474 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3475 if (ret)
3476 break;
3477 }
3478 if (ret)
3479 break;
3480 }
3481
3482 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3483 tg3_nvram_exec_cmd(tp, nvram_cmd);
3484
3485 kfree(tmp);
3486
3487 return ret;
3488}
3489
3490/* offset and length are dword aligned */
3491static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3492 u8 *buf)
3493{
3494 int i, ret = 0;
3495
3496 for (i = 0; i < len; i += 4, offset += 4) {
3497 u32 page_off, phy_addr, nvram_cmd;
3498 __be32 data;
3499
3500 memcpy(&data, buf + i, 4);
3501 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3502
3503 page_off = offset % tp->nvram_pagesize;
3504
3505 phy_addr = tg3_nvram_phys_addr(tp, offset);
3506
3507 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3508
3509 if (page_off == 0 || i == 0)
3510 nvram_cmd |= NVRAM_CMD_FIRST;
3511 if (page_off == (tp->nvram_pagesize - 4))
3512 nvram_cmd |= NVRAM_CMD_LAST;
3513
3514 if (i == (len - 4))
3515 nvram_cmd |= NVRAM_CMD_LAST;
3516
3517 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3518 !tg3_flag(tp, FLASH) ||
3519 !tg3_flag(tp, 57765_PLUS))
3520 tw32(NVRAM_ADDR, phy_addr);
3521
3522 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3523 !tg3_flag(tp, 5755_PLUS) &&
3524 (tp->nvram_jedecnum == JEDEC_ST) &&
3525 (nvram_cmd & NVRAM_CMD_FIRST)) {
3526 u32 cmd;
3527
3528 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3529 ret = tg3_nvram_exec_cmd(tp, cmd);
3530 if (ret)
3531 break;
3532 }
3533 if (!tg3_flag(tp, FLASH)) {
3534 /* We always do complete word writes to eeprom. */
3535 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3536 }
3537
3538 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3539 if (ret)
3540 break;
3541 }
3542 return ret;
3543}
3544
3545/* offset and length are dword aligned */
3546static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3547{
3548 int ret;
3549
3550 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3551 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3552 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3553 udelay(40);
3554 }
3555
3556 if (!tg3_flag(tp, NVRAM)) {
3557 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3558 } else {
3559 u32 grc_mode;
3560
3561 ret = tg3_nvram_lock(tp);
3562 if (ret)
3563 return ret;
3564
3565 tg3_enable_nvram_access(tp);
3566 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3567 tw32(NVRAM_WRITE1, 0x406);
3568
3569 grc_mode = tr32(GRC_MODE);
3570 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3571
3572 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3573 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3574 buf);
3575 } else {
3576 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3577 buf);
3578 }
3579
3580 grc_mode = tr32(GRC_MODE);
3581 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3582
3583 tg3_disable_nvram_access(tp);
3584 tg3_nvram_unlock(tp);
3585 }
3586
3587 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3588 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3589 udelay(40);
3590 }
3591
3592 return ret;
3593}
3594
3595#define RX_CPU_SCRATCH_BASE 0x30000
3596#define RX_CPU_SCRATCH_SIZE 0x04000
3597#define TX_CPU_SCRATCH_BASE 0x34000
3598#define TX_CPU_SCRATCH_SIZE 0x04000
3599
3600/* tp->lock is held. */
3601static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3602{
3603 int i;
3604 const int iters = 10000;
3605
3606 for (i = 0; i < iters; i++) {
3607 tw32(cpu_base + CPU_STATE, 0xffffffff);
3608 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3609 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3610 break;
3611 if (pci_channel_offline(tp->pdev))
3612 return -EBUSY;
3613 }
3614
3615 return (i == iters) ? -EBUSY : 0;
3616}
3617
3618/* tp->lock is held. */
3619static int tg3_rxcpu_pause(struct tg3 *tp)
3620{
3621 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3622
3623 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3624 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3625 udelay(10);
3626
3627 return rc;
3628}
3629
3630/* tp->lock is held. */
3631static int tg3_txcpu_pause(struct tg3 *tp)
3632{
3633 return tg3_pause_cpu(tp, TX_CPU_BASE);
3634}
3635
3636/* tp->lock is held. */
3637static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3638{
3639 tw32(cpu_base + CPU_STATE, 0xffffffff);
3640 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3641}
3642
3643/* tp->lock is held. */
3644static void tg3_rxcpu_resume(struct tg3 *tp)
3645{
3646 tg3_resume_cpu(tp, RX_CPU_BASE);
3647}
3648
3649/* tp->lock is held. */
3650static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3651{
3652 int rc;
3653
3654 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3655
3656 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3657 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3658
3659 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3660 return 0;
3661 }
3662 if (cpu_base == RX_CPU_BASE) {
3663 rc = tg3_rxcpu_pause(tp);
3664 } else {
3665 /*
3666 * There is only an Rx CPU for the 5750 derivative in the
3667 * BCM4785.
3668 */
3669 if (tg3_flag(tp, IS_SSB_CORE))
3670 return 0;
3671
3672 rc = tg3_txcpu_pause(tp);
3673 }
3674
3675 if (rc) {
3676 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3677 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3678 return -ENODEV;
3679 }
3680
3681 /* Clear firmware's nvram arbitration. */
3682 if (tg3_flag(tp, NVRAM))
3683 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3684 return 0;
3685}
3686
3687static int tg3_fw_data_len(struct tg3 *tp,
3688 const struct tg3_firmware_hdr *fw_hdr)
3689{
3690 int fw_len;
3691
	/* Non-fragmented firmware has one firmware header followed by a
	 * contiguous chunk of data to be written. The length field in that
	 * header is not the length of the data to be written but the
	 * complete length of the bss. The data length is determined from
	 * tp->fw->size minus the headers.
	 *
	 * Fragmented firmware has a main header followed by multiple
	 * fragments. Each fragment is identical to non-fragmented firmware,
	 * with a firmware header followed by a contiguous chunk of data. In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header, the length is the entire size of that
	 * fragment, i.e. fragment data plus header length. The data length
	 * is therefore the length field in the header minus TG3_FW_HDR_LEN.
	 */
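	/* Illustration of the two layouts:
	 *
	 *   plain:      [hdr][ data ........................ ]
	 *   fragmented: [main hdr len=0xffffffff][hdr][data][hdr][data]...
	 */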
3706 if (tp->fw_len == 0xffffffff)
3707 fw_len = be32_to_cpu(fw_hdr->len);
3708 else
3709 fw_len = tp->fw->size;
3710
3711 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3712}
3713
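/* Halt the target CPU, zero its scratch memory, then copy each
 * firmware fragment into scratch space at the offset named by its
 * header. On the 57766 the halt/zero step is skipped; the caller
 * pauses the RX CPU instead and only the copy happens here.
 */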
3714/* tp->lock is held. */
3715static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3716 u32 cpu_scratch_base, int cpu_scratch_size,
3717 const struct tg3_firmware_hdr *fw_hdr)
3718{
3719 int err, i;
3720 void (*write_op)(struct tg3 *, u32, u32);
3721 int total_len = tp->fw->size;
3722
3723 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3724 netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware on a 5705 or later chip\n",
3726 __func__);
3727 return -EINVAL;
3728 }
3729
3730 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3731 write_op = tg3_write_mem;
3732 else
3733 write_op = tg3_write_indirect_reg32;
3734
3735 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3736 /* It is possible that bootcode is still loading at this point.
3737 * Get the nvram lock first before halting the cpu.
3738 */
3739 int lock_err = tg3_nvram_lock(tp);
3740 err = tg3_halt_cpu(tp, cpu_base);
3741 if (!lock_err)
3742 tg3_nvram_unlock(tp);
3743 if (err)
3744 goto out;
3745
3746 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3747 write_op(tp, cpu_scratch_base + i, 0);
3748 tw32(cpu_base + CPU_STATE, 0xffffffff);
3749 tw32(cpu_base + CPU_MODE,
3750 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3751 } else {
		 * advance to the first fragment.
3753 * advance to the first fragment
3754 */
3755 total_len -= TG3_FW_HDR_LEN;
3756 fw_hdr++;
3757 }
3758
3759 do {
3760 u32 *fw_data = (u32 *)(fw_hdr + 1);
3761 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3762 write_op(tp, cpu_scratch_base +
3763 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3764 (i * sizeof(u32)),
3765 be32_to_cpu(fw_data[i]));
3766
3767 total_len -= be32_to_cpu(fw_hdr->len);
3768
3769 /* Advance to next fragment */
3770 fw_hdr = (struct tg3_firmware_hdr *)
3771 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3772 } while (total_len > 0);
3773
3774 err = 0;
3775
3776out:
3777 return err;
3778}
3779
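/* Halt the CPU and point its program counter at the freshly loaded
 * firmware, retrying a few times until a readback of CPU_PC confirms
 * the new value stuck.
 */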
3780/* tp->lock is held. */
3781static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3782{
3783 int i;
3784 const int iters = 5;
3785
3786 tw32(cpu_base + CPU_STATE, 0xffffffff);
3787 tw32_f(cpu_base + CPU_PC, pc);
3788
3789 for (i = 0; i < iters; i++) {
3790 if (tr32(cpu_base + CPU_PC) == pc)
3791 break;
3792 tw32(cpu_base + CPU_STATE, 0xffffffff);
3793 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3794 tw32_f(cpu_base + CPU_PC, pc);
3795 udelay(1000);
3796 }
3797
3798 return (i == iters) ? -EBUSY : 0;
3799}
3800
3801/* tp->lock is held. */
3802static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3803{
3804 const struct tg3_firmware_hdr *fw_hdr;
3805 int err;
3806
3807 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3808
	/* Firmware blob starts with version numbers, followed by
	 * start address and length. We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */
3814
3815 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3816 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3817 fw_hdr);
3818 if (err)
3819 return err;
3820
3821 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3822 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3823 fw_hdr);
3824 if (err)
3825 return err;
3826
3827 /* Now startup only the RX cpu. */
3828 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3829 be32_to_cpu(fw_hdr->base_addr));
3830 if (err) {
3831 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3832 "should be %08x\n", __func__,
3833 tr32(RX_CPU_BASE + CPU_PC),
3834 be32_to_cpu(fw_hdr->base_addr));
3835 return -ENODEV;
3836 }
3837
3838 tg3_rxcpu_resume(tp);
3839
3840 return 0;
3841}
3842
3843static int tg3_validate_rxcpu_state(struct tg3 *tp)
3844{
3845 const int iters = 1000;
3846 int i;
3847 u32 val;
3848
3849 /* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches.
3851 */
3852 for (i = 0; i < iters; i++) {
3853 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3854 break;
3855
3856 udelay(10);
3857 }
3858
3859 if (i == iters) {
3860 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3861 return -EBUSY;
3862 }
3863
3864 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3865 if (val & 0xff) {
3866 netdev_warn(tp->dev,
3867 "Other patches exist. Not downloading EEE patch\n");
3868 return -EEXIST;
3869 }
3870
3871 return 0;
3872}
3873
3874/* tp->lock is held. */
3875static void tg3_load_57766_firmware(struct tg3 *tp)
3876{
3877 struct tg3_firmware_hdr *fw_hdr;
3878
3879 if (!tg3_flag(tp, NO_NVRAM))
3880 return;
3881
3882 if (tg3_validate_rxcpu_state(tp))
3883 return;
3884
3885 if (!tp->fw)
3886 return;
3887
	/* This firmware blob has a different format from older firmware
	 * releases, as described below. The main difference is that the
	 * data is fragmented and must be written to non-contiguous
	 * locations.
	 *
	 * The blob begins with a firmware header identical to other
	 * firmware, consisting of version, base addr and length. The
	 * length here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments, each of
	 * which is laid out like older firmware: a firmware header
	 * followed by the data for that fragment. The version field of
	 * the individual fragment headers is unused.
	 */
3901
3902 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3903 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3904 return;
3905
3906 if (tg3_rxcpu_pause(tp))
3907 return;
3908
3909 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3910 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3911
3912 tg3_rxcpu_resume(tp);
3913}
3914
3915/* tp->lock is held. */
3916static int tg3_load_tso_firmware(struct tg3 *tp)
3917{
3918 const struct tg3_firmware_hdr *fw_hdr;
3919 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3920 int err;
3921
3922 if (!tg3_flag(tp, FW_TSO))
3923 return 0;
3924
3925 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3926
	/* Firmware blob starts with version numbers, followed by
	 * start address and length. We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */
3932
3933 cpu_scratch_size = tp->fw_len;
3934
3935 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3936 cpu_base = RX_CPU_BASE;
3937 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3938 } else {
3939 cpu_base = TX_CPU_BASE;
3940 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3941 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3942 }
3943
3944 err = tg3_load_firmware_cpu(tp, cpu_base,
3945 cpu_scratch_base, cpu_scratch_size,
3946 fw_hdr);
3947 if (err)
3948 return err;
3949
3950 /* Now startup the cpu. */
3951 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3952 be32_to_cpu(fw_hdr->base_addr));
3953 if (err) {
3954 netdev_err(tp->dev,
3955 "%s fails to set CPU PC, is %08x should be %08x\n",
3956 __func__, tr32(cpu_base + CPU_PC),
3957 be32_to_cpu(fw_hdr->base_addr));
3958 return -ENODEV;
3959 }
3960
3961 tg3_resume_cpu(tp, cpu_base);
3962 return 0;
3963}
3964
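/* The MAC address registers hold the address as a big-endian pair:
 * the HIGH register carries bytes 0-1 and the LOW register bytes 2-5,
 * so e.g. 00:11:22:33:44:55 is written as high 0x0011, low 0x22334455.
 */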
3965/* tp->lock is held. */
3966static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3967{
3968 u32 addr_high, addr_low;
3969
3970 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3971 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3972 (mac_addr[4] << 8) | mac_addr[5]);
3973
3974 if (index < 4) {
3975 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3976 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3977 } else {
3978 index -= 4;
3979 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3980 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3981 }
3982}
3983
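/* Worked example (made-up address): for MAC 00:10:18:aa:bb:cc the code
 * above computes addr_high = 0x0010 and addr_low = 0x18aabbcc, i.e. the
 * first two octets occupy the low 16 bits of the HIGH register and the
 * remaining four octets fill the LOW register, most significant first.
 */
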
3984/* tp->lock is held. */
3985static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3986{
3987 u32 addr_high;
3988 int i;
3989
3990 for (i = 0; i < 4; i++) {
3991 if (i == 1 && skip_mac_1)
3992 continue;
3993 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3994 }
3995
3996 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3997 tg3_asic_rev(tp) == ASIC_REV_5704) {
3998 for (i = 4; i < 16; i++)
3999 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
4000 }
4001
4002 addr_high = (tp->dev->dev_addr[0] +
4003 tp->dev->dev_addr[1] +
4004 tp->dev->dev_addr[2] +
4005 tp->dev->dev_addr[3] +
4006 tp->dev->dev_addr[4] +
4007 tp->dev->dev_addr[5]) &
4008 TX_BACKOFF_SEED_MASK;
4009 tw32(MAC_TX_BACKOFF_SEED, addr_high);
4010}
4011
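/* Worked example (same made-up address): the octet sum for
 * 00:10:18:aa:bb:cc is 0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259,
 * so 0x259 & TX_BACKOFF_SEED_MASK is written to MAC_TX_BACKOFF_SEED.
 * Deriving the seed from the MAC address keeps the half-duplex
 * retransmission backoff sequences of different stations decorrelated.
 */
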
4012static void tg3_enable_register_access(struct tg3 *tp)
4013{
4014 /*
4015 * Make sure register accesses (indirect or otherwise) will function
4016 * correctly.
4017 */
4018 pci_write_config_dword(tp->pdev,
4019 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4020}
4021
4022static int tg3_power_up(struct tg3 *tp)
4023{
4024 int err;
4025
4026 tg3_enable_register_access(tp);
4027
4028 err = pci_set_power_state(tp->pdev, PCI_D0);
4029 if (!err) {
4030 /* Switch out of Vaux if it is a NIC */
4031 tg3_pwrsrc_switch_to_vmain(tp);
4032 } else {
4033 netdev_err(tp->dev, "Transition to D0 failed\n");
4034 }
4035
4036 return err;
4037}
4038
4039static int tg3_setup_phy(struct tg3 *, bool);
4040
4041static int tg3_power_down_prepare(struct tg3 *tp)
4042{
4043 u32 misc_host_ctrl;
4044 bool device_should_wake, do_low_power;
4045
4046 tg3_enable_register_access(tp);
4047
4048 /* Restore the CLKREQ setting. */
4049 if (tg3_flag(tp, CLKREQ_BUG))
4050 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4051 PCI_EXP_LNKCTL_CLKREQ_EN);
4052
4053 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4054 tw32(TG3PCI_MISC_HOST_CTRL,
4055 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4056
4057 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4058 tg3_flag(tp, WOL_ENABLE);
4059
4060 if (tg3_flag(tp, USE_PHYLIB)) {
4061 do_low_power = false;
4062 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4063 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4064 struct phy_device *phydev;
4065 u32 phyid, advertising;
4066
4067 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4068
4069 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4070
4071 tp->link_config.speed = phydev->speed;
4072 tp->link_config.duplex = phydev->duplex;
4073 tp->link_config.autoneg = phydev->autoneg;
4074 tp->link_config.advertising = phydev->advertising;
4075
4076 advertising = ADVERTISED_TP |
4077 ADVERTISED_Pause |
4078 ADVERTISED_Autoneg |
4079 ADVERTISED_10baseT_Half;
4080
4081 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4082 if (tg3_flag(tp, WOL_SPEED_100MB))
4083 advertising |=
4084 ADVERTISED_100baseT_Half |
4085 ADVERTISED_100baseT_Full |
4086 ADVERTISED_10baseT_Full;
4087 else
4088 advertising |= ADVERTISED_10baseT_Full;
4089 }
4090
4091 phydev->advertising = advertising;
4092
4093 phy_start_aneg(phydev);
4094
4095 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4096 if (phyid != PHY_ID_BCMAC131) {
4097 phyid &= PHY_BCM_OUI_MASK;
4098 if (phyid == PHY_BCM_OUI_1 ||
4099 phyid == PHY_BCM_OUI_2 ||
4100 phyid == PHY_BCM_OUI_3)
4101 do_low_power = true;
4102 }
4103 }
4104 } else {
4105 do_low_power = true;
4106
4107 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4108 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4109
4110 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4111 tg3_setup_phy(tp, false);
4112 }
4113
4114 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4115 u32 val;
4116
4117 val = tr32(GRC_VCPU_EXT_CTRL);
4118 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4119 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4120 int i;
4121 u32 val;
4122
4123 for (i = 0; i < 200; i++) {
4124 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4125 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4126 break;
4127 msleep(1);
4128 }
4129 }
4130 if (tg3_flag(tp, WOL_CAP))
4131 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4132 WOL_DRV_STATE_SHUTDOWN |
4133 WOL_DRV_WOL |
4134 WOL_SET_MAGIC_PKT);
4135
4136 if (device_should_wake) {
4137 u32 mac_mode;
4138
4139 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4140 if (do_low_power &&
4141 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4142 tg3_phy_auxctl_write(tp,
4143 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4144 MII_TG3_AUXCTL_PCTL_WOL_EN |
4145 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4146 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4147 udelay(40);
4148 }
4149
4150 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4151 mac_mode = MAC_MODE_PORT_MODE_GMII;
4152 else if (tp->phy_flags &
4153 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4154 if (tp->link_config.active_speed == SPEED_1000)
4155 mac_mode = MAC_MODE_PORT_MODE_GMII;
4156 else
4157 mac_mode = MAC_MODE_PORT_MODE_MII;
4158 } else
4159 mac_mode = MAC_MODE_PORT_MODE_MII;
4160
4161 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4162 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4163 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4164 SPEED_100 : SPEED_10;
4165 if (tg3_5700_link_polarity(tp, speed))
4166 mac_mode |= MAC_MODE_LINK_POLARITY;
4167 else
4168 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4169 }
4170 } else {
4171 mac_mode = MAC_MODE_PORT_MODE_TBI;
4172 }
4173
4174 if (!tg3_flag(tp, 5750_PLUS))
4175 tw32(MAC_LED_CTRL, tp->led_ctrl);
4176
4177 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4178 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4179 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4180 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4181
4182 if (tg3_flag(tp, ENABLE_APE))
4183 mac_mode |= MAC_MODE_APE_TX_EN |
4184 MAC_MODE_APE_RX_EN |
4185 MAC_MODE_TDE_ENABLE;
4186
4187 tw32_f(MAC_MODE, mac_mode);
4188 udelay(100);
4189
4190 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4191 udelay(10);
4192 }
4193
4194 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4195 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4196 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4197 u32 base_val;
4198
4199 base_val = tp->pci_clock_ctrl;
4200 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4201 CLOCK_CTRL_TXCLK_DISABLE);
4202
4203 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4204 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4205 } else if (tg3_flag(tp, 5780_CLASS) ||
4206 tg3_flag(tp, CPMU_PRESENT) ||
4207 tg3_asic_rev(tp) == ASIC_REV_5906) {
4208 /* do nothing */
4209 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4210 u32 newbits1, newbits2;
4211
4212 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213 tg3_asic_rev(tp) == ASIC_REV_5701) {
4214 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4215 CLOCK_CTRL_TXCLK_DISABLE |
4216 CLOCK_CTRL_ALTCLK);
4217 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4218 } else if (tg3_flag(tp, 5705_PLUS)) {
4219 newbits1 = CLOCK_CTRL_625_CORE;
4220 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4221 } else {
4222 newbits1 = CLOCK_CTRL_ALTCLK;
4223 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4224 }
4225
4226 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4227 40);
4228
4229 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4230 40);
4231
4232 if (!tg3_flag(tp, 5705_PLUS)) {
4233 u32 newbits3;
4234
4235 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4236 tg3_asic_rev(tp) == ASIC_REV_5701) {
4237 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4238 CLOCK_CTRL_TXCLK_DISABLE |
4239 CLOCK_CTRL_44MHZ_CORE);
4240 } else {
4241 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4242 }
4243
4244 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4245 tp->pci_clock_ctrl | newbits3, 40);
4246 }
4247 }
4248
4249 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4250 tg3_power_down_phy(tp, do_low_power);
4251
4252 tg3_frob_aux_power(tp, true);
4253
4254 /* Workaround for unstable PLL clock */
4255 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4256 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4257 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4258 u32 val = tr32(0x7d00);
4259
4260 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4261 tw32(0x7d00, val);
4262 if (!tg3_flag(tp, ENABLE_ASF)) {
4263 int err;
4264
4265 err = tg3_nvram_lock(tp);
4266 tg3_halt_cpu(tp, RX_CPU_BASE);
4267 if (!err)
4268 tg3_nvram_unlock(tp);
4269 }
4270 }
4271
4272 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4273
4274 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4275
4276 return 0;
4277}
4278
4279static void tg3_power_down(struct tg3 *tp)
4280{
4281 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4282 pci_set_power_state(tp->pdev, PCI_D3hot);
4283}
4284
4285static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4286{
4287 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4288 case MII_TG3_AUX_STAT_10HALF:
4289 *speed = SPEED_10;
4290 *duplex = DUPLEX_HALF;
4291 break;
4292
4293 case MII_TG3_AUX_STAT_10FULL:
4294 *speed = SPEED_10;
4295 *duplex = DUPLEX_FULL;
4296 break;
4297
4298 case MII_TG3_AUX_STAT_100HALF:
4299 *speed = SPEED_100;
4300 *duplex = DUPLEX_HALF;
4301 break;
4302
4303 case MII_TG3_AUX_STAT_100FULL:
4304 *speed = SPEED_100;
4305 *duplex = DUPLEX_FULL;
4306 break;
4307
4308 case MII_TG3_AUX_STAT_1000HALF:
4309 *speed = SPEED_1000;
4310 *duplex = DUPLEX_HALF;
4311 break;
4312
4313 case MII_TG3_AUX_STAT_1000FULL:
4314 *speed = SPEED_1000;
4315 *duplex = DUPLEX_FULL;
4316 break;
4317
4318 default:
4319 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4320 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4321 SPEED_10;
4322 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4323 DUPLEX_HALF;
4324 break;
4325 }
4326 *speed = SPEED_UNKNOWN;
4327 *duplex = DUPLEX_UNKNOWN;
4328 break;
4329 }
4330}
4331
4332static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4333{
4334 int err = 0;
4335 u32 val, new_adv;
4336
4337 new_adv = ADVERTISE_CSMA;
4338 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4339 new_adv |= mii_advertise_flowctrl(flowctrl);
4340
4341 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4342 if (err)
4343 goto done;
4344
4345 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4346 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4347
4348 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4349 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4350 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4351
4352 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4353 if (err)
4354 goto done;
4355 }
4356
4357 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4358 goto done;
4359
4360 tw32(TG3_CPMU_EEE_MODE,
4361 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4362
4363 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4364 if (!err) {
4365 u32 err2;
4366
4367 val = 0;
4368 /* Advertise 100-BaseTX EEE ability */
4369 if (advertise & ADVERTISED_100baseT_Full)
4370 val |= MDIO_AN_EEE_ADV_100TX;
4371 /* Advertise 1000-BaseT EEE ability */
4372 if (advertise & ADVERTISED_1000baseT_Full)
4373 val |= MDIO_AN_EEE_ADV_1000T;
4374
4375 if (!tp->eee.eee_enabled) {
4376 val = 0;
4377 tp->eee.advertised = 0;
4378 } else {
4379 tp->eee.advertised = advertise &
4380 (ADVERTISED_100baseT_Full |
4381 ADVERTISED_1000baseT_Full);
4382 }
4383
4384 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4385 if (err)
4386 val = 0;
4387
4388 switch (tg3_asic_rev(tp)) {
4389 case ASIC_REV_5717:
4390 case ASIC_REV_57765:
4391 case ASIC_REV_57766:
4392 case ASIC_REV_5719:
4393 /* If we advertised any EEE abilities above... */
4394 if (val)
4395 val = MII_TG3_DSP_TAP26_ALNOKO |
4396 MII_TG3_DSP_TAP26_RMRXSTO |
4397 MII_TG3_DSP_TAP26_OPCSINPT;
4398 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4399 /* Fall through */
4400 case ASIC_REV_5720:
4401 case ASIC_REV_5762:
4402 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4403 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4404 MII_TG3_DSP_CH34TP2_HIBW01);
4405 }
4406
4407 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4408 if (!err)
4409 err = err2;
4410 }
4411
4412done:
4413 return err;
4414}
4415
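/* For reference, the conversion helpers used above come from linux/mii.h:
 * ethtool_adv_to_mii_adv_t() maps ethtool ADVERTISED_* bits onto
 * MII_ADVERTISE register bits (e.g. ADVERTISED_100baseT_Full becomes
 * ADVERTISE_100FULL), and mii_advertise_flowctrl() folds FLOW_CTRL_RX/TX
 * into the ADVERTISE_PAUSE_CAP/ADVERTISE_PAUSE_ASYM pause bits.
 */
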
4416static void tg3_phy_copper_begin(struct tg3 *tp)
4417{
4418 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4419 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4420 u32 adv, fc;
4421
4422 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4423 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4424 adv = ADVERTISED_10baseT_Half |
4425 ADVERTISED_10baseT_Full;
4426 if (tg3_flag(tp, WOL_SPEED_100MB))
4427 adv |= ADVERTISED_100baseT_Half |
4428 ADVERTISED_100baseT_Full;
4429 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4430 if (!(tp->phy_flags &
4431 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4432 adv |= ADVERTISED_1000baseT_Half;
4433 adv |= ADVERTISED_1000baseT_Full;
4434 }
4435
4436 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4437 } else {
4438 adv = tp->link_config.advertising;
4439 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4440 adv &= ~(ADVERTISED_1000baseT_Half |
4441 ADVERTISED_1000baseT_Full);
4442
4443 fc = tp->link_config.flowctrl;
4444 }
4445
4446 tg3_phy_autoneg_cfg(tp, adv, fc);
4447
4448 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4449 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4450 /* Normally during power down we want to autonegotiate
4451 * the lowest possible speed for WOL. However, to avoid
4452 * link flap, we leave it untouched.
4453 */
4454 return;
4455 }
4456
4457 tg3_writephy(tp, MII_BMCR,
4458 BMCR_ANENABLE | BMCR_ANRESTART);
4459 } else {
4460 int i;
4461 u32 bmcr, orig_bmcr;
4462
4463 tp->link_config.active_speed = tp->link_config.speed;
4464 tp->link_config.active_duplex = tp->link_config.duplex;
4465
4466 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4467 /* With autoneg disabled, 5715 only links up when the
4468 * advertisement register has the configured speed
4469 * enabled.
4470 */
4471 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4472 }
4473
4474 bmcr = 0;
4475 switch (tp->link_config.speed) {
4476 default:
4477 case SPEED_10:
4478 break;
4479
4480 case SPEED_100:
4481 bmcr |= BMCR_SPEED100;
4482 break;
4483
4484 case SPEED_1000:
4485 bmcr |= BMCR_SPEED1000;
4486 break;
4487 }
4488
4489 if (tp->link_config.duplex == DUPLEX_FULL)
4490 bmcr |= BMCR_FULLDPLX;
4491
4492 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4493 (bmcr != orig_bmcr)) {
4494 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4495 for (i = 0; i < 1500; i++) {
4496 u32 tmp;
4497
4498 udelay(10);
4499 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4500 tg3_readphy(tp, MII_BMSR, &tmp))
4501 continue;
4502 if (!(tmp & BMSR_LSTATUS)) {
4503 udelay(40);
4504 break;
4505 }
4506 }
4507 tg3_writephy(tp, MII_BMCR, bmcr);
4508 udelay(40);
4509 }
4510 }
4511}
4512
4513static int tg3_phy_pull_config(struct tg3 *tp)
4514{
4515 int err;
4516 u32 val;
4517
4518 err = tg3_readphy(tp, MII_BMCR, &val);
4519 if (err)
4520 goto done;
4521
4522 if (!(val & BMCR_ANENABLE)) {
4523 tp->link_config.autoneg = AUTONEG_DISABLE;
4524 tp->link_config.advertising = 0;
4525 tg3_flag_clear(tp, PAUSE_AUTONEG);
4526
4527 err = -EIO;
4528
4529 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4530 case 0:
4531 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4532 goto done;
4533
4534 tp->link_config.speed = SPEED_10;
4535 break;
4536 case BMCR_SPEED100:
4537 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4538 goto done;
4539
4540 tp->link_config.speed = SPEED_100;
4541 break;
4542 case BMCR_SPEED1000:
4543 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4544 tp->link_config.speed = SPEED_1000;
4545 break;
4546 }
4547 /* Fall through */
4548 default:
4549 goto done;
4550 }
4551
4552 if (val & BMCR_FULLDPLX)
4553 tp->link_config.duplex = DUPLEX_FULL;
4554 else
4555 tp->link_config.duplex = DUPLEX_HALF;
4556
4557 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4558
4559 err = 0;
4560 goto done;
4561 }
4562
4563 tp->link_config.autoneg = AUTONEG_ENABLE;
4564 tp->link_config.advertising = ADVERTISED_Autoneg;
4565 tg3_flag_set(tp, PAUSE_AUTONEG);
4566
4567 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4568 u32 adv;
4569
4570 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4571 if (err)
4572 goto done;
4573
4574 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4575 tp->link_config.advertising |= adv | ADVERTISED_TP;
4576
4577 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4578 } else {
4579 tp->link_config.advertising |= ADVERTISED_FIBRE;
4580 }
4581
4582 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4583 u32 adv;
4584
4585 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4586 err = tg3_readphy(tp, MII_CTRL1000, &val);
4587 if (err)
4588 goto done;
4589
4590 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4591 } else {
4592 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4593 if (err)
4594 goto done;
4595
4596 adv = tg3_decode_flowctrl_1000X(val);
4597 tp->link_config.flowctrl = adv;
4598
4599 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4600 adv = mii_adv_to_ethtool_adv_x(val);
4601 }
4602
4603 tp->link_config.advertising |= adv;
4604 }
4605
4606done:
4607 return err;
4608}
4609
4610static int tg3_init_5401phy_dsp(struct tg3 *tp)
4611{
4612 int err;
4613
4614 /* Turn off tap power management. */
4615 /* Set Extended packet length bit */
4616 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4617
4618 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4619 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4620 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4621 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4622 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4623
4624 udelay(40);
4625
4626 return err;
4627}
4628
4629static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4630{
4631 struct ethtool_eee eee;
4632
4633 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4634 return true;
4635
4636 tg3_eee_pull_config(tp, &eee);
4637
4638 if (tp->eee.eee_enabled) {
4639 if (tp->eee.advertised != eee.advertised ||
4640 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4641 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4642 return false;
4643 } else {
4644 /* EEE is disabled but we're advertising */
4645 if (eee.advertised)
4646 return false;
4647 }
4648
4649 return true;
4650}
4651
4652static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4653{
4654 u32 advmsk, tgtadv, advertising;
4655
4656 advertising = tp->link_config.advertising;
4657 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4658
4659 advmsk = ADVERTISE_ALL;
4660 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4661 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4662 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4663 }
4664
4665 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4666 return false;
4667
4668 if ((*lcladv & advmsk) != tgtadv)
4669 return false;
4670
4671 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4672 u32 tg3_ctrl;
4673
4674 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4675
4676 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4677 return false;
4678
4679 if (tgtadv &&
4680 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4681 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4682 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4683 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4684 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4685 } else {
4686 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4687 }
4688
4689 if (tg3_ctrl != tgtadv)
4690 return false;
4691 }
4692
4693 return true;
4694}
4695
4696static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4697{
4698 u32 lpeth = 0;
4699
4700 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4701 u32 val;
4702
4703 if (tg3_readphy(tp, MII_STAT1000, &val))
4704 return false;
4705
4706 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4707 }
4708
4709 if (tg3_readphy(tp, MII_LPA, rmtadv))
4710 return false;
4711
4712 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4713 tp->link_config.rmt_adv = lpeth;
4714
4715 return true;
4716}
4717
4718static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4719{
4720 if (curr_link_up != tp->link_up) {
4721 if (curr_link_up) {
4722 netif_carrier_on(tp->dev);
4723 } else {
4724 netif_carrier_off(tp->dev);
4725 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4726 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4727 }
4728
4729 tg3_link_report(tp);
4730 return true;
4731 }
4732
4733 return false;
4734}
4735
4736static void tg3_clear_mac_status(struct tg3 *tp)
4737{
4738 tw32(MAC_EVENT, 0);
4739
4740 tw32_f(MAC_STATUS,
4741 MAC_STATUS_SYNC_CHANGED |
4742 MAC_STATUS_CFG_CHANGED |
4743 MAC_STATUS_MI_COMPLETION |
4744 MAC_STATUS_LNKSTATE_CHANGED);
4745 udelay(40);
4746}
4747
4748static void tg3_setup_eee(struct tg3 *tp)
4749{
4750 u32 val;
4751
4752 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4753 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4754 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4755 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4756
4757 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4758
4759 tw32_f(TG3_CPMU_EEE_CTRL,
4760 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4761
4762 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4763 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4764 TG3_CPMU_EEEMD_LPI_IN_RX |
4765 TG3_CPMU_EEEMD_EEE_ENABLE;
4766
4767 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4768 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4769
4770 if (tg3_flag(tp, ENABLE_APE))
4771 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4772
4773 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4774
4775 tw32_f(TG3_CPMU_EEE_DBTMR1,
4776 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4777 (tp->eee.tx_lpi_timer & 0xffff));
4778
4779 tw32_f(TG3_CPMU_EEE_DBTMR2,
4780 TG3_CPMU_DBTMR2_APE_TX_2047US |
4781 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4782}
4783
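/* Note: only the low 16 bits of tx_lpi_timer are programmed into
 * TG3_CPMU_EEE_DBTMR1 (hence the & 0xffff); the PCIe-exit debounce
 * constant OR'd in above occupies the remaining bits of the register.
 */
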
4784static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4785{
4786 bool current_link_up;
4787 u32 bmsr, val;
4788 u32 lcl_adv, rmt_adv;
4789 u16 current_speed;
4790 u8 current_duplex;
4791 int i, err;
4792
4793 tg3_clear_mac_status(tp);
4794
4795 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4796 tw32_f(MAC_MI_MODE,
4797 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4798 udelay(80);
4799 }
4800
4801 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4802
4803 /* Some third-party PHYs need to be reset on link going
4804 * down.
4805 */
4806 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4807 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4808 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4809 tp->link_up) {
4810 tg3_readphy(tp, MII_BMSR, &bmsr);
4811 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4812 !(bmsr & BMSR_LSTATUS))
4813 force_reset = true;
4814 }
4815 if (force_reset)
4816 tg3_phy_reset(tp);
4817
4818 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4819 tg3_readphy(tp, MII_BMSR, &bmsr);
4820 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4821 !tg3_flag(tp, INIT_COMPLETE))
4822 bmsr = 0;
4823
4824 if (!(bmsr & BMSR_LSTATUS)) {
4825 err = tg3_init_5401phy_dsp(tp);
4826 if (err)
4827 return err;
4828
4829 tg3_readphy(tp, MII_BMSR, &bmsr);
4830 for (i = 0; i < 1000; i++) {
4831 udelay(10);
4832 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4833 (bmsr & BMSR_LSTATUS)) {
4834 udelay(40);
4835 break;
4836 }
4837 }
4838
4839 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4840 TG3_PHY_REV_BCM5401_B0 &&
4841 !(bmsr & BMSR_LSTATUS) &&
4842 tp->link_config.active_speed == SPEED_1000) {
4843 err = tg3_phy_reset(tp);
4844 if (!err)
4845 err = tg3_init_5401phy_dsp(tp);
4846 if (err)
4847 return err;
4848 }
4849 }
4850 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4851 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4852 /* 5701 {A0,B0} CRC bug workaround */
4853 tg3_writephy(tp, 0x15, 0x0a75);
4854 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4855 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4856 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4857 }
4858
4859 /* Clear pending interrupts... */
4860 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4861 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4862
4863 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4864 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4865 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4866 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4867
4868 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4869 tg3_asic_rev(tp) == ASIC_REV_5701) {
4870 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4871 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4872 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4873 else
4874 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4875 }
4876
4877 current_link_up = false;
4878 current_speed = SPEED_UNKNOWN;
4879 current_duplex = DUPLEX_UNKNOWN;
4880 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4881 tp->link_config.rmt_adv = 0;
4882
4883 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4884 err = tg3_phy_auxctl_read(tp,
4885 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4886 &val);
4887 if (!err && !(val & (1 << 10))) {
4888 tg3_phy_auxctl_write(tp,
4889 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4890 val | (1 << 10));
4891 goto relink;
4892 }
4893 }
4894
4895 bmsr = 0;
4896 for (i = 0; i < 100; i++) {
4897 tg3_readphy(tp, MII_BMSR, &bmsr);
4898 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4899 (bmsr & BMSR_LSTATUS))
4900 break;
4901 udelay(40);
4902 }
4903
4904 if (bmsr & BMSR_LSTATUS) {
4905 u32 aux_stat, bmcr;
4906
4907 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4908 for (i = 0; i < 2000; i++) {
4909 udelay(10);
4910 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4911 aux_stat)
4912 break;
4913 }
4914
4915 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4916 &current_speed,
4917 &current_duplex);
4918
4919 bmcr = 0;
4920 for (i = 0; i < 200; i++) {
4921 tg3_readphy(tp, MII_BMCR, &bmcr);
4922 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4923 continue;
4924 if (bmcr && bmcr != 0x7fff)
4925 break;
4926 udelay(10);
4927 }
4928
4929 lcl_adv = 0;
4930 rmt_adv = 0;
4931
4932 tp->link_config.active_speed = current_speed;
4933 tp->link_config.active_duplex = current_duplex;
4934
4935 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4936 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4937
4938 if ((bmcr & BMCR_ANENABLE) &&
4939 eee_config_ok &&
4940 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4941 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4942 current_link_up = true;
4943
4944 /* Changes to the EEE settings take effect only after a
4945 * phy reset. If we have skipped a reset due to Link
4946 * Flap Avoidance being enabled, do it now.
4947 */
4948 if (!eee_config_ok &&
4949 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4950 !force_reset) {
4951 tg3_setup_eee(tp);
4952 tg3_phy_reset(tp);
4953 }
4954 } else {
4955 if (!(bmcr & BMCR_ANENABLE) &&
4956 tp->link_config.speed == current_speed &&
4957 tp->link_config.duplex == current_duplex) {
4958 current_link_up = true;
4959 }
4960 }
4961
4962 if (current_link_up &&
4963 tp->link_config.active_duplex == DUPLEX_FULL) {
4964 u32 reg, bit;
4965
4966 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4967 reg = MII_TG3_FET_GEN_STAT;
4968 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4969 } else {
4970 reg = MII_TG3_EXT_STAT;
4971 bit = MII_TG3_EXT_STAT_MDIX;
4972 }
4973
4974 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4975 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4976
4977 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4978 }
4979 }
4980
4981relink:
4982 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4983 tg3_phy_copper_begin(tp);
4984
4985 if (tg3_flag(tp, ROBOSWITCH)) {
4986 current_link_up = true;
4987 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4988 current_speed = SPEED_1000;
4989 current_duplex = DUPLEX_FULL;
4990 tp->link_config.active_speed = current_speed;
4991 tp->link_config.active_duplex = current_duplex;
4992 }
4993
4994 tg3_readphy(tp, MII_BMSR, &bmsr);
4995 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4996 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4997 current_link_up = true;
4998 }
4999
5000 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5001 if (current_link_up) {
5002 if (tp->link_config.active_speed == SPEED_100 ||
5003 tp->link_config.active_speed == SPEED_10)
5004 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5005 else
5006 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5007 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5008 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5009 else
5010 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5011
5012 /* In order for the 5750 core in the BCM4785 chip to work properly
5013 * in RGMII mode, the LED Control Register must be set up.
5014 */
5015 if (tg3_flag(tp, RGMII_MODE)) {
5016 u32 led_ctrl = tr32(MAC_LED_CTRL);
5017 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5018
5019 if (tp->link_config.active_speed == SPEED_10)
5020 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5021 else if (tp->link_config.active_speed == SPEED_100)
5022 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5023 LED_CTRL_100MBPS_ON);
5024 else if (tp->link_config.active_speed == SPEED_1000)
5025 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5026 LED_CTRL_1000MBPS_ON);
5027
5028 tw32(MAC_LED_CTRL, led_ctrl);
5029 udelay(40);
5030 }
5031
5032 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5033 if (tp->link_config.active_duplex == DUPLEX_HALF)
5034 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5035
5036 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5037 if (current_link_up &&
5038 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5039 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5040 else
5041 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5042 }
5043
5044 /* ??? Without this setting Netgear GA302T PHY does not
5045 * ??? send/receive packets...
5046 */
5047 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5048 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5049 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5050 tw32_f(MAC_MI_MODE, tp->mi_mode);
5051 udelay(80);
5052 }
5053
5054 tw32_f(MAC_MODE, tp->mac_mode);
5055 udelay(40);
5056
5057 tg3_phy_eee_adjust(tp, current_link_up);
5058
5059 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5060 /* Polled via timer. */
5061 tw32_f(MAC_EVENT, 0);
5062 } else {
5063 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5064 }
5065 udelay(40);
5066
5067 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5068 current_link_up &&
5069 tp->link_config.active_speed == SPEED_1000 &&
5070 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5071 udelay(120);
5072 tw32_f(MAC_STATUS,
5073 (MAC_STATUS_SYNC_CHANGED |
5074 MAC_STATUS_CFG_CHANGED));
5075 udelay(40);
5076 tg3_write_mem(tp,
5077 NIC_SRAM_FIRMWARE_MBOX,
5078 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5079 }
5080
5081 /* Prevent send BD corruption. */
5082 if (tg3_flag(tp, CLKREQ_BUG)) {
5083 if (tp->link_config.active_speed == SPEED_100 ||
5084 tp->link_config.active_speed == SPEED_10)
5085 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5086 PCI_EXP_LNKCTL_CLKREQ_EN);
5087 else
5088 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5089 PCI_EXP_LNKCTL_CLKREQ_EN);
5090 }
5091
5092 tg3_test_and_report_link_chg(tp, current_link_up);
5093
5094 return 0;
5095}
5096
5097struct tg3_fiber_aneginfo {
5098 int state;
5099#define ANEG_STATE_UNKNOWN 0
5100#define ANEG_STATE_AN_ENABLE 1
5101#define ANEG_STATE_RESTART_INIT 2
5102#define ANEG_STATE_RESTART 3
5103#define ANEG_STATE_DISABLE_LINK_OK 4
5104#define ANEG_STATE_ABILITY_DETECT_INIT 5
5105#define ANEG_STATE_ABILITY_DETECT 6
5106#define ANEG_STATE_ACK_DETECT_INIT 7
5107#define ANEG_STATE_ACK_DETECT 8
5108#define ANEG_STATE_COMPLETE_ACK_INIT 9
5109#define ANEG_STATE_COMPLETE_ACK 10
5110#define ANEG_STATE_IDLE_DETECT_INIT 11
5111#define ANEG_STATE_IDLE_DETECT 12
5112#define ANEG_STATE_LINK_OK 13
5113#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5114#define ANEG_STATE_NEXT_PAGE_WAIT 15
5115
5116 u32 flags;
5117#define MR_AN_ENABLE 0x00000001
5118#define MR_RESTART_AN 0x00000002
5119#define MR_AN_COMPLETE 0x00000004
5120#define MR_PAGE_RX 0x00000008
5121#define MR_NP_LOADED 0x00000010
5122#define MR_TOGGLE_TX 0x00000020
5123#define MR_LP_ADV_FULL_DUPLEX 0x00000040
5124#define MR_LP_ADV_HALF_DUPLEX 0x00000080
5125#define MR_LP_ADV_SYM_PAUSE 0x00000100
5126#define MR_LP_ADV_ASYM_PAUSE 0x00000200
5127#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5128#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5129#define MR_LP_ADV_NEXT_PAGE 0x00001000
5130#define MR_TOGGLE_RX 0x00002000
5131#define MR_NP_RX 0x00004000
5132
5133#define MR_LINK_OK 0x80000000
5134
5135 unsigned long link_time, cur_time;
5136
5137 u32 ability_match_cfg;
5138 int ability_match_count;
5139
5140 char ability_match, idle_match, ack_match;
5141
5142 u32 txconfig, rxconfig;
5143#define ANEG_CFG_NP 0x00000080
5144#define ANEG_CFG_ACK 0x00000040
5145#define ANEG_CFG_RF2 0x00000020
5146#define ANEG_CFG_RF1 0x00000010
5147#define ANEG_CFG_PS2 0x00000001
5148#define ANEG_CFG_PS1 0x00008000
5149#define ANEG_CFG_HD 0x00004000
5150#define ANEG_CFG_FD 0x00002000
5151#define ANEG_CFG_INVAL 0x00001f06
5152
5153};
5154#define ANEG_OK 0
5155#define ANEG_DONE 1
5156#define ANEG_TIMER_ENAB 2
5157#define ANEG_FAILED -1
5158
5159#define ANEG_STATE_SETTLE_TIME 10000
5160
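/* The state machine below implements IEEE 802.3 Clause 37 style
 * autonegotiation for the fiber (TBI) interface in software. It is
 * clocked by fiber_autoneg(), which steps it roughly once per
 * microsecond for at most 195000 ticks, so ANEG_STATE_SETTLE_TIME
 * (10000 ticks) corresponds to about 10 ms of settle time.
 */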
5161static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5162 struct tg3_fiber_aneginfo *ap)
5163{
5164 u16 flowctrl;
5165 unsigned long delta;
5166 u32 rx_cfg_reg;
5167 int ret;
5168
5169 if (ap->state == ANEG_STATE_UNKNOWN) {
5170 ap->rxconfig = 0;
5171 ap->link_time = 0;
5172 ap->cur_time = 0;
5173 ap->ability_match_cfg = 0;
5174 ap->ability_match_count = 0;
5175 ap->ability_match = 0;
5176 ap->idle_match = 0;
5177 ap->ack_match = 0;
5178 }
5179 ap->cur_time++;
5180
5181 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5182 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5183
5184 if (rx_cfg_reg != ap->ability_match_cfg) {
5185 ap->ability_match_cfg = rx_cfg_reg;
5186 ap->ability_match = 0;
5187 ap->ability_match_count = 0;
5188 } else {
5189 if (++ap->ability_match_count > 1) {
5190 ap->ability_match = 1;
5191 ap->ability_match_cfg = rx_cfg_reg;
5192 }
5193 }
5194 if (rx_cfg_reg & ANEG_CFG_ACK)
5195 ap->ack_match = 1;
5196 else
5197 ap->ack_match = 0;
5198
5199 ap->idle_match = 0;
5200 } else {
5201 ap->idle_match = 1;
5202 ap->ability_match_cfg = 0;
5203 ap->ability_match_count = 0;
5204 ap->ability_match = 0;
5205 ap->ack_match = 0;
5206
5207 rx_cfg_reg = 0;
5208 }
5209
5210 ap->rxconfig = rx_cfg_reg;
5211 ret = ANEG_OK;
5212
5213 switch (ap->state) {
5214 case ANEG_STATE_UNKNOWN:
5215 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5216 ap->state = ANEG_STATE_AN_ENABLE;
5217
5218 /* fallthru */
5219 case ANEG_STATE_AN_ENABLE:
5220 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5221 if (ap->flags & MR_AN_ENABLE) {
5222 ap->link_time = 0;
5223 ap->cur_time = 0;
5224 ap->ability_match_cfg = 0;
5225 ap->ability_match_count = 0;
5226 ap->ability_match = 0;
5227 ap->idle_match = 0;
5228 ap->ack_match = 0;
5229
5230 ap->state = ANEG_STATE_RESTART_INIT;
5231 } else {
5232 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5233 }
5234 break;
5235
5236 case ANEG_STATE_RESTART_INIT:
5237 ap->link_time = ap->cur_time;
5238 ap->flags &= ~(MR_NP_LOADED);
5239 ap->txconfig = 0;
5240 tw32(MAC_TX_AUTO_NEG, 0);
5241 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5242 tw32_f(MAC_MODE, tp->mac_mode);
5243 udelay(40);
5244
5245 ret = ANEG_TIMER_ENAB;
5246 ap->state = ANEG_STATE_RESTART;
5247
5248 /* fallthru */
5249 case ANEG_STATE_RESTART:
5250 delta = ap->cur_time - ap->link_time;
5251 if (delta > ANEG_STATE_SETTLE_TIME)
5252 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5253 else
5254 ret = ANEG_TIMER_ENAB;
5255 break;
5256
5257 case ANEG_STATE_DISABLE_LINK_OK:
5258 ret = ANEG_DONE;
5259 break;
5260
5261 case ANEG_STATE_ABILITY_DETECT_INIT:
5262 ap->flags &= ~(MR_TOGGLE_TX);
5263 ap->txconfig = ANEG_CFG_FD;
5264 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5265 if (flowctrl & ADVERTISE_1000XPAUSE)
5266 ap->txconfig |= ANEG_CFG_PS1;
5267 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5268 ap->txconfig |= ANEG_CFG_PS2;
5269 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5270 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5271 tw32_f(MAC_MODE, tp->mac_mode);
5272 udelay(40);
5273
5274 ap->state = ANEG_STATE_ABILITY_DETECT;
5275 break;
5276
5277 case ANEG_STATE_ABILITY_DETECT:
5278 if (ap->ability_match != 0 && ap->rxconfig != 0)
5279 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5280 break;
5281
5282 case ANEG_STATE_ACK_DETECT_INIT:
5283 ap->txconfig |= ANEG_CFG_ACK;
5284 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5285 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5286 tw32_f(MAC_MODE, tp->mac_mode);
5287 udelay(40);
5288
5289 ap->state = ANEG_STATE_ACK_DETECT;
5290
5291 /* fallthru */
5292 case ANEG_STATE_ACK_DETECT:
5293 if (ap->ack_match != 0) {
5294 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5295 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5296 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5297 } else {
5298 ap->state = ANEG_STATE_AN_ENABLE;
5299 }
5300 } else if (ap->ability_match != 0 &&
5301 ap->rxconfig == 0) {
5302 ap->state = ANEG_STATE_AN_ENABLE;
5303 }
5304 break;
5305
5306 case ANEG_STATE_COMPLETE_ACK_INIT:
5307 if (ap->rxconfig & ANEG_CFG_INVAL) {
5308 ret = ANEG_FAILED;
5309 break;
5310 }
5311 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5312 MR_LP_ADV_HALF_DUPLEX |
5313 MR_LP_ADV_SYM_PAUSE |
5314 MR_LP_ADV_ASYM_PAUSE |
5315 MR_LP_ADV_REMOTE_FAULT1 |
5316 MR_LP_ADV_REMOTE_FAULT2 |
5317 MR_LP_ADV_NEXT_PAGE |
5318 MR_TOGGLE_RX |
5319 MR_NP_RX);
5320 if (ap->rxconfig & ANEG_CFG_FD)
5321 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5322 if (ap->rxconfig & ANEG_CFG_HD)
5323 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5324 if (ap->rxconfig & ANEG_CFG_PS1)
5325 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5326 if (ap->rxconfig & ANEG_CFG_PS2)
5327 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5328 if (ap->rxconfig & ANEG_CFG_RF1)
5329 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5330 if (ap->rxconfig & ANEG_CFG_RF2)
5331 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5332 if (ap->rxconfig & ANEG_CFG_NP)
5333 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5334
5335 ap->link_time = ap->cur_time;
5336
5337 ap->flags ^= (MR_TOGGLE_TX);
5338 if (ap->rxconfig & 0x0008)
5339 ap->flags |= MR_TOGGLE_RX;
5340 if (ap->rxconfig & ANEG_CFG_NP)
5341 ap->flags |= MR_NP_RX;
5342 ap->flags |= MR_PAGE_RX;
5343
5344 ap->state = ANEG_STATE_COMPLETE_ACK;
5345 ret = ANEG_TIMER_ENAB;
5346 break;
5347
5348 case ANEG_STATE_COMPLETE_ACK:
5349 if (ap->ability_match != 0 &&
5350 ap->rxconfig == 0) {
5351 ap->state = ANEG_STATE_AN_ENABLE;
5352 break;
5353 }
5354 delta = ap->cur_time - ap->link_time;
5355 if (delta > ANEG_STATE_SETTLE_TIME) {
5356 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5357 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5358 } else {
5359 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5360 !(ap->flags & MR_NP_RX)) {
5361 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5362 } else {
5363 ret = ANEG_FAILED;
5364 }
5365 }
5366 }
5367 break;
5368
5369 case ANEG_STATE_IDLE_DETECT_INIT:
5370 ap->link_time = ap->cur_time;
5371 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5372 tw32_f(MAC_MODE, tp->mac_mode);
5373 udelay(40);
5374
5375 ap->state = ANEG_STATE_IDLE_DETECT;
5376 ret = ANEG_TIMER_ENAB;
5377 break;
5378
5379 case ANEG_STATE_IDLE_DETECT:
5380 if (ap->ability_match != 0 &&
5381 ap->rxconfig == 0) {
5382 ap->state = ANEG_STATE_AN_ENABLE;
5383 break;
5384 }
5385 delta = ap->cur_time - ap->link_time;
5386 if (delta > ANEG_STATE_SETTLE_TIME) {
5387 /* XXX another gem from the Broadcom driver :( */
5388 ap->state = ANEG_STATE_LINK_OK;
5389 }
5390 break;
5391
5392 case ANEG_STATE_LINK_OK:
5393 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5394 ret = ANEG_DONE;
5395 break;
5396
5397 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5398 /* ??? unimplemented */
5399 break;
5400
5401 case ANEG_STATE_NEXT_PAGE_WAIT:
5402 /* ??? unimplemented */
5403 break;
5404
5405 default:
5406 ret = ANEG_FAILED;
5407 break;
5408 }
5409
5410 return ret;
5411}
5412
5413static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5414{
5415 int res = 0;
5416 struct tg3_fiber_aneginfo aninfo;
5417 int status = ANEG_FAILED;
5418 unsigned int tick;
5419 u32 tmp;
5420
5421 tw32_f(MAC_TX_AUTO_NEG, 0);
5422
5423 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5424 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5425 udelay(40);
5426
5427 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5428 udelay(40);
5429
5430 memset(&aninfo, 0, sizeof(aninfo));
5431 aninfo.flags |= MR_AN_ENABLE;
5432 aninfo.state = ANEG_STATE_UNKNOWN;
5433 aninfo.cur_time = 0;
5434 tick = 0;
5435 while (++tick < 195000) {
5436 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5437 if (status == ANEG_DONE || status == ANEG_FAILED)
5438 break;
5439
5440 udelay(1);
5441 }
5442
5443 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5444 tw32_f(MAC_MODE, tp->mac_mode);
5445 udelay(40);
5446
5447 *txflags = aninfo.txconfig;
5448 *rxflags = aninfo.flags;
5449
5450 if (status == ANEG_DONE &&
5451 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5452 MR_LP_ADV_FULL_DUPLEX)))
5453 res = 1;
5454
5455 return res;
5456}
5457
5458static void tg3_init_bcm8002(struct tg3 *tp)
5459{
5460 u32 mac_status = tr32(MAC_STATUS);
5461 int i;
5462
5463 /* Reset when initializing for the first time, or when we have a link. */
5464 if (tg3_flag(tp, INIT_COMPLETE) &&
5465 !(mac_status & MAC_STATUS_PCS_SYNCED))
5466 return;
5467
5468 /* Set PLL lock range. */
5469 tg3_writephy(tp, 0x16, 0x8007);
5470
5471 /* SW reset */
5472 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5473
5474 /* Wait for reset to complete. */
5475 /* XXX schedule_timeout() ... */
5476 for (i = 0; i < 500; i++)
5477 udelay(10);
5478
5479 /* Config mode; select PMA/Ch 1 regs. */
5480 tg3_writephy(tp, 0x10, 0x8411);
5481
5482 /* Enable auto-lock and comdet, select txclk for tx. */
5483 tg3_writephy(tp, 0x11, 0x0a10);
5484
5485 tg3_writephy(tp, 0x18, 0x00a0);
5486 tg3_writephy(tp, 0x16, 0x41ff);
5487
5488 /* Assert and deassert POR. */
5489 tg3_writephy(tp, 0x13, 0x0400);
5490 udelay(40);
5491 tg3_writephy(tp, 0x13, 0x0000);
5492
5493 tg3_writephy(tp, 0x11, 0x0a50);
5494 udelay(40);
5495 tg3_writephy(tp, 0x11, 0x0a10);
5496
5497 /* Wait for signal to stabilize */
5498 /* XXX schedule_timeout() ... */
5499 for (i = 0; i < 15000; i++)
5500 udelay(10);
5501
5502 /* Deselect the channel register so we can read the PHYID
5503 * later.
5504 */
5505 tg3_writephy(tp, 0x10, 0x8011);
5506}
5507
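/* A note on the delays above: the two busy-wait loops burn
 * 500 * 10 us = 5 ms for the software reset and 15000 * 10 us = 150 ms
 * for signal stabilization; the XXX comments mark them as candidates
 * for schedule_timeout() based sleeping instead of udelay().
 */
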
5508static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5509{
5510 u16 flowctrl;
5511 bool current_link_up;
5512 u32 sg_dig_ctrl, sg_dig_status;
5513 u32 serdes_cfg, expected_sg_dig_ctrl;
5514 int workaround, port_a;
5515
5516 serdes_cfg = 0;
5517 expected_sg_dig_ctrl = 0;
5518 workaround = 0;
5519 port_a = 1;
5520 current_link_up = false;
5521
5522 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5523 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5524 workaround = 1;
5525 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5526 port_a = 0;
5527
5528 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5529 /* preserve bits 20-23 for voltage regulator */
5530 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5531 }
5532
5533 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5534
5535 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5536 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5537 if (workaround) {
5538 u32 val = serdes_cfg;
5539
5540 if (port_a)
5541 val |= 0xc010000;
5542 else
5543 val |= 0x4010000;
5544 tw32_f(MAC_SERDES_CFG, val);
5545 }
5546
5547 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5548 }
5549 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5550 tg3_setup_flow_control(tp, 0, 0);
5551 current_link_up = true;
5552 }
5553 goto out;
5554 }
5555
5556 /* Want auto-negotiation. */
5557 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5558
5559 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5560 if (flowctrl & ADVERTISE_1000XPAUSE)
5561 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5562 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5563 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5564
5565 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5566 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5567 tp->serdes_counter &&
5568 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5569 MAC_STATUS_RCVD_CFG)) ==
5570 MAC_STATUS_PCS_SYNCED)) {
5571 tp->serdes_counter--;
5572 current_link_up = true;
5573 goto out;
5574 }
5575restart_autoneg:
5576 if (workaround)
5577 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5578 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5579 udelay(5);
5580 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5581
5582 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5583 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5584 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5585 MAC_STATUS_SIGNAL_DET)) {
5586 sg_dig_status = tr32(SG_DIG_STATUS);
5587 mac_status = tr32(MAC_STATUS);
5588
5589 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5590 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5591 u32 local_adv = 0, remote_adv = 0;
5592
5593 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5594 local_adv |= ADVERTISE_1000XPAUSE;
5595 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5596 local_adv |= ADVERTISE_1000XPSE_ASYM;
5597
5598 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5599 remote_adv |= LPA_1000XPAUSE;
5600 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5601 remote_adv |= LPA_1000XPAUSE_ASYM;
5602
5603 tp->link_config.rmt_adv =
5604 mii_adv_to_ethtool_adv_x(remote_adv);
5605
5606 tg3_setup_flow_control(tp, local_adv, remote_adv);
5607 current_link_up = true;
5608 tp->serdes_counter = 0;
5609 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5610 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5611 if (tp->serdes_counter)
5612 tp->serdes_counter--;
5613 else {
5614 if (workaround) {
5615 u32 val = serdes_cfg;
5616
5617 if (port_a)
5618 val |= 0xc010000;
5619 else
5620 val |= 0x4010000;
5621
5622 tw32_f(MAC_SERDES_CFG, val);
5623 }
5624
5625 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5626 udelay(40);
5627
5628 /* Link parallel detection: link is up only if
5629 * we have PCS_SYNC and are not receiving
5630 * config code words. */
5631 mac_status = tr32(MAC_STATUS);
5632 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5633 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5634 tg3_setup_flow_control(tp, 0, 0);
5635 current_link_up = true;
5636 tp->phy_flags |=
5637 TG3_PHYFLG_PARALLEL_DETECT;
5638 tp->serdes_counter =
5639 SERDES_PARALLEL_DET_TIMEOUT;
5640 } else
5641 goto restart_autoneg;
5642 }
5643 }
5644 } else {
5645 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5646 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5647 }
5648
5649out:
5650 return current_link_up;
5651}
5652
5653static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5654{
5655 bool current_link_up = false;
5656
5657 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5658 goto out;
5659
5660 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5661 u32 txflags, rxflags;
5662 int i;
5663
5664 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5665 u32 local_adv = 0, remote_adv = 0;
5666
5667 if (txflags & ANEG_CFG_PS1)
5668 local_adv |= ADVERTISE_1000XPAUSE;
5669 if (txflags & ANEG_CFG_PS2)
5670 local_adv |= ADVERTISE_1000XPSE_ASYM;
5671
5672 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5673 remote_adv |= LPA_1000XPAUSE;
5674 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5675 remote_adv |= LPA_1000XPAUSE_ASYM;
5676
5677 tp->link_config.rmt_adv =
5678 mii_adv_to_ethtool_adv_x(remote_adv);
5679
5680 tg3_setup_flow_control(tp, local_adv, remote_adv);
5681
5682 current_link_up = true;
5683 }
5684 for (i = 0; i < 30; i++) {
5685 udelay(20);
5686 tw32_f(MAC_STATUS,
5687 (MAC_STATUS_SYNC_CHANGED |
5688 MAC_STATUS_CFG_CHANGED));
5689 udelay(40);
5690 if ((tr32(MAC_STATUS) &
5691 (MAC_STATUS_SYNC_CHANGED |
5692 MAC_STATUS_CFG_CHANGED)) == 0)
5693 break;
5694 }
5695
5696 mac_status = tr32(MAC_STATUS);
5697 if (!current_link_up &&
5698 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5699 !(mac_status & MAC_STATUS_RCVD_CFG))
5700 current_link_up = true;
5701 } else {
5702 tg3_setup_flow_control(tp, 0, 0);
5703
5704 /* Forcing 1000FD link up. */
5705 current_link_up = true;
5706
5707 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5708 udelay(40);
5709
5710 tw32_f(MAC_MODE, tp->mac_mode);
5711 udelay(40);
5712 }
5713
5714out:
5715 return current_link_up;
5716}
5717
5718static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5719{
5720 u32 orig_pause_cfg;
5721 u16 orig_active_speed;
5722 u8 orig_active_duplex;
5723 u32 mac_status;
5724 bool current_link_up;
5725 int i;
5726
5727 orig_pause_cfg = tp->link_config.active_flowctrl;
5728 orig_active_speed = tp->link_config.active_speed;
5729 orig_active_duplex = tp->link_config.active_duplex;
5730
5731 if (!tg3_flag(tp, HW_AUTONEG) &&
5732 tp->link_up &&
5733 tg3_flag(tp, INIT_COMPLETE)) {
5734 mac_status = tr32(MAC_STATUS);
5735 mac_status &= (MAC_STATUS_PCS_SYNCED |
5736 MAC_STATUS_SIGNAL_DET |
5737 MAC_STATUS_CFG_CHANGED |
5738 MAC_STATUS_RCVD_CFG);
5739 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5740 MAC_STATUS_SIGNAL_DET)) {
5741 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5742 MAC_STATUS_CFG_CHANGED));
5743 return 0;
5744 }
5745 }
5746
5747 tw32_f(MAC_TX_AUTO_NEG, 0);
5748
5749 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5750 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5751 tw32_f(MAC_MODE, tp->mac_mode);
5752 udelay(40);
5753
5754 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5755 tg3_init_bcm8002(tp);
5756
5757 /* Enable link change event even when serdes is polling. */
5758 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5759 udelay(40);
5760
5761 current_link_up = false;
5762 tp->link_config.rmt_adv = 0;
5763 mac_status = tr32(MAC_STATUS);
5764
5765 if (tg3_flag(tp, HW_AUTONEG))
5766 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5767 else
5768 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5769
5770 tp->napi[0].hw_status->status =
5771 (SD_STATUS_UPDATED |
5772 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5773
5774 for (i = 0; i < 100; i++) {
5775 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5776 MAC_STATUS_CFG_CHANGED));
5777 udelay(5);
5778 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5779 MAC_STATUS_CFG_CHANGED |
5780 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5781 break;
5782 }
5783
5784 mac_status = tr32(MAC_STATUS);
5785 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5786 current_link_up = false;
5787 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5788 tp->serdes_counter == 0) {
5789 tw32_f(MAC_MODE, (tp->mac_mode |
5790 MAC_MODE_SEND_CONFIGS));
5791 udelay(1);
5792 tw32_f(MAC_MODE, tp->mac_mode);
5793 }
5794 }
5795
5796 if (current_link_up) {
5797 tp->link_config.active_speed = SPEED_1000;
5798 tp->link_config.active_duplex = DUPLEX_FULL;
5799 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5800 LED_CTRL_LNKLED_OVERRIDE |
5801 LED_CTRL_1000MBPS_ON));
5802 } else {
5803 tp->link_config.active_speed = SPEED_UNKNOWN;
5804 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5805 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5806 LED_CTRL_LNKLED_OVERRIDE |
5807 LED_CTRL_TRAFFIC_OVERRIDE));
5808 }
5809
5810 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5811 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5812 if (orig_pause_cfg != now_pause_cfg ||
5813 orig_active_speed != tp->link_config.active_speed ||
5814 orig_active_duplex != tp->link_config.active_duplex)
5815 tg3_link_report(tp);
5816 }
5817
5818 return 0;
5819}
5820
5821static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5822{
5823 int err = 0;
5824 u32 bmsr, bmcr;
5825 u16 current_speed = SPEED_UNKNOWN;
5826 u8 current_duplex = DUPLEX_UNKNOWN;
5827 bool current_link_up = false;
5828 u32 local_adv, remote_adv, sgsr;
5829
5830 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5831 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5832 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5833 (sgsr & SERDES_TG3_SGMII_MODE)) {
5834
5835 if (force_reset)
5836 tg3_phy_reset(tp);
5837
5838 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5839
5840 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5841 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5842 } else {
5843 current_link_up = true;
5844 if (sgsr & SERDES_TG3_SPEED_1000) {
5845 current_speed = SPEED_1000;
5846 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5847 } else if (sgsr & SERDES_TG3_SPEED_100) {
5848 current_speed = SPEED_100;
5849 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5850 } else {
5851 current_speed = SPEED_10;
5852 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5853 }
5854
5855 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5856 current_duplex = DUPLEX_FULL;
5857 else
5858 current_duplex = DUPLEX_HALF;
5859 }
5860
5861 tw32_f(MAC_MODE, tp->mac_mode);
5862 udelay(40);
5863
5864 tg3_clear_mac_status(tp);
5865
5866 goto fiber_setup_done;
5867 }
5868
5869 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5870 tw32_f(MAC_MODE, tp->mac_mode);
5871 udelay(40);
5872
5873 tg3_clear_mac_status(tp);
5874
5875 if (force_reset)
5876 tg3_phy_reset(tp);
5877
5878 tp->link_config.rmt_adv = 0;
5879
5880 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5881 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5882 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5883 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5884 bmsr |= BMSR_LSTATUS;
5885 else
5886 bmsr &= ~BMSR_LSTATUS;
5887 }
5888
5889 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5890
5891 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5892 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5893 /* do nothing, just check for link up at the end */
5894 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5895 u32 adv, newadv;
5896
5897 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5898 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5899 ADVERTISE_1000XPAUSE |
5900 ADVERTISE_1000XPSE_ASYM |
5901 ADVERTISE_SLCT);
5902
5903 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5904 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5905
5906 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5907 tg3_writephy(tp, MII_ADVERTISE, newadv);
5908 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5909 tg3_writephy(tp, MII_BMCR, bmcr);
5910
5911 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5912 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5913 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5914
5915 return err;
5916 }
5917 } else {
5918 u32 new_bmcr;
5919
5920 bmcr &= ~BMCR_SPEED1000;
5921 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5922
5923 if (tp->link_config.duplex == DUPLEX_FULL)
5924 new_bmcr |= BMCR_FULLDPLX;
5925
5926 if (new_bmcr != bmcr) {
5927 /* BMCR_SPEED1000 is a reserved bit that needs
5928 * to be set on write.
5929 */
5930 new_bmcr |= BMCR_SPEED1000;
5931
5932 /* Force a linkdown */
5933 if (tp->link_up) {
5934 u32 adv;
5935
5936 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5937 adv &= ~(ADVERTISE_1000XFULL |
5938 ADVERTISE_1000XHALF |
5939 ADVERTISE_SLCT);
5940 tg3_writephy(tp, MII_ADVERTISE, adv);
5941 tg3_writephy(tp, MII_BMCR, bmcr |
5942 BMCR_ANRESTART |
5943 BMCR_ANENABLE);
5944 udelay(10);
5945 tg3_carrier_off(tp);
5946 }
5947 tg3_writephy(tp, MII_BMCR, new_bmcr);
5948 bmcr = new_bmcr;
5949 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5950 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5951 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5952 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5953 bmsr |= BMSR_LSTATUS;
5954 else
5955 bmsr &= ~BMSR_LSTATUS;
5956 }
5957 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5958 }
5959 }
5960
5961 if (bmsr & BMSR_LSTATUS) {
5962 current_speed = SPEED_1000;
5963 current_link_up = true;
5964 if (bmcr & BMCR_FULLDPLX)
5965 current_duplex = DUPLEX_FULL;
5966 else
5967 current_duplex = DUPLEX_HALF;
5968
5969 local_adv = 0;
5970 remote_adv = 0;
5971
5972 if (bmcr & BMCR_ANENABLE) {
5973 u32 common;
5974
5975 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5976 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5977 common = local_adv & remote_adv;
5978 if (common & (ADVERTISE_1000XHALF |
5979 ADVERTISE_1000XFULL)) {
5980 if (common & ADVERTISE_1000XFULL)
5981 current_duplex = DUPLEX_FULL;
5982 else
5983 current_duplex = DUPLEX_HALF;
5984
5985 tp->link_config.rmt_adv =
5986 mii_adv_to_ethtool_adv_x(remote_adv);
5987 } else if (!tg3_flag(tp, 5780_CLASS)) {
5988 /* Link is up via parallel detect */
5989 } else {
5990 current_link_up = false;
5991 }
5992 }
5993 }
5994
5995fiber_setup_done:
5996 if (current_link_up && current_duplex == DUPLEX_FULL)
5997 tg3_setup_flow_control(tp, local_adv, remote_adv);
5998
5999 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6000 if (tp->link_config.active_duplex == DUPLEX_HALF)
6001 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
6002
6003 tw32_f(MAC_MODE, tp->mac_mode);
6004 udelay(40);
6005
6006 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6007
6008 tp->link_config.active_speed = current_speed;
6009 tp->link_config.active_duplex = current_duplex;
6010
6011 tg3_test_and_report_link_chg(tp, current_link_up);
6012 return err;
6013}
6014
6015static void tg3_serdes_parallel_detect(struct tg3 *tp)
6016{
6017 if (tp->serdes_counter) {
6018 /* Give autoneg time to complete. */
6019 tp->serdes_counter--;
6020 return;
6021 }
6022
6023 if (!tp->link_up &&
6024 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6025 u32 bmcr;
6026
6027 tg3_readphy(tp, MII_BMCR, &bmcr);
6028 if (bmcr & BMCR_ANENABLE) {
6029 u32 phy1, phy2;
6030
6031 /* Select shadow register 0x1f */
6032 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6033 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6034
6035 /* Select expansion interrupt status register */
6036 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6037 MII_TG3_DSP_EXP1_INT_STAT);
6038 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6039 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6040
6041 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6042 /* We have signal detect and are not receiving
6043 * config code words; link is up by parallel
6044 * detection.
6045 */
6046
6047 bmcr &= ~BMCR_ANENABLE;
6048 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6049 tg3_writephy(tp, MII_BMCR, bmcr);
6050 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6051 }
6052 }
6053 } else if (tp->link_up &&
6054 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6055 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6056 u32 phy2;
6057
6058 /* Select expansion interrupt status register */
6059 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6060 MII_TG3_DSP_EXP1_INT_STAT);
6061 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6062 if (phy2 & 0x20) {
6063 u32 bmcr;
6064
6065 /* Config code words received, turn on autoneg. */
6066 tg3_readphy(tp, MII_BMCR, &bmcr);
6067 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6068
6069 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6070
6071 }
6072 }
6073}
6074
6075static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6076{
6077 u32 val;
6078 int err;
6079
6080 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6081 err = tg3_setup_fiber_phy(tp, force_reset);
6082 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6083 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6084 else
6085 err = tg3_setup_copper_phy(tp, force_reset);
6086
6087 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6088 u32 scale;
6089
6090 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6091 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6092 scale = 65;
6093 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6094 scale = 6;
6095 else
6096 scale = 12;
6097
6098 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6099 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6100 tw32(GRC_MISC_CFG, val);
6101 }
6102
6103 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6104 (6 << TX_LENGTHS_IPG_SHIFT);
6105 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6106 tg3_asic_rev(tp) == ASIC_REV_5762)
6107 val |= tr32(MAC_TX_LENGTHS) &
6108 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6109 TX_LENGTHS_CNT_DWN_VAL_MSK);
6110
6111 if (tp->link_config.active_speed == SPEED_1000 &&
6112 tp->link_config.active_duplex == DUPLEX_HALF)
6113 tw32(MAC_TX_LENGTHS, val |
6114 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6115 else
6116 tw32(MAC_TX_LENGTHS, val |
6117 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6118
6119 if (!tg3_flag(tp, 5705_PLUS)) {
6120 if (tp->link_up) {
6121 tw32(HOSTCC_STAT_COAL_TICKS,
6122 tp->coal.stats_block_coalesce_usecs);
6123 } else {
6124 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6125 }
6126 }
6127
6128 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6129 val = tr32(PCIE_PWR_MGMT_THRESH);
6130 if (!tp->link_up)
6131 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6132 tp->pwrmgmt_thresh;
6133 else
6134 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6135 tw32(PCIE_PWR_MGMT_THRESH, val);
6136 }
6137
6138 return err;
6139}
6140
6141/* tp->lock must be held */
6142static u64 tg3_refclk_read(struct tg3 *tp)
6143{
6144 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6145 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6146}
6147
6148/* tp->lock must be held */
6149static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6150{
6151 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6152
6153 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6154 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6155 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6156 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6157}
6158
6159static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6160static inline void tg3_full_unlock(struct tg3 *tp);
6161static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6162{
6163 struct tg3 *tp = netdev_priv(dev);
6164
6165 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6166 SOF_TIMESTAMPING_RX_SOFTWARE |
6167 SOF_TIMESTAMPING_SOFTWARE;
6168
6169 if (tg3_flag(tp, PTP_CAPABLE)) {
6170 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6171 SOF_TIMESTAMPING_RX_HARDWARE |
6172 SOF_TIMESTAMPING_RAW_HARDWARE;
6173 }
6174
6175 if (tp->ptp_clock)
6176 info->phc_index = ptp_clock_index(tp->ptp_clock);
6177 else
6178 info->phc_index = -1;
6179
6180 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6181
6182 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6183 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6184 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6185 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6186 return 0;
6187}
6188
6189static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6190{
6191 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6192 bool neg_adj = false;
6193 u32 correction = 0;
6194
6195 if (ppb < 0) {
6196 neg_adj = true;
6197 ppb = -ppb;
6198 }
6199
6200 /* Frequency adjustment is performed using hardware with a 24-bit
6201 * accumulator and a programmable correction value. On each clk, the
6202 * correction value gets added to the accumulator and when it
6203 * overflows, the time counter is incremented/decremented.
6204 *
6205 * So conversion from ppb to correction value is
6206 * ppb * (1 << 24) / 1000000000
6207 */
6208 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6209 TG3_EAV_REF_CLK_CORRECT_MASK;
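/* Worked example (illustrative): ppb = 1000 (~1 ppm) yields
* correction = 1000 * 16777216 / 1000000000 = 16, so the 24-bit
* accumulator wraps roughly once every 2^24 / 16 clocks and nudges
* the time counter at about the requested rate.
*/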
6210
6211 tg3_full_lock(tp, 0);
6212
6213 if (correction)
6214 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6215 TG3_EAV_REF_CLK_CORRECT_EN |
6216 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6217 else
6218 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6219
6220 tg3_full_unlock(tp);
6221
6222 return 0;
6223}
6224
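/* Note: clock stepping here is done purely in software; the signed
* delta accumulates in tp->ptp_adjust and is applied when the clock
* is read (tg3_ptp_gettime()) or folded in at resume time, rather
* than by rewriting the hardware counter.
*/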
6225static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6226{
6227 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228
6229 tg3_full_lock(tp, 0);
6230 tp->ptp_adjust += delta;
6231 tg3_full_unlock(tp);
6232
6233 return 0;
6234}
6235
6236static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6237{
6238 u64 ns;
6239 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6240
6241 tg3_full_lock(tp, 0);
6242 ns = tg3_refclk_read(tp);
6243 ns += tp->ptp_adjust;
6244 tg3_full_unlock(tp);
6245
6246 *ts = ns_to_timespec64(ns);
6247
6248 return 0;
6249}
6250
6251static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6252 const struct timespec64 *ts)
6253{
6254 u64 ns;
6255 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6256
6257 ns = timespec64_to_ns(ts);
6258
6259 tg3_full_lock(tp, 0);
6260 tg3_refclk_write(tp, ns);
6261 tp->ptp_adjust = 0;
6262 tg3_full_unlock(tp);
6263
6264 return 0;
6265}
6266
6267static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6268 struct ptp_clock_request *rq, int on)
6269{
6270 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6271 u32 clock_ctl;
6272 int rval = 0;
6273
6274 switch (rq->type) {
6275 case PTP_CLK_REQ_PEROUT:
6276 if (rq->perout.index != 0)
6277 return -EINVAL;
6278
6279 tg3_full_lock(tp, 0);
6280 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6281 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6282
6283 if (on) {
6284 u64 nsec;
6285
6286 nsec = rq->perout.start.sec * 1000000000ULL +
6287 rq->perout.start.nsec;
6288
6289 if (rq->perout.period.sec || rq->perout.period.nsec) {
6290 netdev_warn(tp->dev,
6291 "Device supports only a one-shot timesync output, period must be 0\n");
6292 rval = -EINVAL;
6293 goto err_out;
6294 }
6295
6296 if (nsec & (1ULL << 63)) {
6297 netdev_warn(tp->dev,
6298 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6299 rval = -EINVAL;
6300 goto err_out;
6301 }
6302
6303 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6304 tw32(TG3_EAV_WATCHDOG0_MSB,
6305 TG3_EAV_WATCHDOG0_EN |
6306 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6307
6308 tw32(TG3_EAV_REF_CLCK_CTL,
6309 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6310 } else {
6311 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6312 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6313 }
6314
6315err_out:
6316 tg3_full_unlock(tp);
6317 return rval;
6318
6319 default:
6320 break;
6321 }
6322
6323 return -EOPNOTSUPP;
6324}
6325
6326static const struct ptp_clock_info tg3_ptp_caps = {
6327 .owner = THIS_MODULE,
6328 .name = "tg3 clock",
6329 .max_adj = 250000000,
6330 .n_alarm = 0,
6331 .n_ext_ts = 0,
6332 .n_per_out = 1,
6333 .n_pins = 0,
6334 .pps = 0,
6335 .adjfreq = tg3_ptp_adjfreq,
6336 .adjtime = tg3_ptp_adjtime,
6337 .gettime64 = tg3_ptp_gettime,
6338 .settime64 = tg3_ptp_settime,
6339 .enable = tg3_ptp_enable,
6340};
6341
6342static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6343 struct skb_shared_hwtstamps *timestamp)
6344{
6345 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6346 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6347 tp->ptp_adjust);
6348}
6349
6350/* tp->lock must be held */
6351static void tg3_ptp_init(struct tg3 *tp)
6352{
6353 if (!tg3_flag(tp, PTP_CAPABLE))
6354 return;
6355
6356 /* Initialize the hardware clock to the system time. */
6357 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6358 tp->ptp_adjust = 0;
6359 tp->ptp_info = tg3_ptp_caps;
6360}
6361
6362/* tp->lock must be held */
6363static void tg3_ptp_resume(struct tg3 *tp)
6364{
6365 if (!tg3_flag(tp, PTP_CAPABLE))
6366 return;
6367
6368 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6369 tp->ptp_adjust = 0;
6370}
6371
6372static void tg3_ptp_fini(struct tg3 *tp)
6373{
6374 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6375 return;
6376
6377 ptp_clock_unregister(tp->ptp_clock);
6378 tp->ptp_clock = NULL;
6379 tp->ptp_adjust = 0;
6380}
6381
6382static inline int tg3_irq_sync(struct tg3 *tp)
6383{
6384 return tp->irq_sync;
6385}
6386
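/* Copy a block of registers into a flat dump image. dst is biased by
* off up front, so register offset REG always lands at
* dst[REG / sizeof(u32)]; tg3_dump_legacy_regs() relies on this to
* fill one image from many disjoint register ranges.
*/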
6387static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6388{
6389 int i;
6390
6391 dst = (u32 *)((u8 *)dst + off);
6392 for (i = 0; i < len; i += sizeof(u32))
6393 *dst++ = tr32(off + i);
6394}
6395
6396static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6397{
6398 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6399 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6400 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6401 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6402 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6403 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6404 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6405 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6406 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6407 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6408 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6409 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6410 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6411 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6412 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6413 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6414 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6415 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6416 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6417
6418 if (tg3_flag(tp, SUPPORT_MSIX))
6419 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6420
6421 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6422 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6423 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6424 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6425 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6426 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6427 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6428 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6429
6430 if (!tg3_flag(tp, 5705_PLUS)) {
6431 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6432 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6433 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6434 }
6435
6436 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6437 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6438 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6439 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6440 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6441
6442 if (tg3_flag(tp, NVRAM))
6443 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6444}
6445
6446static void tg3_dump_state(struct tg3 *tp)
6447{
6448 int i;
6449 u32 *regs;
6450
6451 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6452 if (!regs)
6453 return;
6454
6455 if (tg3_flag(tp, PCI_EXPRESS)) {
6456 /* Read up to but not including private PCI registers */
6457 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6458 regs[i / sizeof(u32)] = tr32(i);
6459 } else
6460 tg3_dump_legacy_regs(tp, regs);
6461
6462 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6463 if (!regs[i + 0] && !regs[i + 1] &&
6464 !regs[i + 2] && !regs[i + 3])
6465 continue;
6466
6467 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6468 i * 4,
6469 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6470 }
6471
6472 kfree(regs);
6473
6474 for (i = 0; i < tp->irq_cnt; i++) {
6475 struct tg3_napi *tnapi = &tp->napi[i];
6476
6477 /* SW status block */
6478 netdev_err(tp->dev,
6479 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6480 i,
6481 tnapi->hw_status->status,
6482 tnapi->hw_status->status_tag,
6483 tnapi->hw_status->rx_jumbo_consumer,
6484 tnapi->hw_status->rx_consumer,
6485 tnapi->hw_status->rx_mini_consumer,
6486 tnapi->hw_status->idx[0].rx_producer,
6487 tnapi->hw_status->idx[0].tx_consumer);
6488
6489 netdev_err(tp->dev,
6490 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6491 i,
6492 tnapi->last_tag, tnapi->last_irq_tag,
6493 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6494 tnapi->rx_rcb_ptr,
6495 tnapi->prodring.rx_std_prod_idx,
6496 tnapi->prodring.rx_std_cons_idx,
6497 tnapi->prodring.rx_jmb_prod_idx,
6498 tnapi->prodring.rx_jmb_cons_idx);
6499 }
6500}
6501
6502/* This is called whenever we suspect that the system chipset is re-
6503 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6504 * is bogus tx completions. We try to recover by setting the
6505 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6506 * in the workqueue.
6507 */
6508static void tg3_tx_recover(struct tg3 *tp)
6509{
6510 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6511 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6512
6513 netdev_warn(tp->dev,
6514 "The system may be re-ordering memory-mapped I/O "
6515 "cycles to the network device, attempting to recover. "
6516 "Please report the problem to the driver maintainer "
6517 "and include system chipset information.\n");
6518
6519 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6520}
6521
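/* Free descriptors = tx_pending minus in-flight entries, using
* wrap-safe ring arithmetic. Illustrative example with the usual
* 512-entry ring: tx_prod = 5, tx_cons = 510 gives
* (5 - 510) & 511 = 7 descriptors still in flight.
*/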
6522static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6523{
6524 /* Tell compiler to fetch tx indices from memory. */
6525 barrier();
6526 return tnapi->tx_pending -
6527 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6528}
6529
6530/* Tigon3 never reports partial packet sends. So we do not
6531 * need special logic to handle SKBs that have not had all
6532 * of their frags sent yet, like SunGEM does.
6533 */
6534static void tg3_tx(struct tg3_napi *tnapi)
6535{
6536 struct tg3 *tp = tnapi->tp;
6537 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6538 u32 sw_idx = tnapi->tx_cons;
6539 struct netdev_queue *txq;
6540 int index = tnapi - tp->napi;
6541 unsigned int pkts_compl = 0, bytes_compl = 0;
6542
6543 if (tg3_flag(tp, ENABLE_TSS))
6544 index--;
6545
6546 txq = netdev_get_tx_queue(tp->dev, index);
6547
6548 while (sw_idx != hw_idx) {
6549 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6550 struct sk_buff *skb = ri->skb;
6551 int i, tx_bug = 0;
6552
6553 if (unlikely(skb == NULL)) {
6554 tg3_tx_recover(tp);
6555 return;
6556 }
6557
6558 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6559 struct skb_shared_hwtstamps timestamp;
6560 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6561 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6562
6563 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6564
6565 skb_tstamp_tx(skb, &timestamp);
6566 }
6567
6568 pci_unmap_single(tp->pdev,
6569 dma_unmap_addr(ri, mapping),
6570 skb_headlen(skb),
6571 PCI_DMA_TODEVICE);
6572
6573 ri->skb = NULL;
6574
6575 while (ri->fragmented) {
6576 ri->fragmented = false;
6577 sw_idx = NEXT_TX(sw_idx);
6578 ri = &tnapi->tx_buffers[sw_idx];
6579 }
6580
6581 sw_idx = NEXT_TX(sw_idx);
6582
6583 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6584 ri = &tnapi->tx_buffers[sw_idx];
6585 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6586 tx_bug = 1;
6587
6588 pci_unmap_page(tp->pdev,
6589 dma_unmap_addr(ri, mapping),
6590 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6591 PCI_DMA_TODEVICE);
6592
6593 while (ri->fragmented) {
6594 ri->fragmented = false;
6595 sw_idx = NEXT_TX(sw_idx);
6596 ri = &tnapi->tx_buffers[sw_idx];
6597 }
6598
6599 sw_idx = NEXT_TX(sw_idx);
6600 }
6601
6602 pkts_compl++;
6603 bytes_compl += skb->len;
6604
6605 dev_consume_skb_any(skb);
6606
6607 if (unlikely(tx_bug)) {
6608 tg3_tx_recover(tp);
6609 return;
6610 }
6611 }
6612
6613 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6614
6615 tnapi->tx_cons = sw_idx;
6616
6617 /* Need to make the tx_cons update visible to tg3_start_xmit()
6618 * before checking for netif_queue_stopped(). Without the
6619 * memory barrier, there is a small possibility that tg3_start_xmit()
6620 * will miss it and cause the queue to be stopped forever.
6621 */
6622 smp_mb();
6623
6624 if (unlikely(netif_tx_queue_stopped(txq) &&
6625 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6626 __netif_tx_lock(txq, smp_processor_id());
6627 if (netif_tx_queue_stopped(txq) &&
6628 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6629 netif_tx_wake_queue(txq);
6630 __netif_tx_unlock(txq);
6631 }
6632}
6633
6634static void tg3_frag_free(bool is_frag, void *data)
6635{
6636 if (is_frag)
6637 skb_free_frag(data);
6638 else
6639 kfree(data);
6640}
6641
6642static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6643{
6644 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6645 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6646
6647 if (!ri->data)
6648 return;
6649
6650 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6651 map_sz, PCI_DMA_FROMDEVICE);
6652 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6653 ri->data = NULL;
6654}
6655
6656
6657/* Returns size of skb allocated or < 0 on error.
6658 *
6659 * We only need to fill in the address because the other members
6660 * of the RX descriptor are invariant, see tg3_init_rings.
6661 *
6662 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6663 * posting buffers we only dirty the first cache line of the RX
6664 * descriptor (containing the address). Whereas for the RX status
6665 * buffers the cpu only reads the last cacheline of the RX descriptor
6666 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6667 */
6668static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6669 u32 opaque_key, u32 dest_idx_unmasked,
6670 unsigned int *frag_size)
6671{
6672 struct tg3_rx_buffer_desc *desc;
6673 struct ring_info *map;
6674 u8 *data;
6675 dma_addr_t mapping;
6676 int skb_size, data_size, dest_idx;
6677
6678 switch (opaque_key) {
6679 case RXD_OPAQUE_RING_STD:
6680 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6681 desc = &tpr->rx_std[dest_idx];
6682 map = &tpr->rx_std_buffers[dest_idx];
6683 data_size = tp->rx_pkt_map_sz;
6684 break;
6685
6686 case RXD_OPAQUE_RING_JUMBO:
6687 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6688 desc = &tpr->rx_jmb[dest_idx].std;
6689 map = &tpr->rx_jmb_buffers[dest_idx];
6690 data_size = TG3_RX_JMB_MAP_SZ;
6691 break;
6692
6693 default:
6694 return -EINVAL;
6695 }
6696
6697 /* Do not overwrite any of the map or rp information
6698 * until we are sure we can commit to a new buffer.
6699 *
6700 * Callers depend upon this behavior and assume that
6701 * we leave everything unchanged if we fail.
6702 */
6703 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6704 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
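/* skb_size covers the headroom plus payload plus the skb_shared_info
* tail that build_skb() expects; when it fits in a single page, take
* it from the cheaper per-CPU page-frag allocator.
*/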
6705 if (skb_size <= PAGE_SIZE) {
6706 data = netdev_alloc_frag(skb_size);
6707 *frag_size = skb_size;
6708 } else {
6709 data = kmalloc(skb_size, GFP_ATOMIC);
6710 *frag_size = 0;
6711 }
6712 if (!data)
6713 return -ENOMEM;
6714
6715 mapping = pci_map_single(tp->pdev,
6716 data + TG3_RX_OFFSET(tp),
6717 data_size,
6718 PCI_DMA_FROMDEVICE);
6719 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6720 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6721 return -EIO;
6722 }
6723
6724 map->data = data;
6725 dma_unmap_addr_set(map, mapping, mapping);
6726
6727 desc->addr_hi = ((u64)mapping >> 32);
6728 desc->addr_lo = ((u64)mapping & 0xffffffff);
6729
6730 return data_size;
6731}
6732
6733/* We only need to move over in the address because the other
6734 * members of the RX descriptor are invariant. See notes above
6735 * tg3_alloc_rx_data for full details.
6736 */
6737static void tg3_recycle_rx(struct tg3_napi *tnapi,
6738 struct tg3_rx_prodring_set *dpr,
6739 u32 opaque_key, int src_idx,
6740 u32 dest_idx_unmasked)
6741{
6742 struct tg3 *tp = tnapi->tp;
6743 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6744 struct ring_info *src_map, *dest_map;
6745 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6746 int dest_idx;
6747
6748 switch (opaque_key) {
6749 case RXD_OPAQUE_RING_STD:
6750 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6751 dest_desc = &dpr->rx_std[dest_idx];
6752 dest_map = &dpr->rx_std_buffers[dest_idx];
6753 src_desc = &spr->rx_std[src_idx];
6754 src_map = &spr->rx_std_buffers[src_idx];
6755 break;
6756
6757 case RXD_OPAQUE_RING_JUMBO:
6758 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6759 dest_desc = &dpr->rx_jmb[dest_idx].std;
6760 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6761 src_desc = &spr->rx_jmb[src_idx].std;
6762 src_map = &spr->rx_jmb_buffers[src_idx];
6763 break;
6764
6765 default:
6766 return;
6767 }
6768
6769 dest_map->data = src_map->data;
6770 dma_unmap_addr_set(dest_map, mapping,
6771 dma_unmap_addr(src_map, mapping));
6772 dest_desc->addr_hi = src_desc->addr_hi;
6773 dest_desc->addr_lo = src_desc->addr_lo;
6774
6775 /* Ensure that the update to the skb happens after the physical
6776 * addresses have been transferred to the new BD location.
6777 */
6778 smp_wmb();
6779
6780 src_map->data = NULL;
6781}
6782
6783/* The RX ring scheme is composed of multiple rings which post fresh
6784 * buffers to the chip, and one special ring the chip uses to report
6785 * status back to the host.
6786 *
6787 * The special ring reports the status of received packets to the
6788 * host. The chip does not write into the original descriptor the
6789 * RX buffer was obtained from. The chip simply takes the original
6790 * descriptor as provided by the host, updates the status and length
6791 * field, then writes this into the next status ring entry.
6792 *
6793 * Each ring the host uses to post buffers to the chip is described
6794 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6795 * it is first placed into the on-chip RAM. When the packet's length
6796 * is known, the chip walks down the TG3_BDINFO entries to select the ring.
6797 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6798 * whose MAXLEN covers the new packet's length is chosen.
6799 *
6800 * The "separate ring for rx status" scheme may sound queer, but it makes
6801 * sense from a cache coherency perspective. If only the host writes
6802 * to the buffer post rings, and only the chip writes to the rx status
6803 * rings, then cache lines never move beyond shared-modified state.
6804 * If both the host and chip were to write into the same ring, cache line
6805 * eviction could occur since both entities want it in an exclusive state.
6806 */
6807static int tg3_rx(struct tg3_napi *tnapi, int budget)
6808{
6809 struct tg3 *tp = tnapi->tp;
6810 u32 work_mask, rx_std_posted = 0;
6811 u32 std_prod_idx, jmb_prod_idx;
6812 u32 sw_idx = tnapi->rx_rcb_ptr;
6813 u16 hw_idx;
6814 int received;
6815 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6816
6817 hw_idx = *(tnapi->rx_rcb_prod_idx);
6818 /*
6819 * We need to order the read of hw_idx and the read of
6820 * the opaque cookie.
6821 */
6822 rmb();
6823 work_mask = 0;
6824 received = 0;
6825 std_prod_idx = tpr->rx_std_prod_idx;
6826 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6827 while (sw_idx != hw_idx && budget > 0) {
6828 struct ring_info *ri;
6829 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6830 unsigned int len;
6831 struct sk_buff *skb;
6832 dma_addr_t dma_addr;
6833 u32 opaque_key, desc_idx, *post_ptr;
6834 u8 *data;
6835 u64 tstamp = 0;
6836
6837 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6838 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6839 if (opaque_key == RXD_OPAQUE_RING_STD) {
6840 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6841 dma_addr = dma_unmap_addr(ri, mapping);
6842 data = ri->data;
6843 post_ptr = &std_prod_idx;
6844 rx_std_posted++;
6845 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6846 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6847 dma_addr = dma_unmap_addr(ri, mapping);
6848 data = ri->data;
6849 post_ptr = &jmb_prod_idx;
6850 } else
6851 goto next_pkt_nopost;
6852
6853 work_mask |= opaque_key;
6854
6855 if (desc->err_vlan & RXD_ERR_MASK) {
6856 drop_it:
6857 tg3_recycle_rx(tnapi, tpr, opaque_key,
6858 desc_idx, *post_ptr);
6859 drop_it_no_recycle:
6860 /* Other statistics are kept track of by the card. */
6861 tp->rx_dropped++;
6862 goto next_pkt;
6863 }
6864
6865 prefetch(data + TG3_RX_OFFSET(tp));
6866 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6867 ETH_FCS_LEN;
6868
6869 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6870 RXD_FLAG_PTPSTAT_PTPV1 ||
6871 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6872 RXD_FLAG_PTPSTAT_PTPV2) {
6873 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6874 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6875 }
6876
6877 if (len > TG3_RX_COPY_THRESH(tp)) {
6878 int skb_size;
6879 unsigned int frag_size;
6880
6881 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6882 *post_ptr, &frag_size);
6883 if (skb_size < 0)
6884 goto drop_it;
6885
6886 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6887 PCI_DMA_FROMDEVICE);
6888
6889 /* Ensure that the update to the data happens
6890 * after the usage of the old DMA mapping.
6891 */
6892 smp_wmb();
6893
6894 ri->data = NULL;
6895
6896 skb = build_skb(data, frag_size);
6897 if (!skb) {
6898 tg3_frag_free(frag_size != 0, data);
6899 goto drop_it_no_recycle;
6900 }
6901 skb_reserve(skb, TG3_RX_OFFSET(tp));
6902 } else {
6903 tg3_recycle_rx(tnapi, tpr, opaque_key,
6904 desc_idx, *post_ptr);
6905
6906 skb = netdev_alloc_skb(tp->dev,
6907 len + TG3_RAW_IP_ALIGN);
6908 if (skb == NULL)
6909 goto drop_it_no_recycle;
6910
6911 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6912 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6913 memcpy(skb->data,
6914 data + TG3_RX_OFFSET(tp),
6915 len);
6916 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6917 }
6918
6919 skb_put(skb, len);
6920 if (tstamp)
6921 tg3_hwclock_to_timestamp(tp, tstamp,
6922 skb_hwtstamps(skb));
6923
6924 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6925 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6926 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6927 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6928 skb->ip_summed = CHECKSUM_UNNECESSARY;
6929 else
6930 skb_checksum_none_assert(skb);
6931
6932 skb->protocol = eth_type_trans(skb, tp->dev);
6933
6934 if (len > (tp->dev->mtu + ETH_HLEN) &&
6935 skb->protocol != htons(ETH_P_8021Q) &&
6936 skb->protocol != htons(ETH_P_8021AD)) {
6937 dev_kfree_skb_any(skb);
6938 goto drop_it_no_recycle;
6939 }
6940
6941 if (desc->type_flags & RXD_FLAG_VLAN &&
6942 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6943 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6944 desc->err_vlan & RXD_VLAN_MASK);
6945
6946 napi_gro_receive(&tnapi->napi, skb);
6947
6948 received++;
6949 budget--;
6950
6951next_pkt:
6952 (*post_ptr)++;
6953
6954 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6955 tpr->rx_std_prod_idx = std_prod_idx &
6956 tp->rx_std_ring_mask;
6957 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6958 tpr->rx_std_prod_idx);
6959 work_mask &= ~RXD_OPAQUE_RING_STD;
6960 rx_std_posted = 0;
6961 }
6962next_pkt_nopost:
6963 sw_idx++;
6964 sw_idx &= tp->rx_ret_ring_mask;
6965
6966 /* Refresh hw_idx to see if there is new work */
6967 if (sw_idx == hw_idx) {
6968 hw_idx = *(tnapi->rx_rcb_prod_idx);
6969 rmb();
6970 }
6971 }
6972
6973 /* ACK the status ring. */
6974 tnapi->rx_rcb_ptr = sw_idx;
6975 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6976
6977 /* Refill RX ring(s). */
6978 if (!tg3_flag(tp, ENABLE_RSS)) {
6979 /* Sync BD data before updating mailbox */
6980 wmb();
6981
6982 if (work_mask & RXD_OPAQUE_RING_STD) {
6983 tpr->rx_std_prod_idx = std_prod_idx &
6984 tp->rx_std_ring_mask;
6985 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6986 tpr->rx_std_prod_idx);
6987 }
6988 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6989 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6990 tp->rx_jmb_ring_mask;
6991 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6992 tpr->rx_jmb_prod_idx);
6993 }
6994 mmiowb();
6995 } else if (work_mask) {
6996 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6997 * updated before the producer indices can be updated.
6998 */
6999 smp_wmb();
7000
7001 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7002 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7003
7004 if (tnapi != &tp->napi[1]) {
7005 tp->rx_refill = true;
7006 napi_schedule(&tp->napi[1].napi);
7007 }
7008 }
7009
7010 return received;
7011}
7012
7013static void tg3_poll_link(struct tg3 *tp)
7014{
7015 /* handle link change and other phy events */
7016 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7017 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7018
7019 if (sblk->status & SD_STATUS_LINK_CHG) {
7020 sblk->status = SD_STATUS_UPDATED |
7021 (sblk->status & ~SD_STATUS_LINK_CHG);
7022 spin_lock(&tp->lock);
7023 if (tg3_flag(tp, USE_PHYLIB)) {
7024 tw32_f(MAC_STATUS,
7025 (MAC_STATUS_SYNC_CHANGED |
7026 MAC_STATUS_CFG_CHANGED |
7027 MAC_STATUS_MI_COMPLETION |
7028 MAC_STATUS_LNKSTATE_CHANGED));
7029 udelay(40);
7030 } else
7031 tg3_setup_phy(tp, false);
7032 spin_unlock(&tp->lock);
7033 }
7034 }
7035}
7036
7037static int tg3_rx_prodring_xfer(struct tg3 *tp,
7038 struct tg3_rx_prodring_set *dpr,
7039 struct tg3_rx_prodring_set *spr)
7040{
7041 u32 si, di, cpycnt, src_prod_idx;
7042 int i, err = 0;
7043
7044 while (1) {
7045 src_prod_idx = spr->rx_std_prod_idx;
7046
7047 /* Make sure updates to the rx_std_buffers[] entries and the
7048 * standard producer index are seen in the correct order.
7049 */
7050 smp_rmb();
7051
7052 if (spr->rx_std_cons_idx == src_prod_idx)
7053 break;
7054
7055 if (spr->rx_std_cons_idx < src_prod_idx)
7056 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7057 else
7058 cpycnt = tp->rx_std_ring_mask + 1 -
7059 spr->rx_std_cons_idx;
7060
7061 cpycnt = min(cpycnt,
7062 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7063
7064 si = spr->rx_std_cons_idx;
7065 di = dpr->rx_std_prod_idx;
7066
7067 for (i = di; i < di + cpycnt; i++) {
7068 if (dpr->rx_std_buffers[i].data) {
7069 cpycnt = i - di;
7070 err = -ENOSPC;
7071 break;
7072 }
7073 }
7074
7075 if (!cpycnt)
7076 break;
7077
7078 /* Ensure that updates to the rx_std_buffers ring and the
7079 * shadowed hardware producer ring from tg3_recycle_skb() are
7080 * ordered correctly WRT the skb check above.
7081 */
7082 smp_rmb();
7083
7084 memcpy(&dpr->rx_std_buffers[di],
7085 &spr->rx_std_buffers[si],
7086 cpycnt * sizeof(struct ring_info));
7087
7088 for (i = 0; i < cpycnt; i++, di++, si++) {
7089 struct tg3_rx_buffer_desc *sbd, *dbd;
7090 sbd = &spr->rx_std[si];
7091 dbd = &dpr->rx_std[di];
7092 dbd->addr_hi = sbd->addr_hi;
7093 dbd->addr_lo = sbd->addr_lo;
7094 }
7095
7096 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7097 tp->rx_std_ring_mask;
7098 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7099 tp->rx_std_ring_mask;
7100 }
7101
7102 while (1) {
7103 src_prod_idx = spr->rx_jmb_prod_idx;
7104
7105 /* Make sure updates to the rx_jmb_buffers[] entries and
7106 * the jumbo producer index are seen in the correct order.
7107 */
7108 smp_rmb();
7109
7110 if (spr->rx_jmb_cons_idx == src_prod_idx)
7111 break;
7112
7113 if (spr->rx_jmb_cons_idx < src_prod_idx)
7114 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7115 else
7116 cpycnt = tp->rx_jmb_ring_mask + 1 -
7117 spr->rx_jmb_cons_idx;
7118
7119 cpycnt = min(cpycnt,
7120 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7121
7122 si = spr->rx_jmb_cons_idx;
7123 di = dpr->rx_jmb_prod_idx;
7124
7125 for (i = di; i < di + cpycnt; i++) {
7126 if (dpr->rx_jmb_buffers[i].data) {
7127 cpycnt = i - di;
7128 err = -ENOSPC;
7129 break;
7130 }
7131 }
7132
7133 if (!cpycnt)
7134 break;
7135
7136 /* Ensure that updates to the rx_jmb_buffers ring and the
7137 * shadowed hardware producer ring from tg3_recycle_skb() are
7138 * ordered correctly WRT the skb check above.
7139 */
7140 smp_rmb();
7141
7142 memcpy(&dpr->rx_jmb_buffers[di],
7143 &spr->rx_jmb_buffers[si],
7144 cpycnt * sizeof(struct ring_info));
7145
7146 for (i = 0; i < cpycnt; i++, di++, si++) {
7147 struct tg3_rx_buffer_desc *sbd, *dbd;
7148 sbd = &spr->rx_jmb[si].std;
7149 dbd = &dpr->rx_jmb[di].std;
7150 dbd->addr_hi = sbd->addr_hi;
7151 dbd->addr_lo = sbd->addr_lo;
7152 }
7153
7154 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7155 tp->rx_jmb_ring_mask;
7156 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7157 tp->rx_jmb_ring_mask;
7158 }
7159
7160 return err;
7161}
7162
7163static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7164{
7165 struct tg3 *tp = tnapi->tp;
7166
7167 /* run TX completion thread */
7168 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7169 tg3_tx(tnapi);
7170 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7171 return work_done;
7172 }
7173
7174 if (!tnapi->rx_rcb_prod_idx)
7175 return work_done;
7176
7177 /* run RX thread, within the bounds set by NAPI.
7178 * All RX "locking" is done by ensuring outside
7179 * code synchronizes with tg3->napi.poll()
7180 */
7181 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7182 work_done += tg3_rx(tnapi, budget - work_done);
7183
7184 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7185 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7186 int i, err = 0;
7187 u32 std_prod_idx = dpr->rx_std_prod_idx;
7188 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7189
7190 tp->rx_refill = false;
7191 for (i = 1; i <= tp->rxq_cnt; i++)
7192 err |= tg3_rx_prodring_xfer(tp, dpr,
7193 &tp->napi[i].prodring);
7194
7195 wmb();
7196
7197 if (std_prod_idx != dpr->rx_std_prod_idx)
7198 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7199 dpr->rx_std_prod_idx);
7200
7201 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7202 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7203 dpr->rx_jmb_prod_idx);
7204
7205 mmiowb();
7206
7207 if (err)
7208 tw32_f(HOSTCC_MODE, tp->coal_now);
7209 }
7210
7211 return work_done;
7212}
7213
7214static inline void tg3_reset_task_schedule(struct tg3 *tp)
7215{
7216 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7217 schedule_work(&tp->reset_task);
7218}
7219
7220static inline void tg3_reset_task_cancel(struct tg3 *tp)
7221{
7222 cancel_work_sync(&tp->reset_task);
7223 tg3_flag_clear(tp, RESET_TASK_PENDING);
7224 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7225}
7226
7227static int tg3_poll_msix(struct napi_struct *napi, int budget)
7228{
7229 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7230 struct tg3 *tp = tnapi->tp;
7231 int work_done = 0;
7232 struct tg3_hw_status *sblk = tnapi->hw_status;
7233
7234 while (1) {
7235 work_done = tg3_poll_work(tnapi, work_done, budget);
7236
7237 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7238 goto tx_recovery;
7239
7240 if (unlikely(work_done >= budget))
7241 break;
7242
7243 /* tp->last_tag is used in tg3_int_reenable() below
7244 * to tell the hw how much work has been processed,
7245 * so we must read it before checking for more work.
7246 */
7247 tnapi->last_tag = sblk->status_tag;
7248 tnapi->last_irq_tag = tnapi->last_tag;
7249 rmb();
7250
7251 /* check for RX/TX work to do */
7252 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7253 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7254
7255 /* This test here is not race-free, but will reduce
7256 * the number of interrupts by looping again.
7257 */
7258 if (tnapi == &tp->napi[1] && tp->rx_refill)
7259 continue;
7260
7261 napi_complete_done(napi, work_done);
7262 /* Reenable interrupts. */
7263 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7264
7265 /* This test here is synchronized by napi_schedule()
7266 * and napi_complete() to close the race condition.
7267 */
7268 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7269 tw32(HOSTCC_MODE, tp->coalesce_mode |
7270 HOSTCC_MODE_ENABLE |
7271 tnapi->coal_now);
7272 }
7273 mmiowb();
7274 break;
7275 }
7276 }
7277
7278 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7279 return work_done;
7280
7281tx_recovery:
7282 /* work_done is guaranteed to be less than budget. */
7283 napi_complete(napi);
7284 tg3_reset_task_schedule(tp);
7285 return work_done;
7286}
7287
7288static void tg3_process_error(struct tg3 *tp)
7289{
7290 u32 val;
7291 bool real_error = false;
7292
7293 if (tg3_flag(tp, ERROR_PROCESSED))
7294 return;
7295
7296 /* Check Flow Attention register */
7297 val = tr32(HOSTCC_FLOW_ATTN);
7298 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7299 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7300 real_error = true;
7301 }
7302
7303 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7304 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7305 real_error = true;
7306 }
7307
7308 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7309 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7310 real_error = true;
7311 }
7312
7313 if (!real_error)
7314 return;
7315
7316 tg3_dump_state(tp);
7317
7318 tg3_flag_set(tp, ERROR_PROCESSED);
7319 tg3_reset_task_schedule(tp);
7320}
7321
7322static int tg3_poll(struct napi_struct *napi, int budget)
7323{
7324 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7325 struct tg3 *tp = tnapi->tp;
7326 int work_done = 0;
7327 struct tg3_hw_status *sblk = tnapi->hw_status;
7328
7329 while (1) {
7330 if (sblk->status & SD_STATUS_ERROR)
7331 tg3_process_error(tp);
7332
7333 tg3_poll_link(tp);
7334
7335 work_done = tg3_poll_work(tnapi, work_done, budget);
7336
7337 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7338 goto tx_recovery;
7339
7340 if (unlikely(work_done >= budget))
7341 break;
7342
7343 if (tg3_flag(tp, TAGGED_STATUS)) {
7344 /* tp->last_tag is used in tg3_int_reenable() below
7345 * to tell the hw how much work has been processed,
7346 * so we must read it before checking for more work.
7347 */
7348 tnapi->last_tag = sblk->status_tag;
7349 tnapi->last_irq_tag = tnapi->last_tag;
7350 rmb();
7351 } else
7352 sblk->status &= ~SD_STATUS_UPDATED;
7353
7354 if (likely(!tg3_has_work(tnapi))) {
7355 napi_complete_done(napi, work_done);
7356 tg3_int_reenable(tnapi);
7357 break;
7358 }
7359 }
7360
7361 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7362 return work_done;
7363
7364tx_recovery:
7365 /* work_done is guaranteed to be less than budget. */
7366 napi_complete(napi);
7367 tg3_reset_task_schedule(tp);
7368 return work_done;
7369}
7370
7371static void tg3_napi_disable(struct tg3 *tp)
7372{
7373 int i;
7374
7375 for (i = tp->irq_cnt - 1; i >= 0; i--)
7376 napi_disable(&tp->napi[i].napi);
7377}
7378
7379static void tg3_napi_enable(struct tg3 *tp)
7380{
7381 int i;
7382
7383 for (i = 0; i < tp->irq_cnt; i++)
7384 napi_enable(&tp->napi[i].napi);
7385}
7386
7387static void tg3_napi_init(struct tg3 *tp)
7388{
7389 int i;
7390
7391 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7392 for (i = 1; i < tp->irq_cnt; i++)
7393 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7394}
7395
7396static void tg3_napi_fini(struct tg3 *tp)
7397{
7398 int i;
7399
7400 for (i = 0; i < tp->irq_cnt; i++)
7401 netif_napi_del(&tp->napi[i].napi);
7402}
7403
7404static inline void tg3_netif_stop(struct tg3 *tp)
7405{
7406 netif_trans_update(tp->dev); /* prevent tx timeout */
7407 tg3_napi_disable(tp);
7408 netif_carrier_off(tp->dev);
7409 netif_tx_disable(tp->dev);
7410}
7411
7412/* tp->lock must be held */
7413static inline void tg3_netif_start(struct tg3 *tp)
7414{
7415 tg3_ptp_resume(tp);
7416
7417 /* NOTE: unconditional netif_tx_wake_all_queues is only
7418 * appropriate so long as all callers are assured to
7419 * have free tx slots (such as after tg3_init_hw)
7420 */
7421 netif_tx_wake_all_queues(tp->dev);
7422
7423 if (tp->link_up)
7424 netif_carrier_on(tp->dev);
7425
7426 tg3_napi_enable(tp);
7427 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7428 tg3_enable_ints(tp);
7429}
7430
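/* Quiesce interrupt handling: set irq_sync so the ISRs (via
* tg3_irq_sync()) refuse to schedule NAPI, then drop tp->lock while
* synchronize_irq() waits out any handlers already running. The lock
* is re-taken before returning, per the annotations below.
*/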
7431static void tg3_irq_quiesce(struct tg3 *tp)
7432 __releases(tp->lock)
7433 __acquires(tp->lock)
7434{
7435 int i;
7436
7437 BUG_ON(tp->irq_sync);
7438
7439 tp->irq_sync = 1;
7440 smp_mb();
7441
7442 spin_unlock_bh(&tp->lock);
7443
7444 for (i = 0; i < tp->irq_cnt; i++)
7445 synchronize_irq(tp->napi[i].irq_vec);
7446
7447 spin_lock_bh(&tp->lock);
7448}
7449
7450/* Fully shutdown all tg3 driver activity elsewhere in the system.
7451 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7452 * with as well. Most of the time, this is not necessary except when
7453 * shutting down the device.
7454 */
7455static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7456{
7457 spin_lock_bh(&tp->lock);
7458 if (irq_sync)
7459 tg3_irq_quiesce(tp);
7460}
7461
7462static inline void tg3_full_unlock(struct tg3 *tp)
7463{
7464 spin_unlock_bh(&tp->lock);
7465}
7466
7467/* One-shot MSI handler - Chip automatically disables interrupt
7468 * after sending MSI so driver doesn't have to do it.
7469 */
7470static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7471{
7472 struct tg3_napi *tnapi = dev_id;
7473 struct tg3 *tp = tnapi->tp;
7474
7475 prefetch(tnapi->hw_status);
7476 if (tnapi->rx_rcb)
7477 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7478
7479 if (likely(!tg3_irq_sync(tp)))
7480 napi_schedule(&tnapi->napi);
7481
7482 return IRQ_HANDLED;
7483}
7484
7485/* MSI ISR - No need to check for interrupt sharing and no need to
7486 * flush status block and interrupt mailbox. PCI ordering rules
7487 * guarantee that MSI will arrive after the status block.
7488 */
7489static irqreturn_t tg3_msi(int irq, void *dev_id)
7490{
7491 struct tg3_napi *tnapi = dev_id;
7492 struct tg3 *tp = tnapi->tp;
7493
7494 prefetch(tnapi->hw_status);
7495 if (tnapi->rx_rcb)
7496 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7497 /*
7498 * Writing any value to intr-mbox-0 clears PCI INTA# and
7499 * chip-internal interrupt pending events.
7500 * Writing non-zero to intr-mbox-0 additionally tells the
7501 * NIC to stop sending us irqs, engaging "in-intr-handler"
7502 * event coalescing.
7503 */
7504 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7505 if (likely(!tg3_irq_sync(tp)))
7506 napi_schedule(&tnapi->napi);
7507
7508 return IRQ_RETVAL(1);
7509}
7510
7511static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7512{
7513 struct tg3_napi *tnapi = dev_id;
7514 struct tg3 *tp = tnapi->tp;
7515 struct tg3_hw_status *sblk = tnapi->hw_status;
7516 unsigned int handled = 1;
7517
7518 /* In INTx mode, it is possible for the interrupt to arrive at
7519 * the CPU before the status block posted prior to the interrupt.
7520 * Reading the PCI State register will confirm whether the
7521 * interrupt is ours and will flush the status block.
7522 */
7523 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7524 if (tg3_flag(tp, CHIP_RESETTING) ||
7525 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7526 handled = 0;
7527 goto out;
7528 }
7529 }
7530
7531 /*
7532 * Writing any value to intr-mbox-0 clears PCI INTA# and
7533 * chip-internal interrupt pending events.
7534 * Writing non-zero to intr-mbox-0 additionally tells the
7535 * NIC to stop sending us irqs, engaging "in-intr-handler"
7536 * event coalescing.
7537 *
7538 * Flush the mailbox to de-assert the IRQ immediately to prevent
7539 * spurious interrupts. The flush impacts performance but
7540 * excessive spurious interrupts can be worse in some cases.
7541 */
7542 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7543 if (tg3_irq_sync(tp))
7544 goto out;
7545 sblk->status &= ~SD_STATUS_UPDATED;
7546 if (likely(tg3_has_work(tnapi))) {
7547 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7548 napi_schedule(&tnapi->napi);
7549 } else {
7550 /* No work, shared interrupt perhaps? Re-enable
7551 * interrupts, and flush that PCI write
7552 */
7553 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7554 0x00000000);
7555 }
7556out:
7557 return IRQ_RETVAL(handled);
7558}
7559
7560static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7561{
7562 struct tg3_napi *tnapi = dev_id;
7563 struct tg3 *tp = tnapi->tp;
7564 struct tg3_hw_status *sblk = tnapi->hw_status;
7565 unsigned int handled = 1;
7566
7567 /* In INTx mode, it is possible for the interrupt to arrive at
7568 * the CPU before the status block posted prior to the interrupt.
7569 * Reading the PCI State register will confirm whether the
7570 * interrupt is ours and will flush the status block.
7571 */
7572 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7573 if (tg3_flag(tp, CHIP_RESETTING) ||
7574 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7575 handled = 0;
7576 goto out;
7577 }
7578 }
7579
7580 /*
7581 * Writing any value to intr-mbox-0 clears PCI INTA# and
7582 * chip-internal interrupt pending events.
7583 * Writing non-zero to intr-mbox-0 additionally tells the
7584 * NIC to stop sending us irqs, engaging "in-intr-handler"
7585 * event coalescing.
7586 *
7587 * Flush the mailbox to de-assert the IRQ immediately to prevent
7588 * spurious interrupts. The flush impacts performance but
7589 * excessive spurious interrupts can be worse in some cases.
7590 */
7591 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7592
7593 /*
7594 * In a shared interrupt configuration, sometimes other devices'
7595 * interrupts will scream. We record the current status tag here
7596 * so that the above check can report that the screaming interrupts
7597 * are unhandled. Eventually they will be silenced.
7598 */
7599 tnapi->last_irq_tag = sblk->status_tag;
7600
7601 if (tg3_irq_sync(tp))
7602 goto out;
7603
7604 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7605
7606 napi_schedule(&tnapi->napi);
7607
7608out:
7609 return IRQ_RETVAL(handled);
7610}
7611
7612/* ISR for interrupt test */
7613static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7614{
7615 struct tg3_napi *tnapi = dev_id;
7616 struct tg3 *tp = tnapi->tp;
7617 struct tg3_hw_status *sblk = tnapi->hw_status;
7618
7619 if ((sblk->status & SD_STATUS_UPDATED) ||
7620 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7621 tg3_disable_ints(tp);
7622 return IRQ_RETVAL(1);
7623 }
7624 return IRQ_RETVAL(0);
7625}
7626
7627#ifdef CONFIG_NET_POLL_CONTROLLER
7628static void tg3_poll_controller(struct net_device *dev)
7629{
7630 int i;
7631 struct tg3 *tp = netdev_priv(dev);
7632
7633 if (tg3_irq_sync(tp))
7634 return;
7635
7636 for (i = 0; i < tp->irq_cnt; i++)
7637 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7638}
7639#endif
7640
7641static void tg3_tx_timeout(struct net_device *dev)
7642{
7643 struct tg3 *tp = netdev_priv(dev);
7644
7645 if (netif_msg_tx_err(tp)) {
7646 netdev_err(dev, "transmit timed out, resetting\n");
7647 tg3_dump_state(tp);
7648 }
7649
7650 tg3_reset_task_schedule(tp);
7651}
7652
7653/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7654static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7655{
7656 u32 base = (u32) mapping & 0xffffffff;
7657
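/* Unsigned-wrap check, e.g. base = 0xfffffff0 and len = 0x20:
* base + len + 8 truncates to 0x18, which is < base, so the buffer
* (plus 8 bytes of slack) straddles a 4GB boundary.
*/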
7658 return base + len + 8 < base;
7659}
7660
7661/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7662 * of any 4GB boundaries: 4G, 8G, etc
7663 */
7664static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7665 u32 len, u32 mss)
7666{
7667 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7668 u32 base = (u32) mapping & 0xffffffff;
7669
7670 return ((base + len + (mss & 0x3fff)) < base);
7671 }
7672 return 0;
7673}
7674
7675/* Test for DMA addresses > 40-bit */
7676static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7677 int len)
7678{
7679#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7680 if (tg3_flag(tp, 40BIT_DMA_BUG))
7681 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7682 return 0;
7683#else
7684 return 0;
7685#endif
7686}
7687
7688static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7689 dma_addr_t mapping, u32 len, u32 flags,
7690 u32 mss, u32 vlan)
7691{
7692 txbd->addr_hi = ((u64) mapping >> 32);
7693 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7694 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7695 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7696}
7697
7698static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7699 dma_addr_t map, u32 len, u32 flags,
7700 u32 mss, u32 vlan)
7701{
7702 struct tg3 *tp = tnapi->tp;
7703 bool hwbug = false;
7704
7705 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7706 hwbug = true;
7707
7708 if (tg3_4g_overflow_test(map, len))
7709 hwbug = true;
7710
7711 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7712 hwbug = true;
7713
7714 if (tg3_40bit_overflow_test(tp, map, len))
7715 hwbug = true;
7716
7717 if (tp->dma_limit) {
7718 u32 prvidx = *entry;
7719 u32 tmp_flag = flags & ~TXD_FLAG_END;
7720 while (len > tp->dma_limit && *budget) {
7721 u32 frag_len = tp->dma_limit;
7722 len -= tp->dma_limit;
7723
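/* Rebalance the last two chunks when the tail would be <= 8 bytes:
* e.g. (illustrative) dma_limit = 4096 and len = 4100 emits 2048
* now and leaves 2052, instead of 4096 now and an unsafe 4-byte
* remainder.
*/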
7724 /* Avoid the 8-byte DMA problem */
7725 if (len <= 8) {
7726 len += tp->dma_limit / 2;
7727 frag_len = tp->dma_limit / 2;
7728 }
7729
7730 tnapi->tx_buffers[*entry].fragmented = true;
7731
7732 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7733 frag_len, tmp_flag, mss, vlan);
7734 *budget -= 1;
7735 prvidx = *entry;
7736 *entry = NEXT_TX(*entry);
7737
7738 map += frag_len;
7739 }
7740
7741 if (len) {
7742 if (*budget) {
7743 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7744 len, flags, mss, vlan);
7745 *budget -= 1;
7746 *entry = NEXT_TX(*entry);
7747 } else {
7748 hwbug = true;
7749 tnapi->tx_buffers[prvidx].fragmented = false;
7750 }
7751 }
7752 } else {
7753 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7754 len, flags, mss, vlan);
7755 *entry = NEXT_TX(*entry);
7756 }
7757
7758 return hwbug;
7759}
7760
7761static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7762{
7763 int i;
7764 struct sk_buff *skb;
7765 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7766
7767 skb = txb->skb;
7768 txb->skb = NULL;
7769
7770 pci_unmap_single(tnapi->tp->pdev,
7771 dma_unmap_addr(txb, mapping),
7772 skb_headlen(skb),
7773 PCI_DMA_TODEVICE);
7774
7775 while (txb->fragmented) {
7776 txb->fragmented = false;
7777 entry = NEXT_TX(entry);
7778 txb = &tnapi->tx_buffers[entry];
7779 }
7780
7781 for (i = 0; i <= last; i++) {
7782 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7783
7784 entry = NEXT_TX(entry);
7785 txb = &tnapi->tx_buffers[entry];
7786
7787 pci_unmap_page(tnapi->tp->pdev,
7788 dma_unmap_addr(txb, mapping),
7789 skb_frag_size(frag), PCI_DMA_TODEVICE);
7790
7791 while (txb->fragmented) {
7792 txb->fragmented = false;
7793 entry = NEXT_TX(entry);
7794 txb = &tnapi->tx_buffers[entry];
7795 }
7796 }
7797}
7798
7799 /* Work around 4GB and 40-bit hardware DMA bugs. */
7800static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7801 struct sk_buff **pskb,
7802 u32 *entry, u32 *budget,
7803 u32 base_flags, u32 mss, u32 vlan)
7804{
7805 struct tg3 *tp = tnapi->tp;
7806 struct sk_buff *new_skb, *skb = *pskb;
7807 dma_addr_t new_addr = 0;
7808 int ret = 0;
7809
7810 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7811 new_skb = skb_copy(skb, GFP_ATOMIC);
7812 else {
7813 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7814
7815 new_skb = skb_copy_expand(skb,
7816 skb_headroom(skb) + more_headroom,
7817 skb_tailroom(skb), GFP_ATOMIC);
7818 }
7819
7820 if (!new_skb) {
7821 ret = -1;
7822 } else {
7823 /* New SKB is guaranteed to be linear. */
7824 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7825 PCI_DMA_TODEVICE);
7826 /* Make sure the mapping succeeded */
7827 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7828 dev_kfree_skb_any(new_skb);
7829 ret = -1;
7830 } else {
7831 u32 save_entry = *entry;
7832
7833 base_flags |= TXD_FLAG_END;
7834
7835 tnapi->tx_buffers[*entry].skb = new_skb;
7836 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7837 mapping, new_addr);
7838
7839 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7840 new_skb->len, base_flags,
7841 mss, vlan)) {
7842 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7843 dev_kfree_skb_any(new_skb);
7844 ret = -1;
7845 }
7846 }
7847 }
7848
7849 dev_consume_skb_any(skb);
7850 *pskb = new_skb;
7851 return ret;
7852}
7853
7854static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7855{
7856 /* Check if we will never have enough descriptors,
7857 * as gso_segs can be more than current ring size
7858 */
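/* Each segment can cost up to ~3 descriptors (see frag_cnt_est in
* tg3_tso_bug()), so with the default 511 pending descriptors this
* rejects skbs of roughly 170 or more segments; numbers illustrative.
*/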
7859 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7860}
7861
7862static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7863
7864 /* Use GSO to work around TSO packets that meet the HW bug conditions
7865 * indicated in tg3_tx_frag_set()
7866 */
7867static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7868 struct netdev_queue *txq, struct sk_buff *skb)
7869{
7870 struct sk_buff *segs, *nskb;
7871 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7872
7873 /* Estimate the number of fragments in the worst case */
7874 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7875 netif_tx_stop_queue(txq);
7876
7877 /* netif_tx_stop_queue() must be done before checking
7878 * tx index in tg3_tx_avail() below, because in
7879 * tg3_tx(), we update tx index before checking for
7880 * netif_tx_queue_stopped().
7881 */
7882 smp_mb();
7883 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7884 return NETDEV_TX_BUSY;
7885
7886 netif_tx_wake_queue(txq);
7887 }
7888
7889 segs = skb_gso_segment(skb, tp->dev->features &
7890 ~(NETIF_F_TSO | NETIF_F_TSO6));
7891 if (IS_ERR(segs) || !segs)
7892 goto tg3_tso_bug_end;
7893
7894 do {
7895 nskb = segs;
7896 segs = segs->next;
7897 nskb->next = NULL;
7898 tg3_start_xmit(nskb, tp->dev);
7899 } while (segs);
7900
7901tg3_tso_bug_end:
7902 dev_consume_skb_any(skb);
7903
7904 return NETDEV_TX_OK;
7905}
7906
7907/* hard_start_xmit for all devices */
7908static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7909{
7910 struct tg3 *tp = netdev_priv(dev);
7911 u32 len, entry, base_flags, mss, vlan = 0;
7912 u32 budget;
7913 int i = -1, would_hit_hwbug;
7914 dma_addr_t mapping;
7915 struct tg3_napi *tnapi;
7916 struct netdev_queue *txq;
7917 unsigned int last;
7918 struct iphdr *iph = NULL;
7919 struct tcphdr *tcph = NULL;
7920 __sum16 tcp_csum = 0, ip_csum = 0;
7921 __be16 ip_tot_len = 0;
7922
7923 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7924 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7925 if (tg3_flag(tp, ENABLE_TSS))
7926 tnapi++;
7927
7928 budget = tg3_tx_avail(tnapi);
7929
7930 /* We are running in BH disabled context with netif_tx_lock
7931 * and TX reclaim runs via tp->napi.poll inside of a software
7932 * interrupt. Furthermore, IRQ processing runs lockless so we have
7933 * no IRQ context deadlocks to worry about either. Rejoice!
7934 */
7935 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7936 if (!netif_tx_queue_stopped(txq)) {
7937 netif_tx_stop_queue(txq);
7938
7939 /* This is a hard error, log it. */
7940 netdev_err(dev,
7941 "BUG! Tx Ring full when queue awake!\n");
7942 }
7943 return NETDEV_TX_BUSY;
7944 }
7945
7946 entry = tnapi->tx_prod;
7947 base_flags = 0;
7948
7949 mss = skb_shinfo(skb)->gso_size;
7950 if (mss) {
7951 u32 tcp_opt_len, hdr_len;
7952
7953 if (skb_cow_head(skb, 0))
7954 goto drop;
7955
7956 iph = ip_hdr(skb);
7957 tcp_opt_len = tcp_optlen(skb);
7958
7959 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7960
7961 /* HW/FW can not correctly segment packets that have been
7962 * vlan encapsulated.
7963 */
7964 if (skb->protocol == htons(ETH_P_8021Q) ||
7965 skb->protocol == htons(ETH_P_8021AD)) {
7966 if (tg3_tso_bug_gso_check(tnapi, skb))
7967 return tg3_tso_bug(tp, tnapi, txq, skb);
7968 goto drop;
7969 }
7970
7971 if (!skb_is_gso_v6(skb)) {
7972 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7973 tg3_flag(tp, TSO_BUG)) {
7974 if (tg3_tso_bug_gso_check(tnapi, skb))
7975 return tg3_tso_bug(tp, tnapi, txq, skb);
7976 goto drop;
7977 }
7978 ip_csum = iph->check;
7979 ip_tot_len = iph->tot_len;
7980 iph->check = 0;
7981 iph->tot_len = htons(mss + hdr_len);
7982 }
7983
7984 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7985 TXD_FLAG_CPU_POST_DMA);
7986
7987 tcph = tcp_hdr(skb);
7988 tcp_csum = tcph->check;
7989
7990 if (tg3_flag(tp, HW_TSO_1) ||
7991 tg3_flag(tp, HW_TSO_2) ||
7992 tg3_flag(tp, HW_TSO_3)) {
7993 tcph->check = 0;
7994 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7995 } else {
7996 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7997 0, IPPROTO_TCP, 0);
7998 }
7999
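		/* The hardware TSO engines need the full header length, but
		 * no single descriptor field is wide enough for it; the
		 * shifts below scatter hdr_len across spare mss and
		 * base_flags bits, with a different encoding for each
		 * HW_TSO generation.
		 */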
8000 if (tg3_flag(tp, HW_TSO_3)) {
8001 mss |= (hdr_len & 0xc) << 12;
8002 if (hdr_len & 0x10)
8003 base_flags |= 0x00000010;
8004 base_flags |= (hdr_len & 0x3e0) << 5;
8005 } else if (tg3_flag(tp, HW_TSO_2))
8006 mss |= hdr_len << 9;
8007 else if (tg3_flag(tp, HW_TSO_1) ||
8008 tg3_asic_rev(tp) == ASIC_REV_5705) {
8009 if (tcp_opt_len || iph->ihl > 5) {
8010 int tsflags;
8011
8012 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8013 mss |= (tsflags << 11);
8014 }
8015 } else {
8016 if (tcp_opt_len || iph->ihl > 5) {
8017 int tsflags;
8018
8019 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8020 base_flags |= tsflags << 12;
8021 }
8022 }
8023 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 8024		/* HW/FW cannot correctly checksum packets that have been
8025 * vlan encapsulated.
8026 */
8027 if (skb->protocol == htons(ETH_P_8021Q) ||
8028 skb->protocol == htons(ETH_P_8021AD)) {
8029 if (skb_checksum_help(skb))
8030 goto drop;
8031 } else {
8032 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8033 }
8034 }
8035
8036 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8037 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8038 base_flags |= TXD_FLAG_JMB_PKT;
8039
8040 if (skb_vlan_tag_present(skb)) {
8041 base_flags |= TXD_FLAG_VLAN;
8042 vlan = skb_vlan_tag_get(skb);
8043 }
8044
8045 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8046 tg3_flag(tp, TX_TSTAMP_EN)) {
8047 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8048 base_flags |= TXD_FLAG_HWTSTAMP;
8049 }
8050
8051 len = skb_headlen(skb);
8052
8053 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8054 if (pci_dma_mapping_error(tp->pdev, mapping))
8055 goto drop;
 8056
8058 tnapi->tx_buffers[entry].skb = skb;
8059 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8060
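	/* would_hit_hwbug is set when a BD would trip one of the DMA
	 * errata checked in tg3_tx_frag_set(); the workaround path at
	 * the end of this function then unmaps everything and falls
	 * back to GSO or a linearized copy.
	 */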
8061 would_hit_hwbug = 0;
8062
8063 if (tg3_flag(tp, 5701_DMA_BUG))
8064 would_hit_hwbug = 1;
8065
8066 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8067 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8068 mss, vlan)) {
8069 would_hit_hwbug = 1;
8070 } else if (skb_shinfo(skb)->nr_frags > 0) {
8071 u32 tmp_mss = mss;
8072
8073 if (!tg3_flag(tp, HW_TSO_1) &&
8074 !tg3_flag(tp, HW_TSO_2) &&
8075 !tg3_flag(tp, HW_TSO_3))
8076 tmp_mss = 0;
8077
8078 /* Now loop through additional data
8079 * fragments, and queue them.
8080 */
8081 last = skb_shinfo(skb)->nr_frags - 1;
8082 for (i = 0; i <= last; i++) {
8083 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8084
8085 len = skb_frag_size(frag);
8086 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8087 len, DMA_TO_DEVICE);
8088
8089 tnapi->tx_buffers[entry].skb = NULL;
8090 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8091 mapping);
8092 if (dma_mapping_error(&tp->pdev->dev, mapping))
8093 goto dma_error;
8094
8095 if (!budget ||
8096 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8097 len, base_flags |
8098 ((i == last) ? TXD_FLAG_END : 0),
8099 tmp_mss, vlan)) {
8100 would_hit_hwbug = 1;
8101 break;
8102 }
8103 }
8104 }
8105
8106 if (would_hit_hwbug) {
8107 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8108
8109 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8110 /* If it's a TSO packet, do GSO instead of
8111 * allocating and copying to a large linear SKB
8112 */
8113 if (ip_tot_len) {
8114 iph->check = ip_csum;
8115 iph->tot_len = ip_tot_len;
8116 }
8117 tcph->check = tcp_csum;
8118 return tg3_tso_bug(tp, tnapi, txq, skb);
8119 }
8120
8121 /* If the workaround fails due to memory/mapping
8122 * failure, silently drop this packet.
8123 */
8124 entry = tnapi->tx_prod;
8125 budget = tg3_tx_avail(tnapi);
8126 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8127 base_flags, mss, vlan))
8128 goto drop_nofree;
8129 }
8130
8131 skb_tx_timestamp(skb);
8132 netdev_tx_sent_queue(txq, skb->len);
8133
8134 /* Sync BD data before updating mailbox */
8135 wmb();
8136
8137 tnapi->tx_prod = entry;
8138 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8139 netif_tx_stop_queue(txq);
8140
8141 /* netif_tx_stop_queue() must be done before checking
 8142		 * tx index in tg3_tx_avail() below, because in
8143 * tg3_tx(), we update tx index before checking for
8144 * netif_tx_queue_stopped().
8145 */
8146 smp_mb();
8147 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8148 netif_tx_wake_queue(txq);
8149 }
8150
8151 if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8152 /* Packets are ready, update Tx producer idx on card. */
8153 tw32_tx_mbox(tnapi->prodmbox, entry);
8154 mmiowb();
8155 }
8156
8157 return NETDEV_TX_OK;
8158
8159dma_error:
8160 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8161 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8162drop:
8163 dev_kfree_skb_any(skb);
8164drop_nofree:
8165 tp->tx_dropped++;
8166 return NETDEV_TX_OK;
8167}
8168
8169static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8170{
8171 if (enable) {
8172 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8173 MAC_MODE_PORT_MODE_MASK);
8174
8175 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8176
8177 if (!tg3_flag(tp, 5705_PLUS))
8178 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8179
8180 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8181 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8182 else
8183 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8184 } else {
8185 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8186
8187 if (tg3_flag(tp, 5705_PLUS) ||
8188 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8189 tg3_asic_rev(tp) == ASIC_REV_5700)
8190 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8191 }
8192
8193 tw32(MAC_MODE, tp->mac_mode);
8194 udelay(40);
8195}
8196
8197static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8198{
8199 u32 val, bmcr, mac_mode, ptest = 0;
8200
8201 tg3_phy_toggle_apd(tp, false);
8202 tg3_phy_toggle_automdix(tp, false);
8203
8204 if (extlpbk && tg3_phy_set_extloopbk(tp))
8205 return -EIO;
8206
8207 bmcr = BMCR_FULLDPLX;
8208 switch (speed) {
8209 case SPEED_10:
8210 break;
8211 case SPEED_100:
8212 bmcr |= BMCR_SPEED100;
8213 break;
8214 case SPEED_1000:
8215 default:
8216 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8217 speed = SPEED_100;
8218 bmcr |= BMCR_SPEED100;
8219 } else {
8220 speed = SPEED_1000;
8221 bmcr |= BMCR_SPEED1000;
8222 }
8223 }
8224
8225 if (extlpbk) {
8226 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8227 tg3_readphy(tp, MII_CTRL1000, &val);
8228 val |= CTL1000_AS_MASTER |
8229 CTL1000_ENABLE_MASTER;
8230 tg3_writephy(tp, MII_CTRL1000, val);
8231 } else {
8232 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8233 MII_TG3_FET_PTEST_TRIM_2;
8234 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8235 }
8236 } else
8237 bmcr |= BMCR_LOOPBACK;
8238
8239 tg3_writephy(tp, MII_BMCR, bmcr);
8240
8241 /* The write needs to be flushed for the FETs */
8242 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8243 tg3_readphy(tp, MII_BMCR, &bmcr);
8244
8245 udelay(40);
8246
8247 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8248 tg3_asic_rev(tp) == ASIC_REV_5785) {
8249 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8250 MII_TG3_FET_PTEST_FRC_TX_LINK |
8251 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8252
8253 /* The write needs to be flushed for the AC131 */
8254 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8255 }
8256
8257 /* Reset to prevent losing 1st rx packet intermittently */
8258 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8259 tg3_flag(tp, 5780_CLASS)) {
8260 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8261 udelay(10);
8262 tw32_f(MAC_RX_MODE, tp->rx_mode);
8263 }
8264
8265 mac_mode = tp->mac_mode &
8266 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8267 if (speed == SPEED_1000)
8268 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8269 else
8270 mac_mode |= MAC_MODE_PORT_MODE_MII;
8271
8272 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8273 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8274
8275 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8276 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8277 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8278 mac_mode |= MAC_MODE_LINK_POLARITY;
8279
8280 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8281 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8282 }
8283
8284 tw32(MAC_MODE, mac_mode);
8285 udelay(40);
8286
8287 return 0;
8288}
8289
8290static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8291{
8292 struct tg3 *tp = netdev_priv(dev);
8293
8294 if (features & NETIF_F_LOOPBACK) {
8295 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8296 return;
8297
8298 spin_lock_bh(&tp->lock);
8299 tg3_mac_loopback(tp, true);
8300 netif_carrier_on(tp->dev);
8301 spin_unlock_bh(&tp->lock);
8302 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8303 } else {
8304 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8305 return;
8306
8307 spin_lock_bh(&tp->lock);
8308 tg3_mac_loopback(tp, false);
8309 /* Force link status check */
8310 tg3_setup_phy(tp, true);
8311 spin_unlock_bh(&tp->lock);
8312 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8313 }
8314}
8315
8316static netdev_features_t tg3_fix_features(struct net_device *dev,
8317 netdev_features_t features)
8318{
8319 struct tg3 *tp = netdev_priv(dev);
8320
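	/* 5780-class parts cannot do TSO on jumbo frames. */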
8321 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8322 features &= ~NETIF_F_ALL_TSO;
8323
8324 return features;
8325}
8326
8327static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8328{
8329 netdev_features_t changed = dev->features ^ features;
8330
8331 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8332 tg3_set_loopback(dev, features);
8333
8334 return 0;
8335}
8336
8337static void tg3_rx_prodring_free(struct tg3 *tp,
8338 struct tg3_rx_prodring_set *tpr)
8339{
8340 int i;
8341
8342 if (tpr != &tp->napi[0].prodring) {
8343 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8344 i = (i + 1) & tp->rx_std_ring_mask)
8345 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8346 tp->rx_pkt_map_sz);
8347
8348 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8349 for (i = tpr->rx_jmb_cons_idx;
8350 i != tpr->rx_jmb_prod_idx;
8351 i = (i + 1) & tp->rx_jmb_ring_mask) {
8352 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8353 TG3_RX_JMB_MAP_SZ);
8354 }
8355 }
8356
8357 return;
8358 }
8359
8360 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8361 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8362 tp->rx_pkt_map_sz);
8363
8364 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8365 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8366 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8367 TG3_RX_JMB_MAP_SZ);
8368 }
8369}
8370
8371/* Initialize rx rings for packet processing.
8372 *
8373 * The chip has been shut down and the driver detached from
 8374 * the network stack, so no interrupts or new tx packets will
8375 * end up in the driver. tp->{tx,}lock are held and thus
8376 * we may not sleep.
8377 */
8378static int tg3_rx_prodring_alloc(struct tg3 *tp,
8379 struct tg3_rx_prodring_set *tpr)
8380{
8381 u32 i, rx_pkt_dma_sz;
8382
8383 tpr->rx_std_cons_idx = 0;
8384 tpr->rx_std_prod_idx = 0;
8385 tpr->rx_jmb_cons_idx = 0;
8386 tpr->rx_jmb_prod_idx = 0;
8387
8388 if (tpr != &tp->napi[0].prodring) {
8389 memset(&tpr->rx_std_buffers[0], 0,
8390 TG3_RX_STD_BUFF_RING_SIZE(tp));
8391 if (tpr->rx_jmb_buffers)
8392 memset(&tpr->rx_jmb_buffers[0], 0,
8393 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8394 goto done;
8395 }
8396
8397 /* Zero out all descriptors. */
8398 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8399
8400 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8401 if (tg3_flag(tp, 5780_CLASS) &&
8402 tp->dev->mtu > ETH_DATA_LEN)
8403 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8404 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8405
 8406	/* Initialize invariants of the rings; we only set this
8407 * stuff once. This works because the card does not
8408 * write into the rx buffer posting rings.
8409 */
8410 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8411 struct tg3_rx_buffer_desc *rxd;
8412
8413 rxd = &tpr->rx_std[i];
8414 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8415 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8416 rxd->opaque = (RXD_OPAQUE_RING_STD |
8417 (i << RXD_OPAQUE_INDEX_SHIFT));
8418 }
8419
8420 /* Now allocate fresh SKBs for each rx ring. */
8421 for (i = 0; i < tp->rx_pending; i++) {
8422 unsigned int frag_size;
8423
8424 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8425 &frag_size) < 0) {
8426 netdev_warn(tp->dev,
8427 "Using a smaller RX standard ring. Only "
8428 "%d out of %d buffers were allocated "
8429 "successfully\n", i, tp->rx_pending);
8430 if (i == 0)
8431 goto initfail;
8432 tp->rx_pending = i;
8433 break;
8434 }
8435 }
8436
8437 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8438 goto done;
8439
8440 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8441
8442 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8443 goto done;
8444
8445 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8446 struct tg3_rx_buffer_desc *rxd;
8447
8448 rxd = &tpr->rx_jmb[i].std;
8449 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8450 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8451 RXD_FLAG_JUMBO;
8452 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8453 (i << RXD_OPAQUE_INDEX_SHIFT));
8454 }
8455
8456 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8457 unsigned int frag_size;
8458
8459 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8460 &frag_size) < 0) {
8461 netdev_warn(tp->dev,
8462 "Using a smaller RX jumbo ring. Only %d "
8463 "out of %d buffers were allocated "
8464 "successfully\n", i, tp->rx_jumbo_pending);
8465 if (i == 0)
8466 goto initfail;
8467 tp->rx_jumbo_pending = i;
8468 break;
8469 }
8470 }
8471
8472done:
8473 return 0;
8474
8475initfail:
8476 tg3_rx_prodring_free(tp, tpr);
8477 return -ENOMEM;
8478}
8479
8480static void tg3_rx_prodring_fini(struct tg3 *tp,
8481 struct tg3_rx_prodring_set *tpr)
8482{
8483 kfree(tpr->rx_std_buffers);
8484 tpr->rx_std_buffers = NULL;
8485 kfree(tpr->rx_jmb_buffers);
8486 tpr->rx_jmb_buffers = NULL;
8487 if (tpr->rx_std) {
8488 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8489 tpr->rx_std, tpr->rx_std_mapping);
8490 tpr->rx_std = NULL;
8491 }
8492 if (tpr->rx_jmb) {
8493 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8494 tpr->rx_jmb, tpr->rx_jmb_mapping);
8495 tpr->rx_jmb = NULL;
8496 }
8497}
8498
8499static int tg3_rx_prodring_init(struct tg3 *tp,
8500 struct tg3_rx_prodring_set *tpr)
8501{
8502 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8503 GFP_KERNEL);
8504 if (!tpr->rx_std_buffers)
8505 return -ENOMEM;
8506
8507 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8508 TG3_RX_STD_RING_BYTES(tp),
8509 &tpr->rx_std_mapping,
8510 GFP_KERNEL);
8511 if (!tpr->rx_std)
8512 goto err_out;
8513
8514 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8515 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8516 GFP_KERNEL);
8517 if (!tpr->rx_jmb_buffers)
8518 goto err_out;
8519
8520 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8521 TG3_RX_JMB_RING_BYTES(tp),
8522 &tpr->rx_jmb_mapping,
8523 GFP_KERNEL);
8524 if (!tpr->rx_jmb)
8525 goto err_out;
8526 }
8527
8528 return 0;
8529
8530err_out:
8531 tg3_rx_prodring_fini(tp, tpr);
8532 return -ENOMEM;
8533}
8534
8535/* Free up pending packets in all rx/tx rings.
8536 *
8537 * The chip has been shut down and the driver detached from
 8538 * the network stack, so no interrupts or new tx packets will
8539 * end up in the driver. tp->{tx,}lock is not held and we are not
8540 * in an interrupt context and thus may sleep.
8541 */
8542static void tg3_free_rings(struct tg3 *tp)
8543{
8544 int i, j;
8545
8546 for (j = 0; j < tp->irq_cnt; j++) {
8547 struct tg3_napi *tnapi = &tp->napi[j];
8548
8549 tg3_rx_prodring_free(tp, &tnapi->prodring);
8550
8551 if (!tnapi->tx_buffers)
8552 continue;
8553
8554 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8555 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8556
8557 if (!skb)
8558 continue;
8559
8560 tg3_tx_skb_unmap(tnapi, i,
8561 skb_shinfo(skb)->nr_frags - 1);
8562
8563 dev_consume_skb_any(skb);
8564 }
8565 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8566 }
8567}
8568
8569/* Initialize tx/rx rings for packet processing.
8570 *
8571 * The chip has been shut down and the driver detached from
 8572 * the network stack, so no interrupts or new tx packets will
8573 * end up in the driver. tp->{tx,}lock are held and thus
8574 * we may not sleep.
8575 */
8576static int tg3_init_rings(struct tg3 *tp)
8577{
8578 int i;
8579
8580 /* Free up all the SKBs. */
8581 tg3_free_rings(tp);
8582
8583 for (i = 0; i < tp->irq_cnt; i++) {
8584 struct tg3_napi *tnapi = &tp->napi[i];
8585
8586 tnapi->last_tag = 0;
8587 tnapi->last_irq_tag = 0;
8588 tnapi->hw_status->status = 0;
8589 tnapi->hw_status->status_tag = 0;
8590 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8591
8592 tnapi->tx_prod = 0;
8593 tnapi->tx_cons = 0;
8594 if (tnapi->tx_ring)
8595 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8596
8597 tnapi->rx_rcb_ptr = 0;
8598 if (tnapi->rx_rcb)
8599 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8600
8601 if (tnapi->prodring.rx_std &&
8602 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8603 tg3_free_rings(tp);
8604 return -ENOMEM;
8605 }
8606 }
8607
8608 return 0;
8609}
8610
8611static void tg3_mem_tx_release(struct tg3 *tp)
8612{
8613 int i;
8614
8615 for (i = 0; i < tp->irq_max; i++) {
8616 struct tg3_napi *tnapi = &tp->napi[i];
8617
8618 if (tnapi->tx_ring) {
8619 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8620 tnapi->tx_ring, tnapi->tx_desc_mapping);
8621 tnapi->tx_ring = NULL;
8622 }
8623
8624 kfree(tnapi->tx_buffers);
8625 tnapi->tx_buffers = NULL;
8626 }
8627}
8628
8629static int tg3_mem_tx_acquire(struct tg3 *tp)
8630{
8631 int i;
8632 struct tg3_napi *tnapi = &tp->napi[0];
8633
8634 /* If multivector TSS is enabled, vector 0 does not handle
8635 * tx interrupts. Don't allocate any resources for it.
8636 */
8637 if (tg3_flag(tp, ENABLE_TSS))
8638 tnapi++;
8639
8640 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8641 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8642 sizeof(struct tg3_tx_ring_info),
8643 GFP_KERNEL);
8644 if (!tnapi->tx_buffers)
8645 goto err_out;
8646
8647 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8648 TG3_TX_RING_BYTES,
8649 &tnapi->tx_desc_mapping,
8650 GFP_KERNEL);
8651 if (!tnapi->tx_ring)
8652 goto err_out;
8653 }
8654
8655 return 0;
8656
8657err_out:
8658 tg3_mem_tx_release(tp);
8659 return -ENOMEM;
8660}
8661
8662static void tg3_mem_rx_release(struct tg3 *tp)
8663{
8664 int i;
8665
8666 for (i = 0; i < tp->irq_max; i++) {
8667 struct tg3_napi *tnapi = &tp->napi[i];
8668
8669 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8670
8671 if (!tnapi->rx_rcb)
8672 continue;
8673
8674 dma_free_coherent(&tp->pdev->dev,
8675 TG3_RX_RCB_RING_BYTES(tp),
8676 tnapi->rx_rcb,
8677 tnapi->rx_rcb_mapping);
8678 tnapi->rx_rcb = NULL;
8679 }
8680}
8681
8682static int tg3_mem_rx_acquire(struct tg3 *tp)
8683{
8684 unsigned int i, limit;
8685
8686 limit = tp->rxq_cnt;
8687
8688 /* If RSS is enabled, we need a (dummy) producer ring
8689 * set on vector zero. This is the true hw prodring.
8690 */
8691 if (tg3_flag(tp, ENABLE_RSS))
8692 limit++;
8693
8694 for (i = 0; i < limit; i++) {
8695 struct tg3_napi *tnapi = &tp->napi[i];
8696
8697 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8698 goto err_out;
8699
8700 /* If multivector RSS is enabled, vector 0
8701 * does not handle rx or tx interrupts.
8702 * Don't allocate any resources for it.
8703 */
8704 if (!i && tg3_flag(tp, ENABLE_RSS))
8705 continue;
8706
8707 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8708 TG3_RX_RCB_RING_BYTES(tp),
8709 &tnapi->rx_rcb_mapping,
8710 GFP_KERNEL);
8711 if (!tnapi->rx_rcb)
8712 goto err_out;
8713 }
8714
8715 return 0;
8716
8717err_out:
8718 tg3_mem_rx_release(tp);
8719 return -ENOMEM;
8720}
8721
8722/*
8723 * Must not be invoked with interrupt sources disabled and
 8724 * the hardware shut down.
8725 */
8726static void tg3_free_consistent(struct tg3 *tp)
8727{
8728 int i;
8729
8730 for (i = 0; i < tp->irq_cnt; i++) {
8731 struct tg3_napi *tnapi = &tp->napi[i];
8732
8733 if (tnapi->hw_status) {
8734 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8735 tnapi->hw_status,
8736 tnapi->status_mapping);
8737 tnapi->hw_status = NULL;
8738 }
8739 }
8740
8741 tg3_mem_rx_release(tp);
8742 tg3_mem_tx_release(tp);
8743
8744 /* tp->hw_stats can be referenced safely:
8745 * 1. under rtnl_lock
8746 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8747 */
8748 if (tp->hw_stats) {
8749 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8750 tp->hw_stats, tp->stats_mapping);
8751 tp->hw_stats = NULL;
8752 }
8753}
8754
8755/*
8756 * Must not be invoked with interrupt sources disabled and
 8757 * the hardware shut down. Can sleep.
8758 */
8759static int tg3_alloc_consistent(struct tg3 *tp)
8760{
8761 int i;
8762
8763 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8764 sizeof(struct tg3_hw_stats),
8765 &tp->stats_mapping, GFP_KERNEL);
8766 if (!tp->hw_stats)
8767 goto err_out;
8768
8769 for (i = 0; i < tp->irq_cnt; i++) {
8770 struct tg3_napi *tnapi = &tp->napi[i];
8771 struct tg3_hw_status *sblk;
8772
8773 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8774 TG3_HW_STATUS_SIZE,
8775 &tnapi->status_mapping,
8776 GFP_KERNEL);
8777 if (!tnapi->hw_status)
8778 goto err_out;
8779
8780 sblk = tnapi->hw_status;
8781
8782 if (tg3_flag(tp, ENABLE_RSS)) {
8783 u16 *prodptr = NULL;
8784
8785 /*
8786 * When RSS is enabled, the status block format changes
8787 * slightly. The "rx_jumbo_consumer", "reserved",
8788 * and "rx_mini_consumer" members get mapped to the
8789 * other three rx return ring producer indexes.
8790 */
8791 switch (i) {
8792 case 1:
8793 prodptr = &sblk->idx[0].rx_producer;
8794 break;
8795 case 2:
8796 prodptr = &sblk->rx_jumbo_consumer;
8797 break;
8798 case 3:
8799 prodptr = &sblk->reserved;
8800 break;
8801 case 4:
8802 prodptr = &sblk->rx_mini_consumer;
8803 break;
8804 }
8805 tnapi->rx_rcb_prod_idx = prodptr;
8806 } else {
8807 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8808 }
8809 }
8810
8811 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8812 goto err_out;
8813
8814 return 0;
8815
8816err_out:
8817 tg3_free_consistent(tp);
8818 return -ENOMEM;
8819}
8820
8821#define MAX_WAIT_CNT 1000
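/* The stop/quiesce loops below poll every 100us, so MAX_WAIT_CNT bounds
 * each wait at roughly 100ms.
 */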
8822
8823/* To stop a block, clear the enable bit and poll till it
8824 * clears. tp->lock is held.
8825 */
8826static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8827{
8828 unsigned int i;
8829 u32 val;
8830
8831 if (tg3_flag(tp, 5705_PLUS)) {
8832 switch (ofs) {
8833 case RCVLSC_MODE:
8834 case DMAC_MODE:
8835 case MBFREE_MODE:
8836 case BUFMGR_MODE:
8837 case MEMARB_MODE:
8838 /* We can't enable/disable these bits of the
8839 * 5705/5750, just say success.
8840 */
8841 return 0;
8842
8843 default:
8844 break;
8845 }
8846 }
8847
8848 val = tr32(ofs);
8849 val &= ~enable_bit;
8850 tw32_f(ofs, val);
8851
8852 for (i = 0; i < MAX_WAIT_CNT; i++) {
8853 if (pci_channel_offline(tp->pdev)) {
8854 dev_err(&tp->pdev->dev,
8855 "tg3_stop_block device offline, "
8856 "ofs=%lx enable_bit=%x\n",
8857 ofs, enable_bit);
8858 return -ENODEV;
8859 }
8860
8861 udelay(100);
8862 val = tr32(ofs);
8863 if ((val & enable_bit) == 0)
8864 break;
8865 }
8866
8867 if (i == MAX_WAIT_CNT && !silent) {
8868 dev_err(&tp->pdev->dev,
8869 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8870 ofs, enable_bit);
8871 return -ENODEV;
8872 }
8873
8874 return 0;
8875}
8876
8877/* tp->lock is held. */
8878static int tg3_abort_hw(struct tg3 *tp, bool silent)
8879{
8880 int i, err;
8881
8882 tg3_disable_ints(tp);
8883
8884 if (pci_channel_offline(tp->pdev)) {
8885 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8886 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8887 err = -ENODEV;
8888 goto err_no_dev;
8889 }
8890
8891 tp->rx_mode &= ~RX_MODE_ENABLE;
8892 tw32_f(MAC_RX_MODE, tp->rx_mode);
8893 udelay(10);
8894
8895 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8896 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8897 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8898 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8899 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8900 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8901
8902 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8903 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8904 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8905 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8906 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8907 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8908 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8909
8910 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8911 tw32_f(MAC_MODE, tp->mac_mode);
8912 udelay(40);
8913
8914 tp->tx_mode &= ~TX_MODE_ENABLE;
8915 tw32_f(MAC_TX_MODE, tp->tx_mode);
8916
8917 for (i = 0; i < MAX_WAIT_CNT; i++) {
8918 udelay(100);
8919 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8920 break;
8921 }
8922 if (i >= MAX_WAIT_CNT) {
8923 dev_err(&tp->pdev->dev,
8924 "%s timed out, TX_MODE_ENABLE will not clear "
8925 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8926 err |= -ENODEV;
8927 }
8928
8929 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8930 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8931 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8932
8933 tw32(FTQ_RESET, 0xffffffff);
8934 tw32(FTQ_RESET, 0x00000000);
8935
8936 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8937 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8938
8939err_no_dev:
8940 for (i = 0; i < tp->irq_cnt; i++) {
8941 struct tg3_napi *tnapi = &tp->napi[i];
8942 if (tnapi->hw_status)
8943 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8944 }
8945
8946 return err;
8947}
8948
8949/* Save PCI command register before chip reset */
8950static void tg3_save_pci_state(struct tg3 *tp)
8951{
8952 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8953}
8954
8955/* Restore PCI state after chip reset */
8956static void tg3_restore_pci_state(struct tg3 *tp)
8957{
8958 u32 val;
8959
8960 /* Re-enable indirect register accesses. */
8961 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8962 tp->misc_host_ctrl);
8963
8964 /* Set MAX PCI retry to zero. */
8965 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8966 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8967 tg3_flag(tp, PCIX_MODE))
8968 val |= PCISTATE_RETRY_SAME_DMA;
8969 /* Allow reads and writes to the APE register and memory space. */
8970 if (tg3_flag(tp, ENABLE_APE))
8971 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8972 PCISTATE_ALLOW_APE_SHMEM_WR |
8973 PCISTATE_ALLOW_APE_PSPACE_WR;
8974 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8975
8976 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8977
8978 if (!tg3_flag(tp, PCI_EXPRESS)) {
8979 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8980 tp->pci_cacheline_sz);
8981 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8982 tp->pci_lat_timer);
8983 }
8984
8985 /* Make sure PCI-X relaxed ordering bit is clear. */
8986 if (tg3_flag(tp, PCIX_MODE)) {
8987 u16 pcix_cmd;
8988
8989 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8990 &pcix_cmd);
8991 pcix_cmd &= ~PCI_X_CMD_ERO;
8992 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8993 pcix_cmd);
8994 }
8995
8996 if (tg3_flag(tp, 5780_CLASS)) {
8997
8998 /* Chip reset on 5780 will reset MSI enable bit,
 8999		 * so we need to restore it.
9000 */
9001 if (tg3_flag(tp, USING_MSI)) {
9002 u16 ctrl;
9003
9004 pci_read_config_word(tp->pdev,
9005 tp->msi_cap + PCI_MSI_FLAGS,
9006 &ctrl);
9007 pci_write_config_word(tp->pdev,
9008 tp->msi_cap + PCI_MSI_FLAGS,
9009 ctrl | PCI_MSI_FLAGS_ENABLE);
9010 val = tr32(MSGINT_MODE);
9011 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9012 }
9013 }
9014}
9015
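/* Force the MAC clock to its highest rate so bootcode completes
 * promptly after a reset; paired with tg3_restore_clk() once
 * tg3_poll_fw() has seen the bootcode finish (see tg3_chip_reset()).
 */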
9016static void tg3_override_clk(struct tg3 *tp)
9017{
9018 u32 val;
9019
9020 switch (tg3_asic_rev(tp)) {
9021 case ASIC_REV_5717:
9022 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9023 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9024 TG3_CPMU_MAC_ORIDE_ENABLE);
9025 break;
9026
9027 case ASIC_REV_5719:
9028 case ASIC_REV_5720:
9029 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9030 break;
9031
9032 default:
9033 return;
9034 }
9035}
9036
9037static void tg3_restore_clk(struct tg3 *tp)
9038{
9039 u32 val;
9040
9041 switch (tg3_asic_rev(tp)) {
9042 case ASIC_REV_5717:
9043 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9044 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9045 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9046 break;
9047
9048 case ASIC_REV_5719:
9049 case ASIC_REV_5720:
9050 val = tr32(TG3_CPMU_CLCK_ORIDE);
9051 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9052 break;
9053
9054 default:
9055 return;
9056 }
9057}
9058
9059/* tp->lock is held. */
9060static int tg3_chip_reset(struct tg3 *tp)
9061 __releases(tp->lock)
9062 __acquires(tp->lock)
9063{
9064 u32 val;
9065 void (*write_op)(struct tg3 *, u32, u32);
9066 int i, err;
9067
9068 if (!pci_device_is_present(tp->pdev))
9069 return -ENODEV;
9070
9071 tg3_nvram_lock(tp);
9072
9073 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9074
9075 /* No matching tg3_nvram_unlock() after this because
9076 * chip reset below will undo the nvram lock.
9077 */
9078 tp->nvram_lock_cnt = 0;
9079
9080 /* GRC_MISC_CFG core clock reset will clear the memory
9081 * enable bit in PCI register 4 and the MSI enable bit
9082 * on some chips, so we save relevant registers here.
9083 */
9084 tg3_save_pci_state(tp);
9085
9086 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9087 tg3_flag(tp, 5755_PLUS))
9088 tw32(GRC_FASTBOOT_PC, 0);
9089
9090 /*
9091 * We must avoid the readl() that normally takes place.
9092 * It locks machines, causes machine checks, and other
9093 * fun things. So, temporarily disable the 5701
 9094	 * hardware workaround while we do the reset.
9095 */
9096 write_op = tp->write32;
9097 if (write_op == tg3_write_flush_reg32)
9098 tp->write32 = tg3_write32;
9099
9100 /* Prevent the irq handler from reading or writing PCI registers
9101 * during chip reset when the memory enable bit in the PCI command
 9102	 * register may be cleared. The chip does not generate interrupts
9103 * at this time, but the irq handler may still be called due to irq
9104 * sharing or irqpoll.
9105 */
9106 tg3_flag_set(tp, CHIP_RESETTING);
9107 for (i = 0; i < tp->irq_cnt; i++) {
9108 struct tg3_napi *tnapi = &tp->napi[i];
9109 if (tnapi->hw_status) {
9110 tnapi->hw_status->status = 0;
9111 tnapi->hw_status->status_tag = 0;
9112 }
9113 tnapi->last_tag = 0;
9114 tnapi->last_irq_tag = 0;
9115 }
9116 smp_mb();
9117
9118 tg3_full_unlock(tp);
9119
9120 for (i = 0; i < tp->irq_cnt; i++)
9121 synchronize_irq(tp->napi[i].irq_vec);
9122
9123 tg3_full_lock(tp, 0);
9124
9125 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9126 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9127 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9128 }
9129
9130 /* do the reset */
9131 val = GRC_MISC_CFG_CORECLK_RESET;
9132
9133 if (tg3_flag(tp, PCI_EXPRESS)) {
9134 /* Force PCIe 1.0a mode */
9135 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9136 !tg3_flag(tp, 57765_PLUS) &&
9137 tr32(TG3_PCIE_PHY_TSTCTL) ==
9138 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9139 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9140
9141 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9142 tw32(GRC_MISC_CFG, (1 << 29));
9143 val |= (1 << 29);
9144 }
9145 }
9146
9147 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9148 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9149 tw32(GRC_VCPU_EXT_CTRL,
9150 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9151 }
9152
9153 /* Set the clock to the highest frequency to avoid timeouts. With link
9154 * aware mode, the clock speed could be slow and bootcode does not
9155 * complete within the expected time. Override the clock to allow the
9156 * bootcode to finish sooner and then restore it.
9157 */
9158 tg3_override_clk(tp);
9159
 9160	/* Manage gphy power for all PCIe devices that lack a CPMU. */
9161 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9162 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9163
9164 tw32(GRC_MISC_CFG, val);
9165
9166 /* restore 5701 hardware bug workaround write method */
9167 tp->write32 = write_op;
9168
9169 /* Unfortunately, we have to delay before the PCI read back.
9170 * Some 575X chips even will not respond to a PCI cfg access
9171 * when the reset command is given to the chip.
9172 *
9173 * How do these hardware designers expect things to work
9174 * properly if the PCI write is posted for a long period
9175 * of time? It is always necessary to have some method by
9176 * which a register read back can occur to push the write
9177 * out which does the reset.
9178 *
9179 * For most tg3 variants the trick below was working.
9180 * Ho hum...
9181 */
9182 udelay(120);
9183
9184 /* Flush PCI posted writes. The normal MMIO registers
9185 * are inaccessible at this time so this is the only
 9186	 * way to do this reliably (actually, this is no longer
9187 * the case, see above). I tried to use indirect
9188 * register read/write but this upset some 5701 variants.
9189 */
9190 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9191
9192 udelay(120);
9193
9194 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9195 u16 val16;
9196
9197 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9198 int j;
9199 u32 cfg_val;
9200
9201 /* Wait for link training to complete. */
9202 for (j = 0; j < 5000; j++)
9203 udelay(100);
9204
9205 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9206 pci_write_config_dword(tp->pdev, 0xc4,
9207 cfg_val | (1 << 15));
9208 }
9209
9210 /* Clear the "no snoop" and "relaxed ordering" bits. */
9211 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9212 /*
9213 * Older PCIe devices only support the 128 byte
9214 * MPS setting. Enforce the restriction.
9215 */
9216 if (!tg3_flag(tp, CPMU_PRESENT))
9217 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9218 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9219
9220 /* Clear error status */
9221 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9222 PCI_EXP_DEVSTA_CED |
9223 PCI_EXP_DEVSTA_NFED |
9224 PCI_EXP_DEVSTA_FED |
9225 PCI_EXP_DEVSTA_URD);
9226 }
9227
9228 tg3_restore_pci_state(tp);
9229
9230 tg3_flag_clear(tp, CHIP_RESETTING);
9231 tg3_flag_clear(tp, ERROR_PROCESSED);
9232
9233 val = 0;
9234 if (tg3_flag(tp, 5780_CLASS))
9235 val = tr32(MEMARB_MODE);
9236 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9237
9238 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9239 tg3_stop_fw(tp);
9240 tw32(0x5000, 0x400);
9241 }
9242
9243 if (tg3_flag(tp, IS_SSB_CORE)) {
9244 /*
9245 * BCM4785: In order to avoid repercussions from using
9246 * potentially defective internal ROM, stop the Rx RISC CPU,
 9247		 * which is not required for operation.
9248 */
9249 tg3_stop_fw(tp);
9250 tg3_halt_cpu(tp, RX_CPU_BASE);
9251 }
9252
9253 err = tg3_poll_fw(tp);
9254 if (err)
9255 return err;
9256
9257 tw32(GRC_MODE, tp->grc_mode);
9258
9259 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9260 val = tr32(0xc4);
9261
9262 tw32(0xc4, val | (1 << 15));
9263 }
9264
9265 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9266 tg3_asic_rev(tp) == ASIC_REV_5705) {
9267 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9268 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9269 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9270 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9271 }
9272
9273 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9274 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9275 val = tp->mac_mode;
9276 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9277 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9278 val = tp->mac_mode;
9279 } else
9280 val = 0;
9281
9282 tw32_f(MAC_MODE, val);
9283 udelay(40);
9284
9285 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9286
9287 tg3_mdio_start(tp);
9288
9289 if (tg3_flag(tp, PCI_EXPRESS) &&
9290 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9291 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9292 !tg3_flag(tp, 57765_PLUS)) {
9293 val = tr32(0x7c00);
9294
9295 tw32(0x7c00, val | (1 << 25));
9296 }
9297
9298 tg3_restore_clk(tp);
9299
9300 /* Increase the core clock speed to fix tx timeout issue for 5762
9301 * with 100Mbps link speed.
9302 */
9303 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9304 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9305 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9306 TG3_CPMU_MAC_ORIDE_ENABLE);
9307 }
9308
9309 /* Reprobe ASF enable state. */
9310 tg3_flag_clear(tp, ENABLE_ASF);
9311 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9312 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9313
9314 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9315 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9316 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9317 u32 nic_cfg;
9318
9319 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9320 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9321 tg3_flag_set(tp, ENABLE_ASF);
9322 tp->last_event_jiffies = jiffies;
9323 if (tg3_flag(tp, 5750_PLUS))
9324 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9325
9326 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9327 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9328 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9329 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9330 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9331 }
9332 }
9333
9334 return 0;
9335}
9336
9337static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9338static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9339static void __tg3_set_rx_mode(struct net_device *);
9340
9341/* tp->lock is held. */
9342static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9343{
9344 int err;
9345
9346 tg3_stop_fw(tp);
9347
9348 tg3_write_sig_pre_reset(tp, kind);
9349
9350 tg3_abort_hw(tp, silent);
9351 err = tg3_chip_reset(tp);
9352
9353 __tg3_set_mac_addr(tp, false);
9354
9355 tg3_write_sig_legacy(tp, kind);
9356 tg3_write_sig_post_reset(tp, kind);
9357
9358 if (tp->hw_stats) {
9359 /* Save the stats across chip resets... */
9360 tg3_get_nstats(tp, &tp->net_stats_prev);
9361 tg3_get_estats(tp, &tp->estats_prev);
9362
9363 /* And make sure the next sample is new data */
9364 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9365 }
9366
9367 return err;
9368}
9369
9370static int tg3_set_mac_addr(struct net_device *dev, void *p)
9371{
9372 struct tg3 *tp = netdev_priv(dev);
9373 struct sockaddr *addr = p;
9374 int err = 0;
9375 bool skip_mac_1 = false;
9376
9377 if (!is_valid_ether_addr(addr->sa_data))
9378 return -EADDRNOTAVAIL;
9379
9380 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9381
9382 if (!netif_running(dev))
9383 return 0;
9384
9385 if (tg3_flag(tp, ENABLE_ASF)) {
9386 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9387
9388 addr0_high = tr32(MAC_ADDR_0_HIGH);
9389 addr0_low = tr32(MAC_ADDR_0_LOW);
9390 addr1_high = tr32(MAC_ADDR_1_HIGH);
9391 addr1_low = tr32(MAC_ADDR_1_LOW);
9392
9393 /* Skip MAC addr 1 if ASF is using it. */
9394 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9395 !(addr1_high == 0 && addr1_low == 0))
9396 skip_mac_1 = true;
9397 }
9398 spin_lock_bh(&tp->lock);
9399 __tg3_set_mac_addr(tp, skip_mac_1);
9400 __tg3_set_rx_mode(dev);
9401 spin_unlock_bh(&tp->lock);
9402
9403 return err;
9404}
9405
9406/* tp->lock is held. */
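/* A TG3_BDINFO block in NIC SRAM is four 32-bit words: host ring DMA
 * address (high word, then low), maxlen/flags, and a NIC-local ring
 * address; the last word is only written on pre-5705 parts.
 */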
9407static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9408 dma_addr_t mapping, u32 maxlen_flags,
9409 u32 nic_addr)
9410{
9411 tg3_write_mem(tp,
9412 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9413 ((u64) mapping >> 32));
9414 tg3_write_mem(tp,
9415 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9416 ((u64) mapping & 0xffffffff));
9417 tg3_write_mem(tp,
9418 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9419 maxlen_flags);
9420
9421 if (!tg3_flag(tp, 5705_PLUS))
9422 tg3_write_mem(tp,
9423 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9424 nic_addr);
9425}
 9426
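/* Host coalescing is programmed per vector: the legacy HOSTCC_{TX,RX}*
 * registers serve the default vector, and each additional MSI-X vector
 * has its own block of registers spaced 0x18 bytes apart starting at
 * the *_VEC1 offsets.
 */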
9428static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9429{
9430 int i = 0;
9431
9432 if (!tg3_flag(tp, ENABLE_TSS)) {
9433 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9434 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9435 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9436 } else {
9437 tw32(HOSTCC_TXCOL_TICKS, 0);
9438 tw32(HOSTCC_TXMAX_FRAMES, 0);
9439 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9440
9441 for (; i < tp->txq_cnt; i++) {
9442 u32 reg;
9443
9444 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9445 tw32(reg, ec->tx_coalesce_usecs);
9446 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9447 tw32(reg, ec->tx_max_coalesced_frames);
9448 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9449 tw32(reg, ec->tx_max_coalesced_frames_irq);
9450 }
9451 }
9452
9453 for (; i < tp->irq_max - 1; i++) {
9454 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9455 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9456 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9457 }
9458}
9459
9460static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9461{
9462 int i = 0;
9463 u32 limit = tp->rxq_cnt;
9464
9465 if (!tg3_flag(tp, ENABLE_RSS)) {
9466 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9467 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9468 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9469 limit--;
9470 } else {
9471 tw32(HOSTCC_RXCOL_TICKS, 0);
9472 tw32(HOSTCC_RXMAX_FRAMES, 0);
9473 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9474 }
9475
9476 for (; i < limit; i++) {
9477 u32 reg;
9478
9479 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9480 tw32(reg, ec->rx_coalesce_usecs);
9481 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9482 tw32(reg, ec->rx_max_coalesced_frames);
9483 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9484 tw32(reg, ec->rx_max_coalesced_frames_irq);
9485 }
9486
9487 for (; i < tp->irq_max - 1; i++) {
9488 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9489 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9490 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9491 }
9492}
9493
9494static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9495{
9496 tg3_coal_tx_init(tp, ec);
9497 tg3_coal_rx_init(tp, ec);
9498
9499 if (!tg3_flag(tp, 5705_PLUS)) {
9500 u32 val = ec->stats_block_coalesce_usecs;
9501
9502 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9503 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9504
9505 if (!tp->link_up)
9506 val = 0;
9507
9508 tw32(HOSTCC_STAT_COAL_TICKS, val);
9509 }
9510}
9511
9512/* tp->lock is held. */
9513static void tg3_tx_rcbs_disable(struct tg3 *tp)
9514{
9515 u32 txrcb, limit;
9516
9517 /* Disable all transmit rings but the first. */
9518 if (!tg3_flag(tp, 5705_PLUS))
9519 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9520 else if (tg3_flag(tp, 5717_PLUS))
9521 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9522 else if (tg3_flag(tp, 57765_CLASS) ||
9523 tg3_asic_rev(tp) == ASIC_REV_5762)
9524 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9525 else
9526 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9527
9528 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9529 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9530 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9531 BDINFO_FLAGS_DISABLED);
9532}
9533
9534/* tp->lock is held. */
9535static void tg3_tx_rcbs_init(struct tg3 *tp)
9536{
9537 int i = 0;
9538 u32 txrcb = NIC_SRAM_SEND_RCB;
9539
9540 if (tg3_flag(tp, ENABLE_TSS))
9541 i++;
9542
9543 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9544 struct tg3_napi *tnapi = &tp->napi[i];
9545
9546 if (!tnapi->tx_ring)
9547 continue;
9548
9549 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9550 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9551 NIC_SRAM_TX_BUFFER_DESC);
9552 }
9553}
9554
9555/* tp->lock is held. */
9556static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9557{
9558 u32 rxrcb, limit;
9559
9560 /* Disable all receive return rings but the first. */
9561 if (tg3_flag(tp, 5717_PLUS))
9562 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9563 else if (!tg3_flag(tp, 5705_PLUS))
9564 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9565 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9566 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9567 tg3_flag(tp, 57765_CLASS))
9568 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9569 else
9570 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9571
9572 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9573 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9574 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9575 BDINFO_FLAGS_DISABLED);
9576}
9577
9578/* tp->lock is held. */
9579static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9580{
9581 int i = 0;
9582 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9583
9584 if (tg3_flag(tp, ENABLE_RSS))
9585 i++;
9586
9587 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9588 struct tg3_napi *tnapi = &tp->napi[i];
9589
9590 if (!tnapi->rx_rcb)
9591 continue;
9592
9593 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9594 (tp->rx_ret_ring_mask + 1) <<
9595 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9596 }
9597}
9598
9599/* tp->lock is held. */
9600static void tg3_rings_reset(struct tg3 *tp)
9601{
9602 int i;
9603 u32 stblk;
9604 struct tg3_napi *tnapi = &tp->napi[0];
9605
9606 tg3_tx_rcbs_disable(tp);
9607
9608 tg3_rx_ret_rcbs_disable(tp);
9609
9610 /* Disable interrupts */
9611 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9612 tp->napi[0].chk_msi_cnt = 0;
9613 tp->napi[0].last_rx_cons = 0;
9614 tp->napi[0].last_tx_cons = 0;
9615
9616 /* Zero mailbox registers. */
9617 if (tg3_flag(tp, SUPPORT_MSIX)) {
9618 for (i = 1; i < tp->irq_max; i++) {
9619 tp->napi[i].tx_prod = 0;
9620 tp->napi[i].tx_cons = 0;
9621 if (tg3_flag(tp, ENABLE_TSS))
9622 tw32_mailbox(tp->napi[i].prodmbox, 0);
9623 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9624 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9625 tp->napi[i].chk_msi_cnt = 0;
9626 tp->napi[i].last_rx_cons = 0;
9627 tp->napi[i].last_tx_cons = 0;
9628 }
9629 if (!tg3_flag(tp, ENABLE_TSS))
9630 tw32_mailbox(tp->napi[0].prodmbox, 0);
9631 } else {
9632 tp->napi[0].tx_prod = 0;
9633 tp->napi[0].tx_cons = 0;
9634 tw32_mailbox(tp->napi[0].prodmbox, 0);
9635 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9636 }
9637
9638 /* Make sure the NIC-based send BD rings are disabled. */
9639 if (!tg3_flag(tp, 5705_PLUS)) {
9640 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9641 for (i = 0; i < 16; i++)
9642 tw32_tx_mbox(mbox + i * 8, 0);
9643 }
9644
9645 /* Clear status block in ram. */
9646 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9647
9648 /* Set status block DMA address */
9649 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9650 ((u64) tnapi->status_mapping >> 32));
9651 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9652 ((u64) tnapi->status_mapping & 0xffffffff));
9653
9654 stblk = HOSTCC_STATBLCK_RING1;
9655
9656 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9657 u64 mapping = (u64)tnapi->status_mapping;
9658 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9659 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9660 stblk += 8;
9661
9662 /* Clear status block in ram. */
9663 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9664 }
9665
9666 tg3_tx_rcbs_init(tp);
9667 tg3_rx_ret_rcbs_init(tp);
9668}
9669
9670static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
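/* Choose the RX BD replenish thresholds: the smallest of half the
 * on-chip BD cache, the chip's rx_std_max_post limit (standard ring
 * only), and one eighth of the buffers the host keeps posted.
 */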
9671{
9672 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9673
9674 if (!tg3_flag(tp, 5750_PLUS) ||
9675 tg3_flag(tp, 5780_CLASS) ||
9676 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9677 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9678 tg3_flag(tp, 57765_PLUS))
9679 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9680 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9681 tg3_asic_rev(tp) == ASIC_REV_5787)
9682 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9683 else
9684 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9685
9686 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9687 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9688
9689 val = min(nic_rep_thresh, host_rep_thresh);
9690 tw32(RCVBDI_STD_THRESH, val);
9691
9692 if (tg3_flag(tp, 57765_PLUS))
9693 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9694
9695 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9696 return;
9697
9698 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9699
9700 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9701
9702 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9703 tw32(RCVBDI_JUMBO_THRESH, val);
9704
9705 if (tg3_flag(tp, 57765_PLUS))
9706 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9707}
9708
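/* Bit-reflected (little-endian) CRC-32 over buf, as used by the MAC
 * multicast hash filter.  __tg3_set_rx_mode() indexes the 128-bit
 * filter with the low 7 bits of the complemented CRC: bits 6:5 pick
 * one of the four MAC_HASH_REG_* registers, bits 4:0 the bit within.
 */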
9709static inline u32 calc_crc(unsigned char *buf, int len)
9710{
9711 u32 reg;
9712 u32 tmp;
9713 int j, k;
9714
9715 reg = 0xffffffff;
9716
9717 for (j = 0; j < len; j++) {
9718 reg ^= buf[j];
9719
9720 for (k = 0; k < 8; k++) {
9721 tmp = reg & 0x01;
9722
9723 reg >>= 1;
9724
9725 if (tmp)
9726 reg ^= CRC32_POLY_LE;
9727 }
9728 }
9729
9730 return ~reg;
9731}
9732
9733static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9734{
9735 /* accept or reject all multicast frames */
9736 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9737 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9738 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9739 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9740}
9741
9742static void __tg3_set_rx_mode(struct net_device *dev)
9743{
9744 struct tg3 *tp = netdev_priv(dev);
9745 u32 rx_mode;
9746
9747 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9748 RX_MODE_KEEP_VLAN_TAG);
9749
9750#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9751 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9752 * flag clear.
9753 */
9754 if (!tg3_flag(tp, ENABLE_ASF))
9755 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9756#endif
9757
9758 if (dev->flags & IFF_PROMISC) {
9759 /* Promiscuous mode. */
9760 rx_mode |= RX_MODE_PROMISC;
9761 } else if (dev->flags & IFF_ALLMULTI) {
9762 /* Accept all multicast. */
9763 tg3_set_multi(tp, 1);
9764 } else if (netdev_mc_empty(dev)) {
9765 /* Reject all multicast. */
9766 tg3_set_multi(tp, 0);
9767 } else {
9768 /* Accept one or more multicast(s). */
9769 struct netdev_hw_addr *ha;
9770 u32 mc_filter[4] = { 0, };
9771 u32 regidx;
9772 u32 bit;
9773 u32 crc;
9774
9775 netdev_for_each_mc_addr(ha, dev) {
9776 crc = calc_crc(ha->addr, ETH_ALEN);
9777 bit = ~crc & 0x7f;
9778 regidx = (bit & 0x60) >> 5;
9779 bit &= 0x1f;
9780 mc_filter[regidx] |= (1 << bit);
9781 }
9782
9783 tw32(MAC_HASH_REG_0, mc_filter[0]);
9784 tw32(MAC_HASH_REG_1, mc_filter[1]);
9785 tw32(MAC_HASH_REG_2, mc_filter[2]);
9786 tw32(MAC_HASH_REG_3, mc_filter[3]);
9787 }
9788
9789 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9790 rx_mode |= RX_MODE_PROMISC;
9791 } else if (!(dev->flags & IFF_PROMISC)) {
9792 /* Add all entries into to the mac addr filter list */
9793 int i = 0;
9794 struct netdev_hw_addr *ha;
9795
9796 netdev_for_each_uc_addr(ha, dev) {
9797 __tg3_set_one_mac_addr(tp, ha->addr,
9798 i + TG3_UCAST_ADDR_IDX(tp));
9799 i++;
9800 }
9801 }
9802
9803 if (rx_mode != tp->rx_mode) {
9804 tp->rx_mode = rx_mode;
9805 tw32_f(MAC_RX_MODE, rx_mode);
9806 udelay(10);
9807 }
9808}
9809
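/* ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so this
 * spreads the 128 indirection-table entries round-robin across the
 * active RX queues.
 */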
9810static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9811{
9812 int i;
9813
9814 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9815 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9816}
9817
9818static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9819{
9820 int i;
9821
9822 if (!tg3_flag(tp, SUPPORT_MSIX))
9823 return;
9824
9825 if (tp->rxq_cnt == 1) {
9826 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9827 return;
9828 }
9829
9830 /* Validate table against current IRQ count */
9831 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9832 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9833 break;
9834 }
9835
9836 if (i != TG3_RSS_INDIR_TBL_SIZE)
9837 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9838}
9839
9840static void tg3_rss_write_indir_tbl(struct tg3 *tp)
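/* Each 32-bit MAC_RSS_INDIR_TBL_* register packs eight 4-bit queue
 * indices, first entry in the most significant nibble, so the 128
 * entries span sixteen consecutive registers.
 */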
9841{
9842 int i = 0;
9843 u32 reg = MAC_RSS_INDIR_TBL_0;
9844
9845 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9846 u32 val = tp->rss_ind_tbl[i];
9847 i++;
9848 for (; i % 8; i++) {
9849 val <<= 4;
9850 val |= tp->rss_ind_tbl[i];
9851 }
9852 tw32(reg, val);
9853 reg += 4;
9854 }
9855}
9856
9857static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9858{
9859 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9860 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9861 else
9862 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9863}
9864
9865/* tp->lock is held. */
9866static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9867{
9868 u32 val, rdmac_mode;
9869 int i, err, limit;
9870 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9871
9872 tg3_disable_ints(tp);
9873
9874 tg3_stop_fw(tp);
9875
9876 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9877
9878 if (tg3_flag(tp, INIT_COMPLETE))
9879 tg3_abort_hw(tp, 1);
9880
9881 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9882 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9883 tg3_phy_pull_config(tp);
9884 tg3_eee_pull_config(tp, NULL);
9885 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9886 }
9887
9888 /* Enable MAC control of LPI */
9889 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9890 tg3_setup_eee(tp);
9891
9892 if (reset_phy)
9893 tg3_phy_reset(tp);
9894
9895 err = tg3_chip_reset(tp);
9896 if (err)
9897 return err;
9898
9899 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9900
9901 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9902 val = tr32(TG3_CPMU_CTRL);
9903 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9904 tw32(TG3_CPMU_CTRL, val);
9905
9906 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9907 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9908 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9909 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9910
9911 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9912 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9913 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9914 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9915
9916 val = tr32(TG3_CPMU_HST_ACC);
9917 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9918 val |= CPMU_HST_ACC_MACCLK_6_25;
9919 tw32(TG3_CPMU_HST_ACC, val);
9920 }
9921
9922 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9923 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9924 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9925 PCIE_PWR_MGMT_L1_THRESH_4MS;
9926 tw32(PCIE_PWR_MGMT_THRESH, val);
9927
9928 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9929 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9930
9931 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9932
9933 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9934 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9935 }
9936
9937 if (tg3_flag(tp, L1PLLPD_EN)) {
9938 u32 grc_mode = tr32(GRC_MODE);
9939
9940 /* Access the lower 1K of PL PCIE block registers. */
9941 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9942 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9943
9944 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9945 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9946 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9947
9948 tw32(GRC_MODE, grc_mode);
9949 }
9950
9951 if (tg3_flag(tp, 57765_CLASS)) {
9952 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9953 u32 grc_mode = tr32(GRC_MODE);
9954
9955 /* Access the lower 1K of PL PCIE block registers. */
9956 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9957 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9958
9959 val = tr32(TG3_PCIE_TLDLPL_PORT +
9960 TG3_PCIE_PL_LO_PHYCTL5);
9961 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9962 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9963
9964 tw32(GRC_MODE, grc_mode);
9965 }
9966
9967 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9968 u32 grc_mode;
9969
9970 /* Fix transmit hangs */
9971 val = tr32(TG3_CPMU_PADRNG_CTL);
9972 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9973 tw32(TG3_CPMU_PADRNG_CTL, val);
9974
9975 grc_mode = tr32(GRC_MODE);
9976
9977 /* Access the lower 1K of DL PCIE block registers. */
9978 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9979 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9980
9981 val = tr32(TG3_PCIE_TLDLPL_PORT +
9982 TG3_PCIE_DL_LO_FTSMAX);
9983 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9984 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9985 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9986
9987 tw32(GRC_MODE, grc_mode);
9988 }
9989
9990 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9991 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9992 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9993 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9994 }
9995
9996 /* This works around an issue with Athlon chipsets on
9997 * B3 tigon3 silicon. This bit has no effect on any
9998 * other revision. But do not set this on PCI Express
9999 * chips and don't even touch the clocks if the CPMU is present.
10000 */
10001 if (!tg3_flag(tp, CPMU_PRESENT)) {
10002 if (!tg3_flag(tp, PCI_EXPRESS))
10003 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10004 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10005 }
10006
10007 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10008 tg3_flag(tp, PCIX_MODE)) {
10009 val = tr32(TG3PCI_PCISTATE);
10010 val |= PCISTATE_RETRY_SAME_DMA;
10011 tw32(TG3PCI_PCISTATE, val);
10012 }
10013
10014 if (tg3_flag(tp, ENABLE_APE)) {
10015 /* Allow reads and writes to the
10016 * APE register and memory space.
10017 */
10018 val = tr32(TG3PCI_PCISTATE);
10019 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10020 PCISTATE_ALLOW_APE_SHMEM_WR |
10021 PCISTATE_ALLOW_APE_PSPACE_WR;
10022 tw32(TG3PCI_PCISTATE, val);
10023 }
10024
10025 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10026 /* Enable some hw fixes. */
10027 val = tr32(TG3PCI_MSI_DATA);
10028 val |= (1 << 26) | (1 << 28) | (1 << 29);
10029 tw32(TG3PCI_MSI_DATA, val);
10030 }
10031
10032 /* Descriptor ring init may make accesses to the
10033 * NIC SRAM area to setup the TX descriptors, so we
10034 * can only do this after the hardware has been
10035 * successfully reset.
10036 */
10037 err = tg3_init_rings(tp);
10038 if (err)
10039 return err;
10040
10041 if (tg3_flag(tp, 57765_PLUS)) {
10042 val = tr32(TG3PCI_DMA_RW_CTRL) &
10043 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10044 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10045 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10046 if (!tg3_flag(tp, 57765_CLASS) &&
10047 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10048 tg3_asic_rev(tp) != ASIC_REV_5762)
10049 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10050 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10051 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10052 tg3_asic_rev(tp) != ASIC_REV_5761) {
10053		/* This value is determined during the probe-time DMA
10054 * engine test, tg3_test_dma.
10055 */
10056 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10057 }
10058
10059 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10060 GRC_MODE_4X_NIC_SEND_RINGS |
10061 GRC_MODE_NO_TX_PHDR_CSUM |
10062 GRC_MODE_NO_RX_PHDR_CSUM);
10063 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10064
10065 /* Pseudo-header checksum is done by hardware logic and not
10066	 * the offload processors, so make the chip do the pseudo-
10067 * header checksums on receive. For transmit it is more
10068 * convenient to do the pseudo-header checksum in software
10069 * as Linux does that on transmit for us in all cases.
10070 */
10071 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10072
10073 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10074 if (tp->rxptpctl)
10075 tw32(TG3_RX_PTP_CTL,
10076 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10077
10078 if (tg3_flag(tp, PTP_CAPABLE))
10079 val |= GRC_MODE_TIME_SYNC_ENABLE;
10080
10081 tw32(GRC_MODE, tp->grc_mode | val);
10082
10083	/* On one of the AMD platforms, MRRS is restricted to 4000 because of
10084	 * a south bridge limitation. As a workaround, the driver sets MRRS
10085	 * to 2048 instead of the default 4096.
10086 */
10087 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10088 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10089 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10090 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10091 }
10092
10093	/* Set up the timer prescaler register. The clock is always 66MHz. */
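	/* A prescaler value of 65 presumably divides the 66MHz core clock
	 * by (65 + 1), i.e. down to a 1MHz (1us) timer tick.
	 */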
10094 val = tr32(GRC_MISC_CFG);
10095 val &= ~0xff;
10096 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10097 tw32(GRC_MISC_CFG, val);
10098
10099 /* Initialize MBUF/DESC pool. */
10100 if (tg3_flag(tp, 5750_PLUS)) {
10101 /* Do nothing. */
10102 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10103 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10104 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10105 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10106 else
10107 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10108 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10109 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10110 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10111 int fw_len;
10112
10113 fw_len = tp->fw_len;
10114 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10115 tw32(BUFMGR_MB_POOL_ADDR,
10116 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10117 tw32(BUFMGR_MB_POOL_SIZE,
10118 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10119 }
10120
10121 if (tp->dev->mtu <= ETH_DATA_LEN) {
10122 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10123 tp->bufmgr_config.mbuf_read_dma_low_water);
10124 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10125 tp->bufmgr_config.mbuf_mac_rx_low_water);
10126 tw32(BUFMGR_MB_HIGH_WATER,
10127 tp->bufmgr_config.mbuf_high_water);
10128 } else {
10129 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10130 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10131 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10132 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10133 tw32(BUFMGR_MB_HIGH_WATER,
10134 tp->bufmgr_config.mbuf_high_water_jumbo);
10135 }
10136 tw32(BUFMGR_DMA_LOW_WATER,
10137 tp->bufmgr_config.dma_low_water);
10138 tw32(BUFMGR_DMA_HIGH_WATER,
10139 tp->bufmgr_config.dma_high_water);
10140
10141 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10142 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10143 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10144 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10145 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10146 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10147 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10148 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10149 tw32(BUFMGR_MODE, val);
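	/* Poll for up to 2000 * 10us = 20ms for the buffer manager to
	 * report itself enabled before giving up.
	 */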
10150 for (i = 0; i < 2000; i++) {
10151 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10152 break;
10153 udelay(10);
10154 }
10155 if (i >= 2000) {
10156 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10157 return -ENODEV;
10158 }
10159
10160 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10161 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10162
10163 tg3_setup_rxbd_thresholds(tp);
10164
10165 /* Initialize TG3_BDINFO's at:
10166 * RCVDBDI_STD_BD: standard eth size rx ring
10167 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10168 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10169 *
10170 * like so:
10171 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10172 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10173 * ring attribute flags
10174 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10175 *
10176 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10177 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10178 *
10179 * The size of each ring is fixed in the firmware, but the location is
10180 * configurable.
10181 */
10182 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10183 ((u64) tpr->rx_std_mapping >> 32));
10184 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10185 ((u64) tpr->rx_std_mapping & 0xffffffff));
10186 if (!tg3_flag(tp, 5717_PLUS))
10187 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10188 NIC_SRAM_RX_BUFFER_DESC);
10189
10190 /* Disable the mini ring */
10191 if (!tg3_flag(tp, 5705_PLUS))
10192 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10193 BDINFO_FLAGS_DISABLED);
10194
10195 /* Program the jumbo buffer descriptor ring control
10196 * blocks on those devices that have them.
10197 */
10198 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10199 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10200
10201 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10202 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10203 ((u64) tpr->rx_jmb_mapping >> 32));
10204 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10205 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10206 val = TG3_RX_JMB_RING_SIZE(tp) <<
10207 BDINFO_FLAGS_MAXLEN_SHIFT;
10208 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10209 val | BDINFO_FLAGS_USE_EXT_RECV);
10210 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10211 tg3_flag(tp, 57765_CLASS) ||
10212 tg3_asic_rev(tp) == ASIC_REV_5762)
10213 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10214 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10215 } else {
10216 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10217 BDINFO_FLAGS_DISABLED);
10218 }
10219
10220 if (tg3_flag(tp, 57765_PLUS)) {
10221 val = TG3_RX_STD_RING_SIZE(tp);
10222 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10223 val |= (TG3_RX_STD_DMA_SZ << 2);
10224 } else
10225 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10226 } else
10227 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10228
10229 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10230
10231 tpr->rx_std_prod_idx = tp->rx_pending;
10232 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10233
10234 tpr->rx_jmb_prod_idx =
10235 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10236 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10237
10238 tg3_rings_reset(tp);
10239
10240 /* Initialize MAC address and backoff seed. */
10241 __tg3_set_mac_addr(tp, false);
10242
10243 /* MTU + ethernet header + FCS + optional VLAN tag */
10244 tw32(MAC_RX_MTU_SIZE,
10245 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
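	/* e.g. 1500 + 14 + 4 + 4 = 1522 bytes with the default MTU */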
10246
10247 /* The slot time is changed by tg3_setup_phy if we
10248 * run at gigabit with half duplex.
10249 */
10250 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10251 (6 << TX_LENGTHS_IPG_SHIFT) |
10252 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10253
10254 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10255 tg3_asic_rev(tp) == ASIC_REV_5762)
10256 val |= tr32(MAC_TX_LENGTHS) &
10257 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10258 TX_LENGTHS_CNT_DWN_VAL_MSK);
10259
10260 tw32(MAC_TX_LENGTHS, val);
10261
10262 /* Receive rules. */
10263 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10264 tw32(RCVLPC_CONFIG, 0x0181);
10265
10266 /* Calculate RDMAC_MODE setting early, we need it to determine
10267 * the RCVLPC_STATE_ENABLE mask.
10268 */
10269 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10270 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10271 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10272 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10273 RDMAC_MODE_LNGREAD_ENAB);
10274
10275 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10276 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10277
10278 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10279 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10280 tg3_asic_rev(tp) == ASIC_REV_57780)
10281 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10282 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10283 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10284
10285 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10286 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10287 if (tg3_flag(tp, TSO_CAPABLE) &&
10288 tg3_asic_rev(tp) == ASIC_REV_5705) {
10289 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10290 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10291 !tg3_flag(tp, IS_5788)) {
10292 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10293 }
10294 }
10295
10296 if (tg3_flag(tp, PCI_EXPRESS))
10297 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10298
10299 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10300 tp->dma_limit = 0;
10301 if (tp->dev->mtu <= ETH_DATA_LEN) {
10302 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10303 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10304 }
10305 }
10306
10307 if (tg3_flag(tp, HW_TSO_1) ||
10308 tg3_flag(tp, HW_TSO_2) ||
10309 tg3_flag(tp, HW_TSO_3))
10310 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10311
10312 if (tg3_flag(tp, 57765_PLUS) ||
10313 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10314 tg3_asic_rev(tp) == ASIC_REV_57780)
10315 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10316
10317 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10318 tg3_asic_rev(tp) == ASIC_REV_5762)
10319 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10320
10321 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10322 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10323 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10324 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10325 tg3_flag(tp, 57765_PLUS)) {
10326 u32 tgtreg;
10327
10328 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10329 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10330 else
10331 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10332
10333 val = tr32(tgtreg);
10334 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10335 tg3_asic_rev(tp) == ASIC_REV_5762) {
10336 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10337 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10338 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10339 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10340 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10341 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10342 }
10343 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10344 }
10345
10346 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10347 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10348 tg3_asic_rev(tp) == ASIC_REV_5762) {
10349 u32 tgtreg;
10350
10351 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10352 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10353 else
10354 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10355
10356 val = tr32(tgtreg);
10357 tw32(tgtreg, val |
10358 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10359 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10360 }
10361
10362 /* Receive/send statistics. */
10363 if (tg3_flag(tp, 5750_PLUS)) {
10364 val = tr32(RCVLPC_STATS_ENABLE);
10365 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10366 tw32(RCVLPC_STATS_ENABLE, val);
10367 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10368 tg3_flag(tp, TSO_CAPABLE)) {
10369 val = tr32(RCVLPC_STATS_ENABLE);
10370 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10371 tw32(RCVLPC_STATS_ENABLE, val);
10372 } else {
10373 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10374 }
10375 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10376 tw32(SNDDATAI_STATSENAB, 0xffffff);
10377 tw32(SNDDATAI_STATSCTRL,
10378 (SNDDATAI_SCTRL_ENABLE |
10379 SNDDATAI_SCTRL_FASTUPD));
10380
10381 /* Setup host coalescing engine. */
10382 tw32(HOSTCC_MODE, 0);
10383 for (i = 0; i < 2000; i++) {
10384 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10385 break;
10386 udelay(10);
10387 }
10388
10389 __tg3_set_coalesce(tp, &tp->coal);
10390
10391 if (!tg3_flag(tp, 5705_PLUS)) {
10392 /* Status/statistics block address. See tg3_timer,
10393 * the tg3_periodic_fetch_stats call there, and
10394 * tg3_get_stats to see how this works for 5705/5750 chips.
10395 */
10396 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10397 ((u64) tp->stats_mapping >> 32));
10398 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10399 ((u64) tp->stats_mapping & 0xffffffff));
10400 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10401
10402 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10403
10404 /* Clear statistics and status block memory areas */
10405 for (i = NIC_SRAM_STATS_BLK;
10406 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10407 i += sizeof(u32)) {
10408 tg3_write_mem(tp, i, 0);
10409 udelay(40);
10410 }
10411 }
10412
10413 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10414
10415 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10416 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10417 if (!tg3_flag(tp, 5705_PLUS))
10418 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10419
10420 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10421 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10422 /* reset to prevent losing 1st rx packet intermittently */
10423 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10424 udelay(10);
10425 }
10426
10427 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10428 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10429 MAC_MODE_FHDE_ENABLE;
10430 if (tg3_flag(tp, ENABLE_APE))
10431 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10432 if (!tg3_flag(tp, 5705_PLUS) &&
10433 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10434 tg3_asic_rev(tp) != ASIC_REV_5700)
10435 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10436 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10437 udelay(40);
10438
10439 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10440 * If TG3_FLAG_IS_NIC is zero, we should read the
10441 * register to preserve the GPIO settings for LOMs. The GPIOs,
10442 * whether used as inputs or outputs, are set by boot code after
10443 * reset.
10444 */
10445 if (!tg3_flag(tp, IS_NIC)) {
10446 u32 gpio_mask;
10447
10448 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10449 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10450 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10451
10452 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10453 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10454 GRC_LCLCTRL_GPIO_OUTPUT3;
10455
10456 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10457 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10458
10459 tp->grc_local_ctrl &= ~gpio_mask;
10460 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10461
10462 /* GPIO1 must be driven high for eeprom write protect */
10463 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10464 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10465 GRC_LCLCTRL_GPIO_OUTPUT1);
10466 }
10467 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10468 udelay(100);
10469
10470 if (tg3_flag(tp, USING_MSIX)) {
10471 val = tr32(MSGINT_MODE);
10472 val |= MSGINT_MODE_ENABLE;
10473 if (tp->irq_cnt > 1)
10474 val |= MSGINT_MODE_MULTIVEC_EN;
10475 if (!tg3_flag(tp, 1SHOT_MSI))
10476 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10477 tw32(MSGINT_MODE, val);
10478 }
10479
10480 if (!tg3_flag(tp, 5705_PLUS)) {
10481 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10482 udelay(40);
10483 }
10484
10485 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10486 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10487 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10488 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10489 WDMAC_MODE_LNGREAD_ENAB);
10490
10491 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10492 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10493 if (tg3_flag(tp, TSO_CAPABLE) &&
10494 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10495 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10496 /* nothing */
10497 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10498 !tg3_flag(tp, IS_5788)) {
10499 val |= WDMAC_MODE_RX_ACCEL;
10500 }
10501 }
10502
10503 /* Enable host coalescing bug fix */
10504 if (tg3_flag(tp, 5755_PLUS))
10505 val |= WDMAC_MODE_STATUS_TAG_FIX;
10506
10507 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10508 val |= WDMAC_MODE_BURST_ALL_DATA;
10509
10510 tw32_f(WDMAC_MODE, val);
10511 udelay(40);
10512
10513 if (tg3_flag(tp, PCIX_MODE)) {
10514 u16 pcix_cmd;
10515
10516 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10517 &pcix_cmd);
10518 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10519 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10520 pcix_cmd |= PCI_X_CMD_READ_2K;
10521 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10522 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10523 pcix_cmd |= PCI_X_CMD_READ_2K;
10524 }
10525 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10526 pcix_cmd);
10527 }
10528
10529 tw32_f(RDMAC_MODE, rdmac_mode);
10530 udelay(40);
10531
10532 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10533 tg3_asic_rev(tp) == ASIC_REV_5720) {
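		/* If any RDMA length register is still programmed above the
		 * MTU, arm the LSO read DMA workaround bit, presumably needed
		 * to avoid the 5719/5720 RDMA hang; tg3_periodic_fetch_stats()
		 * clears it again once enough tx packets have flowed.
		 */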
10534 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10535 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10536 break;
10537 }
10538 if (i < TG3_NUM_RDMA_CHANNELS) {
10539 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10540 val |= tg3_lso_rd_dma_workaround_bit(tp);
10541 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10542 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10543 }
10544 }
10545
10546 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10547 if (!tg3_flag(tp, 5705_PLUS))
10548 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10549
10550 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10551 tw32(SNDDATAC_MODE,
10552 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10553 else
10554 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10555
10556 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10557 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10558 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10559 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10560 val |= RCVDBDI_MODE_LRG_RING_SZ;
10561 tw32(RCVDBDI_MODE, val);
10562 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10563 if (tg3_flag(tp, HW_TSO_1) ||
10564 tg3_flag(tp, HW_TSO_2) ||
10565 tg3_flag(tp, HW_TSO_3))
10566 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10567 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10568 if (tg3_flag(tp, ENABLE_TSS))
10569 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10570 tw32(SNDBDI_MODE, val);
10571 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10572
10573 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10574 err = tg3_load_5701_a0_firmware_fix(tp);
10575 if (err)
10576 return err;
10577 }
10578
10579 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10580 /* Ignore any errors for the firmware download. If download
10581		 * fails, the device will operate with EEE disabled.
10582 */
10583 tg3_load_57766_firmware(tp);
10584 }
10585
10586 if (tg3_flag(tp, TSO_CAPABLE)) {
10587 err = tg3_load_tso_firmware(tp);
10588 if (err)
10589 return err;
10590 }
10591
10592 tp->tx_mode = TX_MODE_ENABLE;
10593
10594 if (tg3_flag(tp, 5755_PLUS) ||
10595 tg3_asic_rev(tp) == ASIC_REV_5906)
10596 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10597
10598 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10599 tg3_asic_rev(tp) == ASIC_REV_5762) {
10600 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10601 tp->tx_mode &= ~val;
10602 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10603 }
10604
10605 tw32_f(MAC_TX_MODE, tp->tx_mode);
10606 udelay(100);
10607
10608 if (tg3_flag(tp, ENABLE_RSS)) {
10609 u32 rss_key[10];
10610
10611 tg3_rss_write_indir_tbl(tp);
10612
10613 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10614
10615		for (i = 0; i < 10; i++)
10616 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10617 }
10618
10619 tp->rx_mode = RX_MODE_ENABLE;
10620 if (tg3_flag(tp, 5755_PLUS))
10621 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10622
10623 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10624 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10625
10626 if (tg3_flag(tp, ENABLE_RSS))
10627 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10628 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10629 RX_MODE_RSS_IPV6_HASH_EN |
10630 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10631 RX_MODE_RSS_IPV4_HASH_EN |
10632 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10633
10634 tw32_f(MAC_RX_MODE, tp->rx_mode);
10635 udelay(10);
10636
10637 tw32(MAC_LED_CTRL, tp->led_ctrl);
10638
10639 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10640 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10641 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10642 udelay(10);
10643 }
10644 tw32_f(MAC_RX_MODE, tp->rx_mode);
10645 udelay(10);
10646
10647 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10648 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10649 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10650 /* Set drive transmission level to 1.2V */
10651 /* only if the signal pre-emphasis bit is not set */
10652 val = tr32(MAC_SERDES_CFG);
10653 val &= 0xfffff000;
10654 val |= 0x880;
10655 tw32(MAC_SERDES_CFG, val);
10656 }
10657 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10658 tw32(MAC_SERDES_CFG, 0x616000);
10659 }
10660
10661 /* Prevent chip from dropping frames when flow control
10662 * is enabled.
10663 */
10664 if (tg3_flag(tp, 57765_CLASS))
10665 val = 1;
10666 else
10667 val = 2;
10668 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10669
10670 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10671 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10672 /* Use hardware link auto-negotiation */
10673 tg3_flag_set(tp, HW_AUTONEG);
10674 }
10675
10676 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10677 tg3_asic_rev(tp) == ASIC_REV_5714) {
10678 u32 tmp;
10679
10680 tmp = tr32(SERDES_RX_CTRL);
10681 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10682 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10683 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10684 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10685 }
10686
10687 if (!tg3_flag(tp, USE_PHYLIB)) {
10688 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10689 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10690
10691 err = tg3_setup_phy(tp, false);
10692 if (err)
10693 return err;
10694
10695 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10696 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10697 u32 tmp;
10698
10699 /* Clear CRC stats. */
10700 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10701 tg3_writephy(tp, MII_TG3_TEST1,
10702 tmp | MII_TG3_TEST1_CRC_EN);
10703 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10704 }
10705 }
10706 }
10707
10708 __tg3_set_rx_mode(tp->dev);
10709
10710 /* Initialize receive rules. */
10711 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10712 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10713 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10714 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10715
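	/* The MAC provides 16 receive rule slots (only 8 on 5705+ devices
	 * outside the 5780 class), and the last four are apparently reserved
	 * for the ASF firmware when it is enabled. The switch below falls
	 * through from the highest valid slot, zeroing out each rule the two
	 * rules above leave unused.
	 */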
10716 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10717 limit = 8;
10718 else
10719 limit = 16;
10720 if (tg3_flag(tp, ENABLE_ASF))
10721 limit -= 4;
10722 switch (limit) {
10723 case 16:
10724 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10725 /* fall through */
10726 case 15:
10727 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10728 /* fall through */
10729 case 14:
10730 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10731 /* fall through */
10732 case 13:
10733 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10734 /* fall through */
10735 case 12:
10736 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10737 /* fall through */
10738 case 11:
10739 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10740 /* fall through */
10741 case 10:
10742 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10743 /* fall through */
10744 case 9:
10745 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10746 /* fall through */
10747 case 8:
10748 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10749 /* fall through */
10750 case 7:
10751 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10752 /* fall through */
10753 case 6:
10754 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10755 /* fall through */
10756 case 5:
10757 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10758 /* fall through */
10759 case 4:
10760 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10761 case 3:
10762 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10763 case 2:
10764 case 1:
10765
10766 default:
10767 break;
10768 }
10769
10770 if (tg3_flag(tp, ENABLE_APE))
10771 /* Write our heartbeat update interval to APE. */
10772 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10773 APE_HOST_HEARTBEAT_INT_5SEC);
10774
10775 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10776
10777 return 0;
10778}
10779
10780/* Called at device open time to get the chip ready for
10781 * packet processing. Invoked with tp->lock held.
10782 */
10783static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10784{
10785 /* Chip may have been just powered on. If so, the boot code may still
10786 * be running initialization. Wait for it to finish to avoid races in
10787 * accessing the hardware.
10788 */
10789 tg3_enable_register_access(tp);
10790 tg3_poll_fw(tp);
10791
10792 tg3_switch_clocks(tp);
10793
10794 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10795
10796 return tg3_reset_hw(tp, reset_phy);
10797}
10798
10799#ifdef CONFIG_TIGON3_HWMON
10800static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10801{
10802 int i;
10803
10804 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10805 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10806
10807 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10808 off += len;
10809
10810 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10811 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10812 memset(ocir, 0, TG3_OCIR_LEN);
10813 }
10814}
10815
10816/* sysfs attributes for hwmon */
10817static ssize_t tg3_show_temp(struct device *dev,
10818 struct device_attribute *devattr, char *buf)
10819{
10820 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10821 struct tg3 *tp = dev_get_drvdata(dev);
10822 u32 temperature;
10823
10824 spin_lock_bh(&tp->lock);
10825 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10826 sizeof(temperature));
10827 spin_unlock_bh(&tp->lock);
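	/* hwmon sysfs reports temperatures in millidegrees Celsius; the
	 * APE scratchpad value appears to be in whole degrees, hence the
	 * multiplication by 1000.
	 */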
10828 return sprintf(buf, "%u\n", temperature * 1000);
10829}
10830
10831
10832static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10833 TG3_TEMP_SENSOR_OFFSET);
10834static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10835 TG3_TEMP_CAUTION_OFFSET);
10836static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10837 TG3_TEMP_MAX_OFFSET);
10838
10839static struct attribute *tg3_attrs[] = {
10840 &sensor_dev_attr_temp1_input.dev_attr.attr,
10841 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10842 &sensor_dev_attr_temp1_max.dev_attr.attr,
10843 NULL
10844};
10845ATTRIBUTE_GROUPS(tg3);
10846
10847static void tg3_hwmon_close(struct tg3 *tp)
10848{
10849 if (tp->hwmon_dev) {
10850 hwmon_device_unregister(tp->hwmon_dev);
10851 tp->hwmon_dev = NULL;
10852 }
10853}
10854
10855static void tg3_hwmon_open(struct tg3 *tp)
10856{
10857 int i;
10858 u32 size = 0;
10859 struct pci_dev *pdev = tp->pdev;
10860 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10861
10862 tg3_sd_scan_scratchpad(tp, ocirs);
10863
10864 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10865 if (!ocirs[i].src_data_length)
10866 continue;
10867
10868 size += ocirs[i].src_hdr_length;
10869 size += ocirs[i].src_data_length;
10870 }
10871
10872 if (!size)
10873 return;
10874
10875 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10876 tp, tg3_groups);
10877 if (IS_ERR(tp->hwmon_dev)) {
10878 tp->hwmon_dev = NULL;
10879 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10880 }
10881}
10882#else
10883static inline void tg3_hwmon_close(struct tg3 *tp) { }
10884static inline void tg3_hwmon_open(struct tg3 *tp) { }
10885#endif /* CONFIG_TIGON3_HWMON */
10886
10887
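/* Accumulate a 32-bit hardware statistics register into a 64-bit software
 * counter. If the 32-bit addition wraps, the new low word ends up smaller
 * than the addend (e.g. 0xffffff00 + 0x200 -> low = 0x100 < 0x200), in
 * which case a carry is propagated into the high word.
 */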
10888#define TG3_STAT_ADD32(PSTAT, REG) \
10889do { u32 __val = tr32(REG); \
10890 (PSTAT)->low += __val; \
10891 if ((PSTAT)->low < __val) \
10892 (PSTAT)->high += 1; \
10893} while (0)
10894
10895static void tg3_periodic_fetch_stats(struct tg3 *tp)
10896{
10897 struct tg3_hw_stats *sp = tp->hw_stats;
10898
10899 if (!tp->link_up)
10900 return;
10901
10902 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10903 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10904 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10905 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10906 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10907 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10908 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10909 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10910 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10911 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10912 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10913 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10914 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10915 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10916 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10917 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10918 u32 val;
10919
10920 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10921 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10922 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10923 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10924 }
10925
10926 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10927 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10928 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10929 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10930 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10931 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10932 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10933 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10934 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10935 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10936 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10937 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10938 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10939 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10940
10941 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10942 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10943 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10944 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10945 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10946 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10947 } else {
10948 u32 val = tr32(HOSTCC_FLOW_ATTN);
10949 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10950 if (val) {
10951 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10952 sp->rx_discards.low += val;
10953 if (sp->rx_discards.low < val)
10954 sp->rx_discards.high += 1;
10955 }
10956 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10957 }
10958 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10959}
10960
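/* Some chips can apparently lose an MSI: work is pending but the ring
 * pointers have not advanced since the last check. Give the interrupt one
 * timer tick of grace, then invoke the handler by hand to unwedge the
 * queue.
 */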
10961static void tg3_chk_missed_msi(struct tg3 *tp)
10962{
10963 u32 i;
10964
10965 for (i = 0; i < tp->irq_cnt; i++) {
10966 struct tg3_napi *tnapi = &tp->napi[i];
10967
10968 if (tg3_has_work(tnapi)) {
10969 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10970 tnapi->last_tx_cons == tnapi->tx_cons) {
10971 if (tnapi->chk_msi_cnt < 1) {
10972 tnapi->chk_msi_cnt++;
10973 return;
10974 }
10975 tg3_msi(0, tnapi);
10976 }
10977 }
10978 tnapi->chk_msi_cnt = 0;
10979 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10980 tnapi->last_tx_cons = tnapi->tx_cons;
10981 }
10982}
10983
10984static void tg3_timer(struct timer_list *t)
10985{
10986 struct tg3 *tp = from_timer(tp, t, timer);
10987
10988 spin_lock(&tp->lock);
10989
10990 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10991 spin_unlock(&tp->lock);
10992 goto restart_timer;
10993 }
10994
10995 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10996 tg3_flag(tp, 57765_CLASS))
10997 tg3_chk_missed_msi(tp);
10998
10999 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11000 /* BCM4785: Flush posted writes from GbE to host memory. */
11001 tr32(HOSTCC_MODE);
11002 }
11003
11004 if (!tg3_flag(tp, TAGGED_STATUS)) {
11005		/* All of this garbage is because, when using non-tagged
11006		 * IRQ status, the mailbox/status_block protocol the chip
11007		 * uses with the CPU is race prone.
11008 */
11009 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11010 tw32(GRC_LOCAL_CTRL,
11011 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11012 } else {
11013 tw32(HOSTCC_MODE, tp->coalesce_mode |
11014 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11015 }
11016
11017 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11018 spin_unlock(&tp->lock);
11019 tg3_reset_task_schedule(tp);
11020 goto restart_timer;
11021 }
11022 }
11023
11024 /* This part only runs once per second. */
11025 if (!--tp->timer_counter) {
11026 if (tg3_flag(tp, 5705_PLUS))
11027 tg3_periodic_fetch_stats(tp);
11028
11029 if (tp->setlpicnt && !--tp->setlpicnt)
11030 tg3_phy_eee_enable(tp);
11031
11032 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11033 u32 mac_stat;
11034 int phy_event;
11035
11036 mac_stat = tr32(MAC_STATUS);
11037
11038 phy_event = 0;
11039 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11040 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11041 phy_event = 1;
11042 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11043 phy_event = 1;
11044
11045 if (phy_event)
11046 tg3_setup_phy(tp, false);
11047 } else if (tg3_flag(tp, POLL_SERDES)) {
11048 u32 mac_stat = tr32(MAC_STATUS);
11049 int need_setup = 0;
11050
11051 if (tp->link_up &&
11052 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11053 need_setup = 1;
11054 }
11055 if (!tp->link_up &&
11056 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11057 MAC_STATUS_SIGNAL_DET))) {
11058 need_setup = 1;
11059 }
11060 if (need_setup) {
11061 if (!tp->serdes_counter) {
11062 tw32_f(MAC_MODE,
11063 (tp->mac_mode &
11064 ~MAC_MODE_PORT_MODE_MASK));
11065 udelay(40);
11066 tw32_f(MAC_MODE, tp->mac_mode);
11067 udelay(40);
11068 }
11069 tg3_setup_phy(tp, false);
11070 }
11071 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11072 tg3_flag(tp, 5780_CLASS)) {
11073 tg3_serdes_parallel_detect(tp);
11074 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11075 u32 cpmu = tr32(TG3_CPMU_STATUS);
11076 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11077 TG3_CPMU_STATUS_LINK_MASK);
11078
11079 if (link_up != tp->link_up)
11080 tg3_setup_phy(tp, false);
11081 }
11082
11083 tp->timer_counter = tp->timer_multiplier;
11084 }
11085
11086 /* Heartbeat is only sent once every 2 seconds.
11087 *
11088 * The heartbeat is to tell the ASF firmware that the host
11089 * driver is still alive. In the event that the OS crashes,
11090 * ASF needs to reset the hardware to free up the FIFO space
11091 * that may be filled with rx packets destined for the host.
11092 * If the FIFO is full, ASF will no longer function properly.
11093 *
11094 * Unintended resets have been reported on real time kernels
11095 * where the timer doesn't run on time. Netpoll will also have
11096	 * the same problem.
11097 *
11098 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11099 * to check the ring condition when the heartbeat is expiring
11100 * before doing the reset. This will prevent most unintended
11101 * resets.
11102 */
11103 if (!--tp->asf_counter) {
11104 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11105 tg3_wait_for_event_ack(tp);
11106
11107 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11108 FWCMD_NICDRV_ALIVE3);
11109 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11110 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11111 TG3_FW_UPDATE_TIMEOUT_SEC);
11112
11113 tg3_generate_fw_event(tp);
11114 }
11115 tp->asf_counter = tp->asf_multiplier;
11116 }
11117
11118	/* Update the APE heartbeat every 5 seconds. */
11119 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11120
11121 spin_unlock(&tp->lock);
11122
11123restart_timer:
11124 tp->timer.expires = jiffies + tp->timer_offset;
11125 add_timer(&tp->timer);
11126}
11127
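/* The timer fires every timer_offset jiffies: once a second when tagged
 * status is usable (and the chip doesn't need the missed-MSI check), ten
 * times a second otherwise. timer_multiplier is the number of timer ticks
 * per second (e.g. HZ / (HZ / 10) = 10 in the non-tagged case), so the
 * once-per-second work in tg3_timer() runs every timer_multiplier ticks,
 * and asf_multiplier stretches that out to TG3_FW_UPDATE_FREQ_SEC seconds
 * for the ASF heartbeat.
 */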
11128static void tg3_timer_init(struct tg3 *tp)
11129{
11130 if (tg3_flag(tp, TAGGED_STATUS) &&
11131 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11132 !tg3_flag(tp, 57765_CLASS))
11133 tp->timer_offset = HZ;
11134 else
11135 tp->timer_offset = HZ / 10;
11136
11137 BUG_ON(tp->timer_offset > HZ);
11138
11139 tp->timer_multiplier = (HZ / tp->timer_offset);
11140 tp->asf_multiplier = (HZ / tp->timer_offset) *
11141 TG3_FW_UPDATE_FREQ_SEC;
11142
11143 timer_setup(&tp->timer, tg3_timer, 0);
11144}
11145
11146static void tg3_timer_start(struct tg3 *tp)
11147{
11148 tp->asf_counter = tp->asf_multiplier;
11149 tp->timer_counter = tp->timer_multiplier;
11150
11151 tp->timer.expires = jiffies + tp->timer_offset;
11152 add_timer(&tp->timer);
11153}
11154
11155static void tg3_timer_stop(struct tg3 *tp)
11156{
11157 del_timer_sync(&tp->timer);
11158}
11159
11160/* Restart hardware after configuration changes, self-test, etc.
11161 * Invoked with tp->lock held.
11162 */
11163static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11164 __releases(tp->lock)
11165 __acquires(tp->lock)
11166{
11167 int err;
11168
11169 err = tg3_init_hw(tp, reset_phy);
11170 if (err) {
11171 netdev_err(tp->dev,
11172 "Failed to re-initialize device, aborting\n");
11173 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11174 tg3_full_unlock(tp);
11175 tg3_timer_stop(tp);
11176 tp->irq_sync = 0;
11177 tg3_napi_enable(tp);
11178 dev_close(tp->dev);
11179 tg3_full_lock(tp, 0);
11180 }
11181 return err;
11182}
11183
11184static void tg3_reset_task(struct work_struct *work)
11185{
11186 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11187 int err;
11188
11189 rtnl_lock();
11190 tg3_full_lock(tp, 0);
11191
11192 if (!netif_running(tp->dev)) {
11193 tg3_flag_clear(tp, RESET_TASK_PENDING);
11194 tg3_full_unlock(tp);
11195 rtnl_unlock();
11196 return;
11197 }
11198
11199 tg3_full_unlock(tp);
11200
11201 tg3_phy_stop(tp);
11202
11203 tg3_netif_stop(tp);
11204
11205 tg3_full_lock(tp, 1);
11206
11207 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11208 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11209 tp->write32_rx_mbox = tg3_write_flush_reg32;
11210 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11211 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11212 }
11213
11214 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11215 err = tg3_init_hw(tp, true);
11216 if (err)
11217 goto out;
11218
11219 tg3_netif_start(tp);
11220
11221out:
11222 tg3_full_unlock(tp);
11223
11224 if (!err)
11225 tg3_phy_start(tp);
11226
11227 tg3_flag_clear(tp, RESET_TASK_PENDING);
11228 rtnl_unlock();
11229}
11230
11231static int tg3_request_irq(struct tg3 *tp, int irq_num)
11232{
11233 irq_handler_t fn;
11234 unsigned long flags;
11235 char *name;
11236 struct tg3_napi *tnapi = &tp->napi[irq_num];
11237
11238 if (tp->irq_cnt == 1)
11239 name = tp->dev->name;
11240 else {
11241 name = &tnapi->irq_lbl[0];
11242 if (tnapi->tx_buffers && tnapi->rx_rcb)
11243 snprintf(name, IFNAMSIZ,
11244 "%s-txrx-%d", tp->dev->name, irq_num);
11245 else if (tnapi->tx_buffers)
11246 snprintf(name, IFNAMSIZ,
11247 "%s-tx-%d", tp->dev->name, irq_num);
11248 else if (tnapi->rx_rcb)
11249 snprintf(name, IFNAMSIZ,
11250 "%s-rx-%d", tp->dev->name, irq_num);
11251 else
11252 snprintf(name, IFNAMSIZ,
11253 "%s-%d", tp->dev->name, irq_num);
11254 name[IFNAMSIZ-1] = 0;
11255 }
11256
11257 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11258 fn = tg3_msi;
11259 if (tg3_flag(tp, 1SHOT_MSI))
11260 fn = tg3_msi_1shot;
11261 flags = 0;
11262 } else {
11263 fn = tg3_interrupt;
11264 if (tg3_flag(tp, TAGGED_STATUS))
11265 fn = tg3_interrupt_tagged;
11266 flags = IRQF_SHARED;
11267 }
11268
11269 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11270}
11271
11272static int tg3_test_interrupt(struct tg3 *tp)
11273{
11274 struct tg3_napi *tnapi = &tp->napi[0];
11275 struct net_device *dev = tp->dev;
11276 int err, i, intr_ok = 0;
11277 u32 val;
11278
11279 if (!netif_running(dev))
11280 return -ENODEV;
11281
11282 tg3_disable_ints(tp);
11283
11284 free_irq(tnapi->irq_vec, tnapi);
11285
11286 /*
11287 * Turn off MSI one shot mode. Otherwise this test has no
11288 * observable way to know whether the interrupt was delivered.
11289 */
11290 if (tg3_flag(tp, 57765_PLUS)) {
11291 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11292 tw32(MSGINT_MODE, val);
11293 }
11294
11295 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11296 IRQF_SHARED, dev->name, tnapi);
11297 if (err)
11298 return err;
11299
11300 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11301 tg3_enable_ints(tp);
11302
11303 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11304 tnapi->coal_now);
11305
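	/* The coalesce-now write above forces an immediate host coalescing
	 * cycle, which should raise the test interrupt; poll for up to
	 * about 5 * 10ms for it to be observed.
	 */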
11306 for (i = 0; i < 5; i++) {
11307 u32 int_mbox, misc_host_ctrl;
11308
11309 int_mbox = tr32_mailbox(tnapi->int_mbox);
11310 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11311
11312 if ((int_mbox != 0) ||
11313 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11314 intr_ok = 1;
11315 break;
11316 }
11317
11318 if (tg3_flag(tp, 57765_PLUS) &&
11319 tnapi->hw_status->status_tag != tnapi->last_tag)
11320 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11321
11322 msleep(10);
11323 }
11324
11325 tg3_disable_ints(tp);
11326
11327 free_irq(tnapi->irq_vec, tnapi);
11328
11329 err = tg3_request_irq(tp, 0);
11330
11331 if (err)
11332 return err;
11333
11334 if (intr_ok) {
11335 /* Reenable MSI one shot mode. */
11336 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11337 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11338 tw32(MSGINT_MODE, val);
11339 }
11340 return 0;
11341 }
11342
11343 return -EIO;
11344}
11345
11346/* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
11347 * mode is successfully restored.
11348 */
11349static int tg3_test_msi(struct tg3 *tp)
11350{
11351 int err;
11352 u16 pci_cmd;
11353
11354 if (!tg3_flag(tp, USING_MSI))
11355 return 0;
11356
11357 /* Turn off SERR reporting in case MSI terminates with Master
11358 * Abort.
11359 */
11360 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11361 pci_write_config_word(tp->pdev, PCI_COMMAND,
11362 pci_cmd & ~PCI_COMMAND_SERR);
11363
11364 err = tg3_test_interrupt(tp);
11365
11366 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11367
11368 if (!err)
11369 return 0;
11370
11371 /* other failures */
11372 if (err != -EIO)
11373 return err;
11374
11375 /* MSI test failed, go back to INTx mode */
11376 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11377 "to INTx mode. Please report this failure to the PCI "
11378 "maintainer and include system chipset information\n");
11379
11380 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11381
11382 pci_disable_msi(tp->pdev);
11383
11384 tg3_flag_clear(tp, USING_MSI);
11385 tp->napi[0].irq_vec = tp->pdev->irq;
11386
11387 err = tg3_request_irq(tp, 0);
11388 if (err)
11389 return err;
11390
11391 /* Need to reset the chip because the MSI cycle may have terminated
11392 * with Master Abort.
11393 */
11394 tg3_full_lock(tp, 1);
11395
11396 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11397 err = tg3_init_hw(tp, true);
11398
11399 tg3_full_unlock(tp);
11400
11401 if (err)
11402 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11403
11404 return err;
11405}
11406
11407static int tg3_request_firmware(struct tg3 *tp)
11408{
11409 const struct tg3_firmware_hdr *fw_hdr;
11410
11411 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11412 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11413 tp->fw_needed);
11414 return -ENOENT;
11415 }
11416
11417 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11418
11419 /* Firmware blob starts with version numbers, followed by
11420 * start address and _full_ length including BSS sections
11421	 * (which must be longer than the actual data, of course).
11422 */
11423
11424 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11425 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11426 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11427 tp->fw_len, tp->fw_needed);
11428 release_firmware(tp->fw);
11429 tp->fw = NULL;
11430 return -EINVAL;
11431 }
11432
11433 /* We no longer need firmware; we have it. */
11434 tp->fw_needed = NULL;
11435 return 0;
11436}
11437
11438static u32 tg3_irq_count(struct tg3 *tp)
11439{
11440 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11441
11442 if (irq_cnt > 1) {
11443		/* We want as many rx rings enabled as there are CPUs.
11444 * In multiqueue MSI-X mode, the first MSI-X vector
11445 * only deals with link interrupts, etc, so we add
11446 * one to the number of vectors we are requesting.
11447 */
11448 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11449 }
11450
11451 return irq_cnt;
11452}
11453
11454static bool tg3_enable_msix(struct tg3 *tp)
11455{
11456 int i, rc;
11457 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11458
11459 tp->txq_cnt = tp->txq_req;
11460 tp->rxq_cnt = tp->rxq_req;
11461 if (!tp->rxq_cnt)
11462 tp->rxq_cnt = netif_get_num_default_rss_queues();
11463 if (tp->rxq_cnt > tp->rxq_max)
11464 tp->rxq_cnt = tp->rxq_max;
11465
11466 /* Disable multiple TX rings by default. Simple round-robin hardware
11467 * scheduling of the TX rings can cause starvation of rings with
11468 * small packets when other rings have TSO or jumbo packets.
11469 */
11470 if (!tp->txq_req)
11471 tp->txq_cnt = 1;
11472
11473 tp->irq_cnt = tg3_irq_count(tp);
11474
11475 for (i = 0; i < tp->irq_max; i++) {
11476 msix_ent[i].entry = i;
11477 msix_ent[i].vector = 0;
11478 }
11479
11480 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11481 if (rc < 0) {
11482 return false;
11483 } else if (rc < tp->irq_cnt) {
11484 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11485 tp->irq_cnt, rc);
11486 tp->irq_cnt = rc;
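		/* Vector 0 still services link and other misc interrupts, so
		 * only rc - 1 vectors remain for rx rings.
		 */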
11487 tp->rxq_cnt = max(rc - 1, 1);
11488 if (tp->txq_cnt)
11489 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11490 }
11491
11492 for (i = 0; i < tp->irq_max; i++)
11493 tp->napi[i].irq_vec = msix_ent[i].vector;
11494
11495 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11496 pci_disable_msix(tp->pdev);
11497 return false;
11498 }
11499
11500 if (tp->irq_cnt == 1)
11501 return true;
11502
11503 tg3_flag_set(tp, ENABLE_RSS);
11504
11505 if (tp->txq_cnt > 1)
11506 tg3_flag_set(tp, ENABLE_TSS);
11507
11508 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11509
11510 return true;
11511}
11512
11513static void tg3_ints_init(struct tg3 *tp)
11514{
11515 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11516 !tg3_flag(tp, TAGGED_STATUS)) {
11517 /* All MSI supporting chips should support tagged
11518 * status. Assert that this is the case.
11519 */
11520 netdev_warn(tp->dev,
11521 "MSI without TAGGED_STATUS? Not using MSI\n");
11522 goto defcfg;
11523 }
11524
11525 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11526 tg3_flag_set(tp, USING_MSIX);
11527 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11528 tg3_flag_set(tp, USING_MSI);
11529
11530 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11531 u32 msi_mode = tr32(MSGINT_MODE);
11532 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11533 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11534 if (!tg3_flag(tp, 1SHOT_MSI))
11535 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11536 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11537 }
11538defcfg:
11539 if (!tg3_flag(tp, USING_MSIX)) {
11540 tp->irq_cnt = 1;
11541 tp->napi[0].irq_vec = tp->pdev->irq;
11542 }
11543
11544 if (tp->irq_cnt == 1) {
11545 tp->txq_cnt = 1;
11546 tp->rxq_cnt = 1;
11547 netif_set_real_num_tx_queues(tp->dev, 1);
11548 netif_set_real_num_rx_queues(tp->dev, 1);
11549 }
11550}
11551
11552static void tg3_ints_fini(struct tg3 *tp)
11553{
11554 if (tg3_flag(tp, USING_MSIX))
11555 pci_disable_msix(tp->pdev);
11556 else if (tg3_flag(tp, USING_MSI))
11557 pci_disable_msi(tp->pdev);
11558 tg3_flag_clear(tp, USING_MSI);
11559 tg3_flag_clear(tp, USING_MSIX);
11560 tg3_flag_clear(tp, ENABLE_RSS);
11561 tg3_flag_clear(tp, ENABLE_TSS);
11562}
11563
11564static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11565 bool init)
11566{
11567 struct net_device *dev = tp->dev;
11568 int i, err;
11569
11570 /*
11571 * Setup interrupts first so we know how
11572 * many NAPI resources to allocate
11573 */
11574 tg3_ints_init(tp);
11575
11576 tg3_rss_check_indir_tbl(tp);
11577
11578 /* The placement of this call is tied
11579 * to the setup and use of Host TX descriptors.
11580 */
11581 err = tg3_alloc_consistent(tp);
11582 if (err)
11583 goto out_ints_fini;
11584
11585 tg3_napi_init(tp);
11586
11587 tg3_napi_enable(tp);
11588
11589 for (i = 0; i < tp->irq_cnt; i++) {
11590 err = tg3_request_irq(tp, i);
11591 if (err) {
11592 for (i--; i >= 0; i--) {
11593 struct tg3_napi *tnapi = &tp->napi[i];
11594
11595 free_irq(tnapi->irq_vec, tnapi);
11596 }
11597 goto out_napi_fini;
11598 }
11599 }
11600
11601 tg3_full_lock(tp, 0);
11602
11603 if (init)
11604 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11605
11606 err = tg3_init_hw(tp, reset_phy);
11607 if (err) {
11608 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11609 tg3_free_rings(tp);
11610 }
11611
11612 tg3_full_unlock(tp);
11613
11614 if (err)
11615 goto out_free_irq;
11616
11617 if (test_irq && tg3_flag(tp, USING_MSI)) {
11618 err = tg3_test_msi(tp);
11619
11620 if (err) {
11621 tg3_full_lock(tp, 0);
11622 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11623 tg3_free_rings(tp);
11624 tg3_full_unlock(tp);
11625
11626 goto out_napi_fini;
11627 }
11628
11629 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11630 u32 val = tr32(PCIE_TRANSACTION_CFG);
11631
11632 tw32(PCIE_TRANSACTION_CFG,
11633 val | PCIE_TRANS_CFG_1SHOT_MSI);
11634 }
11635 }
11636
11637 tg3_phy_start(tp);
11638
11639 tg3_hwmon_open(tp);
11640
11641 tg3_full_lock(tp, 0);
11642
11643 tg3_timer_start(tp);
11644 tg3_flag_set(tp, INIT_COMPLETE);
11645 tg3_enable_ints(tp);
11646
11647 tg3_ptp_resume(tp);
11648
11649 tg3_full_unlock(tp);
11650
11651 netif_tx_start_all_queues(dev);
11652
11653 /*
11654	 * Reset the loopback feature if it was turned on while the device was
11655	 * down, and make sure that it's installed properly now.
11656 */
11657 if (dev->features & NETIF_F_LOOPBACK)
11658 tg3_set_loopback(dev, dev->features);
11659
11660 return 0;
11661
11662out_free_irq:
11663 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11664 struct tg3_napi *tnapi = &tp->napi[i];
11665 free_irq(tnapi->irq_vec, tnapi);
11666 }
11667
11668out_napi_fini:
11669 tg3_napi_disable(tp);
11670 tg3_napi_fini(tp);
11671 tg3_free_consistent(tp);
11672
11673out_ints_fini:
11674 tg3_ints_fini(tp);
11675
11676 return err;
11677}
11678
11679static void tg3_stop(struct tg3 *tp)
11680{
11681 int i;
11682
11683 tg3_reset_task_cancel(tp);
11684 tg3_netif_stop(tp);
11685
11686 tg3_timer_stop(tp);
11687
11688 tg3_hwmon_close(tp);
11689
11690 tg3_phy_stop(tp);
11691
11692 tg3_full_lock(tp, 1);
11693
11694 tg3_disable_ints(tp);
11695
11696 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11697 tg3_free_rings(tp);
11698 tg3_flag_clear(tp, INIT_COMPLETE);
11699
11700 tg3_full_unlock(tp);
11701
11702 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11703 struct tg3_napi *tnapi = &tp->napi[i];
11704 free_irq(tnapi->irq_vec, tnapi);
11705 }
11706
11707 tg3_ints_fini(tp);
11708
11709 tg3_napi_fini(tp);
11710
11711 tg3_free_consistent(tp);
11712}
11713
11714static int tg3_open(struct net_device *dev)
11715{
11716 struct tg3 *tp = netdev_priv(dev);
11717 int err;
11718
11719 if (tp->pcierr_recovery) {
11720 netdev_err(dev, "Failed to open device. PCI error recovery "
11721 "in progress\n");
11722 return -EAGAIN;
11723 }
11724
11725 if (tp->fw_needed) {
11726 err = tg3_request_firmware(tp);
11727 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11728 if (err) {
11729 netdev_warn(tp->dev, "EEE capability disabled\n");
11730 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11731 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11732 netdev_warn(tp->dev, "EEE capability restored\n");
11733 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11734 }
11735 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11736 if (err)
11737 return err;
11738 } else if (err) {
11739 netdev_warn(tp->dev, "TSO capability disabled\n");
11740 tg3_flag_clear(tp, TSO_CAPABLE);
11741 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11742 netdev_notice(tp->dev, "TSO capability restored\n");
11743 tg3_flag_set(tp, TSO_CAPABLE);
11744 }
11745 }
11746
11747 tg3_carrier_off(tp);
11748
11749 err = tg3_power_up(tp);
11750 if (err)
11751 return err;
11752
11753 tg3_full_lock(tp, 0);
11754
11755 tg3_disable_ints(tp);
11756 tg3_flag_clear(tp, INIT_COMPLETE);
11757
11758 tg3_full_unlock(tp);
11759
11760 err = tg3_start(tp,
11761 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11762 true, true);
11763 if (err) {
11764 tg3_frob_aux_power(tp, false);
11765 pci_set_power_state(tp->pdev, PCI_D3hot);
11766 }
11767
11768 return err;
11769}
11770
11771static int tg3_close(struct net_device *dev)
11772{
11773 struct tg3 *tp = netdev_priv(dev);
11774
11775 if (tp->pcierr_recovery) {
11776 netdev_err(dev, "Failed to close device. PCI error recovery "
11777 "in progress\n");
11778 return -EAGAIN;
11779 }
11780
11781 tg3_stop(tp);
11782
11783 if (pci_device_is_present(tp->pdev)) {
11784 tg3_power_down_prepare(tp);
11785
11786 tg3_carrier_off(tp);
11787 }
11788 return 0;
11789}
11790
11791static inline u64 get_stat64(tg3_stat64_t *val)
11792{
11793 return ((u64)val->high << 32) | ((u64)val->low);
11794}
11795
11796static u64 tg3_calc_crc_errors(struct tg3 *tp)
11797{
11798 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11799
11800 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11801 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11802 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11803 u32 val;
11804
11805 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11806 tg3_writephy(tp, MII_TG3_TEST1,
11807 val | MII_TG3_TEST1_CRC_EN);
11808 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11809 } else
11810 val = 0;
11811
11812 tp->phy_crc_errors += val;
11813
11814 return tp->phy_crc_errors;
11815 }
11816
11817 return get_stat64(&hw_stats->rx_fcs_errors);
11818}
11819
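/* Fold the totals saved across the last chip reset (tp->estats_prev) into
 * the live since-reset hardware counters so that callers see monotonically
 * increasing statistics.
 */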
11820#define ESTAT_ADD(member) \
11821 estats->member = old_estats->member + \
11822 get_stat64(&hw_stats->member)
11823
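/*
 * Each ethtool stat is the previously saved snapshot (tp->estats_prev) plus
 * the live hardware counter, so totals stay monotonic even though the
 * hardware counters restart after a chip reset.
 */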
11824static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11825{
11826 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11827 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11828
11829 ESTAT_ADD(rx_octets);
11830 ESTAT_ADD(rx_fragments);
11831 ESTAT_ADD(rx_ucast_packets);
11832 ESTAT_ADD(rx_mcast_packets);
11833 ESTAT_ADD(rx_bcast_packets);
11834 ESTAT_ADD(rx_fcs_errors);
11835 ESTAT_ADD(rx_align_errors);
11836 ESTAT_ADD(rx_xon_pause_rcvd);
11837 ESTAT_ADD(rx_xoff_pause_rcvd);
11838 ESTAT_ADD(rx_mac_ctrl_rcvd);
11839 ESTAT_ADD(rx_xoff_entered);
11840 ESTAT_ADD(rx_frame_too_long_errors);
11841 ESTAT_ADD(rx_jabbers);
11842 ESTAT_ADD(rx_undersize_packets);
11843 ESTAT_ADD(rx_in_length_errors);
11844 ESTAT_ADD(rx_out_length_errors);
11845 ESTAT_ADD(rx_64_or_less_octet_packets);
11846 ESTAT_ADD(rx_65_to_127_octet_packets);
11847 ESTAT_ADD(rx_128_to_255_octet_packets);
11848 ESTAT_ADD(rx_256_to_511_octet_packets);
11849 ESTAT_ADD(rx_512_to_1023_octet_packets);
11850 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11851 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11852 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11853 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11854 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11855
11856 ESTAT_ADD(tx_octets);
11857 ESTAT_ADD(tx_collisions);
11858 ESTAT_ADD(tx_xon_sent);
11859 ESTAT_ADD(tx_xoff_sent);
11860 ESTAT_ADD(tx_flow_control);
11861 ESTAT_ADD(tx_mac_errors);
11862 ESTAT_ADD(tx_single_collisions);
11863 ESTAT_ADD(tx_mult_collisions);
11864 ESTAT_ADD(tx_deferred);
11865 ESTAT_ADD(tx_excessive_collisions);
11866 ESTAT_ADD(tx_late_collisions);
11867 ESTAT_ADD(tx_collide_2times);
11868 ESTAT_ADD(tx_collide_3times);
11869 ESTAT_ADD(tx_collide_4times);
11870 ESTAT_ADD(tx_collide_5times);
11871 ESTAT_ADD(tx_collide_6times);
11872 ESTAT_ADD(tx_collide_7times);
11873 ESTAT_ADD(tx_collide_8times);
11874 ESTAT_ADD(tx_collide_9times);
11875 ESTAT_ADD(tx_collide_10times);
11876 ESTAT_ADD(tx_collide_11times);
11877 ESTAT_ADD(tx_collide_12times);
11878 ESTAT_ADD(tx_collide_13times);
11879 ESTAT_ADD(tx_collide_14times);
11880 ESTAT_ADD(tx_collide_15times);
11881 ESTAT_ADD(tx_ucast_packets);
11882 ESTAT_ADD(tx_mcast_packets);
11883 ESTAT_ADD(tx_bcast_packets);
11884 ESTAT_ADD(tx_carrier_sense_errors);
11885 ESTAT_ADD(tx_discards);
11886 ESTAT_ADD(tx_errors);
11887
11888 ESTAT_ADD(dma_writeq_full);
11889 ESTAT_ADD(dma_write_prioq_full);
11890 ESTAT_ADD(rxbds_empty);
11891 ESTAT_ADD(rx_discards);
11892 ESTAT_ADD(rx_errors);
11893 ESTAT_ADD(rx_threshold_hit);
11894
11895 ESTAT_ADD(dma_readq_full);
11896 ESTAT_ADD(dma_read_prioq_full);
11897 ESTAT_ADD(tx_comp_queue_full);
11898
11899 ESTAT_ADD(ring_set_send_prod_index);
11900 ESTAT_ADD(ring_status_update);
11901 ESTAT_ADD(nic_irqs);
11902 ESTAT_ADD(nic_avoided_irqs);
11903 ESTAT_ADD(nic_tx_threshold_hit);
11904
11905 ESTAT_ADD(mbuf_lwm_thresh_hit);
11906}
11907
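/*
 * Map the MAC counters onto rtnl_link_stats64: packet totals are the sum of
 * the unicast/multicast/broadcast counters, CRC errors come from
 * tg3_calc_crc_errors(), and the dropped counts are software-maintained.
 */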
11908static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11909{
11910 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11911 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11912
11913 stats->rx_packets = old_stats->rx_packets +
11914 get_stat64(&hw_stats->rx_ucast_packets) +
11915 get_stat64(&hw_stats->rx_mcast_packets) +
11916 get_stat64(&hw_stats->rx_bcast_packets);
11917
11918 stats->tx_packets = old_stats->tx_packets +
11919 get_stat64(&hw_stats->tx_ucast_packets) +
11920 get_stat64(&hw_stats->tx_mcast_packets) +
11921 get_stat64(&hw_stats->tx_bcast_packets);
11922
11923 stats->rx_bytes = old_stats->rx_bytes +
11924 get_stat64(&hw_stats->rx_octets);
11925 stats->tx_bytes = old_stats->tx_bytes +
11926 get_stat64(&hw_stats->tx_octets);
11927
11928 stats->rx_errors = old_stats->rx_errors +
11929 get_stat64(&hw_stats->rx_errors);
11930 stats->tx_errors = old_stats->tx_errors +
11931 get_stat64(&hw_stats->tx_errors) +
11932 get_stat64(&hw_stats->tx_mac_errors) +
11933 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11934 get_stat64(&hw_stats->tx_discards);
11935
11936 stats->multicast = old_stats->multicast +
11937 get_stat64(&hw_stats->rx_mcast_packets);
11938 stats->collisions = old_stats->collisions +
11939 get_stat64(&hw_stats->tx_collisions);
11940
11941 stats->rx_length_errors = old_stats->rx_length_errors +
11942 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11943 get_stat64(&hw_stats->rx_undersize_packets);
11944
11945 stats->rx_frame_errors = old_stats->rx_frame_errors +
11946 get_stat64(&hw_stats->rx_align_errors);
11947 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11948 get_stat64(&hw_stats->tx_discards);
11949 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11950 get_stat64(&hw_stats->tx_carrier_sense_errors);
11951
11952 stats->rx_crc_errors = old_stats->rx_crc_errors +
11953 tg3_calc_crc_errors(tp);
11954
11955 stats->rx_missed_errors = old_stats->rx_missed_errors +
11956 get_stat64(&hw_stats->rx_discards);
11957
11958 stats->rx_dropped = tp->rx_dropped;
11959 stats->tx_dropped = tp->tx_dropped;
11960}
11961
11962static int tg3_get_regs_len(struct net_device *dev)
11963{
11964 return TG3_REG_BLK_SIZE;
11965}
11966
11967static void tg3_get_regs(struct net_device *dev,
11968 struct ethtool_regs *regs, void *_p)
11969{
11970 struct tg3 *tp = netdev_priv(dev);
11971
11972 regs->version = 0;
11973
11974 memset(_p, 0, TG3_REG_BLK_SIZE);
11975
11976 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11977 return;
11978
11979 tg3_full_lock(tp, 0);
11980
11981 tg3_dump_legacy_regs(tp, (u32 *)_p);
11982
11983 tg3_full_unlock(tp);
11984}
11985
11986static int tg3_get_eeprom_len(struct net_device *dev)
11987{
11988 struct tg3 *tp = netdev_priv(dev);
11989
11990 return tp->nvram_size;
11991}
11992
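/*
 * NVRAM is read 32 bits at a time, so the request is split into an unaligned
 * head, a run of whole words, and an unaligned tail.  CPMU link-aware and
 * link-idle clock modes are overridden for the duration, presumably so that
 * reduced-clock power states cannot stall the accesses.
 */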
11993static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11994{
11995 struct tg3 *tp = netdev_priv(dev);
11996 int ret, cpmu_restore = 0;
11997 u8 *pd;
11998 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11999 __be32 val;
12000
12001 if (tg3_flag(tp, NO_NVRAM))
12002 return -EINVAL;
12003
12004 offset = eeprom->offset;
12005 len = eeprom->len;
12006 eeprom->len = 0;
12007
12008 eeprom->magic = TG3_EEPROM_MAGIC;
12009
12010 /* Override clock, link aware and link idle modes */
12011 if (tg3_flag(tp, CPMU_PRESENT)) {
12012 cpmu_val = tr32(TG3_CPMU_CTRL);
12013 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12014 CPMU_CTRL_LINK_IDLE_MODE)) {
12015 tw32(TG3_CPMU_CTRL, cpmu_val &
12016 ~(CPMU_CTRL_LINK_AWARE_MODE |
12017 CPMU_CTRL_LINK_IDLE_MODE));
12018 cpmu_restore = 1;
12019 }
12020 }
12021 tg3_override_clk(tp);
12022
12023 if (offset & 3) {
12024 /* adjustments to start on required 4 byte boundary */
12025 b_offset = offset & 3;
12026 b_count = 4 - b_offset;
12027 if (b_count > len) {
12028 /* i.e. offset=1 len=2 */
12029			/* e.g. offset=1 len=2: the whole request fits in one word */
12030 }
12031 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12032 if (ret)
12033 goto eeprom_done;
12034 memcpy(data, ((char *)&val) + b_offset, b_count);
12035 len -= b_count;
12036 offset += b_count;
12037 eeprom->len += b_count;
12038 }
12039
12040 /* read bytes up to the last 4 byte boundary */
12041 pd = &data[eeprom->len];
12042 for (i = 0; i < (len - (len & 3)); i += 4) {
12043 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12044 if (ret) {
12045 if (i)
12046 i -= 4;
12047 eeprom->len += i;
12048 goto eeprom_done;
12049 }
12050 memcpy(pd + i, &val, 4);
12051 if (need_resched()) {
12052 if (signal_pending(current)) {
12053 eeprom->len += i;
12054 ret = -EINTR;
12055 goto eeprom_done;
12056 }
12057 cond_resched();
12058 }
12059 }
12060 eeprom->len += i;
12061
12062 if (len & 3) {
12063 /* read last bytes not ending on 4 byte boundary */
12064 pd = &data[eeprom->len];
12065 b_count = len & 3;
12066 b_offset = offset + len - b_count;
12067 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12068 if (ret)
12069 goto eeprom_done;
12070 memcpy(pd, &val, b_count);
12071 eeprom->len += b_count;
12072 }
12073 ret = 0;
12074
12075eeprom_done:
12076 /* Restore clock, link aware and link idle modes */
12077 tg3_restore_clk(tp);
12078 if (cpmu_restore)
12079 tw32(TG3_CPMU_CTRL, cpmu_val);
12080
12081 return ret;
12082}
12083
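/*
 * NVRAM writes must also be 32-bit aligned: round the range out to whole
 * words and preserve the neighbouring bytes by reading the first and last
 * words back before rewriting the block.
 */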
12084static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12085{
12086 struct tg3 *tp = netdev_priv(dev);
12087 int ret;
12088 u32 offset, len, b_offset, odd_len;
12089 u8 *buf;
12090 __be32 start = 0, end;
12091
12092 if (tg3_flag(tp, NO_NVRAM) ||
12093 eeprom->magic != TG3_EEPROM_MAGIC)
12094 return -EINVAL;
12095
12096 offset = eeprom->offset;
12097 len = eeprom->len;
12098
12099 if ((b_offset = (offset & 3))) {
12100 /* adjustments to start on required 4 byte boundary */
12101 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12102 if (ret)
12103 return ret;
12104 len += b_offset;
12105 offset &= ~3;
12106 if (len < 4)
12107 len = 4;
12108 }
12109
12110 odd_len = 0;
12111 if (len & 3) {
12112 /* adjustments to end on required 4 byte boundary */
12113 odd_len = 1;
12114 len = (len + 3) & ~3;
12115 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12116 if (ret)
12117 return ret;
12118 }
12119
12120 buf = data;
12121 if (b_offset || odd_len) {
12122 buf = kmalloc(len, GFP_KERNEL);
12123 if (!buf)
12124 return -ENOMEM;
12125 if (b_offset)
12126 memcpy(buf, &start, 4);
12127 if (odd_len)
12128 memcpy(buf+len-4, &end, 4);
12129 memcpy(buf + b_offset, data, eeprom->len);
12130 }
12131
12132 ret = tg3_nvram_write_block(tp, offset, len, buf);
12133
12134 if (buf != data)
12135 kfree(buf);
12136
12137 return ret;
12138}
12139
12140static int tg3_get_link_ksettings(struct net_device *dev,
12141 struct ethtool_link_ksettings *cmd)
12142{
12143 struct tg3 *tp = netdev_priv(dev);
12144 u32 supported, advertising;
12145
12146 if (tg3_flag(tp, USE_PHYLIB)) {
12147 struct phy_device *phydev;
12148 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12149 return -EAGAIN;
12150 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12151 phy_ethtool_ksettings_get(phydev, cmd);
12152
12153 return 0;
12154 }
12155
12156 supported = (SUPPORTED_Autoneg);
12157
12158 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12159 supported |= (SUPPORTED_1000baseT_Half |
12160 SUPPORTED_1000baseT_Full);
12161
12162 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12163 supported |= (SUPPORTED_100baseT_Half |
12164 SUPPORTED_100baseT_Full |
12165 SUPPORTED_10baseT_Half |
12166 SUPPORTED_10baseT_Full |
12167 SUPPORTED_TP);
12168 cmd->base.port = PORT_TP;
12169 } else {
12170 supported |= SUPPORTED_FIBRE;
12171 cmd->base.port = PORT_FIBRE;
12172 }
12173 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12174 supported);
12175
12176 advertising = tp->link_config.advertising;
12177 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12178 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12179 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12180 advertising |= ADVERTISED_Pause;
12181 } else {
12182 advertising |= ADVERTISED_Pause |
12183 ADVERTISED_Asym_Pause;
12184 }
12185 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12186 advertising |= ADVERTISED_Asym_Pause;
12187 }
12188 }
12189 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12190 advertising);
12191
12192 if (netif_running(dev) && tp->link_up) {
12193 cmd->base.speed = tp->link_config.active_speed;
12194 cmd->base.duplex = tp->link_config.active_duplex;
12195 ethtool_convert_legacy_u32_to_link_mode(
12196 cmd->link_modes.lp_advertising,
12197 tp->link_config.rmt_adv);
12198
12199 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12200 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12201 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12202 else
12203 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12204 }
12205 } else {
12206 cmd->base.speed = SPEED_UNKNOWN;
12207 cmd->base.duplex = DUPLEX_UNKNOWN;
12208 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12209 }
12210 cmd->base.phy_address = tp->phy_addr;
12211 cmd->base.autoneg = tp->link_config.autoneg;
12212 return 0;
12213}
12214
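/*
 * Validate and apply new link settings.  With autoneg, the advertisement is
 * clipped to what the PHY type supports (no 1000 Mb/s on 10/100-only parts,
 * TP modes only on copper, fibre otherwise); with forced mode, SerDes parts
 * accept only 1000/full and copper parts only 10 or 100 Mb/s.
 */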
12215static int tg3_set_link_ksettings(struct net_device *dev,
12216 const struct ethtool_link_ksettings *cmd)
12217{
12218 struct tg3 *tp = netdev_priv(dev);
12219 u32 speed = cmd->base.speed;
12220 u32 advertising;
12221
12222 if (tg3_flag(tp, USE_PHYLIB)) {
12223 struct phy_device *phydev;
12224 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12225 return -EAGAIN;
12226 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12227 return phy_ethtool_ksettings_set(phydev, cmd);
12228 }
12229
12230 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12231 cmd->base.autoneg != AUTONEG_DISABLE)
12232 return -EINVAL;
12233
12234 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12235 cmd->base.duplex != DUPLEX_FULL &&
12236 cmd->base.duplex != DUPLEX_HALF)
12237 return -EINVAL;
12238
12239 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12240 cmd->link_modes.advertising);
12241
12242 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12243 u32 mask = ADVERTISED_Autoneg |
12244 ADVERTISED_Pause |
12245 ADVERTISED_Asym_Pause;
12246
12247 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12248 mask |= ADVERTISED_1000baseT_Half |
12249 ADVERTISED_1000baseT_Full;
12250
12251 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12252 mask |= ADVERTISED_100baseT_Half |
12253 ADVERTISED_100baseT_Full |
12254 ADVERTISED_10baseT_Half |
12255 ADVERTISED_10baseT_Full |
12256 ADVERTISED_TP;
12257 else
12258 mask |= ADVERTISED_FIBRE;
12259
12260 if (advertising & ~mask)
12261 return -EINVAL;
12262
12263 mask &= (ADVERTISED_1000baseT_Half |
12264 ADVERTISED_1000baseT_Full |
12265 ADVERTISED_100baseT_Half |
12266 ADVERTISED_100baseT_Full |
12267 ADVERTISED_10baseT_Half |
12268 ADVERTISED_10baseT_Full);
12269
12270 advertising &= mask;
12271 } else {
12272 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12273 if (speed != SPEED_1000)
12274 return -EINVAL;
12275
12276 if (cmd->base.duplex != DUPLEX_FULL)
12277 return -EINVAL;
12278 } else {
12279 if (speed != SPEED_100 &&
12280 speed != SPEED_10)
12281 return -EINVAL;
12282 }
12283 }
12284
12285 tg3_full_lock(tp, 0);
12286
12287 tp->link_config.autoneg = cmd->base.autoneg;
12288 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12289 tp->link_config.advertising = (advertising |
12290 ADVERTISED_Autoneg);
12291 tp->link_config.speed = SPEED_UNKNOWN;
12292 tp->link_config.duplex = DUPLEX_UNKNOWN;
12293 } else {
12294 tp->link_config.advertising = 0;
12295 tp->link_config.speed = speed;
12296 tp->link_config.duplex = cmd->base.duplex;
12297 }
12298
12299 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12300
12301 tg3_warn_mgmt_link_flap(tp);
12302
12303 if (netif_running(dev))
12304 tg3_setup_phy(tp, true);
12305
12306 tg3_full_unlock(tp);
12307
12308 return 0;
12309}
12310
12311static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12312{
12313 struct tg3 *tp = netdev_priv(dev);
12314
12315 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12316 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12317 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12318 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12319}
12320
12321static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12322{
12323 struct tg3 *tp = netdev_priv(dev);
12324
12325 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12326 wol->supported = WAKE_MAGIC;
12327 else
12328 wol->supported = 0;
12329 wol->wolopts = 0;
12330 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12331 wol->wolopts = WAKE_MAGIC;
12332 memset(&wol->sopass, 0, sizeof(wol->sopass));
12333}
12334
12335static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12336{
12337 struct tg3 *tp = netdev_priv(dev);
12338 struct device *dp = &tp->pdev->dev;
12339
12340 if (wol->wolopts & ~WAKE_MAGIC)
12341 return -EINVAL;
12342 if ((wol->wolopts & WAKE_MAGIC) &&
12343 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12344 return -EINVAL;
12345
12346 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12347
12348 if (device_may_wakeup(dp))
12349 tg3_flag_set(tp, WOL_ENABLE);
12350 else
12351 tg3_flag_clear(tp, WOL_ENABLE);
12352
12353 return 0;
12354}
12355
12356static u32 tg3_get_msglevel(struct net_device *dev)
12357{
12358 struct tg3 *tp = netdev_priv(dev);
12359 return tp->msg_enable;
12360}
12361
12362static void tg3_set_msglevel(struct net_device *dev, u32 value)
12363{
12364 struct tg3 *tp = netdev_priv(dev);
12365 tp->msg_enable = value;
12366}
12367
12368static int tg3_nway_reset(struct net_device *dev)
12369{
12370 struct tg3 *tp = netdev_priv(dev);
12371 int r;
12372
12373 if (!netif_running(dev))
12374 return -EAGAIN;
12375
12376 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12377 return -EINVAL;
12378
12379 tg3_warn_mgmt_link_flap(tp);
12380
12381 if (tg3_flag(tp, USE_PHYLIB)) {
12382 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12383 return -EAGAIN;
12384 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12385 } else {
12386 u32 bmcr;
12387
12388 spin_lock_bh(&tp->lock);
12389 r = -EINVAL;
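		/* The first BMCR read is deliberately discarded and only the
		 * second one is trusted; presumably the first access can
		 * return stale data on some PHYs.
		 */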
12390 tg3_readphy(tp, MII_BMCR, &bmcr);
12391 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12392 ((bmcr & BMCR_ANENABLE) ||
12393 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12394 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12395 BMCR_ANENABLE);
12396 r = 0;
12397 }
12398 spin_unlock_bh(&tp->lock);
12399 }
12400
12401 return r;
12402}
12403
12404static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12405{
12406 struct tg3 *tp = netdev_priv(dev);
12407
12408 ering->rx_max_pending = tp->rx_std_ring_mask;
12409 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12410 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12411 else
12412 ering->rx_jumbo_max_pending = 0;
12413
12414 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12415
12416 ering->rx_pending = tp->rx_pending;
12417 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12418 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12419 else
12420 ering->rx_jumbo_pending = 0;
12421
12422 ering->tx_pending = tp->napi[0].tx_pending;
12423}
12424
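/*
 * Ring sizes are validated against the hardware ring masks, and the TX ring
 * must hold more than MAX_SKB_FRAGS descriptors (three times that with the
 * TSO_BUG workaround) so a maximally fragmented frame can always be posted.
 * Applying the change requires a halt/restart, including a PHY reset on
 * 5717/5719/5720 to avoid a PHY lockup.
 */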
12425static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12426{
12427 struct tg3 *tp = netdev_priv(dev);
12428 int i, irq_sync = 0, err = 0;
12429 bool reset_phy = false;
12430
12431 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12432 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12433 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12434 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12435 (tg3_flag(tp, TSO_BUG) &&
12436 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12437 return -EINVAL;
12438
12439 if (netif_running(dev)) {
12440 tg3_phy_stop(tp);
12441 tg3_netif_stop(tp);
12442 irq_sync = 1;
12443 }
12444
12445 tg3_full_lock(tp, irq_sync);
12446
12447 tp->rx_pending = ering->rx_pending;
12448
12449 if (tg3_flag(tp, MAX_RXPEND_64) &&
12450 tp->rx_pending > 63)
12451 tp->rx_pending = 63;
12452
12453 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12454 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12455
12456 for (i = 0; i < tp->irq_max; i++)
12457 tp->napi[i].tx_pending = ering->tx_pending;
12458
12459 if (netif_running(dev)) {
12460 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12461 /* Reset PHY to avoid PHY lock up */
12462 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12463 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12464 tg3_asic_rev(tp) == ASIC_REV_5720)
12465 reset_phy = true;
12466
12467 err = tg3_restart_hw(tp, reset_phy);
12468 if (!err)
12469 tg3_netif_start(tp);
12470 }
12471
12472 tg3_full_unlock(tp);
12473
12474 if (irq_sync && !err)
12475 tg3_phy_start(tp);
12476
12477 return err;
12478}
12479
12480static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12481{
12482 struct tg3 *tp = netdev_priv(dev);
12483
12484 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12485
12486 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12487 epause->rx_pause = 1;
12488 else
12489 epause->rx_pause = 0;
12490
12491 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12492 epause->tx_pause = 1;
12493 else
12494 epause->tx_pause = 0;
12495}
12496
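/*
 * With phylib, pause changes are folded into the PHY advertisement and the
 * link is renegotiated; otherwise the flow-control flags are applied to
 * link_config directly and, if the interface is up, the hardware is
 * restarted.
 */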
12497static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12498{
12499 struct tg3 *tp = netdev_priv(dev);
12500 int err = 0;
12501 bool reset_phy = false;
12502
12503 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12504 tg3_warn_mgmt_link_flap(tp);
12505
12506 if (tg3_flag(tp, USE_PHYLIB)) {
12507 u32 newadv;
12508 struct phy_device *phydev;
12509
12510 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12511
12512 if (!(phydev->supported & SUPPORTED_Pause) ||
12513 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12514 (epause->rx_pause != epause->tx_pause)))
12515 return -EINVAL;
12516
12517 tp->link_config.flowctrl = 0;
12518 if (epause->rx_pause) {
12519 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12520
12521 if (epause->tx_pause) {
12522 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12523 newadv = ADVERTISED_Pause;
12524 } else
12525 newadv = ADVERTISED_Pause |
12526 ADVERTISED_Asym_Pause;
12527 } else if (epause->tx_pause) {
12528 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12529 newadv = ADVERTISED_Asym_Pause;
12530 } else
12531 newadv = 0;
12532
12533 if (epause->autoneg)
12534 tg3_flag_set(tp, PAUSE_AUTONEG);
12535 else
12536 tg3_flag_clear(tp, PAUSE_AUTONEG);
12537
12538 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12539 u32 oldadv = phydev->advertising &
12540 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12541 if (oldadv != newadv) {
12542 phydev->advertising &=
12543 ~(ADVERTISED_Pause |
12544 ADVERTISED_Asym_Pause);
12545 phydev->advertising |= newadv;
12546 if (phydev->autoneg) {
12547 /*
12548 * Always renegotiate the link to
12549 * inform our link partner of our
12550 * flow control settings, even if the
12551 * flow control is forced. Let
12552 * tg3_adjust_link() do the final
12553 * flow control setup.
12554 */
12555 return phy_start_aneg(phydev);
12556 }
12557 }
12558
12559 if (!epause->autoneg)
12560 tg3_setup_flow_control(tp, 0, 0);
12561 } else {
12562 tp->link_config.advertising &=
12563 ~(ADVERTISED_Pause |
12564 ADVERTISED_Asym_Pause);
12565 tp->link_config.advertising |= newadv;
12566 }
12567 } else {
12568 int irq_sync = 0;
12569
12570 if (netif_running(dev)) {
12571 tg3_netif_stop(tp);
12572 irq_sync = 1;
12573 }
12574
12575 tg3_full_lock(tp, irq_sync);
12576
12577 if (epause->autoneg)
12578 tg3_flag_set(tp, PAUSE_AUTONEG);
12579 else
12580 tg3_flag_clear(tp, PAUSE_AUTONEG);
12581 if (epause->rx_pause)
12582 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12583 else
12584 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12585 if (epause->tx_pause)
12586 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12587 else
12588 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12589
12590 if (netif_running(dev)) {
12591 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12592 /* Reset PHY to avoid PHY lock up */
12593 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12594 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12595 tg3_asic_rev(tp) == ASIC_REV_5720)
12596 reset_phy = true;
12597
12598 err = tg3_restart_hw(tp, reset_phy);
12599 if (!err)
12600 tg3_netif_start(tp);
12601 }
12602
12603 tg3_full_unlock(tp);
12604 }
12605
12606 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12607
12608 return err;
12609}
12610
12611static int tg3_get_sset_count(struct net_device *dev, int sset)
12612{
12613 switch (sset) {
12614 case ETH_SS_TEST:
12615 return TG3_NUM_TEST;
12616 case ETH_SS_STATS:
12617 return TG3_NUM_STATS;
12618 default:
12619 return -EOPNOTSUPP;
12620 }
12621}
12622
12623static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12624 u32 *rules __always_unused)
12625{
12626 struct tg3 *tp = netdev_priv(dev);
12627
12628 if (!tg3_flag(tp, SUPPORT_MSIX))
12629 return -EOPNOTSUPP;
12630
12631 switch (info->cmd) {
12632 case ETHTOOL_GRXRINGS:
12633		if (netif_running(tp->dev)) {
12634			info->data = tp->rxq_cnt;
12635		} else {
12636 info->data = num_online_cpus();
12637 if (info->data > TG3_RSS_MAX_NUM_QS)
12638 info->data = TG3_RSS_MAX_NUM_QS;
12639 }
12640
12641 return 0;
12642
12643 default:
12644 return -EOPNOTSUPP;
12645 }
12646}
12647
12648static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12649{
12650 u32 size = 0;
12651 struct tg3 *tp = netdev_priv(dev);
12652
12653 if (tg3_flag(tp, SUPPORT_MSIX))
12654 size = TG3_RSS_INDIR_TBL_SIZE;
12655
12656 return size;
12657}
12658
12659static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12660{
12661 struct tg3 *tp = netdev_priv(dev);
12662 int i;
12663
12664 if (hfunc)
12665 *hfunc = ETH_RSS_HASH_TOP;
12666 if (!indir)
12667 return 0;
12668
12669 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12670 indir[i] = tp->rss_ind_tbl[i];
12671
12672 return 0;
12673}
12674
12675static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12676 const u8 hfunc)
12677{
12678 struct tg3 *tp = netdev_priv(dev);
12679 size_t i;
12680
12681 /* We require at least one supported parameter to be changed and no
12682 * change in any of the unsupported parameters
12683 */
12684 if (key ||
12685 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12686 return -EOPNOTSUPP;
12687
12688 if (!indir)
12689 return 0;
12690
12691 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12692 tp->rss_ind_tbl[i] = indir[i];
12693
12694 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12695 return 0;
12696
12697 /* It is legal to write the indirection
12698 * table while the device is running.
12699 */
12700 tg3_full_lock(tp, 0);
12701 tg3_rss_write_indir_tbl(tp);
12702 tg3_full_unlock(tp);
12703
12704 return 0;
12705}
12706
12707static void tg3_get_channels(struct net_device *dev,
12708 struct ethtool_channels *channel)
12709{
12710 struct tg3 *tp = netdev_priv(dev);
12711 u32 deflt_qs = netif_get_num_default_rss_queues();
12712
12713 channel->max_rx = tp->rxq_max;
12714 channel->max_tx = tp->txq_max;
12715
12716 if (netif_running(dev)) {
12717 channel->rx_count = tp->rxq_cnt;
12718 channel->tx_count = tp->txq_cnt;
12719 } else {
12720 if (tp->rxq_req)
12721 channel->rx_count = tp->rxq_req;
12722 else
12723 channel->rx_count = min(deflt_qs, tp->rxq_max);
12724
12725 if (tp->txq_req)
12726 channel->tx_count = tp->txq_req;
12727 else
12728 channel->tx_count = min(deflt_qs, tp->txq_max);
12729 }
12730}
12731
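/*
 * Changing the queue counts cannot be done on the fly: the device is fully
 * stopped and restarted so the interrupt vectors and rings are reallocated.
 */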
12732static int tg3_set_channels(struct net_device *dev,
12733 struct ethtool_channels *channel)
12734{
12735 struct tg3 *tp = netdev_priv(dev);
12736
12737 if (!tg3_flag(tp, SUPPORT_MSIX))
12738 return -EOPNOTSUPP;
12739
12740 if (channel->rx_count > tp->rxq_max ||
12741 channel->tx_count > tp->txq_max)
12742 return -EINVAL;
12743
12744 tp->rxq_req = channel->rx_count;
12745 tp->txq_req = channel->tx_count;
12746
12747 if (!netif_running(dev))
12748 return 0;
12749
12750 tg3_stop(tp);
12751
12752 tg3_carrier_off(tp);
12753
12754 tg3_start(tp, true, false, false);
12755
12756 return 0;
12757}
12758
12759static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12760{
12761 switch (stringset) {
12762 case ETH_SS_STATS:
12763 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12764 break;
12765 case ETH_SS_TEST:
12766 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12767 break;
12768 default:
12769		WARN_ON(1);	/* unknown stringset */
12770 break;
12771 }
12772}
12773
12774static int tg3_set_phys_id(struct net_device *dev,
12775 enum ethtool_phys_id_state state)
12776		netdev_err(dev, "Failed to close device. PCI error recovery in progress\n");
12778
12779 if (!netif_running(tp->dev))
12780 return -EAGAIN;
12781
12782 switch (state) {
12783 case ETHTOOL_ID_ACTIVE:
12784 return 1; /* cycle on/off once per second */
12785
12786 case ETHTOOL_ID_ON:
12787 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12788 LED_CTRL_1000MBPS_ON |
12789 LED_CTRL_100MBPS_ON |
12790 LED_CTRL_10MBPS_ON |
12791 LED_CTRL_TRAFFIC_OVERRIDE |
12792 LED_CTRL_TRAFFIC_BLINK |
12793 LED_CTRL_TRAFFIC_LED);
12794 break;
12795
12796 case ETHTOOL_ID_OFF:
12797 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12798 LED_CTRL_TRAFFIC_OVERRIDE);
12799 break;
12800
12801 case ETHTOOL_ID_INACTIVE:
12802 tw32(MAC_LED_CTRL, tp->led_ctrl);
12803 break;
12804 }
12805
12806 return 0;
12807}
12808
12809static void tg3_get_ethtool_stats(struct net_device *dev,
12810 struct ethtool_stats *estats, u64 *tmp_stats)
12811{
12812 struct tg3 *tp = netdev_priv(dev);
12813
12814 if (tp->hw_stats)
12815 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12816 else
12817 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12818}
12819
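/*
 * Locate and read the VPD block.  An extended-VPD directory entry in NVRAM
 * takes precedence, falling back to the fixed legacy offset; images without
 * the EEPROM magic are read through the PCI VPD capability instead.
 */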
12820static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12821{
12822 int i;
12823 __be32 *buf;
12824 u32 offset = 0, len = 0;
12825 u32 magic, val;
12826
12827 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12828 return NULL;
12829
12830 if (magic == TG3_EEPROM_MAGIC) {
12831 for (offset = TG3_NVM_DIR_START;
12832 offset < TG3_NVM_DIR_END;
12833 offset += TG3_NVM_DIRENT_SIZE) {
12834 if (tg3_nvram_read(tp, offset, &val))
12835 return NULL;
12836
12837 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12838 TG3_NVM_DIRTYPE_EXTVPD)
12839 break;
12840 }
12841
12842 if (offset != TG3_NVM_DIR_END) {
12843 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12844 if (tg3_nvram_read(tp, offset + 4, &offset))
12845 return NULL;
12846
12847 offset = tg3_nvram_logical_addr(tp, offset);
12848 }
12849 }
12850
12851 if (!offset || !len) {
12852 offset = TG3_NVM_VPD_OFF;
12853 len = TG3_NVM_VPD_LEN;
12854 }
12855
12856 buf = kmalloc(len, GFP_KERNEL);
12857 if (buf == NULL)
12858 return NULL;
12859
12860 if (magic == TG3_EEPROM_MAGIC) {
12861 for (i = 0; i < len; i += 4) {
12862 /* The data is in little-endian format in NVRAM.
12863 * Use the big-endian read routines to preserve
12864 * the byte order as it exists in NVRAM.
12865 */
12866 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12867 goto error;
12868 }
12869 } else {
12870 u8 *ptr;
12871 ssize_t cnt;
12872 unsigned int pos = 0;
12873
12874 ptr = (u8 *)&buf[0];
12875 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12876 cnt = pci_read_vpd(tp->pdev, pos,
12877 len - pos, ptr);
12878 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12879 cnt = 0;
12880 else if (cnt < 0)
12881 goto error;
12882 }
12883 if (pos != len)
12884 goto error;
12885 }
12886
12887 *vpdlen = len;
12888
12889 return buf;
12890
12891error:
12892 kfree(buf);
12893 return NULL;
12894}
12895
12896#define NVRAM_TEST_SIZE 0x100
12897#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12898#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12899#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12900#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12901#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12902#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12903#define NVRAM_SELFBOOT_HW_SIZE 0x20
12904#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12905
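/*
 * NVRAM self-test.  The image size is derived from the magic and format
 * revision, then the appropriate integrity check is applied: an 8-bit sum
 * for selfboot format 1 images (skipping the MBA word on rev 2), odd parity
 * for hardware selfboot images, or CRC32 over the bootstrap and
 * manufacturing blocks plus the VPD checksum keyword for the legacy layout.
 */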
12906static int tg3_test_nvram(struct tg3 *tp)
12907{
12908 u32 csum, magic, len;
12909 __be32 *buf;
12910 int i, j, k, err = 0, size;
12911
12912 if (tg3_flag(tp, NO_NVRAM))
12913 return 0;
12914
12915 if (tg3_nvram_read(tp, 0, &magic) != 0)
12916 return -EIO;
12917
12918 if (magic == TG3_EEPROM_MAGIC)
12919 size = NVRAM_TEST_SIZE;
12920 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12921 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12922 TG3_EEPROM_SB_FORMAT_1) {
12923 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12924 case TG3_EEPROM_SB_REVISION_0:
12925 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12926 break;
12927 case TG3_EEPROM_SB_REVISION_2:
12928 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12929 break;
12930 case TG3_EEPROM_SB_REVISION_3:
12931 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12932 break;
12933 case TG3_EEPROM_SB_REVISION_4:
12934 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12935 break;
12936 case TG3_EEPROM_SB_REVISION_5:
12937 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12938 break;
12939 case TG3_EEPROM_SB_REVISION_6:
12940 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12941 break;
12942 default:
12943 return -EIO;
12944 }
12945 } else
12946 return 0;
12947 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12948 size = NVRAM_SELFBOOT_HW_SIZE;
12949 else
12950 return -EIO;
12951
12952 buf = kmalloc(size, GFP_KERNEL);
12953 if (buf == NULL)
12954 return -ENOMEM;
12955
12956 err = -EIO;
12957 for (i = 0, j = 0; i < size; i += 4, j++) {
12958 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12959 if (err)
12960 break;
12961 }
12962 if (i < size)
12963 goto out;
12964
12965 /* Selfboot format */
12966 magic = be32_to_cpu(buf[0]);
12967 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12968 TG3_EEPROM_MAGIC_FW) {
12969 u8 *buf8 = (u8 *) buf, csum8 = 0;
12970
12971 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12972 TG3_EEPROM_SB_REVISION_2) {
12973 /* For rev 2, the csum doesn't include the MBA. */
12974 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12975 csum8 += buf8[i];
12976 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12977 csum8 += buf8[i];
12978 } else {
12979 for (i = 0; i < size; i++)
12980 csum8 += buf8[i];
12981 }
12982
12983 if (csum8 == 0) {
12984 err = 0;
12985 goto out;
12986 }
12987
12988 err = -EIO;
12989 goto out;
12990 }
12991
12992 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12993 TG3_EEPROM_MAGIC_HW) {
12994 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12995 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12996 u8 *buf8 = (u8 *) buf;
12997
12998 /* Separate the parity bits and the data bytes. */
12999 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13000 if ((i == 0) || (i == 8)) {
13001 int l;
13002 u8 msk;
13003
13004 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13005 parity[k++] = buf8[i] & msk;
13006 i++;
13007 } else if (i == 16) {
13008 int l;
13009 u8 msk;
13010
13011 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13012 parity[k++] = buf8[i] & msk;
13013 i++;
13014
13015 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13016 parity[k++] = buf8[i] & msk;
13017 i++;
13018 }
13019 data[j++] = buf8[i];
13020 }
13021
13022 err = -EIO;
13023 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13024 u8 hw8 = hweight8(data[i]);
13025
13026 if ((hw8 & 0x1) && parity[i])
13027 goto out;
13028 else if (!(hw8 & 0x1) && !parity[i])
13029 goto out;
13030 }
13031 err = 0;
13032 goto out;
13033 }
13034
13035 err = -EIO;
13036
13037 /* Bootstrap checksum at offset 0x10 */
13038 csum = calc_crc((unsigned char *) buf, 0x10);
13039 if (csum != le32_to_cpu(buf[0x10/4]))
13040 goto out;
13041
13042 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13043 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13044 if (csum != le32_to_cpu(buf[0xfc/4]))
13045 goto out;
13046
13047 kfree(buf);
13048
13049 buf = tg3_vpd_readblock(tp, &len);
13050 if (!buf)
13051 return -ENOMEM;
13052
13053 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13054 if (i > 0) {
13055 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13056 if (j < 0)
13057 goto out;
13058
13059 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13060 goto out;
13061
13062 i += PCI_VPD_LRDT_TAG_SIZE;
13063 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13064 PCI_VPD_RO_KEYWORD_CHKSUM);
13065 if (j > 0) {
13066 u8 csum8 = 0;
13067
13068 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13069
13070 for (i = 0; i <= j; i++)
13071 csum8 += ((u8 *)buf)[i];
13072
13073 if (csum8)
13074 goto out;
13075 }
13076 }
13077
13078 err = 0;
13079
13080out:
13081 kfree(buf);
13082 return err;
13083}
13084
13085#define TG3_SERDES_TIMEOUT_SEC 2
13086#define TG3_COPPER_TIMEOUT_SEC 6
13087
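/*
 * The link test simply polls tp->link_up, allowing up to 2 seconds for
 * SerDes devices and 6 seconds for copper to (re)establish the link.
 */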
13088static int tg3_test_link(struct tg3 *tp)
13089{
13090 int i, max;
13091
13092 if (!netif_running(tp->dev))
13093 return -ENODEV;
13094
13095 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13096 max = TG3_SERDES_TIMEOUT_SEC;
13097 else
13098 max = TG3_COPPER_TIMEOUT_SEC;
13099
13100 for (i = 0; i < max; i++) {
13101 if (tp->link_up)
13102 return 0;
13103
13104 if (msleep_interruptible(1000))
13105 break;
13106 }
13107
13108 return -EIO;
13109}
13110
13111/* Only test the commonly used registers */
13112static int tg3_test_registers(struct tg3 *tp)
13113{
13114 int i, is_5705, is_5750;
13115 u32 offset, read_mask, write_mask, val, save_val, read_val;
13116 static struct {
13117 u16 offset;
13118 u16 flags;
13119#define TG3_FL_5705 0x1
13120#define TG3_FL_NOT_5705 0x2
13121#define TG3_FL_NOT_5788 0x4
13122#define TG3_FL_NOT_5750 0x8
13123 u32 read_mask;
13124 u32 write_mask;
13125 } reg_tbl[] = {
13126 /* MAC Control Registers */
13127 { MAC_MODE, TG3_FL_NOT_5705,
13128 0x00000000, 0x00ef6f8c },
13129 { MAC_MODE, TG3_FL_5705,
13130 0x00000000, 0x01ef6b8c },
13131 { MAC_STATUS, TG3_FL_NOT_5705,
13132 0x03800107, 0x00000000 },
13133 { MAC_STATUS, TG3_FL_5705,
13134 0x03800100, 0x00000000 },
13135 { MAC_ADDR_0_HIGH, 0x0000,
13136 0x00000000, 0x0000ffff },
13137 { MAC_ADDR_0_LOW, 0x0000,
13138 0x00000000, 0xffffffff },
13139 { MAC_RX_MTU_SIZE, 0x0000,
13140 0x00000000, 0x0000ffff },
13141 { MAC_TX_MODE, 0x0000,
13142 0x00000000, 0x00000070 },
13143 { MAC_TX_LENGTHS, 0x0000,
13144 0x00000000, 0x00003fff },
13145 { MAC_RX_MODE, TG3_FL_NOT_5705,
13146 0x00000000, 0x000007fc },
13147 { MAC_RX_MODE, TG3_FL_5705,
13148 0x00000000, 0x000007dc },
13149 { MAC_HASH_REG_0, 0x0000,
13150 0x00000000, 0xffffffff },
13151 { MAC_HASH_REG_1, 0x0000,
13152 0x00000000, 0xffffffff },
13153 { MAC_HASH_REG_2, 0x0000,
13154 0x00000000, 0xffffffff },
13155 { MAC_HASH_REG_3, 0x0000,
13156 0x00000000, 0xffffffff },
13157
13158 /* Receive Data and Receive BD Initiator Control Registers. */
13159 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13160 0x00000000, 0xffffffff },
13161 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13162 0x00000000, 0xffffffff },
13163 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13164 0x00000000, 0x00000003 },
13165 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13166 0x00000000, 0xffffffff },
13167 { RCVDBDI_STD_BD+0, 0x0000,
13168 0x00000000, 0xffffffff },
13169 { RCVDBDI_STD_BD+4, 0x0000,
13170 0x00000000, 0xffffffff },
13171 { RCVDBDI_STD_BD+8, 0x0000,
13172 0x00000000, 0xffff0002 },
13173 { RCVDBDI_STD_BD+0xc, 0x0000,
13174 0x00000000, 0xffffffff },
13175
13176 /* Receive BD Initiator Control Registers. */
13177 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13178 0x00000000, 0xffffffff },
13179 { RCVBDI_STD_THRESH, TG3_FL_5705,
13180 0x00000000, 0x000003ff },
13181 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13182 0x00000000, 0xffffffff },
13183
13184 /* Host Coalescing Control Registers. */
13185 { HOSTCC_MODE, TG3_FL_NOT_5705,
13186 0x00000000, 0x00000004 },
13187 { HOSTCC_MODE, TG3_FL_5705,
13188 0x00000000, 0x000000f6 },
13189 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13190 0x00000000, 0xffffffff },
13191 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13192 0x00000000, 0x000003ff },
13193 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13194 0x00000000, 0xffffffff },
13195 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13196 0x00000000, 0x000003ff },
13197 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13198 0x00000000, 0xffffffff },
13199 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13200 0x00000000, 0x000000ff },
13201 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13202 0x00000000, 0xffffffff },
13203 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13204 0x00000000, 0x000000ff },
13205 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13206 0x00000000, 0xffffffff },
13207 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13208 0x00000000, 0xffffffff },
13209 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13210 0x00000000, 0xffffffff },
13211 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13212 0x00000000, 0x000000ff },
13213 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13214 0x00000000, 0xffffffff },
13215 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13216 0x00000000, 0x000000ff },
13217 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13218 0x00000000, 0xffffffff },
13219 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13220 0x00000000, 0xffffffff },
13221 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13222 0x00000000, 0xffffffff },
13223 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13224 0x00000000, 0xffffffff },
13225 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13226 0x00000000, 0xffffffff },
13227 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13228 0xffffffff, 0x00000000 },
13229 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13230 0xffffffff, 0x00000000 },
13231
13232 /* Buffer Manager Control Registers. */
13233 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13234 0x00000000, 0x007fff80 },
13235 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13236 0x00000000, 0x007fffff },
13237 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13238 0x00000000, 0x0000003f },
13239 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13240 0x00000000, 0x000001ff },
13241 { BUFMGR_MB_HIGH_WATER, 0x0000,
13242 0x00000000, 0x000001ff },
13243 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13244 0xffffffff, 0x00000000 },
13245 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13246 0xffffffff, 0x00000000 },
13247
13248 /* Mailbox Registers */
13249 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13250 0x00000000, 0x000001ff },
13251 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13252 0x00000000, 0x000001ff },
13253 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13254 0x00000000, 0x000007ff },
13255 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13256 0x00000000, 0x000001ff },
13257
13258 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13259 };
13260
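	/* For each register that applies to this ASIC revision, write all
	 * zeros and then the full read/write mask, verifying after each
	 * write that read-only bits kept their saved value and writable
	 * bits took the pattern; the original contents are restored before
	 * moving on.
	 */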
13261 is_5705 = is_5750 = 0;
13262 if (tg3_flag(tp, 5705_PLUS)) {
13263 is_5705 = 1;
13264 if (tg3_flag(tp, 5750_PLUS))
13265 is_5750 = 1;
13266 }
13267
13268 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13269 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13270 continue;
13271
13272 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13273 continue;
13274
13275 if (tg3_flag(tp, IS_5788) &&
13276 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13277 continue;
13278
13279 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13280 continue;
13281
13282 offset = (u32) reg_tbl[i].offset;
13283 read_mask = reg_tbl[i].read_mask;
13284 write_mask = reg_tbl[i].write_mask;
13285
13286 /* Save the original register content */
13287 save_val = tr32(offset);
13288
13289 /* Determine the read-only value. */
13290 read_val = save_val & read_mask;
13291
13292 /* Write zero to the register, then make sure the read-only bits
13293 * are not changed and the read/write bits are all zeros.
13294 */
13295 tw32(offset, 0);
13296
13297 val = tr32(offset);
13298
13299 /* Test the read-only and read/write bits. */
13300 if (((val & read_mask) != read_val) || (val & write_mask))
13301 goto out;
13302
13303 /* Write ones to all the bits defined by RdMask and WrMask, then
13304 * make sure the read-only bits are not changed and the
13305 * read/write bits are all ones.
13306 */
13307 tw32(offset, read_mask | write_mask);
13308
13309 val = tr32(offset);
13310
13311 /* Test the read-only bits. */
13312 if ((val & read_mask) != read_val)
13313 goto out;
13314
13315 /* Test the read/write bits. */
13316 if ((val & write_mask) != write_mask)
13317 goto out;
13318
13319 tw32(offset, save_val);
13320 }
13321
13322 return 0;
13323
13324out:
13325 if (netif_msg_hw(tp))
13326 netdev_err(tp->dev,
13327 "Register test failed at offset %x\n", offset);
13328 tw32(offset, save_val);
13329 return -EIO;
13330}
13331
13332static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13333{
13334 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13335 int i;
13336 u32 j;
13337
13338 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13339 for (j = 0; j < len; j += 4) {
13340 u32 val;
13341
13342 tg3_write_mem(tp, offset + j, test_pattern[i]);
13343 tg3_read_mem(tp, offset + j, &val);
13344 if (val != test_pattern[i])
13345 return -EIO;
13346 }
13347 }
13348 return 0;
13349}
13350
13351static int tg3_test_memory(struct tg3 *tp)
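/*
 * Walk the per-ASIC list of internal SRAM windows, writing and reading back
 * the 0x00000000 / 0xffffffff / 0xaa55a55a patterns via tg3_do_mem_test().
 */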
13352{
13353 static struct mem_entry {
13354 u32 offset;
13355 u32 len;
13356 } mem_tbl_570x[] = {
13357 { 0x00000000, 0x00b50},
13358 { 0x00002000, 0x1c000},
13359 { 0xffffffff, 0x00000}
13360 }, mem_tbl_5705[] = {
13361 { 0x00000100, 0x0000c},
13362 { 0x00000200, 0x00008},
13363 { 0x00004000, 0x00800},
13364 { 0x00006000, 0x01000},
13365 { 0x00008000, 0x02000},
13366 { 0x00010000, 0x0e000},
13367 { 0xffffffff, 0x00000}
13368 }, mem_tbl_5755[] = {
13369 { 0x00000200, 0x00008},
13370 { 0x00004000, 0x00800},
13371 { 0x00006000, 0x00800},
13372 { 0x00008000, 0x02000},
13373 { 0x00010000, 0x0c000},
13374 { 0xffffffff, 0x00000}
13375 }, mem_tbl_5906[] = {
13376 { 0x00000200, 0x00008},
13377 { 0x00004000, 0x00400},
13378 { 0x00006000, 0x00400},
13379 { 0x00008000, 0x01000},
13380 { 0x00010000, 0x01000},
13381 { 0xffffffff, 0x00000}
13382 }, mem_tbl_5717[] = {
13383 { 0x00000200, 0x00008},
13384 { 0x00010000, 0x0a000},
13385 { 0x00020000, 0x13c00},
13386 { 0xffffffff, 0x00000}
13387 }, mem_tbl_57765[] = {
13388 { 0x00000200, 0x00008},
13389 { 0x00004000, 0x00800},
13390 { 0x00006000, 0x09800},
13391 { 0x00010000, 0x0a000},
13392 { 0xffffffff, 0x00000}
13393 };
13394 struct mem_entry *mem_tbl;
13395 int err = 0;
13396 int i;
13397
13398 if (tg3_flag(tp, 5717_PLUS))
13399 mem_tbl = mem_tbl_5717;
13400 else if (tg3_flag(tp, 57765_CLASS) ||
13401 tg3_asic_rev(tp) == ASIC_REV_5762)
13402 mem_tbl = mem_tbl_57765;
13403 else if (tg3_flag(tp, 5755_PLUS))
13404 mem_tbl = mem_tbl_5755;
13405 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13406 mem_tbl = mem_tbl_5906;
13407 else if (tg3_flag(tp, 5705_PLUS))
13408 mem_tbl = mem_tbl_5705;
13409 else
13410 mem_tbl = mem_tbl_570x;
13411
13412 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13413 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13414 if (err)
13415 break;
13416 }
13417
13418 return err;
13419}
13420
13421#define TG3_TSO_MSS 500
13422
13423#define TG3_TSO_IP_HDR_LEN 20
13424#define TG3_TSO_TCP_HDR_LEN 20
13425#define TG3_TSO_TCP_OPT_LEN 12
13426
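/*
 * Canned loopback TSO frame header: an IPv4 ethertype followed by a 20-byte
 * IP header (10.0.0.1 -> 10.0.0.2, protocol TCP) and a 32-byte TCP header
 * that carries 12 bytes of timestamp options.  tg3_run_loopback() patches
 * the IP total length (and, for hardware TSO, clears the TCP checksum)
 * before transmission.
 */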
13427static const u8 tg3_tso_header[] = {
134280x08, 0x00,
134290x45, 0x00, 0x00, 0x00,
134300x00, 0x00, 0x40, 0x00,
134310x40, 0x06, 0x00, 0x00,
134320x0a, 0x00, 0x00, 0x01,
134330x0a, 0x00, 0x00, 0x02,
134340x0d, 0x00, 0xe0, 0x00,
134350x00, 0x00, 0x01, 0x00,
134360x00, 0x00, 0x02, 0x00,
134370x80, 0x10, 0x10, 0x00,
134380x14, 0x09, 0x00, 0x00,
134390x01, 0x01, 0x08, 0x0a,
134400x11, 0x11, 0x11, 0x11,
134410x11, 0x11, 0x11, 0x11,
13442};
13443
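/*
 * Transmit one self-addressed test frame and verify that it returns on the
 * expected RX ring: post the buffer, kick the coalescing engine, poll up to
 * 350 usec for the TX consumer and RX producer indices to advance, then
 * compare the received payload byte-for-byte against the generated pattern.
 */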
13444static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13445{
13446 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13447 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13448 u32 budget;
13449 struct sk_buff *skb;
13450 u8 *tx_data, *rx_data;
13451 dma_addr_t map;
13452 int num_pkts, tx_len, rx_len, i, err;
13453 struct tg3_rx_buffer_desc *desc;
13454 struct tg3_napi *tnapi, *rnapi;
13455 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13456
13457 tnapi = &tp->napi[0];
13458 rnapi = &tp->napi[0];
13459 if (tp->irq_cnt > 1) {
13460 if (tg3_flag(tp, ENABLE_RSS))
13461 rnapi = &tp->napi[1];
13462 if (tg3_flag(tp, ENABLE_TSS))
13463 tnapi = &tp->napi[1];
13464 }
13465 coal_now = tnapi->coal_now | rnapi->coal_now;
13466
13467 err = -EIO;
13468
13469 tx_len = pktsz;
13470 skb = netdev_alloc_skb(tp->dev, tx_len);
13471 if (!skb)
13472 return -ENOMEM;
13473
13474 tx_data = skb_put(skb, tx_len);
13475 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13476 memset(tx_data + ETH_ALEN, 0x0, 8);
13477
13478 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13479
13480 if (tso_loopback) {
13481 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13482
13483 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13484 TG3_TSO_TCP_OPT_LEN;
13485
13486 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13487 sizeof(tg3_tso_header));
13488 mss = TG3_TSO_MSS;
13489
13490 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13491 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13492
13493 /* Set the total length field in the IP header */
13494 iph->tot_len = htons((u16)(mss + hdr_len));
13495
13496 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13497 TXD_FLAG_CPU_POST_DMA);
13498
13499 if (tg3_flag(tp, HW_TSO_1) ||
13500 tg3_flag(tp, HW_TSO_2) ||
13501 tg3_flag(tp, HW_TSO_3)) {
13502 struct tcphdr *th;
13503 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13504 th = (struct tcphdr *)&tx_data[val];
13505 th->check = 0;
13506 } else
13507 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13508
13509 if (tg3_flag(tp, HW_TSO_3)) {
13510 mss |= (hdr_len & 0xc) << 12;
13511 if (hdr_len & 0x10)
13512 base_flags |= 0x00000010;
13513 base_flags |= (hdr_len & 0x3e0) << 5;
13514 } else if (tg3_flag(tp, HW_TSO_2))
13515 mss |= hdr_len << 9;
13516 else if (tg3_flag(tp, HW_TSO_1) ||
13517 tg3_asic_rev(tp) == ASIC_REV_5705) {
13518 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13519 } else {
13520 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13521 }
13522
13523 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13524 } else {
13525 num_pkts = 1;
13526 data_off = ETH_HLEN;
13527
13528 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13529 tx_len > VLAN_ETH_FRAME_LEN)
13530 base_flags |= TXD_FLAG_JMB_PKT;
13531 }
13532
13533 for (i = data_off; i < tx_len; i++)
13534 tx_data[i] = (u8) (i & 0xff);
13535
13536 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13537 if (pci_dma_mapping_error(tp->pdev, map)) {
13538 dev_kfree_skb(skb);
13539 return -EIO;
13540 }
13541
13542 val = tnapi->tx_prod;
13543 tnapi->tx_buffers[val].skb = skb;
13544 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13545
13546 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13547 rnapi->coal_now);
13548
13549 udelay(10);
13550
13551 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13552
13553 budget = tg3_tx_avail(tnapi);
13554 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13555 base_flags | TXD_FLAG_END, mss, 0)) {
13556 tnapi->tx_buffers[val].skb = NULL;
13557 dev_kfree_skb(skb);
13558 return -EIO;
13559 }
13560
13561 tnapi->tx_prod++;
13562
13563 /* Sync BD data before updating mailbox */
13564 wmb();
13565
13566 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13567 tr32_mailbox(tnapi->prodmbox);
13568
13569 udelay(10);
13570
13571 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13572 for (i = 0; i < 35; i++) {
13573 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13574 coal_now);
13575
13576 udelay(10);
13577
13578 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13579 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13580 if ((tx_idx == tnapi->tx_prod) &&
13581 (rx_idx == (rx_start_idx + num_pkts)))
13582 break;
13583 }
13584
13585 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13586 dev_kfree_skb(skb);
13587
13588 if (tx_idx != tnapi->tx_prod)
13589 goto out;
13590
13591 if (rx_idx != rx_start_idx + num_pkts)
13592 goto out;
13593
13594 val = data_off;
13595 while (rx_idx != rx_start_idx) {
13596 desc = &rnapi->rx_rcb[rx_start_idx++];
13597 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13598 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13599
13600 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13601 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13602 goto out;
13603
13604 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13605 - ETH_FCS_LEN;
13606
13607 if (!tso_loopback) {
13608 if (rx_len != tx_len)
13609 goto out;
13610
13611 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13612 if (opaque_key != RXD_OPAQUE_RING_STD)
13613 goto out;
13614 } else {
13615 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13616 goto out;
13617 }
13618 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13619 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13620 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13621 goto out;
13622 }
13623
13624 if (opaque_key == RXD_OPAQUE_RING_STD) {
13625 rx_data = tpr->rx_std_buffers[desc_idx].data;
13626 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13627 mapping);
13628 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13629 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13630 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13631 mapping);
13632 } else
13633 goto out;
13634
13635 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13636 PCI_DMA_FROMDEVICE);
13637
13638 rx_data += TG3_RX_OFFSET(tp);
13639 for (i = data_off; i < rx_len; i++, val++) {
13640 if (*(rx_data + i) != (u8) (val & 0xff))
13641 goto out;
13642 }
13643 }
13644
13645 err = 0;
13646
13647 /* tg3_free_rings will unmap and free the rx_data */
13648out:
13649 return err;
13650}
13651
13652#define TG3_STD_LOOPBACK_FAILED 1
13653#define TG3_JMB_LOOPBACK_FAILED 2
13654#define TG3_TSO_LOOPBACK_FAILED 4
13655#define TG3_LOOPBACK_FAILED \
13656 (TG3_STD_LOOPBACK_FAILED | \
13657 TG3_JMB_LOOPBACK_FAILED | \
13658 TG3_TSO_LOOPBACK_FAILED)
13659
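/*
 * Run the MAC, PHY and (optionally) external loopback passes with standard,
 * TSO and jumbo frame sizes as applicable.  EEE is masked off for the
 * duration and, with RSS enabled, all receive traffic is steered to the
 * first queue so the test knows where its frames will land.
 */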
13660static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13661{
13662 int err = -EIO;
13663 u32 eee_cap;
13664 u32 jmb_pkt_sz = 9000;
13665
13666 if (tp->dma_limit)
13667 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13668
13669 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13670 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13671
13672 if (!netif_running(tp->dev)) {
13673 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13674 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13675 if (do_extlpbk)
13676 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13677 goto done;
13678 }
13679
13680 err = tg3_reset_hw(tp, true);
13681 if (err) {
13682 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13683 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13684 if (do_extlpbk)
13685 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13686 goto done;
13687 }
13688
13689 if (tg3_flag(tp, ENABLE_RSS)) {
13690 int i;
13691
13692 /* Reroute all rx packets to the 1st queue */
13693 for (i = MAC_RSS_INDIR_TBL_0;
13694 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13695 tw32(i, 0x0);
13696 }
13697
13698	/* HW erratum - MAC loopback fails in some cases on 5780.
13699	 * Normal traffic and PHY loopback are not affected by the
13700	 * erratum. Also, the MAC loopback test is deprecated for
13701	 * all newer ASIC revisions.
13702	 */
13703 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13704 !tg3_flag(tp, CPMU_PRESENT)) {
13705 tg3_mac_loopback(tp, true);
13706
13707 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13708 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13709
13710 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13711 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13712 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13713
13714 tg3_mac_loopback(tp, false);
13715 }
13716
13717 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13718 !tg3_flag(tp, USE_PHYLIB)) {
13719 int i;
13720
13721 tg3_phy_lpbk_set(tp, 0, false);
13722
13723 /* Wait for link */
13724 for (i = 0; i < 100; i++) {
13725 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13726 break;
13727 mdelay(1);
13728 }
13729
13730 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13731 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13732 if (tg3_flag(tp, TSO_CAPABLE) &&
13733 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13734 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13735 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13736 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13737 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13738
13739 if (do_extlpbk) {
13740 tg3_phy_lpbk_set(tp, 0, true);
13741
13742 /* All link indications report up, but the hardware
13743 * isn't really ready for about 20 msec. Double it
13744 * to be sure.
13745 */
13746 mdelay(40);
13747
13748 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13749 data[TG3_EXT_LOOPB_TEST] |=
13750 TG3_STD_LOOPBACK_FAILED;
13751 if (tg3_flag(tp, TSO_CAPABLE) &&
13752 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13753 data[TG3_EXT_LOOPB_TEST] |=
13754 TG3_TSO_LOOPBACK_FAILED;
13755 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13756 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13757 data[TG3_EXT_LOOPB_TEST] |=
13758 TG3_JMB_LOOPBACK_FAILED;
13759 }
13760
13761 /* Re-enable gphy autopowerdown. */
13762 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13763 tg3_phy_toggle_apd(tp, true);
13764 }
13765
13766 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13767 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13768
13769done:
13770 tp->phy_flags |= eee_cap;
13771
13772 return err;
13773}
13774
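/*
 * ethtool self-test entry point.  NVRAM and link are checked online; the
 * offline pass additionally halts the chip for the register, memory,
 * loopback and interrupt tests, then restarts the hardware if the interface
 * was running.
 */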
13775static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13776 u64 *data)
13777{
13778 struct tg3 *tp = netdev_priv(dev);
13779 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13780
13781 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13782 if (tg3_power_up(tp)) {
13783 etest->flags |= ETH_TEST_FL_FAILED;
13784 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13785 return;
13786 }
13787 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13788 }
13789
13790 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13791
13792 if (tg3_test_nvram(tp) != 0) {
13793 etest->flags |= ETH_TEST_FL_FAILED;
13794 data[TG3_NVRAM_TEST] = 1;
13795 }
13796 if (!doextlpbk && tg3_test_link(tp)) {
13797 etest->flags |= ETH_TEST_FL_FAILED;
13798 data[TG3_LINK_TEST] = 1;
13799 }
13800 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13801 int err, err2 = 0, irq_sync = 0;
13802
13803 if (netif_running(dev)) {
13804 tg3_phy_stop(tp);
13805 tg3_netif_stop(tp);
13806 irq_sync = 1;
13807 }
13808
13809 tg3_full_lock(tp, irq_sync);
13810 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13811 err = tg3_nvram_lock(tp);
13812 tg3_halt_cpu(tp, RX_CPU_BASE);
13813 if (!tg3_flag(tp, 5705_PLUS))
13814 tg3_halt_cpu(tp, TX_CPU_BASE);
13815 if (!err)
13816 tg3_nvram_unlock(tp);
13817
13818 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13819 tg3_phy_reset(tp);
13820
13821 if (tg3_test_registers(tp) != 0) {
13822 etest->flags |= ETH_TEST_FL_FAILED;
13823 data[TG3_REGISTER_TEST] = 1;
13824 }
13825
13826 if (tg3_test_memory(tp) != 0) {
13827 etest->flags |= ETH_TEST_FL_FAILED;
13828 data[TG3_MEMORY_TEST] = 1;
13829 }
13830
13831 if (doextlpbk)
13832 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13833
13834 if (tg3_test_loopback(tp, data, doextlpbk))
13835 etest->flags |= ETH_TEST_FL_FAILED;
13836
13837 tg3_full_unlock(tp);
13838
13839 if (tg3_test_interrupt(tp) != 0) {
13840 etest->flags |= ETH_TEST_FL_FAILED;
13841 data[TG3_INTERRUPT_TEST] = 1;
13842 }
13843
13844 tg3_full_lock(tp, 0);
13845
13846 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13847 if (netif_running(dev)) {
13848 tg3_flag_set(tp, INIT_COMPLETE);
13849 err2 = tg3_restart_hw(tp, true);
13850 if (!err2)
13851 tg3_netif_start(tp);
13852 }
13853
13854 tg3_full_unlock(tp);
13855
13856 if (irq_sync && !err2)
13857 tg3_phy_start(tp);
13858 }
13859 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13860 tg3_power_down_prepare(tp);
13861
13862}
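
/* Example (userspace, illustrative only -- not part of this driver):
 * the ETHTOOL_TEST ioctl that lands in tg3_self_test() above; this is
 * what "ethtool -t eth0 offline" issues.  The test count is assumed to
 * have been read from the testinfo_len field of ETHTOOL_GDRVINFO.
 *
 *	struct ethtool_test *et;
 *	struct ifreq ifr;
 *
 *	et = calloc(1, sizeof(*et) + testinfo_len * sizeof(__u64));
 *	et->cmd = ETHTOOL_TEST;
 *	et->flags = ETH_TEST_FL_OFFLINE;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)et;
 *	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
 *		perror("ETHTOOL_TEST");
 */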
13863
13864static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13865{
13866 struct tg3 *tp = netdev_priv(dev);
13867 struct hwtstamp_config stmpconf;
13868
13869 if (!tg3_flag(tp, PTP_CAPABLE))
13870 return -EOPNOTSUPP;
13871
13872 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13873 return -EFAULT;
13874
13875 if (stmpconf.flags)
13876 return -EINVAL;
13877
13878 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13879 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13880 return -ERANGE;
13881
13882 switch (stmpconf.rx_filter) {
13883 case HWTSTAMP_FILTER_NONE:
13884 tp->rxptpctl = 0;
13885 break;
13886 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13887 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13888 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13889 break;
13890 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13891 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13892 TG3_RX_PTP_CTL_SYNC_EVNT;
13893 break;
13894 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13895 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13896 TG3_RX_PTP_CTL_DELAY_REQ;
13897 break;
13898 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13899 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13900 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13901 break;
13902 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13903 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13904 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13905 break;
13906 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13907 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13908 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13909 break;
13910 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13911 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13912 TG3_RX_PTP_CTL_SYNC_EVNT;
13913 break;
13914 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13915 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13916 TG3_RX_PTP_CTL_SYNC_EVNT;
13917 break;
13918 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13919 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13920 TG3_RX_PTP_CTL_SYNC_EVNT;
13921 break;
13922 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13923 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13924 TG3_RX_PTP_CTL_DELAY_REQ;
13925 break;
13926 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13927 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13928 TG3_RX_PTP_CTL_DELAY_REQ;
13929 break;
13930 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13931 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13932 TG3_RX_PTP_CTL_DELAY_REQ;
13933 break;
13934 default:
13935 return -ERANGE;
13936 }
13937
13938 if (netif_running(dev) && tp->rxptpctl)
13939 tw32(TG3_RX_PTP_CTL,
13940 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13941
13942 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13943 tg3_flag_set(tp, TX_TSTAMP_EN);
13944 else
13945 tg3_flag_clear(tp, TX_TSTAMP_EN);
13946
13947 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13948 -EFAULT : 0;
13949}
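
/* Example (userspace, illustrative only -- not part of this driver):
 * a SIOCSHWTSTAMP request that exercises tg3_hwtstamp_set() above.
 * The device name and socket descriptor are assumptions.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */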
13950
13951static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13952{
13953 struct tg3 *tp = netdev_priv(dev);
13954 struct hwtstamp_config stmpconf;
13955
13956 if (!tg3_flag(tp, PTP_CAPABLE))
13957 return -EOPNOTSUPP;
13958
13959 stmpconf.flags = 0;
13960 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13961 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13962
13963 switch (tp->rxptpctl) {
13964 case 0:
13965 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13966 break;
13967 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13968 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13969 break;
13970 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13971 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13972 break;
13973 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13974 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13975 break;
13976 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13977 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13978 break;
13979 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13980 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13981 break;
13982 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13983 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13984 break;
13985 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13986 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13987 break;
13988 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13989 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13990 break;
13991 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13992 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13993 break;
13994 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13995 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13996 break;
13997 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13998 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13999 break;
14000 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14001 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14002 break;
14003 default:
14004 WARN_ON_ONCE(1);
14005 return -ERANGE;
14006 }
14007
14008 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14009 -EFAULT : 0;
14010}
14011
14012static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14013{
14014 struct mii_ioctl_data *data = if_mii(ifr);
14015 struct tg3 *tp = netdev_priv(dev);
14016 int err;
14017
14018 if (tg3_flag(tp, USE_PHYLIB)) {
14019 struct phy_device *phydev;
14020 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14021 return -EAGAIN;
14022 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14023 return phy_mii_ioctl(phydev, ifr, cmd);
14024 }
14025
14026 switch (cmd) {
14027 case SIOCGMIIPHY:
14028 data->phy_id = tp->phy_addr;
14029
		/* fall through */
14031 case SIOCGMIIREG: {
14032 u32 mii_regval;
14033
14034 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14035 break; /* We have no PHY */
14036
14037 if (!netif_running(dev))
14038 return -EAGAIN;
14039
14040 spin_lock_bh(&tp->lock);
14041 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14042 data->reg_num & 0x1f, &mii_regval);
14043 spin_unlock_bh(&tp->lock);
14044
14045 data->val_out = mii_regval;
14046
14047 return err;
14048 }
14049
14050 case SIOCSMIIREG:
14051 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14052 break; /* We have no PHY */
14053
14054 if (!netif_running(dev))
14055 return -EAGAIN;
14056
14057 spin_lock_bh(&tp->lock);
14058 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14059 data->reg_num & 0x1f, data->val_in);
14060 spin_unlock_bh(&tp->lock);
14061
14062 return err;
14063
14064 case SIOCSHWTSTAMP:
14065 return tg3_hwtstamp_set(dev, ifr);
14066
14067 case SIOCGHWTSTAMP:
14068 return tg3_hwtstamp_get(dev, ifr);
14069
14070 default:
14071 /* do nothing */
14072 break;
14073 }
14074 return -EOPNOTSUPP;
14075}
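
/* Example (userspace, illustrative only -- not part of this driver):
 * reading a PHY register through the SIOCGMIIPHY/SIOCGMIIREG pair
 * handled above, using the same cast idiom as mii-tool.  The device
 * name and descriptor are assumptions.
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);       (fills mii->phy_id)
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);       (link status in mii->val_out)
 */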
14076
14077static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14078{
14079 struct tg3 *tp = netdev_priv(dev);
14080
14081 memcpy(ec, &tp->coal, sizeof(*ec));
14082 return 0;
14083}
14084
14085static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14086{
14087 struct tg3 *tp = netdev_priv(dev);
14088 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14089 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14090
14091 if (!tg3_flag(tp, 5705_PLUS)) {
14092 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14093 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14094 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14095 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14096 }
14097
14098 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14099 (!ec->rx_coalesce_usecs) ||
14100 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14101 (!ec->tx_coalesce_usecs) ||
14102 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14103 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14104 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14105 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14106 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14107 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14108 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14109 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14110 return -EINVAL;
14111
14112 /* Only copy relevant parameters, ignore all others. */
14113 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14114 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14115 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14116 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14117 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14118 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14119 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14120 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14121 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14122
14123 if (netif_running(dev)) {
14124 tg3_full_lock(tp, 0);
14125 __tg3_set_coalesce(tp, &tp->coal);
14126 tg3_full_unlock(tp);
14127 }
14128 return 0;
14129}
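
/* Example (userspace, illustrative only -- not part of this driver):
 * the legacy ETHTOOL_SCOALESCE ioctl that reaches tg3_set_coalesce()
 * above; equivalent to "ethtool -C eth0 rx-usecs 20".  Names are
 * assumptions.
 *
 *	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&ec;
 *	ioctl(fd, SIOCETHTOOL, &ifr);       (read current settings)
 *	ec.cmd = ETHTOOL_SCOALESCE;
 *	ec.rx_coalesce_usecs = 20;
 *	ioctl(fd, SIOCETHTOOL, &ifr);       (write them back)
 */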
14130
14131static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14132{
14133 struct tg3 *tp = netdev_priv(dev);
14134
14135 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14136 netdev_warn(tp->dev, "Board does not support EEE!\n");
14137 return -EOPNOTSUPP;
14138 }
14139
14140 if (edata->advertised != tp->eee.advertised) {
14141 netdev_warn(tp->dev,
14142 "Direct manipulation of EEE advertisement is not supported\n");
14143 return -EINVAL;
14144 }
14145
14146 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14147 netdev_warn(tp->dev,
14148 "Maximal Tx Lpi timer supported is %#x(u)\n",
14149 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14150 return -EINVAL;
14151 }
14152
14153 tp->eee = *edata;
14154
14155 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14156 tg3_warn_mgmt_link_flap(tp);
14157
14158 if (netif_running(tp->dev)) {
14159 tg3_full_lock(tp, 0);
14160 tg3_setup_eee(tp);
14161 tg3_phy_reset(tp);
14162 tg3_full_unlock(tp);
14163 }
14164
14165 return 0;
14166}
14167
14168static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14169{
14170 struct tg3 *tp = netdev_priv(dev);
14171
14172 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14173 netdev_warn(tp->dev,
14174 "Board does not support EEE!\n");
14175 return -EOPNOTSUPP;
14176 }
14177
14178 *edata = tp->eee;
14179 return 0;
14180}
14181
14182static const struct ethtool_ops tg3_ethtool_ops = {
14183 .get_drvinfo = tg3_get_drvinfo,
14184 .get_regs_len = tg3_get_regs_len,
14185 .get_regs = tg3_get_regs,
14186 .get_wol = tg3_get_wol,
14187 .set_wol = tg3_set_wol,
14188 .get_msglevel = tg3_get_msglevel,
14189 .set_msglevel = tg3_set_msglevel,
14190 .nway_reset = tg3_nway_reset,
14191 .get_link = ethtool_op_get_link,
14192 .get_eeprom_len = tg3_get_eeprom_len,
14193 .get_eeprom = tg3_get_eeprom,
14194 .set_eeprom = tg3_set_eeprom,
14195 .get_ringparam = tg3_get_ringparam,
14196 .set_ringparam = tg3_set_ringparam,
14197 .get_pauseparam = tg3_get_pauseparam,
14198 .set_pauseparam = tg3_set_pauseparam,
14199 .self_test = tg3_self_test,
14200 .get_strings = tg3_get_strings,
14201 .set_phys_id = tg3_set_phys_id,
14202 .get_ethtool_stats = tg3_get_ethtool_stats,
14203 .get_coalesce = tg3_get_coalesce,
14204 .set_coalesce = tg3_set_coalesce,
14205 .get_sset_count = tg3_get_sset_count,
14206 .get_rxnfc = tg3_get_rxnfc,
14207 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14208 .get_rxfh = tg3_get_rxfh,
14209 .set_rxfh = tg3_set_rxfh,
14210 .get_channels = tg3_get_channels,
14211 .set_channels = tg3_set_channels,
14212 .get_ts_info = tg3_get_ts_info,
14213 .get_eee = tg3_get_eee,
14214 .set_eee = tg3_set_eee,
14215 .get_link_ksettings = tg3_get_link_ksettings,
14216 .set_link_ksettings = tg3_set_link_ksettings,
14217};
14218
14219static void tg3_get_stats64(struct net_device *dev,
14220 struct rtnl_link_stats64 *stats)
14221{
14222 struct tg3 *tp = netdev_priv(dev);
14223
14224 spin_lock_bh(&tp->lock);
14225 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14226 *stats = tp->net_stats_prev;
14227 spin_unlock_bh(&tp->lock);
14228 return;
14229 }
14230
14231 tg3_get_nstats(tp, stats);
14232 spin_unlock_bh(&tp->lock);
14233}
14234
14235static void tg3_set_rx_mode(struct net_device *dev)
14236{
14237 struct tg3 *tp = netdev_priv(dev);
14238
14239 if (!netif_running(dev))
14240 return;
14241
14242 tg3_full_lock(tp, 0);
14243 __tg3_set_rx_mode(dev);
14244 tg3_full_unlock(tp);
14245}
14246
14247static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14248 int new_mtu)
14249{
14250 dev->mtu = new_mtu;
14251
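	/* On 5780-class chips, firmware TSO and jumbo rings cannot be
	 * used at the same time, so the TSO capability is toggled along
	 * with the MTU and the netdev feature set is recomputed.
	 */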
14252 if (new_mtu > ETH_DATA_LEN) {
14253 if (tg3_flag(tp, 5780_CLASS)) {
14254 netdev_update_features(dev);
14255 tg3_flag_clear(tp, TSO_CAPABLE);
14256 } else {
14257 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14258 }
14259 } else {
14260 if (tg3_flag(tp, 5780_CLASS)) {
14261 tg3_flag_set(tp, TSO_CAPABLE);
14262 netdev_update_features(dev);
14263 }
14264 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14265 }
14266}
14267
14268static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14269{
14270 struct tg3 *tp = netdev_priv(dev);
14271 int err;
14272 bool reset_phy = false;
14273
14274 if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is brought up.
		 */
14278 tg3_set_mtu(dev, tp, new_mtu);
14279 return 0;
14280 }
14281
14282 tg3_phy_stop(tp);
14283
14284 tg3_netif_stop(tp);
14285
14286 tg3_set_mtu(dev, tp, new_mtu);
14287
14288 tg3_full_lock(tp, 1);
14289
14290 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14291
	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * splits all read DMA requests into 256-byte chunks.
	 */
14295 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14296 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14297 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14298 tg3_asic_rev(tp) == ASIC_REV_5720)
14299 reset_phy = true;
14300
14301 err = tg3_restart_hw(tp, reset_phy);
14302
14303 if (!err)
14304 tg3_netif_start(tp);
14305
14306 tg3_full_unlock(tp);
14307
14308 if (!err)
14309 tg3_phy_start(tp);
14310
14311 return err;
14312}
14313
14314static const struct net_device_ops tg3_netdev_ops = {
14315 .ndo_open = tg3_open,
14316 .ndo_stop = tg3_close,
14317 .ndo_start_xmit = tg3_start_xmit,
14318 .ndo_get_stats64 = tg3_get_stats64,
14319 .ndo_validate_addr = eth_validate_addr,
14320 .ndo_set_rx_mode = tg3_set_rx_mode,
14321 .ndo_set_mac_address = tg3_set_mac_addr,
14322 .ndo_do_ioctl = tg3_ioctl,
14323 .ndo_tx_timeout = tg3_tx_timeout,
14324 .ndo_change_mtu = tg3_change_mtu,
14325 .ndo_fix_features = tg3_fix_features,
14326 .ndo_set_features = tg3_set_features,
14327#ifdef CONFIG_NET_POLL_CONTROLLER
14328 .ndo_poll_controller = tg3_poll_controller,
14329#endif
14330};
14331
14332static void tg3_get_eeprom_size(struct tg3 *tp)
14333{
14334 u32 cursize, val, magic;
14335
14336 tp->nvram_size = EEPROM_CHIP_SIZE;
14337
14338 if (tg3_nvram_read(tp, 0, &magic) != 0)
14339 return;
14340
14341 if ((magic != TG3_EEPROM_MAGIC) &&
14342 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14343 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14344 return;
14345
14346 /*
14347 * Size the chip by reading offsets at increasing powers of two.
14348 * When we encounter our validation signature, we know the addressing
14349 * has wrapped around, and thus have our chip size.
14350 */
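	/* e.g. a 128 KB part aliases offset 0x20000 back to offset 0, so
	 * the probe reads the magic value there and 0x20000 is the size.
	 */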
14351 cursize = 0x10;
14352
14353 while (cursize < tp->nvram_size) {
14354 if (tg3_nvram_read(tp, cursize, &val) != 0)
14355 return;
14356
14357 if (val == magic)
14358 break;
14359
14360 cursize <<= 1;
14361 }
14362
14363 tp->nvram_size = cursize;
14364}
14365
14366static void tg3_get_nvram_size(struct tg3 *tp)
14367{
14368 u32 val;
14369
14370 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14371 return;
14372
14373 /* Selfboot format */
14374 if (val != TG3_EEPROM_MAGIC) {
14375 tg3_get_eeprom_size(tp);
14376 return;
14377 }
14378
14379 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14380 if (val != 0) {
14381 /* This is confusing. We want to operate on the
14382 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14383 * call will read from NVRAM and byteswap the data
14384 * according to the byteswapping settings for all
14385 * other register accesses. This ensures the data we
14386 * want will always reside in the lower 16-bits.
14387 * However, the data in NVRAM is in LE format, which
14388 * means the data from the NVRAM read will always be
14389 * opposite the endianness of the CPU. The 16-bit
14390 * byteswap then brings the data to CPU endianness.
14391 */
14392 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14393 return;
14394 }
14395 }
14396 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14397}
14398
14399static void tg3_get_nvram_info(struct tg3 *tp)
14400{
14401 u32 nvcfg1;
14402
14403 nvcfg1 = tr32(NVRAM_CFG1);
14404 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14405 tg3_flag_set(tp, FLASH);
14406 } else {
14407 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14408 tw32(NVRAM_CFG1, nvcfg1);
14409 }
14410
14411 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14412 tg3_flag(tp, 5780_CLASS)) {
14413 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14414 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14415 tp->nvram_jedecnum = JEDEC_ATMEL;
14416 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14417 tg3_flag_set(tp, NVRAM_BUFFERED);
14418 break;
14419 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14420 tp->nvram_jedecnum = JEDEC_ATMEL;
14421 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14422 break;
14423 case FLASH_VENDOR_ATMEL_EEPROM:
14424 tp->nvram_jedecnum = JEDEC_ATMEL;
14425 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14426 tg3_flag_set(tp, NVRAM_BUFFERED);
14427 break;
14428 case FLASH_VENDOR_ST:
14429 tp->nvram_jedecnum = JEDEC_ST;
14430 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14431 tg3_flag_set(tp, NVRAM_BUFFERED);
14432 break;
14433 case FLASH_VENDOR_SAIFUN:
14434 tp->nvram_jedecnum = JEDEC_SAIFUN;
14435 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14436 break;
14437 case FLASH_VENDOR_SST_SMALL:
14438 case FLASH_VENDOR_SST_LARGE:
14439 tp->nvram_jedecnum = JEDEC_SST;
14440 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14441 break;
14442 }
14443 } else {
14444 tp->nvram_jedecnum = JEDEC_ATMEL;
14445 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14446 tg3_flag_set(tp, NVRAM_BUFFERED);
14447 }
14448}
14449
14450static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14451{
14452 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14453 case FLASH_5752PAGE_SIZE_256:
14454 tp->nvram_pagesize = 256;
14455 break;
14456 case FLASH_5752PAGE_SIZE_512:
14457 tp->nvram_pagesize = 512;
14458 break;
14459 case FLASH_5752PAGE_SIZE_1K:
14460 tp->nvram_pagesize = 1024;
14461 break;
14462 case FLASH_5752PAGE_SIZE_2K:
14463 tp->nvram_pagesize = 2048;
14464 break;
14465 case FLASH_5752PAGE_SIZE_4K:
14466 tp->nvram_pagesize = 4096;
14467 break;
14468 case FLASH_5752PAGE_SIZE_264:
14469 tp->nvram_pagesize = 264;
14470 break;
14471 case FLASH_5752PAGE_SIZE_528:
14472 tp->nvram_pagesize = 528;
14473 break;
14474 }
14475}
14476
14477static void tg3_get_5752_nvram_info(struct tg3 *tp)
14478{
14479 u32 nvcfg1;
14480
14481 nvcfg1 = tr32(NVRAM_CFG1);
14482
14483 /* NVRAM protection for TPM */
14484 if (nvcfg1 & (1 << 27))
14485 tg3_flag_set(tp, PROTECTED_NVRAM);
14486
14487 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14488 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14489 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14490 tp->nvram_jedecnum = JEDEC_ATMEL;
14491 tg3_flag_set(tp, NVRAM_BUFFERED);
14492 break;
14493 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14494 tp->nvram_jedecnum = JEDEC_ATMEL;
14495 tg3_flag_set(tp, NVRAM_BUFFERED);
14496 tg3_flag_set(tp, FLASH);
14497 break;
14498 case FLASH_5752VENDOR_ST_M45PE10:
14499 case FLASH_5752VENDOR_ST_M45PE20:
14500 case FLASH_5752VENDOR_ST_M45PE40:
14501 tp->nvram_jedecnum = JEDEC_ST;
14502 tg3_flag_set(tp, NVRAM_BUFFERED);
14503 tg3_flag_set(tp, FLASH);
14504 break;
14505 }
14506
14507 if (tg3_flag(tp, FLASH)) {
14508 tg3_nvram_get_pagesize(tp, nvcfg1);
14509 } else {
		/* For EEPROMs, set the pagesize to the maximum EEPROM size. */
14511 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14512
14513 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14514 tw32(NVRAM_CFG1, nvcfg1);
14515 }
14516}
14517
14518static void tg3_get_5755_nvram_info(struct tg3 *tp)
14519{
14520 u32 nvcfg1, protect = 0;
14521
14522 nvcfg1 = tr32(NVRAM_CFG1);
14523
14524 /* NVRAM protection for TPM */
14525 if (nvcfg1 & (1 << 27)) {
14526 tg3_flag_set(tp, PROTECTED_NVRAM);
14527 protect = 1;
14528 }
14529
14530 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14531 switch (nvcfg1) {
14532 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14533 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14534 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14535 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14536 tp->nvram_jedecnum = JEDEC_ATMEL;
14537 tg3_flag_set(tp, NVRAM_BUFFERED);
14538 tg3_flag_set(tp, FLASH);
14539 tp->nvram_pagesize = 264;
14540 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14541 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14542 tp->nvram_size = (protect ? 0x3e200 :
14543 TG3_NVRAM_SIZE_512KB);
14544 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14545 tp->nvram_size = (protect ? 0x1f200 :
14546 TG3_NVRAM_SIZE_256KB);
14547 else
14548 tp->nvram_size = (protect ? 0x1f200 :
14549 TG3_NVRAM_SIZE_128KB);
14550 break;
14551 case FLASH_5752VENDOR_ST_M45PE10:
14552 case FLASH_5752VENDOR_ST_M45PE20:
14553 case FLASH_5752VENDOR_ST_M45PE40:
14554 tp->nvram_jedecnum = JEDEC_ST;
14555 tg3_flag_set(tp, NVRAM_BUFFERED);
14556 tg3_flag_set(tp, FLASH);
14557 tp->nvram_pagesize = 256;
14558 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14559 tp->nvram_size = (protect ?
14560 TG3_NVRAM_SIZE_64KB :
14561 TG3_NVRAM_SIZE_128KB);
14562 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14563 tp->nvram_size = (protect ?
14564 TG3_NVRAM_SIZE_64KB :
14565 TG3_NVRAM_SIZE_256KB);
14566 else
14567 tp->nvram_size = (protect ?
14568 TG3_NVRAM_SIZE_128KB :
14569 TG3_NVRAM_SIZE_512KB);
14570 break;
14571 }
14572}
14573
14574static void tg3_get_5787_nvram_info(struct tg3 *tp)
14575{
14576 u32 nvcfg1;
14577
14578 nvcfg1 = tr32(NVRAM_CFG1);
14579
14580 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14581 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14582 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14583 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14584 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14585 tp->nvram_jedecnum = JEDEC_ATMEL;
14586 tg3_flag_set(tp, NVRAM_BUFFERED);
14587 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14588
14589 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14590 tw32(NVRAM_CFG1, nvcfg1);
14591 break;
14592 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14593 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14594 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14595 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14596 tp->nvram_jedecnum = JEDEC_ATMEL;
14597 tg3_flag_set(tp, NVRAM_BUFFERED);
14598 tg3_flag_set(tp, FLASH);
14599 tp->nvram_pagesize = 264;
14600 break;
14601 case FLASH_5752VENDOR_ST_M45PE10:
14602 case FLASH_5752VENDOR_ST_M45PE20:
14603 case FLASH_5752VENDOR_ST_M45PE40:
14604 tp->nvram_jedecnum = JEDEC_ST;
14605 tg3_flag_set(tp, NVRAM_BUFFERED);
14606 tg3_flag_set(tp, FLASH);
14607 tp->nvram_pagesize = 256;
14608 break;
14609 }
14610}
14611
14612static void tg3_get_5761_nvram_info(struct tg3 *tp)
14613{
14614 u32 nvcfg1, protect = 0;
14615
14616 nvcfg1 = tr32(NVRAM_CFG1);
14617
14618 /* NVRAM protection for TPM */
14619 if (nvcfg1 & (1 << 27)) {
14620 tg3_flag_set(tp, PROTECTED_NVRAM);
14621 protect = 1;
14622 }
14623
14624 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14625 switch (nvcfg1) {
14626 case FLASH_5761VENDOR_ATMEL_ADB021D:
14627 case FLASH_5761VENDOR_ATMEL_ADB041D:
14628 case FLASH_5761VENDOR_ATMEL_ADB081D:
14629 case FLASH_5761VENDOR_ATMEL_ADB161D:
14630 case FLASH_5761VENDOR_ATMEL_MDB021D:
14631 case FLASH_5761VENDOR_ATMEL_MDB041D:
14632 case FLASH_5761VENDOR_ATMEL_MDB081D:
14633 case FLASH_5761VENDOR_ATMEL_MDB161D:
14634 tp->nvram_jedecnum = JEDEC_ATMEL;
14635 tg3_flag_set(tp, NVRAM_BUFFERED);
14636 tg3_flag_set(tp, FLASH);
14637 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14638 tp->nvram_pagesize = 256;
14639 break;
14640 case FLASH_5761VENDOR_ST_A_M45PE20:
14641 case FLASH_5761VENDOR_ST_A_M45PE40:
14642 case FLASH_5761VENDOR_ST_A_M45PE80:
14643 case FLASH_5761VENDOR_ST_A_M45PE16:
14644 case FLASH_5761VENDOR_ST_M_M45PE20:
14645 case FLASH_5761VENDOR_ST_M_M45PE40:
14646 case FLASH_5761VENDOR_ST_M_M45PE80:
14647 case FLASH_5761VENDOR_ST_M_M45PE16:
14648 tp->nvram_jedecnum = JEDEC_ST;
14649 tg3_flag_set(tp, NVRAM_BUFFERED);
14650 tg3_flag_set(tp, FLASH);
14651 tp->nvram_pagesize = 256;
14652 break;
14653 }
14654
14655 if (protect) {
14656 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14657 } else {
14658 switch (nvcfg1) {
14659 case FLASH_5761VENDOR_ATMEL_ADB161D:
14660 case FLASH_5761VENDOR_ATMEL_MDB161D:
14661 case FLASH_5761VENDOR_ST_A_M45PE16:
14662 case FLASH_5761VENDOR_ST_M_M45PE16:
14663 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14664 break;
14665 case FLASH_5761VENDOR_ATMEL_ADB081D:
14666 case FLASH_5761VENDOR_ATMEL_MDB081D:
14667 case FLASH_5761VENDOR_ST_A_M45PE80:
14668 case FLASH_5761VENDOR_ST_M_M45PE80:
14669 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14670 break;
14671 case FLASH_5761VENDOR_ATMEL_ADB041D:
14672 case FLASH_5761VENDOR_ATMEL_MDB041D:
14673 case FLASH_5761VENDOR_ST_A_M45PE40:
14674 case FLASH_5761VENDOR_ST_M_M45PE40:
14675 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14676 break;
14677 case FLASH_5761VENDOR_ATMEL_ADB021D:
14678 case FLASH_5761VENDOR_ATMEL_MDB021D:
14679 case FLASH_5761VENDOR_ST_A_M45PE20:
14680 case FLASH_5761VENDOR_ST_M_M45PE20:
14681 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14682 break;
14683 }
14684 }
14685}
14686
14687static void tg3_get_5906_nvram_info(struct tg3 *tp)
14688{
14689 tp->nvram_jedecnum = JEDEC_ATMEL;
14690 tg3_flag_set(tp, NVRAM_BUFFERED);
14691 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14692}
14693
14694static void tg3_get_57780_nvram_info(struct tg3 *tp)
14695{
14696 u32 nvcfg1;
14697
14698 nvcfg1 = tr32(NVRAM_CFG1);
14699
14700 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14701 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14702 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14703 tp->nvram_jedecnum = JEDEC_ATMEL;
14704 tg3_flag_set(tp, NVRAM_BUFFERED);
14705 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14706
14707 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14708 tw32(NVRAM_CFG1, nvcfg1);
14709 return;
14710 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14711 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14712 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14713 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14714 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14715 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14716 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14717 tp->nvram_jedecnum = JEDEC_ATMEL;
14718 tg3_flag_set(tp, NVRAM_BUFFERED);
14719 tg3_flag_set(tp, FLASH);
14720
14721 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14722 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14723 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14724 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14725 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14726 break;
14727 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14728 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14729 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14730 break;
14731 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14732 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14733 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14734 break;
14735 }
14736 break;
14737 case FLASH_5752VENDOR_ST_M45PE10:
14738 case FLASH_5752VENDOR_ST_M45PE20:
14739 case FLASH_5752VENDOR_ST_M45PE40:
14740 tp->nvram_jedecnum = JEDEC_ST;
14741 tg3_flag_set(tp, NVRAM_BUFFERED);
14742 tg3_flag_set(tp, FLASH);
14743
14744 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14745 case FLASH_5752VENDOR_ST_M45PE10:
14746 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14747 break;
14748 case FLASH_5752VENDOR_ST_M45PE20:
14749 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14750 break;
14751 case FLASH_5752VENDOR_ST_M45PE40:
14752 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14753 break;
14754 }
14755 break;
14756 default:
14757 tg3_flag_set(tp, NO_NVRAM);
14758 return;
14759 }
14760
14761 tg3_nvram_get_pagesize(tp, nvcfg1);
14762 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14763 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14764}
14765
14767static void tg3_get_5717_nvram_info(struct tg3 *tp)
14768{
14769 u32 nvcfg1;
14770
14771 nvcfg1 = tr32(NVRAM_CFG1);
14772
14773 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14774 case FLASH_5717VENDOR_ATMEL_EEPROM:
14775 case FLASH_5717VENDOR_MICRO_EEPROM:
14776 tp->nvram_jedecnum = JEDEC_ATMEL;
14777 tg3_flag_set(tp, NVRAM_BUFFERED);
14778 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14779
14780 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14781 tw32(NVRAM_CFG1, nvcfg1);
14782 return;
14783 case FLASH_5717VENDOR_ATMEL_MDB011D:
14784 case FLASH_5717VENDOR_ATMEL_ADB011B:
14785 case FLASH_5717VENDOR_ATMEL_ADB011D:
14786 case FLASH_5717VENDOR_ATMEL_MDB021D:
14787 case FLASH_5717VENDOR_ATMEL_ADB021B:
14788 case FLASH_5717VENDOR_ATMEL_ADB021D:
14789 case FLASH_5717VENDOR_ATMEL_45USPT:
14790 tp->nvram_jedecnum = JEDEC_ATMEL;
14791 tg3_flag_set(tp, NVRAM_BUFFERED);
14792 tg3_flag_set(tp, FLASH);
14793
14794 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14795 case FLASH_5717VENDOR_ATMEL_MDB021D:
14796 /* Detect size with tg3_nvram_get_size() */
14797 break;
14798 case FLASH_5717VENDOR_ATMEL_ADB021B:
14799 case FLASH_5717VENDOR_ATMEL_ADB021D:
14800 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14801 break;
14802 default:
14803 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14804 break;
14805 }
14806 break;
14807 case FLASH_5717VENDOR_ST_M_M25PE10:
14808 case FLASH_5717VENDOR_ST_A_M25PE10:
14809 case FLASH_5717VENDOR_ST_M_M45PE10:
14810 case FLASH_5717VENDOR_ST_A_M45PE10:
14811 case FLASH_5717VENDOR_ST_M_M25PE20:
14812 case FLASH_5717VENDOR_ST_A_M25PE20:
14813 case FLASH_5717VENDOR_ST_M_M45PE20:
14814 case FLASH_5717VENDOR_ST_A_M45PE20:
14815 case FLASH_5717VENDOR_ST_25USPT:
14816 case FLASH_5717VENDOR_ST_45USPT:
14817 tp->nvram_jedecnum = JEDEC_ST;
14818 tg3_flag_set(tp, NVRAM_BUFFERED);
14819 tg3_flag_set(tp, FLASH);
14820
14821 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14822 case FLASH_5717VENDOR_ST_M_M25PE20:
14823 case FLASH_5717VENDOR_ST_M_M45PE20:
14824 /* Detect size with tg3_nvram_get_size() */
14825 break;
14826 case FLASH_5717VENDOR_ST_A_M25PE20:
14827 case FLASH_5717VENDOR_ST_A_M45PE20:
14828 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14829 break;
14830 default:
14831 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14832 break;
14833 }
14834 break;
14835 default:
14836 tg3_flag_set(tp, NO_NVRAM);
14837 return;
14838 }
14839
14840 tg3_nvram_get_pagesize(tp, nvcfg1);
14841 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14842 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14843}
14844
14845static void tg3_get_5720_nvram_info(struct tg3 *tp)
14846{
14847 u32 nvcfg1, nvmpinstrp, nv_status;
14848
14849 nvcfg1 = tr32(NVRAM_CFG1);
14850 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14851
14852 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14853 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14854 tg3_flag_set(tp, NO_NVRAM);
14855 return;
14856 }
14857
14858 switch (nvmpinstrp) {
14859 case FLASH_5762_MX25L_100:
14860 case FLASH_5762_MX25L_200:
14861 case FLASH_5762_MX25L_400:
14862 case FLASH_5762_MX25L_800:
14863 case FLASH_5762_MX25L_160_320:
14864 tp->nvram_pagesize = 4096;
14865 tp->nvram_jedecnum = JEDEC_MACRONIX;
14866 tg3_flag_set(tp, NVRAM_BUFFERED);
14867 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14868 tg3_flag_set(tp, FLASH);
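			/* The autosense status field encodes log2 of the
			 * device size in megabytes; the final shift
			 * converts megabytes to a byte count.
			 */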
14869 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14870 tp->nvram_size =
14871 (1 << (nv_status >> AUTOSENSE_DEVID &
14872 AUTOSENSE_DEVID_MASK)
14873 << AUTOSENSE_SIZE_IN_MB);
14874 return;
14875
14876 case FLASH_5762_EEPROM_HD:
14877 nvmpinstrp = FLASH_5720_EEPROM_HD;
14878 break;
14879 case FLASH_5762_EEPROM_LD:
14880 nvmpinstrp = FLASH_5720_EEPROM_LD;
14881 break;
14882 case FLASH_5720VENDOR_M_ST_M45PE20:
14883 /* This pinstrap supports multiple sizes, so force it
14884 * to read the actual size from location 0xf0.
14885 */
14886 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14887 break;
14888 }
14889 }
14890
14891 switch (nvmpinstrp) {
14892 case FLASH_5720_EEPROM_HD:
14893 case FLASH_5720_EEPROM_LD:
14894 tp->nvram_jedecnum = JEDEC_ATMEL;
14895 tg3_flag_set(tp, NVRAM_BUFFERED);
14896
14897 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14898 tw32(NVRAM_CFG1, nvcfg1);
14899 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14900 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14901 else
14902 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14903 return;
14904 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14905 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14906 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14907 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14908 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14909 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14910 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14911 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14912 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14913 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14914 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14915 case FLASH_5720VENDOR_ATMEL_45USPT:
14916 tp->nvram_jedecnum = JEDEC_ATMEL;
14917 tg3_flag_set(tp, NVRAM_BUFFERED);
14918 tg3_flag_set(tp, FLASH);
14919
14920 switch (nvmpinstrp) {
14921 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14922 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14923 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14924 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14925 break;
14926 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14927 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14928 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14929 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14930 break;
14931 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14932 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14933 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14934 break;
14935 default:
14936 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14937 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14938 break;
14939 }
14940 break;
14941 case FLASH_5720VENDOR_M_ST_M25PE10:
14942 case FLASH_5720VENDOR_M_ST_M45PE10:
14943 case FLASH_5720VENDOR_A_ST_M25PE10:
14944 case FLASH_5720VENDOR_A_ST_M45PE10:
14945 case FLASH_5720VENDOR_M_ST_M25PE20:
14946 case FLASH_5720VENDOR_M_ST_M45PE20:
14947 case FLASH_5720VENDOR_A_ST_M25PE20:
14948 case FLASH_5720VENDOR_A_ST_M45PE20:
14949 case FLASH_5720VENDOR_M_ST_M25PE40:
14950 case FLASH_5720VENDOR_M_ST_M45PE40:
14951 case FLASH_5720VENDOR_A_ST_M25PE40:
14952 case FLASH_5720VENDOR_A_ST_M45PE40:
14953 case FLASH_5720VENDOR_M_ST_M25PE80:
14954 case FLASH_5720VENDOR_M_ST_M45PE80:
14955 case FLASH_5720VENDOR_A_ST_M25PE80:
14956 case FLASH_5720VENDOR_A_ST_M45PE80:
14957 case FLASH_5720VENDOR_ST_25USPT:
14958 case FLASH_5720VENDOR_ST_45USPT:
14959 tp->nvram_jedecnum = JEDEC_ST;
14960 tg3_flag_set(tp, NVRAM_BUFFERED);
14961 tg3_flag_set(tp, FLASH);
14962
14963 switch (nvmpinstrp) {
14964 case FLASH_5720VENDOR_M_ST_M25PE20:
14965 case FLASH_5720VENDOR_M_ST_M45PE20:
14966 case FLASH_5720VENDOR_A_ST_M25PE20:
14967 case FLASH_5720VENDOR_A_ST_M45PE20:
14968 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14969 break;
14970 case FLASH_5720VENDOR_M_ST_M25PE40:
14971 case FLASH_5720VENDOR_M_ST_M45PE40:
14972 case FLASH_5720VENDOR_A_ST_M25PE40:
14973 case FLASH_5720VENDOR_A_ST_M45PE40:
14974 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14975 break;
14976 case FLASH_5720VENDOR_M_ST_M25PE80:
14977 case FLASH_5720VENDOR_M_ST_M45PE80:
14978 case FLASH_5720VENDOR_A_ST_M25PE80:
14979 case FLASH_5720VENDOR_A_ST_M45PE80:
14980 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14981 break;
14982 default:
14983 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14984 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14985 break;
14986 }
14987 break;
14988 default:
14989 tg3_flag_set(tp, NO_NVRAM);
14990 return;
14991 }
14992
14993 tg3_nvram_get_pagesize(tp, nvcfg1);
14994 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14995 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14996
14997 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14998 u32 val;
14999
15000 if (tg3_nvram_read(tp, 0, &val))
15001 return;
15002
15003 if (val != TG3_EEPROM_MAGIC &&
15004 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15005 tg3_flag_set(tp, NO_NVRAM);
15006 }
15007}
15008
15009/* Chips other than 5700/5701 use the NVRAM for fetching info. */
15010static void tg3_nvram_init(struct tg3 *tp)
15011{
15012 if (tg3_flag(tp, IS_SSB_CORE)) {
15013 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
15014 tg3_flag_clear(tp, NVRAM);
15015 tg3_flag_clear(tp, NVRAM_BUFFERED);
15016 tg3_flag_set(tp, NO_NVRAM);
15017 return;
15018 }
15019
15020 tw32_f(GRC_EEPROM_ADDR,
15021 (EEPROM_ADDR_FSM_RESET |
15022 (EEPROM_DEFAULT_CLOCK_PERIOD <<
15023 EEPROM_ADDR_CLKPERD_SHIFT)));
15024
15025 msleep(1);
15026
15027 /* Enable seeprom accesses. */
15028 tw32_f(GRC_LOCAL_CTRL,
15029 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15030 udelay(100);
15031
15032 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15033 tg3_asic_rev(tp) != ASIC_REV_5701) {
15034 tg3_flag_set(tp, NVRAM);
15035
15036 if (tg3_nvram_lock(tp)) {
15037 netdev_warn(tp->dev,
15038 "Cannot get nvram lock, %s failed\n",
15039 __func__);
15040 return;
15041 }
15042 tg3_enable_nvram_access(tp);
15043
15044 tp->nvram_size = 0;
15045
15046 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15047 tg3_get_5752_nvram_info(tp);
15048 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15049 tg3_get_5755_nvram_info(tp);
15050 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15051 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15052 tg3_asic_rev(tp) == ASIC_REV_5785)
15053 tg3_get_5787_nvram_info(tp);
15054 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15055 tg3_get_5761_nvram_info(tp);
15056 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15057 tg3_get_5906_nvram_info(tp);
15058 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15059 tg3_flag(tp, 57765_CLASS))
15060 tg3_get_57780_nvram_info(tp);
15061 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15062 tg3_asic_rev(tp) == ASIC_REV_5719)
15063 tg3_get_5717_nvram_info(tp);
15064 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15065 tg3_asic_rev(tp) == ASIC_REV_5762)
15066 tg3_get_5720_nvram_info(tp);
15067 else
15068 tg3_get_nvram_info(tp);
15069
15070 if (tp->nvram_size == 0)
15071 tg3_get_nvram_size(tp);
15072
15073 tg3_disable_nvram_access(tp);
15074 tg3_nvram_unlock(tp);
15075
15076 } else {
15077 tg3_flag_clear(tp, NVRAM);
15078 tg3_flag_clear(tp, NVRAM_BUFFERED);
15079
15080 tg3_get_eeprom_size(tp);
15081 }
15082}
15083
15084struct subsys_tbl_ent {
15085 u16 subsys_vendor, subsys_devid;
15086 u32 phy_id;
15087};
15088
15089static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15090 /* Broadcom boards. */
15091 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15092 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15093 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15094 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15095 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15096 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15097 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15098 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15099 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15100 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15101 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15102 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15103 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15104 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15105 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15106 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15107 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15108 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15109 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15110 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15111 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15112 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15113
15114 /* 3com boards. */
15115 { TG3PCI_SUBVENDOR_ID_3COM,
15116 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15117 { TG3PCI_SUBVENDOR_ID_3COM,
15118 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15119 { TG3PCI_SUBVENDOR_ID_3COM,
15120 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15121 { TG3PCI_SUBVENDOR_ID_3COM,
15122 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15123 { TG3PCI_SUBVENDOR_ID_3COM,
15124 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15125
15126 /* DELL boards. */
15127 { TG3PCI_SUBVENDOR_ID_DELL,
15128 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15129 { TG3PCI_SUBVENDOR_ID_DELL,
15130 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15131 { TG3PCI_SUBVENDOR_ID_DELL,
15132 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15133 { TG3PCI_SUBVENDOR_ID_DELL,
15134 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15135
15136 /* Compaq boards. */
15137 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15138 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15139 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15140 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15141 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15142 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15143 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15144 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15145 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15146 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15147
15148 /* IBM boards. */
15149 { TG3PCI_SUBVENDOR_ID_IBM,
15150 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15151};
15152
15153static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15154{
15155 int i;
15156
15157 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15158 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15159 tp->pdev->subsystem_vendor) &&
15160 (subsys_id_to_phy_id[i].subsys_devid ==
15161 tp->pdev->subsystem_device))
15162 return &subsys_id_to_phy_id[i];
15163 }
15164 return NULL;
15165}
15166
15167static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15168{
15169 u32 val;
15170
15171 tp->phy_id = TG3_PHY_ID_INVALID;
15172 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15173
15174 /* Assume an onboard device and WOL capable by default. */
15175 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15176 tg3_flag_set(tp, WOL_CAP);
15177
15178 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15179 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15180 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15181 tg3_flag_set(tp, IS_NIC);
15182 }
15183 val = tr32(VCPU_CFGSHDW);
15184 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15185 tg3_flag_set(tp, ASPM_WORKAROUND);
15186 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15187 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15188 tg3_flag_set(tp, WOL_ENABLE);
15189 device_set_wakeup_enable(&tp->pdev->dev, true);
15190 }
15191 goto done;
15192 }
15193
15194 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15195 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15196 u32 nic_cfg, led_cfg;
15197 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15198 u32 nic_phy_id, ver, eeprom_phy_id;
15199 int eeprom_phy_serdes = 0;
15200
15201 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15202 tp->nic_sram_data_cfg = nic_cfg;
15203
15204 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15205 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15206 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15207 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15208 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15209 (ver > 0) && (ver < 0x100))
15210 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15211
15212 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15213 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15214
15215 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15216 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15217 tg3_asic_rev(tp) == ASIC_REV_5720)
15218 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15219
15220 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15221 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15222 eeprom_phy_serdes = 1;
15223
15224 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15225 if (nic_phy_id != 0) {
15226 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15227 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15228
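			/* Repack the two NIC SRAM ID words into the
			 * driver's internal PHY ID layout; the same
			 * packing is used by the MII PHYSID1/PHYSID2
			 * probe in tg3_phy_probe().
			 */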
15229 eeprom_phy_id = (id1 >> 16) << 10;
15230 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15231 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15232 } else
15233 eeprom_phy_id = 0;
15234
15235 tp->phy_id = eeprom_phy_id;
15236 if (eeprom_phy_serdes) {
15237 if (!tg3_flag(tp, 5705_PLUS))
15238 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15239 else
15240 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15241 }
15242
15243 if (tg3_flag(tp, 5750_PLUS))
15244 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15245 SHASTA_EXT_LED_MODE_MASK);
15246 else
15247 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15248
15249 switch (led_cfg) {
15250 default:
15251 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15252 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15253 break;
15254
15255 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15256 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15257 break;
15258
15259 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15260 tp->led_ctrl = LED_CTRL_MODE_MAC;
15261
15262 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15263 * read on some older 5700/5701 bootcode.
15264 */
15265 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15266 tg3_asic_rev(tp) == ASIC_REV_5701)
15267 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15268
15269 break;
15270
15271 case SHASTA_EXT_LED_SHARED:
15272 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15273 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15274 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15275 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15276 LED_CTRL_MODE_PHY_2);
15277
15278 if (tg3_flag(tp, 5717_PLUS) ||
15279 tg3_asic_rev(tp) == ASIC_REV_5762)
15280 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15281 LED_CTRL_BLINK_RATE_MASK;
15282
15283 break;
15284
15285 case SHASTA_EXT_LED_MAC:
15286 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15287 break;
15288
15289 case SHASTA_EXT_LED_COMBO:
15290 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15291 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15292 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15293 LED_CTRL_MODE_PHY_2);
15294 break;
15295
15296 }
15297
15298 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15299 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15300 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15301 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15302
15303 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15304 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15305
15306 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15307 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15308 if ((tp->pdev->subsystem_vendor ==
15309 PCI_VENDOR_ID_ARIMA) &&
15310 (tp->pdev->subsystem_device == 0x205a ||
15311 tp->pdev->subsystem_device == 0x2063))
15312 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15313 } else {
15314 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15315 tg3_flag_set(tp, IS_NIC);
15316 }
15317
15318 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15319 tg3_flag_set(tp, ENABLE_ASF);
15320 if (tg3_flag(tp, 5750_PLUS))
15321 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15322 }
15323
15324 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15325 tg3_flag(tp, 5750_PLUS))
15326 tg3_flag_set(tp, ENABLE_APE);
15327
15328 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15329 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15330 tg3_flag_clear(tp, WOL_CAP);
15331
15332 if (tg3_flag(tp, WOL_CAP) &&
15333 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15334 tg3_flag_set(tp, WOL_ENABLE);
15335 device_set_wakeup_enable(&tp->pdev->dev, true);
15336 }
15337
15338 if (cfg2 & (1 << 17))
15339 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15340
		/* Serdes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
15343 if (cfg2 & (1 << 18))
15344 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15345
15346 if ((tg3_flag(tp, 57765_PLUS) ||
15347 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15348 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15349 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15350 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15351
15352 if (tg3_flag(tp, PCI_EXPRESS)) {
15353 u32 cfg3;
15354
15355 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15356 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15357 !tg3_flag(tp, 57765_PLUS) &&
15358 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15359 tg3_flag_set(tp, ASPM_WORKAROUND);
15360 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15361 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15362 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15363 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15364 }
15365
15366 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15367 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15368 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15369 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15370 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15371 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15372
15373 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15374 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15375 }
15376done:
15377 if (tg3_flag(tp, WOL_CAP))
15378 device_set_wakeup_enable(&tp->pdev->dev,
15379 tg3_flag(tp, WOL_ENABLE));
15380 else
15381 device_set_wakeup_capable(&tp->pdev->dev, false);
15382}
15383
15384static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15385{
15386 int i, err;
15387 u32 val2, off = offset * 8;
15388
15389 err = tg3_nvram_lock(tp);
15390 if (err)
15391 return err;
15392
15393 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15394 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15395 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15396 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15397 udelay(10);
15398
15399 for (i = 0; i < 100; i++) {
15400 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15401 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15402 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15403 break;
15404 }
15405 udelay(10);
15406 }
15407
15408 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15409
15410 tg3_nvram_unlock(tp);
15411 if (val2 & APE_OTP_STATUS_CMD_DONE)
15412 return 0;
15413
15414 return -EBUSY;
15415}
15416
15417static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15418{
15419 int i;
15420 u32 val;
15421
15422 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15423 tw32(OTP_CTRL, cmd);
15424
15425 /* Wait for up to 1 ms for command to execute. */
15426 for (i = 0; i < 100; i++) {
15427 val = tr32(OTP_STATUS);
15428 if (val & OTP_STATUS_CMD_DONE)
15429 break;
15430 udelay(10);
15431 }
15432
15433 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15434}
15435
15436/* Read the gphy configuration from the OTP region of the chip. The gphy
15437 * configuration is a 32-bit value that straddles the alignment boundary.
15438 * We do two 32-bit reads and then shift and merge the results.
15439 */
15440static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15441{
15442 u32 bhalf_otp, thalf_otp;
15443
15444 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15445
15446 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15447 return 0;
15448
15449 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15450
15451 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15452 return 0;
15453
15454 thalf_otp = tr32(OTP_READ_DATA);
15455
15456 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15457
15458 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15459 return 0;
15460
15461 bhalf_otp = tr32(OTP_READ_DATA);
15462
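	/* The 32-bit gphy config straddles the two words read above: its
	 * upper half is the low half of the first read and its lower half
	 * is the high half of the second.
	 */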
15463 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15464}
15465
15466static void tg3_phy_init_link_config(struct tg3 *tp)
15467{
15468 u32 adv = ADVERTISED_Autoneg;
15469
15470 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15471 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15472 adv |= ADVERTISED_1000baseT_Half;
15473 adv |= ADVERTISED_1000baseT_Full;
15474 }
15475
15476 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15477 adv |= ADVERTISED_100baseT_Half |
15478 ADVERTISED_100baseT_Full |
15479 ADVERTISED_10baseT_Half |
15480 ADVERTISED_10baseT_Full |
15481 ADVERTISED_TP;
15482 else
15483 adv |= ADVERTISED_FIBRE;
15484
15485 tp->link_config.advertising = adv;
15486 tp->link_config.speed = SPEED_UNKNOWN;
15487 tp->link_config.duplex = DUPLEX_UNKNOWN;
15488 tp->link_config.autoneg = AUTONEG_ENABLE;
15489 tp->link_config.active_speed = SPEED_UNKNOWN;
15490 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15491
15492 tp->old_link = -1;
15493}
15494
15495static int tg3_phy_probe(struct tg3 *tp)
15496{
15497 u32 hw_phy_id_1, hw_phy_id_2;
15498 u32 hw_phy_id, hw_phy_id_masked;
15499 int err;
15500
15501 /* flow control autonegotiation is default behavior */
15502 tg3_flag_set(tp, PAUSE_AUTONEG);
15503 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15504
15505 if (tg3_flag(tp, ENABLE_APE)) {
15506 switch (tp->pci_fn) {
15507 case 0:
15508 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15509 break;
15510 case 1:
15511 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15512 break;
15513 case 2:
15514 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15515 break;
15516 case 3:
15517 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15518 break;
15519 }
15520 }
15521
15522 if (!tg3_flag(tp, ENABLE_ASF) &&
15523 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15524 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15525 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15526 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15527
15528 if (tg3_flag(tp, USE_PHYLIB))
15529 return tg3_phy_init(tp);
15530
15531 /* Reading the PHY ID register can conflict with ASF
15532 * firmware access to the PHY hardware.
15533 */
15534 err = 0;
15535 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15536 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15537 } else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the value found in the eeprom area and, failing that,
		 * the hard-coded subsystem ID table.
		 */
15543 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15544 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15545
15546 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15547 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15548 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15549
15550 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15551 }
15552
15553 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15554 tp->phy_id = hw_phy_id;
15555 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15556 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15557 else
15558 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15559 } else {
15560 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15561 /* Do nothing, phy ID already set up in
15562 * tg3_get_eeprom_hw_cfg().
15563 */
15564 } else {
15565 struct subsys_tbl_ent *p;
15566
15567 /* No eeprom signature? Try the hardcoded
15568 * subsys device table.
15569 */
15570 p = tg3_lookup_by_subsys(tp);
15571 if (p) {
15572 tp->phy_id = p->phy_id;
15573 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume that
				 * the phy is supported when it is connected
				 * to an SSB core.
				 */
15581 return -ENODEV;
15582 }
15583
15584 if (!tp->phy_id ||
15585 tp->phy_id == TG3_PHY_ID_BCM8002)
15586 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15587 }
15588 }
15589
15590 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15591 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15592 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15593 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15594 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15595 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15596 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15597 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15598 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15599 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15600
15601 tp->eee.supported = SUPPORTED_100baseT_Full |
15602 SUPPORTED_1000baseT_Full;
15603 tp->eee.advertised = ADVERTISED_100baseT_Full |
15604 ADVERTISED_1000baseT_Full;
15605 tp->eee.eee_enabled = 1;
15606 tp->eee.tx_lpi_enabled = 1;
15607 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15608 }
15609
15610 tg3_phy_init_link_config(tp);
15611
15612 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15613 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15614 !tg3_flag(tp, ENABLE_APE) &&
15615 !tg3_flag(tp, ENABLE_ASF)) {
15616 u32 bmsr, dummy;
15617
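/* BMSR latches link-down events; read it twice so that the second
 * read reflects the current link state.
 */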
15618 tg3_readphy(tp, MII_BMSR, &bmsr);
15619 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15620 (bmsr & BMSR_LSTATUS))
15621 goto skip_phy_reset;
15622
15623 err = tg3_phy_reset(tp);
15624 if (err)
15625 return err;
15626
15627 tg3_phy_set_wirespeed(tp);
15628
15629 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15630 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15631 tp->link_config.flowctrl);
15632
15633 tg3_writephy(tp, MII_BMCR,
15634 BMCR_ANENABLE | BMCR_ANRESTART);
15635 }
15636 }
15637
15638skip_phy_reset:
15639 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15640 err = tg3_init_5401phy_dsp(tp);
15641 if (err)
15642 return err;
15643
15644 err = tg3_init_5401phy_dsp(tp);
15645 }
15646
15647 return err;
15648}
15649
15650static void tg3_read_vpd(struct tg3 *tp)
15651{
15652 u8 *vpd_data;
15653 unsigned int block_end, rosize, len;
15654 u32 vpdlen;
15655 int j, i = 0;
15656
15657 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15658 if (!vpd_data)
15659 goto out_no_vpd;
15660
15661 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15662 if (i < 0)
15663 goto out_not_found;
15664
15665 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15666 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15667 i += PCI_VPD_LRDT_TAG_SIZE;
15668
15669 if (block_end > vpdlen)
15670 goto out_not_found;
15671
15672 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15673 PCI_VPD_RO_KEYWORD_MFR_ID);
15674 if (j > 0) {
15675 len = pci_vpd_info_field_size(&vpd_data[j]);
15676
15677 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15678 if (j + len > block_end || len != 4 ||
15679 memcmp(&vpd_data[j], "1028", 4))
15680 goto partno;
15681
15682 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15683 PCI_VPD_RO_KEYWORD_VENDOR0);
15684 if (j < 0)
15685 goto partno;
15686
15687 len = pci_vpd_info_field_size(&vpd_data[j]);
15688
15689 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15690 if (j + len > block_end)
15691 goto partno;
15692
15693 if (len >= sizeof(tp->fw_ver))
15694 len = sizeof(tp->fw_ver) - 1;
15695 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15696 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15697 &vpd_data[j]);
15698 }
15699
15700partno:
15701 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15702 PCI_VPD_RO_KEYWORD_PARTNO);
15703 if (i < 0)
15704 goto out_not_found;
15705
15706 len = pci_vpd_info_field_size(&vpd_data[i]);
15707
15708 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15709 if (len > TG3_BPN_SIZE ||
15710 (len + i) > vpdlen)
15711 goto out_not_found;
15712
15713 memcpy(tp->board_part_number, &vpd_data[i], len);
15714
15715out_not_found:
15716 kfree(vpd_data);
15717 if (tp->board_part_number[0])
15718 return;
15719
15720out_no_vpd:
15721 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15722 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15723 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15724 strcpy(tp->board_part_number, "BCM5717");
15725 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15726 strcpy(tp->board_part_number, "BCM5718");
15727 else
15728 goto nomatch;
15729 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15730 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15731 strcpy(tp->board_part_number, "BCM57780");
15732 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15733 strcpy(tp->board_part_number, "BCM57760");
15734 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15735 strcpy(tp->board_part_number, "BCM57790");
15736 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15737 strcpy(tp->board_part_number, "BCM57788");
15738 else
15739 goto nomatch;
15740 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15741 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15742 strcpy(tp->board_part_number, "BCM57761");
15743 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15744 strcpy(tp->board_part_number, "BCM57765");
15745 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15746 strcpy(tp->board_part_number, "BCM57781");
15747 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15748 strcpy(tp->board_part_number, "BCM57785");
15749 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15750 strcpy(tp->board_part_number, "BCM57791");
15751 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15752 strcpy(tp->board_part_number, "BCM57795");
15753 else
15754 goto nomatch;
15755 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15756 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15757 strcpy(tp->board_part_number, "BCM57762");
15758 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15759 strcpy(tp->board_part_number, "BCM57766");
15760 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15761 strcpy(tp->board_part_number, "BCM57782");
15762 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15763 strcpy(tp->board_part_number, "BCM57786");
15764 else
15765 goto nomatch;
15766 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15767 strcpy(tp->board_part_number, "BCM95906");
15768 } else {
15769nomatch:
15770 strcpy(tp->board_part_number, "none");
15771 }
15772}
15773
15774static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15775{
15776 u32 val;
15777
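/* A firmware image is considered valid when its first word carries
 * the 0x0c000000 signature in the top bits and its second word is
 * zero.
 */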
15778 if (tg3_nvram_read(tp, offset, &val) ||
15779 (val & 0xfc000000) != 0x0c000000 ||
15780 tg3_nvram_read(tp, offset + 4, &val) ||
15781 val != 0)
15782 return 0;
15783
15784 return 1;
15785}
15786
15787static void tg3_read_bc_ver(struct tg3 *tp)
15788{
15789 u32 val, offset, start, ver_offset;
15790 int i, dst_off;
15791 bool newver = false;
15792
15793 if (tg3_nvram_read(tp, 0xc, &offset) ||
15794 tg3_nvram_read(tp, 0x4, &start))
15795 return;
15796
15797 offset = tg3_nvram_logical_addr(tp, offset);
15798
15799 if (tg3_nvram_read(tp, offset, &val))
15800 return;
15801
15802 if ((val & 0xfc000000) == 0x0c000000) {
15803 if (tg3_nvram_read(tp, offset + 4, &val))
15804 return;
15805
15806 if (val == 0)
15807 newver = true;
15808 }
15809
15810 dst_off = strlen(tp->fw_ver);
15811
15812 if (newver) {
15813 if (TG3_VER_SIZE - dst_off < 16 ||
15814 tg3_nvram_read(tp, offset + 8, &ver_offset))
15815 return;
15816
15817 offset = offset + ver_offset - start;
15818 for (i = 0; i < 16; i += 4) {
15819 __be32 v;
15820 if (tg3_nvram_read_be32(tp, offset + i, &v))
15821 return;
15822
15823 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15824 }
15825 } else {
15826 u32 major, minor;
15827
15828 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15829 return;
15830
15831 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15832 TG3_NVM_BCVER_MAJSFT;
15833 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15834 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15835 "v%d.%02d", major, minor);
15836 }
15837}
15838
15839static void tg3_read_hwsb_ver(struct tg3 *tp)
15840{
15841 u32 val, major, minor;
15842
15843 /* Use native endian representation */
15844 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15845 return;
15846
15847 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15848 TG3_NVM_HWSB_CFG1_MAJSFT;
15849 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15850 TG3_NVM_HWSB_CFG1_MINSFT;
15851
15852	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15853}
15854
15855static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15856{
15857 u32 offset, major, minor, build;
15858
15859 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15860
15861 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15862 return;
15863
15864 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15865 case TG3_EEPROM_SB_REVISION_0:
15866 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15867 break;
15868 case TG3_EEPROM_SB_REVISION_2:
15869 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15870 break;
15871 case TG3_EEPROM_SB_REVISION_3:
15872 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15873 break;
15874 case TG3_EEPROM_SB_REVISION_4:
15875 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15876 break;
15877 case TG3_EEPROM_SB_REVISION_5:
15878 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15879 break;
15880 case TG3_EEPROM_SB_REVISION_6:
15881 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15882 break;
15883 default:
15884 return;
15885 }
15886
15887 if (tg3_nvram_read(tp, offset, &val))
15888 return;
15889
15890 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15891 TG3_EEPROM_SB_EDH_BLD_SHFT;
15892 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15893 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15894 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15895
15896 if (minor > 99 || build > 26)
15897 return;
15898
15899 offset = strlen(tp->fw_ver);
15900 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15901 " v%d.%02d", major, minor);
15902
15903 if (build > 0) {
15904 offset = strlen(tp->fw_ver);
15905 if (offset < TG3_VER_SIZE - 1)
15906 tp->fw_ver[offset] = 'a' + build - 1;
15907 }
15908}
15909
15910static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15911{
15912 u32 val, offset, start;
15913 int i, vlen;
15914
15915 for (offset = TG3_NVM_DIR_START;
15916 offset < TG3_NVM_DIR_END;
15917 offset += TG3_NVM_DIRENT_SIZE) {
15918 if (tg3_nvram_read(tp, offset, &val))
15919 return;
15920
15921 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15922 break;
15923 }
15924
15925 if (offset == TG3_NVM_DIR_END)
15926 return;
15927
15928 if (!tg3_flag(tp, 5705_PLUS))
15929 start = 0x08000000;
15930 else if (tg3_nvram_read(tp, offset - 4, &start))
15931 return;
15932
15933 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15934 !tg3_fw_img_is_valid(tp, offset) ||
15935 tg3_nvram_read(tp, offset + 8, &val))
15936 return;
15937
15938 offset += val - start;
15939
15940 vlen = strlen(tp->fw_ver);
15941
15942 tp->fw_ver[vlen++] = ',';
15943 tp->fw_ver[vlen++] = ' ';
15944
15945 for (i = 0; i < 4; i++) {
15946 __be32 v;
15947 if (tg3_nvram_read_be32(tp, offset, &v))
15948 return;
15949
15950 offset += sizeof(v);
15951
15952 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15953 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15954 break;
15955 }
15956
15957 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15958 vlen += sizeof(v);
15959 }
15960}
15961
15962static void tg3_probe_ncsi(struct tg3 *tp)
15963{
15964 u32 apedata;
15965
15966 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15967 if (apedata != APE_SEG_SIG_MAGIC)
15968 return;
15969
15970 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15971 if (!(apedata & APE_FW_STATUS_READY))
15972 return;
15973
15974 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15975 tg3_flag_set(tp, APE_HAS_NCSI);
15976}
15977
15978static void tg3_read_dash_ver(struct tg3 *tp)
15979{
15980 int vlen;
15981 u32 apedata;
15982 char *fwtype;
15983
15984 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15985
15986 if (tg3_flag(tp, APE_HAS_NCSI))
15987 fwtype = "NCSI";
15988 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15989 fwtype = "SMASH";
15990 else
15991 fwtype = "DASH";
15992
15993 vlen = strlen(tp->fw_ver);
15994
15995 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15996 fwtype,
15997 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15998 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15999 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
16000 (apedata & APE_FW_VERSION_BLDMSK));
16001}
16002
16003static void tg3_read_otp_ver(struct tg3 *tp)
16004{
16005 u32 val, val2;
16006
16007 if (tg3_asic_rev(tp) != ASIC_REV_5762)
16008 return;
16009
16010 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16011 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16012 TG3_OTP_MAGIC0_VALID(val)) {
16013 u64 val64 = (u64) val << 32 | val2;
16014 u32 ver = 0;
16015 int i, vlen;
16016
16017 for (i = 0; i < 7; i++) {
16018 if ((val64 & 0xff) == 0)
16019 break;
16020 ver = val64 & 0xff;
16021 val64 >>= 8;
16022 }
16023 vlen = strlen(tp->fw_ver);
16024 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16025 }
16026}
16027
16028static void tg3_read_fw_ver(struct tg3 *tp)
16029{
16030 u32 val;
16031 bool vpd_vers = false;
16032
16033 if (tp->fw_ver[0] != 0)
16034 vpd_vers = true;
16035
16036 if (tg3_flag(tp, NO_NVRAM)) {
16037 strcat(tp->fw_ver, "sb");
16038 tg3_read_otp_ver(tp);
16039 return;
16040 }
16041
16042 if (tg3_nvram_read(tp, 0, &val))
16043 return;
16044
16045 if (val == TG3_EEPROM_MAGIC)
16046 tg3_read_bc_ver(tp);
16047 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16048 tg3_read_sb_ver(tp, val);
16049 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16050 tg3_read_hwsb_ver(tp);
16051
16052 if (tg3_flag(tp, ENABLE_ASF)) {
16053 if (tg3_flag(tp, ENABLE_APE)) {
16054 tg3_probe_ncsi(tp);
16055 if (!vpd_vers)
16056 tg3_read_dash_ver(tp);
16057 } else if (!vpd_vers) {
16058 tg3_read_mgmtfw_ver(tp);
16059 }
16060 }
16061
16062 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16063}
16064
16065static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16066{
16067 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16068 return TG3_RX_RET_MAX_SIZE_5717;
16069 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16070 return TG3_RX_RET_MAX_SIZE_5700;
16071 else
16072 return TG3_RX_RET_MAX_SIZE_5705;
16073}
16074
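/* Host bridges known to reorder posted writes to the mailbox
 * registers; devices behind them get the MBOX_WRITE_REORDER
 * read-back workaround (see tg3_get_invariants() below).
 */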
16075static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16076 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16077 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16078 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16079 { },
16080};
16081
16082static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16083{
16084 struct pci_dev *peer;
16085 unsigned int func, devnr = tp->pdev->devfn & ~7;
16086
16087 for (func = 0; func < 8; func++) {
16088 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16089 if (peer && peer != tp->pdev)
16090 break;
16091 pci_dev_put(peer);
16092 }
16093	/* 5704 can be configured in single-port mode; set peer to
16094 * tp->pdev in that case.
16095 */
16096 if (!peer) {
16097 peer = tp->pdev;
16098 return peer;
16099 }
16100
16101 /*
16102 * We don't need to keep the refcount elevated; there's no way
16103	 * to remove one half of this device without removing the other.
16104 */
16105 pci_dev_put(peer);
16106
16107 return peer;
16108}
16109
16110static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16111{
16112 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16113 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16114 u32 reg;
16115
16116 /* All devices that use the alternate
16117 * ASIC REV location have a CPMU.
16118 */
16119 tg3_flag_set(tp, CPMU_PRESENT);
16120
16121 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16122 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16123 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16124 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16125 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16126 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16127 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16128 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16129 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16130 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16131 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16132 reg = TG3PCI_GEN2_PRODID_ASICREV;
16133 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16134 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16135 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16136 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16137 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16138 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16139 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16140 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16141 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16142 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16143 reg = TG3PCI_GEN15_PRODID_ASICREV;
16144 else
16145 reg = TG3PCI_PRODID_ASICREV;
16146
16147 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16148 }
16149
16150 /* Wrong chip ID in 5752 A0. This code can be removed later
16151 * as A0 is not in production.
16152 */
16153 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16154 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16155
16156 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16157 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16158
16159 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16160 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16161 tg3_asic_rev(tp) == ASIC_REV_5720)
16162 tg3_flag_set(tp, 5717_PLUS);
16163
16164 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16165 tg3_asic_rev(tp) == ASIC_REV_57766)
16166 tg3_flag_set(tp, 57765_CLASS);
16167
16168 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16169 tg3_asic_rev(tp) == ASIC_REV_5762)
16170 tg3_flag_set(tp, 57765_PLUS);
16171
16172 /* Intentionally exclude ASIC_REV_5906 */
16173 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16174 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16175 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16176 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16177 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16178 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16179 tg3_flag(tp, 57765_PLUS))
16180 tg3_flag_set(tp, 5755_PLUS);
16181
16182 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16183 tg3_asic_rev(tp) == ASIC_REV_5714)
16184 tg3_flag_set(tp, 5780_CLASS);
16185
16186 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16187 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16188 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16189 tg3_flag(tp, 5755_PLUS) ||
16190 tg3_flag(tp, 5780_CLASS))
16191 tg3_flag_set(tp, 5750_PLUS);
16192
16193 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16194 tg3_flag(tp, 5750_PLUS))
16195 tg3_flag_set(tp, 5705_PLUS);
16196}
16197
16198static bool tg3_10_100_only_device(struct tg3 *tp,
16199 const struct pci_device_id *ent)
16200{
16201 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16202
16203 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16204 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16205 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16206 return true;
16207
16208 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16209 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16210 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16211 return true;
16212 } else {
16213 return true;
16214 }
16215 }
16216
16217 return false;
16218}
16219
16220static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16221{
16222 u32 misc_ctrl_reg;
16223 u32 pci_state_reg, grc_misc_cfg;
16224 u32 val;
16225 u16 pci_cmd;
16226 int err;
16227
16228 /* Force memory write invalidate off. If we leave it on,
16229 * then on 5700_BX chips we have to enable a workaround.
16230 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16231	 * to match the cacheline size. The Broadcom driver has this
16232	 * workaround but turns MWI off all the time, so it never uses
16233	 * it. This seems to suggest that the workaround is insufficient.
16234 */
16235 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16236 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16237 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16238
16239 /* Important! -- Make sure register accesses are byteswapped
16240 * correctly. Also, for those chips that require it, make
16241 * sure that indirect register accesses are enabled before
16242 * the first operation.
16243 */
16244 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16245 &misc_ctrl_reg);
16246 tp->misc_host_ctrl |= (misc_ctrl_reg &
16247 MISC_HOST_CTRL_CHIPREV);
16248 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16249 tp->misc_host_ctrl);
16250
16251 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16252
16253 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16254 * we need to disable memory and use config. cycles
16255 * only to access all registers. The 5702/03 chips
16256 * can mistakenly decode the special cycles from the
16257 * ICH chipsets as memory write cycles, causing corruption
16258 * of register and memory space. Only certain ICH bridges
16259 * will drive special cycles with non-zero data during the
16260 * address phase which can fall within the 5703's address
16261 * range. This is not an ICH bug as the PCI spec allows
16262 * non-zero address during special cycles. However, only
16263 * these ICH bridges are known to drive non-zero addresses
16264 * during special cycles.
16265 *
16266 * Since special cycles do not cross PCI bridges, we only
16267 * enable this workaround if the 5703 is on the secondary
16268 * bus of these ICH bridges.
16269 */
16270 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16271 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16272 static struct tg3_dev_id {
16273 u32 vendor;
16274 u32 device;
16275 u32 rev;
16276 } ich_chipsets[] = {
16277 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16278 PCI_ANY_ID },
16279 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16280 PCI_ANY_ID },
16281 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16282 0xa },
16283 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16284 PCI_ANY_ID },
16285 { },
16286 };
16287 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16288 struct pci_dev *bridge = NULL;
16289
16290 while (pci_id->vendor != 0) {
16291 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16292 bridge);
16293 if (!bridge) {
16294 pci_id++;
16295 continue;
16296 }
16297 if (pci_id->rev != PCI_ANY_ID) {
16298 if (bridge->revision > pci_id->rev)
16299 continue;
16300 }
16301 if (bridge->subordinate &&
16302 (bridge->subordinate->number ==
16303 tp->pdev->bus->number)) {
16304 tg3_flag_set(tp, ICH_WORKAROUND);
16305 pci_dev_put(bridge);
16306 break;
16307 }
16308 }
16309 }
16310
16311 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16312 static struct tg3_dev_id {
16313 u32 vendor;
16314 u32 device;
16315 } bridge_chipsets[] = {
16316 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16317 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16318 { },
16319 };
16320 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16321 struct pci_dev *bridge = NULL;
16322
16323 while (pci_id->vendor != 0) {
16324 bridge = pci_get_device(pci_id->vendor,
16325 pci_id->device,
16326 bridge);
16327 if (!bridge) {
16328 pci_id++;
16329 continue;
16330 }
16331 if (bridge->subordinate &&
16332 (bridge->subordinate->number <=
16333 tp->pdev->bus->number) &&
16334 (bridge->subordinate->busn_res.end >=
16335 tp->pdev->bus->number)) {
16336 tg3_flag_set(tp, 5701_DMA_BUG);
16337 pci_dev_put(bridge);
16338 break;
16339 }
16340 }
16341 }
16342
16343 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16344 * DMA addresses > 40-bit. This bridge may have other additional
16345 * 57xx devices behind it in some 4-port NIC designs for example.
16346 * Any tg3 device found behind the bridge will also need the 40-bit
16347 * DMA workaround.
16348 */
16349 if (tg3_flag(tp, 5780_CLASS)) {
16350 tg3_flag_set(tp, 40BIT_DMA_BUG);
16351 tp->msi_cap = tp->pdev->msi_cap;
16352 } else {
16353 struct pci_dev *bridge = NULL;
16354
16355 do {
16356 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16357 PCI_DEVICE_ID_SERVERWORKS_EPB,
16358 bridge);
16359 if (bridge && bridge->subordinate &&
16360 (bridge->subordinate->number <=
16361 tp->pdev->bus->number) &&
16362 (bridge->subordinate->busn_res.end >=
16363 tp->pdev->bus->number)) {
16364 tg3_flag_set(tp, 40BIT_DMA_BUG);
16365 pci_dev_put(bridge);
16366 break;
16367 }
16368 } while (bridge);
16369 }
16370
16371 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16372 tg3_asic_rev(tp) == ASIC_REV_5714)
16373 tp->pdev_peer = tg3_find_peer(tp);
16374
16375 /* Determine TSO capabilities */
16376 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16377 ; /* Do nothing. HW bug. */
16378 else if (tg3_flag(tp, 57765_PLUS))
16379 tg3_flag_set(tp, HW_TSO_3);
16380 else if (tg3_flag(tp, 5755_PLUS) ||
16381 tg3_asic_rev(tp) == ASIC_REV_5906)
16382 tg3_flag_set(tp, HW_TSO_2);
16383 else if (tg3_flag(tp, 5750_PLUS)) {
16384 tg3_flag_set(tp, HW_TSO_1);
16385 tg3_flag_set(tp, TSO_BUG);
16386 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16387 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16388 tg3_flag_clear(tp, TSO_BUG);
16389 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16390 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16391 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16392 tg3_flag_set(tp, FW_TSO);
16393 tg3_flag_set(tp, TSO_BUG);
16394 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16395 tp->fw_needed = FIRMWARE_TG3TSO5;
16396 else
16397 tp->fw_needed = FIRMWARE_TG3TSO;
16398 }
16399
16400 /* Selectively allow TSO based on operating conditions */
16401 if (tg3_flag(tp, HW_TSO_1) ||
16402 tg3_flag(tp, HW_TSO_2) ||
16403 tg3_flag(tp, HW_TSO_3) ||
16404 tg3_flag(tp, FW_TSO)) {
16405 /* For firmware TSO, assume ASF is disabled.
16406 * We'll disable TSO later if we discover ASF
16407 * is enabled in tg3_get_eeprom_hw_cfg().
16408 */
16409 tg3_flag_set(tp, TSO_CAPABLE);
16410 } else {
16411 tg3_flag_clear(tp, TSO_CAPABLE);
16412 tg3_flag_clear(tp, TSO_BUG);
16413 tp->fw_needed = NULL;
16414 }
16415
16416 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16417 tp->fw_needed = FIRMWARE_TG3;
16418
16419 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16420 tp->fw_needed = FIRMWARE_TG357766;
16421
16422 tp->irq_max = 1;
16423
16424 if (tg3_flag(tp, 5750_PLUS)) {
16425 tg3_flag_set(tp, SUPPORT_MSI);
16426 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16427 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16428 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16429 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16430 tp->pdev_peer == tp->pdev))
16431 tg3_flag_clear(tp, SUPPORT_MSI);
16432
16433 if (tg3_flag(tp, 5755_PLUS) ||
16434 tg3_asic_rev(tp) == ASIC_REV_5906) {
16435 tg3_flag_set(tp, 1SHOT_MSI);
16436 }
16437
16438 if (tg3_flag(tp, 57765_PLUS)) {
16439 tg3_flag_set(tp, SUPPORT_MSIX);
16440 tp->irq_max = TG3_IRQ_MAX_VECS;
16441 }
16442 }
16443
16444 tp->txq_max = 1;
16445 tp->rxq_max = 1;
16446 if (tp->irq_max > 1) {
16447 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16448 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16449
16450 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16451 tg3_asic_rev(tp) == ASIC_REV_5720)
16452 tp->txq_max = tp->irq_max - 1;
16453 }
16454
16455 if (tg3_flag(tp, 5755_PLUS) ||
16456 tg3_asic_rev(tp) == ASIC_REV_5906)
16457 tg3_flag_set(tp, SHORT_DMA_BUG);
16458
16459 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16460 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16461
16462 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16463 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16464 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16465 tg3_asic_rev(tp) == ASIC_REV_5762)
16466 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16467
16468 if (tg3_flag(tp, 57765_PLUS) &&
16469 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16470 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16471
16472 if (!tg3_flag(tp, 5705_PLUS) ||
16473 tg3_flag(tp, 5780_CLASS) ||
16474 tg3_flag(tp, USE_JUMBO_BDFLAG))
16475 tg3_flag_set(tp, JUMBO_CAPABLE);
16476
16477 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16478 &pci_state_reg);
16479
16480 if (pci_is_pcie(tp->pdev)) {
16481 u16 lnkctl;
16482
16483 tg3_flag_set(tp, PCI_EXPRESS);
16484
16485 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16486 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16487 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16488 tg3_flag_clear(tp, HW_TSO_2);
16489 tg3_flag_clear(tp, TSO_CAPABLE);
16490 }
16491 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16492 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16493 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16494 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16495 tg3_flag_set(tp, CLKREQ_BUG);
16496 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16497 tg3_flag_set(tp, L1PLLPD_EN);
16498 }
16499 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16500 /* BCM5785 devices are effectively PCIe devices, and should
16501 * follow PCIe codepaths, but do not have a PCIe capabilities
16502 * section.
16503 */
16504 tg3_flag_set(tp, PCI_EXPRESS);
16505 } else if (!tg3_flag(tp, 5705_PLUS) ||
16506 tg3_flag(tp, 5780_CLASS)) {
16507 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16508 if (!tp->pcix_cap) {
16509 dev_err(&tp->pdev->dev,
16510 "Cannot find PCI-X capability, aborting\n");
16511 return -EIO;
16512 }
16513
16514 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16515 tg3_flag_set(tp, PCIX_MODE);
16516 }
16517
16518 /* If we have an AMD 762 or VIA K8T800 chipset, write
16519 * reordering to the mailbox registers done by the host
16520	 * controller can cause major trouble. We read back from
16521 * every mailbox register write to force the writes to be
16522 * posted to the chip in order.
16523 */
16524 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16525 !tg3_flag(tp, PCI_EXPRESS))
16526 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16527
16528 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16529 &tp->pci_cacheline_sz);
16530 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16531 &tp->pci_lat_timer);
16532 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16533 tp->pci_lat_timer < 64) {
16534 tp->pci_lat_timer = 64;
16535 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16536 tp->pci_lat_timer);
16537 }
16538
16539 /* Important! -- It is critical that the PCI-X hw workaround
16540 * situation is decided before the first MMIO register access.
16541 */
16542 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16543 /* 5700 BX chips need to have their TX producer index
16544 * mailboxes written twice to workaround a bug.
16545 */
16546 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16547
16548 /* If we are in PCI-X mode, enable register write workaround.
16549 *
16550 * The workaround is to use indirect register accesses
16551 * for all chip writes not to mailbox registers.
16552 */
16553 if (tg3_flag(tp, PCIX_MODE)) {
16554 u32 pm_reg;
16555
16556 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16557
16558			/* The chip can have its power management PCI config
16559 * space registers clobbered due to this bug.
16560 * So explicitly force the chip into D0 here.
16561 */
16562 pci_read_config_dword(tp->pdev,
16563 tp->pdev->pm_cap + PCI_PM_CTRL,
16564 &pm_reg);
16565 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16566 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16567 pci_write_config_dword(tp->pdev,
16568 tp->pdev->pm_cap + PCI_PM_CTRL,
16569 pm_reg);
16570
16571 /* Also, force SERR#/PERR# in PCI command. */
16572 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16573 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16574 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16575 }
16576 }
16577
16578 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16579 tg3_flag_set(tp, PCI_HIGH_SPEED);
16580 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16581 tg3_flag_set(tp, PCI_32BIT);
16582
16583 /* Chip-specific fixup from Broadcom driver */
16584 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16585 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16586 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16587 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16588 }
16589
16590 /* Default fast path register access methods */
16591 tp->read32 = tg3_read32;
16592 tp->write32 = tg3_write32;
16593 tp->read32_mbox = tg3_read32;
16594 tp->write32_mbox = tg3_write32;
16595 tp->write32_tx_mbox = tg3_write32;
16596 tp->write32_rx_mbox = tg3_write32;
16597
16598 /* Various workaround register access methods */
16599 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16600 tp->write32 = tg3_write_indirect_reg32;
16601 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16602 (tg3_flag(tp, PCI_EXPRESS) &&
16603 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16604 /*
16605		 * Back-to-back register writes can cause problems on these
16606		 * chips; the workaround is to read back all reg writes
16607 * except those to mailbox regs.
16608 *
16609 * See tg3_write_indirect_reg32().
16610 */
16611 tp->write32 = tg3_write_flush_reg32;
16612 }
16613
16614 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16615 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16616 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16617 tp->write32_rx_mbox = tg3_write_flush_reg32;
16618 }
16619
16620 if (tg3_flag(tp, ICH_WORKAROUND)) {
16621 tp->read32 = tg3_read_indirect_reg32;
16622 tp->write32 = tg3_write_indirect_reg32;
16623 tp->read32_mbox = tg3_read_indirect_mbox;
16624 tp->write32_mbox = tg3_write_indirect_mbox;
16625 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16626 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16627
16628 iounmap(tp->regs);
16629 tp->regs = NULL;
16630
16631 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16632 pci_cmd &= ~PCI_COMMAND_MEMORY;
16633 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16634 }
16635 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16636 tp->read32_mbox = tg3_read32_mbox_5906;
16637 tp->write32_mbox = tg3_write32_mbox_5906;
16638 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16639 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16640 }
16641
16642 if (tp->write32 == tg3_write_indirect_reg32 ||
16643 (tg3_flag(tp, PCIX_MODE) &&
16644 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16645 tg3_asic_rev(tp) == ASIC_REV_5701)))
16646 tg3_flag_set(tp, SRAM_USE_CONFIG);
16647
16648 /* The memory arbiter has to be enabled in order for SRAM accesses
16649 * to succeed. Normally on powerup the tg3 chip firmware will make
16650 * sure it is enabled, but other entities such as system netboot
16651 * code might disable it.
16652 */
16653 val = tr32(MEMARB_MODE);
16654 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16655
16656 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16657 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16658 tg3_flag(tp, 5780_CLASS)) {
16659 if (tg3_flag(tp, PCIX_MODE)) {
16660 pci_read_config_dword(tp->pdev,
16661 tp->pcix_cap + PCI_X_STATUS,
16662 &val);
16663 tp->pci_fn = val & 0x7;
16664 }
16665 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16666 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16667 tg3_asic_rev(tp) == ASIC_REV_5720) {
16668 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16669 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16670 val = tr32(TG3_CPMU_STATUS);
16671
16672 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16673 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16674 else
16675 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16676 TG3_CPMU_STATUS_FSHFT_5719;
16677 }
16678
16679 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16680 tp->write32_tx_mbox = tg3_write_flush_reg32;
16681 tp->write32_rx_mbox = tg3_write_flush_reg32;
16682 }
16683
16684 /* Get eeprom hw config before calling tg3_set_power_state().
16685 * In particular, the TG3_FLAG_IS_NIC flag must be
16686 * determined before calling tg3_set_power_state() so that
16687 * we know whether or not to switch out of Vaux power.
16688 * When the flag is set, it means that GPIO1 is used for eeprom
16689 * write protect and also implies that it is a LOM where GPIOs
16690 * are not used to switch power.
16691 */
16692 tg3_get_eeprom_hw_cfg(tp);
16693
16694 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16695 tg3_flag_clear(tp, TSO_CAPABLE);
16696 tg3_flag_clear(tp, TSO_BUG);
16697 tp->fw_needed = NULL;
16698 }
16699
16700 if (tg3_flag(tp, ENABLE_APE)) {
16701 /* Allow reads and writes to the
16702 * APE register and memory space.
16703 */
16704 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16705 PCISTATE_ALLOW_APE_SHMEM_WR |
16706 PCISTATE_ALLOW_APE_PSPACE_WR;
16707 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16708 pci_state_reg);
16709
16710 tg3_ape_lock_init(tp);
16711 tp->ape_hb_interval =
16712 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16713 }
16714
16715 /* Set up tp->grc_local_ctrl before calling
16716 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16717 * will bring 5700's external PHY out of reset.
16718 * It is also used as eeprom write protect on LOMs.
16719 */
16720 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16721 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16722 tg3_flag(tp, EEPROM_WRITE_PROT))
16723 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16724 GRC_LCLCTRL_GPIO_OUTPUT1);
16725 /* Unused GPIO3 must be driven as output on 5752 because there
16726 * are no pull-up resistors on unused GPIO pins.
16727 */
16728 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16729 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16730
16731 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16732 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16733 tg3_flag(tp, 57765_CLASS))
16734 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16735
16736 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16737 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16738 /* Turn off the debug UART. */
16739 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16740 if (tg3_flag(tp, IS_NIC))
16741 /* Keep VMain power. */
16742 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16743 GRC_LCLCTRL_GPIO_OUTPUT0;
16744 }
16745
16746 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16747 tp->grc_local_ctrl |=
16748 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16749
16750 /* Switch out of Vaux if it is a NIC */
16751 tg3_pwrsrc_switch_to_vmain(tp);
16752
16753 /* Derive initial jumbo mode from MTU assigned in
16754 * ether_setup() via the alloc_etherdev() call
16755 */
16756 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16757 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16758
16759 /* Determine WakeOnLan speed to use. */
16760 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16761 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16762 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16763 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16764 tg3_flag_clear(tp, WOL_SPEED_100MB);
16765 } else {
16766 tg3_flag_set(tp, WOL_SPEED_100MB);
16767 }
16768
16769 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16770 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16771
16772	/* A few boards don't want the Ethernet@WireSpeed phy feature */
16773 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16774 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16775 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16776 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16777 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16778 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16779 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16780
16781 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16782 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16783 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16784 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16785 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16786
16787 if (tg3_flag(tp, 5705_PLUS) &&
16788 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16789 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16790 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16791 !tg3_flag(tp, 57765_PLUS)) {
16792 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16793 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16794 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16795 tg3_asic_rev(tp) == ASIC_REV_5761) {
16796 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16797 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16798 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16799 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16800 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16801 } else
16802 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16803 }
16804
16805 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16806 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16807 tp->phy_otp = tg3_read_otp_phycfg(tp);
16808 if (tp->phy_otp == 0)
16809 tp->phy_otp = TG3_OTP_DEFAULT;
16810 }
16811
16812 if (tg3_flag(tp, CPMU_PRESENT))
16813 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16814 else
16815 tp->mi_mode = MAC_MI_MODE_BASE;
16816
16817 tp->coalesce_mode = 0;
16818 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16819 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16820 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16821
16822 /* Set these bits to enable statistics workaround. */
16823 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16824 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16825 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16826 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16827 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16828 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16829 }
16830
16831 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16832 tg3_asic_rev(tp) == ASIC_REV_57780)
16833 tg3_flag_set(tp, USE_PHYLIB);
16834
16835 err = tg3_mdio_init(tp);
16836 if (err)
16837 return err;
16838
16839 /* Initialize data/descriptor byte/word swapping. */
16840 val = tr32(GRC_MODE);
16841 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16842 tg3_asic_rev(tp) == ASIC_REV_5762)
16843 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16844 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16845 GRC_MODE_B2HRX_ENABLE |
16846 GRC_MODE_HTX2B_ENABLE |
16847 GRC_MODE_HOST_STACKUP);
16848 else
16849 val &= GRC_MODE_HOST_STACKUP;
16850
16851 tw32(GRC_MODE, val | tp->grc_mode);
16852
16853 tg3_switch_clocks(tp);
16854
16855 /* Clear this out for sanity. */
16856 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16857
16858 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16859 tw32(TG3PCI_REG_BASE_ADDR, 0);
16860
16861 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16862 &pci_state_reg);
16863 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16864 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16865 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16866 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16867 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16868 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16869 void __iomem *sram_base;
16870
16871 /* Write some dummy words into the SRAM status block
16872			 * area and see if they read back correctly. If the value
16873			 * read back is bad, force-enable the PCIX workaround.
16874 */
16875 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16876
16877 writel(0x00000000, sram_base);
16878 writel(0x00000000, sram_base + 4);
16879 writel(0xffffffff, sram_base + 4);
16880 if (readl(sram_base) != 0x00000000)
16881 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16882 }
16883 }
16884
16885 udelay(50);
16886 tg3_nvram_init(tp);
16887
16888 /* If the device has an NVRAM, no need to load patch firmware */
16889 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16890 !tg3_flag(tp, NO_NVRAM))
16891 tp->fw_needed = NULL;
16892
16893 grc_misc_cfg = tr32(GRC_MISC_CFG);
16894 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16895
16896 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16897 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16898 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16899 tg3_flag_set(tp, IS_5788);
16900
16901 if (!tg3_flag(tp, IS_5788) &&
16902 tg3_asic_rev(tp) != ASIC_REV_5700)
16903 tg3_flag_set(tp, TAGGED_STATUS);
16904 if (tg3_flag(tp, TAGGED_STATUS)) {
16905 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16906 HOSTCC_MODE_CLRTICK_TXBD);
16907
16908 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16909 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16910 tp->misc_host_ctrl);
16911 }
16912
16913 /* Preserve the APE MAC_MODE bits */
16914 if (tg3_flag(tp, ENABLE_APE))
16915 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16916 else
16917 tp->mac_mode = 0;
16918
16919 if (tg3_10_100_only_device(tp, ent))
16920 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16921
16922 err = tg3_phy_probe(tp);
16923 if (err) {
16924 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16925 /* ... but do not return immediately ... */
16926 tg3_mdio_fini(tp);
16927 }
16928
16929 tg3_read_vpd(tp);
16930 tg3_read_fw_ver(tp);
16931
16932 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16933 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16934 } else {
16935 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16936 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16937 else
16938 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16939 }
16940
16941 /* 5700 {AX,BX} chips have a broken status block link
16942 * change bit implementation, so we must use the
16943 * status register in those cases.
16944 */
16945 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16946 tg3_flag_set(tp, USE_LINKCHG_REG);
16947 else
16948 tg3_flag_clear(tp, USE_LINKCHG_REG);
16949
16950	/* The led_ctrl is set during tg3_phy_probe; here we might
16951 * have to force the link status polling mechanism based
16952 * upon subsystem IDs.
16953 */
16954 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16955 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16956 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16957 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16958 tg3_flag_set(tp, USE_LINKCHG_REG);
16959 }
16960
16961 /* For all SERDES we poll the MAC status register. */
16962 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16963 tg3_flag_set(tp, POLL_SERDES);
16964 else
16965 tg3_flag_clear(tp, POLL_SERDES);
16966
16967 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16968 tg3_flag_set(tp, POLL_CPMU_LINK);
16969
16970 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16971 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16972 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16973 tg3_flag(tp, PCIX_MODE)) {
16974 tp->rx_offset = NET_SKB_PAD;
16975#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16976 tp->rx_copy_thresh = ~(u16)0;
16977#endif
16978 }
16979
16980 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16981 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16982 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16983
16984 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16985
16986 /* Increment the rx prod index on the rx std ring by at most
16987	 * 8 for these chips to work around hw errata.
16988 */
16989 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16990 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16991 tg3_asic_rev(tp) == ASIC_REV_5755)
16992 tp->rx_std_max_post = 8;
16993
16994 if (tg3_flag(tp, ASPM_WORKAROUND))
16995 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16996 PCIE_PWR_MGMT_L1_THRESH_MSK;
16997
16998 return err;
16999}
17000
17001#ifdef CONFIG_SPARC
17002static int tg3_get_macaddr_sparc(struct tg3 *tp)
17003{
17004 struct net_device *dev = tp->dev;
17005 struct pci_dev *pdev = tp->pdev;
17006 struct device_node *dp = pci_device_to_OF_node(pdev);
17007 const unsigned char *addr;
17008 int len;
17009
17010 addr = of_get_property(dp, "local-mac-address", &len);
17011 if (addr && len == ETH_ALEN) {
17012 memcpy(dev->dev_addr, addr, ETH_ALEN);
17013 return 0;
17014 }
17015 return -ENODEV;
17016}
17017
17018static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
17019{
17020 struct net_device *dev = tp->dev;
17021
17022 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
17023 return 0;
17024}
17025#endif
17026
17027static int tg3_get_device_address(struct tg3 *tp)
17028{
17029 struct net_device *dev = tp->dev;
17030 u32 hi, lo, mac_offset;
17031 int addr_ok = 0;
17032 int err;
17033
17034#ifdef CONFIG_SPARC
17035 if (!tg3_get_macaddr_sparc(tp))
17036 return 0;
17037#endif
17038
17039 if (tg3_flag(tp, IS_SSB_CORE)) {
17040 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
17041 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17042 return 0;
17043 }
17044
17045 mac_offset = 0x7c;
17046 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17047 tg3_flag(tp, 5780_CLASS)) {
17048 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17049 mac_offset = 0xcc;
17050 if (tg3_nvram_lock(tp))
17051 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17052 else
17053 tg3_nvram_unlock(tp);
17054 } else if (tg3_flag(tp, 5717_PLUS)) {
17055 if (tp->pci_fn & 1)
17056 mac_offset = 0xcc;
17057 if (tp->pci_fn > 1)
17058 mac_offset += 0x18c;
17059 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17060 mac_offset = 0x10;
17061
17062 /* First try to get it from MAC address mailbox. */
17063 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
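/* The upper 16 bits read 0x484b (ASCII "HK"), presumably written by
 * the bootcode as a signature, when the mailbox holds a valid MAC
 * address.
 */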
17064 if ((hi >> 16) == 0x484b) {
17065 dev->dev_addr[0] = (hi >> 8) & 0xff;
17066 dev->dev_addr[1] = (hi >> 0) & 0xff;
17067
17068 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17069 dev->dev_addr[2] = (lo >> 24) & 0xff;
17070 dev->dev_addr[3] = (lo >> 16) & 0xff;
17071 dev->dev_addr[4] = (lo >> 8) & 0xff;
17072 dev->dev_addr[5] = (lo >> 0) & 0xff;
17073
17074 /* Some old bootcode may report a 0 MAC address in SRAM */
17075 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17076 }
17077 if (!addr_ok) {
17078 /* Next, try NVRAM. */
17079 if (!tg3_flag(tp, NO_NVRAM) &&
17080 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17081 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17082 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17083 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17084 }
17085 /* Finally just fetch it out of the MAC control regs. */
17086 else {
17087 hi = tr32(MAC_ADDR_0_HIGH);
17088 lo = tr32(MAC_ADDR_0_LOW);
17089
17090 dev->dev_addr[5] = lo & 0xff;
17091 dev->dev_addr[4] = (lo >> 8) & 0xff;
17092 dev->dev_addr[3] = (lo >> 16) & 0xff;
17093 dev->dev_addr[2] = (lo >> 24) & 0xff;
17094 dev->dev_addr[1] = hi & 0xff;
17095 dev->dev_addr[0] = (hi >> 8) & 0xff;
17096 }
17097 }
17098
17099 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17100#ifdef CONFIG_SPARC
17101 if (!tg3_get_default_macaddr_sparc(tp))
17102 return 0;
17103#endif
17104 return -EINVAL;
17105 }
17106 return 0;
17107}
17108
17109#define BOUNDARY_SINGLE_CACHELINE 1
17110#define BOUNDARY_MULTI_CACHELINE 2
17111
17112static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17113{
17114 int cacheline_size;
17115 u8 byte;
17116 int goal;
17117
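/* PCI_CACHE_LINE_SIZE is in units of 32-bit words (e.g. 0x10 means
 * 64-byte cache lines); zero means the firmware left it unset, so
 * fall back to 1024 bytes.
 */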
17118 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17119 if (byte == 0)
17120 cacheline_size = 1024;
17121 else
17122 cacheline_size = (int) byte * 4;
17123
17124 /* On 5703 and later chips, the boundary bits have no
17125 * effect.
17126 */
17127 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17128 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17129 !tg3_flag(tp, PCI_EXPRESS))
17130 goto out;
17131
17132#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17133 goal = BOUNDARY_MULTI_CACHELINE;
17134#else
17135#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17136 goal = BOUNDARY_SINGLE_CACHELINE;
17137#else
17138 goal = 0;
17139#endif
17140#endif
17141
17142 if (tg3_flag(tp, 57765_PLUS)) {
17143 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17144 goto out;
17145 }
17146
17147 if (!goal)
17148 goto out;
17149
17150 /* PCI controllers on most RISC systems tend to disconnect
17151 * when a device tries to burst across a cache-line boundary.
17152 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17153 *
17154 * Unfortunately, for PCI-E there are only limited
17155 * write-side controls for this, and thus for reads
17156 * we will still get the disconnects. We'll also waste
17157 * these PCI cycles for both read and write for chips
17158 * other than 5700 and 5701 which do not implement the
17159 * boundary bits.
17160 */
17161 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17162 switch (cacheline_size) {
17163 case 16:
17164 case 32:
17165 case 64:
17166 case 128:
17167 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17168 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17169 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17170 } else {
17171 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17172 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17173 }
17174 break;
17175
17176 case 256:
17177 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17178 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17179 break;
17180
17181 default:
17182 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17183 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17184 break;
17185 }
17186 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17187 switch (cacheline_size) {
17188 case 16:
17189 case 32:
17190 case 64:
17191 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17192 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17193 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17194 break;
17195 }
17196 /* fallthrough */
17197 case 128:
17198 default:
17199 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17200 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17201 break;
17202 }
17203 } else {
17204 switch (cacheline_size) {
17205 case 16:
17206 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17207 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17208 DMA_RWCTRL_WRITE_BNDRY_16);
17209 break;
17210 }
17211 /* fallthrough */
17212 case 32:
17213 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17214 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17215 DMA_RWCTRL_WRITE_BNDRY_32);
17216 break;
17217 }
17218 /* fallthrough */
17219 case 64:
17220 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17221 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17222 DMA_RWCTRL_WRITE_BNDRY_64);
17223 break;
17224 }
17225 /* fallthrough */
17226 case 128:
17227 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17228 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17229 DMA_RWCTRL_WRITE_BNDRY_128);
17230 break;
17231 }
17232 /* fallthrough */
17233 case 256:
17234 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17235 DMA_RWCTRL_WRITE_BNDRY_256);
17236 break;
17237 case 512:
17238 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17239 DMA_RWCTRL_WRITE_BNDRY_512);
17240 break;
17241 case 1024:
17242 default:
17243 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17244 DMA_RWCTRL_WRITE_BNDRY_1024);
17245 break;
17246 }
17247 }
17248
17249out:
17250 return val;
17251}
17252
17253static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17254 int size, bool to_device)
17255{
17256 struct tg3_internal_buffer_desc test_desc;
17257 u32 sram_dma_descs;
17258 int i, ret;
17259
17260 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17261
17262 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17263 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17264 tw32(RDMAC_STATUS, 0);
17265 tw32(WDMAC_STATUS, 0);
17266
17267 tw32(BUFMGR_MODE, 0);
17268 tw32(FTQ_RESET, 0);
17269
17270 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17271 test_desc.addr_lo = buf_dma & 0xffffffff;
17272 test_desc.nic_mbuf = 0x00002100;
17273 test_desc.len = size;
17274
17275 /*
17276	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17277	 * the *second* time the tg3 driver was loaded after an
17278 * initial scan.
17279 *
17280 * Broadcom tells me:
17281 * ...the DMA engine is connected to the GRC block and a DMA
17282 * reset may affect the GRC block in some unpredictable way...
17283 * The behavior of resets to individual blocks has not been tested.
17284 *
17285 * Broadcom noted the GRC reset will also reset all sub-components.
17286 */
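/* cqid_sqid appears to select the hardware completion and submission
 * queue ids used by the read (host-to-chip) and write (chip-to-host)
 * DMA engines; interpretation based on the descriptor field name,
 * with the exact values being hardware-defined.
 */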
17287 if (to_device) {
17288 test_desc.cqid_sqid = (13 << 8) | 2;
17289
17290 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17291 udelay(40);
17292 } else {
17293 test_desc.cqid_sqid = (16 << 8) | 7;
17294
17295 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17296 udelay(40);
17297 }
17298 test_desc.flags = 0x00000005;
17299
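/* Copy the descriptor into NIC SRAM one 32-bit word at a time
 * through the PCI memory window registers.
 */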
17300 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17301 u32 val;
17302
17303 val = *(((u32 *)&test_desc) + i);
17304 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17305 sram_dma_descs + (i * sizeof(u32)));
17306 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17307 }
17308 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17309
17310 if (to_device)
17311 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17312 else
17313 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17314
17315 ret = -ENODEV;
17316 for (i = 0; i < 40; i++) {
17317 u32 val;
17318
17319 if (to_device)
17320 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17321 else
17322 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17323 if ((val & 0xffff) == sram_dma_descs) {
17324 ret = 0;
17325 break;
17326 }
17327
17328 udelay(100);
17329 }
17330
17331 return ret;
17332}
17333
17334#define TEST_BUFFER_SIZE 0x2000
17335
17336static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17337 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17338 { },
17339};
17340
17341static int tg3_test_dma(struct tg3 *tp)
17342{
17343 dma_addr_t buf_dma;
17344 u32 *buf, saved_dma_rwctrl;
17345 int ret = 0;
17346
17347 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17348 &buf_dma, GFP_KERNEL);
17349 if (!buf) {
17350 ret = -ENOMEM;
17351 goto out_nofree;
17352 }
17353
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform the DMA test with the maximum write
	 * burst size to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

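	/* Test strategy: fill the buffer with a counting pattern, DMA it
	 * to the NIC and back, then verify every word. On a mismatch,
	 * retry once with the DMA write boundary clamped to 16 bytes
	 * before giving up.
	 */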
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

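/* Program the buffer manager watermark defaults. They differ per chip
 * family (5700-class, 5705-class, 57765-class), and the jumbo-frame
 * watermarks are kept separate from the standard-frame ones.
 */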
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

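		/* The low five bits of CLOCK_CTRL report the PCI-X bus
		 * speed, decoded below; the 5704CIOBE board ID also
		 * implies 133MHz.
		 */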
		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

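	/* 5705-class and newer parts do not implement the per-IRQ
	 * coalescing controls or the statistics block tick, so
	 * advertise zeros for those fields.
	 */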
	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev)) {
			tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping. DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for the subset of devices that
	 * support MAC-LOOPBACK. Eventually this needs to be enhanced to
	 * allow INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9000 or 1500, depending on hardware */
	dev->min_mtu = TG3_MIN_MTU;
	dev->max_mtu = TG3_MAX_MTU(tp);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS. If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts. Reuse the
		 * mailbox values for the next iteration. The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	/*
	 * Reset the chip in case a UNDI or EFI driver did not shut down
	 * DMA. The DMA self test would otherwise enable the WDMAC, and
	 * we would see (spurious) pending DMA on the PCI bus at that
	 * point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tg3_full_lock(tp, 0);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
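		/* If clock registration fails, carry on without a PTP
		 * clock rather than failing the probe.
		 */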
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

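	/* If the low-power transition fails, restart the hardware so
	 * the interface stays usable instead of being left halted.
	 */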
	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	/* Only flag recovery for frozen (recoverable) errors; there is
	 * no point recovering from a permanent failure.
	 */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
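	/* A permanent failure means the device is gone: re-enable NAPI
	 * so dev_close() can complete, close the device, and tell the
	 * PCI core to disconnect. Otherwise disable the device and
	 * wait for the slot reset callback.
	 */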
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

18272/**
18273 * tg3_io_slot_reset - called after the pci bus has been reset.
18274 * @pdev: Pointer to PCI device
18275 *
18276 * Restart the card from scratch, as if from a cold-boot.
18277 * At this point, the card has exprienced a hard reset,
18278 * followed by fixups by BIOS, and has its config space
18279 * set up identically to what it was at cold boot.
18280 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
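	/* pci_restore_state() clears the PCI core's saved-state flag,
	 * so save config space again to keep a snapshot for any
	 * future reset.
	 */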
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

18321/**
18322 * tg3_io_resume - called when traffic can start flowing again.
18323 * @pdev: Pointer to PCI device
18324 *
18325 * This callback is called when the error recovery driver tells
18326 * us that its OK to resume normal operation.
18327 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);