// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 */

#include "net_driver.h"
#include "rx_common.h"
#include "tx_common.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mcdi_port.h"
#include "mcdi_port_common.h"
#include "mcdi_functions.h"
#include "nic.h"
#include "mcdi_filters.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/udp_tunnel.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV		7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};

/* VLAN list entry */
struct efx_ef10_vlan {
	struct list_head list;
	u16 vid;
};

static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);
static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels;

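/* The MC reports its warm boot count through the soft status register:
 * the upper word reads 0xb007 (hex-speak for "boot") while the count in
 * the lower word is valid; anything else means the MC is still booting.
 */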
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}

/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
 * I/O space and BAR 2(&3) for memory.  On SFC9250 (Medford2), there is no I/O
 * bar; PFs use BAR 0/1 for memory.
 */
static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
{
	switch (efx->pci_dev->device) {
	case 0x0b03: /* SFC9250 PF */
		return 0;
	default:
		return 2;
	}
}

/* All VFs use BAR 0/1 for memory */
static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
{
	return 0;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	int bar;

	bar = efx->type->mem_bar(efx);
	return resource_size(&efx->pci_dev->resource[bar]);
}

static bool efx_ef10_is_vf(struct efx_nic *efx)
{
	return efx->type->is_vf;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}
#endif

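/* Read the datapath firmware capability flags via MC_CMD_GET_CAPABILITIES
 * and cache them in nic_data.  Newer firmware returns progressively longer
 * responses (V2 adds FLAGS2 and the PIO buffer size, V3 the VI window
 * mode, V4 the number of MAC statistics), so each extension is only
 * parsed if the response is long enough to contain it.
 */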
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
		nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
				GET_CAPABILITIES_V2_OUT_FLAGS2);
		nic_data->piobuf_size = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
	} else {
		nic_data->datapath_caps2 = 0;
		nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
	}

	/* record the DPCPU firmware IDs to determine VEB vswitching support.
	 */
	nic_data->rx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
	nic_data->tx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
		u8 vi_window_mode = MCDI_BYTE(outbuf,
				GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);

		rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
		if (rc)
			return rc;
	} else {
		/* keep default VI stride */
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware did not report VI window mode, assuming vi_stride = %u\n",
			  efx->vi_stride);
	}

	if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
		efx->num_mac_stats = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware reports num_mac_stats = %u\n",
			  efx->num_mac_stats);
	} else {
		/* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware did not report num_mac_stats, assuming %u\n",
			  efx->num_mac_stats);
	}

	return 0;
}

static void efx_ef10_read_licensed_features(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
		       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
		return;

	nic_data->licensed_features = MCDI_QWORD(outbuf,
					 LICENSING_V3_OUT_LICENSED_FEATURES);
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int implemented;
	unsigned int enabled;
	int rc;

	nic_data->workaround_35388 = false;
	nic_data->workaround_61265 = false;

	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);

	if (rc == -ENOSYS) {
		/* Firmware without GET_WORKAROUNDS - not a problem. */
		rc = 0;
	} else if (rc == 0) {
		/* Bug61265 workaround is always enabled if implemented. */
		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
			nic_data->workaround_61265 = true;

		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			nic_data->workaround_35388 = true;
		} else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			/* Workaround is implemented but not enabled.
			 * Try to enable it.
			 */
			rc = efx_mcdi_set_workaround(efx,
						     MC_CMD_WORKAROUND_BUG35388,
						     true, NULL);
			if (rc == 0)
				nic_data->workaround_35388 = true;
			/* If we failed to set the workaround just carry on. */
			rc = 0;
		}
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 61265 is %sabled\n",
		  nic_data->workaround_61265 ? "en" : "dis");

	return rc;
}

static void efx_ef10_process_timer_config(struct efx_nic *efx,
					  const efx_dword_t *data)
{
	unsigned int max_count;

	if (EFX_EF10_WORKAROUND_61265(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
		efx->timer_max_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	} else {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "got timer properties from MC: quantum %u ns; max %u ns\n",
		  efx->timer_quantum_ns, efx->timer_max_ns);
}

static int efx_ef10_get_timer_config(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
	int rc;

	rc = efx_ef10_get_timer_workarounds(efx);
	if (rc)
		return rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
				outbuf, sizeof(outbuf), NULL);

	if (rc == 0) {
		efx_ef10_process_timer_config(efx, outbuf);
	} else if (rc == -ENOSYS || rc == -EPERM) {
		/* Not available - fall back to Huntington defaults. */
		unsigned int quantum;

		rc = efx_ef10_get_sysclk_freq(efx);
		if (rc < 0)
			return rc;

		quantum = 1536000 / rc; /* 1536 cycles */
		efx->timer_quantum_ns = quantum;
		efx->timer_max_ns = efx->type->timer_period_max * quantum;
		rc = 0;
	} else {
		efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
				       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
				       NULL, 0, rc);
	}

	return rc;
}

static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	size_t outlen;
	int num_addrs, rc;

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

	if (rc)
		return rc;
	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
		return -EIO;

	num_addrs = MCDI_DWORD(outbuf,
			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

	WARN_ON(num_addrs != 1);

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

	return 0;
}

static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct efx_nic *efx = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		       ? 1 : 0);
}

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct efx_nic *efx = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		       ? 1 : 0);
}

static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
		if (vlan->vid == vid)
			return vlan;
	}

	return NULL;
}

static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (vlan) {
		/* We add VID 0 on init. 8021q adds it on module init
		 * for all interfaces with VLAN filtering feature.
		 */
		if (vid == 0)
			goto done_unlock;
		netif_warn(efx, drv, efx->net_dev,
			   "VLAN %u already added\n", vid);
		rc = -EALREADY;
		goto fail_exist;
	}

	rc = -ENOMEM;
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		goto fail_alloc;

	vlan->vid = vid;

	list_add_tail(&vlan->list, &nic_data->vlan_list);

	if (efx->filter_state) {
		mutex_lock(&efx->mac_lock);
		down_write(&efx->filter_sem);
		rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
		mutex_unlock(&efx->mac_lock);
		if (rc)
			goto fail_filter_add_vlan;
	}

done_unlock:
	mutex_unlock(&nic_data->vlan_lock);
	return 0;

fail_filter_add_vlan:
	list_del(&vlan->list);
	kfree(vlan);
fail_alloc:
fail_exist:
	mutex_unlock(&nic_data->vlan_lock);
	return rc;
}

static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
				       struct efx_ef10_vlan *vlan)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	if (efx->filter_state) {
		down_write(&efx->filter_sem);
		efx_mcdi_filter_del_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
	}

	list_del(&vlan->list);
	kfree(vlan);
}

static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc = 0;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with VLAN filtering feature. We need to keep it to receive
	 * untagged traffic.
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (!vlan) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u to be deleted not found\n", vid);
		rc = -ENOENT;
	} else {
		efx_ef10_del_vlan_internal(efx, vlan);
	}

	mutex_unlock(&nic_data->vlan_lock);

	return rc;
}

static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan, *next_vlan;

	mutex_lock(&nic_data->vlan_lock);
	list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
		efx_ef10_del_vlan_internal(efx, vlan);
	mutex_unlock(&nic_data->vlan_lock);
}

static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
		   NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

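/* Main probe path, shared by the PF and VF variants.  This allocates the
 * MCDI buffer, waits out any in-progress MC reboot, resets the function,
 * sizes the VI space and sets up sysfs flags, VLANs and (on trusted
 * functions) UDP tunnel offload.  The error labels unwind in strict
 * reverse order of setup.
 */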
static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	int i, rc;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	mutex_init(&nic_data->udp_tunnels_lock);
	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
		nic_data->udp_tunnels[i].type =
			TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID;

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	if (rc)
		goto fail4;

	rc = efx_get_pf_index(efx, &nic_data->pf_index);
	if (rc)
		goto fail5;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail5;

	efx_ef10_read_licensed_features(efx);

	/* We can have one VI for each vi_stride-byte region.
	 * However, until we use TX option descriptors we need up to four
	 * TX queues per channel for different checksumming combinations.
	 */
	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
		efx->tx_queues_per_channel = 4;
	else
		efx->tx_queues_per_channel = 2;
	efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride;
	if (!efx->max_vis) {
		netif_err(efx, drv, efx->net_dev, "error determining max VIs\n");
		rc = -EIO;
		goto fail5;
	}
	efx->max_channels = min_t(unsigned int, EFX_MAX_CHANNELS,
				  efx->max_vis / efx->tx_queues_per_channel);
	efx->max_tx_channels = efx->max_channels;
	if (WARN_ON(efx->max_channels == 0)) {
		rc = -EIO;
		goto fail5;
	}

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
		efx->net_dev->hw_features |= NETIF_F_RXFCS;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail5;
	efx->port_num = rc;

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail5;

	rc = efx_ef10_get_timer_config(efx);
	if (rc < 0)
		goto fail5;

	rc = efx_mcdi_mon_probe(efx);
	if (rc && rc != -EPERM)
		goto fail5;

	efx_ptp_defer_probe_with_channel(efx);

#ifdef CONFIG_SFC_SRIOV
	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
	} else
#endif
		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

	INIT_LIST_HEAD(&nic_data->vlan_list);
	mutex_init(&nic_data->vlan_lock);

	/* Add unspecified VID to support VLAN filtering being disabled */
	rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
	if (rc)
		goto fail_add_vid_unspec;

	/* If VLAN filtering is enabled, we need VID 0 to get untagged
	 * traffic.  It is added automatically if 8021q module is loaded,
	 * but we can't rely on it since module may be not loaded.
	 */
	rc = efx_ef10_add_vlan(efx, 0);
	if (rc)
		goto fail_add_vid_0;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) &&
	    efx->mcdi->fn_flags &
	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED))
		efx->net_dev->udp_tunnel_nic_info = &efx_ef10_udp_tunnels;

	return 0;

fail_add_vid_0:
	efx_ef10_cleanup_vlans(efx);
fail_add_vid_unspec:
	mutex_destroy(&nic_data->vlan_lock);
	efx_ptp_remove(efx);
	efx_mcdi_mon_remove(efx);
fail5:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
	efx_mcdi_detach(efx);

	mutex_lock(&nic_data->udp_tunnels_lock);
	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);
	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}

#ifdef EFX_USE_PIO

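/* PIO buffers are small on-NIC buffers that the TX path can write packet
 * data into directly through the write-combining BAR mapping, avoiding a
 * DMA fetch for small packets.  They are a scarce resource shared between
 * all functions of the controller, so allocation failures are non-fatal.
 */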
static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
					outbuf, sizeof(outbuf), &outlen);
		if (rc) {
			/* Don't display the MC error if we didn't have space
			 * for a VF.
			 */
			if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
				efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
						       0, outbuf, outlen, rc);
			break;
		}
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		/* Extra channels, even those with TXQs (PTP), do not require
		 * PIO resources.
		 */
		if (!channel->type->want_pio ||
		    channel->channel >= efx->xdp_channel_offset)
			continue;

		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / nic_data->piobuf_size;
			offset = offset % nic_data->piobuf_size;

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * efx->vi_stride + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	/* inbuf was defined for MC_CMD_LINK_PIOBUF.  We can use the same
	 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
	 */
	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	/* All our existing PIO buffers went away */
	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->piobuf = NULL;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data_pf;
	struct pci_dev *pci_dev_pf;
	struct efx_nic *efx_pf;
	struct ef10_vf *vf;

	if (efx->pci_dev->is_virtfn) {
		pci_dev_pf = efx->pci_dev->physfn;
		if (pci_dev_pf) {
			efx_pf = pci_get_drvdata(pci_dev_pf);
			nic_data_pf = efx_pf->nic_data;
			vf = nic_data_pf->vf + nic_data->vf_index;
			vf->efx = NULL;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}
#endif

	efx_ef10_cleanup_vlans(efx);
	mutex_destroy(&nic_data->vlan_lock);

	efx_ptp_remove(efx);

	efx_mcdi_mon_remove(efx);

	efx_mcdi_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_mcdi_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

	efx_mcdi_detach(efx);

	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	mutex_lock(&nic_data->udp_tunnels_lock);
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);

	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_probe_pf(struct efx_nic *efx)
{
	return efx_ef10_probe(efx);
}

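/* Query the flags of the vadaptor attached to the given vport.  If the
 * firmware lacks the VADAPTOR_QUERY capability the RPC is skipped and the
 * (zero-initialised) output buffer yields all-zero flags.
 */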
int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
			    u32 *port_flags, u32 *vadaptor_flags,
			    unsigned int *vlan_tags)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
	size_t outlen;
	int rc;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
		MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
			       port_id);

		rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;

		if (outlen < sizeof(outbuf)) {
			rc = -EIO;
			return rc;
		}
	}

	if (port_flags)
		*port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
	if (vadaptor_flags)
		*vadaptor_flags =
			MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
	if (vlan_tags)
		*vlan_tags =
			MCDI_DWORD(outbuf,
				   VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);

	return 0;
}

int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vport_add_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

int efx_ef10_vport_del_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
	int rc;
	struct pci_dev *pci_dev_pf;

	/* If the parent PF has no VF data structure, it doesn't know about this
	 * VF so fail probe.  The VF needs to be re-created.  This can happen
	 * if the PF driver is unloaded while the VF is assigned to a guest.
	 */
	pci_dev_pf = efx->pci_dev->physfn;
	if (pci_dev_pf) {
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

		if (!nic_data_pf->vf) {
			netif_info(efx, drv, efx->net_dev,
				   "The VF cannot link to its parent PF; "
				   "please destroy and re-create the VF\n");
			return -EBUSY;
		}
	}

	rc = efx_ef10_probe(efx);
	if (rc)
		return rc;

	rc = efx_ef10_get_vf_index(efx);
	if (rc)
		goto fail;

	if (efx->pci_dev->is_virtfn) {
		if (efx->pci_dev->physfn) {
			struct efx_nic *efx_pf =
				pci_get_drvdata(efx->pci_dev->physfn);
			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
			struct efx_ef10_nic_data *nic_data = efx->nic_data;

			nic_data_p->vf[nic_data->vf_index].efx = efx;
			nic_data_p->vf[nic_data->vf_index].pci_dev =
				efx->pci_dev;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}

	return 0;

fail:
	efx_ef10_remove(efx);
	return rc;
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
	return 0;
}
#endif

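/* A VI ("virtual interface") is the EF10 unit of datapath resource: each
 * VI is a page of doorbell registers that can back one event queue, one
 * RX queue and one TX queue.
 */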
static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	return efx_mcdi_alloc_vis(efx, min_vis, max_vis, &nic_data->vi_base,
				  &nic_data->n_allocated_vis);
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	unsigned int min_vis = max_t(unsigned int, efx->tx_queues_per_channel,
				     efx_separate_tx_channels ? 2 : 1);
	unsigned int channel_vis, pio_write_vi_base, max_vis;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	void __iomem *membase;
	int rc;

	channel_vis = max(efx->n_channels,
			  ((efx->n_tx_channels + efx->n_extra_tx_channels) *
			   efx->tx_queues_per_channel) +
			   efx->n_xdp_channels * efx->xdp_tx_per_channel);
	if (efx->max_vis && efx->max_vis < channel_vis) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Reducing channel VIs from %u to %u\n",
			  channel_vis, efx->max_vis);
		channel_vis = efx->max_vis;
	}

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     nic_data->piobuf_size / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc == -ENOSPC)
			netif_dbg(efx, probe, efx->net_dev,
				  "out of PIO buffers; cannot allocate more\n");
		else if (rc == -EPERM)
			netif_dbg(efx, probe, efx->net_dev,
				  "not permitted to allocate PIO buffers\n");
		else if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 *
	 * The UC mapping contains (channel_vis - 1) complete VIs and the
	 * first 4K of the next VI.  Then the WC mapping begins with
	 * the remainder of this last VI.
	 */
	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
				     ER_DZ_TX_PIOBUF);
	if (nic_data->n_piobufs) {
		/* pio_write_vi_base rounds down to give the number of complete
		 * VIs inside the UC mapping.
		 */
		pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      efx->vi_stride) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = channel_vis;
	}

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_mcdi_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	if (nic_data->n_allocated_vis < channel_vis) {
		netif_info(efx, drv, efx->net_dev,
			   "Could not allocate enough VIs to satisfy RSS"
			   " requirements. Performance may not be optimal.\n");
		/* We didn't get the VIs to populate our channels.
		 * We could keep what we got but then we'd have more
		 * interrupts than we need.
		 * Instead calculate new max_channels and restart
		 */
		efx->max_channels = nic_data->n_allocated_vis;
		efx->max_tx_channels =
			nic_data->n_allocated_vis / efx->tx_queues_per_channel;

		efx_mcdi_free_vis(efx);
		return -EAGAIN;
	}

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
			 uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}

static void efx_ef10_fini_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	kfree(nic_data->mc_stats);
	nic_data->mc_stats = NULL;
}

static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	netdev_features_t hw_enc_features = 0;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (efx->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		efx->must_realloc_vis = false;
	}

	nic_data->mc_stats = kmalloc(efx->num_mac_stats * sizeof(__le64),
				     GFP_KERNEL);
	if (!nic_data->mc_stats)
		return -ENOMEM;

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal.
		 * Permission errors are less important - we've presumably
		 * had the PIO buffer licence removed.
		 */
		if (rc == -EPERM)
			netif_dbg(efx, drv, efx->net_dev,
				  "not permitted to restore PIO buffers\n");
		else if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	/* add encapsulated checksum offload features */
	if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
		hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	/* add encapsulated TSO features */
	if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
		netdev_features_t encap_tso_features;

		encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;

		hw_enc_features |= encap_tso_features | NETIF_F_TSO;
		efx->net_dev->features |= encap_tso_features;
	}
	efx->net_dev->hw_enc_features = hw_enc_features;

	/* don't fail init if RSS setup doesn't work */
	rc = efx->type->rx_push_rss_config(efx, false,
					   efx->rss_context.rx_indir_table, NULL);

	return 0;
}

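/* Mark every MC-owned resource as lost after an MC reboot or equivalent
 * reset: VIs, filters, PIO buffers, the RSS context and any
 * driver-created vswitches/vports must all be reallocated.
 */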
static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
	unsigned int i;
#endif

	/* All our allocations have been reset */
	efx->must_realloc_vis = true;
	efx_mcdi_filter_table_reset_mc_allocations(efx);
	nic_data->must_restore_piobufs = true;
	efx_ef10_forget_old_piobufs(efx);
	efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;

	/* Driver-created vswitches and vports must be re-created */
	nic_data->must_probe_vswitching = true;
	efx->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
	if (nic_data->vf)
		for (i = 0; i < efx->vf_count; i++)
			nic_data->vf[i].vport_id = 0;
#endif
}

static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
{
	if (reason == RESET_TYPE_MC_FAILURE)
		return RESET_TYPE_DATAPATH;

	return efx_mcdi_map_reset_reason(reason);
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc = efx_mcdi_reset(efx, reset_type);

	/* Unprivileged functions return -EPERM, but need to return success
	 * here so that the datapath is brought back up.
	 */
	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
		rc = 0;

	/* If it was a port reset, trigger reallocation of MC resources.
	 * Note that on an MC reset nothing needs to be done now because we'll
	 * detect the MC reset later and handle it then.
	 * For an FLR, we never get an MC reset event, but the MC has reset all
	 * resources assigned to us, so we have to trigger reallocation now.
	 */
	if ((reset_type == RESET_TYPE_ALL ||
	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
		efx_ef10_table_reset_mc_allocations(efx);
	return rc;
}

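/* Statistics descriptors.  Each entry gives the stat's ethtool name (NULL
 * for stats used only internally), the width of the DMA field in bits,
 * and the byte offset of the 64-bit counter in the MAC stats DMA buffer.
 * EF10_OTHER_STAT entries are derived in software rather than DMA'd.
 */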
#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }

static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
	EF10_DMA_STAT(port_tx_packets, TX_PKTS),
	EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
	EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(port_rx_good_bytes),
	EF10_OTHER_STAT(port_rx_bad_bytes),
	EF10_DMA_STAT(port_rx_packets, RX_PKTS),
	EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
	EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
	EFX_GENERIC_SW_STAT(rx_nodesc_trunc),
	EFX_GENERIC_SW_STAT(rx_noskb_drops),
	EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
	EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
	EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
	EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
	EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
	EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
	EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
	EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
	EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
	EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
	EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
	EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
	EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
	EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
	EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
	EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
	EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
	EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
	EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
	EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
	EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
	EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
	EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
	EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
	EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
	EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
	EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
	EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
	EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
	EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
	EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
	EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
	EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
	EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
	EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
	EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
	EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
	EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
	EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
	EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
	EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
	EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
	EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
	EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
	EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
	EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
};

#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |	\
			       (1ULL << EF10_STAT_port_tx_packets) |	\
			       (1ULL << EF10_STAT_port_tx_pause) |	\
			       (1ULL << EF10_STAT_port_tx_unicast) |	\
			       (1ULL << EF10_STAT_port_tx_multicast) |	\
			       (1ULL << EF10_STAT_port_tx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_bytes) |	\
			       (1ULL <<					\
				EF10_STAT_port_rx_bytes_minus_good_bytes) | \
			       (1ULL << EF10_STAT_port_rx_good_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_bad_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_packets) |	\
			       (1ULL << EF10_STAT_port_rx_good) |	\
			       (1ULL << EF10_STAT_port_rx_bad) |	\
			       (1ULL << EF10_STAT_port_rx_pause) |	\
			       (1ULL << EF10_STAT_port_rx_control) |	\
			       (1ULL << EF10_STAT_port_rx_unicast) |	\
			       (1ULL << EF10_STAT_port_rx_multicast) |	\
			       (1ULL << EF10_STAT_port_rx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_lt64) |	\
			       (1ULL << EF10_STAT_port_rx_64) |		\
			       (1ULL << EF10_STAT_port_rx_65_to_127) |	\
			       (1ULL << EF10_STAT_port_rx_128_to_255) |	\
			       (1ULL << EF10_STAT_port_rx_256_to_511) |	\
			       (1ULL << EF10_STAT_port_rx_512_to_1023) |\
			       (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
			       (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
			       (1ULL << EF10_STAT_port_rx_gtjumbo) |	\
			       (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
			       (1ULL << EF10_STAT_port_rx_overflow) |	\
			       (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
			       (1ULL << GENERIC_STAT_rx_nodesc_trunc) |	\
			       (1ULL << GENERIC_STAT_rx_noskb_drops))

/* On 7000 series NICs, these statistics are only provided by the 10G MAC.
 * For a 10G/40G switchable port we do not expose these because they might
 * not include all the packets they should.
 * On 8000 series NICs these statistics are always provided.
 */
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) |	\
				 (1ULL << EF10_STAT_port_tx_lt64) |	\
				 (1ULL << EF10_STAT_port_tx_64) |	\
				 (1ULL << EF10_STAT_port_tx_65_to_127) |\
				 (1ULL << EF10_STAT_port_tx_128_to_255) |\
				 (1ULL << EF10_STAT_port_tx_256_to_511) |\
				 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
				 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
				 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))

/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 * switchable port we do expose these because the errors will otherwise
 * be silent.
 */
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
				  (1ULL << EF10_STAT_port_rx_length_error))

/* These statistics are only provided if the firmware supports the
 * capability PM_AND_RXDP_COUNTERS.
 */
#define HUNT_PM_AND_RXDP_STAT_MASK (					\
	(1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_mapping) |		\
	(1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_streaming_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |			\
	(1ULL << EF10_STAT_port_rx_dp_hlb_wait))

/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
 * These bits are in the second u64 of the raw mask.
 */
#define EF10_FEC_STAT_MASK (						\
	(1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) |		\
	(1ULL << (EF10_STAT_fec_corrected_errors - 64)) |		\
	(1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) |	\
	(1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) |	\
	(1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) |	\
	(1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))

/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
 * These bits are in the second u64 of the raw mask.
 */
#define EF10_CTPIO_STAT_MASK (						\
	(1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) |		\
	(1ULL << (EF10_STAT_ctpio_long_write_success - 64)) |		\
	(1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) |		\
	(1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) |		\
	(1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) |		\
	(1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) |			\
	(1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) |		\
	(1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) |		\
	(1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) |		\
	(1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) |		\
	(1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) |		\
	(1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) |		\
	(1ULL << (EF10_STAT_ctpio_success - 64)) |			\
	(1ULL << (EF10_STAT_ctpio_fallback - 64)) |			\
	(1ULL << (EF10_STAT_ctpio_poison - 64)) |			\
	(1ULL << (EF10_STAT_ctpio_erase - 64)))

static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
	u64 raw_mask = HUNT_COMMON_STAT_MASK;
	u32 port_caps = efx_mcdi_phy_get_caps(efx);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (!(efx->mcdi->fn_flags &
	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		return 0;

	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
		/* 8000 series have everything even at 40G */
		if (nic_data->datapath_caps2 &
		    (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN))
			raw_mask |= HUNT_10G_ONLY_STAT_MASK;
	} else {
		raw_mask |= HUNT_10G_ONLY_STAT_MASK;
	}

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;

	return raw_mask;
}

static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 raw_mask[2];

	raw_mask[0] = efx_ef10_raw_stat_mask(efx);

	/* Only show vadaptor stats when EVB capability is present */
	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
		raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
		raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
	} else {
		raw_mask[1] = 0;
	}
	/* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
	if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
		raw_mask[1] |= EF10_FEC_STAT_MASK;

	/* CTPIO stats appear in V3. Only show them on devices that actually
	 * support CTPIO. Although this driver doesn't use CTPIO others might,
	 * and we may be reporting the stats for the underlying port.
1724 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
1725 (nic_data->datapath_caps2 &
1726 (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
1727 raw_mask[1] |= EF10_CTPIO_STAT_MASK;
1728
1729#if BITS_PER_LONG == 64
1730 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
1731 mask[0] = raw_mask[0];
1732 mask[1] = raw_mask[1];
1733#else
1734 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
1735 mask[0] = raw_mask[0] & 0xffffffff;
1736 mask[1] = raw_mask[0] >> 32;
1737 mask[2] = raw_mask[1] & 0xffffffff;
1738#endif
1739}
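
/* Illustrative sketch (not part of the driver): the raw mask above is two
 * u64 words, while for_each_set_bit() walks an unsigned-long bitmap. This
 * hypothetical helper shows where a given EF10_STAT_* index lands in that
 * bitmap on either word size; it mirrors the #if branches in
 * efx_ef10_get_stat_mask() above.
 */
static __maybe_unused void efx_ef10_stat_bit_demo(unsigned int stat_index,
						  unsigned int *word,
						  unsigned int *bit)
{
	/* e.g. on a 32-bit build a second-u64 stat such as
	 * EF10_STAT_fec_corrected_errors lands in mask word 2.
	 */
	*word = stat_index / BITS_PER_LONG;
	*bit = stat_index % BITS_PER_LONG;
}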

static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);

	efx_ef10_get_stat_mask(efx, mask);
	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
				      mask, names);
}

static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
					   struct rtnl_link_stats64 *core_stats)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	size_t stats_count = 0, index;

	efx_ef10_get_stat_mask(efx, mask);

	if (full_stats) {
		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
			if (efx_ef10_stat_desc[index].name) {
				*full_stats++ = stats[index];
				++stats_count;
			}
		}
	}

	if (!core_stats)
		return stats_count;

	if (nic_data->datapath_caps &
	    1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
		/* Use vadaptor stats. */
		core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
					 stats[EF10_STAT_rx_multicast] +
					 stats[EF10_STAT_rx_broadcast];
		core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
					 stats[EF10_STAT_tx_multicast] +
					 stats[EF10_STAT_tx_broadcast];
		core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
				       stats[EF10_STAT_rx_multicast_bytes] +
				       stats[EF10_STAT_rx_broadcast_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
				       stats[EF10_STAT_tx_multicast_bytes] +
				       stats[EF10_STAT_tx_broadcast_bytes];
		core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[EF10_STAT_rx_multicast];
		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
		core_stats->rx_errors = core_stats->rx_crc_errors;
		core_stats->tx_errors = stats[EF10_STAT_tx_bad];
	} else {
		/* Use port stats. */
		core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
		core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
		core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
		core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
					 stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
		core_stats->rx_length_errors =
				stats[EF10_STAT_port_rx_gtjumbo] +
				stats[EF10_STAT_port_rx_length_error];
		core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
		core_stats->rx_frame_errors =
				stats[EF10_STAT_port_rx_align_error];
		core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors);
	}

	return stats_count;
}

static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
				       struct rtnl_link_stats64 *core_stats)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	u64 *stats = nic_data->stats;

	efx_ef10_get_stat_mask(efx, mask);

	efx_nic_copy_stats(efx, nic_data->mc_stats);
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
			     mask, stats, nic_data->mc_stats, false);

	/* Update derived statistics */
	efx_nic_fix_nodesc_drop_stat(efx,
				     &stats[EF10_STAT_port_rx_nodesc_drops]);
	/* MC Firmware reads RX_BYTES and RX_GOOD_BYTES from the MAC.
	 * It then calculates RX_BAD_BYTES and DMAs it to us with RX_BYTES.
	 * We report these as port_rx_ stats. We are not given RX_GOOD_BYTES.
	 * Here we calculate port_rx_good_bytes.
	 */
	stats[EF10_STAT_port_rx_good_bytes] =
		stats[EF10_STAT_port_rx_bytes] -
		stats[EF10_STAT_port_rx_bytes_minus_good_bytes];

	/* The asynchronous reads used to calculate RX_BAD_BYTES in
	 * MC Firmware are done such that we should not see an increase in
	 * RX_BAD_BYTES when a good packet has arrived. Unfortunately this
	 * does mean that the stat can decrease at times. Here we do not
	 * update the stat unless it has increased or has gone to zero
	 * (in the case of the NIC rebooting).
	 * Please see Bug 33781 for a discussion of why things work this way.
	 */
	efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
			     stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
	efx_update_sw_stats(efx, stats);

	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}
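
/* Illustrative sketch (not part of the driver): the latch semantics that
 * efx_update_diff_stat() provides for port_rx_bad_bytes above - the stored
 * value only moves forward, so a transiently smaller RX_BAD_BYTES reading
 * (see the Bug 33781 comment) never makes the reported stat go backwards.
 * The reboot case is handled separately by zeroing the stat.
 */
static __maybe_unused void efx_ef10_diff_stat_demo(u64 *stat, u64 diff)
{
	/* Signed comparison of the difference, so only apparent
	 * increases are accepted and decreases are ignored.
	 */
	if ((s64)(diff - *stat) > 0)
		*stat = diff;
}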

static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
	__must_hold(&efx->stats_lock)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	u32 dma_len = efx->num_mac_stats * sizeof(u64);
	struct efx_buffer stats_buf;
	__le64 *dma_stats;
	int rc;

	spin_unlock_bh(&efx->stats_lock);

	efx_ef10_get_stat_mask(efx, mask);

	rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_KERNEL);
	if (rc) {
		spin_lock_bh(&efx->stats_lock);
		return rc;
	}

	dma_stats = stats_buf.addr;
	dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;

	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
			      MAC_STATS_IN_DMA, 1);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
				NULL, 0, NULL);
	spin_lock_bh(&efx->stats_lock);
	if (rc) {
		/* Expect ENOENT if DMA queues have not been set up */
		if (rc != -ENOENT || atomic_read(&efx->active_queues))
			efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
					       sizeof(inbuf), NULL, 0, rc);
		goto out;
	}

	generation_end = dma_stats[efx->num_mac_stats - 1];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
		WARN_ON_ONCE(1);
		goto out;
	}
	rmb();
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
			     stats, stats_buf.addr, false);
	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start) {
		rc = -EAGAIN;
		goto out;
	}

	efx_update_sw_stats(efx, stats);
out:
	efx_nic_free_buffer(efx, &stats_buf);
	return rc;
}
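
/* Illustrative sketch (not part of the driver): the consistency check used
 * above. The MC bumps the generation-start word before rewriting the DMAed
 * stats block and writes the generation-end word (the final u64) afterwards,
 * so a copy taken in between is only valid when the two match; otherwise
 * the caller retries (the -EAGAIN path).
 */
static __maybe_unused bool efx_ef10_stats_snapshot_ok_demo(__le64 gen_start,
							   __le64 gen_end)
{
	return gen_end != EFX_MC_STATS_GENERATION_INVALID &&
	       gen_start == gen_end;
}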

static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
				       struct rtnl_link_stats64 *core_stats)
{
	if (efx_ef10_try_update_nic_stats_vf(efx))
		return 0;

	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}

static size_t efx_ef10_update_stats_atomic_vf(struct efx_nic *efx, u64 *full_stats,
					      struct rtnl_link_stats64 *core_stats)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	/* In atomic context, cannot update HW stats. Just update the
	 * software stats and return so the caller can continue.
	 */
	efx_update_sw_stats(efx, nic_data->stats);
	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}

static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned int mode, usecs;
	efx_dword_t timer_cmd;

	if (channel->irq_moderation_us) {
		mode = 3;
		usecs = channel->irq_moderation_us;
	} else {
		mode = 0;
		usecs = 0;
	}

	if (EFX_EF10_WORKAROUND_61265(efx)) {
		MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
		unsigned int ns = usecs * 1000;

		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
			       channel->channel);
		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
		MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);

		efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
				   inbuf, sizeof(inbuf), 0, NULL, 0);
	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
		unsigned int ticks = efx_usecs_to_ticks(efx, usecs);

		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
				     EFE_DD_EVQ_IND_TIMER_FLAGS,
				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
				     ERF_DD_EVQ_IND_TIMER_VAL, ticks);
		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		unsigned int ticks = efx_usecs_to_ticks(efx, usecs);

		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
				     ERF_DZ_TC_TIMER_VAL, ticks,
				     ERF_FZ_TC_TMR_REL_VAL, ticks);
		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
				channel->channel);
	}
}

static void efx_ef10_get_wol_vf(struct efx_nic *efx,
				struct ethtool_wolinfo *wol) {}

static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
{
	return -EOPNOTSUPP;
}

static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
{
	if (type != 0)
		return -EINVAL;
	return 0;
}

static void efx_ef10_mcdi_request(struct efx_nic *efx,
				  const efx_dword_t *hdr, size_t hdr_len,
				  const efx_dword_t *sdu, size_t sdu_len)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(pdu, hdr, hdr_len);
	memcpy(pdu + hdr_len, sdu, sdu_len);
	wmb();

	/* The hardware provides 'low' and 'high' (doorbell) registers
	 * for passing the 64-bit address of an MCDI request to
	 * firmware. However the dwords are swapped by firmware. The
	 * least significant bits of the doorbell are then 0 for all
	 * MCDI requests due to alignment.
	 */
	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
		    ER_DZ_MC_DB_LWRD);
	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
		    ER_DZ_MC_DB_HWRD);
}
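
/* Illustrative sketch (not part of the driver): the register pairing used
 * by efx_ef10_mcdi_request() above. Because firmware swaps the two dwords,
 * the high half of the DMA address goes to the LWRD register and the low
 * half to the HWRD (doorbell) register, whose write is what kicks the MC.
 */
static __maybe_unused void efx_ef10_mcdi_doorbell_demo(u64 dma_addr,
						       u32 *lwrd, u32 *hwrd)
{
	*lwrd = (u32)(dma_addr >> 32);	/* written first */
	*hwrd = (u32)dma_addr;		/* written second, rings the doorbell */
}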

static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;

	rmb();
	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}

static void
efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
			    size_t offset, size_t outlen)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(outbuf, pdu + offset, outlen);
}

static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	/* All our allocations have been reset */
	efx_ef10_table_reset_mc_allocations(efx);

	/* The datapath firmware might have been changed */
	nic_data->must_check_datapath_caps = true;

	/* MAC statistics have been cleared on the NIC; clear the local
	 * statistic that we update with efx_update_diff_stat().
	 */
	nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
}

static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = efx_ef10_get_warm_boot_count(efx);
	if (rc < 0) {
		/* The firmware is presumably in the process of
		 * rebooting. However, we are supposed to report each
		 * reboot just once, so we must only do that once we
		 * can read and store the updated warm boot count.
		 */
		return 0;
	}

	if (rc == nic_data->warm_boot_count)
		return 0;

	nic_data->warm_boot_count = rc;
	efx_ef10_mcdi_reboot_detected(efx);

	return -EIO;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

	if (likely(READ_ONCE(efx->irq_soft_enabled))) {
		/* Note test interrupts */
		if (context->index == efx->irq_level)
			efx->last_irq_cpu = raw_smp_processor_id();

		/* Schedule processing of the channel */
		efx_schedule_channel_irq(efx->channel[context->index]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);

	if (queues == 0)
		return IRQ_NONE;

	if (likely(soft_enabled)) {
		/* Note test interrupts */
		if (queues & (1U << efx->irq_level))
			efx->last_irq_cpu = raw_smp_processor_id();

		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
	}

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return IRQ_HANDLED;
}

static int efx_ef10_irq_test_generate(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true,
				    NULL) == 0)
		return -ENOTSUPP;

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
	return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}

static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
{
	/* low two bits of label are what we want for type */
	BUILD_BUG_ON((EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM) != 3);
	tx_queue->type = tx_queue->label & 3;
	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
				    (tx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

/* This writes to the TX_DESC_WPTR and also pushes data */
static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
					 const efx_qword_t *txd)
{
	unsigned int write_ptr;
	efx_oword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD, tx_queue->queue);
}

/* Add Firmware-Assisted TSO v2 option descriptors to a queue.
 */
int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			 bool *data_mapped)
{
	struct efx_tx_buffer *buffer;
	u16 inner_ipv4_id = 0;
	u16 outer_ipv4_id = 0;
	struct tcphdr *tcp;
	struct iphdr *ip;
	u16 ip_tot_len;
	u32 seqnum;
	u32 mss;

	EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2);

	mss = skb_shinfo(skb)->gso_size;

	if (unlikely(mss < 4)) {
		WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss);
		return -EINVAL;
	}

	if (skb->encapsulation) {
		if (!tx_queue->tso_encap)
			return -EINVAL;
		ip = ip_hdr(skb);
		if (ip->version == 4)
			outer_ipv4_id = ntohs(ip->id);

		ip = inner_ip_hdr(skb);
		tcp = inner_tcp_hdr(skb);
	} else {
		ip = ip_hdr(skb);
		tcp = tcp_hdr(skb);
	}

	/* 8000-series EF10 hardware requires that IP Total Length be
	 * greater than or equal to the value it will have in each segment
	 * (which is at most mss + 208 + TCP header length), but also less
	 * than (0x10000 - inner_network_header). Otherwise the TCP
	 * checksum calculation will be broken for encapsulated packets.
	 * We fill in ip->tot_len with 0xff30, which should satisfy the
	 * first requirement unless the MSS is ridiculously large (which
	 * should be impossible as the driver max MTU is 9216); it is
	 * guaranteed to satisfy the second as we only attempt TSO if
	 * inner_network_header <= 208.
	 */
	ip_tot_len = -EFX_TSO2_MAX_HDRLEN;
	EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN +
				  (tcp->doff << 2u) > ip_tot_len);

	if (ip->version == 4) {
		ip->tot_len = htons(ip_tot_len);
		ip->check = 0;
		inner_ipv4_id = ntohs(ip->id);
	} else {
		((struct ipv6hdr *)ip)->payload_len = htons(ip_tot_len);
	}

	seqnum = ntohl(tcp->seq);

	buffer = efx_tx_queue_get_insert_buffer(tx_queue);

	buffer->flags = EFX_TX_BUF_OPTION;
	buffer->len = 0;
	buffer->unmap_len = 0;
	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
			     ESF_DZ_TX_TSO_OPTION_TYPE,
			     ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
			     ESF_DZ_TX_TSO_IP_ID, inner_ipv4_id,
			     ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
			     );
	++tx_queue->insert_count;

	buffer = efx_tx_queue_get_insert_buffer(tx_queue);

	buffer->flags = EFX_TX_BUF_OPTION;
	buffer->len = 0;
	buffer->unmap_len = 0;
	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
			     ESF_DZ_TX_TSO_OPTION_TYPE,
			     ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
			     ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id,
			     ESF_DZ_TX_TSO_TCP_MSS, mss
			     );
	++tx_queue->insert_count;

	return 0;
}
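
/* Illustrative sketch (not part of the driver): the 0xff30 arithmetic
 * described in the comment above. Assuming EFX_TSO2_MAX_HDRLEN is 208 (the
 * figure the comment quotes), negating it in u16 arithmetic wraps modulo
 * 0x10000, giving 0x10000 - 208 = 0xff30.
 */
static __maybe_unused u16 efx_ef10_tso2_tot_len_demo(void)
{
	u16 ip_tot_len = -EFX_TSO2_MAX_HDRLEN;	/* 0x10000 - 208 == 0xff30 */

	return ip_tot_len;
}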

static u32 efx_ef10_tso_versions(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u32 tso_versions = 0;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
		tso_versions |= BIT(1);
	if (nic_data->datapath_caps2 &
	    (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))
		tso_versions |= BIT(2);
	return tso_versions;
}

static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
	bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
	bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_ef10_nic_data *nic_data;
	efx_qword_t *txd;
	int rc;

	nic_data = efx->nic_data;

	/* Only attempt to enable TX timestamping if we have the license for it,
	 * otherwise TXQ init will fail
	 */
	if (!(nic_data->licensed_features &
	      (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) {
		tx_queue->timestamping = false;
		/* Disable sync events on this channel. */
		if (efx->type->ptp_set_ts_sync_events)
			efx->type->ptp_set_ts_sync_events(efx, false, false);
	}

	/* TSOv2 is a limited resource that can only be configured on a limited
	 * number of queues. TSO without checksum offload is not really a thing,
	 * so we only enable it for those queues.
	 * TSOv2 cannot be used with Hardware timestamping, and is never needed
	 * for XDP tx.
	 */
	if (efx_has_cap(efx, TX_TSO_V2)) {
		if ((csum_offload || inner_csum) &&
		    !tx_queue->timestamping && !tx_queue->xdp_tx) {
			tx_queue->tso_version = 2;
			netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
				  channel->channel);
		}
	} else if (efx_has_cap(efx, TX_TSO)) {
		tx_queue->tso_version = 1;
	}

	rc = efx_mcdi_tx_init(tx_queue);
	if (rc)
		goto fail;

	/* A previous user of this TX queue might have set us up the
	 * bomb by writing a descriptor to the TX push collector but
	 * not the doorbell. (Each collector belongs to a port, not a
	 * queue or function, so cannot easily be reset.) We must
	 * attempt to push a no-op descriptor in its place.
	 */
	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
	tx_queue->insert_count = 1;
	txd = efx_tx_desc(tx_queue, 0);
	EFX_POPULATE_QWORD_7(*txd,
			     ESF_DZ_TX_DESC_IS_OPT, true,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload && tx_queue->tso_version != 2,
			     ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, inner_csum,
			     ESF_DZ_TX_OPTION_INNER_IP_CSUM, inner_csum && tx_queue->tso_version != 2,
			     ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
	tx_queue->write_count = 1;

	if (tx_queue->tso_version == 2 && efx_has_cap(efx, TX_TSO_V2_ENCAP))
		tx_queue->tso_encap = true;

	wmb();
	efx_ef10_push_tx_desc(tx_queue, txd);

	return;

fail:
	netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
		    tx_queue->queue);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned int write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
}

#define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff

static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue,
					  dma_addr_t dma_addr, unsigned int len)
{
	if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) {
		/* If we need to break across multiple descriptors we should
		 * stop at a page boundary. This assumes the length limit is
		 * greater than the page size.
		 */
		dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN;

		BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE);
		len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr;
	}

	return len;
}
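
/* Illustrative sketch (not part of the driver): a worked example of the
 * clamp above, assuming 4KiB pages. A fragment at 0x1000f000 longer than
 * 0x3fff has end = 0x10012fff; rounding that down to the page boundary
 * 0x10012000 yields a 0x3000-byte first descriptor, so the next descriptor
 * starts page-aligned.
 */
static __maybe_unused unsigned int efx_ef10_tx_limit_len_demo(void)
{
	dma_addr_t dma_addr = 0x1000f000ULL;
	dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN;

	return (end & ~(dma_addr_t)0xfff) - dma_addr;	/* == 0x3000 */
}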

static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;
	struct efx_tx_buffer *buffer;
	unsigned int write_ptr;
	efx_qword_t *txd;

	tx_queue->xmit_pending = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		if (buffer->flags & EFX_TX_BUF_OPTION) {
			*txd = buffer->option;
			if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1)
				/* PIO descriptor */
				tx_queue->packet_write_count = tx_queue->write_count;
		} else {
			tx_queue->packet_write_count = tx_queue->write_count;
			BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
			EFX_POPULATE_QWORD_3(
				*txd,
				ESF_DZ_TX_KER_CONT,
				buffer->flags & EFX_TX_BUF_CONT,
				ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
				ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
		}
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_ef10_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_ef10_notify_tx_desc(tx_queue);
	}
}

static int efx_ef10_probe_multicast_chaining(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int enabled, implemented;
	bool want_workaround_26807;
	int rc;

	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
	if (rc == -ENOSYS) {
		/* GET_WORKAROUNDS predates workaround 26807; if even
		 * GET_WORKAROUNDS is unavailable, the workaround must be
		 * unavailable in this firmware too.
		 */
		nic_data->workaround_26807 = false;
		return 0;
	}
	if (rc)
		return rc;
	want_workaround_26807 =
		implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807;
	nic_data->workaround_26807 =
		!!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);

	if (want_workaround_26807 && !nic_data->workaround_26807) {
		unsigned int flags;

		rc = efx_mcdi_set_workaround(efx,
					     MC_CMD_WORKAROUND_BUG26807,
					     true, &flags);
		if (!rc) {
			if (flags &
			    1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
				netif_info(efx, drv, efx->net_dev,
					   "other functions on NIC have been reset\n");

				/* With MCFW v4.6.x and earlier, the
				 * boot count will have incremented,
				 * so re-read the warm_boot_count
				 * value now to ensure this function
				 * doesn't think it has changed next
				 * time it checks.
				 */
				rc = efx_ef10_get_warm_boot_count(efx);
				if (rc >= 0) {
					nic_data->warm_boot_count = rc;
					rc = 0;
				}
			}
			nic_data->workaround_26807 = true;
		} else if (rc == -EPERM) {
			rc = 0;
		}
	}
	return rc;
}

static int efx_ef10_filter_table_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc = efx_ef10_probe_multicast_chaining(efx);
	struct efx_mcdi_filter_vlan *vlan;

	if (rc)
		return rc;
	rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);

	if (rc)
		return rc;

	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
		rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
		if (rc)
			goto fail_add_vlan;
	}
	return 0;

fail_add_vlan:
	efx_mcdi_filter_table_remove(efx);
	return rc;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_2(*rxd,
			     ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
			     ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int write_count;
	efx_dword_t reg;

	/* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
	write_count = rx_queue->added_count & ~7;
	if (rx_queue->notified_count == write_count)
		return;

	do
		efx_ef10_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
	while (++rx_queue->notified_count != write_count);

	wmb();
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
			     write_count & rx_queue->ptr_mask);
	efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
			efx_rx_queue_index(rx_queue));
}
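
/* Illustrative sketch (not part of the driver): the rounding above. Masking
 * with ~7 rounds added_count down to a multiple of 8, so the write pointer
 * only advances in whole batches of 8 descriptors as the firmware requires;
 * e.g. 29 added buffers expose 24 and leave 5 for a later refill.
 */
static __maybe_unused unsigned int efx_ef10_rx_wptr_demo(unsigned int added)
{
	return added & ~7U;	/* 29 -> 24, 32 -> 32 */
}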

static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;

static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_REFILL);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
			   inbuf, sizeof(inbuf), 0,
			   efx_ef10_rx_defer_refill_complete, 0);
}

static void
efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
				  int rc, efx_dword_t *outbuf,
				  size_t outlen_actual)
{
	/* nothing to do */
}

static int efx_ef10_ev_init(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_ef10_nic_data *nic_data;
	bool use_v2, cut_thru;

	nic_data = efx->nic_data;
	use_v2 = nic_data->datapath_caps2 &
		 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN;
	cut_thru = !(nic_data->datapath_caps &
		     1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
	return efx_mcdi_ev_init(channel, cut_thru, use_v2);
}

static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
					   unsigned int rx_queue_label)
{
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "rx event arrived on queue %d labeled as queue %u\n",
		   efx_rx_queue_index(rx_queue), rx_queue_label);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}

static void
efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
			     unsigned int actual, unsigned int expected)
{
	unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, actual, expected);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}

/* A partially received RX packet was aborted; clean up. */
static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
{
	unsigned int rx_desc_ptr;

	netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
		  "scattered RX aborted (dropping %u buffers)\n",
		  rx_queue->scatter_n);

	rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;

	efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
		      0, EFX_RX_PKT_DISCARD);

	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;
	++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
}

static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
					   unsigned int n_packets,
					   unsigned int rx_encap_hdr,
					   unsigned int rx_l3_class,
					   unsigned int rx_l4_class,
					   const efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) {
		if (!(efx->net_dev->features & NETIF_F_RXALL)) {
			if (!efx->loopback_selftest)
				channel->n_rx_eth_crc_err += n_packets;
			return EFX_RX_PKT_DISCARD;
		}
		handled = true;
	}
	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) {
		if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
			     rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
			     rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
			     rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
			     rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
			netdev_WARN(efx->net_dev,
				    "invalid class for RX_IPCKSUM_ERR: event="
				    EFX_QWORD_FMT "\n",
				    EFX_QWORD_VAL(*event));
		if (!efx->loopback_selftest)
			*(rx_encap_hdr ?
			  &channel->n_rx_outer_ip_hdr_chksum_err :
			  &channel->n_rx_ip_hdr_chksum_err) += n_packets;
		return 0;
	}
	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
		if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
			     ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
			       rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
			      (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
			       rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
			netdev_WARN(efx->net_dev,
				    "invalid class for RX_TCPUDP_CKSUM_ERR: event="
				    EFX_QWORD_FMT "\n",
				    EFX_QWORD_VAL(*event));
		if (!efx->loopback_selftest)
			*(rx_encap_hdr ?
			  &channel->n_rx_outer_tcp_udp_chksum_err :
			  &channel->n_rx_tcp_udp_chksum_err) += n_packets;
		return 0;
	}
	if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) {
		if (unlikely(!rx_encap_hdr))
			netdev_WARN(efx->net_dev,
				    "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event="
				    EFX_QWORD_FMT "\n",
				    EFX_QWORD_VAL(*event));
		else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
				  rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
				  rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
				  rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
			netdev_WARN(efx->net_dev,
				    "invalid class for RX_IP_INNER_CHKSUM_ERR: event="
				    EFX_QWORD_FMT "\n",
				    EFX_QWORD_VAL(*event));
		if (!efx->loopback_selftest)
			channel->n_rx_inner_ip_hdr_chksum_err += n_packets;
		return 0;
	}
	if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) {
		if (unlikely(!rx_encap_hdr))
			netdev_WARN(efx->net_dev,
				    "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
				    EFX_QWORD_FMT "\n",
				    EFX_QWORD_VAL(*event));
		else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
				   rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
				  (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
				   rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
			netdev_WARN(efx->net_dev,
				    "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
				    EFX_QWORD_FMT "\n",
				    EFX_QWORD_VAL(*event));
		if (!efx->loopback_selftest)
			channel->n_rx_inner_tcp_udp_chksum_err += n_packets;
		return 0;
	}

	WARN_ON(!handled); /* No error bits were recognised */
	return 0;
}

static int efx_ef10_handle_rx_event(struct efx_channel *channel,
				    const efx_qword_t *event)
{
	unsigned int rx_bytes, next_ptr_lbits, rx_queue_label;
	unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr;
	unsigned int n_descs, n_packets, i;
	struct efx_nic *efx = channel->efx;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_rx_queue *rx_queue;
	efx_qword_t errors;
	bool rx_cont;
	u16 flags = 0;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return 0;

	/* Basic packet information */
	rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
	next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
	rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
	rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
	rx_encap_hdr =
		nic_data->datapath_caps &
			(1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ?
		EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) :
		ESE_EZ_ENCAP_HDR_NONE;

	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
		netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
			    EFX_QWORD_FMT "\n",
			    EFX_QWORD_VAL(*event));

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
		efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);

	n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
		   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));

	if (n_descs != rx_queue->scatter_n + 1) {
		/* detect rx abort */
		if (unlikely(n_descs == rx_queue->scatter_n)) {
			if (rx_queue->scatter_n == 0 || rx_bytes != 0)
				netdev_WARN(efx->net_dev,
					    "invalid RX abort: scatter_n=%u event="
					    EFX_QWORD_FMT "\n",
					    rx_queue->scatter_n,
					    EFX_QWORD_VAL(*event));
			efx_ef10_handle_rx_abort(rx_queue);
			return 0;
		}

		/* Check that RX completion merging is valid, i.e.
		 * the current firmware supports it and this is a
		 * non-scattered packet.
		 */
		if (!(nic_data->datapath_caps &
		      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
		    rx_queue->scatter_n != 0 || rx_cont) {
			efx_ef10_handle_rx_bad_lbits(
				rx_queue, next_ptr_lbits,
				(rx_queue->removed_count +
				 rx_queue->scatter_n + 1) &
				((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
			return 0;
		}

		/* Merged completion for multiple non-scattered packets */
		rx_queue->scatter_n = 1;
		rx_queue->scatter_len = 0;
		n_packets = n_descs;
		++channel->n_rx_merge_events;
		channel->n_rx_merge_packets += n_packets;
		flags |= EFX_RX_PKT_PREFIX_LEN;
	} else {
		++rx_queue->scatter_n;
		rx_queue->scatter_len += rx_bytes;
		if (rx_cont)
			return 0;
		n_packets = 1;
	}

	EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1,
			     ESF_DZ_RX_IPCKSUM_ERR, 1,
			     ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1,
			     ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1,
			     ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1);
	EFX_AND_QWORD(errors, *event, errors);
	if (unlikely(!EFX_QWORD_IS_ZERO(errors))) {
		flags |= efx_ef10_handle_rx_event_errors(channel, n_packets,
							 rx_encap_hdr,
							 rx_l3_class, rx_l4_class,
							 event);
	} else {
		bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
			      rx_l4_class == ESE_FZ_L4_CLASS_UDP;

		switch (rx_encap_hdr) {
		case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
			flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */
			if (tcpudp)
				flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */
			break;
		case ESE_EZ_ENCAP_HDR_GRE:
		case ESE_EZ_ENCAP_HDR_NONE:
			if (tcpudp)
				flags |= EFX_RX_PKT_CSUMMED;
			break;
		default:
			netdev_WARN(efx->net_dev,
				    "unknown encapsulation type: event="
				    EFX_QWORD_FMT "\n",
				    EFX_QWORD_VAL(*event));
		}
	}

	if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
		flags |= EFX_RX_PKT_TCP;

	channel->irq_mod_score += 2 * n_packets;

	/* Handle received packet(s) */
	for (i = 0; i < n_packets; i++) {
		efx_rx_packet(rx_queue,
			      rx_queue->removed_count & rx_queue->ptr_mask,
			      rx_queue->scatter_n, rx_queue->scatter_len,
			      flags);
		rx_queue->removed_count += rx_queue->scatter_n;
	}

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	return n_packets;
}
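
/* Illustrative sketch (not part of the driver): the modular arithmetic in
 * efx_ef10_handle_rx_event() above. RX_DSC_PTR_LBITS carries only the low
 * bits of the hardware descriptor pointer, so the number of descriptors an
 * event covers is recovered modulo 2^ESF_DZ_RX_DSC_PTR_LBITS_WIDTH and the
 * subtraction wraps correctly.
 */
static __maybe_unused unsigned int
efx_ef10_rx_desc_count_demo(unsigned int next_ptr_lbits,
			    unsigned int removed_count)
{
	return (next_ptr_lbits - removed_count) &
	       ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1);
}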
2901
2902static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
2903{
2904 u32 tstamp;
2905
2906 tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI);
2907 tstamp <<= 16;
2908 tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO);
2909
2910 return tstamp;
2911}
2912
2913static void
2914efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
2915{
2916 struct efx_nic *efx = channel->efx;
2917 struct efx_tx_queue *tx_queue;
2918 unsigned int tx_ev_desc_ptr;
2919 unsigned int tx_ev_q_label;
2920 unsigned int tx_ev_type;
2921 u64 ts_part;
2922
2923 if (unlikely(READ_ONCE(efx->reset_pending)))
2924 return;
2925
2926 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
2927 return;
2928
2929 /* Get the transmit queue */
2930 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
Olivier Deprez157378f2022-04-04 15:47:50 +02002931 tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002932
2933 if (!tx_queue->timestamping) {
2934 /* Transmit completion */
2935 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
2936 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
2937 return;
2938 }
2939
2940 /* Transmit timestamps are only available for 8XXX series. They result
Olivier Deprez157378f2022-04-04 15:47:50 +02002941 * in up to three events per packet. These occur in order, and are:
2942 * - the normal completion event (may be omitted)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002943 * - the low part of the timestamp
2944 * - the high part of the timestamp
2945 *
Olivier Deprez157378f2022-04-04 15:47:50 +02002946 * It's possible for multiple completion events to appear before the
2947 * corresponding timestamps. So we can for example get:
2948 * COMP N
2949 * COMP N+1
2950 * TS_LO N
2951 * TS_HI N
2952 * TS_LO N+1
2953 * TS_HI N+1
2954 *
2955 * In addition it's also possible for the adjacent completions to be
2956 * merged, so we may not see COMP N above. As such, the completion
2957 * events are not very useful here.
2958 *
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002959 * Each part of the timestamp is itself split across two 16 bit
2960 * fields in the event.
2961 */
2962 tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
2963
2964 switch (tx_ev_type) {
2965 case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
Olivier Deprez157378f2022-04-04 15:47:50 +02002966 /* Ignore this event - see above. */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002967 break;
2968
2969 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
2970 ts_part = efx_ef10_extract_event_ts(event);
2971 tx_queue->completed_timestamp_minor = ts_part;
2972 break;
2973
2974 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
2975 ts_part = efx_ef10_extract_event_ts(event);
2976 tx_queue->completed_timestamp_major = ts_part;
2977
Olivier Deprez157378f2022-04-04 15:47:50 +02002978 efx_xmit_done_single(tx_queue);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002979 break;
2980
2981 default:
2982 netif_err(efx, hw, efx->net_dev,
2983 "channel %d unknown tx event type %d (data "
2984 EFX_QWORD_FMT ")\n",
2985 channel->channel, tx_ev_type,
2986 EFX_QWORD_VAL(*event));
2987 break;
2988 }
2989}
2990
2991static void
2992efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
2993{
2994 struct efx_nic *efx = channel->efx;
2995 int subcode;
2996
2997 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
2998
2999 switch (subcode) {
3000 case ESE_DZ_DRV_TIMER_EV:
3001 case ESE_DZ_DRV_WAKE_UP_EV:
3002 break;
3003 case ESE_DZ_DRV_START_UP_EV:
3004 /* event queue init complete. ok. */
3005 break;
3006 default:
3007 netif_err(efx, hw, efx->net_dev,
3008 "channel %d unknown driver event type %d"
3009 " (data " EFX_QWORD_FMT ")\n",
3010 channel->channel, subcode,
3011 EFX_QWORD_VAL(*event));
3012
3013 }
3014}
3015
3016static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
3017 efx_qword_t *event)
3018{
3019 struct efx_nic *efx = channel->efx;
3020 u32 subcode;
3021
3022 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
3023
3024 switch (subcode) {
3025 case EFX_EF10_TEST:
3026 channel->event_test_cpu = raw_smp_processor_id();
3027 break;
3028 case EFX_EF10_REFILL:
3029 /* The queue must be empty, so we won't receive any rx
3030 * events, so efx_process_channel() won't refill the
3031 * queue. Refill it here
3032 */
3033 efx_fast_push_rx_descriptors(&channel->rx_queue, true);
3034 break;
3035 default:
3036 netif_err(efx, hw, efx->net_dev,
3037 "channel %d unknown driver event type %u"
3038 " (data " EFX_QWORD_FMT ")\n",
3039 channel->channel, (unsigned) subcode,
3040 EFX_QWORD_VAL(*event));
3041 }
3042}
3043
3044static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
3045{
3046 struct efx_nic *efx = channel->efx;
3047 efx_qword_t event, *p_event;
3048 unsigned int read_ptr;
3049 int ev_code;
3050 int spent = 0;
3051
3052 if (quota <= 0)
3053 return spent;
3054
3055 read_ptr = channel->eventq_read_ptr;
3056
3057 for (;;) {
3058 p_event = efx_event(channel, read_ptr);
3059 event = *p_event;
3060
3061 if (!efx_event_present(&event))
3062 break;
3063
3064 EFX_SET_QWORD(*p_event);
3065
3066 ++read_ptr;
3067
3068 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
3069
3070 netif_vdbg(efx, drv, efx->net_dev,
3071 "processing event on %d " EFX_QWORD_FMT "\n",
3072 channel->channel, EFX_QWORD_VAL(event));
3073
3074 switch (ev_code) {
3075 case ESE_DZ_EV_CODE_MCDI_EV:
3076 efx_mcdi_process_event(channel, &event);
3077 break;
3078 case ESE_DZ_EV_CODE_RX_EV:
3079 spent += efx_ef10_handle_rx_event(channel, &event);
3080 if (spent >= quota) {
3081 /* XXX can we split a merged event to
3082 * avoid going over-quota?
3083 */
3084 spent = quota;
3085 goto out;
3086 }
3087 break;
3088 case ESE_DZ_EV_CODE_TX_EV:
3089 efx_ef10_handle_tx_event(channel, &event);
3090 break;
3091 case ESE_DZ_EV_CODE_DRIVER_EV:
3092 efx_ef10_handle_driver_event(channel, &event);
3093 if (++spent == quota)
3094 goto out;
3095 break;
3096 case EFX_EF10_DRVGEN_EV:
3097 efx_ef10_handle_driver_generated_event(channel, &event);
3098 break;
3099 default:
3100 netif_err(efx, hw, efx->net_dev,
3101 "channel %d unknown event type %d"
3102 " (data " EFX_QWORD_FMT ")\n",
3103 channel->channel, ev_code,
3104 EFX_QWORD_VAL(event));
3105 }
3106 }
3107
3108out:
3109 channel->eventq_read_ptr = read_ptr;
3110 return spent;
3111}
3112
3113static void efx_ef10_ev_read_ack(struct efx_channel *channel)
3114{
3115 struct efx_nic *efx = channel->efx;
3116 efx_dword_t rptr;
3117
3118 if (EFX_EF10_WORKAROUND_35388(efx)) {
3119 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
3120 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
3121 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
3122 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
3123
3124 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3125 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
3126 ERF_DD_EVQ_IND_RPTR,
3127 (channel->eventq_read_ptr &
3128 channel->eventq_mask) >>
3129 ERF_DD_EVQ_IND_RPTR_WIDTH);
3130 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3131 channel->channel);
3132 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3133 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
3134 ERF_DD_EVQ_IND_RPTR,
3135 channel->eventq_read_ptr &
3136 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
3137 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3138 channel->channel);
3139 } else {
3140 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
3141 channel->eventq_read_ptr &
3142 channel->eventq_mask);
3143 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
3144 }
3145}

static void efx_ef10_ev_test_generate(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	struct efx_nic *efx = channel->efx;
	efx_qword_t event;
	int rc;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_TEST);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc != 0)
		goto fail;

	return;

fail:
	WARN_ON(true);
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_prepare_flr(struct efx_nic *efx)
{
	atomic_set(&efx->active_queues, 0);
}

static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u8 mac_old[ETH_ALEN];
	int rc, rc2;

	/* Only reconfigure a PF-created vport */
	if (is_zero_ether_addr(nic_data->vport_mac))
		return 0;

	efx_device_detach_sync(efx);
	efx_net_stop(efx->net_dev);
	down_write(&efx->filter_sem);
	efx_mcdi_filter_table_remove(efx);
	up_write(&efx->filter_sem);

	rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
	if (rc)
		goto restore_filters;

	ether_addr_copy(mac_old, nic_data->vport_mac);
	rc = efx_ef10_vport_del_mac(efx, efx->vport_id,
				    nic_data->vport_mac);
	if (rc)
		goto restore_vadaptor;

	rc = efx_ef10_vport_add_mac(efx, efx->vport_id,
				    efx->net_dev->dev_addr);
	if (!rc) {
		ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
	} else {
		rc2 = efx_ef10_vport_add_mac(efx, efx->vport_id, mac_old);
		if (rc2) {
			/* Failed to add original MAC, so clear vport_mac */
			eth_zero_addr(nic_data->vport_mac);
			goto reset_nic;
		}
	}

restore_vadaptor:
	rc2 = efx_ef10_vadaptor_alloc(efx, efx->vport_id);
	if (rc2)
		goto reset_nic;
restore_filters:
	down_write(&efx->filter_sem);
	rc2 = efx_ef10_filter_table_probe(efx);
	up_write(&efx->filter_sem);
	if (rc2)
		goto reset_nic;

	rc2 = efx_net_open(efx->net_dev);
	if (rc2)
		goto reset_nic;

	efx_device_attach_if_not_resetting(efx);

	return rc;

reset_nic:
	netif_err(efx, drv, efx->net_dev,
		  "Failed to restore when changing MAC address - scheduling reset\n");
	efx_schedule_reset(efx, RESET_TYPE_DATAPATH);

	return rc ? rc : rc2;
}

static int efx_ef10_set_mac_address(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
	bool was_enabled = efx->port_enabled;
	int rc;

	efx_device_detach_sync(efx);
	efx_net_stop(efx->net_dev);

	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	efx_mcdi_filter_table_remove(efx);

	ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
			efx->net_dev->dev_addr);
	MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
		       efx->vport_id);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
				sizeof(inbuf), NULL, 0, NULL);

	efx_ef10_filter_table_probe(efx);
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);

	if (was_enabled)
		efx_net_open(efx->net_dev);
	efx_device_attach_if_not_resetting(efx);

#ifdef CONFIG_SFC_SRIOV
	if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
		struct efx_ef10_nic_data *nic_data = efx->nic_data;
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;

		if (rc == -EPERM) {
			struct efx_nic *efx_pf;

			/* Switch to PF and change MAC address on vport */
			efx_pf = pci_get_drvdata(pci_dev_pf);

			rc = efx_ef10_sriov_set_vf_mac(efx_pf,
						       nic_data->vf_index,
						       efx->net_dev->dev_addr);
		} else if (!rc) {
			struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
			struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
			unsigned int i;

			/* MAC address successfully changed by VF (with MAC
			 * spoofing) so update the parent PF if possible.
			 */
			for (i = 0; i < efx_pf->vf_count; ++i) {
				struct ef10_vf *vf = nic_data->vf + i;

				if (vf->efx == efx) {
					ether_addr_copy(vf->mac,
							efx->net_dev->dev_addr);
					return 0;
				}
			}
		}
	} else
#endif
	if (rc == -EPERM) {
		netif_err(efx, drv, efx->net_dev,
			  "Cannot change MAC address; use sfboot to enable"
			  " mac-spoofing on this interface\n");
	} else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
		/* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
		 * fall back to the method of changing the MAC address on the
		 * vport.  This only applies to PFs because such versions of
		 * MCFW do not support VFs.
		 */
		rc = efx_ef10_vport_set_mac_address(efx);
	} else if (rc) {
		efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
				       sizeof(inbuf), NULL, 0, rc);
	}

	return rc;
}

static int efx_ef10_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	efx_mcdi_filter_sync_rx_mode(efx);

	if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED))
		return efx_mcdi_set_mtu(efx);
	return efx_mcdi_set_mac(efx);
}

static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);

	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
	return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

/* MC BISTs follow a different poll mechanism to phy BISTs.
 * The BIST is done in the poll handler on the MC, and the MCDI command
 * will block until the BIST is done.
 */
static int efx_ef10_poll_bist(struct efx_nic *efx)
{
	int rc;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
	size_t outlen;
	u32 result;

	rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
		return -EIO;

	result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
	switch (result) {
	case MC_CMD_POLL_BIST_PASSED:
		netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
		return 0;
	case MC_CMD_POLL_BIST_TIMEOUT:
		netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
		return -EIO;
	case MC_CMD_POLL_BIST_FAILED:
		netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
		return -EIO;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "BIST returned unknown result %u\n", result);
		return -EIO;
	}
}

static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);

	rc = efx_ef10_start_bist(efx, bist_type);
	if (rc != 0)
		return rc;

	return efx_ef10_poll_bist(efx);
}

static int
efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc, rc2;

	efx_reset_down(efx, RESET_TYPE_WORLD);

	rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
			  NULL, 0, NULL, 0, NULL);
	if (rc != 0)
		goto out;

	tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
	tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;

	rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);

out:
	if (rc == -EPERM)
		rc = 0;
	rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
	return rc ? rc : rc2;
}

#ifdef CONFIG_SFC_MTD

struct efx_ef10_nvram_type_info {
	u16 type, type_mask;
	u8 port;
	const char *name;
};

static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
	{ NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
	{ NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
	{ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
	{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
	{ NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" },
	{ NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" },
	{ NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0, 0, "sfc_dynamic_cfg_dflt" },
	{ NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0, 0, "sfc_exp_rom_cfg_dflt" },
	{ NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" },
	{ NVRAM_PARTITION_TYPE_BUNDLE, 0, 0, "sfc_bundle" },
	{ NVRAM_PARTITION_TYPE_BUNDLE_METADATA, 0, 0, "sfc_bundle_metadata" },
};
#define EF10_NVRAM_PARTITION_COUNT	ARRAY_SIZE(efx_ef10_nvram_types)
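
/* Illustrative sketch, not part of the driver: each table entry's
 * type/type_mask pair matches a whole range of partition types.  For the
 * PHY entry above (mask 0xff), every type in
 * [NVRAM_PARTITION_TYPE_PHY_MIN, NVRAM_PARTITION_TYPE_PHY_MIN + 0xff]
 * reduces to the same row.  The helper name is hypothetical; the test
 * mirrors the lookup loop in efx_ef10_mtd_probe_partition() below.
 */
static inline bool
efx_ef10_nvram_type_matches(const struct efx_ef10_nvram_type_info *info,
			    unsigned int type)
{
	/* Clear the masked-out bits, then compare against the base type */
	return (type & ~info->type_mask) == info->type;
}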

static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
					struct efx_mcdi_mtd_partition *part,
					unsigned int type,
					unsigned long *found)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
	const struct efx_ef10_nvram_type_info *info;
	size_t size, erase_size, outlen;
	int type_idx = 0;
	bool protected;
	int rc;

	for (type_idx = 0; ; type_idx++) {
		if (type_idx == EF10_NVRAM_PARTITION_COUNT)
			return -ENODEV;
		info = efx_ef10_nvram_types + type_idx;
		if ((type & ~info->type_mask) == info->type)
			break;
	}
	if (info->port != efx_port_num(efx))
		return -ENODEV;

	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
	if (rc)
		return rc;
	if (protected &&
	    (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS &&
	     type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS))
		/* Hide protected partitions that don't provide defaults. */
		return -ENODEV;

	if (protected)
		/* Protected partitions are read only. */
		erase_size = 0;

	/* If we've already exposed a partition of this type, hide this
	 * duplicate.  All operations on MTDs are keyed by the type anyway,
	 * so we can't act on the duplicate.
	 */
	if (__test_and_set_bit(type_idx, found))
		return -EEXIST;

	part->nvram_type = type;

	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
		return -EIO;
	if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
	    (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
		part->fw_subtype = MCDI_DWORD(outbuf,
					      NVRAM_METADATA_OUT_SUBTYPE);

	part->common.dev_type_name = "EF10 NVRAM manager";
	part->common.type_name = info->name;

	part->common.mtd.type = MTD_NORFLASH;
	part->common.mtd.flags = MTD_CAP_NORFLASH;
	part->common.mtd.size = size;
	part->common.mtd.erasesize = erase_size;
	/* sfc_status is read-only */
	if (!erase_size)
		part->common.mtd.flags |= MTD_NO_ERASE;

	return 0;
}

static int efx_ef10_mtd_probe(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
	DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
	struct efx_mcdi_mtd_partition *parts;
	size_t outlen, n_parts_total, i, n_parts;
	unsigned int type;
	int rc;

	ASSERT_RTNL();

	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
		return -EIO;

	n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
	if (n_parts_total >
	    MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
		return -EIO;

	parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	n_parts = 0;
	for (i = 0; i < n_parts_total; i++) {
		type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
					i);
		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
						  found);
		if (rc == -EEXIST || rc == -ENODEV)
			continue;
		if (rc)
			goto fail;
		n_parts++;
	}

	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
	if (rc)
		kfree(parts);
	return rc;
}

#endif /* CONFIG_SFC_MTD */

static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
{
	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}

static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
					    u32 host_time) {}

static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
					   bool temp)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
	int rc;

	if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
	    channel->sync_events_state == SYNC_EVENTS_VALID ||
	    (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
		return 0;
	channel->sync_events_state = SYNC_EVENTS_REQUESTED;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
		       channel->channel);

	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);

	if (rc != 0)
		channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
						    SYNC_EVENTS_DISABLED;

	return rc;
}

static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
					    bool temp)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
	int rc;

	if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
	    (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
		return 0;
	if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
		channel->sync_events_state = SYNC_EVENTS_DISABLED;
		return 0;
	}
	channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
					    SYNC_EVENTS_DISABLED;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
		       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
		       channel->channel);

	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);

	return rc;
}

static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
					   bool temp)
{
	int (*set)(struct efx_channel *channel, bool temp);
	struct efx_channel *channel;

	set = en ?
	      efx_ef10_rx_enable_timestamping :
	      efx_ef10_rx_disable_timestamping;

	channel = efx_ptp_channel(efx);
	if (channel) {
		int rc = set(channel, temp);

		if (en && rc != 0) {
			efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
			return rc;
		}
	}

	return 0;
}

static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
					 struct hwtstamp_config *init)
{
	return -EOPNOTSUPP;
}

static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
				      struct hwtstamp_config *init)
{
	int rc;

	switch (init->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		efx_ef10_ptp_set_ts_sync_events(efx, false, false);
		/* if TX timestamping is still requested then leave PTP on */
		return efx_ptp_change_mode(efx,
					   init->tx_type != HWTSTAMP_TX_OFF, 0);
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		init->rx_filter = HWTSTAMP_FILTER_ALL;
		rc = efx_ptp_change_mode(efx, true, 0);
		if (!rc)
			rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
		if (rc)
			efx_ptp_change_mode(efx, false, 0);
		return rc;
	default:
		return -ERANGE;
	}
}
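
/* Illustrative only: a typical SIOCSHWTSTAMP request that the function
 * above accepts and coerces to HWTSTAMP_FILTER_ALL.  The type and the
 * constants come from <linux/net_tstamp.h>; the values shown are just an
 * example, not driver code:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
 *	};
 */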

static int efx_ef10_get_phys_port_id(struct efx_nic *efx,
				     struct netdev_phys_item_id *ppid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (!is_valid_ether_addr(nic_data->port_id))
		return -EOPNOTSUPP;

	ppid->id_len = ETH_ALEN;
	memcpy(ppid->id, nic_data->port_id, ppid->id_len);

	return 0;
}

static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
{
	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	return efx_ef10_add_vlan(efx, vid);
}

static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
{
	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	return efx_ef10_del_vlan(efx, vid);
}

/* We rely on the MCDI wiping out our TX rings if it made any changes to the
 * ports table, ensuring that any TSO descriptors that were made on a now-
 * removed tunnel port will be blown away and won't break things when we try
 * to transmit them using the new ports table.
 */
static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
	bool will_reset = false;
	size_t num_entries = 0;
	size_t inlen, outlen;
	size_t i;
	int rc;
	efx_dword_t flags_and_num_entries;

	WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock));

	nic_data->udp_tunnels_dirty = false;

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) {
		efx_device_attach_if_not_resetting(efx);
		return 0;
	}

	BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) >
		     MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);

	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
		if (nic_data->udp_tunnels[i].type !=
		    TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID) {
			efx_dword_t entry;

			EFX_POPULATE_DWORD_2(entry,
				TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
					ntohs(nic_data->udp_tunnels[i].port),
				TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
					nic_data->udp_tunnels[i].type);
			*_MCDI_ARRAY_DWORD(inbuf,
				SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES,
				num_entries++) = entry;
		}
	}

	BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST -
		      MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 !=
		     EFX_WORD_1_LBN);
	BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 !=
		     EFX_WORD_1_WIDTH);
	EFX_POPULATE_DWORD_2(flags_and_num_entries,
			     MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
			     !!unloading,
			     EFX_WORD_1, num_entries);
	*_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) =
		flags_and_num_entries;

	inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
				inbuf, inlen, outbuf, sizeof(outbuf), &outlen);
	if (rc == -EIO) {
		/* Most likely the MC rebooted due to another function also
		 * setting its tunnel port list.  Mark the tunnel port list as
		 * dirty, so it will be pushed upon coming up from the reboot.
		 */
		nic_data->udp_tunnels_dirty = true;
		return 0;
	}

	if (rc) {
		/* expected not available on unprivileged functions */
		if (rc != -EPERM)
			netif_warn(efx, drv, efx->net_dev,
				   "Unable to set UDP tunnel ports; rc=%d.\n", rc);
	} else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
		   (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) {
		netif_info(efx, drv, efx->net_dev,
			   "Rebooting MC due to UDP tunnel port list change\n");
		will_reset = true;
		if (unloading)
			/* Delay for the MC reset to complete.  This will make
			 * unloading other functions a bit smoother.  This is a
			 * race, but the other unload will work whichever way
			 * it goes, this just avoids an unnecessary error
			 * message.
			 */
			msleep(100);
	}
	if (!will_reset && !unloading) {
		/* The caller will have detached, relying on the MC reset to
		 * trigger a re-attach.  Since there won't be an MC reset, we
		 * have to do the attach ourselves.
		 */
		efx_device_attach_if_not_resetting(efx);
	}

	return rc;
}
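
/* Worked layout note (illustrative, derived from the BUILD_BUG_ONs above):
 * the FLAGS field and the 16-bit entry count share a single dword in the
 * request, which is why one EFX_POPULATE_DWORD_2() can write both:
 *
 *	bits  0..15  SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS (UNLOADING flag)
 *	bits 16..31  number of entries (addressed as EFX_WORD_1)
 */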

static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc = 0;

	mutex_lock(&nic_data->udp_tunnels_lock);
	if (nic_data->udp_tunnels_dirty) {
		/* Make sure all TX are stopped while we modify the table, else
		 * we might race against an efx_features_check().
		 */
		efx_device_detach_sync(efx);
		rc = efx_ef10_set_udp_tnl_ports(efx, false);
	}
	mutex_unlock(&nic_data->udp_tunnels_lock);
	return rc;
}

static int efx_ef10_udp_tnl_set_port(struct net_device *dev,
				     unsigned int table, unsigned int entry,
				     struct udp_tunnel_info *ti)
{
	struct efx_nic *efx = netdev_priv(dev);
	struct efx_ef10_nic_data *nic_data;
	int efx_tunnel_type, rc;

	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
		efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
	else
		efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;

	nic_data = efx->nic_data;
	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
		return -EOPNOTSUPP;

	mutex_lock(&nic_data->udp_tunnels_lock);
	/* Make sure all TX are stopped while we add to the table, else we
	 * might race against an efx_features_check().
	 */
	efx_device_detach_sync(efx);
	nic_data->udp_tunnels[entry].type = efx_tunnel_type;
	nic_data->udp_tunnels[entry].port = ti->port;
	rc = efx_ef10_set_udp_tnl_ports(efx, false);
	mutex_unlock(&nic_data->udp_tunnels_lock);

	return rc;
}

/* Called under the TX lock with the TX queue running, hence no-one can be
 * in the middle of updating the UDP tunnels table.  However, they could
 * have tried and failed the MCDI, in which case they'll have set the dirty
 * flag before dropping their locks.
 */
static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t i;

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
		return false;

	if (nic_data->udp_tunnels_dirty)
		/* SW table may not match HW state, so just assume we can't
		 * use any UDP tunnel offloads.
		 */
		return false;

	for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
		if (nic_data->udp_tunnels[i].type !=
		    TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID &&
		    nic_data->udp_tunnels[i].port == port)
			return true;

	return false;
}
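
/* Usage sketch (an assumption about the caller, not driver code): the TX
 * path's feature check consults this hook to decide whether tunnel
 * offloads may be used for a given outer UDP destination port, roughly:
 *
 *	if (!efx->type->udp_tnl_has_port(efx, udp_hdr(skb)->dest))
 *		features &= ~(NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK);
 */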

static int efx_ef10_udp_tnl_unset_port(struct net_device *dev,
				       unsigned int table, unsigned int entry,
				       struct udp_tunnel_info *ti)
{
	struct efx_nic *efx = netdev_priv(dev);
	struct efx_ef10_nic_data *nic_data;
	int rc;

	nic_data = efx->nic_data;

	mutex_lock(&nic_data->udp_tunnels_lock);
	/* Make sure all TX are stopped while we remove from the table, else we
	 * might race against an efx_features_check().
	 */
	efx_device_detach_sync(efx);
	nic_data->udp_tunnels[entry].type = TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID;
	nic_data->udp_tunnels[entry].port = 0;
	rc = efx_ef10_set_udp_tnl_ports(efx, false);
	mutex_unlock(&nic_data->udp_tunnels_lock);

	return rc;
}

static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = {
	.set_port = efx_ef10_udp_tnl_set_port,
	.unset_port = efx_ef10_udp_tnl_unset_port,
	.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables = {
		{
			.n_entries = 16,
			.tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
					UDP_TUNNEL_TYPE_GENEVE,
		},
	},
};
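
/* Minimal usage sketch (an assumption about the probe path, shown only for
 * orientation): the core UDP tunnel offload infrastructure drives the
 * .set_port and .unset_port callbacks above once the table description is
 * attached to the net device:
 *
 *	net_dev->udp_tunnel_nic_info = &efx_ef10_udp_tunnels;
 */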

/* EF10 may have multiple datapath firmware variants within a
 * single version.  Report which variants are running.
 */
static size_t efx_ef10_print_additional_fwver(struct efx_nic *efx, char *buf,
					      size_t len)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	return scnprintf(buf, len, " rx%x tx%x",
			 nic_data->rx_dpcpu_fw_id,
			 nic_data->tx_dpcpu_fw_id);
}

static unsigned int ef10_check_caps(const struct efx_nic *efx,
				    u8 flag,
				    u32 offset)
{
	const struct efx_ef10_nic_data *nic_data = efx->nic_data;

	switch (offset) {
	case MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST:
		return nic_data->datapath_caps & BIT_ULL(flag);
	case MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST:
		return nic_data->datapath_caps2 & BIT_ULL(flag);
	default:
		return 0;
	}
}
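
/* Illustrative note: callers normally reach this hook via the efx_has_cap()
 * convenience macro (defined elsewhere in this driver; its exact expansion
 * varies by kernel version), which passes a capability flag's bit number
 * together with the offset of the FLAGS1/FLAGS2 dword it lives in, e.g. the
 * SET_MAC_ENHANCED check in efx_ef10_mac_reconfigure() above:
 *
 *	efx_has_cap(efx, SET_MAC_ENHANCED)
 */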

#define EF10_OFFLOAD_FEATURES		\
	(NETIF_F_IP_CSUM |		\
	 NETIF_F_HW_VLAN_CTAG_FILTER |	\
	 NETIF_F_IPV6_CSUM |		\
	 NETIF_F_RXHASH |		\
	 NETIF_F_NTUPLE)

const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
	.is_vf = true,
	.mem_bar = efx_ef10_vf_mem_bar,
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe_vf,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_ef10_fini_nic,
	.map_reset_reason = efx_ef10_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_ef10_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_fini_dmaq,
	.prepare_flr = efx_ef10_prepare_flr,
	.finish_flr = efx_port_dummy_op_void,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats_vf,
	.update_stats_atomic = efx_ef10_update_stats_atomic_vf,
	.start_stats = efx_port_dummy_op_void,
	.pull_stats = efx_port_dummy_op_void,
	.stop_stats = efx_port_dummy_op_void,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol_vf,
	.set_wol = efx_ef10_set_wol_vf,
	.resume_wol = efx_port_dummy_op_void,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_mcdi_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.tx_limit_len = efx_ef10_tx_limit_len,
	.tx_enqueue = __efx_enqueue_skb,
	.rx_push_rss_config = efx_mcdi_vf_rx_push_rss_config,
	.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
	.rx_probe = efx_mcdi_rx_probe,
	.rx_init = efx_mcdi_rx_init,
	.rx_remove = efx_mcdi_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.rx_packet = __efx_rx_packet,
	.ev_probe = efx_mcdi_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_mcdi_ev_fini,
	.ev_remove = efx_mcdi_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_mcdi_filter_table_restore,
	.filter_table_remove = efx_mcdi_filter_table_remove,
	.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
	.filter_insert = efx_mcdi_filter_insert,
	.filter_remove_safe = efx_mcdi_filter_remove_safe,
	.filter_get_safe = efx_mcdi_filter_get_safe,
	.filter_clear_rx = efx_mcdi_filter_clear_rx,
	.filter_count_rx_used = efx_mcdi_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_port_dummy_op_int,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
	.vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
	.vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
	.vswitching_probe = efx_ef10_vswitching_probe_vf,
	.vswitching_restore = efx_ef10_vswitching_restore_vf,
	.vswitching_remove = efx_ef10_vswitching_remove_vf,
#endif
	.get_mac_address = efx_ef10_get_mac_address_vf,
	.set_mac_address = efx_ef10_set_mac_address,

	.get_phys_port_id = efx_ef10_get_phys_port_id,
	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.min_interrupt_mode = EFX_INT_MODE_MSIX,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = EF10_OFFLOAD_FEATURES,
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
			    1 << HWTSTAMP_FILTER_ALL,
	.rx_hash_key_size = 40,
	.check_caps = ef10_check_caps,
	.print_additional_fwver = efx_ef10_print_additional_fwver,
	.sensor_event = efx_mcdi_sensor_event,
};

const struct efx_nic_type efx_hunt_a0_nic_type = {
	.is_vf = false,
	.mem_bar = efx_ef10_pf_mem_bar,
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe_pf,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_ef10_fini_nic,
	.map_reset_reason = efx_ef10_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_ef10_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_fini_dmaq,
	.prepare_flr = efx_ef10_prepare_flr,
	.finish_flr = efx_port_dummy_op_void,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats_pf,
	.start_stats = efx_mcdi_mac_start_stats,
	.pull_stats = efx_mcdi_mac_pull_stats,
	.stop_stats = efx_mcdi_mac_stop_stats,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol,
	.set_wol = efx_ef10_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_chip = efx_ef10_test_chip,
	.test_nvram = efx_mcdi_nvram_test_all,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_mcdi_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.tx_limit_len = efx_ef10_tx_limit_len,
	.tx_enqueue = __efx_enqueue_skb,
	.rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
	.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
	.rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
	.rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
	.rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
	.rx_probe = efx_mcdi_rx_probe,
	.rx_init = efx_mcdi_rx_init,
	.rx_remove = efx_mcdi_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.rx_packet = __efx_rx_packet,
	.ev_probe = efx_mcdi_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_mcdi_ev_fini,
	.ev_remove = efx_mcdi_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_mcdi_filter_table_restore,
	.filter_table_remove = efx_mcdi_filter_table_remove,
	.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
	.filter_insert = efx_mcdi_filter_insert,
	.filter_remove_safe = efx_mcdi_filter_remove_safe,
	.filter_get_safe = efx_mcdi_filter_get_safe,
	.filter_clear_rx = efx_mcdi_filter_clear_rx,
	.filter_count_rx_used = efx_mcdi_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_ef10_mtd_probe,
	.mtd_rename = efx_mcdi_mtd_rename,
	.mtd_read = efx_mcdi_mtd_read,
	.mtd_erase = efx_mcdi_mtd_erase,
	.mtd_write = efx_mcdi_mtd_write,
	.mtd_sync = efx_mcdi_mtd_sync,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
	.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
	.vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
	.vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
	.udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
	.udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = efx_ef10_sriov_configure,
	.sriov_init = efx_ef10_sriov_init,
	.sriov_fini = efx_ef10_sriov_fini,
	.sriov_wanted = efx_ef10_sriov_wanted,
	.sriov_reset = efx_ef10_sriov_reset,
	.sriov_flr = efx_ef10_sriov_flr,
	.sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
	.sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
	.sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
	.sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
	.sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
	.vswitching_probe = efx_ef10_vswitching_probe_pf,
	.vswitching_restore = efx_ef10_vswitching_restore_pf,
	.vswitching_remove = efx_ef10_vswitching_remove_pf,
#endif
	.get_mac_address = efx_ef10_get_mac_address_pf,
	.set_mac_address = efx_ef10_set_mac_address,
	.tso_versions = efx_ef10_tso_versions,

	.get_phys_port_id = efx_ef10_get_phys_port_id,
	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.option_descriptors = true,
	.min_interrupt_mode = EFX_INT_MODE_LEGACY,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = EF10_OFFLOAD_FEATURES,
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
			    1 << HWTSTAMP_FILTER_ALL,
	.rx_hash_key_size = 40,
	.check_caps = ef10_check_caps,
	.print_additional_fwver = efx_ef10_print_additional_fwver,
	.sensor_event = efx_mcdi_sensor_event,
};