// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "ef10_regs.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
#include "mcdi_pcol.h"

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags)
{
	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, gfp_flags);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
				  buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}
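
/* Illustrative usage (hypothetical caller, not part of this file):
 *
 *	struct efx_buffer buf;
 *
 *	if (efx_nic_alloc_buffer(efx, &buf, 1024, GFP_KERNEL))
 *		return -ENOMEM;
 *	... use buf.addr (CPU address) and buf.dma_addr (device address) ...
 *	efx_nic_free_buffer(efx, &buf);
 */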

/* Check whether an event is present in the eventq at the current
 * read pointer. Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

void efx_nic_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();
	channel->efx->type->ev_test_generate(channel);
}

int efx_nic_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();
	return efx->type->irq_test_generate(efx);
}

/* Hook interrupt handler(s)
 * Hooks the legacy IRQ when MSI is not in use, otherwise one MSI or
 * MSI-X interrupt per channel.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int n_irqs;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		rc = request_irq(efx->legacy_irq,
				 efx->type->irq_handle_legacy, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		efx->irqs_hooked = true;
		return 0;
	}

#ifdef CONFIG_RFS_ACCEL
	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		efx->net_dev->rx_cpu_rmap =
			alloc_irq_cpu_rmap(efx->n_rx_channels);
		if (!efx->net_dev->rx_cpu_rmap) {
			rc = -ENOMEM;
			goto fail1;
		}
	}
#endif

	/* Hook MSI or MSI-X interrupt */
	n_irqs = 0;
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx->type->irq_handle_msi,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->msi_context[channel->channel].name,
				 &efx->msi_context[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
		++n_irqs;

#ifdef CONFIG_RFS_ACCEL
		if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
		    channel->channel < efx->n_rx_channels) {
			rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
					      channel->irq);
			if (rc)
				goto fail2;
		}
#endif
	}

	efx->irqs_hooked = true;
	return 0;

 fail2:
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	efx_for_each_channel(channel, efx) {
		if (n_irqs-- == 0)
			break;
		free_irq(channel->irq, &efx->msi_context[channel->channel]);
	}
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif

	if (!efx->irqs_hooked)
		return;
	if (EFX_INT_MODE_USE_MSI(efx)) {
		/* Disable MSI/MSI-X interrupts */
		efx_for_each_channel(channel, efx)
			free_irq(channel->irq,
				 &efx->msi_context[channel->channel]);
	} else {
		/* Disable legacy interrupt */
		free_irq(efx->legacy_irq, efx);
	}
	efx->irqs_hooked = false;
}

/* Register dump */

#define REGISTER_REVISION_FA	1
#define REGISTER_REVISION_FB	2
#define REGISTER_REVISION_FC	3
#define REGISTER_REVISION_FZ	3	/* last Falcon arch revision */
#define REGISTER_REVISION_ED	4
#define REGISTER_REVISION_EZ	4	/* latest EF10 revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
};

#define REGISTER(name, arch, min_rev, max_rev) {			\
	arch ## R_ ## min_rev ## max_rev ## _ ## name,			\
	REGISTER_REVISION_ ## arch ## min_rev,				\
	REGISTER_REVISION_ ## arch ## max_rev				\
}
#define REGISTER_AA(name) REGISTER(name, F, A, A)
#define REGISTER_AB(name) REGISTER(name, F, A, B)
#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
#define REGISTER_BB(name) REGISTER(name, F, B, B)
#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
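
/* For example, REGISTER_AZ(INT_EN_KER) below expands to
 *	{ FR_AZ_INT_EN_KER, REGISTER_REVISION_FA, REGISTER_REVISION_FZ }
 * i.e. INT_EN_KER is dumped on every Falcon-architecture revision.
 */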

static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
	REGISTER_DZ(BIU_HW_REV_ID),
	REGISTER_DZ(MC_DB_LWRD),
	REGISTER_DZ(MC_DB_HWRD),
};

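/* Each table entry gives the base offset of a register table, the
 * hardware revisions that have it, the byte stride between rows, and
 * the number of rows.  The dump code reads at most 16 bytes (one
 * efx_oword_t) per row; see efx_nic_get_regs_len() and
 * efx_nic_get_regs() below.
 */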
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## arch ## min_rev,				\
	REGISTER_REVISION_ ## arch ## max_rev,				\
	step, rows							\
}
#define REGISTER_TABLE(name, arch, min_rev, max_rev)			\
	REGISTER_TABLE_DIMENSIONS(					\
		name, arch ## R_ ## min_rev ## max_rev ## _ ## name,	\
		arch, min_rev, max_rev,					\
		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B,	\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z,	\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
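
/* For example, REGISTER_TABLE_BZ(RX_INDIRECTION_TBL) below expands to
 *	{ FR_BZ_RX_INDIRECTION_TBL, REGISTER_REVISION_FB,
 *	  REGISTER_REVISION_FZ, FR_BZ_RX_INDIRECTION_TBL_STEP,
 *	  FR_BZ_RX_INDIRECTION_TBL_ROWS }
 * while REGISTER_TABLE_BB_CZ() emits two entries because the row count
 * differs between revisions B and C+.
 */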

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version.
	 */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  F, A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  F, B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
	REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}

/**
 * efx_nic_describe_stats - Describe supported statistics for ethtool
 * @desc: Array of &struct efx_hw_stat_desc describing the statistics
 * @count: Length of the @desc array
 * @mask: Bitmask of which elements of @desc are enabled
 * @names: Buffer to copy names to, or %NULL. The names are copied
 *	starting at intervals of %ETH_GSTRING_LEN bytes.
 *
 * Returns the number of visible statistics, i.e. the number of set
 * bits in the first @count bits of @mask for which a name is defined.
 */
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
			      const unsigned long *mask, u8 *names)
{
	size_t visible = 0;
	size_t index;

	for_each_set_bit(index, mask, count) {
		if (desc[index].name) {
			if (names) {
				strlcpy(names, desc[index].name,
					ETH_GSTRING_LEN);
				names += ETH_GSTRING_LEN;
			}
			++visible;
		}
	}

	return visible;
}

/**
 * efx_nic_copy_stats - Copy stats from the DMA buffer into an intermediate buffer
 * @efx: The associated NIC.
 * @dest: Destination buffer. Must be the same size as the DMA buffer.
 *
 * This is used to get a consistent set of stats while the DMA buffer
 * can be written at any time by the NIC.
 *
 * Return: 0 on success (or when stats are unavailable, in which case
 * @dest is zeroed), -EIO if a consistent snapshot could not be read.
 */
int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest)
{
	__le64 *dma_stats = efx->stats_buffer.addr;
	__le64 generation_start, generation_end;
	int rc = 0, retry;

	if (!dest)
		return 0;

	if (!dma_stats)
		goto return_zeroes;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
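	/* The firmware writes the start generation count, then the stats,
	 * then the end generation count; we read in the opposite order,
	 * with rmb() between the steps, so the snapshot is consistent
	 * exactly when the two counts match (a seqlock-like scheme).
	 */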
	for (retry = 0; retry < 100; ++retry) {
		generation_end = dma_stats[efx->num_mac_stats - 1];
		if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
			goto return_zeroes;
		rmb();
		memcpy(dest, dma_stats, efx->num_mac_stats * sizeof(__le64));
		rmb();
		generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
		if (generation_end == generation_start)
			return 0; /* return good data */
		udelay(100);
	}

	rc = -EIO;

return_zeroes:
	memset(dest, 0, efx->num_mac_stats * sizeof(u64));
	return rc;
}

/**
 * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
 * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
 *	layout. DMA widths of 0, 16, 32 and 64 are supported; where
 *	the width is specified as 0 the corresponding element of
 *	@stats is not updated.
 * @count: Length of the @desc array
 * @mask: Bitmask of which elements of @desc are enabled
 * @stats: Buffer to update with the converted statistics. The length
 *	of this array must be at least @count.
 * @dma_buf: DMA buffer containing hardware statistics
 * @accumulate: If set, the converted values will be added rather than
 *	directly stored to the corresponding elements of @stats
 */
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
			  const unsigned long *mask,
			  u64 *stats, const void *dma_buf, bool accumulate)
{
	size_t index;

	for_each_set_bit(index, mask, count) {
		if (desc[index].dma_width) {
			const void *addr = dma_buf + desc[index].offset;
			u64 val;

			switch (desc[index].dma_width) {
			case 16:
				val = le16_to_cpup((__le16 *)addr);
				break;
			case 32:
				val = le32_to_cpup((__le32 *)addr);
				break;
			case 64:
				val = le64_to_cpup((__le64 *)addr);
				break;
			default:
				WARN_ON(1);
				val = 0;
				break;
			}

			if (accumulate)
				stats[index] += val;
			else
				stats[index] = val;
		}
	}
}
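
/* Illustrative usage (hypothetical names, not part of this file): a
 * NIC-type implementation typically drives both stats helpers from the
 * same descriptor table and mask, e.g.
 *
 *	efx_nic_describe_stats(my_stat_desc, MY_STAT_COUNT,
 *			       my_stat_mask, names);
 *	efx_nic_update_stats(my_stat_desc, MY_STAT_COUNT, my_stat_mask,
 *			     stats, efx->stats_buffer.addr, false);
 */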

void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
{
	/* if down, or this is the first update after coming up */
	if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
		efx->rx_nodesc_drops_while_down +=
			*rx_nodesc_drops - efx->rx_nodesc_drops_total;
	efx->rx_nodesc_drops_total = *rx_nodesc_drops;
	efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
	*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}