// SPDX-License-Identifier: GPL-2.0+
/*
 * Microchip ENC28J60 ethernet driver (MAC + PHY)
 *
 * Copyright (C) 2007 Eurek srl
 * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
 * based on enc28j60.c written by David Anders for 2.4 kernel version
 *
 * $Id: enc28j60.c,v 1.22 2007/12/20 10:47:01 claudio Exp $
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/property.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>

#include "enc28j60_hw.h"

#define DRV_NAME	"enc28j60"
#define DRV_VERSION	"1.02"

#define SPI_OPLEN	1

#define ENC28J60_MSG_DEFAULT	\
	(NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK)

/* Buffer size required for the largest SPI transfer (i.e., reading a
 * frame).
 */
#define SPI_TRANSFER_BUF_LEN	(4 + MAX_FRAMELEN)

#define TX_TIMEOUT	(4 * HZ)

/* Max TX retries in case of collision as suggested by errata datasheet */
#define MAX_TX_RETRYCOUNT	16

enum {
	RXFILTER_NORMAL,
	RXFILTER_MULTI,
	RXFILTER_PROMISC
};

/* Driver local data */
struct enc28j60_net {
	struct net_device *netdev;
	struct spi_device *spi;
	struct mutex lock;
	struct sk_buff *tx_skb;
	struct work_struct tx_work;
	struct work_struct irq_work;
	struct work_struct setrx_work;
	struct work_struct restart_work;
	u8 bank;		/* current register bank selected */
	u16 next_pk_ptr;	/* next packet pointer within FIFO */
	u16 max_pk_counter;	/* statistics: max packet counter */
	u16 tx_retry_count;
	bool hw_enable;
	bool full_duplex;
	int rxfilter;
	u32 msg_enable;
	u8 spi_transfer_buf[SPI_TRANSFER_BUF_LEN];
};

/* use ethtool to change the level for any given device */
static struct {
	u32 msg_enable;
} debug = { -1 };

/*
 * SPI read buffer
 * Wait for the SPI transfer and copy received data to destination.
 */
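/*
 * Transfer layout (as implemented below): a one-byte ENC28J60_READ_BUF_MEM
 * opcode is clocked out first, then @len bytes are clocked in. The received
 * data lands at offset 4 of the shared spi_transfer_buf and is copied to
 * @data only if the whole SPI message succeeded.
 */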
static int
spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
{
	struct device *dev = &priv->spi->dev;
	u8 *rx_buf = priv->spi_transfer_buf + 4;
	u8 *tx_buf = priv->spi_transfer_buf;
	struct spi_transfer tx = {
		.tx_buf = tx_buf,
		.len = SPI_OPLEN,
	};
	struct spi_transfer rx = {
		.rx_buf = rx_buf,
		.len = len,
	};
	struct spi_message msg;
	int ret;

	tx_buf[0] = ENC28J60_READ_BUF_MEM;

	spi_message_init(&msg);
	spi_message_add_tail(&tx, &msg);
	spi_message_add_tail(&rx, &msg);

	ret = spi_sync(priv->spi, &msg);
	if (ret == 0) {
		memcpy(data, rx_buf, len);
		ret = msg.status;
	}
	if (ret && netif_msg_drv(priv))
		dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
			   __func__, ret);

	return ret;
}

/*
 * SPI write buffer
 */
static int spi_write_buf(struct enc28j60_net *priv, int len, const u8 *data)
{
	struct device *dev = &priv->spi->dev;
	int ret;

	if (len > SPI_TRANSFER_BUF_LEN - 1 || len <= 0)
		ret = -EINVAL;
	else {
		priv->spi_transfer_buf[0] = ENC28J60_WRITE_BUF_MEM;
		memcpy(&priv->spi_transfer_buf[1], data, len);
		ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1);
		if (ret && netif_msg_drv(priv))
			dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
				   __func__, ret);
	}
	return ret;
}

/*
 * basic SPI read operation
 */
static u8 spi_read_op(struct enc28j60_net *priv, u8 op, u8 addr)
{
	struct device *dev = &priv->spi->dev;
	u8 tx_buf[2];
	u8 rx_buf[4];
	u8 val = 0;
	int ret;
	int slen = SPI_OPLEN;

	/* do dummy read if needed: MAC and MII registers shift out a
	 * dummy byte before the data byte
	 */
	if (addr & SPRD_MASK)
		slen++;

	tx_buf[0] = op | (addr & ADDR_MASK);
	ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen);
	if (ret)
		dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
			   __func__, ret);
	else
		val = rx_buf[slen - 1];

	return val;
}

/*
 * basic SPI write operation
 */
static int spi_write_op(struct enc28j60_net *priv, u8 op, u8 addr, u8 val)
{
	struct device *dev = &priv->spi->dev;
	int ret;

	priv->spi_transfer_buf[0] = op | (addr & ADDR_MASK);
	priv->spi_transfer_buf[1] = val;
	ret = spi_write(priv->spi, priv->spi_transfer_buf, 2);
	if (ret && netif_msg_drv(priv))
		dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
			   __func__, ret);
	return ret;
}

static void enc28j60_soft_reset(struct enc28j60_net *priv)
{
	spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET);
	/* Errata workaround #1, CLKRDY check is unreliable,
	 * delay at least 1 ms instead */
	udelay(2000);
}

/*
 * select the current register bank if necessary
 */
static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr)
{
	u8 b = (addr & BANK_MASK) >> 5;

	/* These registers (EIE, EIR, ESTAT, ECON2, ECON1)
	 * are present in all banks, no need to switch bank.
	 */
	if (addr >= EIE && addr <= ECON1)
		return;

	/* Clear or set each bank selection bit as needed */
	if ((b & ECON1_BSEL0) != (priv->bank & ECON1_BSEL0)) {
		if (b & ECON1_BSEL0)
			spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1,
				     ECON1_BSEL0);
		else
			spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1,
				     ECON1_BSEL0);
	}
	if ((b & ECON1_BSEL1) != (priv->bank & ECON1_BSEL1)) {
		if (b & ECON1_BSEL1)
			spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1,
				     ECON1_BSEL1);
		else
			spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1,
				     ECON1_BSEL1);
	}
	priv->bank = b;
}

/*
 * Register access routines through the SPI bus.
 * Every register access comes in two flavours:
 * - nolock_xxx: the caller must take priv->lock, usually to atomically
 *   access more than one register
 * - locked_xxx: the caller doesn't need to take the mutex, single access
 *
 * Some registers can be accessed through the bit field set and bit field
 * clear commands to avoid a read-modify-write cycle.
 */
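/*
 * Usage sketch (illustrative): a single register access can use the
 * locked_* helpers directly, e.g. locked_regb_write(priv, ERXFCON, ...),
 * while a multi-register sequence takes priv->lock once and then uses the
 * nolock_* variants, as enc28j60_hw_init() and enc28j60_hw_rx() do below.
 */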

/*
 * Register bit field Set
 */
static void nolock_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask)
{
	enc28j60_set_bank(priv, addr);
	spi_write_op(priv, ENC28J60_BIT_FIELD_SET, addr, mask);
}

static void locked_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask)
{
	mutex_lock(&priv->lock);
	nolock_reg_bfset(priv, addr, mask);
	mutex_unlock(&priv->lock);
}

/*
 * Register bit field Clear
 */
static void nolock_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask)
{
	enc28j60_set_bank(priv, addr);
	spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, addr, mask);
}

static void locked_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask)
{
	mutex_lock(&priv->lock);
	nolock_reg_bfclr(priv, addr, mask);
	mutex_unlock(&priv->lock);
}

/*
 * Register byte read
 */
static int nolock_regb_read(struct enc28j60_net *priv, u8 address)
{
	enc28j60_set_bank(priv, address);
	return spi_read_op(priv, ENC28J60_READ_CTRL_REG, address);
}

static int locked_regb_read(struct enc28j60_net *priv, u8 address)
{
	int ret;

	mutex_lock(&priv->lock);
	ret = nolock_regb_read(priv, address);
	mutex_unlock(&priv->lock);

	return ret;
}

/*
 * Register word read
 */
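/*
 * Note: the 16-bit SFRs are split into low/high register pairs at
 * consecutive addresses (e.g. ERDPTL/ERDPTH), so the word helpers take
 * the low-byte address and access 'address' and 'address + 1'.
 */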
static int nolock_regw_read(struct enc28j60_net *priv, u8 address)
{
	int rl, rh;

	enc28j60_set_bank(priv, address);
	rl = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address);
	rh = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address + 1);

	return (rh << 8) | rl;
}

static int locked_regw_read(struct enc28j60_net *priv, u8 address)
{
	int ret;

	mutex_lock(&priv->lock);
	ret = nolock_regw_read(priv, address);
	mutex_unlock(&priv->lock);

	return ret;
}

/*
 * Register byte write
 */
static void nolock_regb_write(struct enc28j60_net *priv, u8 address, u8 data)
{
	enc28j60_set_bank(priv, address);
	spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, data);
}

static void locked_regb_write(struct enc28j60_net *priv, u8 address, u8 data)
{
	mutex_lock(&priv->lock);
	nolock_regb_write(priv, address, data);
	mutex_unlock(&priv->lock);
}

/*
 * Register word write
 */
static void nolock_regw_write(struct enc28j60_net *priv, u8 address, u16 data)
{
	enc28j60_set_bank(priv, address);
	spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, (u8) data);
	spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address + 1,
		     (u8) (data >> 8));
}

static void locked_regw_write(struct enc28j60_net *priv, u8 address, u16 data)
{
	mutex_lock(&priv->lock);
	nolock_regw_write(priv, address, data);
	mutex_unlock(&priv->lock);
}

/*
 * Buffer memory read
 * Select the starting address and execute a SPI buffer read.
 */
static void enc28j60_mem_read(struct enc28j60_net *priv, u16 addr, int len,
			      u8 *data)
{
	mutex_lock(&priv->lock);
	nolock_regw_write(priv, ERDPTL, addr);
#ifdef CONFIG_ENC28J60_WRITEVERIFY
	if (netif_msg_drv(priv)) {
		struct device *dev = &priv->spi->dev;
		u16 reg;

		reg = nolock_regw_read(priv, ERDPTL);
		if (reg != addr)
			dev_printk(KERN_DEBUG, dev,
				   "%s() error writing ERDPT (0x%04x - 0x%04x)\n",
				   __func__, reg, addr);
	}
#endif
	spi_read_buf(priv, len, data);
	mutex_unlock(&priv->lock);
}

/*
 * Write packet to enc28j60 TX buffer memory
 */
static void
enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
{
	struct device *dev = &priv->spi->dev;

	mutex_lock(&priv->lock);
	/* Set the write pointer to start of transmit buffer area */
	nolock_regw_write(priv, EWRPTL, TXSTART_INIT);
#ifdef CONFIG_ENC28J60_WRITEVERIFY
	if (netif_msg_drv(priv)) {
		u16 reg;
		reg = nolock_regw_read(priv, EWRPTL);
		if (reg != TXSTART_INIT)
			dev_printk(KERN_DEBUG, dev,
				   "%s() ERWPT:0x%04x != 0x%04x\n",
				   __func__, reg, TXSTART_INIT);
	}
#endif
	/* Set the TXND pointer to correspond to the packet size given */
	nolock_regw_write(priv, ETXNDL, TXSTART_INIT + len);
	/* write per-packet control byte */
	spi_write_op(priv, ENC28J60_WRITE_BUF_MEM, 0, 0x00);
	if (netif_msg_hw(priv))
		dev_printk(KERN_DEBUG, dev,
			   "%s() after control byte ERWPT:0x%04x\n",
			   __func__, nolock_regw_read(priv, EWRPTL));
	/* copy the packet into the transmit buffer */
	spi_write_buf(priv, len, data);
	if (netif_msg_hw(priv))
		dev_printk(KERN_DEBUG, dev,
			   "%s() after write packet ERWPT:0x%04x, len=%d\n",
			   __func__, nolock_regw_read(priv, EWRPTL), len);
	mutex_unlock(&priv->lock);
}

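/*
 * Poll a register until (reg & mask) == val, giving up after roughly
 * 20 ms. Returns 0 on success or -ETIMEDOUT. It uses the nolock_*
 * accessors, so callers are expected to hold priv->lock.
 */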
static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val)
{
	struct device *dev = &priv->spi->dev;
	unsigned long timeout = jiffies + msecs_to_jiffies(20);

	/* 20 msec timeout read */
	while ((nolock_regb_read(priv, reg) & mask) != val) {
		if (time_after(jiffies, timeout)) {
			if (netif_msg_drv(priv))
				dev_dbg(dev, "reg %02x ready timeout!\n", reg);
			return -ETIMEDOUT;
		}
		cpu_relax();
	}
	return 0;
}

/*
 * Wait until the PHY operation is complete.
 * Returns 1 when the PHY is ready, 0 if the wait timed out.
 */
static int wait_phy_ready(struct enc28j60_net *priv)
{
	return poll_ready(priv, MISTAT, MISTAT_BUSY, 0) ? 0 : 1;
}

/*
 * PHY register read
 * PHY registers are not accessed directly, but through the MII.
 */
static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address)
{
	u16 ret;

	mutex_lock(&priv->lock);
	/* set the PHY register address */
	nolock_regb_write(priv, MIREGADR, address);
	/* start the register read operation */
	nolock_regb_write(priv, MICMD, MICMD_MIIRD);
	/* wait until the PHY read completes */
	wait_phy_ready(priv);
	/* quit reading */
	nolock_regb_write(priv, MICMD, 0x00);
	/* return the data */
	ret = nolock_regw_read(priv, MIRDL);
	mutex_unlock(&priv->lock);

	return ret;
}

static int enc28j60_phy_write(struct enc28j60_net *priv, u8 address, u16 data)
{
	int ret;

	mutex_lock(&priv->lock);
	/* set the PHY register address */
	nolock_regb_write(priv, MIREGADR, address);
	/* write the PHY data */
	nolock_regw_write(priv, MIWRL, data);
	/* wait until the PHY write completes and return */
	ret = wait_phy_ready(priv);
	mutex_unlock(&priv->lock);

	return ret;
}

/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static int enc28j60_set_hw_macaddr(struct net_device *ndev)
{
	int ret;
	struct enc28j60_net *priv = netdev_priv(ndev);
	struct device *dev = &priv->spi->dev;

	mutex_lock(&priv->lock);
	if (!priv->hw_enable) {
		if (netif_msg_drv(priv))
			dev_info(dev, "%s: Setting MAC address to %pM\n",
				 ndev->name, ndev->dev_addr);
		/* NOTE: MAC address in ENC28J60 is byte-backward */
		nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]);
		nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]);
		nolock_regb_write(priv, MAADR3, ndev->dev_addr[2]);
		nolock_regb_write(priv, MAADR2, ndev->dev_addr[3]);
		nolock_regb_write(priv, MAADR1, ndev->dev_addr[4]);
		nolock_regb_write(priv, MAADR0, ndev->dev_addr[5]);
		ret = 0;
	} else {
		if (netif_msg_drv(priv))
			dev_printk(KERN_DEBUG, dev,
				   "%s() Hardware must be disabled to set Mac address\n",
				   __func__);
		ret = -EBUSY;
	}
	mutex_unlock(&priv->lock);
	return ret;
}

/*
 * Store the new hardware address in dev->dev_addr, and update the MAC.
 */
static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *address = addr;

	if (netif_running(dev))
		return -EBUSY;
	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(dev->dev_addr, address->sa_data);
	return enc28j60_set_hw_macaddr(dev);
}

/*
 * Debug routine to dump useful register contents
 */
static void enc28j60_dump_regs(struct enc28j60_net *priv, const char *msg)
{
	struct device *dev = &priv->spi->dev;

	mutex_lock(&priv->lock);
	dev_printk(KERN_DEBUG, dev,
		   " %s\n"
		   "HwRevID: 0x%02x\n"
		   "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n"
		   " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n"
		   "MAC : MACON1 MACON3 MACON4\n"
		   " 0x%02x 0x%02x 0x%02x\n"
		   "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n"
		   " 0x%04x 0x%04x 0x%04x 0x%04x "
		   "0x%02x 0x%02x 0x%04x\n"
		   "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n"
		   " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n",
		   msg, nolock_regb_read(priv, EREVID),
		   nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2),
		   nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR),
		   nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1),
		   nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4),
		   nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL),
		   nolock_regw_read(priv, ERXWRPTL),
		   nolock_regw_read(priv, ERXRDPTL),
		   nolock_regb_read(priv, ERXFCON),
		   nolock_regb_read(priv, EPKTCNT),
		   nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL),
		   nolock_regw_read(priv, ETXNDL),
		   nolock_regb_read(priv, MACLCON1),
		   nolock_regb_read(priv, MACLCON2),
		   nolock_regb_read(priv, MAPHSUP));
	mutex_unlock(&priv->lock);
}

/*
 * ERXRDPT must always be set at an odd address; refer to the errata datasheet
 */
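/*
 * Illustrative note: next_packet_ptr as reported by the hardware is
 * normally even (received frames start at even addresses in the FIFO), so
 * next_packet_ptr - 1 is the odd address just before the packet; when that
 * would fall outside [start, end], ERXRDPT is parked at 'end' instead.
 */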
static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
{
	u16 erxrdpt;

	if ((next_packet_ptr - 1 < start) || (next_packet_ptr - 1 > end))
		erxrdpt = end;
	else
		erxrdpt = next_packet_ptr - 1;

	return erxrdpt;
}

/*
 * Calculate wrap around when reading beyond the end of the RX buffer
 */
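/*
 * The frame data of a received packet sits RSV_SIZE bytes past its start
 * pointer (the per-packet next-packet pointer plus the receive status
 * vector come first), hence the '+ RSV_SIZE' below, wrapped back into the
 * RX buffer when it runs past RXEND_INIT.
 */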
static u16 rx_packet_start(u16 ptr)
{
	if (ptr + RSV_SIZE > RXEND_INIT)
		return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
	else
		return ptr + RSV_SIZE;
}

static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
	struct device *dev = &priv->spi->dev;
	u16 erxrdpt;

	if (start > 0x1FFF || end > 0x1FFF || start > end) {
		if (netif_msg_drv(priv))
			dev_err(dev, "%s(%d, %d) RXFIFO bad parameters!\n",
				__func__, start, end);
		return;
	}
	/* set receive buffer start + end */
	priv->next_pk_ptr = start;
	nolock_regw_write(priv, ERXSTL, start);
	erxrdpt = erxrdpt_workaround(priv->next_pk_ptr, start, end);
	nolock_regw_write(priv, ERXRDPTL, erxrdpt);
	nolock_regw_write(priv, ERXNDL, end);
}

static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
	struct device *dev = &priv->spi->dev;

	if (start > 0x1FFF || end > 0x1FFF || start > end) {
		if (netif_msg_drv(priv))
			dev_err(dev, "%s(%d, %d) TXFIFO bad parameters!\n",
				__func__, start, end);
		return;
	}
	/* set transmit buffer start + end */
	nolock_regw_write(priv, ETXSTL, start);
	nolock_regw_write(priv, ETXNDL, end);
}

/*
 * Low power mode shrinks power consumption about 100x, so we'd like
 * the chip to be in that mode whenever it's inactive. (However, we
 * can't stay in low power mode during suspend with WOL active.)
 */
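/*
 * The sequence below follows the power-down procedure suggested by the
 * datasheet: disable packet reception, wait for any in-progress receive
 * (ESTAT_RXBUSY) and transmit (ECON1_TXRTS) to finish, then set
 * ECON2_PWRSV. Waking up clears PWRSV and waits for the clock (CLKRDY)
 * to become stable again.
 */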
static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low)
{
	struct device *dev = &priv->spi->dev;

	if (netif_msg_drv(priv))
		dev_dbg(dev, "%s power...\n", is_low ? "low" : "high");

	mutex_lock(&priv->lock);
	if (is_low) {
		nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
		poll_ready(priv, ESTAT, ESTAT_RXBUSY, 0);
		poll_ready(priv, ECON1, ECON1_TXRTS, 0);
		/* ECON2_VRPS was set during initialization */
		nolock_reg_bfset(priv, ECON2, ECON2_PWRSV);
	} else {
		nolock_reg_bfclr(priv, ECON2, ECON2_PWRSV);
		poll_ready(priv, ESTAT, ESTAT_CLKRDY, ESTAT_CLKRDY);
		/* caller sets ECON1_RXEN */
	}
	mutex_unlock(&priv->lock);
}

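/*
 * Reset the chip and program the MAC and PHY according to
 * priv->full_duplex. Returns 0 if the chip does not respond sanely
 * (RevID reads as 0x00 or 0xFF) or a PHY write times out, nonzero on
 * success.
 */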
static int enc28j60_hw_init(struct enc28j60_net *priv)
{
	struct device *dev = &priv->spi->dev;
	u8 reg;

	if (netif_msg_drv(priv))
		dev_printk(KERN_DEBUG, dev, "%s() - %s\n", __func__,
			   priv->full_duplex ? "FullDuplex" : "HalfDuplex");

	mutex_lock(&priv->lock);
	/* first reset the chip */
	enc28j60_soft_reset(priv);
	/* Clear ECON1 */
	spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, ECON1, 0x00);
	priv->bank = 0;
	priv->hw_enable = false;
	priv->tx_retry_count = 0;
	priv->max_pk_counter = 0;
	priv->rxfilter = RXFILTER_NORMAL;
	/* enable address auto increment and voltage regulator powersave */
	nolock_regb_write(priv, ECON2, ECON2_AUTOINC | ECON2_VRPS);

	nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT);
	nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT);
	mutex_unlock(&priv->lock);

	/*
	 * Check the RevID.
	 * If it's 0x00 or 0xFF probably the enc28j60 is not mounted or
	 * damaged.
	 */
	reg = locked_regb_read(priv, EREVID);
	if (netif_msg_drv(priv))
		dev_info(dev, "chip RevID: 0x%02x\n", reg);
	if (reg == 0x00 || reg == 0xff) {
		if (netif_msg_drv(priv))
			dev_printk(KERN_DEBUG, dev, "%s() Invalid RevId %d\n",
				   __func__, reg);
		return 0;
	}

	/* default filter mode: (unicast OR broadcast) AND crc valid */
	locked_regb_write(priv, ERXFCON,
			  ERXFCON_UCEN | ERXFCON_CRCEN | ERXFCON_BCEN);

	/* enable MAC receive */
	locked_regb_write(priv, MACON1,
			  MACON1_MARXEN | MACON1_TXPAUS | MACON1_RXPAUS);
	/* enable automatic padding and CRC operations */
	if (priv->full_duplex) {
		locked_regb_write(priv, MACON3,
				  MACON3_PADCFG0 | MACON3_TXCRCEN |
				  MACON3_FRMLNEN | MACON3_FULDPX);
		/* set inter-frame gap (non-back-to-back) */
		locked_regb_write(priv, MAIPGL, 0x12);
		/* set inter-frame gap (back-to-back) */
		locked_regb_write(priv, MABBIPG, 0x15);
	} else {
		locked_regb_write(priv, MACON3,
				  MACON3_PADCFG0 | MACON3_TXCRCEN |
				  MACON3_FRMLNEN);
		locked_regb_write(priv, MACON4, 1 << 6);	/* DEFER bit */
		/* set inter-frame gap (non-back-to-back) */
		locked_regw_write(priv, MAIPGL, 0x0C12);
		/* set inter-frame gap (back-to-back) */
		locked_regb_write(priv, MABBIPG, 0x12);
	}
	/*
	 * MACLCON1 (default)
	 * MACLCON2 (default)
	 * Set the maximum packet size which the controller will accept.
	 */
	locked_regw_write(priv, MAMXFLL, MAX_FRAMELEN);

	/* Configure LEDs */
	if (!enc28j60_phy_write(priv, PHLCON, ENC28J60_LAMPS_MODE))
		return 0;

	if (priv->full_duplex) {
		if (!enc28j60_phy_write(priv, PHCON1, PHCON1_PDPXMD))
			return 0;
		if (!enc28j60_phy_write(priv, PHCON2, 0x00))
			return 0;
	} else {
		if (!enc28j60_phy_write(priv, PHCON1, 0x00))
			return 0;
		if (!enc28j60_phy_write(priv, PHCON2, PHCON2_HDLDIS))
			return 0;
	}
	if (netif_msg_hw(priv))
		enc28j60_dump_regs(priv, "Hw initialized.");

	return 1;
}

static void enc28j60_hw_enable(struct enc28j60_net *priv)
{
	struct device *dev = &priv->spi->dev;

	/* enable interrupts */
	if (netif_msg_hw(priv))
		dev_printk(KERN_DEBUG, dev, "%s() enabling interrupts.\n",
			   __func__);

	enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE);

	mutex_lock(&priv->lock);
	nolock_reg_bfclr(priv, EIR, EIR_DMAIF | EIR_LINKIF |
			 EIR_TXIF | EIR_TXERIF | EIR_RXERIF | EIR_PKTIF);
	nolock_regb_write(priv, EIE, EIE_INTIE | EIE_PKTIE | EIE_LINKIE |
			  EIE_TXIE | EIE_TXERIE | EIE_RXERIE);

	/* enable receive logic */
	nolock_reg_bfset(priv, ECON1, ECON1_RXEN);
	priv->hw_enable = true;
	mutex_unlock(&priv->lock);
}

static void enc28j60_hw_disable(struct enc28j60_net *priv)
{
	mutex_lock(&priv->lock);
	/* disable interrupts and packet reception */
	nolock_regb_write(priv, EIE, 0x00);
	nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
	priv->hw_enable = false;
	mutex_unlock(&priv->lock);
}

static int
enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex)
{
	struct enc28j60_net *priv = netdev_priv(ndev);
	int ret = 0;

	if (!priv->hw_enable) {
		/* link is in low power mode now; duplex setting
		 * will take effect on next enc28j60_hw_init().
		 */
		if (autoneg == AUTONEG_DISABLE && speed == SPEED_10)
			priv->full_duplex = (duplex == DUPLEX_FULL);
		else {
			if (netif_msg_link(priv))
				netdev_warn(ndev, "unsupported link setting\n");
			ret = -EOPNOTSUPP;
		}
	} else {
		if (netif_msg_link(priv))
			netdev_warn(ndev, "Warning: hw must be disabled to set link mode\n");
		ret = -EBUSY;
	}
	return ret;
}

/*
 * Read the Transmit Status Vector
 */
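/*
 * The controller writes the TSV into buffer memory just past the end of
 * the transmitted frame, i.e. starting at ETXND + 1, which is where it is
 * read back from here.
 */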
static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
{
	struct device *dev = &priv->spi->dev;
	int endptr;

	endptr = locked_regw_read(priv, ETXNDL);
	if (netif_msg_hw(priv))
		dev_printk(KERN_DEBUG, dev, "reading TSV at addr:0x%04x\n",
			   endptr + 1);
	enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
}

static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
			      u8 tsv[TSV_SIZE])
{
	struct device *dev = &priv->spi->dev;
	u16 tmp1, tmp2;

	dev_printk(KERN_DEBUG, dev, "%s - TSV:\n", msg);
	tmp1 = tsv[1];
	tmp1 <<= 8;
	tmp1 |= tsv[0];

	tmp2 = tsv[5];
	tmp2 <<= 8;
	tmp2 |= tsv[4];

	dev_printk(KERN_DEBUG, dev,
		   "ByteCount: %d, CollisionCount: %d, TotByteOnWire: %d\n",
		   tmp1, tsv[2] & 0x0f, tmp2);
	dev_printk(KERN_DEBUG, dev,
		   "TxDone: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
		   TSV_GETBIT(tsv, TSV_TXDONE),
		   TSV_GETBIT(tsv, TSV_TXCRCERROR),
		   TSV_GETBIT(tsv, TSV_TXLENCHKERROR),
		   TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE));
	dev_printk(KERN_DEBUG, dev,
		   "Multicast: %d, Broadcast: %d, PacketDefer: %d, ExDefer: %d\n",
		   TSV_GETBIT(tsv, TSV_TXMULTICAST),
		   TSV_GETBIT(tsv, TSV_TXBROADCAST),
		   TSV_GETBIT(tsv, TSV_TXPACKETDEFER),
		   TSV_GETBIT(tsv, TSV_TXEXDEFER));
	dev_printk(KERN_DEBUG, dev,
		   "ExCollision: %d, LateCollision: %d, Giant: %d, Underrun: %d\n",
		   TSV_GETBIT(tsv, TSV_TXEXCOLLISION),
		   TSV_GETBIT(tsv, TSV_TXLATECOLLISION),
		   TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN));
	dev_printk(KERN_DEBUG, dev,
		   "ControlFrame: %d, PauseFrame: %d, BackPressApp: %d, VLanTagFrame: %d\n",
		   TSV_GETBIT(tsv, TSV_TXCONTROLFRAME),
		   TSV_GETBIT(tsv, TSV_TXPAUSEFRAME),
		   TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP),
		   TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME));
}

/*
 * Receive Status vector
 */
static void enc28j60_dump_rsv(struct enc28j60_net *priv, const char *msg,
			      u16 pk_ptr, int len, u16 sts)
{
	struct device *dev = &priv->spi->dev;

	dev_printk(KERN_DEBUG, dev, "%s - NextPk: 0x%04x - RSV:\n", msg, pk_ptr);
	dev_printk(KERN_DEBUG, dev, "ByteCount: %d, DribbleNibble: %d\n",
		   len, RSV_GETBIT(sts, RSV_DRIBBLENIBBLE));
	dev_printk(KERN_DEBUG, dev,
		   "RxOK: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
		   RSV_GETBIT(sts, RSV_RXOK),
		   RSV_GETBIT(sts, RSV_CRCERROR),
		   RSV_GETBIT(sts, RSV_LENCHECKERR),
		   RSV_GETBIT(sts, RSV_LENOUTOFRANGE));
	dev_printk(KERN_DEBUG, dev,
		   "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
		   RSV_GETBIT(sts, RSV_RXMULTICAST),
		   RSV_GETBIT(sts, RSV_RXBROADCAST),
		   RSV_GETBIT(sts, RSV_RXLONGEVDROPEV),
		   RSV_GETBIT(sts, RSV_CARRIEREV));
	dev_printk(KERN_DEBUG, dev,
		   "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
		   RSV_GETBIT(sts, RSV_RXCONTROLFRAME),
		   RSV_GETBIT(sts, RSV_RXPAUSEFRAME),
		   RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE),
		   RSV_GETBIT(sts, RSV_RXTYPEVLAN));
}

static void dump_packet(const char *msg, int len, const char *data)
{
	printk(KERN_DEBUG DRV_NAME ": %s - packet len:%d\n", msg, len);
	print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       data, len, true);
}

/*
 * Hardware receive function.
 * Read the buffer memory, update the FIFO pointer to free the buffer,
 * check the status vector and decrement the packet counter.
 */
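/*
 * Layout of the RSV_SIZE bytes read at next_pk_ptr (see the parsing
 * below): bytes 0-1 hold the next packet pointer (low byte first),
 * bytes 2-3 the received byte count, and bytes 4-5 the receive status
 * bits tested with RSV_GETBIT().
 */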
static void enc28j60_hw_rx(struct net_device *ndev)
{
	struct enc28j60_net *priv = netdev_priv(ndev);
	struct device *dev = &priv->spi->dev;
	struct sk_buff *skb = NULL;
	u16 erxrdpt, next_packet, rxstat;
	u8 rsv[RSV_SIZE];
	int len;

	if (netif_msg_rx_status(priv))
		netdev_printk(KERN_DEBUG, ndev, "RX pk_addr:0x%04x\n",
			      priv->next_pk_ptr);

	if (unlikely(priv->next_pk_ptr > RXEND_INIT)) {
		if (netif_msg_rx_err(priv))
			netdev_err(ndev, "%s() Invalid packet address!! 0x%04x\n",
				   __func__, priv->next_pk_ptr);
		/* packet address corrupted: reset RX logic */
		mutex_lock(&priv->lock);
		nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
		nolock_reg_bfset(priv, ECON1, ECON1_RXRST);
		nolock_reg_bfclr(priv, ECON1, ECON1_RXRST);
		nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT);
		nolock_reg_bfclr(priv, EIR, EIR_RXERIF);
		nolock_reg_bfset(priv, ECON1, ECON1_RXEN);
		mutex_unlock(&priv->lock);
		ndev->stats.rx_errors++;
		return;
	}
	/* Read next packet pointer and rx status vector */
	enc28j60_mem_read(priv, priv->next_pk_ptr, sizeof(rsv), rsv);

	next_packet = rsv[1];
	next_packet <<= 8;
	next_packet |= rsv[0];

	len = rsv[3];
	len <<= 8;
	len |= rsv[2];

	rxstat = rsv[5];
	rxstat <<= 8;
	rxstat |= rsv[4];

	if (netif_msg_rx_status(priv))
		enc28j60_dump_rsv(priv, __func__, next_packet, len, rxstat);

	if (!RSV_GETBIT(rxstat, RSV_RXOK) || len > MAX_FRAMELEN) {
		if (netif_msg_rx_err(priv))
			netdev_err(ndev, "Rx Error (%04x)\n", rxstat);
		ndev->stats.rx_errors++;
		if (RSV_GETBIT(rxstat, RSV_CRCERROR))
			ndev->stats.rx_crc_errors++;
		if (RSV_GETBIT(rxstat, RSV_LENCHECKERR))
			ndev->stats.rx_frame_errors++;
		if (len > MAX_FRAMELEN)
			ndev->stats.rx_over_errors++;
	} else {
		skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);
		if (!skb) {
			if (netif_msg_rx_err(priv))
				netdev_err(ndev, "out of memory for Rx'd frame\n");
			ndev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			/* copy the packet from the receive buffer */
			enc28j60_mem_read(priv,
					  rx_packet_start(priv->next_pk_ptr),
					  len, skb_put(skb, len));
			if (netif_msg_pktdata(priv))
				dump_packet(__func__, skb->len, skb->data);
			skb->protocol = eth_type_trans(skb, ndev);
			/* update statistics */
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += len;
			netif_rx_ni(skb);
		}
	}
	/*
	 * Move the RX read pointer to the start of the next
	 * received packet.
	 * This frees the memory we just read out.
	 */
	erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT);
	if (netif_msg_hw(priv))
		dev_printk(KERN_DEBUG, dev, "%s() ERXRDPT:0x%04x\n",
			   __func__, erxrdpt);

	mutex_lock(&priv->lock);
	nolock_regw_write(priv, ERXRDPTL, erxrdpt);
#ifdef CONFIG_ENC28J60_WRITEVERIFY
	if (netif_msg_drv(priv)) {
		u16 reg;
		reg = nolock_regw_read(priv, ERXRDPTL);
		if (reg != erxrdpt)
			dev_printk(KERN_DEBUG, dev,
				   "%s() ERXRDPT verify error (0x%04x - 0x%04x)\n",
				   __func__, reg, erxrdpt);
	}
#endif
	priv->next_pk_ptr = next_packet;
	/* we are done with this packet, decrement the packet counter */
	nolock_reg_bfset(priv, ECON2, ECON2_PKTDEC);
	mutex_unlock(&priv->lock);
}

/*
 * Calculate free space in RxFIFO
 */
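/*
 * The RX FIFO is a circular buffer between ERXST and ERXND: the hardware
 * write pointer (ERXWRPT) chases the driver-controlled read pointer
 * (ERXRDPT), so free space follows from their relative positions below.
 * EPKTCNT saturates at 255, which is treated as "no free space" (-1).
 */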
static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
{
	struct net_device *ndev = priv->netdev;
	int epkcnt, erxst, erxnd, erxwr, erxrd;
	int free_space;

	mutex_lock(&priv->lock);
	epkcnt = nolock_regb_read(priv, EPKTCNT);
	if (epkcnt >= 255)
		free_space = -1;
	else {
		erxst = nolock_regw_read(priv, ERXSTL);
		erxnd = nolock_regw_read(priv, ERXNDL);
		erxwr = nolock_regw_read(priv, ERXWRPTL);
		erxrd = nolock_regw_read(priv, ERXRDPTL);

		if (erxwr > erxrd)
			free_space = (erxnd - erxst) - (erxwr - erxrd);
		else if (erxwr == erxrd)
			free_space = (erxnd - erxst);
		else
			free_space = erxrd - erxwr - 1;
	}
	mutex_unlock(&priv->lock);
	if (netif_msg_rx_status(priv))
		netdev_printk(KERN_DEBUG, ndev, "%s() free_space = %d\n",
			      __func__, free_space);
	return free_space;
}

/*
 * Access the PHY to determine link status
 */
static void enc28j60_check_link_status(struct net_device *ndev)
{
	struct enc28j60_net *priv = netdev_priv(ndev);
	struct device *dev = &priv->spi->dev;
	u16 reg;
	int duplex;

	reg = enc28j60_phy_read(priv, PHSTAT2);
	if (netif_msg_hw(priv))
		dev_printk(KERN_DEBUG, dev,
			   "%s() PHSTAT1: %04x, PHSTAT2: %04x\n", __func__,
			   enc28j60_phy_read(priv, PHSTAT1), reg);
	duplex = reg & PHSTAT2_DPXSTAT;

	if (reg & PHSTAT2_LSTAT) {
		netif_carrier_on(ndev);
		if (netif_msg_ifup(priv))
			netdev_info(ndev, "link up - %s\n",
				    duplex ? "Full duplex" : "Half duplex");
	} else {
		if (netif_msg_ifdown(priv))
			netdev_info(ndev, "link down\n");
		netif_carrier_off(ndev);
	}
}

static void enc28j60_tx_clear(struct net_device *ndev, bool err)
{
	struct enc28j60_net *priv = netdev_priv(ndev);

	if (err)
		ndev->stats.tx_errors++;
	else
		ndev->stats.tx_packets++;

	if (priv->tx_skb) {
		if (!err)
			ndev->stats.tx_bytes += priv->tx_skb->len;
		dev_kfree_skb(priv->tx_skb);
		priv->tx_skb = NULL;
	}
	locked_reg_bfclr(priv, ECON1, ECON1_TXRTS);
	netif_wake_queue(ndev);
}

/*
 * RX handler
 * Ignore PKTIF because it is unreliable (see the errata datasheet);
 * checking EPKTCNT is the suggested workaround.
 * We don't need to clear the interrupt flag; that happens automatically
 * when enc28j60_hw_rx() decrements the packet counter.
 * Returns how many packets were processed.
 */
static int enc28j60_rx_interrupt(struct net_device *ndev)
{
	struct enc28j60_net *priv = netdev_priv(ndev);
	int pk_counter, ret;

	pk_counter = locked_regb_read(priv, EPKTCNT);
	if (pk_counter && netif_msg_intr(priv))
		netdev_printk(KERN_DEBUG, ndev, "intRX, pk_cnt: %d\n",
			      pk_counter);
	if (pk_counter > priv->max_pk_counter) {
		/* update statistics */
		priv->max_pk_counter = pk_counter;
		if (netif_msg_rx_status(priv) && priv->max_pk_counter > 1)
			netdev_printk(KERN_DEBUG, ndev, "RX max_pk_cnt: %d\n",
				      priv->max_pk_counter);
	}
	ret = pk_counter;
	while (pk_counter-- > 0)
		enc28j60_hw_rx(ndev);

	return ret;
}

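/*
 * Interrupt events are handled in process context from this work item:
 * further interrupts are masked (EIE_INTIE cleared), the EIR flags are
 * polled and serviced in a loop until a pass finds no pending event, and
 * only then is EIE_INTIE set again.
 */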
| 1121 | static void enc28j60_irq_work_handler(struct work_struct *work) |
| 1122 | { |
| 1123 | struct enc28j60_net *priv = |
| 1124 | container_of(work, struct enc28j60_net, irq_work); |
| 1125 | struct net_device *ndev = priv->netdev; |
| 1126 | int intflags, loop; |
| 1127 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1128 | /* disable further interrupts */ |
| 1129 | locked_reg_bfclr(priv, EIE, EIE_INTIE); |
| 1130 | |
| 1131 | do { |
| 1132 | loop = 0; |
| 1133 | intflags = locked_regb_read(priv, EIR); |
| 1134 | /* DMA interrupt handler (not currently used) */ |
| 1135 | if ((intflags & EIR_DMAIF) != 0) { |
| 1136 | loop++; |
| 1137 | if (netif_msg_intr(priv)) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1138 | netdev_printk(KERN_DEBUG, ndev, "intDMA(%d)\n", |
| 1139 | loop); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1140 | locked_reg_bfclr(priv, EIR, EIR_DMAIF); |
| 1141 | } |
| 1142 | /* LINK changed handler */ |
| 1143 | if ((intflags & EIR_LINKIF) != 0) { |
| 1144 | loop++; |
| 1145 | if (netif_msg_intr(priv)) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1146 | netdev_printk(KERN_DEBUG, ndev, "intLINK(%d)\n", |
| 1147 | loop); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1148 | enc28j60_check_link_status(ndev); |
| 1149 | /* read PHIR to clear the flag */ |
| 1150 | enc28j60_phy_read(priv, PHIR); |
| 1151 | } |
| 1152 | /* TX complete handler */ |
| 1153 | if (((intflags & EIR_TXIF) != 0) && |
| 1154 | ((intflags & EIR_TXERIF) == 0)) { |
| 1155 | bool err = false; |
| 1156 | loop++; |
| 1157 | if (netif_msg_intr(priv)) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1158 | netdev_printk(KERN_DEBUG, ndev, "intTX(%d)\n", |
| 1159 | loop); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1160 | priv->tx_retry_count = 0; |
| 1161 | if (locked_regb_read(priv, ESTAT) & ESTAT_TXABRT) { |
| 1162 | if (netif_msg_tx_err(priv)) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 1163 | netdev_err(ndev, "Tx Error (aborted)\n"); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1164 | err = true; |
| 1165 | } |
| 1166 | if (netif_msg_tx_done(priv)) { |
| 1167 | u8 tsv[TSV_SIZE]; |
| 1168 | enc28j60_read_tsv(priv, tsv); |
| 1169 | enc28j60_dump_tsv(priv, "Tx Done", tsv); |
| 1170 | } |
| 1171 | enc28j60_tx_clear(ndev, err); |
| 1172 | locked_reg_bfclr(priv, EIR, EIR_TXIF); |
| 1173 | } |
		/* TX Error handler */
		if ((intflags & EIR_TXERIF) != 0) {
			u8 tsv[TSV_SIZE];

			loop++;
			if (netif_msg_intr(priv))
				netdev_printk(KERN_DEBUG, ndev, "intTXErr(%d)\n",
					      loop);
			locked_reg_bfclr(priv, ECON1, ECON1_TXRTS);
			enc28j60_read_tsv(priv, tsv);
			if (netif_msg_tx_err(priv))
				enc28j60_dump_tsv(priv, "Tx Error", tsv);
			/* Reset TX logic */
			mutex_lock(&priv->lock);
			nolock_reg_bfset(priv, ECON1, ECON1_TXRST);
			nolock_reg_bfclr(priv, ECON1, ECON1_TXRST);
			nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT);
			mutex_unlock(&priv->lock);
			/* Check for a late collision to decide whether to retransmit */
			if (TSV_GETBIT(tsv, TSV_TXLATECOLLISION)) {
				if (netif_msg_tx_err(priv))
					netdev_printk(KERN_DEBUG, ndev,
						      "LateCollision TXErr (%d)\n",
						      priv->tx_retry_count);
				if (priv->tx_retry_count++ < MAX_TX_RETRYCOUNT)
					locked_reg_bfset(priv, ECON1,
							 ECON1_TXRTS);
				else
					enc28j60_tx_clear(ndev, true);
			} else
				enc28j60_tx_clear(ndev, true);
			locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
		}
		/* RX Error handler */
		if ((intflags & EIR_RXERIF) != 0) {
			loop++;
			if (netif_msg_intr(priv))
				netdev_printk(KERN_DEBUG, ndev, "intRXErr(%d)\n",
					      loop);
			/* Check free FIFO space to flag RX overrun */
			if (enc28j60_get_free_rxfifo(priv) <= 0) {
				if (netif_msg_rx_err(priv))
					netdev_printk(KERN_DEBUG, ndev, "RX Overrun\n");
				ndev->stats.rx_dropped++;
			}
			locked_reg_bfclr(priv, EIR, EIR_RXERIF);
		}
		/* RX handler */
		if (enc28j60_rx_interrupt(ndev))
			loop++;
	} while (loop);

	/* re-enable interrupts */
	locked_reg_bfset(priv, EIE, EIE_INTIE);
}

/*
 * Hardware transmit function.
 * Fill the buffer memory and send the contents of the transmit buffer
 * onto the network.
 */
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
	struct net_device *ndev = priv->netdev;

	BUG_ON(!priv->tx_skb);

	if (netif_msg_tx_queued(priv))
		netdev_printk(KERN_DEBUG, ndev, "Tx Packet Len:%d\n",
			      priv->tx_skb->len);

	if (netif_msg_pktdata(priv))
		dump_packet(__func__,
			    priv->tx_skb->len, priv->tx_skb->data);
	enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data);

#ifdef CONFIG_ENC28J60_WRITEVERIFY
	/* readback and verify written data */
	if (netif_msg_drv(priv)) {
		struct device *dev = &priv->spi->dev;
		int test_len, k;
		u8 test_buf[64];	/* limit the test to the first 64 bytes */
		int okflag;

		test_len = priv->tx_skb->len;
		if (test_len > sizeof(test_buf))
			test_len = sizeof(test_buf);

		/* + 1 to skip control byte */
		enc28j60_mem_read(priv, TXSTART_INIT + 1, test_len, test_buf);
		okflag = 1;
		for (k = 0; k < test_len; k++) {
			if (priv->tx_skb->data[k] != test_buf[k]) {
				dev_printk(KERN_DEBUG, dev,
					   "Error, location %d differs: 0x%02x-0x%02x\n",
					   k, priv->tx_skb->data[k], test_buf[k]);
				okflag = 0;
			}
		}
		if (!okflag)
			dev_printk(KERN_DEBUG, dev, "Tx write buffer, verify ERROR!\n");
	}
#endif
	/* set TX request flag */
	locked_reg_bfset(priv, ECON1, ECON1_TXRTS);
}

static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
					struct net_device *dev)
{
	struct enc28j60_net *priv = netdev_priv(dev);

	/* If an error occurs while trying to transmit this packet,
	 * return NETDEV_TX_BUSY from this function. In that case the
	 * SKB is still owned by the network queueing layer: it must
	 * not be modified or freed here.
	 */
	netif_stop_queue(dev);

	/* Remember the skb for deferred processing */
	priv->tx_skb = skb;
	schedule_work(&priv->tx_work);

	return NETDEV_TX_OK;
}
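
/*
 * Transmission is deferred to tx_work because enc28j60_hw_tx() uses
 * spi_sync()-based register access, which can sleep, while
 * ndo_start_xmit() runs in atomic context. The queue is stopped above
 * and restarted from enc28j60_tx_clear() once the chip reports
 * completion or a fatal TX error.
 */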

static void enc28j60_tx_work_handler(struct work_struct *work)
{
	struct enc28j60_net *priv =
		container_of(work, struct enc28j60_net, tx_work);

	/* actual delivery of data */
	enc28j60_hw_tx(priv);
}

static irqreturn_t enc28j60_irq(int irq, void *dev_id)
{
	struct enc28j60_net *priv = dev_id;

	/*
	 * Can't do anything in interrupt context because we need to
	 * block (spi_sync() is blocking), so fire off the interrupt
	 * handling work.
	 * Remember that we access enc28j60 registers through the SPI
	 * bus via spi_sync() calls.
	 */
	schedule_work(&priv->irq_work);

	return IRQ_HANDLED;
}
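
/*
 * IRQ_HANDLED is returned unconditionally: the interrupt source is only
 * acknowledged later in the deferred work, so the interrupt line must be
 * edge-triggered (see the comment above request_irq() in enc28j60_probe()).
 */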

static void enc28j60_tx_timeout(struct net_device *ndev)
{
	struct enc28j60_net *priv = netdev_priv(ndev);

	if (netif_msg_timer(priv))
		netdev_err(ndev, "tx timeout\n");

	ndev->stats.tx_errors++;
	/* can't restart safely under softirq */
	schedule_work(&priv->restart_work);
}
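
/*
 * The actual recovery happens in enc28j60_restart_work_handler(), which
 * closes and re-opens the interface under rtnl_lock() in process context.
 */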

/*
 * Open/initialize the board. This is called whenever the interface is
 * brought up (e.g. via 'ip link set ... up' or 'ifconfig').
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
 */
static int enc28j60_net_open(struct net_device *dev)
{
	struct enc28j60_net *priv = netdev_priv(dev);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		if (netif_msg_ifup(priv))
			netdev_err(dev, "invalid MAC address %pM\n", dev->dev_addr);
		return -EADDRNOTAVAIL;
	}
	/* Reset the hardware here (and take it out of low power mode) */
	enc28j60_lowpower(priv, false);
	enc28j60_hw_disable(priv);
	if (!enc28j60_hw_init(priv)) {
		if (netif_msg_ifup(priv))
			netdev_err(dev, "hw_reset() failed\n");
		return -EINVAL;
	}
	/* Update the MAC address (in case user has changed it) */
	enc28j60_set_hw_macaddr(dev);
	/* Enable interrupts */
	enc28j60_hw_enable(priv);
	/* check link status */
	enc28j60_check_link_status(dev);
	/* We are now ready to accept transmit requests from
	 * the queueing layer of the networking stack.
	 */
	netif_start_queue(dev);

	return 0;
}

/* The inverse routine to net_open(). */
static int enc28j60_net_close(struct net_device *dev)
{
	struct enc28j60_net *priv = netdev_priv(dev);

	enc28j60_hw_disable(priv);
	enc28j60_lowpower(priv, true);
	netif_stop_queue(dev);

	return 0;
}

/*
 * Set or clear the multicast filter for this adapter:
 *   IFF_PROMISC set                      promiscuous mode, receive all packets
 *   IFF_ALLMULTI set or non-empty list   multicast mode, receive normal and MC packets
 *   otherwise                            normal mode, filter out multicast packets
 */
static void enc28j60_set_multicast_list(struct net_device *dev)
{
	struct enc28j60_net *priv = netdev_priv(dev);
	int oldfilter = priv->rxfilter;

	if (dev->flags & IFF_PROMISC) {
		if (netif_msg_link(priv))
			netdev_info(dev, "promiscuous mode\n");
		priv->rxfilter = RXFILTER_PROMISC;
	} else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
		if (netif_msg_link(priv))
			netdev_info(dev, "%smulticast mode\n",
				    (dev->flags & IFF_ALLMULTI) ? "all-" : "");
		priv->rxfilter = RXFILTER_MULTI;
	} else {
		if (netif_msg_link(priv))
			netdev_info(dev, "normal mode\n");
		priv->rxfilter = RXFILTER_NORMAL;
	}

	if (oldfilter != priv->rxfilter)
		schedule_work(&priv->setrx_work);
}
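
/*
 * Only the new filter mode is recorded above; the ERXFCON register write
 * is deferred to enc28j60_setrx_work_handler(), since this callback can
 * run in atomic context while the SPI access can sleep.
 */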

static void enc28j60_setrx_work_handler(struct work_struct *work)
{
	struct enc28j60_net *priv =
		container_of(work, struct enc28j60_net, setrx_work);
	struct device *dev = &priv->spi->dev;

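	/*
	 * Program the receive filter: promiscuous mode disables all
	 * filters (ERXFCON = 0), multicast mode adds MCEN, and normal
	 * mode accepts only unicast/broadcast frames with a valid CRC.
	 */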
	if (priv->rxfilter == RXFILTER_PROMISC) {
		if (netif_msg_drv(priv))
			dev_printk(KERN_DEBUG, dev, "promiscuous mode\n");
		locked_regb_write(priv, ERXFCON, 0x00);
	} else if (priv->rxfilter == RXFILTER_MULTI) {
		if (netif_msg_drv(priv))
			dev_printk(KERN_DEBUG, dev, "multicast mode\n");
		locked_regb_write(priv, ERXFCON,
				  ERXFCON_UCEN | ERXFCON_CRCEN |
				  ERXFCON_BCEN | ERXFCON_MCEN);
	} else {
		if (netif_msg_drv(priv))
			dev_printk(KERN_DEBUG, dev, "normal mode\n");
		locked_regb_write(priv, ERXFCON,
				  ERXFCON_UCEN | ERXFCON_CRCEN |
				  ERXFCON_BCEN);
	}
}

static void enc28j60_restart_work_handler(struct work_struct *work)
{
	struct enc28j60_net *priv =
		container_of(work, struct enc28j60_net, restart_work);
	struct net_device *ndev = priv->netdev;
	int ret;

	rtnl_lock();
	if (netif_running(ndev)) {
		enc28j60_net_close(ndev);
		ret = enc28j60_net_open(ndev);
		if (unlikely(ret)) {
			netdev_info(ndev, "could not restart, error %d\n", ret);
			dev_close(ndev);
		}
	}
	rtnl_unlock();
}

/* ......................... ETHTOOL SUPPORT ........................... */

static void
enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info,
		dev_name(dev->dev.parent), sizeof(info->bus_info));
}

static int
enc28j60_get_link_ksettings(struct net_device *dev,
			    struct ethtool_link_ksettings *cmd)
{
	struct enc28j60_net *priv = netdev_priv(dev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	cmd->base.speed = SPEED_10;
	cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.port = PORT_TP;
	cmd->base.autoneg = AUTONEG_DISABLE;

	return 0;
}
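
/*
 * The ENC28J60 is a 10Base-T-only MAC+PHY without autonegotiation, so the
 * reported link speed is fixed at 10 Mb/s and only the duplex setting is
 * configurable.
 */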

static int
enc28j60_set_link_ksettings(struct net_device *dev,
			    const struct ethtool_link_ksettings *cmd)
{
	return enc28j60_setlink(dev, cmd->base.autoneg,
				cmd->base.speed, cmd->base.duplex);
}

static u32 enc28j60_get_msglevel(struct net_device *dev)
{
	struct enc28j60_net *priv = netdev_priv(dev);
	return priv->msg_enable;
}

static void enc28j60_set_msglevel(struct net_device *dev, u32 val)
{
	struct enc28j60_net *priv = netdev_priv(dev);
	priv->msg_enable = val;
}

static const struct ethtool_ops enc28j60_ethtool_ops = {
	.get_drvinfo = enc28j60_get_drvinfo,
	.get_msglevel = enc28j60_get_msglevel,
	.set_msglevel = enc28j60_set_msglevel,
	.get_link_ksettings = enc28j60_get_link_ksettings,
	.set_link_ksettings = enc28j60_set_link_ksettings,
};
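
/*
 * Usage sketch (illustrative only; the interface name is a placeholder):
 * these hooks let userspace query the link and force the duplex setting,
 * e.g.
 *
 *	ethtool eth0
 *	ethtool -s eth0 speed 10 duplex full autoneg off
 */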

static int enc28j60_chipset_init(struct net_device *dev)
{
	struct enc28j60_net *priv = netdev_priv(dev);

	return enc28j60_hw_init(priv);
}

static const struct net_device_ops enc28j60_netdev_ops = {
	.ndo_open = enc28j60_net_open,
	.ndo_stop = enc28j60_net_close,
	.ndo_start_xmit = enc28j60_send_packet,
	.ndo_set_rx_mode = enc28j60_set_multicast_list,
	.ndo_set_mac_address = enc28j60_set_mac_address,
	.ndo_tx_timeout = enc28j60_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
};

static int enc28j60_probe(struct spi_device *spi)
{
	unsigned char macaddr[ETH_ALEN];
	struct net_device *dev;
	struct enc28j60_net *priv;
	int ret = 0;

	if (netif_msg_drv(&debug))
		dev_info(&spi->dev, "Ethernet driver %s loaded\n", DRV_VERSION);

	dev = alloc_etherdev(sizeof(struct enc28j60_net));
	if (!dev) {
		ret = -ENOMEM;
		goto error_alloc;
	}
	priv = netdev_priv(dev);

	priv->netdev = dev;	/* priv to netdev reference */
	priv->spi = spi;	/* priv to spi reference */
	priv->msg_enable = netif_msg_init(debug.msg_enable, ENC28J60_MSG_DEFAULT);
	mutex_init(&priv->lock);
	INIT_WORK(&priv->tx_work, enc28j60_tx_work_handler);
	INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler);
	INIT_WORK(&priv->irq_work, enc28j60_irq_work_handler);
	INIT_WORK(&priv->restart_work, enc28j60_restart_work_handler);
	spi_set_drvdata(spi, priv);	/* spi to priv reference */
	SET_NETDEV_DEV(dev, &spi->dev);

	if (!enc28j60_chipset_init(dev)) {
		if (netif_msg_probe(priv))
			dev_info(&spi->dev, "chip not found\n");
		ret = -EIO;
		goto error_irq;
	}

	if (device_get_mac_address(&spi->dev, macaddr, sizeof(macaddr)))
		ether_addr_copy(dev->dev_addr, macaddr);
	else
		eth_hw_addr_random(dev);
	enc28j60_set_hw_macaddr(dev);

	/* Board setup must set the relevant edge trigger type;
	 * level triggers won't currently work.
	 */
	ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv);
	if (ret < 0) {
		if (netif_msg_probe(priv))
			dev_err(&spi->dev, "request irq %d failed (ret = %d)\n",
				spi->irq, ret);
		goto error_irq;
	}

	dev->if_port = IF_PORT_10BASET;
	dev->irq = spi->irq;
	dev->netdev_ops = &enc28j60_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &enc28j60_ethtool_ops;

	enc28j60_lowpower(priv, true);

	ret = register_netdev(dev);
	if (ret) {
		if (netif_msg_probe(priv))
			dev_err(&spi->dev, "register netdev failed (ret = %d)\n",
				ret);
		goto error_register;
	}

	return 0;

error_register:
	free_irq(spi->irq, priv);
error_irq:
	free_netdev(dev);
error_alloc:
	return ret;
}

static int enc28j60_remove(struct spi_device *spi)
{
	struct enc28j60_net *priv = spi_get_drvdata(spi);

	unregister_netdev(priv->netdev);
	free_irq(spi->irq, priv);
	free_netdev(priv->netdev);

	return 0;
}

static const struct of_device_id enc28j60_dt_ids[] = {
	{ .compatible = "microchip,enc28j60" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, enc28j60_dt_ids);
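
/*
 * Example (illustrative only, not taken from this file): a board would
 * typically describe the chip as an SPI child node along the lines of
 *
 *	ethernet@0 {
 *		compatible = "microchip,enc28j60";
 *		reg = <0>;
 *		interrupt-parent = <&gpio>;
 *		interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
 *		spi-max-frequency = <12000000>;
 *	};
 *
 * The chip-select, GPIO/interrupt numbers and SPI clock above are
 * placeholders; only the compatible string is mandated by the table above.
 */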

static struct spi_driver enc28j60_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = enc28j60_dt_ids,
	},
	.probe = enc28j60_probe,
	.remove = enc28j60_remove,
};
module_spi_driver(enc28j60_driver);

MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
MODULE_LICENSE("GPL");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level: number of message class bits to enable (0=none, ..., 31=all)");
MODULE_ALIAS("spi:" DRV_NAME);
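
/*
 * Usage sketch (illustrative): loading the module with
 * "modprobe enc28j60 debug=31" enables every message class for devices
 * probed afterwards, while "debug=0" silences them; the value is the
 * number of low NETIF_MSG_* bits to set, as described above.
 */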