blob: 2c22f40e12bdf1b1be927c29b6f11e5587098f98 [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001/*
2 * TI HECC (CAN) device driver
3 *
4 * This driver supports TI's HECC (High End CAN Controller module) and the
5 * specs for the same is available at <http://www.ti.com>
6 *
7 * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
David Brazdil0f672f62019-12-10 10:32:29 +00008 * Copyright (C) 2019 Jeroen Hofstee <jhofstee@victronenergy.com>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation version 2.
13 *
14 * This program is distributed as is WITHOUT ANY WARRANTY of any
15 * kind, whether express or implied; without even the implied warranty
16 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 */
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/interrupt.h>
25#include <linux/errno.h>
26#include <linux/netdevice.h>
27#include <linux/skbuff.h>
28#include <linux/platform_device.h>
29#include <linux/clk.h>
30#include <linux/io.h>
31#include <linux/of.h>
32#include <linux/of_device.h>
33#include <linux/regulator/consumer.h>
34
35#include <linux/can/dev.h>
36#include <linux/can/error.h>
37#include <linux/can/led.h>
David Brazdil0f672f62019-12-10 10:32:29 +000038#include <linux/can/rx-offload.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000039
40#define DRV_NAME "ti_hecc"
41#define HECC_MODULE_VERSION "0.7"
42MODULE_VERSION(HECC_MODULE_VERSION);
43#define DRV_DESC "TI High End CAN Controller Driver " HECC_MODULE_VERSION
44
45/* TX / RX Mailbox Configuration */
46#define HECC_MAX_MAILBOXES 32 /* hardware mailboxes - do not change */
47#define MAX_TX_PRIO 0x3F /* hardware value - do not change */
48
David Brazdil0f672f62019-12-10 10:32:29 +000049/* Important Note: TX mailbox configuration
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000050 * TX mailboxes should be restricted to the number of SKB buffers to avoid
51 * maintaining SKB buffers separately. TX mailboxes should be a power of 2
52 * for the mailbox logic to work. Top mailbox numbers are reserved for RX
53 * and lower mailboxes for TX.
54 *
55 * HECC_MAX_TX_MBOX HECC_MB_TX_SHIFT
56 * 4 (default) 2
57 * 8 3
58 * 16 4
59 */
60#define HECC_MB_TX_SHIFT 2 /* as per table above */
61#define HECC_MAX_TX_MBOX BIT(HECC_MB_TX_SHIFT)
62
63#define HECC_TX_PRIO_SHIFT (HECC_MB_TX_SHIFT)
64#define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT)
65#define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1)
66#define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000067
David Brazdil0f672f62019-12-10 10:32:29 +000068/* RX mailbox configuration
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000069 *
David Brazdil0f672f62019-12-10 10:32:29 +000070 * The remaining mailboxes are used for reception and are delivered
71 * based on their timestamp, to avoid a hardware race when CANME is
72 * changed while CAN-bus traffic is being received.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000073 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000074#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000075#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1)
David Brazdil0f672f62019-12-10 10:32:29 +000076#define HECC_RX_LAST_MBOX (HECC_MAX_TX_MBOX)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000077
78/* TI HECC module registers */
79#define HECC_CANME 0x0 /* Mailbox enable */
80#define HECC_CANMD 0x4 /* Mailbox direction */
81#define HECC_CANTRS 0x8 /* Transmit request set */
82#define HECC_CANTRR 0xC /* Transmit request */
83#define HECC_CANTA 0x10 /* Transmission acknowledge */
84#define HECC_CANAA 0x14 /* Abort acknowledge */
85#define HECC_CANRMP 0x18 /* Receive message pending */
David Brazdil0f672f62019-12-10 10:32:29 +000086#define HECC_CANRML 0x1C /* Receive message lost */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000087#define HECC_CANRFP 0x20 /* Remote frame pending */
88#define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */
89#define HECC_CANMC 0x28 /* Master control */
90#define HECC_CANBTC 0x2C /* Bit timing configuration */
91#define HECC_CANES 0x30 /* Error and status */
92#define HECC_CANTEC 0x34 /* Transmit error counter */
93#define HECC_CANREC 0x38 /* Receive error counter */
94#define HECC_CANGIF0 0x3C /* Global interrupt flag 0 */
95#define HECC_CANGIM 0x40 /* Global interrupt mask */
96#define HECC_CANGIF1 0x44 /* Global interrupt flag 1 */
97#define HECC_CANMIM 0x48 /* Mailbox interrupt mask */
98#define HECC_CANMIL 0x4C /* Mailbox interrupt level */
99#define HECC_CANOPC 0x50 /* Overwrite protection control */
100#define HECC_CANTIOC 0x54 /* Transmit I/O control */
101#define HECC_CANRIOC 0x58 /* Receive I/O control */
102#define HECC_CANLNT 0x5C /* HECC only: Local network time */
103#define HECC_CANTOC 0x60 /* HECC only: Time-out control */
104#define HECC_CANTOS 0x64 /* HECC only: Time-out status */
105#define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */
106#define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */
107
David Brazdil0f672f62019-12-10 10:32:29 +0000108/* TI HECC RAM registers */
109#define HECC_CANMOTS 0x80 /* Message object time stamp */
110
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000111/* Mailbox registers */
112#define HECC_CANMID 0x0
113#define HECC_CANMCF 0x4
114#define HECC_CANMDL 0x8
115#define HECC_CANMDH 0xC
116
117#define HECC_SET_REG 0xFFFFFFFF
118#define HECC_CANID_MASK 0x3FF /* 18 bits mask for extended id's */
119#define HECC_CCE_WAIT_COUNT 100 /* Wait for ~1 sec for CCE bit */
120
121#define HECC_CANMC_SCM BIT(13) /* SCC compat mode */
122#define HECC_CANMC_CCR BIT(12) /* Change config request */
123#define HECC_CANMC_PDR BIT(11) /* Local Power down - for sleep mode */
124#define HECC_CANMC_ABO BIT(7) /* Auto Bus On */
125#define HECC_CANMC_STM BIT(6) /* Self test mode - loopback */
126#define HECC_CANMC_SRES BIT(5) /* Software reset */
127
128#define HECC_CANTIOC_EN BIT(3) /* Enable CAN TX I/O pin */
129#define HECC_CANRIOC_EN BIT(3) /* Enable CAN RX I/O pin */
130
131#define HECC_CANMID_IDE BIT(31) /* Extended frame format */
132#define HECC_CANMID_AME BIT(30) /* Acceptance mask enable */
133#define HECC_CANMID_AAM BIT(29) /* Auto answer mode */
134
135#define HECC_CANES_FE BIT(24) /* form error */
136#define HECC_CANES_BE BIT(23) /* bit error */
137#define HECC_CANES_SA1 BIT(22) /* stuck at dominant error */
138#define HECC_CANES_CRCE BIT(21) /* CRC error */
139#define HECC_CANES_SE BIT(20) /* stuff bit error */
140#define HECC_CANES_ACKE BIT(19) /* ack error */
141#define HECC_CANES_BO BIT(18) /* Bus off status */
142#define HECC_CANES_EP BIT(17) /* Error passive status */
143#define HECC_CANES_EW BIT(16) /* Error warning status */
144#define HECC_CANES_SMA BIT(5) /* suspend mode ack */
145#define HECC_CANES_CCE BIT(4) /* Change config enabled */
146#define HECC_CANES_PDA BIT(3) /* Power down mode ack */
147
148#define HECC_CANBTC_SAM BIT(7) /* sample points */
149
150#define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\
151 HECC_CANES_CRCE | HECC_CANES_SE |\
152 HECC_CANES_ACKE)
David Brazdil0f672f62019-12-10 10:32:29 +0000153#define HECC_CANES_FLAGS (HECC_BUS_ERROR | HECC_CANES_BO |\
154 HECC_CANES_EP | HECC_CANES_EW)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000155
156#define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */
157
158#define HECC_CANGIF_MAIF BIT(17) /* Message alarm interrupt */
159#define HECC_CANGIF_TCOIF BIT(16) /* Timer counter overflow int */
160#define HECC_CANGIF_GMIF BIT(15) /* Global mailbox interrupt */
161#define HECC_CANGIF_AAIF BIT(14) /* Abort ack interrupt */
162#define HECC_CANGIF_WDIF BIT(13) /* Write denied interrupt */
163#define HECC_CANGIF_WUIF BIT(12) /* Wake up interrupt */
164#define HECC_CANGIF_RMLIF BIT(11) /* Receive message lost interrupt */
165#define HECC_CANGIF_BOIF BIT(10) /* Bus off interrupt */
166#define HECC_CANGIF_EPIF BIT(9) /* Error passive interrupt */
167#define HECC_CANGIF_WLIF BIT(8) /* Warning level interrupt */
168#define HECC_CANGIF_MBOX_MASK 0x1F /* Mailbox number mask */
169#define HECC_CANGIM_I1EN BIT(1) /* Int line 1 enable */
170#define HECC_CANGIM_I0EN BIT(0) /* Int line 0 enable */
171#define HECC_CANGIM_DEF_MASK 0x700 /* only busoff/warning/passive */
172#define HECC_CANGIM_SIL BIT(2) /* system interrupts to int line 1 */
173
/* CAN Bittiming constants as per HECC specs */
static const struct can_bittiming_const ti_hecc_bittiming_const = {
	.name = DRV_NAME,
	.tseg1_min = 1,		/* time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,		/* synchronisation jump width */
	.brp_min = 1,		/* bit-rate prescaler */
	.brp_max = 256,
	.brp_inc = 1,
};
186
/* Per-device driver state; embedded can_priv must stay the first member
 * so that netdev_priv() casts work for the CAN core.
 */
struct ti_hecc_priv {
	struct can_priv can;	/* MUST be first member/field */
	struct can_rx_offload offload;	/* rx-offload context (timestamped delivery) */
	struct net_device *ndev;	/* back pointer to the net device */
	struct clk *clk;	/* module functional clock */
	void __iomem *base;	/* HECC control registers */
	void __iomem *hecc_ram;	/* mailbox RAM (LAM tables + timestamps) */
	void __iomem *mbx;	/* mailbox data registers */
	bool use_hecc1int;	/* route interrupts to the HECC1 line */
	spinlock_t mbx_lock; /* CANME register needs protection */
	u32 tx_head;	/* next TX mailbox/priority to use (counts down) */
	u32 tx_tail;	/* oldest in-flight TX mailbox awaiting ack */
	struct regulator *reg_xceiver;	/* optional transceiver supply */
};
201
202static inline int get_tx_head_mb(struct ti_hecc_priv *priv)
203{
204 return priv->tx_head & HECC_TX_MB_MASK;
205}
206
207static inline int get_tx_tail_mb(struct ti_hecc_priv *priv)
208{
209 return priv->tx_tail & HECC_TX_MB_MASK;
210}
211
212static inline int get_tx_head_prio(struct ti_hecc_priv *priv)
213{
214 return (priv->tx_head >> HECC_TX_PRIO_SHIFT) & MAX_TX_PRIO;
215}
216
217static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val)
218{
219 __raw_writel(val, priv->hecc_ram + mbxno * 4);
220}
221
David Brazdil0f672f62019-12-10 10:32:29 +0000222static inline u32 hecc_read_stamp(struct ti_hecc_priv *priv, u32 mbxno)
223{
224 return __raw_readl(priv->hecc_ram + HECC_CANMOTS + mbxno * 4);
225}
226
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000227static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno,
David Brazdil0f672f62019-12-10 10:32:29 +0000228 u32 reg, u32 val)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000229{
230 __raw_writel(val, priv->mbx + mbxno * 0x10 + reg);
231}
232
233static inline u32 hecc_read_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg)
234{
235 return __raw_readl(priv->mbx + mbxno * 0x10 + reg);
236}
237
238static inline void hecc_write(struct ti_hecc_priv *priv, u32 reg, u32 val)
239{
240 __raw_writel(val, priv->base + reg);
241}
242
243static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg)
244{
245 return __raw_readl(priv->base + reg);
246}
247
248static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg,
David Brazdil0f672f62019-12-10 10:32:29 +0000249 u32 bit_mask)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000250{
251 hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask);
252}
253
254static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg,
David Brazdil0f672f62019-12-10 10:32:29 +0000255 u32 bit_mask)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000256{
257 hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask);
258}
259
260static inline u32 hecc_get_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask)
261{
262 return (hecc_read(priv, reg) & bit_mask) ? 1 : 0;
263}
264
265static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
266{
267 struct can_bittiming *bit_timing = &priv->can.bittiming;
268 u32 can_btc;
269
270 can_btc = (bit_timing->phase_seg2 - 1) & 0x7;
271 can_btc |= ((bit_timing->phase_seg1 + bit_timing->prop_seg - 1)
272 & 0xF) << 3;
273 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) {
274 if (bit_timing->brp > 4)
275 can_btc |= HECC_CANBTC_SAM;
276 else
David Brazdil0f672f62019-12-10 10:32:29 +0000277 netdev_warn(priv->ndev,
278 "WARN: Triple sampling not set due to h/w limitations");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000279 }
280 can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8;
281 can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16;
282
283 /* ERM being set to 0 by default meaning resync at falling edge */
284
285 hecc_write(priv, HECC_CANBTC, can_btc);
286 netdev_info(priv->ndev, "setting CANBTC=%#x\n", can_btc);
287
288 return 0;
289}
290
291static int ti_hecc_transceiver_switch(const struct ti_hecc_priv *priv,
292 int on)
293{
294 if (!priv->reg_xceiver)
295 return 0;
296
297 if (on)
298 return regulator_enable(priv->reg_xceiver);
299 else
300 return regulator_disable(priv->reg_xceiver);
301}
302
/* ti_hecc_reset - software-reset the controller, program bit timing and
 * leave the module in a clean, configured state with all mailboxes and
 * mailbox interrupts cleared.  The CCE wait loops are best-effort (see
 * the INFO notes below); the sequence of register writes is significant.
 */
static void ti_hecc_reset(struct net_device *ndev)
{
	u32 cnt;
	struct ti_hecc_priv *priv = netdev_priv(ndev);

	netdev_dbg(ndev, "resetting hecc ...\n");
	/* request a software reset of the module */
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SRES);

	/* Set change control request and wait till enabled */
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);

	/* INFO: It has been observed that at times CCE bit may not be
	 * set and hw seems to be ok even if this bit is not set so
	 * timing out with a timing of 1ms to respect the specs
	 */
	cnt = HECC_CCE_WAIT_COUNT;
	while (!hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) {
		--cnt;
		udelay(10);
	}

	/* Note: On HECC, BTC can be programmed only in initialization mode, so
	 * it is expected that the can bittiming parameters are set via ip
	 * utility before the device is opened
	 */
	ti_hecc_set_btc(priv);

	/* Clear CCR (and CANMC register) and wait for CCE = 0 enable */
	hecc_write(priv, HECC_CANMC, 0);

	/* INFO: CAN net stack handles bus off and hence disabling auto-bus-on
	 * hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO);
	 */

	/* INFO: It has been observed that at times CCE bit may not be
	 * set and hw seems to be ok even if this bit is not set so
	 */
	cnt = HECC_CCE_WAIT_COUNT;
	while (hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) {
		--cnt;
		udelay(10);
	}

	/* Enable TX and RX I/O Control pins */
	hecc_write(priv, HECC_CANTIOC, HECC_CANTIOC_EN);
	hecc_write(priv, HECC_CANRIOC, HECC_CANRIOC_EN);

	/* Clear registers for clean operation: pending TX acks, pending RX
	 * messages, both interrupt-flag banks, mailbox enables and directions
	 */
	hecc_write(priv, HECC_CANTA, HECC_SET_REG);
	hecc_write(priv, HECC_CANRMP, HECC_SET_REG);
	hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
	hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
	hecc_write(priv, HECC_CANME, 0);
	hecc_write(priv, HECC_CANMD, 0);

	/* SCC compat mode NOT supported (and not needed too) */
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SCM);
}
361
/* ti_hecc_start - bring the controller to an operational state: reset and
 * configure it, set up the RX mailboxes as a timestamp-ordered FIFO, enable
 * TX/RX mailbox interrupts and the selected global interrupt line, and mark
 * the device ERROR_ACTIVE.
 */
static void ti_hecc_start(struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	u32 cnt, mbxno, mbx_mask;

	/* put HECC in initialization mode and set btc */
	ti_hecc_reset(ndev);

	/* start both counters at the all-ones pattern: highest priority,
	 * highest mailbox first (see the ti_hecc_xmit comment block)
	 */
	priv->tx_head = HECC_TX_MASK;
	priv->tx_tail = HECC_TX_MASK;

	/* Enable local and global acceptance mask registers */
	hecc_write(priv, HECC_CANGAM, HECC_SET_REG);

	/* Prepare configured mailboxes to receive messages: top mailboxes,
	 * counting down, are set to RX direction with an accept-all mask
	 */
	for (cnt = 0; cnt < HECC_MAX_RX_MBOX; cnt++) {
		mbxno = HECC_MAX_MAILBOXES - 1 - cnt;
		mbx_mask = BIT(mbxno);
		hecc_clear_bit(priv, HECC_CANME, mbx_mask);
		hecc_write_mbx(priv, mbxno, HECC_CANMID, HECC_CANMID_AME);
		hecc_write_lam(priv, mbxno, HECC_SET_REG);
		hecc_set_bit(priv, HECC_CANMD, mbx_mask);
		hecc_set_bit(priv, HECC_CANME, mbx_mask);
		hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
	}

	/* Enable tx interrupts */
	hecc_set_bit(priv, HECC_CANMIM, BIT(HECC_MAX_TX_MBOX) - 1);

	/* Prevent message over-write to create a rx fifo, but not for
	 * the lowest priority mailbox, since that allows detecting
	 * overflows instead of the hardware silently dropping the
	 * messages.
	 */
	mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
	hecc_write(priv, HECC_CANOPC, mbx_mask);

	/* Enable interrupts on the line selected at probe time */
	if (priv->use_hecc1int) {
		hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
		hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
			   HECC_CANGIM_I1EN | HECC_CANGIM_SIL);
	} else {
		hecc_write(priv, HECC_CANMIL, 0);
		hecc_write(priv, HECC_CANGIM,
			   HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
	}
	priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
411
/* ti_hecc_stop - halt the controller: enter config mode so the CPK stops
 * participating on the bus, then mask all interrupts and disable every
 * mailbox.  Leaves the device in CAN_STATE_STOPPED.
 */
static void ti_hecc_stop(struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);

	/* Disable the CPK; stop sending, erroring and acking */
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);

	/* Disable interrupts and disable mailboxes */
	hecc_write(priv, HECC_CANGIM, 0);
	hecc_write(priv, HECC_CANMIM, 0);
	hecc_write(priv, HECC_CANME, 0);
	priv->can.state = CAN_STATE_STOPPED;
}
425
426static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode)
427{
428 int ret = 0;
429
430 switch (mode) {
431 case CAN_MODE_START:
432 ti_hecc_start(ndev);
433 netif_wake_queue(ndev);
434 break;
435 default:
436 ret = -EOPNOTSUPP;
437 break;
438 }
439
440 return ret;
441}
442
443static int ti_hecc_get_berr_counter(const struct net_device *ndev,
David Brazdil0f672f62019-12-10 10:32:29 +0000444 struct can_berr_counter *bec)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000445{
446 struct ti_hecc_priv *priv = netdev_priv(ndev);
447
448 bec->txerr = hecc_read(priv, HECC_CANTEC);
449 bec->rxerr = hecc_read(priv, HECC_CANREC);
450
451 return 0;
452}
453
/* ti_hecc_xmit: HECC Transmit
 *
 * The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the
 * priority of the mailbox for transmission is dependent upon priority setting
 * field in mailbox registers. The mailbox with highest value in priority field
 * is transmitted first. Only when two mailboxes have the same value in
 * priority field the highest numbered mailbox is transmitted first.
 *
 * To utilize the HECC priority feature as described above we start with the
 * highest numbered mailbox with highest priority level and move on to the next
 * mailbox with the same priority level and so on. Once we loop through all the
 * transmit mailboxes we choose the next priority level (lower) and so on
 * until we reach the lowest priority level on the lowest numbered mailbox
 * when we stop transmission until all mailboxes are transmitted and then
 * restart at highest numbered mailbox with highest priority.
 *
 * Two counters (head and tail) are used to track the next mailbox to transmit
 * and to track the echo buffer for already transmitted mailbox. The queue
 * is stopped when all the mailboxes are busy or when there is a priority
 * value roll-over happens.
 */
static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	u32 mbxno, mbx_mask, data;
	unsigned long flags;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	mbxno = get_tx_head_mb(priv);
	mbx_mask = BIT(mbxno);
	/* The head mailbox must be disabled (free); if it is still enabled
	 * the TX path and IRQ handler got out of sync - should not happen
	 */
	spin_lock_irqsave(&priv->mbx_lock, flags);
	if (unlikely(hecc_read(priv, HECC_CANME) & mbx_mask)) {
		spin_unlock_irqrestore(&priv->mbx_lock, flags);
		netif_stop_queue(ndev);
		netdev_err(priv->ndev,
			   "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
			   priv->tx_head, priv->tx_tail);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	/* Prepare mailbox for transmission: DLC plus priority field */
	data = cf->can_dlc | (get_tx_head_prio(priv) << 8);
	if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
		data |= HECC_CANMCF_RTR;
	hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);

	if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
		data = (cf->can_id & CAN_EFF_MASK) | HECC_CANMID_IDE;
	else /* Standard frame format */
		data = (cf->can_id & CAN_SFF_MASK) << 18;
	hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
	hecc_write_mbx(priv, mbxno, HECC_CANMDL,
		       be32_to_cpu(*(__be32 *)(cf->data)));
	if (cf->can_dlc > 4)
		hecc_write_mbx(priv, mbxno, HECC_CANMDH,
			       be32_to_cpu(*(__be32 *)(cf->data + 4)));
	else
		/* zero the unused payload half before echoing the skb */
		*(u32 *)(cf->data + 4) = 0;
	can_put_echo_skb(skb, ndev, mbxno);

	spin_lock_irqsave(&priv->mbx_lock, flags);
	/* advance head (counts down); stop the queue if the next mailbox is
	 * still busy or the priority counter is about to roll over
	 */
	--priv->tx_head;
	if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) ||
	    (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
		netif_stop_queue(ndev);
	}
	hecc_set_bit(priv, HECC_CANME, mbx_mask);
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	/* kick off transmission of the filled mailbox */
	hecc_write(priv, HECC_CANTRS, mbx_mask);

	return NETDEV_TX_OK;
}
531
/* Map an embedded can_rx_offload back to its enclosing ti_hecc_priv. */
static inline
struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
{
	return container_of(offload, struct ti_hecc_priv, offload);
}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000537
/* rx-offload mailbox_read callback: read one RX mailbox into a fresh skb
 * and report its hardware timestamp.  On any failure (drop requested,
 * allocation failure, or FIFO overrun on the unprotected last mailbox) an
 * ERR_PTR is returned; in every case the mailbox is acknowledged in CANRMP
 * so the hardware can reuse it.
 */
static struct sk_buff *ti_hecc_mailbox_read(struct can_rx_offload *offload,
					    unsigned int mbxno, u32 *timestamp,
					    bool drop)
{
	struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
	struct sk_buff *skb;
	struct can_frame *cf;
	u32 data, mbx_mask;

	mbx_mask = BIT(mbxno);

	if (unlikely(drop)) {
		skb = ERR_PTR(-ENOBUFS);
		goto mark_as_read;
	}

	skb = alloc_can_skb(offload->dev, &cf);
	if (unlikely(!skb)) {
		skb = ERR_PTR(-ENOMEM);
		goto mark_as_read;
	}

	/* decode the identifier: extended vs standard frame */
	data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
	if (data & HECC_CANMID_IDE)
		cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (data >> 18) & CAN_SFF_MASK;

	/* control field: RTR flag and data length code */
	data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
	if (data & HECC_CANMCF_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	cf->can_dlc = get_can_dlc(data & 0xF);

	/* payload: low word always, high word only when dlc > 4 */
	data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
	*(__be32 *)(cf->data) = cpu_to_be32(data);
	if (cf->can_dlc > 4) {
		data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
		*(__be32 *)(cf->data + 4) = cpu_to_be32(data);
	}

	*timestamp = hecc_read_stamp(priv, mbxno);

	/* Check for FIFO overrun.
	 *
	 * All but the last RX mailbox have activated overwrite
	 * protection. So skip check for overrun, if we're not
	 * handling the last RX mailbox.
	 *
	 * As the overwrite protection for the last RX mailbox is
	 * disabled, the CAN core might update while we're reading
	 * it. This means the skb might be inconsistent.
	 *
	 * Return an error to let rx-offload discard this CAN frame.
	 */
	if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
		     hecc_read(priv, HECC_CANRML) & mbx_mask))
		skb = ERR_PTR(-ENOBUFS);

 mark_as_read:
	hecc_write(priv, HECC_CANRMP, mbx_mask);

	return skb;
}
601
/* ti_hecc_error - report bus errors flagged in CANES to the CAN stack as an
 * error frame queued through rx-offload (timestamp-sorted), then acknowledge
 * all error/status flags by writing them back to CANES.
 *
 * Returns 0, or -ENOMEM when the error skb could not be allocated.
 */
static int ti_hecc_error(struct net_device *ndev, int int_status,
			 int err_status)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp;
	int err;

	if (err_status & HECC_BUS_ERROR) {
		/* propagate the error condition to the can stack */
		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			if (net_ratelimit())
				netdev_err(priv->ndev,
					   "%s: alloc_can_err_skb() failed\n",
					   __func__);
			return -ENOMEM;
		}

		++priv->can.can_stats.bus_error;
		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
		/* translate each hardware error flag to its CAN_ERR code */
		if (err_status & HECC_CANES_FE)
			cf->data[2] |= CAN_ERR_PROT_FORM;
		if (err_status & HECC_CANES_BE)
			cf->data[2] |= CAN_ERR_PROT_BIT;
		if (err_status & HECC_CANES_SE)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		if (err_status & HECC_CANES_CRCE)
			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		if (err_status & HECC_CANES_ACKE)
			cf->data[3] = CAN_ERR_PROT_LOC_ACK;

		/* stamp with the current network time so rx-offload can
		 * deliver the error frame in order with RX frames
		 */
		timestamp = hecc_read(priv, HECC_CANLNT);
		err = can_rx_offload_queue_sorted(&priv->offload, skb,
						  timestamp);
		if (err)
			ndev->stats.rx_fifo_errors++;
	}

	/* acknowledge the handled error/status bits */
	hecc_write(priv, HECC_CANES, HECC_CANES_FLAGS);

	return 0;
}
646
/* ti_hecc_change_state - move the CAN core state machine to the given
 * rx/tx states and queue a matching state-change error frame through
 * rx-offload.  If no skb can be allocated, only the state is updated.
 */
static void ti_hecc_change_state(struct net_device *ndev,
				 enum can_state rx_state,
				 enum can_state tx_state)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp;
	int err;

	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (unlikely(!skb)) {
		/* no frame to send - at least record the worse of the two */
		priv->can.state = max(tx_state, rx_state);
		return;
	}

	can_change_state(priv->ndev, cf, tx_state, rx_state);

	/* include the error counters, except when bus-off (counters are
	 * not meaningful there)
	 */
	if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) {
		cf->data[6] = hecc_read(priv, HECC_CANTEC);
		cf->data[7] = hecc_read(priv, HECC_CANREC);
	}

	timestamp = hecc_read(priv, HECC_CANLNT);
	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		ndev->stats.rx_fifo_errors++;
}
675
/* ti_hecc_interrupt - IRQ handler (registered for either HECC0INT or
 * HECC1INT depending on priv->use_hecc1int).
 *
 * Handles, in order: bus errors (CANES flags), state transitions
 * (warning / passive / bus-off and recovery back towards error-active),
 * TX completions (advancing tx_tail and echoing skbs), and RX mailbox
 * offload to NAPI.  Finally acknowledges the handled global interrupt
 * flags and reads the flag register back to avoid spurious interrupts.
 */
static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 mbxno, mbx_mask, int_status, err_status, stamp;
	unsigned long flags, rx_pending;
	u32 handled = 0;

	int_status = hecc_read(priv,
			       priv->use_hecc1int ?
			       HECC_CANGIF1 : HECC_CANGIF0);

	/* not our interrupt (line is shared) */
	if (!int_status)
		return IRQ_NONE;

	err_status = hecc_read(priv, HECC_CANES);
	if (unlikely(err_status & HECC_CANES_FLAGS))
		ti_hecc_error(ndev, int_status, err_status);

	if (unlikely(int_status & HECC_CANGIM_DEF_MASK)) {
		enum can_state rx_state, tx_state;
		u32 rec = hecc_read(priv, HECC_CANREC);
		u32 tec = hecc_read(priv, HECC_CANTEC);

		/* attribute the new state to the rx and/or tx side by
		 * comparing the two error counters
		 */
		if (int_status & HECC_CANGIF_WLIF) {
			handled |= HECC_CANGIF_WLIF;
			rx_state = rec >= tec ? CAN_STATE_ERROR_WARNING : 0;
			tx_state = rec <= tec ? CAN_STATE_ERROR_WARNING : 0;
			netdev_dbg(priv->ndev, "Error Warning interrupt\n");
			ti_hecc_change_state(ndev, rx_state, tx_state);
		}

		if (int_status & HECC_CANGIF_EPIF) {
			handled |= HECC_CANGIF_EPIF;
			rx_state = rec >= tec ? CAN_STATE_ERROR_PASSIVE : 0;
			tx_state = rec <= tec ? CAN_STATE_ERROR_PASSIVE : 0;
			netdev_dbg(priv->ndev, "Error passive interrupt\n");
			ti_hecc_change_state(ndev, rx_state, tx_state);
		}

		if (int_status & HECC_CANGIF_BOIF) {
			handled |= HECC_CANGIF_BOIF;
			rx_state = CAN_STATE_BUS_OFF;
			tx_state = CAN_STATE_BUS_OFF;
			netdev_dbg(priv->ndev, "Bus off interrupt\n");

			/* Disable all interrupts */
			hecc_write(priv, HECC_CANGIM, 0);
			can_bus_off(ndev);
			ti_hecc_change_state(ndev, rx_state, tx_state);
		}
	} else if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
		/* no error interrupt pending: check whether the counters
		 * have dropped enough to recover towards error-active
		 */
		enum can_state new_state, tx_state, rx_state;
		u32 rec = hecc_read(priv, HECC_CANREC);
		u32 tec = hecc_read(priv, HECC_CANTEC);

		if (rec >= 128 || tec >= 128)
			new_state = CAN_STATE_ERROR_PASSIVE;
		else if (rec >= 96 || tec >= 96)
			new_state = CAN_STATE_ERROR_WARNING;
		else
			new_state = CAN_STATE_ERROR_ACTIVE;

		if (new_state < priv->can.state) {
			rx_state = rec >= tec ? new_state : 0;
			tx_state = rec <= tec ? new_state : 0;
			ti_hecc_change_state(ndev, rx_state, tx_state);
		}
	}

	if (int_status & HECC_CANGIF_GMIF) {
		/* reap acknowledged TX mailboxes, oldest (tail) first */
		while (priv->tx_tail - priv->tx_head > 0) {
			mbxno = get_tx_tail_mb(priv);
			mbx_mask = BIT(mbxno);
			if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
				break;
			hecc_write(priv, HECC_CANTA, mbx_mask);
			spin_lock_irqsave(&priv->mbx_lock, flags);
			hecc_clear_bit(priv, HECC_CANME, mbx_mask);
			spin_unlock_irqrestore(&priv->mbx_lock, flags);
			stamp = hecc_read_stamp(priv, mbxno);
			stats->tx_bytes +=
				can_rx_offload_get_echo_skb(&priv->offload,
							    mbxno, stamp);
			stats->tx_packets++;
			can_led_event(ndev, CAN_LED_EVENT_TX);
			--priv->tx_tail;
		}

		/* restart queue if wrap-up or if queue stalled on last pkt */
		if ((priv->tx_head == priv->tx_tail &&
		     ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
		    (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
		     ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
			netif_wake_queue(ndev);

		/* offload RX mailboxes and let NAPI deliver them */
		while ((rx_pending = hecc_read(priv, HECC_CANRMP))) {
			can_rx_offload_irq_offload_timestamp(&priv->offload,
							     rx_pending);
		}
	}

	/* clear all interrupt conditions - read back to avoid spurious ints */
	if (priv->use_hecc1int) {
		hecc_write(priv, HECC_CANGIF1, handled);
		int_status = hecc_read(priv, HECC_CANGIF1);
	} else {
		hecc_write(priv, HECC_CANGIF0, handled);
		int_status = hecc_read(priv, HECC_CANGIF0);
	}

	return IRQ_HANDLED;
}
791
792static int ti_hecc_open(struct net_device *ndev)
793{
794 struct ti_hecc_priv *priv = netdev_priv(ndev);
795 int err;
796
797 err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED,
David Brazdil0f672f62019-12-10 10:32:29 +0000798 ndev->name, ndev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000799 if (err) {
800 netdev_err(ndev, "error requesting interrupt\n");
801 return err;
802 }
803
804 ti_hecc_transceiver_switch(priv, 1);
805
806 /* Open common can device */
807 err = open_candev(ndev);
808 if (err) {
809 netdev_err(ndev, "open_candev() failed %d\n", err);
810 ti_hecc_transceiver_switch(priv, 0);
811 free_irq(ndev->irq, ndev);
812 return err;
813 }
814
815 can_led_event(ndev, CAN_LED_EVENT_OPEN);
816
817 ti_hecc_start(ndev);
David Brazdil0f672f62019-12-10 10:32:29 +0000818 can_rx_offload_enable(&priv->offload);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000819 netif_start_queue(ndev);
820
821 return 0;
822}
823
/* ndo_stop callback: take the interface down.
 *
 * Teardown runs roughly in reverse order of ti_hecc_open(): stop the TX
 * queue so no new frames are submitted, disable RX offload, halt the
 * controller, release the IRQ, close the common CAN layer and finally
 * power the transceiver down.
 */
static int ti_hecc_close(struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	can_rx_offload_disable(&priv->offload);
	ti_hecc_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);
	ti_hecc_transceiver_switch(priv, 0);

	can_led_event(ndev, CAN_LED_EVENT_STOP);

	return 0;
}
839
/* net_device callbacks for the HECC CAN interface */
static const struct net_device_ops ti_hecc_netdev_ops = {
	.ndo_open		= ti_hecc_open,
	.ndo_stop		= ti_hecc_close,
	.ndo_start_xmit		= ti_hecc_xmit,
	.ndo_change_mtu		= can_change_mtu,
};
846
/* Devicetree match table: HECC module as integrated on the AM3517 SoC */
static const struct of_device_id ti_hecc_dt_ids[] = {
	{
		.compatible = "ti,am3517-hecc",
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ti_hecc_dt_ids);
854
855static int ti_hecc_probe(struct platform_device *pdev)
856{
857 struct net_device *ndev = (struct net_device *)0;
858 struct ti_hecc_priv *priv;
859 struct device_node *np = pdev->dev.of_node;
Olivier Deprez157378f2022-04-04 15:47:50 +0200860 struct resource *irq;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000861 struct regulator *reg_xceiver;
862 int err = -ENODEV;
863
864 if (!IS_ENABLED(CONFIG_OF) || !np)
865 return -EINVAL;
866
867 reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
868 if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
869 return -EPROBE_DEFER;
870 else if (IS_ERR(reg_xceiver))
871 reg_xceiver = NULL;
872
873 ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX);
874 if (!ndev) {
875 dev_err(&pdev->dev, "alloc_candev failed\n");
876 return -ENOMEM;
877 }
878 priv = netdev_priv(ndev);
879
880 /* handle hecc memory */
Olivier Deprez157378f2022-04-04 15:47:50 +0200881 priv->base = devm_platform_ioremap_resource_byname(pdev, "hecc");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000882 if (IS_ERR(priv->base)) {
883 dev_err(&pdev->dev, "hecc ioremap failed\n");
Olivier Deprez0e641232021-09-23 10:07:05 +0200884 err = PTR_ERR(priv->base);
885 goto probe_exit_candev;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000886 }
887
888 /* handle hecc-ram memory */
Olivier Deprez157378f2022-04-04 15:47:50 +0200889 priv->hecc_ram = devm_platform_ioremap_resource_byname(pdev,
890 "hecc-ram");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000891 if (IS_ERR(priv->hecc_ram)) {
892 dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
Olivier Deprez0e641232021-09-23 10:07:05 +0200893 err = PTR_ERR(priv->hecc_ram);
894 goto probe_exit_candev;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000895 }
896
897 /* handle mbx memory */
Olivier Deprez157378f2022-04-04 15:47:50 +0200898 priv->mbx = devm_platform_ioremap_resource_byname(pdev, "mbx");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000899 if (IS_ERR(priv->mbx)) {
900 dev_err(&pdev->dev, "mbx ioremap failed\n");
Olivier Deprez0e641232021-09-23 10:07:05 +0200901 err = PTR_ERR(priv->mbx);
902 goto probe_exit_candev;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000903 }
904
905 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
906 if (!irq) {
907 dev_err(&pdev->dev, "No irq resource\n");
Olivier Deprez0e641232021-09-23 10:07:05 +0200908 goto probe_exit_candev;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000909 }
910
911 priv->ndev = ndev;
912 priv->reg_xceiver = reg_xceiver;
913 priv->use_hecc1int = of_property_read_bool(np, "ti,use-hecc1int");
914
915 priv->can.bittiming_const = &ti_hecc_bittiming_const;
916 priv->can.do_set_mode = ti_hecc_do_set_mode;
917 priv->can.do_get_berr_counter = ti_hecc_get_berr_counter;
918 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
919
920 spin_lock_init(&priv->mbx_lock);
921 ndev->irq = irq->start;
922 ndev->flags |= IFF_ECHO;
923 platform_set_drvdata(pdev, ndev);
924 SET_NETDEV_DEV(ndev, &pdev->dev);
925 ndev->netdev_ops = &ti_hecc_netdev_ops;
926
927 priv->clk = clk_get(&pdev->dev, "hecc_ck");
928 if (IS_ERR(priv->clk)) {
929 dev_err(&pdev->dev, "No clock available\n");
930 err = PTR_ERR(priv->clk);
931 priv->clk = NULL;
932 goto probe_exit_candev;
933 }
934 priv->can.clock.freq = clk_get_rate(priv->clk);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000935
936 err = clk_prepare_enable(priv->clk);
937 if (err) {
938 dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
Olivier Deprez0e641232021-09-23 10:07:05 +0200939 goto probe_exit_release_clk;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000940 }
941
David Brazdil0f672f62019-12-10 10:32:29 +0000942 priv->offload.mailbox_read = ti_hecc_mailbox_read;
943 priv->offload.mb_first = HECC_RX_FIRST_MBOX;
944 priv->offload.mb_last = HECC_RX_LAST_MBOX;
945 err = can_rx_offload_add_timestamp(ndev, &priv->offload);
946 if (err) {
947 dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
Olivier Deprez0e641232021-09-23 10:07:05 +0200948 goto probe_exit_disable_clk;
David Brazdil0f672f62019-12-10 10:32:29 +0000949 }
950
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000951 err = register_candev(ndev);
952 if (err) {
953 dev_err(&pdev->dev, "register_candev() failed\n");
David Brazdil0f672f62019-12-10 10:32:29 +0000954 goto probe_exit_offload;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000955 }
956
957 devm_can_led_init(ndev);
958
959 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
David Brazdil0f672f62019-12-10 10:32:29 +0000960 priv->base, (u32)ndev->irq);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000961
962 return 0;
963
David Brazdil0f672f62019-12-10 10:32:29 +0000964probe_exit_offload:
965 can_rx_offload_del(&priv->offload);
Olivier Deprez0e641232021-09-23 10:07:05 +0200966probe_exit_disable_clk:
967 clk_disable_unprepare(priv->clk);
968probe_exit_release_clk:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000969 clk_put(priv->clk);
970probe_exit_candev:
971 free_candev(ndev);
Olivier Deprez0e641232021-09-23 10:07:05 +0200972
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000973 return err;
974}
975
/* Unbind: unregister the CAN netdevice first so no new activity starts,
 * then release the clock, delete the rx-offload state and finally free
 * the netdev itself.  The devm-managed ioremaps are released
 * automatically by the driver core.
 */
static int ti_hecc_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ti_hecc_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	clk_disable_unprepare(priv->clk);
	clk_put(priv->clk);
	can_rx_offload_del(&priv->offload);
	free_candev(ndev);

	return 0;
}
989
990#ifdef CONFIG_PM
/* Legacy platform-bus suspend callback: detach the interface from the
 * stack (if running), set the HECC PDR bit — presumably a local
 * power-down request, TODO confirm against the HECC TRM — mark the CAN
 * state as sleeping, and gate the module clock.
 */
static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ti_hecc_priv *priv = netdev_priv(dev);

	if (netif_running(dev)) {
		netif_stop_queue(dev);
		netif_device_detach(dev);
	}

	/* Request controller power-down before cutting the clock */
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
	priv->can.state = CAN_STATE_SLEEPING;

	clk_disable_unprepare(priv->clk);

	return 0;
}
1008
/* Legacy platform-bus resume callback: mirror of ti_hecc_suspend().
 * Re-enable the module clock, clear the PDR (power-down) bit, restore
 * the CAN state and reattach the interface to the stack if it was
 * running before suspend.
 */
static int ti_hecc_resume(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ti_hecc_priv *priv = netdev_priv(dev);
	int err;

	/* Clock must be running again before touching registers */
	err = clk_prepare_enable(priv->clk);
	if (err)
		return err;

	hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	if (netif_running(dev)) {
		netif_device_attach(dev);
		netif_start_queue(dev);
	}

	return 0;
}
1029#else
1030#define ti_hecc_suspend NULL
1031#define ti_hecc_resume NULL
1032#endif
1033
/* TI HECC netdevice driver: platform driver structure.
 * suspend/resume are the legacy platform-bus callbacks; they compile to
 * NULL when CONFIG_PM is disabled (see the #ifdef block above).
 */
static struct platform_driver ti_hecc_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = ti_hecc_dt_ids,
	},
	.probe = ti_hecc_probe,
	.remove = ti_hecc_remove,
	.suspend = ti_hecc_suspend,
	.resume = ti_hecc_resume,
};

module_platform_driver(ti_hecc_driver);

MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_ALIAS("platform:" DRV_NAME);