blob: 474d5a7fa95e3ab183434092f21ff90cd24a7883 [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001// SPDX-License-Identifier: GPL-2.0+
2// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3// Copyright (C) 2008 Juergen Beisert
4
5#include <linux/clk.h>
6#include <linux/completion.h>
7#include <linux/delay.h>
8#include <linux/dmaengine.h>
9#include <linux/dma-mapping.h>
10#include <linux/err.h>
11#include <linux/gpio.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/irq.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19#include <linux/spi/spi.h>
20#include <linux/spi/spi_bitbang.h>
21#include <linux/types.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/of_gpio.h>
25
26#include <linux/platform_data/dma-imx.h>
27#include <linux/platform_data/spi-imx.h>
28
29#define DRIVER_NAME "spi_imx"
30
David Brazdil0f672f62019-12-10 10:32:29 +000031static bool use_dma = true;
32module_param(use_dma, bool, 0644);
33MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
34
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000035#define MXC_CSPIRXDATA 0x00
36#define MXC_CSPITXDATA 0x04
37#define MXC_CSPICTRL 0x08
38#define MXC_CSPIINT 0x0c
39#define MXC_RESET 0x1c
40
41/* generic defines to abstract from the different register layouts */
42#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
43#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
44#define MXC_INT_RDR BIT(4) /* Receive date threshold interrupt */
45
David Brazdil0f672f62019-12-10 10:32:29 +000046/* The maximum bytes that a sdma BD can transfer. */
47#define MAX_SDMA_BD_BYTES (1 << 15)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000048#define MX51_ECSPI_CTRL_MAX_BURST 512
49/* The maximum bytes that IMX53_ECSPI can transfer in slave mode.*/
50#define MX53_MAX_TRANSFER_BYTES 512
51
/* Controller variants handled by this driver, oldest first. */
enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 */
	IMX53_ECSPI,	/* ECSPI on i.mx53 and later */
};
61
62struct spi_imx_data;
63
/*
 * Per-variant operations and capabilities; one static instance exists per
 * supported controller generation (see imx*_devtype_data below).
 */
struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);	/* enable/disable irq sources */
	int (*prepare_message)(struct spi_imx_data *, struct spi_message *);
	int (*prepare_transfer)(struct spi_imx_data *, struct spi_device *);
	void (*trigger)(struct spi_imx_data *);		/* start an SPI burst */
	int (*rx_available)(struct spi_imx_data *);	/* non-zero if RX data ready */
	void (*reset)(struct spi_imx_data *);		/* drain/reset controller */
	void (*setup_wml)(struct spi_imx_data *);	/* program DMA watermarks */
	void (*disable)(struct spi_imx_data *);		/* switch controller off */
	bool has_dmamode;		/* variant supports DMA transfers */
	bool has_slavemode;		/* variant supports SPI slave mode */
	unsigned int fifo_size;		/* FIFO depth in words */
	bool dynamic_burst;		/* supports word-packing burst mode */
	enum spi_imx_devtype devtype;
};
79
/* Per-controller driver state. */
struct spi_imx_data {
	struct spi_bitbang bitbang;	/* must be first: bitbang core assumes it */
	struct device *dev;

	struct completion xfer_done;	/* signalled from irq on PIO completion */
	void __iomem *base;		/* mapped register base */
	unsigned long base_phys;	/* physical register base (for DMA slave cfg) */

	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;		/* input (reference) clock rate, Hz */
	unsigned int spi_bus_clk;	/* achieved SCLK rate, Hz */

	unsigned int bits_per_word;	/* word size of the current transfer */
	unsigned int spi_drctl;		/* DRCTL value for SPI_READY handling */

	unsigned int count, remainder;	/* TX bytes left / RX bytes expected */
	void (*tx)(struct spi_imx_data *);	/* push one word to the TX FIFO */
	void (*rx)(struct spi_imx_data *);	/* pop one word from the RX FIFO */
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo;		/* number of words pushed in tx FIFO */
	unsigned int dynamic_burst;	/* non-zero while word-packing is active */

	/* Slave mode */
	bool slave_mode;
	bool slave_aborted;
	unsigned int slave_burst;	/* remaining bytes of the slave burst */

	/* DMA */
	bool usedma;
	u32 wml;			/* watermark level, in FIFO words */
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
};
117
/* Predicates to test which controller variant is being driven. */
static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline int is_imx51_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX51_ECSPI;
}

static inline int is_imx53_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX53_ECSPI;
}
137
/*
 * Generate the PIO FIFO accessors for a given word width.  The RX variant
 * pops one word from the RX data register and, if a buffer was supplied,
 * stores it; `remainder` tracks RX bytes still expected.  A NULL rx_buf
 * means the word is discarded (TX-only transfer).
 */
#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)		\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
									\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->remainder -= sizeof(type);				\
}

/*
 * TX variant: fetch one word from tx_buf (or 0 for RX-only transfers)
 * and push it into the TX data register; `count` tracks TX bytes left.
 */
#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)		\
{									\
	type val = 0;							\
									\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
									\
	spi_imx->count -= sizeof(type);					\
									\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

/* Instantiate accessors for 8-, 16- and 32-bit words. */
MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)
172
/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};

/*
 * MX21, MX27: pick the smallest divider from mxc_clkdivs[] (starting at
 * index 2) that brings @fin down to at most @fspi.  Returns the table
 * index (the register field value) and stores the resulting rate in @fres.
 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max, unsigned int *fres)
{
	unsigned int idx = 2;

	while (idx < max && fspi * mxc_clkdivs[idx] < fin)
		idx++;

	*fres = fin / mxc_clkdivs[idx];
	return idx;
}
192
/*
 * MX1, MX31, MX35, MX51 CSPI: power-of-two divider, 4 << shift for
 * shift in [0, 7].  Returns the shift (register field value) and stores
 * the resulting rate in @fres.
 */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi, unsigned int *fres)
{
	unsigned int shift;

	/* stop at the first divider large enough; cap at 512 (shift 7) */
	for (shift = 0; shift < 7; shift++) {
		if (fspi * (4U << shift) >= fin)
			break;
	}

	*fres = fin / (4U << shift);
	return shift;
}
209
/* Round a word size in bits up to the 1/2/4-byte container it occupies. */
static int spi_imx_bytes_per_word(const int bits_per_word)
{
	if (bits_per_word > 16)
		return 4;

	return (bits_per_word > 8) ? 2 : 1;
}
219
220static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
221 struct spi_transfer *transfer)
222{
223 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
David Brazdil0f672f62019-12-10 10:32:29 +0000224
225 if (!use_dma)
226 return false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000227
228 if (!master->dma_rx)
229 return false;
230
231 if (spi_imx->slave_mode)
232 return false;
233
David Brazdil0f672f62019-12-10 10:32:29 +0000234 if (transfer->len < spi_imx->devtype_data->fifo_size)
235 return false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000236
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000237 spi_imx->dynamic_burst = 0;
238
239 return true;
240}
241
242#define MX51_ECSPI_CTRL 0x08
243#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
244#define MX51_ECSPI_CTRL_XCH (1 << 2)
245#define MX51_ECSPI_CTRL_SMC (1 << 3)
246#define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
247#define MX51_ECSPI_CTRL_DRCTL(drctl) ((drctl) << 16)
248#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
249#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
250#define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18)
251#define MX51_ECSPI_CTRL_BL_OFFSET 20
252#define MX51_ECSPI_CTRL_BL_MASK (0xfff << 20)
253
254#define MX51_ECSPI_CONFIG 0x0c
255#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
256#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
257#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
258#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
259#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs) + 20))
260
261#define MX51_ECSPI_INT 0x10
262#define MX51_ECSPI_INT_TEEN (1 << 0)
263#define MX51_ECSPI_INT_RREN (1 << 3)
264#define MX51_ECSPI_INT_RDREN (1 << 4)
265
David Brazdil0f672f62019-12-10 10:32:29 +0000266#define MX51_ECSPI_DMA 0x14
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000267#define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
268#define MX51_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16)
269#define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24)
270
271#define MX51_ECSPI_DMA_TEDEN (1 << 7)
272#define MX51_ECSPI_DMA_RXDEN (1 << 23)
273#define MX51_ECSPI_DMA_RXTDEN (1 << 31)
274
275#define MX51_ECSPI_STAT 0x18
276#define MX51_ECSPI_STAT_RR (1 << 3)
277
278#define MX51_ECSPI_TESTREG 0x20
279#define MX51_ECSPI_TESTREG_LBC BIT(31)
280
/*
 * Pop one packed 32-bit word from the RX FIFO and store it, undoing the
 * byte/halfword swap applied in spi_imx_buf_tx_swap_u32() when 8- or
 * 16-bit words were packed into a single FIFO word (little-endian only).
 */
static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
{
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->rx_buf) {
#ifdef __LITTLE_ENDIAN
		bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		if (bytes_per_word == 1)
			/* four 8-bit words: reverse the byte order */
			val = cpu_to_be32(val);
		else if (bytes_per_word == 2)
			/* two 16-bit words: swap the halfwords */
			val = (val << 16) | (val >> 16);
#endif
		*(u32 *)spi_imx->rx_buf = val;
		spi_imx->rx_buf += sizeof(u32);
	}

	spi_imx->remainder -= sizeof(u32);
}
302
/*
 * RX handler for dynamic burst mode.  Uses whole-word reads while the
 * remaining byte count is 32-bit aligned, then unpacks the final partial
 * FIFO word byte by byte (most-significant byte first).
 */
static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val;

	unaligned = spi_imx->remainder % 4;

	if (!unaligned) {
		spi_imx_buf_rx_swap_u32(spi_imx);
		return;
	}

	/* 16-bit words never pack across a word boundary: plain u16 read */
	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_rx_u16(spi_imx);
		return;
	}

	val = readl(spi_imx->base + MXC_CSPIRXDATA);

	while (unaligned--) {
		if (spi_imx->rx_buf) {
			*(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
			spi_imx->rx_buf++;
		}
		spi_imx->remainder--;
	}
}
330
/*
 * Push one packed 32-bit word to the TX FIFO, swapping bytes/halfwords on
 * little-endian so that 8- or 16-bit words leave the wire in buffer order
 * (mirrored by spi_imx_buf_rx_swap_u32() on the RX side).
 */
static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->tx_buf) {
		val = *(u32 *)spi_imx->tx_buf;
		spi_imx->tx_buf += sizeof(u32);
	}

	spi_imx->count -= sizeof(u32);
#ifdef __LITTLE_ENDIAN
	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);

	if (bytes_per_word == 1)
		/* four 8-bit words: reverse the byte order */
		val = cpu_to_be32(val);
	else if (bytes_per_word == 2)
		/* two 16-bit words: swap the halfwords */
		val = (val << 16) | (val >> 16);
#endif
	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
354
/*
 * TX handler for dynamic burst mode.  Mirrors spi_imx_buf_rx_swap():
 * whole-word writes while the remaining count is 32-bit aligned, then the
 * final partial word is packed byte by byte, most-significant byte first.
 */
static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
	int unaligned;
	u32 val = 0;

	unaligned = spi_imx->count % 4;

	if (!unaligned) {
		spi_imx_buf_tx_swap_u32(spi_imx);
		return;
	}

	/* 16-bit words never pack across a word boundary: plain u16 write */
	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_tx_u16(spi_imx);
		return;
	}

	while (unaligned--) {
		if (spi_imx->tx_buf) {
			val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
			spi_imx->tx_buf++;
		}
		spi_imx->count--;
	}

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
382
/*
 * Slave-mode RX: pop one big-endian FIFO word and copy only the valid
 * tail bytes of the current slave burst into the rx buffer.  A partial
 * (non-multiple-of-4) burst arrives right-aligned in the word.
 */
static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
{
	u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));

	if (spi_imx->rx_buf) {
		int n_bytes = spi_imx->slave_burst % sizeof(val);

		if (!n_bytes)
			n_bytes = sizeof(val);	/* full 4-byte word */

		memcpy(spi_imx->rx_buf,
		       ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);

		spi_imx->rx_buf += n_bytes;
		spi_imx->slave_burst -= n_bytes;
	}

	spi_imx->remainder -= sizeof(u32);
}
402
/*
 * Slave-mode TX: right-align the next (possibly partial) chunk of tx_buf
 * inside a 32-bit word and push it big-endian, matching the layout that
 * mx53_ecspi_rx_slave() expects on the receiving end.
 */
static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
	int n_bytes = spi_imx->count % sizeof(val);

	if (!n_bytes)
		n_bytes = sizeof(val);	/* full 4-byte word */

	if (spi_imx->tx_buf) {
		memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
		       spi_imx->tx_buf, n_bytes);
		val = cpu_to_be32(val);
		spi_imx->tx_buf += n_bytes;
	}

	spi_imx->count -= n_bytes;

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
422
/* MX51 eCSPI */
/*
 * Compute the PREDIV/POSTDIV fields for the requested SCLK rate @fspi.
 * The clock path is fin / (pre + 1) / 2^post.  Returns the two fields
 * already shifted into CTRL-register position and stores the actually
 * achieved rate in @fres.  Returns 0 when fspi > fin (no division
 * possible) and 0xff when even the largest post-divider is insufficient.
 */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
				      unsigned int fspi, unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;
	unsigned int fin = spi_imx->spi_clk;

	if (unlikely(fspi > fin))
		return 0;

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;	/* pre covers the first factor of 16 */
	if (unlikely(post > 0xf)) {
		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
				fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
461
462static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
463{
464 unsigned val = 0;
465
466 if (enable & MXC_INT_TE)
467 val |= MX51_ECSPI_INT_TEEN;
468
469 if (enable & MXC_INT_RR)
470 val |= MX51_ECSPI_INT_RREN;
471
472 if (enable & MXC_INT_RDR)
473 val |= MX51_ECSPI_INT_RDREN;
474
475 writel(val, spi_imx->base + MX51_ECSPI_INT);
476}
477
478static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
479{
480 u32 reg;
481
482 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
483 reg |= MX51_ECSPI_CTRL_XCH;
484 writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
485}
486
487static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
488{
489 u32 ctrl;
490
491 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
492 ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
493 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
494}
495
David Brazdil0f672f62019-12-10 10:32:29 +0000496static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
497 struct spi_message *msg)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000498{
David Brazdil0f672f62019-12-10 10:32:29 +0000499 struct spi_device *spi = msg->spi;
Olivier Deprez0e641232021-09-23 10:07:05 +0200500 struct spi_transfer *xfer;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000501 u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
Olivier Deprez0e641232021-09-23 10:07:05 +0200502 u32 min_speed_hz = ~0U;
503 u32 testreg, delay;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000504 u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
505
506 /* set Master or Slave mode */
507 if (spi_imx->slave_mode)
508 ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
509 else
510 ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
511
512 /*
513 * Enable SPI_RDY handling (falling edge/level triggered).
514 */
515 if (spi->mode & SPI_READY)
516 ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
517
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000518 /* set chip select to use */
519 ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
520
David Brazdil0f672f62019-12-10 10:32:29 +0000521 /*
522 * The ctrl register must be written first, with the EN bit set other
523 * registers must not be written to.
524 */
525 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
526
527 testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
528 if (spi->mode & SPI_LOOP)
529 testreg |= MX51_ECSPI_TESTREG_LBC;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000530 else
David Brazdil0f672f62019-12-10 10:32:29 +0000531 testreg &= ~MX51_ECSPI_TESTREG_LBC;
532 writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000533
534 /*
535 * eCSPI burst completion by Chip Select signal in Slave mode
536 * is not functional for imx53 Soc, config SPI burst completed when
537 * BURST_LENGTH + 1 bits are received
538 */
539 if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
540 cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
541 else
542 cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
543
544 if (spi->mode & SPI_CPHA)
545 cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
546 else
547 cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
548
549 if (spi->mode & SPI_CPOL) {
550 cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
551 cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
552 } else {
553 cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
554 cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
555 }
David Brazdil0f672f62019-12-10 10:32:29 +0000556
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000557 if (spi->mode & SPI_CS_HIGH)
558 cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
559 else
560 cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
561
David Brazdil0f672f62019-12-10 10:32:29 +0000562 writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
563
Olivier Deprez0e641232021-09-23 10:07:05 +0200564 /*
565 * Wait until the changes in the configuration register CONFIGREG
566 * propagate into the hardware. It takes exactly one tick of the
567 * SCLK clock, but we will wait two SCLK clock just to be sure. The
568 * effect of the delay it takes for the hardware to apply changes
569 * is noticable if the SCLK clock run very slow. In such a case, if
570 * the polarity of SCLK should be inverted, the GPIO ChipSelect might
571 * be asserted before the SCLK polarity changes, which would disrupt
572 * the SPI communication as the device on the other end would consider
573 * the change of SCLK polarity as a clock tick already.
574 *
575 * Because spi_imx->spi_bus_clk is only set in bitbang prepare_message
576 * callback, iterate over all the transfers in spi_message, find the
577 * one with lowest bus frequency, and use that bus frequency for the
578 * delay calculation. In case all transfers have speed_hz == 0, then
579 * min_speed_hz is ~0 and the resulting delay is zero.
580 */
581 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
582 if (!xfer->speed_hz)
583 continue;
584 min_speed_hz = min(xfer->speed_hz, min_speed_hz);
585 }
586
587 delay = (2 * 1000000) / min_speed_hz;
588 if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
589 udelay(delay);
590 else /* SCLK is _very_ slow */
591 usleep_range(delay, delay + 10);
592
David Brazdil0f672f62019-12-10 10:32:29 +0000593 return 0;
594}
595
/*
 * Per-transfer setup: program the burst length (bit count) and the clock
 * dividers, and enable SMC (start-on-TX-write) when using DMA.  Always
 * returns 0.
 */
static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
				       struct spi_device *spi)
{
	u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	u32 clk;

	/* Clear BL field and set the right value */
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
		/* i.mx53 slave: burst length is the whole slave burst */
		ctrl |= (spi_imx->slave_burst * 8 - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;
	else
		ctrl |= (spi_imx->bits_per_word - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;

	/* set clock speed */
	ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
		  0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
	ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
	spi_imx->spi_bus_clk = clk;	/* record the achieved rate */

	if (spi_imx->usedma)
		ctrl |= MX51_ECSPI_CTRL_SMC;

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	return 0;
}
624
/*
 * Program the DMA watermark levels and enable the TX/RX/RXT DMA requests.
 * NOTE(review): the RX watermark is programmed one word lower than the
 * TX/RXT levels — presumably deliberate for this SoC's DMA handshake;
 * confirm against the eCSPI DMAREG description before changing.
 */
static void mx51_setup_wml(struct spi_imx_data *spi_imx)
{
	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */
	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
		MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
}
637
638static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
639{
640 return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
641}
642
643static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
644{
645 /* drain receive buffer */
646 while (mx51_ecspi_rx_available(spi_imx))
647 readl(spi_imx->base + MXC_CSPIRXDATA);
648}
649
650#define MX31_INTREG_TEEN (1 << 0)
651#define MX31_INTREG_RREN (1 << 3)
652
653#define MX31_CSPICTRL_ENABLE (1 << 0)
654#define MX31_CSPICTRL_MASTER (1 << 1)
655#define MX31_CSPICTRL_XCH (1 << 2)
656#define MX31_CSPICTRL_SMC (1 << 3)
657#define MX31_CSPICTRL_POL (1 << 4)
658#define MX31_CSPICTRL_PHA (1 << 5)
659#define MX31_CSPICTRL_SSCTL (1 << 6)
660#define MX31_CSPICTRL_SSPOL (1 << 7)
661#define MX31_CSPICTRL_BC_SHIFT 8
662#define MX35_CSPICTRL_BL_SHIFT 20
663#define MX31_CSPICTRL_CS_SHIFT 24
664#define MX35_CSPICTRL_CS_SHIFT 12
665#define MX31_CSPICTRL_DR_SHIFT 16
666
667#define MX31_CSPI_DMAREG 0x10
668#define MX31_DMAREG_RH_DEN (1<<4)
669#define MX31_DMAREG_TH_DEN (1<<1)
670
671#define MX31_CSPISTATUS 0x14
672#define MX31_STATUS_RR (1 << 3)
673
674#define MX31_CSPI_TESTREG 0x1C
675#define MX31_TEST_LBC (1 << 14)
676
677/* These functions also work for the i.MX35, but be aware that
678 * the i.MX35 has a slightly different register layout for bits
679 * we do not use here.
680 */
681static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
682{
683 unsigned int val = 0;
684
685 if (enable & MXC_INT_TE)
686 val |= MX31_INTREG_TEEN;
687 if (enable & MXC_INT_RR)
688 val |= MX31_INTREG_RREN;
689
690 writel(val, spi_imx->base + MXC_CSPIINT);
691}
692
693static void mx31_trigger(struct spi_imx_data *spi_imx)
694{
695 unsigned int reg;
696
697 reg = readl(spi_imx->base + MXC_CSPICTRL);
698 reg |= MX31_CSPICTRL_XCH;
699 writel(reg, spi_imx->base + MXC_CSPICTRL);
700}
701
David Brazdil0f672f62019-12-10 10:32:29 +0000702static int mx31_prepare_message(struct spi_imx_data *spi_imx,
703 struct spi_message *msg)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000704{
David Brazdil0f672f62019-12-10 10:32:29 +0000705 return 0;
706}
707
/*
 * Per-transfer setup for MX31/MX35 CSPI: clock divider, burst length,
 * SPI mode bits, hardware chip select (only when no CS GPIO is used),
 * loopback, and DMA request configuration.  Always returns 0.
 */
static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX31_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;	/* record the achieved rate */

	/* burst-length and CS fields sit at different offsets on i.mx35 */
	if (is_imx35_cspi(spi_imx)) {
		reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (spi->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	if (!gpio_is_valid(spi->cs_gpio))
		reg |= (spi->chip_select) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	if (spi_imx->usedma)
		reg |= MX31_CSPICTRL_SMC;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= MX31_TEST_LBC;
	else
		reg &= ~MX31_TEST_LBC;
	writel(reg, spi_imx->base + MX31_CSPI_TESTREG);

	if (spi_imx->usedma) {
		/*
		 * configure DMA requests when RXFIFO is half full and
		 * when TXFIFO is half empty
		 */
		writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
			spi_imx->base + MX31_CSPI_DMAREG);
	}

	return 0;
}
759
760static int mx31_rx_available(struct spi_imx_data *spi_imx)
761{
762 return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
763}
764
765static void mx31_reset(struct spi_imx_data *spi_imx)
766{
767 /* drain receive buffer */
768 while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
769 readl(spi_imx->base + MXC_CSPIRXDATA);
770}
771
772#define MX21_INTREG_RR (1 << 4)
773#define MX21_INTREG_TEEN (1 << 9)
774#define MX21_INTREG_RREN (1 << 13)
775
776#define MX21_CSPICTRL_POL (1 << 5)
777#define MX21_CSPICTRL_PHA (1 << 6)
778#define MX21_CSPICTRL_SSPOL (1 << 8)
779#define MX21_CSPICTRL_XCH (1 << 9)
780#define MX21_CSPICTRL_ENABLE (1 << 10)
781#define MX21_CSPICTRL_MASTER (1 << 11)
782#define MX21_CSPICTRL_DR_SHIFT 14
783#define MX21_CSPICTRL_CS_SHIFT 19
784
785static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
786{
787 unsigned int val = 0;
788
789 if (enable & MXC_INT_TE)
790 val |= MX21_INTREG_TEEN;
791 if (enable & MXC_INT_RR)
792 val |= MX21_INTREG_RREN;
793
794 writel(val, spi_imx->base + MXC_CSPIINT);
795}
796
797static void mx21_trigger(struct spi_imx_data *spi_imx)
798{
799 unsigned int reg;
800
801 reg = readl(spi_imx->base + MXC_CSPICTRL);
802 reg |= MX21_CSPICTRL_XCH;
803 writel(reg, spi_imx->base + MXC_CSPICTRL);
804}
805
David Brazdil0f672f62019-12-10 10:32:29 +0000806static int mx21_prepare_message(struct spi_imx_data *spi_imx,
807 struct spi_message *msg)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000808{
David Brazdil0f672f62019-12-10 10:32:29 +0000809 return 0;
810}
811
/*
 * Per-transfer setup for MX21/MX27 CSPI: clock divider (divider table is
 * shorter on i.mx27), burst length, SPI mode bits and hardware chip
 * select (only when no CS GPIO is used).  Always returns 0.
 */
static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
	unsigned int clk;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
		<< MX21_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;	/* record the achieved rate */

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	if (!gpio_is_valid(spi->cs_gpio))
		reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
838
839static int mx21_rx_available(struct spi_imx_data *spi_imx)
840{
841 return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
842}
843
/* Writing 1 to the reset register resets the controller. */
static void mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
848
849#define MX1_INTREG_RR (1 << 3)
850#define MX1_INTREG_TEEN (1 << 8)
851#define MX1_INTREG_RREN (1 << 11)
852
853#define MX1_CSPICTRL_POL (1 << 4)
854#define MX1_CSPICTRL_PHA (1 << 5)
855#define MX1_CSPICTRL_XCH (1 << 8)
856#define MX1_CSPICTRL_ENABLE (1 << 9)
857#define MX1_CSPICTRL_MASTER (1 << 10)
858#define MX1_CSPICTRL_DR_SHIFT 13
859
860static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
861{
862 unsigned int val = 0;
863
864 if (enable & MXC_INT_TE)
865 val |= MX1_INTREG_TEEN;
866 if (enable & MXC_INT_RR)
867 val |= MX1_INTREG_RREN;
868
869 writel(val, spi_imx->base + MXC_CSPIINT);
870}
871
872static void mx1_trigger(struct spi_imx_data *spi_imx)
873{
874 unsigned int reg;
875
876 reg = readl(spi_imx->base + MXC_CSPICTRL);
877 reg |= MX1_CSPICTRL_XCH;
878 writel(reg, spi_imx->base + MXC_CSPICTRL);
879}
880
David Brazdil0f672f62019-12-10 10:32:29 +0000881static int mx1_prepare_message(struct spi_imx_data *spi_imx,
882 struct spi_message *msg)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000883{
David Brazdil0f672f62019-12-10 10:32:29 +0000884 return 0;
885}
886
/*
 * Per-transfer setup for MX1 CSPI: clock divider, burst length and
 * clock polarity/phase.  Always returns 0.
 */
static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
				struct spi_device *spi)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
		MX1_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;	/* record the achieved rate */

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
908
909static int mx1_rx_available(struct spi_imx_data *spi_imx)
910{
911 return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
912}
913
/* Writing 1 to the reset register resets the controller. */
static void mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
918
919static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
920 .intctrl = mx1_intctrl,
David Brazdil0f672f62019-12-10 10:32:29 +0000921 .prepare_message = mx1_prepare_message,
922 .prepare_transfer = mx1_prepare_transfer,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000923 .trigger = mx1_trigger,
924 .rx_available = mx1_rx_available,
925 .reset = mx1_reset,
926 .fifo_size = 8,
927 .has_dmamode = false,
928 .dynamic_burst = false,
929 .has_slavemode = false,
930 .devtype = IMX1_CSPI,
931};
932
933static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
934 .intctrl = mx21_intctrl,
David Brazdil0f672f62019-12-10 10:32:29 +0000935 .prepare_message = mx21_prepare_message,
936 .prepare_transfer = mx21_prepare_transfer,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000937 .trigger = mx21_trigger,
938 .rx_available = mx21_rx_available,
939 .reset = mx21_reset,
940 .fifo_size = 8,
941 .has_dmamode = false,
942 .dynamic_burst = false,
943 .has_slavemode = false,
944 .devtype = IMX21_CSPI,
945};
946
947static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
948 /* i.mx27 cspi shares the functions with i.mx21 one */
949 .intctrl = mx21_intctrl,
David Brazdil0f672f62019-12-10 10:32:29 +0000950 .prepare_message = mx21_prepare_message,
951 .prepare_transfer = mx21_prepare_transfer,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000952 .trigger = mx21_trigger,
953 .rx_available = mx21_rx_available,
954 .reset = mx21_reset,
955 .fifo_size = 8,
956 .has_dmamode = false,
957 .dynamic_burst = false,
958 .has_slavemode = false,
959 .devtype = IMX27_CSPI,
960};
961
962static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
963 .intctrl = mx31_intctrl,
David Brazdil0f672f62019-12-10 10:32:29 +0000964 .prepare_message = mx31_prepare_message,
965 .prepare_transfer = mx31_prepare_transfer,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000966 .trigger = mx31_trigger,
967 .rx_available = mx31_rx_available,
968 .reset = mx31_reset,
969 .fifo_size = 8,
970 .has_dmamode = false,
971 .dynamic_burst = false,
972 .has_slavemode = false,
973 .devtype = IMX31_CSPI,
974};
975
976static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
977 /* i.mx35 and later cspi shares the functions with i.mx31 one */
978 .intctrl = mx31_intctrl,
David Brazdil0f672f62019-12-10 10:32:29 +0000979 .prepare_message = mx31_prepare_message,
980 .prepare_transfer = mx31_prepare_transfer,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000981 .trigger = mx31_trigger,
982 .rx_available = mx31_rx_available,
983 .reset = mx31_reset,
984 .fifo_size = 8,
985 .has_dmamode = true,
986 .dynamic_burst = false,
987 .has_slavemode = false,
988 .devtype = IMX35_CSPI,
989};
990
/*
 * i.MX51 eCSPI: 64-word FIFO, DMA-capable, supports dynamic burst
 * resizing and slave mode; setup_wml programs the DMA watermark.
 */
static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
        .intctrl = mx51_ecspi_intctrl,
        .prepare_message = mx51_ecspi_prepare_message,
        .prepare_transfer = mx51_ecspi_prepare_transfer,
        .trigger = mx51_ecspi_trigger,
        .rx_available = mx51_ecspi_rx_available,
        .reset = mx51_ecspi_reset,
        .setup_wml = mx51_setup_wml,
        .fifo_size = 64,
        .has_dmamode = true,
        .dynamic_burst = true,
        .has_slavemode = true,
        .disable = mx51_ecspi_disable,
        .devtype = IMX51_ECSPI,
};
1006
/*
 * i.MX53 eCSPI: shares the i.MX51 ops but without dynamic burst.
 *
 * NOTE(review): .has_dmamode is set but .setup_wml is not, although
 * spi_imx_dma_transfer() fails with -EINVAL when setup_wml is absent —
 * confirm whether the DMA path is ever taken on i.MX53 or whether
 * .setup_wml = mx51_setup_wml is missing here.
 */
static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
        .intctrl = mx51_ecspi_intctrl,
        .prepare_message = mx51_ecspi_prepare_message,
        .prepare_transfer = mx51_ecspi_prepare_transfer,
        .trigger = mx51_ecspi_trigger,
        .rx_available = mx51_ecspi_rx_available,
        .reset = mx51_ecspi_reset,
        .fifo_size = 64,
        .has_dmamode = true,
        .has_slavemode = true,
        .disable = mx51_ecspi_disable,
        .devtype = IMX53_ECSPI,
};
1020
/* Legacy (non-DT) platform-device id table; driver_data selects the devtype. */
static const struct platform_device_id spi_imx_devtype[] = {
        {
                .name = "imx1-cspi",
                .driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
        }, {
                .name = "imx21-cspi",
                .driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
        }, {
                .name = "imx27-cspi",
                .driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
        }, {
                .name = "imx31-cspi",
                .driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
        }, {
                .name = "imx35-cspi",
                .driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
        }, {
                .name = "imx51-ecspi",
                .driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
        }, {
                .name = "imx53-ecspi",
                .driver_data = (kernel_ulong_t) &imx53_ecspi_devtype_data,
        }, {
                /* sentinel */
        }
};
1047
/* Device-tree match table; .data points at the per-SoC devtype descriptor. */
static const struct of_device_id spi_imx_dt_ids[] = {
        { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
        { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
        { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
        { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
        { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
        { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
        { .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
1059
1060static void spi_imx_chipselect(struct spi_device *spi, int is_active)
1061{
1062 int active = is_active != BITBANG_CS_INACTIVE;
1063 int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);
1064
1065 if (spi->mode & SPI_NO_CS)
1066 return;
1067
1068 if (!gpio_is_valid(spi->cs_gpio))
1069 return;
1070
1071 gpio_set_value(spi->cs_gpio, dev_is_lowactive ^ active);
1072}
1073
1074static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
1075{
1076 u32 ctrl;
1077
1078 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
1079 ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
1080 ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
1081 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
1082}
1083
/*
 * Refill the TX FIFO from the current transfer buffer and, in master
 * mode, trigger the burst.
 *
 * spi_imx->remainder counts bytes left in the current hardware burst;
 * when it reaches zero either a fresh burst length is programmed
 * (dynamic_burst) or one FIFO word per write is assumed.
 */
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
        unsigned int burst_len, fifo_words;

        /* With dynamic bursts the FIFO is always fed 32-bit words. */
        if (spi_imx->dynamic_burst)
                fifo_words = 4;
        else
                fifo_words = spi_imx_bytes_per_word(spi_imx->bits_per_word);
        /*
         * Reload the FIFO when the remaining bytes to be transferred in the
         * current burst is 0. This only applies when bits_per_word is a
         * multiple of 8.
         */
        if (!spi_imx->remainder) {
                if (spi_imx->dynamic_burst) {

                        /* We need to deal unaligned data first */
                        burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;

                        if (!burst_len)
                                burst_len = MX51_ECSPI_CTRL_MAX_BURST;

                        spi_imx_set_burst_len(spi_imx, burst_len * 8);

                        spi_imx->remainder = burst_len;
                } else {
                        spi_imx->remainder = fifo_words;
                }
        }

        while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
                if (!spi_imx->count)
                        break;
                /* Don't push more words than the current burst covers. */
                if (spi_imx->dynamic_burst &&
                    spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
                                                    fifo_words))
                        break;
                spi_imx->tx(spi_imx);
                spi_imx->txfifo++;
        }

        /* In slave mode the remote master clocks the transfer. */
        if (!spi_imx->slave_mode)
                spi_imx->devtype_data->trigger(spi_imx);
}
1128
/*
 * PIO-mode interrupt handler: drain the RX FIFO, refill TX while data
 * remains, and complete xfer_done once every pushed word was read back.
 */
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
        struct spi_imx_data *spi_imx = dev_id;

        /* One RX word is expected for every word previously pushed. */
        while (spi_imx->txfifo &&
               spi_imx->devtype_data->rx_available(spi_imx)) {
                spi_imx->rx(spi_imx);
                spi_imx->txfifo--;
        }

        /* More payload to send: top up the FIFO and wait for the next IRQ. */
        if (spi_imx->count) {
                spi_imx_push(spi_imx);
                return IRQ_HANDLED;
        }

        if (spi_imx->txfifo) {
                /* No data left to push, but still waiting for rx data,
                 * enable receive data available interrupt.
                 */
                spi_imx->devtype_data->intctrl(
                                spi_imx, MXC_INT_RR);
                return IRQ_HANDLED;
        }

        /* Transfer finished: mask all interrupts and wake the waiter. */
        spi_imx->devtype_data->intctrl(spi_imx, 0);
        complete(&spi_imx->xfer_done);

        return IRQ_HANDLED;
}
1158
1159static int spi_imx_dma_configure(struct spi_master *master)
1160{
1161 int ret;
1162 enum dma_slave_buswidth buswidth;
1163 struct dma_slave_config rx = {}, tx = {};
1164 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1165
1166 switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
1167 case 4:
1168 buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
1169 break;
1170 case 2:
1171 buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
1172 break;
1173 case 1:
1174 buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
1175 break;
1176 default:
1177 return -EINVAL;
1178 }
1179
1180 tx.direction = DMA_MEM_TO_DEV;
1181 tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
1182 tx.dst_addr_width = buswidth;
1183 tx.dst_maxburst = spi_imx->wml;
1184 ret = dmaengine_slave_config(master->dma_tx, &tx);
1185 if (ret) {
1186 dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
1187 return ret;
1188 }
1189
1190 rx.direction = DMA_DEV_TO_MEM;
1191 rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
1192 rx.src_addr_width = buswidth;
1193 rx.src_maxburst = spi_imx->wml;
1194 ret = dmaengine_slave_config(master->dma_rx, &rx);
1195 if (ret) {
1196 dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
1197 return ret;
1198 }
1199
1200 return 0;
1201}
1202
/*
 * spi_bitbang ->setup_transfer hook.
 *
 * Chooses the bus clock for this transfer, selects the PIO tx/rx FIFO
 * accessors matching bits_per_word (and dynamic-burst capability),
 * decides between PIO and DMA, then lets the devtype hook program the
 * controller registers.
 */
static int spi_imx_setupxfer(struct spi_device *spi,
                                 struct spi_transfer *t)
{
        struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

        if (!t)
                return 0;

        /* Fall back to the device's max speed when the transfer has none. */
        if (!t->speed_hz) {
                if (!spi->max_speed_hz) {
                        dev_err(&spi->dev, "no speed_hz provided!\n");
                        return -EINVAL;
                }
                dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
                spi_imx->spi_bus_clk = spi->max_speed_hz;
        } else
                spi_imx->spi_bus_clk = t->speed_hz;

        spi_imx->bits_per_word = t->bits_per_word;

        /*
         * Initialize the functions for transfer. To transfer non byte-aligned
         * words, we have to use multiple word-size bursts, we can't use
         * dynamic_burst in that case.
         */
        if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
            (spi_imx->bits_per_word == 8 ||
            spi_imx->bits_per_word == 16 ||
            spi_imx->bits_per_word == 32)) {

                spi_imx->rx = spi_imx_buf_rx_swap;
                spi_imx->tx = spi_imx_buf_tx_swap;
                spi_imx->dynamic_burst = 1;

        } else {
                /* Word-size accessors keyed off bits_per_word. */
                if (spi_imx->bits_per_word <= 8) {
                        spi_imx->rx = spi_imx_buf_rx_u8;
                        spi_imx->tx = spi_imx_buf_tx_u8;
                } else if (spi_imx->bits_per_word <= 16) {
                        spi_imx->rx = spi_imx_buf_rx_u16;
                        spi_imx->tx = spi_imx_buf_tx_u16;
                } else {
                        spi_imx->rx = spi_imx_buf_rx_u32;
                        spi_imx->tx = spi_imx_buf_tx_u32;
                }
                spi_imx->dynamic_burst = 0;
        }

        if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
                spi_imx->usedma = 1;
        else
                spi_imx->usedma = 0;

        /* i.MX53 slave mode overrides the accessors chosen above. */
        if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
                spi_imx->rx = mx53_ecspi_rx_slave;
                spi_imx->tx = mx53_ecspi_tx_slave;
                spi_imx->slave_burst = t->len;
        }

        spi_imx->devtype_data->prepare_transfer(spi_imx, spi);

        return 0;
}
1266
1267static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
1268{
1269 struct spi_master *master = spi_imx->bitbang.master;
1270
1271 if (master->dma_rx) {
1272 dma_release_channel(master->dma_rx);
1273 master->dma_rx = NULL;
1274 }
1275
1276 if (master->dma_tx) {
1277 dma_release_channel(master->dma_tx);
1278 master->dma_tx = NULL;
1279 }
1280}
1281
/*
 * Acquire TX/RX DMA channels and enable the SPI core's DMA path.
 *
 * Returns 0 on success; on failure the channels are released and the
 * error is returned (callers treat -EPROBE_DEFER as fatal for probe,
 * anything else falls back to PIO).
 */
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
                             struct spi_master *master)
{
        int ret;

        /* use pio mode for i.mx6dl chip TKT238285 */
        if (of_machine_is_compatible("fsl,imx6dl"))
                return 0;

        /* Default watermark level: half the FIFO depth. */
        spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;

        /* Prepare for TX DMA: */
        master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
        if (IS_ERR(master->dma_tx)) {
                ret = PTR_ERR(master->dma_tx);
                dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
                master->dma_tx = NULL;
                goto err;
        }

        /* Prepare for RX : */
        master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
        if (IS_ERR(master->dma_rx)) {
                ret = PTR_ERR(master->dma_rx);
                dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
                master->dma_rx = NULL;
                goto err;
        }

        init_completion(&spi_imx->dma_rx_completion);
        init_completion(&spi_imx->dma_tx_completion);
        master->can_dma = spi_imx_can_dma;
        master->max_dma_len = MAX_SDMA_BD_BYTES;
        /* DMA needs both sg lists; the core supplies dummy buffers. */
        spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
                                         SPI_MASTER_MUST_TX;

        return 0;
err:
        spi_imx_sdma_exit(spi_imx);
        return ret;
}
1323
1324static void spi_imx_dma_rx_callback(void *cookie)
1325{
1326 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1327
1328 complete(&spi_imx->dma_rx_completion);
1329}
1330
1331static void spi_imx_dma_tx_callback(void *cookie)
1332{
1333 struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
1334
1335 complete(&spi_imx->dma_tx_completion);
1336}
1337
1338static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
1339{
1340 unsigned long timeout = 0;
1341
1342 /* Time with actual data transfer and CS change delay related to HW */
1343 timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
1344
1345 /* Add extra second for scheduler related activities */
1346 timeout += 1;
1347
1348 /* Double calculated timeout */
1349 return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
1350}
1351
/*
 * Execute one transfer via SDMA.
 *
 * Picks a watermark level that divides the final scatterlist entry so
 * no tail bytes are left, arms RX strictly before TX (TX setup starts
 * the bus clock), then waits for both completions with a timeout.
 * Returns transfer->len on success or a negative errno.
 */
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
                                struct spi_transfer *transfer)
{
        struct dma_async_tx_descriptor *desc_tx, *desc_rx;
        unsigned long transfer_timeout;
        unsigned long timeout;
        struct spi_master *master = spi_imx->bitbang.master;
        struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
        struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
        unsigned int bytes_per_word, i;
        int ret;

        /* Get the right burst length from the last sg to ensure no tail data */
        bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
        for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
                if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
                        break;
        }
        /* Use 1 as wml in case no available burst length got */
        if (i == 0)
                i = 1;

        spi_imx->wml = i;

        ret = spi_imx_dma_configure(master);
        if (ret)
                return ret;

        /* The devtype must be able to program the watermark level. */
        if (!spi_imx->devtype_data->setup_wml) {
                dev_err(spi_imx->dev, "No setup_wml()?\n");
                return -EINVAL;
        }
        spi_imx->devtype_data->setup_wml(spi_imx);

        /*
         * The TX DMA setup starts the transfer, so make sure RX is configured
         * before TX.
         */
        desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
                                rx->sgl, rx->nents, DMA_DEV_TO_MEM,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_rx)
                return -EINVAL;

        desc_rx->callback = spi_imx_dma_rx_callback;
        desc_rx->callback_param = (void *)spi_imx;
        dmaengine_submit(desc_rx);
        reinit_completion(&spi_imx->dma_rx_completion);
        dma_async_issue_pending(master->dma_rx);

        desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
                                tx->sgl, tx->nents, DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_tx) {
                /* RX is already armed; tear down TX prep failure. */
                dmaengine_terminate_all(master->dma_tx);
                return -EINVAL;
        }

        desc_tx->callback = spi_imx_dma_tx_callback;
        desc_tx->callback_param = (void *)spi_imx;
        dmaengine_submit(desc_tx);
        reinit_completion(&spi_imx->dma_tx_completion);
        dma_async_issue_pending(master->dma_tx);

        transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

        /* Wait SDMA to finish the data transfer.*/
        timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
                                              transfer_timeout);
        if (!timeout) {
                dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
                dmaengine_terminate_all(master->dma_tx);
                dmaengine_terminate_all(master->dma_rx);
                return -ETIMEDOUT;
        }

        timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
                                              transfer_timeout);
        if (!timeout) {
                dev_err(&master->dev, "I/O Error in DMA RX\n");
                /* Reset the controller so a stuck RX can't wedge the bus. */
                spi_imx->devtype_data->reset(spi_imx);
                dmaengine_terminate_all(master->dma_rx);
                return -ETIMEDOUT;
        }

        return transfer->len;
}
1439
/*
 * Interrupt-driven PIO transfer (master mode). Primes the TX FIFO,
 * unmasks the TX-empty interrupt and blocks until the ISR completes
 * xfer_done or the timeout elapses (then the controller is reset).
 * Returns transfer->len on success or -ETIMEDOUT.
 */
static int spi_imx_pio_transfer(struct spi_device *spi,
                                struct spi_transfer *transfer)
{
        struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
        unsigned long transfer_timeout;
        unsigned long timeout;

        /* Stage the transfer state consumed by spi_imx_push()/the ISR. */
        spi_imx->tx_buf = transfer->tx_buf;
        spi_imx->rx_buf = transfer->rx_buf;
        spi_imx->count = transfer->len;
        spi_imx->txfifo = 0;
        spi_imx->remainder = 0;

        reinit_completion(&spi_imx->xfer_done);

        spi_imx_push(spi_imx);

        spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

        transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

        timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
                                              transfer_timeout);
        if (!timeout) {
                dev_err(&spi->dev, "I/O Error in PIO\n");
                spi_imx->devtype_data->reset(spi_imx);
                return -ETIMEDOUT;
        }

        return transfer->len;
}
1471
/*
 * PIO transfer in slave mode. Waits interruptibly because the remote
 * master decides when (and whether) to clock the transfer; the wait can
 * also be broken by spi_imx_slave_abort(). Returns transfer->len on
 * success, -EMSGSIZE for oversized i.MX53 transfers, or -EINTR.
 */
static int spi_imx_pio_transfer_slave(struct spi_device *spi,
                                      struct spi_transfer *transfer)
{
        struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
        int ret = transfer->len;

        /* Hardware limit: the i.MX53 slave path caps a single transaction. */
        if (is_imx53_ecspi(spi_imx) &&
            transfer->len > MX53_MAX_TRANSFER_BYTES) {
                dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
                        MX53_MAX_TRANSFER_BYTES);
                return -EMSGSIZE;
        }

        spi_imx->tx_buf = transfer->tx_buf;
        spi_imx->rx_buf = transfer->rx_buf;
        spi_imx->count = transfer->len;
        spi_imx->txfifo = 0;
        spi_imx->remainder = 0;

        reinit_completion(&spi_imx->xfer_done);
        spi_imx->slave_aborted = false;

        spi_imx_push(spi_imx);

        spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);

        if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
            spi_imx->slave_aborted) {
                dev_dbg(&spi->dev, "interrupted\n");
                ret = -EINTR;
        }

        /* ecspi has a HW issue when works in Slave mode,
         * after 64 words writtern to TXFIFO, even TXFIFO becomes empty,
         * ECSPI_TXDATA keeps shift out the last word data,
         * so we have to disable ECSPI when in slave mode after the
         * transfer completes
         */
        if (spi_imx->devtype_data->disable)
                spi_imx->devtype_data->disable(spi_imx);

        return ret;
}
1515
1516static int spi_imx_transfer(struct spi_device *spi,
1517 struct spi_transfer *transfer)
1518{
1519 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1520
1521 /* flush rxfifo before transfer */
1522 while (spi_imx->devtype_data->rx_available(spi_imx))
David Brazdil0f672f62019-12-10 10:32:29 +00001523 readl(spi_imx->base + MXC_CSPIRXDATA);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001524
1525 if (spi_imx->slave_mode)
1526 return spi_imx_pio_transfer_slave(spi, transfer);
1527
1528 if (spi_imx->usedma)
1529 return spi_imx_dma_transfer(spi_imx, transfer);
1530 else
1531 return spi_imx_pio_transfer(spi, transfer);
1532}
1533
1534static int spi_imx_setup(struct spi_device *spi)
1535{
1536 dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
1537 spi->mode, spi->bits_per_word, spi->max_speed_hz);
1538
1539 if (spi->mode & SPI_NO_CS)
1540 return 0;
1541
1542 if (gpio_is_valid(spi->cs_gpio))
1543 gpio_direction_output(spi->cs_gpio,
1544 spi->mode & SPI_CS_HIGH ? 0 : 1);
1545
1546 spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);
1547
1548 return 0;
1549}
1550
/* spi_master ->cleanup hook: no per-device state to release. */
static void spi_imx_cleanup(struct spi_device *spi)
{
}
1554
1555static int
1556spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
1557{
1558 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1559 int ret;
1560
1561 ret = clk_enable(spi_imx->clk_per);
1562 if (ret)
1563 return ret;
1564
1565 ret = clk_enable(spi_imx->clk_ipg);
1566 if (ret) {
1567 clk_disable(spi_imx->clk_per);
1568 return ret;
1569 }
1570
David Brazdil0f672f62019-12-10 10:32:29 +00001571 ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
1572 if (ret) {
1573 clk_disable(spi_imx->clk_ipg);
1574 clk_disable(spi_imx->clk_per);
1575 }
1576
1577 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001578}
1579
1580static int
1581spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
1582{
1583 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1584
1585 clk_disable(spi_imx->clk_ipg);
1586 clk_disable(spi_imx->clk_per);
1587 return 0;
1588}
1589
1590static int spi_imx_slave_abort(struct spi_master *master)
1591{
1592 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1593
1594 spi_imx->slave_aborted = true;
1595 complete(&spi_imx->xfer_done);
1596
1597 return 0;
1598}
1599
1600static int spi_imx_probe(struct platform_device *pdev)
1601{
1602 struct device_node *np = pdev->dev.of_node;
1603 const struct of_device_id *of_id =
1604 of_match_device(spi_imx_dt_ids, &pdev->dev);
1605 struct spi_imx_master *mxc_platform_info =
1606 dev_get_platdata(&pdev->dev);
1607 struct spi_master *master;
1608 struct spi_imx_data *spi_imx;
1609 struct resource *res;
1610 int i, ret, irq, spi_drctl;
1611 const struct spi_imx_devtype_data *devtype_data = of_id ? of_id->data :
1612 (struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
1613 bool slave_mode;
1614
1615 if (!np && !mxc_platform_info) {
1616 dev_err(&pdev->dev, "can't get the platform data\n");
1617 return -EINVAL;
1618 }
1619
1620 slave_mode = devtype_data->has_slavemode &&
1621 of_property_read_bool(np, "spi-slave");
1622 if (slave_mode)
1623 master = spi_alloc_slave(&pdev->dev,
1624 sizeof(struct spi_imx_data));
1625 else
1626 master = spi_alloc_master(&pdev->dev,
1627 sizeof(struct spi_imx_data));
1628 if (!master)
1629 return -ENOMEM;
1630
1631 ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
1632 if ((ret < 0) || (spi_drctl >= 0x3)) {
1633 /* '11' is reserved */
1634 spi_drctl = 0;
1635 }
1636
1637 platform_set_drvdata(pdev, master);
1638
1639 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
1640 master->bus_num = np ? -1 : pdev->id;
1641
1642 spi_imx = spi_master_get_devdata(master);
1643 spi_imx->bitbang.master = master;
1644 spi_imx->dev = &pdev->dev;
1645 spi_imx->slave_mode = slave_mode;
1646
1647 spi_imx->devtype_data = devtype_data;
1648
1649 /* Get number of chip selects, either platform data or OF */
1650 if (mxc_platform_info) {
1651 master->num_chipselect = mxc_platform_info->num_chipselect;
1652 if (mxc_platform_info->chipselect) {
1653 master->cs_gpios = devm_kcalloc(&master->dev,
1654 master->num_chipselect, sizeof(int),
1655 GFP_KERNEL);
1656 if (!master->cs_gpios)
1657 return -ENOMEM;
1658
1659 for (i = 0; i < master->num_chipselect; i++)
1660 master->cs_gpios[i] = mxc_platform_info->chipselect[i];
1661 }
1662 } else {
1663 u32 num_cs;
1664
1665 if (!of_property_read_u32(np, "num-cs", &num_cs))
1666 master->num_chipselect = num_cs;
1667 /* If not preset, default value of 1 is used */
1668 }
1669
1670 spi_imx->bitbang.chipselect = spi_imx_chipselect;
1671 spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
1672 spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
1673 spi_imx->bitbang.master->setup = spi_imx_setup;
1674 spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
1675 spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
1676 spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
1677 spi_imx->bitbang.master->slave_abort = spi_imx_slave_abort;
1678 spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
1679 | SPI_NO_CS;
1680 if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
1681 is_imx53_ecspi(spi_imx))
1682 spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;
1683
1684 spi_imx->spi_drctl = spi_drctl;
1685
1686 init_completion(&spi_imx->xfer_done);
1687
1688 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1689 spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
1690 if (IS_ERR(spi_imx->base)) {
1691 ret = PTR_ERR(spi_imx->base);
1692 goto out_master_put;
1693 }
1694 spi_imx->base_phys = res->start;
1695
1696 irq = platform_get_irq(pdev, 0);
1697 if (irq < 0) {
1698 ret = irq;
1699 goto out_master_put;
1700 }
1701
1702 ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
1703 dev_name(&pdev->dev), spi_imx);
1704 if (ret) {
1705 dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
1706 goto out_master_put;
1707 }
1708
1709 spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1710 if (IS_ERR(spi_imx->clk_ipg)) {
1711 ret = PTR_ERR(spi_imx->clk_ipg);
1712 goto out_master_put;
1713 }
1714
1715 spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
1716 if (IS_ERR(spi_imx->clk_per)) {
1717 ret = PTR_ERR(spi_imx->clk_per);
1718 goto out_master_put;
1719 }
1720
1721 ret = clk_prepare_enable(spi_imx->clk_per);
1722 if (ret)
1723 goto out_master_put;
1724
1725 ret = clk_prepare_enable(spi_imx->clk_ipg);
1726 if (ret)
1727 goto out_put_per;
1728
1729 spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
1730 /*
1731 * Only validated on i.mx35 and i.mx6 now, can remove the constraint
1732 * if validated on other chips.
1733 */
1734 if (spi_imx->devtype_data->has_dmamode) {
1735 ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
1736 if (ret == -EPROBE_DEFER)
1737 goto out_clk_put;
1738
1739 if (ret < 0)
1740 dev_err(&pdev->dev, "dma setup error %d, use pio\n",
1741 ret);
1742 }
1743
1744 spi_imx->devtype_data->reset(spi_imx);
1745
1746 spi_imx->devtype_data->intctrl(spi_imx, 0);
1747
1748 master->dev.of_node = pdev->dev.of_node;
1749 ret = spi_bitbang_start(&spi_imx->bitbang);
1750 if (ret) {
1751 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
1752 goto out_clk_put;
1753 }
1754
1755 /* Request GPIO CS lines, if any */
1756 if (!spi_imx->slave_mode && master->cs_gpios) {
1757 for (i = 0; i < master->num_chipselect; i++) {
1758 if (!gpio_is_valid(master->cs_gpios[i]))
1759 continue;
1760
1761 ret = devm_gpio_request(&pdev->dev,
1762 master->cs_gpios[i],
1763 DRIVER_NAME);
1764 if (ret) {
1765 dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
1766 master->cs_gpios[i]);
1767 goto out_spi_bitbang;
1768 }
1769 }
1770 }
1771
1772 dev_info(&pdev->dev, "probed\n");
1773
1774 clk_disable(spi_imx->clk_ipg);
1775 clk_disable(spi_imx->clk_per);
1776 return ret;
1777
1778out_spi_bitbang:
1779 spi_bitbang_stop(&spi_imx->bitbang);
1780out_clk_put:
1781 clk_disable_unprepare(spi_imx->clk_ipg);
1782out_put_per:
1783 clk_disable_unprepare(spi_imx->clk_per);
1784out_master_put:
1785 spi_master_put(master);
1786
1787 return ret;
1788}
1789
/*
 * Platform driver remove: unregister the controller, quiesce the
 * hardware with clocks briefly re-enabled, then release DMA channels
 * and the last master reference.
 */
static int spi_imx_remove(struct platform_device *pdev)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
        int ret;

        spi_bitbang_stop(&spi_imx->bitbang);

        /* Clocks are gated outside of messages; ungate to touch registers. */
        ret = clk_enable(spi_imx->clk_per);
        if (ret)
                return ret;

        ret = clk_enable(spi_imx->clk_ipg);
        if (ret) {
                clk_disable(spi_imx->clk_per);
                return ret;
        }

        /* Disable the controller, then fully release the clocks. */
        writel(0, spi_imx->base + MXC_CSPICTRL);
        clk_disable_unprepare(spi_imx->clk_ipg);
        clk_disable_unprepare(spi_imx->clk_per);
        spi_imx_sdma_exit(spi_imx);
        spi_master_put(master);

        return 0;
}
1816
/* Platform driver glue: binds via DT (of_match_table) or legacy id_table. */
static struct platform_driver spi_imx_driver = {
        .driver = {
                   .name = DRIVER_NAME,
                   .of_match_table = spi_imx_dt_ids,
                   },
        .id_table = spi_imx_devtype,
        .probe = spi_imx_probe,
        .remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("SPI Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);