/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "denali.h"

MODULE_LICENSE("GPL");

#define DENALI_NAND_NAME		"denali-nand"
#define DENALI_DEFAULT_OOB_SKIP_BYTES	8

/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL	0x00
#define DENALI_INDEXED_DATA	0x10

#define DENALI_MAP00		(0 << 26)	/* direct access to buffer */
#define DENALI_MAP01		(1 << 26)	/* read/write pages in PIO */
#define DENALI_MAP10		(2 << 26)	/* high-level control plane */
#define DENALI_MAP11		(3 << 26)	/* direct controller access */

/* MAP11 access cycle type */
#define DENALI_MAP11_CMD	((DENALI_MAP11) | 0)	/* command cycle */
#define DENALI_MAP11_ADDR	((DENALI_MAP11) | 1)	/* address cycle */
#define DENALI_MAP11_DATA	((DENALI_MAP11) | 2)	/* data cycle */
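
/*
 * Illustrative example (not from the original source): denali_cmd_ctrl()
 * below issues a command cycle to the active bank as
 *
 *	denali->host_write(denali, DENALI_BANK(denali) | DENALI_MAP11_CMD,
 *			   NAND_CMD_RESET);
 *
 * Address and data cycles are composed the same way with DENALI_MAP11_ADDR
 * and DENALI_MAP11_DATA.
 */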

/* MAP10 commands */
#define DENALI_ERASE		0x01

#define DENALI_BANK(denali)	((denali)->active_bank << 24)

#define DENALI_INVALID_BANK	-1
#define DENALI_NR_BANKS		4

static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}

/*
 * Direct Addressing - the slave address forms the control information
 * (command type, bank, block, and page address). The slave data is the
 * actual data to be transferred. This mode requires a 28-bit address region
 * to be allocated.
 */
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
	return ioread32(denali->host + addr);
}

static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
				u32 data)
{
	iowrite32(data, denali->host + addr);
}

/*
 * Indexed Addressing - an address translation module intervenes in passing
 * the control information. This mode reduces the required address range. The
 * control information and the transferred data are latched by registers in
 * the translation module.
 */
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	return ioread32(denali->host + DENALI_INDEXED_DATA);
}

static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
				 u32 data)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}

/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void denali_detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = ioread32(denali->reg + FEATURES);

	denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

	/* the encoding changed from rev 5.0 to 5.1 */
	if (denali->revision < 0x0501)
		denali->max_banks <<= 1;
}

static void denali_enable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(U32_MAX, denali->reg + INTR_EN(i));
	iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_disable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(0, denali->reg + INTR_EN(i));
	iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}

static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}

static void denali_clear_irq_all(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		denali_clear_irq(denali, i, U32_MAX);
}

static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	uint32_t irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		denali_clear_irq(denali, i, irq_status);

		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}

static void denali_reset_irq(struct denali_nand_info *denali)
{
	unsigned long flags;

	spin_lock_irqsave(&denali->irq_lock, flags);
	denali->irq_status = 0;
	denali->irq_mask = 0;
	spin_unlock_irqrestore(&denali->irq_lock, flags);
}

static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
				    uint32_t irq_mask)
{
	unsigned long time_left, flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			irq_mask);
		return 0;
	}

	return denali->irq_status;
}

static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
	unsigned long flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);
	irq_status = denali->irq_status;
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	return irq_status;
}

static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	int i;

	for (i = 0; i < len; i++)
		buf[i] = denali->host_read(denali, addr);
}

static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	int i;

	for (i = 0; i < len; i++)
		denali->host_write(denali, addr, buf[i]);
}

static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	uint16_t *buf16 = (uint16_t *)buf;
	int i;

	for (i = 0; i < len / 2; i++)
		buf16[i] = denali->host_read(denali, addr);
}

static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
			       int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
	const uint16_t *buf16 = (const uint16_t *)buf;
	int i;

	for (i = 0; i < len / 2; i++)
		denali->host_write(denali, addr, buf16[i]);
}

static uint8_t denali_read_byte(struct mtd_info *mtd)
{
	uint8_t byte;

	denali_read_buf(mtd, &byte, 1);

	return byte;
}

static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	denali_write_buf(mtd, &byte, 1);
}

static uint16_t denali_read_word(struct mtd_info *mtd)
{
	uint16_t word;

	denali_read_buf16(mtd, (uint8_t *)&word, 2);

	return word;
}

static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t type;

	if (ctrl & NAND_CLE)
		type = DENALI_MAP11_CMD;
	else if (ctrl & NAND_ALE)
		type = DENALI_MAP11_ADDR;
	else
		return;

	/*
	 * Some commands are followed by chip->dev_ready or chip->waitfunc.
	 * irq_status must be cleared here to catch the R/B# interrupt later.
	 */
	if (ctrl & NAND_CTRL_CHANGE)
		denali_reset_irq(denali);

	denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}

static int denali_dev_ready(struct mtd_info *mtd)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return !!(denali_check_irq(denali) & INTR__INT_ACT);
}

static int denali_check_erased_page(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, stat;

	for (i = 0; i < ecc_steps; i++) {
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						   ecc_code, ecc_bytes,
						   NULL, 0,
						   chip->ecc.strength);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}

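/*
 * Two ECC fixup strategies follow: denali_hw_ecc_fixup() is for controllers
 * that correct data in place (DENALI_CAP_HW_ECC_FIXUP) and only report
 * statistics, while denali_sw_ecc_fixup() is for controllers that report
 * each error location and value, leaving the correction to the driver.
 */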
static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when an uncorrectable error occurs in at
		 * least one ECC sector. We cannot know how many sectors, or
		 * which sector(s), are affected, so run the erased-page
		 * check for all of them.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips,
	 * which is suitable for the return value of the ->read_page()
	 * callback. Unfortunately, we cannot know the total number of
	 * corrected bits in the page, so increase the stats by max_bitflips
	 * as a compromise.
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}

static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing an ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is not smaller than ecc_size, the
			 * error happened in the OOB area, so ignore it;
			 * there is no need to correct it. err_device
			 * identifies which NAND device the error bits
			 * belong to when more than one NAND device is
			 * connected.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once all ECC errors have been handled, the controller triggers an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}

static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three-step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}

static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four-step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}

static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		*buf32++ = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}

static int denali_pio_write(struct denali_nand_info *denali,
			    const void *buf, size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	const uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status;
	int i;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		denali->host_write(denali, addr, *buf32++);

	irq_status = denali_wait_for_irq(denali,
				INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
	if (!(irq_status & INTR__PROGRAM_COMP))
		return -EIO;

	return 0;
}

static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	if (write)
		return denali_pio_write(denali, buf, size, page, raw);
	else
		return denali_pio_read(denali, buf, size, page, raw);
}

static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
	/*
	 * The ->setup_dma() hook kicks DMA by using the data/command
	 * interface, which belongs to a different AXI port from the
	 * register interface. Read back the register to avoid a race.
	 */
	ioread32(denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}

static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
			    size_t size, int page, int raw, int write)
{
	iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
	iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
		  denali->reg + TRANSFER_SPARE_REG);

	if (denali->dma_avail)
		return denali_dma_xfer(denali, buf, size, page, raw, write);
	else
		return denali_pio_xfer(denali, buf, size, page, raw, write);
}

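/*
 * The raw accessors below (denali_oob_xfer, denali_read_page_raw,
 * denali_write_page_raw) convert between the in-memory layout (payload,
 * then OOB) and the on-flash "syndrome" layout, where each ECC step's
 * payload is immediately followed by its ECC bytes, and the first
 * oob_skip_bytes of the spare area are reserved for the bad block marker.
 * A rough sketch (illustrative, two ECC steps):
 *
 *   |------------- writesize -------------|--------- oobsize ----------|
 *   | data0 | ecc0 | data1 | ecc1 (start) | BBM | ecc1 (rest) | free   |
 *
 * Whatever spills past the main area is pushed behind the reserved BBM
 * bytes, which is why the loops below recompute 'pos' and 'len'.
 */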
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	if (write)
		nand_prog_page_begin_op(chip, page, writesize, bufpoi,
					oob_skip);
	else
		nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		if (write)
			nand_change_write_column_op(chip, pos, bufpoi, len,
						    false);
		else
			nand_change_read_column_op(chip, pos, bufpoi, len,
						   false);
		bufpoi += len;
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			if (write)
				nand_change_write_column_op(chip, writesize +
							    oob_skip, bufpoi,
							    len, false);
			else
				nand_change_read_column_op(chip, writesize +
							   oob_skip, bufpoi,
							   len, false);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	if (write)
		nand_change_write_column_op(chip, size - len, bufpoi, len,
					    false);
	else
		nand_change_read_column_op(chip, size - len, bufpoi, len,
					   false);
}

static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, tmp_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, tmp_buf + size - len, len);
	}

	return 0;
}

static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			   int page)
{
	denali_oob_xfer(mtd, chip, page, 0);

	return 0;
}

static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali_reset_irq(denali);

	denali_oob_xfer(mtd, chip, page, 1);

	return nand_prog_page_end_op(chip);
}

static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		ret = denali_read_oob(mtd, chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}

static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first unless both the payload and the
	 * OOB data are supplied (a full-page transfer); this simplifies
	 * the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(tmp_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(tmp_buf + size - len, oob, len);
	}

	return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}

static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			     const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return denali_data_xfer(denali, (void *)buf, mtd->writesize,
				page, 0, 1);
}

static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali->active_bank = chip;
}

static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status;

	/* R/B# pin transitioned from low to high? */
	irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);

	return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}

static int denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status;

	denali_reset_irq(denali);

	denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
			   DENALI_ERASE);

	/* wait for erase to complete or failure to occur */
	irq_status = denali_wait_for_irq(denali,
					 INTR__ERASE_COMP | INTR__ERASE_FAIL);

	return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}

static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const struct nand_sdr_timings *timings;
	unsigned long t_x, mult_x;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_x)
		return -EINVAL;

	/*
	 * The bus interface clock, clk_x, is phase-aligned with the core
	 * clock. clk_x is an integral multiple N of the core clock, where N
	 * is configured at IP delivery time and can be 4, 5, or 6.
	 */
	mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
	if (mult_x < 4 || mult_x > 6)
		return -EINVAL;

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

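	/*
	 * Worked example (illustrative): with clk_x_rate = 200 MHz, t_x is
	 * 5000 ps, so a tREA_max of 20000 ps gives
	 * acc_clks = DIV_ROUND_UP(20000, 5000) = 4 cycles.
	 */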
	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	iowrite32(tmp, denali->reg + ACC_CLKS);

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	iowrite32(tmp, denali->reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	iowrite32(tmp, denali->reg + RE_2_RE);

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_x);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);

	/* tRP, tWP -> RDWR_EN_LO_CNT */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_x);
	rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	iowrite32(tmp, denali->reg + CS_SETUP_CNT);

	return 0;
}

static void denali_reset_banks(struct denali_nand_info *denali)
{
	u32 irq_status;
	int i;

	for (i = 0; i < denali->max_banks; i++) {
		denali->active_bank = i;

		denali_reset_irq(denali);

		iowrite32(DEVICE_RESET__BANK(i),
			  denali->reg + DEVICE_RESET);

		irq_status = denali_wait_for_irq(denali,
			INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
		if (!(irq_status & INTR__INT_ACT))
			break;
	}

	dev_dbg(denali->dev, "%d chips connected\n", i);
	denali->max_banks = i;
}

static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable. Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision = swab16(ioread32(denali->reg + REVISION));

	/*
	 * Set how many bytes should be skipped before writing data in OOB.
	 * If a non-zero value has already been set (by firmware or something),
	 * just use it. Otherwise, set the driver default.
	 */
	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
	if (!denali->oob_skip_bytes) {
		denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
		iowrite32(denali->oob_skip_bytes,
			  denali->reg + SPARE_AREA_SKIP_BYTES);
	}

	denali_detect_max_banks(denali);
	iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
}

int denali_calc_ecc_bytes(int step_size, int strength)
{
	/* BCH code. Denali requires ecc.bytes to be a multiple of 2 */
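	/*
	 * Worked example (illustrative): step_size = 512 and strength = 8
	 * give fls(4096) = 13 bits per symbol, so 8 * 13 = 104 bits, i.e.
	 * 13 bytes, rounded up to the next even value: 14 ECC bytes.
	 */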
	return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);

static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = denali->oob_skip_bytes;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int denali_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}
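
/*
 * Example (illustrative): with oob_skip_bytes = 8 and chip->ecc.total = 112,
 * the ECC region covers OOB bytes [8, 120) and the free region covers
 * [120, oobsize).
 */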

static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};

static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything necessary.
	 */
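	/*
	 * Example (illustrative): two x8 chips with 2048-byte pages behave
	 * as one chip with 4096-byte logical pages, which is why everything
	 * below is doubled.
	 */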
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected and is left
	 * at 0. Set it to 1 in that case.
	 */
1186 */
1187 if (denali->devs_per_cs == 0) {
1188 denali->devs_per_cs = 1;
1189 iowrite32(1, denali->reg + DEVICES_CONNECTED);
1190 }
1191
1192 if (denali->devs_per_cs == 1)
1193 return 0;
1194
1195 if (denali->devs_per_cs != 2) {
1196 dev_err(denali->dev, "unsupported number of devices %d\n",
1197 denali->devs_per_cs);
1198 return -EINVAL;
1199 }
1200
1201 /* 2 chips in parallel */
1202 mtd->size <<= 1;
1203 mtd->erasesize <<= 1;
1204 mtd->writesize <<= 1;
1205 mtd->oobsize <<= 1;
1206 chip->chipsize <<= 1;
1207 chip->page_shift += 1;
1208 chip->phys_erase_shift += 1;
1209 chip->bbt_erase_shift += 1;
1210 chip->chip_shift += 1;
1211 chip->pagemask <<= 1;
1212 chip->ecc.size <<= 1;
1213 chip->ecc.bytes <<= 1;
1214 chip->ecc.strength <<= 1;
1215 denali->oob_skip_bytes <<= 1;
1216
1217 return 0;
1218}
1219
1220static int denali_attach_chip(struct nand_chip *chip)
1221{
1222 struct mtd_info *mtd = nand_to_mtd(chip);
1223 struct denali_nand_info *denali = mtd_to_denali(mtd);
1224 int ret;
1225
1226 if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
1227 denali->dma_avail = 1;
1228
1229 if (denali->dma_avail) {
1230 int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;
1231
1232 ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
1233 if (ret) {
1234 dev_info(denali->dev,
1235 "Failed to set DMA mask. Disabling DMA.\n");
1236 denali->dma_avail = 0;
1237 }
1238 }
1239
1240 if (denali->dma_avail) {
1241 chip->options |= NAND_USE_BOUNCE_BUFFER;
1242 chip->buf_align = 16;
1243 if (denali->caps & DENALI_CAP_DMA_64BIT)
1244 denali->setup_dma = denali_setup_dma64;
1245 else
1246 denali->setup_dma = denali_setup_dma32;
1247 }
1248
1249 chip->bbt_options |= NAND_BBT_USE_FLASH;
1250 chip->bbt_options |= NAND_BBT_NO_OOB;
1251 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
1252 chip->options |= NAND_NO_SUBPAGE_WRITE;
1253
1254 ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
1255 mtd->oobsize - denali->oob_skip_bytes);
1256 if (ret) {
1257 dev_err(denali->dev, "Failed to setup ECC settings.\n");
1258 return ret;
1259 }
1260
1261 dev_dbg(denali->dev,
1262 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
1263 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
1264
1265 iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
1266 FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
1267 denali->reg + ECC_CORRECTION);
1268 iowrite32(mtd->erasesize / mtd->writesize,
1269 denali->reg + PAGES_PER_BLOCK);
1270 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
1271 denali->reg + DEVICE_WIDTH);
1272 iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
1273 denali->reg + TWO_ROW_ADDR_CYCLES);
1274 iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
1275 iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
1276
1277 iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
1278 iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
1279 /* chip->ecc.steps is set by nand_scan_tail(); not available here */
1280 iowrite32(mtd->writesize / chip->ecc.size,
1281 denali->reg + CFG_NUM_DATA_BLOCKS);
1282
1283 mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
1284
1285 if (chip->options & NAND_BUSWIDTH_16) {
1286 chip->read_buf = denali_read_buf16;
1287 chip->write_buf = denali_write_buf16;
1288 } else {
1289 chip->read_buf = denali_read_buf;
1290 chip->write_buf = denali_write_buf;
1291 }
1292 chip->ecc.read_page = denali_read_page;
1293 chip->ecc.read_page_raw = denali_read_page_raw;
1294 chip->ecc.write_page = denali_write_page;
1295 chip->ecc.write_page_raw = denali_write_page_raw;
1296 chip->ecc.read_oob = denali_read_oob;
1297 chip->ecc.write_oob = denali_write_oob;
1298 chip->erase = denali_erase;
1299
1300 ret = denali_multidev_fixup(denali);
1301 if (ret)
1302 return ret;
1303
1304 /*
1305 * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
1306 * use devm_kmalloc() because the memory allocated by devm_ does not
1307 * guarantee DMA-safe alignment.
1308 */
1309 denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
1310 if (!denali->buf)
1311 return -ENOMEM;
1312
1313 return 0;
1314}
1315
1316static void denali_detach_chip(struct nand_chip *chip)
1317{
1318 struct mtd_info *mtd = nand_to_mtd(chip);
1319 struct denali_nand_info *denali = mtd_to_denali(mtd);
1320
1321 kfree(denali->buf);
1322}
1323
1324static const struct nand_controller_ops denali_controller_ops = {
1325 .attach_chip = denali_attach_chip,
1326 .detach_chip = denali_detach_chip,
1327};
1328
1329int denali_init(struct denali_nand_info *denali)
1330{
1331 struct nand_chip *chip = &denali->nand;
1332 struct mtd_info *mtd = nand_to_mtd(chip);
1333 u32 features = ioread32(denali->reg + FEATURES);
1334 int ret;
1335
1336 mtd->dev.parent = denali->dev;
1337 denali_hw_init(denali);
1338
1339 init_completion(&denali->complete);
1340 spin_lock_init(&denali->irq_lock);
1341
1342 denali_clear_irq_all(denali);
1343
1344 ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
1345 IRQF_SHARED, DENALI_NAND_NAME, denali);
1346 if (ret) {
1347 dev_err(denali->dev, "Unable to request IRQ\n");
1348 return ret;
1349 }
1350
1351 denali_enable_irq(denali);
1352 denali_reset_banks(denali);
1353 if (!denali->max_banks) {
		/* Error out early if no chip was found for some reason. */
		ret = -ENODEV;
		goto disable_irq;
	}

	denali->active_bank = DENALI_INVALID_BANK;

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fall back to the default name if DT gave no "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	chip->select_chip = denali_select_chip;
	chip->read_byte = denali_read_byte;
	chip->write_byte = denali_write_byte;
	chip->read_word = denali_read_word;
	chip->cmd_ctrl = denali_cmd_ctrl;
	chip->dev_ready = denali_dev_ready;
	chip->waitfunc = denali_waitfunc;

	if (features & FEATURES__INDEX_ADDR) {
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}

	/* clk rate info is needed for setup_data_interface */
	if (denali->clk_rate && denali->clk_x_rate)
		chip->setup_data_interface = denali_setup_data_interface;

	chip->dummy_controller.ops = &denali_controller_ops;
	ret = nand_scan(mtd, denali->max_banks);
	if (ret)
		goto disable_irq;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto cleanup_nand;
	}

	return 0;

cleanup_nand:
	nand_cleanup(chip);
disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);

void denali_remove(struct denali_nand_info *denali)
{
	struct mtd_info *mtd = nand_to_mtd(&denali->nand);

	nand_release(mtd);
	denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);