Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 42d401e..6ddab79 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -170,6 +170,16 @@
 	  buffer in a flash partition where it can be read back at some
 	  later point.
 
+config MTD_PSTORE
+	tristate "Log panic/oops to an MTD buffer based on pstore"
+	depends on PSTORE_BLK
+	help
+	  This enables panic and oops messages to be logged to a circular
+	  buffer in a flash partition where it can be read back as files after
+	  mounting pstore filesystem.
+
+	  If unsure, say N.
+
 config MTD_SWAP
 	tristate "Swap on MTD device support"
 	depends on MTD && SWAP
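
Note on the new MTD_PSTORE option: it is a backend for pstore/blk, so panic
and oops records land in the MTD partition named on the kernel command line
and are read back through the pstore filesystem. A minimal usage sketch (the
partition name and sizes below are illustrative, not taken from this patch):

	CONFIG_PSTORE=y
	CONFIG_PSTORE_BLK=y
	CONFIG_MTD_PSTORE=y

	# boot with e.g. pstore_blk.blkdev=pstore pstore_blk.kmsg_size=64,
	# then read the records back as files:
	mount -t pstore pstore /sys/fs/pstore
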
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 56cc60c..593d059 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_SSFDC)		+= ssfdc.o
 obj-$(CONFIG_SM_FTL)		+= sm_ftl.o
 obj-$(CONFIG_MTD_OOPS)		+= mtdoops.o
+obj-$(CONFIG_MTD_PSTORE)	+= mtdpstore.o
 obj-$(CONFIG_MTD_SWAP)		+= mtdswap.o
 
 nftl-objs		:= nftlcore.o nftlmount.o
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index a7e47e0..19726eb 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -11,7 +11,7 @@
 	  AMD and other flash manufactures that provides a universal method
 	  for probing the capabilities of flash devices. If you wish to
 	  support any device that is CFI-compliant, you need to enable this
-	  option. Visit <http://www.amd.com/products/nvd/overview/cfi.html>
+	  option. Visit <https://www.amd.com/products/nvd/overview/cfi.html>
 	  for more information on CFI.
 
 config MTD_JEDECPROBE
@@ -55,12 +55,14 @@
 	  LITTLE_ENDIAN_BYTE, if the bytes are reversed.
 
 config MTD_CFI_NOSWAP
+	depends on !ARCH_IXP4XX || CPU_BIG_ENDIAN
 	bool "NO"
 
 config MTD_CFI_BE_BYTE_SWAP
 	bool "BIG_ENDIAN_BYTE"
 
 config MTD_CFI_LE_BYTE_SWAP
+	depends on !ARCH_IXP4XX
 	bool "LITTLE_ENDIAN_BYTE"
 
 endchoice
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 79a53cb..42001c4 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -420,8 +420,9 @@
 		extra_size = 0;
 
 		/* Protection Register info */
-		extra_size += (extp->NumProtectionFields - 1) *
-			      sizeof(struct cfi_intelext_otpinfo);
+		if (extp->NumProtectionFields)
+			extra_size += (extp->NumProtectionFields - 1) *
+				      sizeof(struct cfi_intelext_otpinfo);
 	}
 
 	if (extp->MinorVersion >= '1') {
@@ -695,14 +696,16 @@
 	 */
 	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
 	    && extp->FeatureSupport & (1 << 9)) {
+		int offs = 0;
 		struct cfi_private *newcfi;
 		struct flchip *chip;
 		struct flchip_shared *shared;
-		int offs, numregions, numparts, partshift, numvirtchips, i, j;
+		int numregions, numparts, partshift, numvirtchips, i, j;
 
 		/* Protection Register info */
-		offs = (extp->NumProtectionFields - 1) *
-		       sizeof(struct cfi_intelext_otpinfo);
+		if (extp->NumProtectionFields)
+			offs = (extp->NumProtectionFields - 1) *
+			       sizeof(struct cfi_intelext_otpinfo);
 
 		/* Burst Read info */
 		offs += extp->extra[offs+1]+2;
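
The two hunks above guard the same arithmetic: on a chip that reports
NumProtectionFields == 0, the old unconditional
(NumProtectionFields - 1) * sizeof(struct cfi_intelext_otpinfo) went
negative and corrupted every later offset into the extended query table. A
self-contained illustration of the failure mode (stand-in types and sizes,
not from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned char num_fields = 0;           /* what a quirky chip reports */
		int extra_size = (num_fields - 1) * 12; /* 12 stands in for sizeof(otpinfo) */

		printf("%d\n", extra_size);             /* prints -12: a bogus offset */
		return 0;
	}
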
@@ -834,7 +837,7 @@
 			/* Someone else might have been playing with it. */
 			return -EAGAIN;
 		}
-		/* Fall through */
+		fallthrough;
 	case FL_READY:
 	case FL_CFI_QUERY:
 	case FL_JEDEC_QUERY:
@@ -907,7 +910,7 @@
 		/* Only if there's no operation suspended... */
 		if (mode == FL_READY && chip->oldstate == FL_READY)
 			return 0;
-		/* Fall through */
+		fallthrough;
 	default:
 	sleep:
 		set_current_state(TASK_UNINTERRUPTIBLE);
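
The /* Fall through */ comments in this series become the fallthrough
pseudo-keyword. Roughly how the kernel defines it in
<linux/compiler_attributes.h> (paraphrased; see the header for the exact
feature-test guards):

	#if __has_attribute(__fallthrough__)
	# define fallthrough	__attribute__((__fallthrough__))
	#else
	# define fallthrough	do {} while (0)	/* fallthrough */
	#endif

Unlike a comment, the attribute survives preprocessing, so
-Wimplicit-fallthrough can check the intent under both GCC and Clang.
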
@@ -1353,7 +1356,7 @@
 {
 	unsigned long cmd_addr;
 	struct cfi_private *cfi = map->fldrv_priv;
-	int ret = 0;
+	int ret;
 
 	adr += chip->start;
 
@@ -1383,7 +1386,7 @@
 	struct cfi_private *cfi = map->fldrv_priv;
 	unsigned long ofs, last_end = 0;
 	int chipnum;
-	int ret = 0;
+	int ret;
 
 	if (!map->virt)
 		return -EINVAL;
@@ -1550,7 +1553,7 @@
 {
 	struct cfi_private *cfi = map->fldrv_priv;
 	map_word status, write_cmd;
-	int ret=0;
+	int ret;
 
 	adr += chip->start;
 
@@ -1624,7 +1627,7 @@
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
-	int ret = 0;
+	int ret;
 	int chipnum;
 	unsigned long ofs;
 
@@ -1871,7 +1874,7 @@
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
-	int ret = 0;
+	int ret;
 	int chipnum;
 	unsigned long ofs, vec_seek, i;
 	size_t len = 0;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 9c98dde..96a27e0 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -966,8 +966,7 @@
 		/* Only if there's no operation suspended... */
 		if (mode == FL_READY && chip->oldstate == FL_READY)
 			return 0;
-		/* fall through */
-
+		fallthrough;
 	default:
 	sleep:
 		set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1720,7 +1719,7 @@
 					   struct flchip *chip,
 					   unsigned long adr, int mode)
 {
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&chip->mutex);
 
@@ -1797,7 +1796,7 @@
 				     unsigned long adr, map_word datum,
 				     int mode)
 {
-	int ret = 0;
+	int ret;
 
 	adr += chip->start;
 
@@ -1821,7 +1820,7 @@
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
-	int ret = 0;
+	int ret;
 	int chipnum;
 	unsigned long ofs, chipstart;
 	DECLARE_WAITQUEUE(wait, current);
@@ -2025,7 +2024,7 @@
 				    int len)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
-	int ret = -EIO;
+	int ret;
 	unsigned long cmd_adr;
 	int z, words;
 	map_word datum;
@@ -2102,7 +2101,7 @@
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
-	int ret = 0;
+	int ret;
 	int chipnum;
 	unsigned long ofs;
 
@@ -2239,7 +2238,7 @@
 	struct cfi_private *cfi = map->fldrv_priv;
 	int retry_cnt = 0;
 	map_word oldd;
-	int ret = 0;
+	int ret;
 	int i;
 
 	adr += chip->start;
@@ -2314,7 +2313,7 @@
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
 	unsigned long ofs, chipstart;
-	int ret = 0;
+	int ret;
 	int chipnum;
 
 	chipnum = to >> cfi->chipshift;
@@ -2418,7 +2417,7 @@
 	unsigned long timeo = jiffies + HZ;
 	unsigned long int adr;
 	DECLARE_WAITQUEUE(wait, current);
-	int ret = 0;
+	int ret;
 	int retry_cnt = 0;
 
 	adr = cfi->addr_unlock1;
@@ -2517,7 +2516,7 @@
 	struct cfi_private *cfi = map->fldrv_priv;
 	unsigned long timeo = jiffies + HZ;
 	DECLARE_WAITQUEUE(wait, current);
-	int ret = 0;
+	int ret;
 	int retry_cnt = 0;
 
 	adr += chip->start;
@@ -2935,7 +2934,7 @@
 			 * as the whole point is that nobody can do anything
 			 * with the chip now anyway.
 			 */
-			/* fall through */
+			fallthrough;
 		case FL_SYNCING:
 			mutex_unlock(&chip->mutex);
 			break;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index e752067..270322b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -324,8 +324,7 @@
 	case FL_JEDEC_QUERY:
 		map_write(map, CMD(0x70), cmd_addr);
 		chip->state = FL_STATUS;
-		/* Fall through */
-
+		fallthrough;
 	case FL_STATUS:
 		status = map_read(map, cmd_addr);
 		if (map_word_andequal(map, status, status_OK, status_OK)) {
@@ -462,8 +461,7 @@
 #ifdef DEBUG_CFI_FEATURES
 	printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
 #endif
-		/* Fall through */
-
+		fallthrough;
 	case FL_STATUS:
 		status = map_read(map, cmd_adr);
 		if (map_word_andequal(map, status, status_OK, status_OK))
@@ -611,7 +609,7 @@
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
-	int ret = 0;
+	int ret;
 	int chipnum;
 	unsigned long ofs;
 
@@ -756,8 +754,7 @@
 	case FL_READY:
 		map_write(map, CMD(0x70), adr);
 		chip->state = FL_STATUS;
-		/* Fall through */
-
+		fallthrough;
 	case FL_STATUS:
 		status = map_read(map, adr);
 		if (map_word_andequal(map, status, status_OK, status_OK))
@@ -895,7 +892,7 @@
 {	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
 	unsigned long adr, len;
-	int chipnum, ret = 0;
+	int chipnum, ret;
 	int i, first;
 	struct mtd_erase_region_info *regions = mtd->eraseregions;
 
@@ -998,7 +995,7 @@
 			 * as the whole point is that nobody can do anything
 			 * with the chip now anyway.
 			 */
-			/* Fall through */
+			fallthrough;
 		case FL_SYNCING:
 			mutex_unlock(&chip->mutex);
 			break;
@@ -1054,8 +1051,7 @@
 	case FL_READY:
 		map_write(map, CMD(0x70), adr);
 		chip->state = FL_STATUS;
-		/* Fall through */
-
+		fallthrough;
 	case FL_STATUS:
 		status = map_read(map, adr);
 		if (map_word_andequal(map, status, status_OK, status_OK))
@@ -1132,7 +1128,7 @@
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
 	unsigned long adr;
-	int chipnum, ret = 0;
+	int chipnum, ret;
 #ifdef DEBUG_LOCK_BITS
 	int ofs_factor = cfi->interleave * cfi->device_type;
 #endif
@@ -1201,8 +1197,7 @@
 	case FL_READY:
 		map_write(map, CMD(0x70), adr);
 		chip->state = FL_STATUS;
-		/* Fall through */
-
+		fallthrough;
 	case FL_STATUS:
 		status = map_read(map, adr);
 		if (map_word_andequal(map, status, status_OK, status_OK))
@@ -1279,7 +1274,7 @@
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
 	unsigned long adr;
-	int chipnum, ret = 0;
+	int chipnum, ret;
 #ifdef DEBUG_LOCK_BITS
 	int ofs_factor = cfi->interleave * cfi->device_type;
 #endif
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index e3b266e..99b7986 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -26,7 +26,7 @@
 void cfi_udelay(int us)
 {
 	if (us >= 1000) {
-		msleep((us+999)/1000);
+		msleep(DIV_ROUND_UP(us, 1000));
 	} else {
 		udelay(us);
 		cond_resched();
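
DIV_ROUND_UP(n, d) is the kernel's ceiling-division helper, defined as
(((n) + (d) - 1) / (d)), so the replacement computes exactly what the
open-coded (us + 999) / 1000 did:

	/* DIV_ROUND_UP(1, 1000)    == (1 + 999) / 1000    == 1
	 * DIV_ROUND_UP(1000, 1000) == (1000 + 999) / 1000 == 1
	 * DIV_ROUND_UP(1001, 1000) == (1001 + 999) / 1000 == 2 */
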
@@ -109,13 +109,13 @@
 	case 8:
 		onecmd |= (onecmd << (chip_mode * 32));
 #endif
-		/* fall through */
+		fallthrough;
 	case 4:
 		onecmd |= (onecmd << (chip_mode * 16));
-		/* fall through */
+		fallthrough;
 	case 2:
 		onecmd |= (onecmd << (chip_mode * 8));
-		/* fall through */
+		fallthrough;
 	case 1:
 		;
 	}
@@ -165,13 +165,13 @@
 	case 8:
 		res |= (onestat >> (chip_mode * 32));
 #endif
-		/* fall through */
+		fallthrough;
 	case 4:
 		res |= (onestat >> (chip_mode * 16));
-		/* fall through */
+		fallthrough;
 	case 2:
 		res |= (onestat >> (chip_mode * 8));
-		/* fall through */
+		fallthrough;
 	case 1:
 		;
 	}
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index f96287c..0f4c2d8 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -91,7 +91,7 @@
 
 config MTD_SPEAR_SMI
 	tristate "SPEAR MTD NOR Support through SMI controller"
-	depends on PLAT_SPEAR
+	depends on PLAT_SPEAR || COMPILE_TEST
 	default y
 	help
 	  This enable SNOR support on SPEAR platforms using SMI controller
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index eccf2e5..3af50db 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -320,7 +320,7 @@
 	 * ChipCommon revision.
 	 */
 	if (b47s->bcma_cc->core->id.rev == 54)
-		b47s->window = ioremap_nocache(res->start, resource_size(res));
+		b47s->window = ioremap(res->start, resource_size(res));
 	else
 		b47s->window = ioremap_cache(res->start, resource_size(res));
 	if (!b47s->window) {
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 410a321..c08721b 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -44,7 +44,7 @@
 static LIST_HEAD(blkmtd_device_list);
 
 
-static struct page *page_read(struct address_space *mapping, int index)
+static struct page *page_read(struct address_space *mapping, pgoff_t index)
 {
 	return read_mapping_page(mapping, index, NULL);
 }
@@ -54,7 +54,7 @@
 {
 	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
 	struct page *page;
-	int index = to >> PAGE_SHIFT;	// page index
+	pgoff_t index = to >> PAGE_SHIFT;	// page index
 	int pages = len >> PAGE_SHIFT;
 	u_long *p;
 	u_long *max;
@@ -103,7 +103,7 @@
 {
 	struct block2mtd_dev *dev = mtd->priv;
 	struct page *page;
-	int index = from >> PAGE_SHIFT;
+	pgoff_t index = from >> PAGE_SHIFT;
 	int offset = from & (PAGE_SIZE-1);
 	int cpylen;
 
@@ -137,7 +137,7 @@
 {
 	struct page *page;
 	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
-	int index = to >> PAGE_SHIFT;	// page index
+	pgoff_t index = to >> PAGE_SHIFT;	// page index
 	int offset = to & ~PAGE_MASK;	// page offset
 	int cpylen;
 
@@ -329,10 +329,10 @@
 	switch (**endp) {
 	case 'G' :
 		result *= 1024;
-		/* fall through */
+		fallthrough;
 	case 'M':
 		result *= 1024;
-		/* fall through */
+		fallthrough;
 	case 'K':
 	case 'k':
 		result *= 1024;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index eb0f460..a030792 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -647,7 +647,7 @@
 
 	for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
 		ecc[i] = bitrev8(hwecc[i]);
-	numerrs = decode_bch(docg3->cascade->bch, NULL,
+	numerrs = bch_decode(docg3->cascade->bch, NULL,
 			     DOC_ECC_BCH_COVERED_BYTES,
 			     NULL, ecc, NULL, errorpos);
 	BUG_ON(numerrs == -EINVAL);
@@ -1984,8 +1984,8 @@
 		return ret;
 	cascade->base = base;
 	mutex_init(&cascade->lock);
-	cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
-			     DOC_ECC_BCH_PRIMPOLY);
+	cascade->bch = bch_init(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+				DOC_ECC_BCH_PRIMPOLY, false);
 	if (!cascade->bch)
 		return ret;
 
@@ -2021,7 +2021,7 @@
 	ret = -ENODEV;
 	dev_info(dev, "No supported DiskOnChip found\n");
 err_probe:
-	free_bch(cascade->bch);
+	bch_free(cascade->bch);
 	for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
 		if (cascade->floors[floor])
 			doc_release_device(cascade->floors[floor]);
@@ -2045,7 +2045,7 @@
 		if (cascade->floors[floor])
 			doc_release_device(cascade->floors[floor]);
 
-	free_bch(docg3->cascade->bch);
+	bch_free(docg3->cascade->bch);
 	return 0;
 }
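
These are mechanical renames to the BCH library API. As a sketch of
<linux/bch.h> after the rename (consult the header for the authoritative
prototypes), the new swap_bits flag selects reversed bit ordering, and
passing false keeps the behaviour docg3 had before:

	struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
				     bool swap_bits);
	void bch_free(struct bch_control *bch);
	int bch_decode(struct bch_control *bch, const uint8_t *data,
		       unsigned int len, const uint8_t *recv_ecc,
		       const uint8_t *calc_ecc, const unsigned int *syn,
		       unsigned int *errloc);
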
 
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 56f50d2..aecd441 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -436,7 +436,10 @@
 	 {
 		int gap = BUSWIDTH - (from & (BUSWIDTH - 1));
 
-		while (len && gap--) *buf++ = read8 (from++), len--;
+		while (len && gap--) {
+			*buf++ = read8 (from++);
+			len--;
+		}
 	 }
 
    /* now we read dwords until we reach a non-dword boundary */
@@ -518,7 +521,10 @@
 		i = n = 0;
 
 		while (gap--) tmp[i++] = 0xFF;
-		while (len && i < BUSWIDTH) tmp[i++] = buf[n++], len--;
+		while (len && i < BUSWIDTH) {
+			tmp[i++] = buf[n++];
+			len--;
+		}
 		while (i < BUSWIDTH) tmp[i++] = 0xFF;
 
 		if (!write_dword (aligned,*((__u32 *) tmp))) return (-EIO);
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index b50ec7e..087b5e8 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -148,10 +148,10 @@
 			switch (token[len - 2]) {
 			case 'G':
 				shift += 10;
-				/* fall through */
+				fallthrough;
 			case 'M':
 				shift += 10;
-				/* fall through */
+				fallthrough;
 			case 'k':
 				shift += 10;
 				token[len - 2] = 0;
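
In this parser each size suffix now falls through explicitly, accumulating
10 bits of shift per step, so the suffix becomes a power-of-two multiplier.
A worked example (illustrative):

	/* "16M": 'M' adds 10, falls through to 'k' adding 10 -> shift = 20,
	 *        parsed size = 16 << 20 = 16 MiB.
	 * "2G" : 'G' + 'M' + 'k' -> shift = 30, size = 2 << 30 = 2 GiB. */
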
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 47ad076..2e00862 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -793,7 +793,7 @@
 				     struct device_node *np)
 {
 	struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
-	struct device_node *pp = NULL;
+	struct device_node *pp;
 	const __be32 *addr;
 	u32 val;
 	int len;
@@ -812,10 +812,7 @@
 		return -ENOMEM;
 
 	/* Fill structs for each subnode (flash device) */
-	while ((pp = of_get_next_child(np, pp))) {
-		struct spear_smi_flash_info *flash_info;
-
-		flash_info = &pdata->board_flash_info[i];
+	for_each_child_of_node(np, pp) {
 		pdata->np[i] = pp;
 
 		/* Read base-addr and size from DT */
@@ -969,7 +966,6 @@
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		ret = -ENODEV;
-		dev_err(&pdev->dev, "invalid smi irq\n");
 		goto err;
 	}
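
Two small cleanups here: for_each_child_of_node() replaces the open-coded
of_get_next_child() loop and manages the child refcount across iterations,
and the dropped dev_err() was redundant because platform_get_irq() already
logs a message on failure. A minimal usage sketch (handle_child() is a
hypothetical helper):

	struct device_node *child;

	for_each_child_of_node(parent, child) {
		/* 'child' holds a reference inside the body; it is put
		 * automatically when the loop advances or terminates */
		if (handle_child(child)) {
			of_node_put(child);	/* only needed on early exit */
			break;
		}
	}
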
 
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index f4d1667..1888523 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -255,7 +255,6 @@
 struct stfsm {
 	struct device		*dev;
 	void __iomem		*base;
-	struct resource		*region;
 	struct mtd_info		mtd;
 	struct mutex		lock;
 	struct flash_info       *info;
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig
index a4d8968..46c7e40 100644
--- a/drivers/mtd/hyperbus/Kconfig
+++ b/drivers/mtd/hyperbus/Kconfig
@@ -22,4 +22,11 @@
 	 This is the driver for HyperBus controller on TI's AM65x and
 	 other SoCs
 
+config RPCIF_HYPERBUS
+	tristate "Renesas RPC-IF HyperBus driver"
+	depends on RENESAS_RPCIF
+	depends on MTD_CFI_BE_BYTE_SWAP
+	help
+	  This option includes Renesas RPC-IF HyperBus support.
+
 endif # MTD_HYPERBUS
diff --git a/drivers/mtd/hyperbus/Makefile b/drivers/mtd/hyperbus/Makefile
index 8a936e0..5fc2b51 100644
--- a/drivers/mtd/hyperbus/Makefile
+++ b/drivers/mtd/hyperbus/Makefile
@@ -2,3 +2,4 @@
 
 obj-$(CONFIG_MTD_HYPERBUS)	+= hyperbus-core.o
 obj-$(CONFIG_HBMC_AM654)	+= hbmc-am654.o
+obj-$(CONFIG_RPCIF_HYPERBUS)	+= rpc-if.o
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index 08d543b..a3439b7 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -1,8 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0
 //
-// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
 // Author: Vignesh Raghavendra <vigneshr@ti.com>
 
+#include <linux/completion.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -11,12 +15,20 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mux/consumer.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
+#include <linux/sched/task_stack.h>
 #include <linux/types.h>
 
 #define AM654_HBMC_CALIB_COUNT 25
 
+struct am654_hbmc_device_priv {
+	struct completion rx_dma_complete;
+	phys_addr_t device_base;
+	struct hyperbus_ctlr *ctlr;
+	struct dma_chan *rx_chan;
+};
+
 struct am654_hbmc_priv {
 	struct hyperbus_ctlr ctlr;
 	struct hyperbus_device hbdev;
@@ -51,14 +63,106 @@
 	return ret;
 }
 
+static void am654_hbmc_dma_callback(void *param)
+{
+	struct am654_hbmc_device_priv *priv = param;
+
+	complete(&priv->rx_dma_complete);
+}
+
+static int am654_hbmc_dma_read(struct am654_hbmc_device_priv *priv, void *to,
+			       unsigned long from, ssize_t len)
+
+{
+	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	struct dma_chan *rx_chan = priv->rx_chan;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t dma_dst, dma_src;
+	dma_cookie_t cookie;
+	int ret;
+
+	if (!priv->rx_chan || !virt_addr_valid(to) || object_is_on_stack(to))
+		return -EINVAL;
+
+	dma_dst = dma_map_single(rx_chan->device->dev, to, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(rx_chan->device->dev, dma_dst)) {
+		dev_dbg(priv->ctlr->dev, "DMA mapping failed\n");
+		return -EIO;
+	}
+
+	dma_src = priv->device_base + from;
+	tx = dmaengine_prep_dma_memcpy(rx_chan, dma_dst, dma_src, len, flags);
+	if (!tx) {
+		dev_err(priv->ctlr->dev, "device_prep_dma_memcpy error\n");
+		ret = -EIO;
+		goto unmap_dma;
+	}
+
+	reinit_completion(&priv->rx_dma_complete);
+	tx->callback = am654_hbmc_dma_callback;
+	tx->callback_param = priv;
+	cookie = dmaengine_submit(tx);
+
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(priv->ctlr->dev, "dma_submit_error %d\n", cookie);
+		goto unmap_dma;
+	}
+
+	dma_async_issue_pending(rx_chan);
+	if (!wait_for_completion_timeout(&priv->rx_dma_complete, msecs_to_jiffies(len + 1000))) {
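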
+		dmaengine_terminate_sync(rx_chan);
+		dev_err(priv->ctlr->dev, "DMA wait_for_completion_timeout\n");
+		ret = -ETIMEDOUT;
+	}
+
+unmap_dma:
+	dma_unmap_single(rx_chan->device->dev, dma_dst, len, DMA_FROM_DEVICE);
+	return ret;
+}
+
+static void am654_hbmc_read(struct hyperbus_device *hbdev, void *to,
+			    unsigned long from, ssize_t len)
+{
+	struct am654_hbmc_device_priv *priv = hbdev->priv;
+
+	if (len < SZ_1K || am654_hbmc_dma_read(priv, to, from, len))
+		memcpy_fromio(to, hbdev->map.virt + from, len);
+}
+
 static const struct hyperbus_ops am654_hbmc_ops = {
 	.calibrate = am654_hbmc_calibrate,
+	.copy_from = am654_hbmc_read,
 };
 
+static int am654_hbmc_request_mmap_dma(struct am654_hbmc_device_priv *priv)
+{
+	struct dma_chan *rx_chan;
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	rx_chan = dma_request_chan_by_mask(&mask);
+	if (IS_ERR(rx_chan)) {
+		if (PTR_ERR(rx_chan) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_dbg(priv->ctlr->dev, "No DMA channel available\n");
+		return 0;
+	}
+	priv->rx_chan = rx_chan;
+	init_completion(&priv->rx_dma_complete);
+
+	return 0;
+}
+
 static int am654_hbmc_probe(struct platform_device *pdev)
 {
+	struct device_node *np = pdev->dev.of_node;
+	struct am654_hbmc_device_priv *dev_priv;
 	struct device *dev = &pdev->dev;
 	struct am654_hbmc_priv *priv;
+	struct resource res;
 	int ret;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -67,6 +171,11 @@
 
 	platform_set_drvdata(pdev, priv);
 
+	priv->hbdev.np = of_get_next_child(np, NULL);
+	ret = of_address_to_resource(priv->hbdev.np, 0, &res);
+	if (ret)
+		return ret;
+
 	if (of_property_read_bool(dev->of_node, "mux-controls")) {
 		struct mux_control *control = devm_mux_control_get(dev, NULL);
 
@@ -81,27 +190,40 @@
 		priv->mux_ctrl = control;
 	}
 
-	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0) {
-		pm_runtime_put_noidle(dev);
-		goto disable_pm;
-	}
+	priv->hbdev.map.size = resource_size(&res);
+	priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
+	if (IS_ERR(priv->hbdev.map.virt))
+		return PTR_ERR(priv->hbdev.map.virt);
 
 	priv->ctlr.dev = dev;
 	priv->ctlr.ops = &am654_hbmc_ops;
 	priv->hbdev.ctlr = &priv->ctlr;
-	priv->hbdev.np = of_get_next_child(dev->of_node, NULL);
+
+	dev_priv = devm_kzalloc(dev, sizeof(*dev_priv), GFP_KERNEL);
+	if (!dev_priv) {
+		ret = -ENOMEM;
+		goto disable_mux;
+	}
+
+	priv->hbdev.priv = dev_priv;
+	dev_priv->device_base = res.start;
+	dev_priv->ctlr = &priv->ctlr;
+
+	ret = am654_hbmc_request_mmap_dma(dev_priv);
+	if (ret)
+		goto disable_mux;
+
 	ret = hyperbus_register_device(&priv->hbdev);
 	if (ret) {
 		dev_err(dev, "failed to register controller\n");
-		pm_runtime_put_sync(&pdev->dev);
-		goto disable_pm;
+		goto release_dma;
 	}
 
 	return 0;
-disable_pm:
-	pm_runtime_disable(dev);
+release_dma:
+	if (dev_priv->rx_chan)
+		dma_release_channel(dev_priv->rx_chan);
+disable_mux:
 	if (priv->mux_ctrl)
 		mux_control_deselect(priv->mux_ctrl);
 	return ret;
@@ -110,13 +232,15 @@
 static int am654_hbmc_remove(struct platform_device *pdev)
 {
 	struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
+	struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
 	int ret;
 
 	ret = hyperbus_unregister_device(&priv->hbdev);
 	if (priv->mux_ctrl)
 		mux_control_deselect(priv->mux_ctrl);
-	pm_runtime_put_sync(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
+
+	if (dev_priv->rx_chan)
+		dma_release_channel(dev_priv->rx_chan);
 
 	return ret;
 }
diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c
index 6af9ea3..2f9fc4e 100644
--- a/drivers/mtd/hyperbus/hyperbus-core.c
+++ b/drivers/mtd/hyperbus/hyperbus-core.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 //
-// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
 // Author: Vignesh Raghavendra <vigneshr@ti.com>
 
 #include <linux/err.h>
@@ -10,7 +10,6 @@
 #include <linux/mtd/map.h>
 #include <linux/mtd/mtd.h>
 #include <linux/of.h>
-#include <linux/of_address.h>
 #include <linux/types.h>
 
 static struct hyperbus_device *map_to_hbdev(struct map_info *map)
@@ -62,7 +61,6 @@
 	struct hyperbus_ctlr *ctlr;
 	struct device_node *np;
 	struct map_info *map;
-	struct resource res;
 	struct device *dev;
 	int ret;
 
@@ -73,22 +71,15 @@
 
 	np = hbdev->np;
 	ctlr = hbdev->ctlr;
-	if (!of_device_is_compatible(np, "cypress,hyperflash"))
+	if (!of_device_is_compatible(np, "cypress,hyperflash")) {
+		dev_err(ctlr->dev, "\"cypress,hyperflash\" compatible missing\n");
 		return -ENODEV;
+	}
 
 	hbdev->memtype = HYPERFLASH;
 
-	ret = of_address_to_resource(np, 0, &res);
-	if (ret)
-		return ret;
-
 	dev = ctlr->dev;
 	map = &hbdev->map;
-	map->size = resource_size(&res);
-	map->virt = devm_ioremap_resource(dev, &res);
-	if (IS_ERR(map->virt))
-		return PTR_ERR(map->virt);
-
 	map->name = dev_name(dev);
 	map->bankwidth = 2;
 	map->device_node = np;
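
The ioremap moves out of the core because not every HyperBus controller
memory-maps the flash directly; the RPC-IF driver added below issues reads
and writes through the controller instead. For reference, a sketch of the
hyperbus_ops hooks a controller can provide (based on
<linux/mtd/hyperbus.h>; see the header for the authoritative version):

	struct hyperbus_ops {
		u16 (*read16)(struct hyperbus_device *hbdev, unsigned long addr);
		void (*write16)(struct hyperbus_device *hbdev,
				unsigned long addr, u16 val);
		void (*copy_from)(struct hyperbus_device *hbdev, void *to,
				  unsigned long from, ssize_t len);
		void (*copy_to)(struct hyperbus_device *hbdev, unsigned long to,
				const void *from, ssize_t len);
		int (*calibrate)(struct hyperbus_device *hbdev);
	};
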
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
new file mode 100644
index 0000000..dc164c1
--- /dev/null
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux driver for RPC-IF HyperFlash
+ *
+ * Copyright (C) 2019-2020 Cogent Embedded, Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/hyperbus.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <memory/renesas-rpc-if.h>
+
+struct	rpcif_hyperbus {
+	struct rpcif rpc;
+	struct hyperbus_ctlr ctlr;
+	struct hyperbus_device hbdev;
+};
+
+static const struct rpcif_op rpcif_op_tmpl = {
+	.cmd = {
+		.buswidth = 8,
+		.ddr = true,
+	},
+	.ocmd = {
+		.buswidth = 8,
+		.ddr = true,
+	},
+	.addr = {
+		.nbytes = 1,
+		.buswidth = 8,
+		.ddr = true,
+	},
+	.data = {
+		.buswidth = 8,
+		.ddr = true,
+	},
+};
+
+static void rpcif_hb_prepare_read(struct rpcif *rpc, void *to,
+				  unsigned long from, ssize_t len)
+{
+	struct rpcif_op op = rpcif_op_tmpl;
+
+	op.cmd.opcode = HYPERBUS_RW_READ | HYPERBUS_AS_MEM;
+	op.addr.val = from >> 1;
+	op.dummy.buswidth = 1;
+	op.dummy.ncycles = 15;
+	op.data.dir = RPCIF_DATA_IN;
+	op.data.nbytes = len;
+	op.data.buf.in = to;
+
+	rpcif_prepare(rpc, &op, NULL, NULL);
+}
+
+static void rpcif_hb_prepare_write(struct rpcif *rpc, unsigned long to,
+				   void *from, ssize_t len)
+{
+	struct rpcif_op op = rpcif_op_tmpl;
+
+	op.cmd.opcode = HYPERBUS_RW_WRITE | HYPERBUS_AS_MEM;
+	op.addr.val = to >> 1;
+	op.data.dir = RPCIF_DATA_OUT;
+	op.data.nbytes = len;
+	op.data.buf.out = from;
+
+	rpcif_prepare(rpc, &op, NULL, NULL);
+}
+
+static u16 rpcif_hb_read16(struct hyperbus_device *hbdev, unsigned long addr)
+{
+	struct rpcif_hyperbus *hyperbus =
+		container_of(hbdev, struct rpcif_hyperbus, hbdev);
+	map_word data;
+
+	rpcif_hb_prepare_read(&hyperbus->rpc, &data, addr, 2);
+
+	rpcif_manual_xfer(&hyperbus->rpc);
+
+	return data.x[0];
+}
+
+static void rpcif_hb_write16(struct hyperbus_device *hbdev, unsigned long addr,
+			     u16 data)
+{
+	struct rpcif_hyperbus *hyperbus =
+		container_of(hbdev, struct rpcif_hyperbus, hbdev);
+
+	rpcif_hb_prepare_write(&hyperbus->rpc, addr, &data, 2);
+
+	rpcif_manual_xfer(&hyperbus->rpc);
+}
+
+static void rpcif_hb_copy_from(struct hyperbus_device *hbdev, void *to,
+			       unsigned long from, ssize_t len)
+{
+	struct rpcif_hyperbus *hyperbus =
+		container_of(hbdev, struct rpcif_hyperbus, hbdev);
+
+	rpcif_hb_prepare_read(&hyperbus->rpc, to, from, len);
+
+	rpcif_dirmap_read(&hyperbus->rpc, from, len, to);
+}
+
+static const struct hyperbus_ops rpcif_hb_ops = {
+	.read16 = rpcif_hb_read16,
+	.write16 = rpcif_hb_write16,
+	.copy_from = rpcif_hb_copy_from,
+};
+
+static int rpcif_hb_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rpcif_hyperbus *hyperbus;
+	int error;
+
+	hyperbus = devm_kzalloc(dev, sizeof(*hyperbus), GFP_KERNEL);
+	if (!hyperbus)
+		return -ENOMEM;
+
+	error = rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent);
+	if (error)
+		return error;
+
+	platform_set_drvdata(pdev, hyperbus);
+
+	rpcif_enable_rpm(&hyperbus->rpc);
+
+	rpcif_hw_init(&hyperbus->rpc, true);
+
+	hyperbus->hbdev.map.size = hyperbus->rpc.size;
+	hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
+
+	hyperbus->ctlr.dev = dev;
+	hyperbus->ctlr.ops = &rpcif_hb_ops;
+	hyperbus->hbdev.ctlr = &hyperbus->ctlr;
+	hyperbus->hbdev.np = of_get_next_child(pdev->dev.parent->of_node, NULL);
+	error = hyperbus_register_device(&hyperbus->hbdev);
+	if (error)
+		rpcif_disable_rpm(&hyperbus->rpc);
+
+	return error;
+}
+
+static int rpcif_hb_remove(struct platform_device *pdev)
+{
+	struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
+	int error = hyperbus_unregister_device(&hyperbus->hbdev);
+
+	rpcif_disable_rpm(&hyperbus->rpc);
+
+	return error;
+}
+
+static struct platform_driver rpcif_platform_driver = {
+	.probe	= rpcif_hb_probe,
+	.remove	= rpcif_hb_remove,
+	.driver	= {
+		.name	= "rpc-if-hyperflash",
+	},
+};
+
+module_platform_driver(rpcif_platform_driver);
+
+MODULE_DESCRIPTION("Renesas RPC-IF HyperFlash driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 54b176d..af16d34 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -130,7 +130,7 @@
 			 "    NoOfBootImageBlocks   = %d\n"
 			 "    NoOfBinaryPartitions  = %d\n"
 			 "    NoOfBDTLPartitions    = %d\n"
-			 "    BlockMultiplerBits    = %d\n"
+			 "    BlockMultiplierBits   = %d\n"
 			 "    FormatFlgs            = %d\n"
 			 "    OsakVersion           = 0x%x\n"
 			 "    PercentUsed           = %d\n",
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 9341a8a..ee063ba 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -94,6 +94,34 @@
 }
 EXPORT_SYMBOL(lpddr_cmdset);
 
+static void print_drs_error(unsigned int dsr)
+{
+	int prog_status = (dsr & DSR_RPS) >> 8;
+
+	if (!(dsr & DSR_AVAILABLE))
+		pr_notice("DSR.15: (0) Device not Available\n");
+	if ((prog_status & 0x03) == 0x03)
+		pr_notice("DSR.9,8: (11) Attempt to program invalid half with 41h command\n");
+	else if (prog_status & 0x02)
+		pr_notice("DSR.9,8: (10) Object Mode Program attempt in region with Control Mode data\n");
+	else if (prog_status &  0x01)
+		pr_notice("DSR.9,8: (01) Program attempt in region with Object Mode data\n");
+	if (!(dsr & DSR_READY_STATUS))
+		pr_notice("DSR.7: (0) Device is Busy\n");
+	if (dsr & DSR_ESS)
+		pr_notice("DSR.6: (1) Erase Suspended\n");
+	if (dsr & DSR_ERASE_STATUS)
+		pr_notice("DSR.5: (1) Erase/Blank check error\n");
+	if (dsr & DSR_PROGRAM_STATUS)
+		pr_notice("DSR.4: (1) Program Error\n");
+	if (dsr & DSR_VPPS)
+		pr_notice("DSR.3: (1) Vpp low detect, operation aborted\n");
+	if (dsr & DSR_PSS)
+		pr_notice("DSR.2: (1) Program suspended\n");
+	if (dsr & DSR_DPS)
+		pr_notice("DSR.1: (1) Aborted Erase/Program attempt on locked block\n");
+}
+
 static int wait_for_ready(struct map_info *map, struct flchip *chip,
 		unsigned int chip_op_time)
 {
@@ -304,8 +332,7 @@
 		/* Only if there's no operation suspended... */
 		if (mode == FL_READY && chip->oldstate == FL_READY)
 			return 0;
-		/* fall through */
-
+		fallthrough;
 	default:
 sleep:
 		set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index bc82305..fc0aaa0 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -75,6 +75,17 @@
 	  physically into the CPU's memory. The mapping description here is
 	  taken from OF device tree.
 
+config MTD_PHYSMAP_BT1_ROM
+	bool "Baikal-T1 Boot ROMs OF-based physical memory map handling"
+	depends on MTD_PHYSMAP_OF
+	depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+	select MTD_COMPLEX_MAPPINGS
+	select MULTIPLEXER
+	select MUX_MMIO
+	help
+	  This provides some extra DT physmap parsing for the Baikal-T1
+	  platforms, some detection and setting up ROMs-specific accessors.
+
 config MTD_PHYSMAP_VERSATILE
 	bool "ARM Versatile OF-based physical memory map handling"
 	depends on MTD_PHYSMAP_OF
@@ -96,6 +107,17 @@
 	  platforms, some detection and setting up parallel mode on the
 	  external interface.
 
+config MTD_PHYSMAP_IXP4XX
+	bool "Intel IXP4xx OF-based physical memory map handling"
+	depends on MTD_PHYSMAP_OF
+	depends on ARM
+	select MTD_COMPLEX_MAPPINGS
+	select MTD_CFI_BE_BYTE_SWAP if CPU_BIG_ENDIAN
+	default ARCH_IXP4XX
+	help
+	  This provides some extra DT physmap parsing for the Intel IXP4xx
+	  platforms, some elaborate endianness handling in particular.
+
 config MTD_PHYSMAP_GPIO_ADDR
 	bool "GPIO-assisted Flash Chip Support"
 	depends on MTD_PHYSMAP
@@ -299,11 +321,11 @@
 	help
 	  This provides a driver for the flash accessed using Intel's
 	  21285 bridge used with Intel's StrongARM processors. More info at
-	  <http://www.intel.com/design/bridge/docs/21285_documentation.htm>.
+	  <https://www.intel.com/design/bridge/docs/21285_documentation.htm>.
 
 config MTD_IXP4XX
 	tristate "CFI Flash device mapped on Intel IXP4xx based systems"
-	depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX
+	depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX && MTD_CFI_ADV_OPTIONS
 	help
 	  This enables MTD access to flash devices on platforms based
 	  on Intel's IXP4xx family of network processors such as the
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 1146009..79f018c 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -18,8 +18,10 @@
 obj-$(CONFIG_MTD_TSUNAMI)	+= tsunami_flash.o
 obj-$(CONFIG_MTD_PXA2XX)	+= pxa2xx-flash.o
 physmap-objs-y			+= physmap-core.o
+physmap-objs-$(CONFIG_MTD_PHYSMAP_BT1_ROM) += physmap-bt1-rom.o
 physmap-objs-$(CONFIG_MTD_PHYSMAP_VERSATILE) += physmap-versatile.o
 physmap-objs-$(CONFIG_MTD_PHYSMAP_GEMINI) += physmap-gemini.o
+physmap-objs-$(CONFIG_MTD_PHYSMAP_IXP4XX) += physmap-ixp4xx.o
 physmap-objs			:= $(physmap-objs-y)
 obj-$(CONFIG_MTD_PHYSMAP)	+= physmap.o
 obj-$(CONFIG_MTD_PISMO)		+= pismo.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 462fadb..42a95ba 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -163,7 +163,7 @@
 	/* FIXME handle registers 0x80 - 0x8C the bios region locks */
 
 	/* For write accesses caches are useless */
-	window->virt = ioremap_nocache(window->phys, window->size);
+	window->virt = ioremap(window->phys, window->size);
 	if (!window->virt) {
 		printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
 			window->phys, window->size);
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index c9b7b4d..4604942 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -191,7 +191,7 @@
 	/* FIXME handle registers 0x80 - 0x8C the bios region locks */
 
 	/* For write accesses caches are useless */
-	window->virt = ioremap_nocache(window->phys, window->size);
+	window->virt = ioremap(window->phys, window->size);
 	if (!window->virt) {
 		printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
 			window->phys, window->size);
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 5c27c69..85e1415 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -249,7 +249,7 @@
 	}
 
 	/* Map the firmware hub into my address space. */
-	window->virt = ioremap_nocache(window->phys, window->size);
+	window->virt = ioremap(window->phys, window->size);
 	if (!window->virt) {
 		printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
 			window->phys, window->size);
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 6b989f3..fda72c5 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -184,7 +184,7 @@
 	}
 
 	/* Map the firmware hub into my address space. */
-	window->virt = ioremap_nocache(window->phys, window->size);
+	window->virt = ioremap(window->phys, window->size);
 	if (!window->virt) {
 		printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
 			window->phys, window->size);
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 69503ae..d67b845 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -133,7 +133,7 @@
 	if (win_len < (CS0_START + CS0_SIZE))
 		return -ENXIO;
 
-	p->csr_base = ioremap_nocache(csr_phys, csr_len);
+	p->csr_base = ioremap(csr_phys, csr_len);
 	if (!p->csr_base)
 		return -ENOMEM;
 
@@ -152,7 +152,7 @@
 	p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
 	p->map.phys = win_phys + CS0_START;
 	p->map.size = CS0_SIZE;
-	p->map.virt = ioremap_nocache(p->map.phys, p->map.size);
+	p->map.virt = ioremap(p->map.phys, p->map.size);
 	if (!p->map.virt) {
 		err = -ENOMEM;
 		goto release;
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 876f12f..832b880 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -78,7 +78,7 @@
 		return -ENODEV;
 	}
 
-	l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
+	l440gx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
 
 	if (!l440gx_map.virt) {
 		printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
@@ -86,7 +86,7 @@
 		return -ENOMEM;
 	}
 	simple_map_init(&l440gx_map);
-	printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt);
+	pr_debug("window_addr = %p\n", l440gx_map.virt);
 
 	/* Setup the pm iobase resource
 	 * This code should move into some kind of generic bridge
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index abc52b7..0bb6516 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -82,10 +82,10 @@
 	printk(KERN_NOTICE "NetSc520 flash device: 0x%Lx at 0x%Lx\n",
 			(unsigned long long)netsc520_map.size,
 			(unsigned long long)netsc520_map.phys);
-	netsc520_map.virt = ioremap_nocache(netsc520_map.phys, netsc520_map.size);
+	netsc520_map.virt = ioremap(netsc520_map.phys, netsc520_map.size);
 
 	if (!netsc520_map.virt) {
-		printk("Failed to ioremap_nocache\n");
+		printk("Failed to ioremap\n");
 		return -EIO;
 	}
 
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 50046d4..7d34987 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -176,7 +176,7 @@
 #endif
 	int rc = 0;
 
-	nettel_mmcrp = (void *) ioremap_nocache(0xfffef000, 4096);
+	nettel_mmcrp = (void *) ioremap(0xfffef000, 4096);
 	if (nettel_mmcrp == NULL) {
 		printk("SNAPGEAR: failed to disable MMCR cache??\n");
 		return(-EIO);
@@ -217,7 +217,7 @@
 	__asm__ ("wbinvd");
 
 	nettel_amd_map.phys = amdaddr;
-	nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize);
+	nettel_amd_map.virt = ioremap(amdaddr, maxsize);
 	if (!nettel_amd_map.virt) {
 		printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
 		iounmap(nettel_mmcrp);
@@ -303,7 +303,7 @@
 	/* Probe for the size of the first Intel flash */
 	nettel_intel_map.size = maxsize;
 	nettel_intel_map.phys = intel0addr;
-	nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+	nettel_intel_map.virt = ioremap(intel0addr, maxsize);
 	if (!nettel_intel_map.virt) {
 		printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
 		rc = -EIO;
@@ -337,7 +337,7 @@
 	iounmap(nettel_intel_map.virt);
 
 	nettel_intel_map.size = maxsize;
-	nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+	nettel_intel_map.virt = ioremap(intel0addr, maxsize);
 	if (!nettel_intel_map.virt) {
 		printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
 		rc = -EIO;
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 9a49f8a..377ef0f 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -94,7 +94,7 @@
 	map->map.write = mtd_pci_write8,
 
 	map->map.size     = 0x00800000;
-	map->base         = ioremap_nocache(pci_resource_start(dev, 0),
+	map->base         = ioremap(pci_resource_start(dev, 0),
 					    pci_resource_len(dev, 0));
 
 	if (!map->base)
@@ -188,7 +188,7 @@
 	map->map.read = mtd_pci_read32,
 	map->map.write = mtd_pci_write32,
 	map->map.size     = len;
-	map->base         = ioremap_nocache(base, len);
+	map->base         = ioremap(base, len);
 
 	if (!map->base)
 		return -ENOMEM;
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 70bb403..2ac79e1 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -294,16 +294,15 @@
 }
 
 
-static DEFINE_SPINLOCK(pcmcia_vpp_lock);
+static DEFINE_MUTEX(pcmcia_vpp_lock);
 static int pcmcia_vpp_refcnt;
 static void pcmciamtd_set_vpp(struct map_info *map, int on)
 {
 	struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
 	struct pcmcia_device *link = dev->p_dev;
-	unsigned long flags;
 
 	pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
-	spin_lock_irqsave(&pcmcia_vpp_lock, flags);
+	mutex_lock(&pcmcia_vpp_lock);
 	if (on) {
 		if (++pcmcia_vpp_refcnt == 1)   /* first nested 'on' */
 			pcmcia_fixup_vpp(link, dev->vpp);
@@ -311,7 +310,7 @@
 		if (--pcmcia_vpp_refcnt == 0)   /* last nested 'off' */
 			pcmcia_fixup_vpp(link, 0);
 	}
-	spin_unlock_irqrestore(&pcmcia_vpp_lock, flags);
+	mutex_unlock(&pcmcia_vpp_lock);
 }
 
 
diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
new file mode 100644
index 0000000..d68ae75
--- /dev/null
+++ b/drivers/mtd/maps/physmap-bt1-rom.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ *   Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 Physically Mapped Internal ROM driver
+ */
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "physmap-bt1-rom.h"
+
+/*
+ * Baikal-T1 SoC ROMs are only accessible by the dword-aligned instructions.
+ * We have to take this into account when implementing the data read-methods.
+ * Note there is no need in bothering with endianness, since both Baikal-T1
+ * CPU and MMIO are LE.
+ */
+static map_word __xipram bt1_rom_map_read(struct map_info *map,
+					  unsigned long ofs)
+{
+	void __iomem *src = map->virt + ofs;
+	unsigned long shift;
+	map_word ret;
+	u32 data;
+
+	/* Read data within offset dword. */
+	shift = (unsigned long)src & 0x3;
+	data = readl_relaxed(src - shift);
+	if (!shift) {
+		ret.x[0] = data;
+		return ret;
+	}
+	ret.x[0] = data >> (shift * BITS_PER_BYTE);
+
+	/* Read data from the next dword. */
+	shift = 4 - shift;
+	if (ofs + shift >= map->size)
+		return ret;
+
+	data = readl_relaxed(src + shift);
+	ret.x[0] |= data << (shift * BITS_PER_BYTE);
+
+	return ret;
+}
+
+static void __xipram bt1_rom_map_copy_from(struct map_info *map,
+					   void *to, unsigned long from,
+					   ssize_t len)
+{
+	void __iomem *src = map->virt + from;
+	ssize_t shift, chunk;
+	u32 data;
+
+	if (len <= 0 || from >= map->size)
+		return;
+
+	/* Make sure we don't go over the map limit. */
+	len = min_t(ssize_t, map->size - from, len);
+
+	/*
+	 * Since requested data size can be pretty big we have to implement
+	 * the copy procedure as optimal as possible. That's why it's split
+	 * up into the next three stages: unaligned head, aligned body,
+	 * unaligned tail.
+	 */
+	shift = (ssize_t)src & 0x3;
+	if (shift) {
+		chunk = min_t(ssize_t, 4 - shift, len);
+		data = readl_relaxed(src - shift);
+		memcpy(to, (char *)&data + shift, chunk);
+		src += chunk;
+		to += chunk;
+		len -= chunk;
+	}
+
+	while (len >= 4) {
+		data = readl_relaxed(src);
+		memcpy(to, &data, 4);
+		src += 4;
+		to += 4;
+		len -= 4;
+	}
+
+	if (len) {
+		data = readl_relaxed(src);
+		memcpy(to, &data, len);
+	}
+}
+
+int of_flash_probe_bt1_rom(struct platform_device *pdev,
+			   struct device_node *np,
+			   struct map_info *map)
+{
+	struct device *dev = &pdev->dev;
+
+	/* It's supposed to be read-only MTD. */
+	if (!of_device_is_compatible(np, "mtd-rom")) {
+		dev_info(dev, "No mtd-rom compatible string\n");
+		return 0;
+	}
+
+	/* Multiplatform guard. */
+	if (!of_device_is_compatible(np, "baikal,bt1-int-rom"))
+		return 0;
+
+	/* Sanity check the device parameters retrieved from DTB. */
+	if (map->bankwidth != 4)
+		dev_warn(dev, "Bank width is supposed to be 32 bits wide\n");
+
+	map->read = bt1_rom_map_read;
+	map->copy_from = bt1_rom_map_copy_from;
+
+	return 0;
+}
diff --git a/drivers/mtd/maps/physmap-bt1-rom.h b/drivers/mtd/maps/physmap-bt1-rom.h
new file mode 100644
index 0000000..6782899
--- /dev/null
+++ b/drivers/mtd/maps/physmap-bt1-rom.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <linux/mtd/map.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_BT1_ROM
+int of_flash_probe_bt1_rom(struct platform_device *pdev,
+			   struct device_node *np,
+			   struct map_info *map);
+#else
+static inline
+int of_flash_probe_bt1_rom(struct platform_device *pdev,
+			   struct device_node *np,
+			   struct map_info *map)
+{
+	return 0;
+}
+#endif
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 21b556a..4f63b84 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -38,9 +38,12 @@
 #include <linux/mtd/cfi_endian.h>
 #include <linux/io.h>
 #include <linux/of_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/gpio/consumer.h>
 
+#include "physmap-bt1-rom.h"
 #include "physmap-gemini.h"
+#include "physmap-ixp4xx.h"
 #include "physmap-versatile.h"
 
 struct physmap_flash_info {
@@ -63,16 +66,18 @@
 {
 	struct physmap_flash_info *info;
 	struct physmap_flash_data *physmap_data;
-	int i, err;
+	int i, err = 0;
 
 	info = platform_get_drvdata(dev);
-	if (!info)
-		return 0;
+	if (!info) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	if (info->cmtd) {
 		err = mtd_device_unregister(info->cmtd);
 		if (err)
-			return err;
+			goto out;
 
 		if (info->cmtd != info->mtds[0])
 			mtd_concat_destroy(info->cmtd);
@@ -87,7 +92,10 @@
 	if (physmap_data && physmap_data->exit)
 		physmap_data->exit(dev);
 
-	return 0;
+out:
+	pm_runtime_put(&dev->dev);
+	pm_runtime_disable(&dev->dev);
+	return err;
 }
 
 static void physmap_set_vpp(struct map_info *map, int state)
@@ -366,10 +374,18 @@
 		info->maps[i].bankwidth = bankwidth;
 		info->maps[i].device_node = dp;
 
+		err = of_flash_probe_bt1_rom(dev, dp, &info->maps[i]);
+		if (err)
+			return err;
+
 		err = of_flash_probe_gemini(dev, dp, &info->maps[i]);
 		if (err)
 			return err;
 
+		err = of_flash_probe_ixp4xx(dev, dp, &info->maps[i]);
+		if (err)
+			return err;
+
 		err = of_flash_probe_versatile(dev, dp, &info->maps[i]);
 		if (err)
 			return err;
@@ -479,13 +495,19 @@
 		return -EINVAL;
 	}
 
+	pm_runtime_enable(&dev->dev);
+	pm_runtime_get_sync(&dev->dev);
+
 	if (dev->dev.of_node)
 		err = physmap_flash_of_init(dev);
 	else
 		err = physmap_flash_pdata_init(dev);
 
-	if (err)
+	if (err) {
+		pm_runtime_put(&dev->dev);
+		pm_runtime_disable(&dev->dev);
 		return err;
+	}
 
 	for (i = 0; i < info->nmaps; i++) {
 		struct resource *res;
@@ -500,7 +522,8 @@
 		dev_notice(&dev->dev, "physmap platform flash device: %pR\n",
 			   res);
 
-		info->maps[i].name = dev_name(&dev->dev);
+		if (!info->maps[i].name)
+			info->maps[i].name = dev_name(&dev->dev);
 
 		if (!info->maps[i].phys)
 			info->maps[i].phys = res->start;
diff --git a/drivers/mtd/maps/physmap-gemini.c b/drivers/mtd/maps/physmap-gemini.c
index a289c8b..d4a46e1 100644
--- a/drivers/mtd/maps/physmap-gemini.c
+++ b/drivers/mtd/maps/physmap-gemini.c
@@ -46,11 +46,6 @@
 
 #define FLASH_PARALLEL_HIGH_PIN_CNT	(1 << 20)	/* else low pin cnt */
 
-static const struct of_device_id syscon_match[] = {
-	{ .compatible = "cortina,gemini-syscon" },
-	{ },
-};
-
 struct gemini_flash {
 	struct device *dev;
 	struct pinctrl *p;
diff --git a/drivers/mtd/maps/physmap-ixp4xx.c b/drivers/mtd/maps/physmap-ixp4xx.c
new file mode 100644
index 0000000..6a05422
--- /dev/null
+++ b/drivers/mtd/maps/physmap-ixp4xx.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel IXP4xx OF physmap add-on
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the ixp4xx.c map driver, originally written by:
+ * Intel Corporation
+ * Deepak Saxena <dsaxena@mvista.com>
+ * Copyright (C) 2002 Intel Corporation
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ */
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
+#include "physmap-ixp4xx.h"
+
+/*
+ * Read/write a 16 bit word from flash address 'addr'.
+ *
+ * When the cpu is in little-endian mode it swizzles the address lines
+ * ('address coherency') so we need to undo the swizzling to ensure commands
+ * and the like end up on the correct flash address.
+ *
+ * To further complicate matters, due to the way the expansion bus controller
+ * handles 32 bit reads, the byte stream ABCD is stored on the flash as:
+ *     D15    D0
+ *     +---+---+
+ *     | A | B | 0
+ *     +---+---+
+ *     | C | D | 2
+ *     +---+---+
+ * This means that on LE systems each 16 bit word must be swapped. Note that
+ * this requires CONFIG_MTD_CFI_BE_BYTE_SWAP to be enabled to 'unswap' the CFI
+ * data and other flash commands which are always in D7-D0.
+ */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+
+static inline u16 flash_read16(void __iomem *addr)
+{
+	return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2)));
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+	__raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2));
+}
+
+#define	BYTE0(h)	((h) & 0xFF)
+#define	BYTE1(h)	(((h) >> 8) & 0xFF)
+
+#else
+
+static inline u16 flash_read16(const void __iomem *addr)
+{
+	return __raw_readw(addr);
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+	__raw_writew(d, addr);
+}
+
+#define	BYTE0(h)	(((h) >> 8) & 0xFF)
+#define	BYTE1(h)	((h) & 0xFF)
+#endif
+
+static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
+{
+	map_word val;
+
+	val.x[0] = flash_read16(map->virt + ofs);
+	return val;
+}
+
+/*
+ * The IXP4xx expansion bus only allows 16-bit wide accesses
+ * when attached to a 16-bit wide device (such as the 28F128J3A),
+ * so we can't just memcpy_fromio().
+ */
+static void ixp4xx_copy_from(struct map_info *map, void *to,
+			     unsigned long from, ssize_t len)
+{
+	u8 *dest = (u8 *) to;
+	void __iomem *src = map->virt + from;
+
+	if (len <= 0)
+		return;
+
+	if (from & 1) {
+		*dest++ = BYTE1(flash_read16(src-1));
+		src++;
+		--len;
+	}
+
+	while (len >= 2) {
+		u16 data = flash_read16(src);
+		*dest++ = BYTE0(data);
+		*dest++ = BYTE1(data);
+		src += 2;
+		len -= 2;
+	}
+
+	if (len > 0)
+		*dest++ = BYTE0(flash_read16(src));
+}
+
+static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+	flash_write16(d.x[0], map->virt + adr);
+}
+
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+			  struct device_node *np,
+			  struct map_info *map)
+{
+	struct device *dev = &pdev->dev;
+
+	/* Multiplatform guard */
+	if (!of_device_is_compatible(np, "intel,ixp4xx-flash"))
+		return 0;
+
+	map->read = ixp4xx_read16;
+	map->write = ixp4xx_write16;
+	map->copy_from = ixp4xx_copy_from;
+	map->copy_to = NULL;
+
+	dev_info(dev, "initialized Intel IXP4xx-specific physmap control\n");
+
+	return 0;
+}
diff --git a/drivers/mtd/maps/physmap-ixp4xx.h b/drivers/mtd/maps/physmap-ixp4xx.h
new file mode 100644
index 0000000..b0fc49b
--- /dev/null
+++ b/drivers/mtd/maps/physmap-ixp4xx.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/of.h>
+#include <linux/mtd/map.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_IXP4XX
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+			  struct device_node *np,
+			  struct map_info *map);
+#else
+static inline
+int of_flash_probe_ixp4xx(struct platform_device *pdev,
+			  struct device_node *np,
+			  struct map_info *map)
+{
+	return 0;
+}
+#endif
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 47602af..d3d4e98 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -34,7 +34,7 @@
 struct sa_info {
 	struct mtd_info		*mtd;
 	int			num_subdev;
-	struct sa_subdev_info	subdev[0];
+	struct sa_subdev_info	subdev[];
 };
 
 static DEFINE_SPINLOCK(sa1100_vpp_lock);
@@ -81,8 +81,7 @@
 	default:
 		printk(KERN_WARNING "SA1100 flash: unknown base address "
 		       "0x%08lx, assuming CS0\n", phys);
-		/* Fall through */
-
+		fallthrough;
 	case SA1100_CS0_PHYS:
 		subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
 		break;
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 03af2df..8ef7aec 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -6,7 +6,7 @@
  * The SC520CDP is an evaluation board for the Elan SC520 processor available
  * from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size,
  * and up to 512 KiB of 8-bit DIL Flash ROM.
- * For details see http://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html
+ * For details see https://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html
  */
 
 #include <linux/module.h>
@@ -174,8 +174,8 @@
 	int i, j;
 
 	/* map in SC520's MMCR area */
-	mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
-	if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */
+	mmcr = ioremap(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
+	if(!mmcr) { /* ioremap failed: skip the PAR reprogramming */
 		/* force physical address fields to BIOS defaults: */
 		for(i = 0; i < NUM_FLASH_BANKS; i++)
 			sc520cdp_map[i].phys = par_table[i].default_address;
@@ -225,10 +225,10 @@
 			(unsigned long long)sc520cdp_map[i].size,
 			(unsigned long long)sc520cdp_map[i].phys);
 
-		sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size);
+		sc520cdp_map[i].virt = ioremap(sc520cdp_map[i].phys, sc520cdp_map[i].size);
 
 		if (!sc520cdp_map[i].virt) {
-			printk("Failed to ioremap_nocache\n");
+			printk("Failed to ioremap\n");
 			for (j = 0; j < i; j++) {
 				if (mymtd[j]) {
 					map_destroy(mymtd[j]);
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 2afb253..57303f9 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -152,7 +152,7 @@
 	}
 
 	/* remap the IO window (w/o caching) */
-	scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW);
+	scb2_ioaddr = ioremap(SCB2_ADDR, SCB2_WINDOW);
 	if (!scb2_ioaddr) {
 		printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
 		if (!region_fail)
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index 6cfc878..70d6e86 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -56,10 +56,10 @@
 {
 	int rc = 0;
 
-	ts5500_map.virt = ioremap_nocache(ts5500_map.phys, ts5500_map.size);
+	ts5500_map.virt = ioremap(ts5500_map.phys, ts5500_map.size);
 
 	if (!ts5500_map.virt) {
-		printk(KERN_ERR "Failed to ioremap_nocache\n");
+		printk(KERN_ERR "Failed to ioremap\n");
 		rc = -EIO;
 		goto err2;
 	}
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 177bf13..a7ec947 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -40,7 +40,7 @@
 	u32 blocklen;
 	u32 writecnt;
 	u32 readcnt;
-	u32 removeable;
+	u32 removable;
 	int partition;
 	int read;
 	unsigned char *blockread;
@@ -619,7 +619,7 @@
 	card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
 	card->writecnt = basic_flash_data >> 12 & 0xF;
 	card->readcnt = basic_flash_data >> 8 & 0xF;
-	card->removeable = basic_flash_data >> 7 & 1;
+	card->removable = basic_flash_data >> 7 & 1;
 
 	card->partition = 0;
 
@@ -772,7 +772,6 @@
 
 static int probe_maple_vmu(struct device *dev)
 {
-	int error;
 	struct maple_device *mdev = to_maple_dev(dev);
 	struct maple_driver *mdrv = to_maple_driver(dev->driver);
 
@@ -780,11 +779,7 @@
 	mdev->fileerr_handler = vmu_file_error;
 	mdev->driver = mdrv;
 
-	error = vmu_connect(mdev);
-	if (error)
-		return error;
-
-	return 0;
+	return vmu_connect(mdev);
 }
 
 static int remove_maple_vmu(struct device *dev)
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index c06b532..32e52d8 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -89,8 +89,6 @@
 
 	ret = erase_write (mtd, mtdblk->cache_offset,
 			   mtdblk->cache_size, mtdblk->cache_data);
-	if (ret)
-		return ret;
 
 	/*
 	 * Here we could arguably set the cache state to STATE_CLEAN.
@@ -98,9 +96,14 @@
 	 * be notified if this content is altered on the flash by other
 	 * means.  Let's declare it empty and leave buffering tasks to
 	 * the buffer cache instead.
+	 *
+	 * If this cache_offset points to a bad block, data cannot be
+	 * written to the device. Clear cache_state to avoid writing to
+	 * bad blocks repeatedly.
 	 */
-	mtdblk->cache_state = STATE_EMPTY;
-	return 0;
+	if (ret == 0 || ret == -EIO)
+		mtdblk->cache_state = STATE_EMPTY;
+	return ret;
 }
 
 
@@ -294,12 +297,13 @@
 static int mtdblock_flush(struct mtd_blktrans_dev *dev)
 {
 	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
+	int ret;
 
 	mutex_lock(&mtdblk->cache_mutex);
-	write_cached_data(mtdblk);
+	ret = write_cached_data(mtdblk);
 	mutex_unlock(&mtdblk->cache_mutex);
 	mtd_sync(dev->mtd);
-	return 0;
+	return ret;
 }
 
 static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
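
Two fixes land in mtdblock: write_cached_data() no longer bails out
before invalidating the cache, and mtdblock_flush() finally propagates
its return value. The cache is declared empty on success and on -EIO,
so a cache page sitting on a bad block is not rewritten on every
subsequent flush. A standalone model of that policy (user space,
illustrative values):

    #include <errno.h>
    #include <stdio.h>

    enum { STATE_EMPTY, STATE_DIRTY };

    /* Empty the cache on success, and also on -EIO so a bad block is
     * not rewritten on every later flush; still report the error. */
    static int flush_cache(int write_ret, int *state)
    {
    	if (write_ret == 0 || write_ret == -EIO)
    		*state = STATE_EMPTY;
    	return write_ret;
    }

    int main(void)
    {
    	int st = STATE_DIRTY;
    	int ret = flush_cache(-EIO, &st);

    	printf("ret=%d state=%s\n", ret, st == STATE_EMPTY ? "empty" : "dirty");
    	return 0;
    }
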
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index ee437af..69fb5da 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -174,7 +174,7 @@
 			break;
 		case MTD_FILE_MODE_RAW:
 		{
-			struct mtd_oob_ops ops;
+			struct mtd_oob_ops ops = {};
 
 			ops.mode = MTD_OPS_RAW;
 			ops.datbuf = kbuf;
@@ -268,7 +268,7 @@
 
 		case MTD_FILE_MODE_RAW:
 		{
-			struct mtd_oob_ops ops;
+			struct mtd_oob_ops ops = {};
 
 			ops.mode = MTD_OPS_RAW;
 			ops.datbuf = kbuf;
@@ -349,15 +349,16 @@
 	uint64_t start, uint32_t length, void __user *ptr,
 	uint32_t __user *retp)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
 	struct mtd_file_info *mfi = file->private_data;
-	struct mtd_oob_ops ops;
+	struct mtd_oob_ops ops = {};
 	uint32_t retlen;
 	int ret = 0;
 
 	if (length > 4096)
 		return -EINVAL;
 
-	if (!mtd->_write_oob)
+	if (!master->_write_oob)
 		return -EOPNOTSUPP;
 
 	ops.ooblen = length;
@@ -391,7 +392,7 @@
 	uint32_t __user *retp)
 {
 	struct mtd_file_info *mfi = file->private_data;
-	struct mtd_oob_ops ops;
+	struct mtd_oob_ops ops = {};
 	int ret = 0;
 
 	if (length > 4096)
@@ -583,8 +584,9 @@
 static int mtdchar_write_ioctl(struct mtd_info *mtd,
 		struct mtd_write_req __user *argp)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
 	struct mtd_write_req req;
-	struct mtd_oob_ops ops;
+	struct mtd_oob_ops ops = {};
 	const void __user *usr_data, *usr_oob;
 	int ret;
 
@@ -594,9 +596,8 @@
 	usr_data = (const void __user *)(uintptr_t)req.usr_data;
 	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
 
-	if (!mtd->_write_oob)
+	if (!master->_write_oob)
 		return -EOPNOTSUPP;
-
 	ops.mode = req.mode;
 	ops.len = (size_t)req.len;
 	ops.ooblen = (size_t)req.ooblen;
@@ -632,6 +633,7 @@
 {
 	struct mtd_file_info *mfi = file->private_data;
 	struct mtd_info *mtd = mfi->mtd;
+	struct mtd_info *master = mtd_get_master(mtd);
 	void __user *argp = (void __user *)arg;
 	int ret = 0;
 	struct mtd_info_user info;
@@ -860,7 +862,7 @@
 	{
 		struct nand_oobinfo oi;
 
-		if (!mtd->ooblayout)
+		if (!master->ooblayout)
 			return -EOPNOTSUPP;
 
 		ret = get_oobinfo(mtd, &oi);
@@ -954,7 +956,7 @@
 	{
 		struct nand_ecclayout_user *usrlay;
 
-		if (!mtd->ooblayout)
+		if (!master->ooblayout)
 			return -EOPNOTSUPP;
 
 		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
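
The recurring "struct mtd_oob_ops ops = {};" change in mtdchar.c is a
real fix, not churn: each of these paths assigns only a subset of the
fields, so members such as ops.ooboffs previously held stack garbage.
The empty initializer zeroes every member, pointers included. A quick
demonstration with a reduced stand-in struct (not the kernel layout):

    #include <stddef.h>
    #include <stdio.h>

    struct oob_ops_model {	/* reduced stand-in for struct mtd_oob_ops */
    	unsigned int mode;
    	size_t len, retlen, ooblen, oobretlen;
    	unsigned int ooboffs;
    	unsigned char *datbuf, *oobbuf;
    };

    int main(void)
    {
    	struct oob_ops_model ops = {};	/* every field starts at 0/NULL */

    	printf("ooboffs=%u oobbuf=%p\n", ops.ooboffs, (void *)ops.oobbuf);
    	return 0;
    }
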
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 170a722..f685a58 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -103,6 +103,47 @@
 }
 
 static int
+concat_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+	     size_t * retlen, const u_char * buf)
+{
+	struct mtd_concat *concat = CONCAT(mtd);
+	int err = -EINVAL;
+	int i;
+	for (i = 0; i < concat->num_subdev; i++) {
+		struct mtd_info *subdev = concat->subdev[i];
+		size_t size, retsize;
+
+		if (to >= subdev->size) {
+			to -= subdev->size;
+			continue;
+		}
+		if (to + len > subdev->size)
+			size = subdev->size - to;
+		else
+			size = len;
+
+		err = mtd_panic_write(subdev, to, size, &retsize, buf);
+		if (err == -EOPNOTSUPP) {
+			printk(KERN_ERR "mtdconcat: Cannot write from panic without panic_write\n");
+			return err;
+		}
+		if (err)
+			break;
+
+		*retlen += retsize;
+		len -= size;
+		if (len == 0)
+			break;
+
+		err = -EINVAL;
+		buf += size;
+		to = 0;
+	}
+	return err;
+}
+
+
+static int
 concat_write(struct mtd_info *mtd, loff_t to, size_t len,
 	     size_t * retlen, const u_char * buf)
 {
@@ -600,6 +641,7 @@
 	int i;
 	size_t size;
 	struct mtd_concat *concat;
+	struct mtd_info *subdev_master = NULL;
 	uint32_t max_erasesize, curr_erasesize;
 	int num_erase_region;
 	int max_writebufsize = 0;
@@ -638,16 +680,24 @@
 	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
 	concat->mtd.oobsize = subdev[0]->oobsize;
 	concat->mtd.oobavail = subdev[0]->oobavail;
-	if (subdev[0]->_writev)
+
+	subdev_master = mtd_get_master(subdev[0]);
+	if (subdev_master->_writev)
 		concat->mtd._writev = concat_writev;
-	if (subdev[0]->_read_oob)
+	if (subdev_master->_read_oob)
 		concat->mtd._read_oob = concat_read_oob;
-	if (subdev[0]->_write_oob)
+	if (subdev_master->_write_oob)
 		concat->mtd._write_oob = concat_write_oob;
-	if (subdev[0]->_block_isbad)
+	if (subdev_master->_block_isbad)
 		concat->mtd._block_isbad = concat_block_isbad;
-	if (subdev[0]->_block_markbad)
+	if (subdev_master->_block_markbad)
 		concat->mtd._block_markbad = concat_block_markbad;
+	if (subdev_master->_panic_write)
+		concat->mtd._panic_write = concat_panic_write;
+	if (subdev_master->_read)
+		concat->mtd._read = concat_read;
+	if (subdev_master->_write)
+		concat->mtd._write = concat_write;
 
 	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
 
@@ -678,14 +728,22 @@
 				    subdev[i]->flags & MTD_WRITEABLE;
 		}
 
+		subdev_master = mtd_get_master(subdev[i]);
 		concat->mtd.size += subdev[i]->size;
 		concat->mtd.ecc_stats.badblocks +=
 			subdev[i]->ecc_stats.badblocks;
 		if (concat->mtd.writesize   !=  subdev[i]->writesize ||
 		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
 		    concat->mtd.oobsize    !=  subdev[i]->oobsize ||
-		    !concat->mtd._read_oob  != !subdev[i]->_read_oob ||
-		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
+		    !concat->mtd._read_oob  != !subdev_master->_read_oob ||
+		    !concat->mtd._write_oob != !subdev_master->_write_oob) {
+			/*
+			 * Check subdev[i] for the data members, since a
+			 * subdev's attributes may differ from those of the
+			 * master mtd device. Check the subdev's master mtd
+			 * device for the callbacks, because the master
+			 * decides which callbacks a subdev exposes.
+			 */
 			kfree(concat);
 			printk("Incompatible OOB or ECC data on \"%s\"\n",
 			       subdev[i]->name);
@@ -701,8 +759,6 @@
 	concat->mtd.name = name;
 
 	concat->mtd._erase = concat_erase;
-	concat->mtd._read = concat_read;
-	concat->mtd._write = concat_write;
 	concat->mtd._sync = concat_sync;
 	concat->mtd._lock = concat_lock;
 	concat->mtd._unlock = concat_unlock;
@@ -841,10 +897,7 @@
 	return &concat->mtd;
 }
 
-/*
- * This function destroys an MTD object obtained from concat_mtd_devs()
- */
-
+/* Cleans the context obtained from mtd_concat_create() */
 void mtd_concat_destroy(struct mtd_info *mtd)
 {
 	struct mtd_concat *concat = CONCAT(mtd);
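
concat_panic_write() above mirrors concat_write(): whole sub-devices
are skipped until the target offset lands inside one, then the buffer
is split at each sub-device boundary; a sub-device lacking
->_panic_write fails the whole request with -EOPNOTSUPP. The boundary
walk, modelled in user space with invented sizes:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long long size[] = { 0x100000, 0x80000, 0x200000 };
    	unsigned long long to = 0x140000, len = 0x60000;	/* spans 1 -> 2 */
    	int i;

    	for (i = 0; i < 3 && len; i++) {
    		unsigned long long chunk;

    		if (to >= size[i]) {	/* target not in this subdev yet */
    			to -= size[i];
    			continue;
    		}
    		chunk = (to + len > size[i]) ? size[i] - to : len;
    		printf("subdev %d: 0x%llx bytes at 0x%llx\n", i, chunk, to);
    		len -= chunk;
    		to = 0;			/* the next subdev starts at 0 */
    	}
    	return 0;
    }
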
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 32a76b8..a5197a4 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -335,7 +335,7 @@
 	.release	= mtd_release,
 };
 
-static int mtd_partid_show(struct seq_file *s, void *p)
+static int mtd_partid_debug_show(struct seq_file *s, void *p)
 {
 	struct mtd_info *mtd = s->private;
 
@@ -344,19 +344,9 @@
 	return 0;
 }
 
-static int mtd_partid_debugfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mtd_partid_show, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);
 
-static const struct file_operations mtd_partid_debug_fops = {
-	.open           = mtd_partid_debugfs_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.release        = single_release,
-};
-
-static int mtd_partname_show(struct seq_file *s, void *p)
+static int mtd_partname_debug_show(struct seq_file *s, void *p)
 {
 	struct mtd_info *mtd = s->private;
 
@@ -365,50 +355,28 @@
 	return 0;
 }
 
-static int mtd_partname_debugfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mtd_partname_show, inode->i_private);
-}
-
-static const struct file_operations mtd_partname_debug_fops = {
-	.open           = mtd_partname_debugfs_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);
 
 static struct dentry *dfs_dir_mtd;
 
 static void mtd_debugfs_populate(struct mtd_info *mtd)
 {
 	struct device *dev = &mtd->dev;
-	struct dentry *root, *dent;
+	struct dentry *root;
 
 	if (IS_ERR_OR_NULL(dfs_dir_mtd))
 		return;
 
 	root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
-	if (IS_ERR_OR_NULL(root)) {
-		dev_dbg(dev, "won't show data in debugfs\n");
-		return;
-	}
-
 	mtd->dbg.dfs_dir = root;
 
-	if (mtd->dbg.partid) {
-		dent = debugfs_create_file("partid", 0400, root, mtd,
-					   &mtd_partid_debug_fops);
-		if (IS_ERR_OR_NULL(dent))
-			dev_err(dev, "can't create debugfs entry for partid\n");
-	}
+	if (mtd->dbg.partid)
+		debugfs_create_file("partid", 0400, root, mtd,
+				    &mtd_partid_debug_fops);
 
-	if (mtd->dbg.partname) {
-		dent = debugfs_create_file("partname", 0400, root, mtd,
-					   &mtd_partname_debug_fops);
-		if (IS_ERR_OR_NULL(dent))
-			dev_err(dev,
-				"can't create debugfs entry for partname\n");
-	}
+	if (mtd->dbg.partname)
+		debugfs_create_file("partname", 0400, root, mtd,
+				    &mtd_partname_debug_fops);
 }
 
 #ifndef CONFIG_MMU
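
Two independent cleanups meet in the debugfs hunk above.
DEFINE_SHOW_ATTRIBUTE(name) generates the single_open() boilerplate
from a name##_show() function, hence the renamed show helpers. The
error checks disappear because debugfs_create_dir() and
debugfs_create_file() are explicitly safe to call with, and to return,
error pointers, so callers are not supposed to check. Roughly what the
macro expands to (simplified from include/linux/seq_file.h):

    static int mtd_partid_debug_open(struct inode *inode, struct file *file)
    {
    	return single_open(file, mtd_partid_debug_show, inode->i_private);
    }

    static const struct file_operations mtd_partid_debug_fops = {
    	.owner		= THIS_MODULE,
    	.open		= mtd_partid_debug_open,
    	.read		= seq_read,
    	.llseek		= seq_lseek,
    	.release	= single_release,
    };
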
@@ -468,13 +436,14 @@
 int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
 			      struct mtd_pairing_info *info)
 {
-	int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
+	struct mtd_info *master = mtd_get_master(mtd);
+	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
 
 	if (wunit < 0 || wunit >= npairs)
 		return -EINVAL;
 
-	if (mtd->pairing && mtd->pairing->get_info)
-		return mtd->pairing->get_info(mtd, wunit, info);
+	if (master->pairing && master->pairing->get_info)
+		return master->pairing->get_info(master, wunit, info);
 
 	info->group = 0;
 	info->pair = wunit;
@@ -510,15 +479,16 @@
 int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
 			      const struct mtd_pairing_info *info)
 {
-	int ngroups = mtd_pairing_groups(mtd);
-	int npairs = mtd_wunit_per_eb(mtd) / ngroups;
+	struct mtd_info *master = mtd_get_master(mtd);
+	int ngroups = mtd_pairing_groups(master);
+	int npairs = mtd_wunit_per_eb(master) / ngroups;
 
 	if (!info || info->pair < 0 || info->pair >= npairs ||
 	    info->group < 0 || info->group >= ngroups)
 		return -EINVAL;
 
-	if (mtd->pairing && mtd->pairing->get_wunit)
-		return mtd->pairing->get_wunit(mtd, info);
+	if (master->pairing && master->pairing->get_wunit)
+		return master->pairing->get_wunit(master, info);
 
 	return info->pair;
 }
@@ -536,10 +506,12 @@
  */
 int mtd_pairing_groups(struct mtd_info *mtd)
 {
-	if (!mtd->pairing || !mtd->pairing->ngroups)
+	struct mtd_info *master = mtd_get_master(mtd);
+
+	if (!master->pairing || !master->pairing->ngroups)
 		return 1;
 
-	return mtd->pairing->ngroups;
+	return master->pairing->ngroups;
 }
 EXPORT_SYMBOL_GPL(mtd_pairing_groups);
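
Most of the remaining mtdcore.c churn follows one rule: partitions are
now plain mtd_info nodes in a tree, so data members (size, flags,
ecc_stats, ...) are read from the device at hand, while callbacks and
absolute offsets always resolve through the master. These are the two
helpers doing the resolution, as they appear in
include/linux/mtd/mtd.h at this kernel version:

    static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
    {
    	while (mtd->parent)
    		mtd = mtd->parent;

    	return mtd;
    }

    static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs)
    {
    	while (mtd->parent) {
    		ofs += mtd->part.offset;
    		mtd = mtd->parent;
    	}

    	return ofs;
    }
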
 
@@ -599,6 +571,7 @@
 
 int add_mtd_device(struct mtd_info *mtd)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
 	struct mtd_notifier *not;
 	int i, error;
 
@@ -620,10 +593,23 @@
 		    (mtd->_read && mtd->_read_oob)))
 		return -EINVAL;
 
-	if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
+	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
 		    !(mtd->flags & MTD_NO_ERASE)))
 		return -EINVAL;
 
+	/*
+	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
+	 * master is an MLC NAND and has a proper pairing scheme defined.
+	 * We also reject masters that implement ->_writev() for now, because
+	 * NAND controller drivers don't implement this hook, and adding the
+	 * SLC -> MLC address/length conversion to this path is useless if we
+	 * don't have a user.
+	 */
+	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
+	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
+	     !master->pairing || master->_writev))
+		return -EINVAL;
+
 	mutex_lock(&mtd_table_mutex);
 
 	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
@@ -639,6 +625,14 @@
 	if (mtd->bitflip_threshold == 0)
 		mtd->bitflip_threshold = mtd->ecc_strength;
 
+	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+		int ngroups = mtd_pairing_groups(master);
+
+		mtd->erasesize /= ngroups;
+		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
+			    mtd->erasesize;
+	}
+
 	if (is_power_of_2(mtd->erasesize))
 		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
 	else
@@ -727,8 +721,6 @@
 
 	mutex_lock(&mtd_table_mutex);
 
-	debugfs_remove_recursive(mtd->dbg.dfs_dir);
-
 	if (idr_find(&mtd_idr, mtd->index) != mtd) {
 		ret = -ENODEV;
 		goto out_error;
@@ -744,6 +736,8 @@
 		       mtd->index, mtd->name, mtd->usecount);
 		ret = -EBUSY;
 	} else {
+		debugfs_remove_recursive(mtd->dbg.dfs_dir);
+
 		/* Try to remove the NVMEM provider */
 		if (mtd->nvmem)
 			nvmem_unregister(mtd->nvmem);
@@ -777,7 +771,8 @@
 		pr_debug("mtd device won't show a device symlink in sysfs\n");
 	}
 
-	mtd->orig_flags = mtd->flags;
+	INIT_LIST_HEAD(&mtd->partitions);
+	mutex_init(&mtd->master.partitions_lock);
 }
 
 /**
@@ -986,20 +981,28 @@
 
 int __get_mtd_device(struct mtd_info *mtd)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
 	int err;
 
-	if (!try_module_get(mtd->owner))
+	if (!try_module_get(master->owner))
 		return -ENODEV;
 
-	if (mtd->_get_device) {
-		err = mtd->_get_device(mtd);
+	if (master->_get_device) {
+		err = master->_get_device(mtd);
 
 		if (err) {
-			module_put(mtd->owner);
+			module_put(master->owner);
 			return err;
 		}
 	}
-	mtd->usecount++;
+
+	master->usecount++;
+
+	while (mtd->parent) {
+		mtd->usecount++;
+		mtd = mtd->parent;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__get_mtd_device);
@@ -1053,13 +1056,20 @@
 
 void __put_mtd_device(struct mtd_info *mtd)
 {
-	--mtd->usecount;
-	BUG_ON(mtd->usecount < 0);
+	struct mtd_info *master = mtd_get_master(mtd);
 
-	if (mtd->_put_device)
-		mtd->_put_device(mtd);
+	while (mtd->parent) {
+		--mtd->usecount;
+		BUG_ON(mtd->usecount < 0);
+		mtd = mtd->parent;
+	}
 
-	module_put(mtd->owner);
+	master->usecount--;
+
+	if (master->_put_device)
+		master->_put_device(master);
+
+	module_put(master->owner);
 }
 EXPORT_SYMBOL_GPL(__put_mtd_device);
 
@@ -1070,9 +1080,15 @@
  */
 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
-	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+	struct mtd_info *master = mtd_get_master(mtd);
+	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
+	struct erase_info adjinstr;
+	int ret;
 
-	if (!mtd->erasesize || !mtd->_erase)
+	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+	adjinstr = *instr;
+
+	if (!mtd->erasesize || !master->_erase)
 		return -ENOTSUPP;
 
 	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
@@ -1084,7 +1100,29 @@
 		return 0;
 
 	ledtrig_mtd_activity();
-	return mtd->_erase(mtd, instr);
+
+	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
+				master->erasesize;
+		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
+				master->erasesize) -
+			       adjinstr.addr;
+	}
+
+	adjinstr.addr += mst_ofs;
+
+	ret = master->_erase(master, &adjinstr);
+
+	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
+		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
+		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
+							 master);
+			instr->fail_addr *= mtd->erasesize;
+		}
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(mtd_erase);
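
For MTD_SLC_ON_MLC_EMULATION partitions, add_mtd_device() above shrinks
the advertised erasesize by the number of pairing groups, so mtd_erase()
has to scale user addresses back up, eraseblock by eraseblock, before
calling into the master. The arithmetic as a runnable model (128 KiB
master blocks, two pairing groups, invented offsets):

    #include <stdio.h>

    int main(void)
    {
    	unsigned long long master_eb = 128 * 1024, ngroups = 2;
    	unsigned long long part_eb = master_eb / ngroups;	/* 64 KiB */
    	unsigned long long addr = 3 * part_eb, len = 2 * part_eb;
    	unsigned long long adj_addr, adj_len;

    	adj_addr = (addr / part_eb) * master_eb;
    	adj_len = ((addr + len) / part_eb) * master_eb - adj_addr;

    	printf("SLC 0x%llx+0x%llx -> MLC 0x%llx+0x%llx\n",
    	       addr, len, adj_addr, adj_len);
    	return 0;
    }
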
 
@@ -1094,30 +1132,36 @@
 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
 	      void **virt, resource_size_t *phys)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
+
 	*retlen = 0;
 	*virt = NULL;
 	if (phys)
 		*phys = 0;
-	if (!mtd->_point)
+	if (!master->_point)
 		return -EOPNOTSUPP;
 	if (from < 0 || from >= mtd->size || len > mtd->size - from)
 		return -EINVAL;
 	if (!len)
 		return 0;
-	return mtd->_point(mtd, from, len, retlen, virt, phys);
+
+	from = mtd_get_master_ofs(mtd, from);
+	return master->_point(master, from, len, retlen, virt, phys);
 }
 EXPORT_SYMBOL_GPL(mtd_point);
 
 /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
 int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 {
-	if (!mtd->_unpoint)
+	struct mtd_info *master = mtd_get_master(mtd);
+
+	if (!master->_unpoint)
 		return -EOPNOTSUPP;
 	if (from < 0 || from >= mtd->size || len > mtd->size - from)
 		return -EINVAL;
 	if (!len)
 		return 0;
-	return mtd->_unpoint(mtd, from, len);
+	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
 }
 EXPORT_SYMBOL_GPL(mtd_unpoint);
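
The next hunk adds mtd_update_ecc_stats(): ECC counters are maintained
only on the master while I/O is in flight, so after a read the delta is
replayed onto every node from the partition up to, but excluding, the
master. The walk, modelled in user space:

    #include <stdio.h>

    struct node {
    	struct node *parent;
    	int corrected;
    };

    int main(void)
    {
    	struct node master = { NULL, 0 };
    	struct node part = { &master, 0 };
    	struct node subpart = { &part, 0 };
    	struct node *n;
    	int old, diff;

    	old = master.corrected;
    	master.corrected += 3;	/* a read reported 3 corrected bitflips */
    	diff = master.corrected - old;

    	for (n = &subpart; n->parent; n = n->parent)
    		n->corrected += diff;

    	printf("subpart=%d part=%d master=%d\n",
    	       subpart.corrected, part.corrected, master.corrected);
    	return 0;
    }
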
 
@@ -1144,6 +1188,25 @@
 }
 EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
 
+static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
+				 const struct mtd_ecc_stats *old_stats)
+{
+	struct mtd_ecc_stats diff;
+
+	if (master == mtd)
+		return;
+
+	diff = master->ecc_stats;
+	diff.failed -= old_stats->failed;
+	diff.corrected -= old_stats->corrected;
+
+	while (mtd->parent) {
+		mtd->ecc_stats.failed += diff.failed;
+		mtd->ecc_stats.corrected += diff.corrected;
+		mtd = mtd->parent;
+	}
+}
+
 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
 	     u_char *buf)
 {
@@ -1186,8 +1249,10 @@
 int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
 		    const u_char *buf)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
+
 	*retlen = 0;
-	if (!mtd->_panic_write)
+	if (!master->_panic_write)
 		return -EOPNOTSUPP;
 	if (to < 0 || to >= mtd->size || len > mtd->size - to)
 		return -EINVAL;
@@ -1195,10 +1260,11 @@
 		return -EROFS;
 	if (!len)
 		return 0;
-	if (!mtd->oops_panic_write)
-		mtd->oops_panic_write = true;
+	if (!master->oops_panic_write)
+		master->oops_panic_write = true;
 
-	return mtd->_panic_write(mtd, to, len, retlen, buf);
+	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
+				    retlen, buf);
 }
 EXPORT_SYMBOL_GPL(mtd_panic_write);
 
@@ -1235,9 +1301,107 @@
 	return 0;
 }
 
+static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
+			    struct mtd_oob_ops *ops)
+{
+	struct mtd_info *master = mtd_get_master(mtd);
+	int ret;
+
+	from = mtd_get_master_ofs(mtd, from);
+	if (master->_read_oob)
+		ret = master->_read_oob(master, from, ops);
+	else
+		ret = master->_read(master, from, ops->len, &ops->retlen,
+				    ops->datbuf);
+
+	return ret;
+}
+
+static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
+			     struct mtd_oob_ops *ops)
+{
+	struct mtd_info *master = mtd_get_master(mtd);
+	int ret;
+
+	to = mtd_get_master_ofs(mtd, to);
+	if (master->_write_oob)
+		ret = master->_write_oob(master, to, ops);
+	else
+		ret = master->_write(master, to, ops->len, &ops->retlen,
+				     ops->datbuf);
+
+	return ret;
+}
+
+static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
+			       struct mtd_oob_ops *ops)
+{
+	struct mtd_info *master = mtd_get_master(mtd);
+	int ngroups = mtd_pairing_groups(master);
+	int npairs = mtd_wunit_per_eb(master) / ngroups;
+	struct mtd_oob_ops adjops = *ops;
+	unsigned int wunit, oobavail;
+	struct mtd_pairing_info info;
+	int max_bitflips = 0;
+	u32 ebofs, pageofs;
+	loff_t base, pos;
+
+	ebofs = mtd_mod_by_eb(start, mtd);
+	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
+	info.group = 0;
+	info.pair = mtd_div_by_ws(ebofs, mtd);
+	pageofs = mtd_mod_by_ws(ebofs, mtd);
+	oobavail = mtd_oobavail(mtd, ops);
+
+	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
+		int ret;
+
+		if (info.pair >= npairs) {
+			info.pair = 0;
+			base += master->erasesize;
+		}
+
+		wunit = mtd_pairing_info_to_wunit(master, &info);
+		pos = mtd_wunit_to_offset(mtd, base, wunit);
+
+		adjops.len = ops->len - ops->retlen;
+		if (adjops.len > mtd->writesize - pageofs)
+			adjops.len = mtd->writesize - pageofs;
+
+		adjops.ooblen = ops->ooblen - ops->oobretlen;
+		if (adjops.ooblen > oobavail - adjops.ooboffs)
+			adjops.ooblen = oobavail - adjops.ooboffs;
+
+		if (read) {
+			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
+			if (ret > 0)
+				max_bitflips = max(max_bitflips, ret);
+		} else {
+			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
+		}
+
+		if (ret < 0)
+			return ret;
+
+		max_bitflips = max(max_bitflips, ret);
+		ops->retlen += adjops.retlen;
+		ops->oobretlen += adjops.oobretlen;
+		adjops.datbuf += adjops.retlen;
+		adjops.oobbuf += adjops.oobretlen;
+		adjops.ooboffs = 0;
+		pageofs = 0;
+		info.pair++;
+	}
+
+	return max_bitflips;
+}
+
 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
+	struct mtd_ecc_stats old_stats = master->ecc_stats;
 	int ret_code;
+
 	ops->retlen = ops->oobretlen = 0;
 
 	ret_code = mtd_check_oob_ops(mtd, from, ops);
@@ -1247,14 +1411,15 @@
 	ledtrig_mtd_activity();
 
 	/* Check the validity of a potential fallback on mtd->_read */
-	if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
+	if (!master->_read_oob && (!master->_read || ops->oobbuf))
 		return -EOPNOTSUPP;
 
-	if (mtd->_read_oob)
-		ret_code = mtd->_read_oob(mtd, from, ops);
+	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
 	else
-		ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
-				      ops->datbuf);
+		ret_code = mtd_read_oob_std(mtd, from, ops);
+
+	mtd_update_ecc_stats(mtd, master, &old_stats);
 
 	/*
 	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
@@ -1273,6 +1438,7 @@
 int mtd_write_oob(struct mtd_info *mtd, loff_t to,
 				struct mtd_oob_ops *ops)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
 	int ret;
 
 	ops->retlen = ops->oobretlen = 0;
@@ -1287,14 +1453,13 @@
 	ledtrig_mtd_activity();
 
 	/* Check the validity of a potential fallback on mtd->_write */
-	if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
+	if (!master->_write_oob && (!master->_write || ops->oobbuf))
 		return -EOPNOTSUPP;
 
-	if (mtd->_write_oob)
-		return mtd->_write_oob(mtd, to, ops);
-	else
-		return mtd->_write(mtd, to, ops->len, &ops->retlen,
-				   ops->datbuf);
+	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+		return mtd_io_emulated_slc(mtd, to, false, ops);
+
+	return mtd_write_oob_std(mtd, to, ops);
 }
 EXPORT_SYMBOL_GPL(mtd_write_oob);
 
@@ -1317,15 +1482,17 @@
 int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
 		      struct mtd_oob_region *oobecc)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
+
 	memset(oobecc, 0, sizeof(*oobecc));
 
-	if (!mtd || section < 0)
+	if (!master || section < 0)
 		return -EINVAL;
 
-	if (!mtd->ooblayout || !mtd->ooblayout->ecc)
+	if (!master->ooblayout || !master->ooblayout->ecc)
 		return -ENOTSUPP;
 
-	return mtd->ooblayout->ecc(mtd, section, oobecc);
+	return master->ooblayout->ecc(master, section, oobecc);
 }
 EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
 
@@ -1349,15 +1516,17 @@
 int mtd_ooblayout_free(struct mtd_info *mtd, int section,
 		       struct mtd_oob_region *oobfree)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
+
 	memset(oobfree, 0, sizeof(*oobfree));
 
-	if (!mtd || section < 0)
+	if (!master || section < 0)
 		return -EINVAL;
 
-	if (!mtd->ooblayout || !mtd->ooblayout->free)
+	if (!master->ooblayout || !master->ooblayout->free)
 		return -ENOTSUPP;
 
-	return mtd->ooblayout->free(mtd, section, oobfree);
+	return master->ooblayout->free(master, section, oobfree);
 }
 EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
 
@@ -1618,7 +1787,7 @@
  * @start: first ECC byte to set
  * @nbytes: number of ECC bytes to set
  *
- * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
+ * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
  *
  * Returns zero on success, a negative error code otherwise.
  */
@@ -1666,60 +1835,69 @@
 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
 			   struct otp_info *buf)
 {
-	if (!mtd->_get_fact_prot_info)
+	struct mtd_info *master = mtd_get_master(mtd);
+
+	if (!master->_get_fact_prot_info)
 		return -EOPNOTSUPP;
 	if (!len)
 		return 0;
-	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
+	return master->_get_fact_prot_info(master, len, retlen, buf);
 }
 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
 
 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
 			   size_t *retlen, u_char *buf)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
+
 	*retlen = 0;
-	if (!mtd->_read_fact_prot_reg)
+	if (!master->_read_fact_prot_reg)
 		return -EOPNOTSUPP;
 	if (!len)
 		return 0;
-	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
+	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
 }
 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
 
 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
 			   struct otp_info *buf)
 {
-	if (!mtd->_get_user_prot_info)
+	struct mtd_info *master = mtd_get_master(mtd);
+
+	if (!master->_get_user_prot_info)
 		return -EOPNOTSUPP;
 	if (!len)
 		return 0;
-	return mtd->_get_user_prot_info(mtd, len, retlen, buf);
+	return master->_get_user_prot_info(master, len, retlen, buf);
 }
 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
 
 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
 			   size_t *retlen, u_char *buf)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
+
 	*retlen = 0;
-	if (!mtd->_read_user_prot_reg)
+	if (!master->_read_user_prot_reg)
 		return -EOPNOTSUPP;
 	if (!len)
 		return 0;
-	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
+	return master->_read_user_prot_reg(master, from, len, retlen, buf);
 }
 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
 
 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
 			    size_t *retlen, u_char *buf)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
 	int ret;
 
 	*retlen = 0;
-	if (!mtd->_write_user_prot_reg)
+	if (!master->_write_user_prot_reg)
 		return -EOPNOTSUPP;
 	if (!len)
 		return 0;
-	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
+	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
 	if (ret)
 		return ret;
 
@@ -1733,80 +1911,134 @@
 
 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
 {
-	if (!mtd->_lock_user_prot_reg)
+	struct mtd_info *master = mtd_get_master(mtd);
+
+	if (!master->_lock_user_prot_reg)
 		return -EOPNOTSUPP;
 	if (!len)
 		return 0;
-	return mtd->_lock_user_prot_reg(mtd, from, len);
+	return master->_lock_user_prot_reg(master, from, len);
 }
 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
 
 /* Chip-supported device locking */
 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
-	if (!mtd->_lock)
+	struct mtd_info *master = mtd_get_master(mtd);
+
+	if (!master->_lock)
 		return -EOPNOTSUPP;
 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
 		return -EINVAL;
 	if (!len)
 		return 0;
-	return mtd->_lock(mtd, ofs, len);
+
+	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
+	}
+
+	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
 }
 EXPORT_SYMBOL_GPL(mtd_lock);
 
 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
-	if (!mtd->_unlock)
+	struct mtd_info *master = mtd_get_master(mtd);
+
+	if (!master->_unlock)
 		return -EOPNOTSUPP;
 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
 		return -EINVAL;
 	if (!len)
 		return 0;
-	return mtd->_unlock(mtd, ofs, len);
+
+	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
+	}
+
+	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
 }
 EXPORT_SYMBOL_GPL(mtd_unlock);
 
 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
-	if (!mtd->_is_locked)
+	struct mtd_info *master = mtd_get_master(mtd);
+
+	if (!master->_is_locked)
 		return -EOPNOTSUPP;
 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
 		return -EINVAL;
 	if (!len)
 		return 0;
-	return mtd->_is_locked(mtd, ofs, len);
+
+	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
+	}
+
+	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
 }
 EXPORT_SYMBOL_GPL(mtd_is_locked);
 
 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
+
 	if (ofs < 0 || ofs >= mtd->size)
 		return -EINVAL;
-	if (!mtd->_block_isreserved)
+	if (!master->_block_isreserved)
 		return 0;
-	return mtd->_block_isreserved(mtd, ofs);
+
+	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+
+	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
 }
 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
 
 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
+
 	if (ofs < 0 || ofs >= mtd->size)
 		return -EINVAL;
-	if (!mtd->_block_isbad)
+	if (!master->_block_isbad)
 		return 0;
-	return mtd->_block_isbad(mtd, ofs);
+
+	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+
+	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
 }
 EXPORT_SYMBOL_GPL(mtd_block_isbad);
 
 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
-	if (!mtd->_block_markbad)
+	struct mtd_info *master = mtd_get_master(mtd);
+	int ret;
+
+	if (!master->_block_markbad)
 		return -EOPNOTSUPP;
 	if (ofs < 0 || ofs >= mtd->size)
 		return -EINVAL;
 	if (!(mtd->flags & MTD_WRITEABLE))
 		return -EROFS;
-	return mtd->_block_markbad(mtd, ofs);
+
+	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+
+	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
+	if (ret)
+		return ret;
+
+	while (mtd->parent) {
+		mtd->ecc_stats.badblocks++;
+		mtd = mtd->parent;
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(mtd_block_markbad);
 
@@ -1856,12 +2088,17 @@
 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
 	       unsigned long count, loff_t to, size_t *retlen)
 {
+	struct mtd_info *master = mtd_get_master(mtd);
+
 	*retlen = 0;
 	if (!(mtd->flags & MTD_WRITEABLE))
 		return -EROFS;
-	if (!mtd->_writev)
+
+	if (!master->_writev)
 		return default_mtd_writev(mtd, vecs, count, to, retlen);
-	return mtd->_writev(mtd, vecs, count, to, retlen);
+
+	return master->_writev(master, vecs, count,
+			       mtd_get_master_ofs(mtd, to), retlen);
 }
 EXPORT_SYMBOL_GPL(mtd_writev);
 
@@ -1943,11 +2180,12 @@
 	struct backing_dev_info *bdi;
 	int ret;
 
-	bdi = bdi_alloc(GFP_KERNEL);
+	bdi = bdi_alloc(NUMA_NO_NODE);
 	if (!bdi)
 		return ERR_PTR(-ENOMEM);
+	bdi->ra_pages = 0;
+	bdi->io_pages = 0;
 
-	bdi->name = name;
 	/*
 	 * We put '-0' suffix to the name to get the same name format as we
 	 * used to get. Since this is called only once, we get a unique name. 
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 7328c06..5725818 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -20,339 +20,56 @@
 
 #include "mtdcore.h"
 
-/* Our partition linked list */
-static LIST_HEAD(mtd_partitions);
-static DEFINE_MUTEX(mtd_partitions_mutex);
-
-/**
- * struct mtd_part - our partition node structure
- *
- * @mtd: struct holding partition details
- * @parent: parent mtd - flash device or another partition
- * @offset: partition offset relative to the *flash device*
- */
-struct mtd_part {
-	struct mtd_info mtd;
-	struct mtd_info *parent;
-	uint64_t offset;
-	struct list_head list;
-};
-
-/*
- * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
- * the pointer to that structure.
- */
-static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
-{
-	return container_of(mtd, struct mtd_part, mtd);
-}
-
-static u64 part_absolute_offset(struct mtd_info *mtd)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-
-	if (!mtd_is_partition(mtd))
-		return 0;
-
-	return part_absolute_offset(part->parent) + part->offset;
-}
-
 /*
  * MTD methods which simply translate the effective address and pass through
  * to the _real_ device.
  */
 
-static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
-		size_t *retlen, u_char *buf)
+static inline void free_partition(struct mtd_info *mtd)
 {
-	struct mtd_part *part = mtd_to_part(mtd);
-	struct mtd_ecc_stats stats;
-	int res;
-
-	stats = part->parent->ecc_stats;
-	res = part->parent->_read(part->parent, from + part->offset, len,
-				  retlen, buf);
-	if (unlikely(mtd_is_eccerr(res)))
-		mtd->ecc_stats.failed +=
-			part->parent->ecc_stats.failed - stats.failed;
-	else
-		mtd->ecc_stats.corrected +=
-			part->parent->ecc_stats.corrected - stats.corrected;
-	return res;
+	kfree(mtd->name);
+	kfree(mtd);
 }
 
-static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
-		size_t *retlen, void **virt, resource_size_t *phys)
+static struct mtd_info *allocate_partition(struct mtd_info *parent,
+					   const struct mtd_partition *part,
+					   int partno, uint64_t cur_offset)
 {
-	struct mtd_part *part = mtd_to_part(mtd);
-
-	return part->parent->_point(part->parent, from + part->offset, len,
-				    retlen, virt, phys);
-}
-
-static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-
-	return part->parent->_unpoint(part->parent, from + part->offset, len);
-}
-
-static int part_read_oob(struct mtd_info *mtd, loff_t from,
-		struct mtd_oob_ops *ops)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	struct mtd_ecc_stats stats;
-	int res;
-
-	stats = part->parent->ecc_stats;
-	res = part->parent->_read_oob(part->parent, from + part->offset, ops);
-	if (unlikely(mtd_is_eccerr(res)))
-		mtd->ecc_stats.failed +=
-			part->parent->ecc_stats.failed - stats.failed;
-	else
-		mtd->ecc_stats.corrected +=
-			part->parent->ecc_stats.corrected - stats.corrected;
-	return res;
-}
-
-static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
-		size_t len, size_t *retlen, u_char *buf)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_read_user_prot_reg(part->parent, from, len,
-						 retlen, buf);
-}
-
-static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
-				   size_t *retlen, struct otp_info *buf)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_get_user_prot_info(part->parent, len, retlen,
-						 buf);
-}
-
-static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
-		size_t len, size_t *retlen, u_char *buf)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_read_fact_prot_reg(part->parent, from, len,
-						 retlen, buf);
-}
-
-static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
-				   size_t *retlen, struct otp_info *buf)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_get_fact_prot_info(part->parent, len, retlen,
-						 buf);
-}
-
-static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
-		size_t *retlen, const u_char *buf)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_write(part->parent, to + part->offset, len,
-				    retlen, buf);
-}
-
-static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
-		size_t *retlen, const u_char *buf)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_panic_write(part->parent, to + part->offset, len,
-					  retlen, buf);
-}
-
-static int part_write_oob(struct mtd_info *mtd, loff_t to,
-		struct mtd_oob_ops *ops)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-
-	return part->parent->_write_oob(part->parent, to + part->offset, ops);
-}
-
-static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
-		size_t len, size_t *retlen, u_char *buf)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_write_user_prot_reg(part->parent, from, len,
-						  retlen, buf);
-}
-
-static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
-		size_t len)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_lock_user_prot_reg(part->parent, from, len);
-}
-
-static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
-		unsigned long count, loff_t to, size_t *retlen)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_writev(part->parent, vecs, count,
-				     to + part->offset, retlen);
-}
-
-static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	int ret;
-
-	instr->addr += part->offset;
-	ret = part->parent->_erase(part->parent, instr);
-	if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
-		instr->fail_addr -= part->offset;
-	instr->addr -= part->offset;
-
-	return ret;
-}
-
-static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_lock(part->parent, ofs + part->offset, len);
-}
-
-static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_unlock(part->parent, ofs + part->offset, len);
-}
-
-static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_is_locked(part->parent, ofs + part->offset, len);
-}
-
-static void part_sync(struct mtd_info *mtd)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	part->parent->_sync(part->parent);
-}
-
-static int part_suspend(struct mtd_info *mtd)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_suspend(part->parent);
-}
-
-static void part_resume(struct mtd_info *mtd)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	part->parent->_resume(part->parent);
-}
-
-static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	ofs += part->offset;
-	return part->parent->_block_isreserved(part->parent, ofs);
-}
-
-static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	ofs += part->offset;
-	return part->parent->_block_isbad(part->parent, ofs);
-}
-
-static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	int res;
-
-	ofs += part->offset;
-	res = part->parent->_block_markbad(part->parent, ofs);
-	if (!res)
-		mtd->ecc_stats.badblocks++;
-	return res;
-}
-
-static int part_get_device(struct mtd_info *mtd)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	return part->parent->_get_device(part->parent);
-}
-
-static void part_put_device(struct mtd_info *mtd)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-	part->parent->_put_device(part->parent);
-}
-
-static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
-			      struct mtd_oob_region *oobregion)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-
-	return mtd_ooblayout_ecc(part->parent, section, oobregion);
-}
-
-static int part_ooblayout_free(struct mtd_info *mtd, int section,
-			       struct mtd_oob_region *oobregion)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-
-	return mtd_ooblayout_free(part->parent, section, oobregion);
-}
-
-static const struct mtd_ooblayout_ops part_ooblayout_ops = {
-	.ecc = part_ooblayout_ecc,
-	.free = part_ooblayout_free,
-};
-
-static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
-{
-	struct mtd_part *part = mtd_to_part(mtd);
-
-	return part->parent->_max_bad_blocks(part->parent,
-					     ofs + part->offset, len);
-}
-
-static inline void free_partition(struct mtd_part *p)
-{
-	kfree(p->mtd.name);
-	kfree(p);
-}
-
-static struct mtd_part *allocate_partition(struct mtd_info *parent,
-			const struct mtd_partition *part, int partno,
-			uint64_t cur_offset)
-{
-	int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
-							    parent->erasesize;
-	struct mtd_part *slave;
+	struct mtd_info *master = mtd_get_master(parent);
+	int wr_alignment = (parent->flags & MTD_NO_ERASE) ?
+			   master->writesize : master->erasesize;
+	u64 parent_size = mtd_is_partition(parent) ?
+			  parent->part.size : parent->size;
+	struct mtd_info *child;
 	u32 remainder;
 	char *name;
 	u64 tmp;
 
 	/* allocate the partition structure */
-	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+	child = kzalloc(sizeof(*child), GFP_KERNEL);
 	name = kstrdup(part->name, GFP_KERNEL);
-	if (!name || !slave) {
+	if (!name || !child) {
 		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
 		       parent->name);
 		kfree(name);
-		kfree(slave);
+		kfree(child);
 		return ERR_PTR(-ENOMEM);
 	}
 
 	/* set up the MTD object for this partition */
-	slave->mtd.type = parent->type;
-	slave->mtd.flags = parent->orig_flags & ~part->mask_flags;
-	slave->mtd.orig_flags = slave->mtd.flags;
-	slave->mtd.size = part->size;
-	slave->mtd.writesize = parent->writesize;
-	slave->mtd.writebufsize = parent->writebufsize;
-	slave->mtd.oobsize = parent->oobsize;
-	slave->mtd.oobavail = parent->oobavail;
-	slave->mtd.subpage_sft = parent->subpage_sft;
-	slave->mtd.pairing = parent->pairing;
+	child->type = parent->type;
+	child->part.flags = parent->flags & ~part->mask_flags;
+	child->part.flags |= part->add_flags;
+	child->flags = child->part.flags;
+	child->part.size = part->size;
+	child->writesize = parent->writesize;
+	child->writebufsize = parent->writebufsize;
+	child->oobsize = parent->oobsize;
+	child->oobavail = parent->oobavail;
+	child->subpage_sft = parent->subpage_sft;
 
-	slave->mtd.name = name;
-	slave->mtd.owner = parent->owner;
+	child->name = name;
+	child->owner = parent->owner;
 
 	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
 	 * concern for showing the same data in multiple partitions.
@@ -360,134 +77,77 @@
 	 * so the MTD_PARTITIONED_MASTER option allows that. The master
 	 * will have device nodes etc only if this is set, so make the
 	 * parent conditional on that option. Note, this is a way to
-	 * distinguish between the master and the partition in sysfs.
+	 * distinguish between the parent and its partitions in sysfs.
 	 */
-	slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
-				&parent->dev :
-				parent->dev.parent;
-	slave->mtd.dev.of_node = part->of_node;
+	child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
+			    &parent->dev : parent->dev.parent;
+	child->dev.of_node = part->of_node;
+	child->parent = parent;
+	child->part.offset = part->offset;
+	INIT_LIST_HEAD(&child->partitions);
 
-	if (parent->_read)
-		slave->mtd._read = part_read;
-	if (parent->_write)
-		slave->mtd._write = part_write;
-
-	if (parent->_panic_write)
-		slave->mtd._panic_write = part_panic_write;
-
-	if (parent->_point && parent->_unpoint) {
-		slave->mtd._point = part_point;
-		slave->mtd._unpoint = part_unpoint;
-	}
-
-	if (parent->_read_oob)
-		slave->mtd._read_oob = part_read_oob;
-	if (parent->_write_oob)
-		slave->mtd._write_oob = part_write_oob;
-	if (parent->_read_user_prot_reg)
-		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
-	if (parent->_read_fact_prot_reg)
-		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
-	if (parent->_write_user_prot_reg)
-		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
-	if (parent->_lock_user_prot_reg)
-		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
-	if (parent->_get_user_prot_info)
-		slave->mtd._get_user_prot_info = part_get_user_prot_info;
-	if (parent->_get_fact_prot_info)
-		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
-	if (parent->_sync)
-		slave->mtd._sync = part_sync;
-	if (!partno && !parent->dev.class && parent->_suspend &&
-	    parent->_resume) {
-		slave->mtd._suspend = part_suspend;
-		slave->mtd._resume = part_resume;
-	}
-	if (parent->_writev)
-		slave->mtd._writev = part_writev;
-	if (parent->_lock)
-		slave->mtd._lock = part_lock;
-	if (parent->_unlock)
-		slave->mtd._unlock = part_unlock;
-	if (parent->_is_locked)
-		slave->mtd._is_locked = part_is_locked;
-	if (parent->_block_isreserved)
-		slave->mtd._block_isreserved = part_block_isreserved;
-	if (parent->_block_isbad)
-		slave->mtd._block_isbad = part_block_isbad;
-	if (parent->_block_markbad)
-		slave->mtd._block_markbad = part_block_markbad;
-	if (parent->_max_bad_blocks)
-		slave->mtd._max_bad_blocks = part_max_bad_blocks;
-
-	if (parent->_get_device)
-		slave->mtd._get_device = part_get_device;
-	if (parent->_put_device)
-		slave->mtd._put_device = part_put_device;
-
-	slave->mtd._erase = part_erase;
-	slave->parent = parent;
-	slave->offset = part->offset;
-
-	if (slave->offset == MTDPART_OFS_APPEND)
-		slave->offset = cur_offset;
-	if (slave->offset == MTDPART_OFS_NXTBLK) {
+	if (child->part.offset == MTDPART_OFS_APPEND)
+		child->part.offset = cur_offset;
+	if (child->part.offset == MTDPART_OFS_NXTBLK) {
 		tmp = cur_offset;
-		slave->offset = cur_offset;
+		child->part.offset = cur_offset;
 		remainder = do_div(tmp, wr_alignment);
 		if (remainder) {
-			slave->offset += wr_alignment - remainder;
+			child->part.offset += wr_alignment - remainder;
 			printk(KERN_NOTICE "Moving partition %d: "
 			       "0x%012llx -> 0x%012llx\n", partno,
-			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
+			       (unsigned long long)cur_offset,
+			       child->part.offset);
 		}
 	}
-	if (slave->offset == MTDPART_OFS_RETAIN) {
-		slave->offset = cur_offset;
-		if (parent->size - slave->offset >= slave->mtd.size) {
-			slave->mtd.size = parent->size - slave->offset
-							- slave->mtd.size;
+	if (child->part.offset == MTDPART_OFS_RETAIN) {
+		child->part.offset = cur_offset;
+		if (parent_size - child->part.offset >= child->part.size) {
+			child->part.size = parent_size - child->part.offset -
+					   child->part.size;
 		} else {
 			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
-				part->name, parent->size - slave->offset,
-				slave->mtd.size);
+				part->name, parent_size - child->part.offset,
+				child->part.size);
 			/* register to preserve ordering */
 			goto out_register;
 		}
 	}
-	if (slave->mtd.size == MTDPART_SIZ_FULL)
-		slave->mtd.size = parent->size - slave->offset;
+	if (child->part.size == MTDPART_SIZ_FULL)
+		child->part.size = parent_size - child->part.offset;
 
-	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
-		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
+	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n",
+	       child->part.offset, child->part.offset + child->part.size,
+	       child->name);
 
 	/* let's do some sanity checks */
-	if (slave->offset >= parent->size) {
+	if (child->part.offset >= parent_size) {
 		/* let's register it anyway to preserve ordering */
-		slave->offset = 0;
-		slave->mtd.size = 0;
+		child->part.offset = 0;
+		child->part.size = 0;
 
 		/* Initialize ->erasesize to make add_mtd_device() happy. */
-		slave->mtd.erasesize = parent->erasesize;
-
+		child->erasesize = parent->erasesize;
 		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
 			part->name);
 		goto out_register;
 	}
-	if (slave->offset + slave->mtd.size > parent->size) {
-		slave->mtd.size = parent->size - slave->offset;
+	if (child->part.offset + child->part.size > parent->size) {
+		child->part.size = parent_size - child->part.offset;
 		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
-			part->name, parent->name, (unsigned long long)slave->mtd.size);
+			part->name, parent->name, child->part.size);
 	}
+
 	if (parent->numeraseregions > 1) {
 		/* Deal with variable erase size stuff */
 		int i, max = parent->numeraseregions;
-		u64 end = slave->offset + slave->mtd.size;
+		u64 end = child->part.offset + child->part.size;
 		struct mtd_erase_region_info *regions = parent->eraseregions;
 
 		/* Find the first erase regions which is part of this
 		 * partition. */
-		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
+		for (i = 0; i < max && regions[i].offset <= child->part.offset;
+		     i++)
 			;
 		/* The loop searched for the region _behind_ the first one */
 		if (i > 0)
@@ -495,70 +155,69 @@
 
 		/* Pick biggest erasesize */
 		for (; i < max && regions[i].offset < end; i++) {
-			if (slave->mtd.erasesize < regions[i].erasesize) {
-				slave->mtd.erasesize = regions[i].erasesize;
-			}
+			if (child->erasesize < regions[i].erasesize)
+				child->erasesize = regions[i].erasesize;
 		}
-		BUG_ON(slave->mtd.erasesize == 0);
+		BUG_ON(child->erasesize == 0);
 	} else {
 		/* Single erase size */
-		slave->mtd.erasesize = parent->erasesize;
+		child->erasesize = master->erasesize;
 	}
 
 	/*
-	 * Slave erasesize might differ from the master one if the master
+	 * Child erasesize might differ from the parent one if the parent
 	 * exposes several regions with different erasesize. Adjust
 	 * wr_alignment accordingly.
 	 */
-	if (!(slave->mtd.flags & MTD_NO_ERASE))
-		wr_alignment = slave->mtd.erasesize;
+	if (!(child->flags & MTD_NO_ERASE))
+		wr_alignment = child->erasesize;
 
-	tmp = part_absolute_offset(parent) + slave->offset;
+	tmp = mtd_get_master_ofs(child, 0);
 	remainder = do_div(tmp, wr_alignment);
-	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
+	if ((child->flags & MTD_WRITEABLE) && remainder) {
 		/* Doesn't start on a boundary of major erase size */
 		/* FIXME: Let it be writable if it is on a boundary of
 		 * _minor_ erase size though */
-		slave->mtd.flags &= ~MTD_WRITEABLE;
+		child->flags &= ~MTD_WRITEABLE;
 		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
 			part->name);
 	}
 
-	tmp = part_absolute_offset(parent) + slave->mtd.size;
+	tmp = mtd_get_master_ofs(child, 0) + child->part.size;
 	remainder = do_div(tmp, wr_alignment);
-	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
-		slave->mtd.flags &= ~MTD_WRITEABLE;
+	if ((child->flags & MTD_WRITEABLE) && remainder) {
+		child->flags &= ~MTD_WRITEABLE;
 		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
 			part->name);
 	}
 
-	mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
-	slave->mtd.ecc_step_size = parent->ecc_step_size;
-	slave->mtd.ecc_strength = parent->ecc_strength;
-	slave->mtd.bitflip_threshold = parent->bitflip_threshold;
+	child->size = child->part.size;
+	child->ecc_step_size = parent->ecc_step_size;
+	child->ecc_strength = parent->ecc_strength;
+	child->bitflip_threshold = parent->bitflip_threshold;
 
-	if (parent->_block_isbad) {
+	if (master->_block_isbad) {
 		uint64_t offs = 0;
 
-		while (offs < slave->mtd.size) {
-			if (mtd_block_isreserved(parent, offs + slave->offset))
-				slave->mtd.ecc_stats.bbtblocks++;
-			else if (mtd_block_isbad(parent, offs + slave->offset))
-				slave->mtd.ecc_stats.badblocks++;
-			offs += slave->mtd.erasesize;
+		while (offs < child->part.size) {
+			if (mtd_block_isreserved(child, offs))
+				child->ecc_stats.bbtblocks++;
+			else if (mtd_block_isbad(child, offs))
+				child->ecc_stats.badblocks++;
+			offs += child->erasesize;
 		}
 	}
 
 out_register:
-	return slave;
+	return child;
 }
 
 static ssize_t mtd_partition_offset_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct mtd_info *mtd = dev_get_drvdata(dev);
-	struct mtd_part *part = mtd_to_part(mtd);
-	return snprintf(buf, PAGE_SIZE, "%llu\n", part->offset);
+
+	return snprintf(buf, PAGE_SIZE, "%lld\n", mtd->part.offset);
 }
 
 static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);
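
Note that child->part.offset stays relative to the immediate parent;
absolute flash addresses are only ever computed by walking ->parent,
which is what lets subpartitions nest to arbitrary depth. A runnable
model of the accumulation (layout values invented):

    #include <stdio.h>

    struct m {
    	struct m *parent;
    	unsigned long long off;	/* offset inside the parent */
    };

    static unsigned long long master_ofs(struct m *n, unsigned long long ofs)
    {
    	for (; n->parent; n = n->parent)
    		ofs += n->off;	/* same walk as mtd_get_master_ofs() */
    	return ofs;
    }

    int main(void)
    {
    	struct m master = { NULL, 0 };
    	struct m part = { &master, 0x100000 };	/* 1 MiB into master */
    	struct m sub = { &part, 0x20000 };	/* 128 KiB into part */

    	printf("0x%llx\n", master_ofs(&sub, 0x1000));	/* 0x121000 */
    	return 0;
    }
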
@@ -568,9 +227,9 @@
 	NULL
 };
 
-static int mtd_add_partition_attrs(struct mtd_part *new)
+static int mtd_add_partition_attrs(struct mtd_info *new)
 {
-	int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
+	int ret = sysfs_create_files(&new->dev.kobj, mtd_partition_attrs);
 	if (ret)
 		printk(KERN_WARNING
 		       "mtd: failed to create partition attrs, err=%d\n", ret);
@@ -580,8 +239,11 @@
 int mtd_add_partition(struct mtd_info *parent, const char *name,
 		      long long offset, long long length)
 {
+	struct mtd_info *master = mtd_get_master(parent);
+	u64 parent_size = mtd_is_partition(parent) ?
+			  parent->part.size : parent->size;
 	struct mtd_partition part;
-	struct mtd_part *new;
+	struct mtd_info *child;
 	int ret = 0;
 
 	/* the direct offset is expected */
@@ -590,7 +252,7 @@
 		return -EINVAL;
 
 	if (length == MTDPART_SIZ_FULL)
-		length = parent->size - offset;
+		length = parent_size - offset;
 
 	if (length <= 0)
 		return -EINVAL;
@@ -600,28 +262,28 @@
 	part.size = length;
 	part.offset = offset;
 
-	new = allocate_partition(parent, &part, -1, offset);
-	if (IS_ERR(new))
-		return PTR_ERR(new);
+	child = allocate_partition(parent, &part, -1, offset);
+	if (IS_ERR(child))
+		return PTR_ERR(child);
 
-	mutex_lock(&mtd_partitions_mutex);
-	list_add(&new->list, &mtd_partitions);
-	mutex_unlock(&mtd_partitions_mutex);
+	mutex_lock(&master->master.partitions_lock);
+	list_add_tail(&child->part.node, &parent->partitions);
+	mutex_unlock(&master->master.partitions_lock);
 
-	ret = add_mtd_device(&new->mtd);
+	ret = add_mtd_device(child);
 	if (ret)
 		goto err_remove_part;
 
-	mtd_add_partition_attrs(new);
+	mtd_add_partition_attrs(child);
 
 	return 0;
 
 err_remove_part:
-	mutex_lock(&mtd_partitions_mutex);
-	list_del(&new->list);
-	mutex_unlock(&mtd_partitions_mutex);
+	mutex_lock(&master->master.partitions_lock);
+	list_del(&child->part.node);
+	mutex_unlock(&master->master.partitions_lock);
 
-	free_partition(new);
+	free_partition(child);
 
 	return ret;
 }
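
With the partition list rooted at the parent and the lock owned by the
master, mtd_add_partition() can carve a child out of any mtd_info,
including another partition. A hypothetical caller (name, offset and
size invented):

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>

    /* Split a 1 MiB "boot" child off an existing device or partition;
     * it is registered as a regular mtdN node and linked onto
     * parent->partitions. */
    static int carve_boot_area(struct mtd_info *parent)
    {
    	return mtd_add_partition(parent, "boot", 0, 0x100000);
    }
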
@@ -630,119 +292,142 @@
 /**
  * __mtd_del_partition - delete MTD partition
  *
- * @priv: internal MTD struct for partition to be deleted
+ * @priv: MTD structure to be deleted
  *
  * This function must be called with the partitions mutex locked.
  */
-static int __mtd_del_partition(struct mtd_part *priv)
+static int __mtd_del_partition(struct mtd_info *mtd)
 {
-	struct mtd_part *child, *next;
+	struct mtd_info *child, *next;
 	int err;
 
-	list_for_each_entry_safe(child, next, &mtd_partitions, list) {
-		if (child->parent == &priv->mtd) {
-			err = __mtd_del_partition(child);
-			if (err)
-				return err;
-		}
+	list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
+		err = __mtd_del_partition(child);
+		if (err)
+			return err;
 	}
 
-	sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);
+	sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs);
 
-	err = del_mtd_device(&priv->mtd);
+	err = del_mtd_device(mtd);
 	if (err)
 		return err;
 
-	list_del(&priv->list);
-	free_partition(priv);
+	list_del(&mtd->part.node);
+	free_partition(mtd);
 
 	return 0;
 }
 
 /*
  * This function unregisters and destroy all slave MTD objects which are
- * attached to the given MTD object.
+ * attached to the given MTD object, recursively.
  */
-int del_mtd_partitions(struct mtd_info *mtd)
+static int __del_mtd_partitions(struct mtd_info *mtd)
 {
-	struct mtd_part *slave, *next;
+	struct mtd_info *child, *next;
+	LIST_HEAD(tmp_list);
 	int ret, err = 0;
 
-	mutex_lock(&mtd_partitions_mutex);
-	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
-		if (slave->parent == mtd) {
-			ret = __mtd_del_partition(slave);
-			if (ret < 0)
-				err = ret;
+	list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
+		if (mtd_has_partitions(child))
+			__del_mtd_partitions(child);
+
+		pr_info("Deleting %s MTD partition\n", child->name);
+		ret = del_mtd_device(child);
+		if (ret < 0) {
+			pr_err("Error when deleting partition \"%s\" (%d)\n",
+			       child->name, ret);
+			err = ret;
+			continue;
 		}
-	mutex_unlock(&mtd_partitions_mutex);
+
+		list_del(&child->part.node);
+		free_partition(child);
+	}
 
 	return err;
 }
 
+int del_mtd_partitions(struct mtd_info *mtd)
+{
+	struct mtd_info *master = mtd_get_master(mtd);
+	int ret;
+
+	pr_info("Deleting MTD partitions on \"%s\":\n", mtd->name);
+
+	mutex_lock(&master->master.partitions_lock);
+	ret = __del_mtd_partitions(mtd);
+	mutex_unlock(&master->master.partitions_lock);
+
+	return ret;
+}
+
 int mtd_del_partition(struct mtd_info *mtd, int partno)
 {
-	struct mtd_part *slave, *next;
+	struct mtd_info *child, *master = mtd_get_master(mtd);
 	int ret = -EINVAL;
 
-	mutex_lock(&mtd_partitions_mutex);
-	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
-		if ((slave->parent == mtd) &&
-		    (slave->mtd.index == partno)) {
-			ret = __mtd_del_partition(slave);
+	mutex_lock(&master->master.partitions_lock);
+	list_for_each_entry(child, &mtd->partitions, part.node) {
+		if (child->index == partno) {
+			ret = __mtd_del_partition(child);
 			break;
 		}
-	mutex_unlock(&mtd_partitions_mutex);
+	}
+	mutex_unlock(&master->master.partitions_lock);
 
 	return ret;
 }
 EXPORT_SYMBOL_GPL(mtd_del_partition);
 
 /*
- * This function, given a master MTD object and a partition table, creates
- * and registers slave MTD objects which are bound to the master according to
- * the partition definitions.
+ * This function, given a parent MTD object and a partition table, creates
+ * and registers the child MTD objects which are bound to the parent according
+ * to the partition definitions.
  *
- * For historical reasons, this function's caller only registers the master
+ * For historical reasons, this function's caller only registers the parent
  * if the MTD_PARTITIONED_MASTER config option is set.
  */
 
-int add_mtd_partitions(struct mtd_info *master,
+int add_mtd_partitions(struct mtd_info *parent,
 		       const struct mtd_partition *parts,
 		       int nbparts)
 {
-	struct mtd_part *slave;
+	struct mtd_info *child, *master = mtd_get_master(parent);
 	uint64_t cur_offset = 0;
 	int i, ret;
 
-	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n",
+	       nbparts, parent->name);
 
 	for (i = 0; i < nbparts; i++) {
-		slave = allocate_partition(master, parts + i, i, cur_offset);
-		if (IS_ERR(slave)) {
-			ret = PTR_ERR(slave);
+		child = allocate_partition(parent, parts + i, i, cur_offset);
+		if (IS_ERR(child)) {
+			ret = PTR_ERR(child);
 			goto err_del_partitions;
 		}
 
-		mutex_lock(&mtd_partitions_mutex);
-		list_add(&slave->list, &mtd_partitions);
-		mutex_unlock(&mtd_partitions_mutex);
+		mutex_lock(&master->master.partitions_lock);
+		list_add_tail(&child->part.node, &parent->partitions);
+		mutex_unlock(&master->master.partitions_lock);
 
-		ret = add_mtd_device(&slave->mtd);
+		ret = add_mtd_device(child);
 		if (ret) {
-			mutex_lock(&mtd_partitions_mutex);
-			list_del(&slave->list);
-			mutex_unlock(&mtd_partitions_mutex);
+			mutex_lock(&master->master.partitions_lock);
+			list_del(&child->part.node);
+			mutex_unlock(&master->master.partitions_lock);
 
-			free_partition(slave);
+			free_partition(child);
 			goto err_del_partitions;
 		}
 
-		mtd_add_partition_attrs(slave);
-		/* Look for subpartitions */
-		parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);
+		mtd_add_partition_attrs(child);
 
-		cur_offset = slave->offset + slave->mtd.size;
+		/* Look for subpartitions */
+		parse_mtd_partitions(child, parts[i].types, NULL);
+
+		cur_offset = child->part.offset + child->part.size;
 	}
 
 	return 0;
@@ -1023,29 +708,11 @@
 	}
 }
 
-int mtd_is_partition(const struct mtd_info *mtd)
-{
-	struct mtd_part *part;
-	int ispart = 0;
-
-	mutex_lock(&mtd_partitions_mutex);
-	list_for_each_entry(part, &mtd_partitions, list)
-		if (&part->mtd == mtd) {
-			ispart = 1;
-			break;
-		}
-	mutex_unlock(&mtd_partitions_mutex);
-
-	return ispart;
-}
-EXPORT_SYMBOL_GPL(mtd_is_partition);
-
 /* Returns the size of the entire flash chip */
 uint64_t mtd_get_device_size(const struct mtd_info *mtd)
 {
-	if (!mtd_is_partition(mtd))
-		return mtd->size;
+	struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
 
-	return mtd_get_device_size(mtd_to_part(mtd)->parent);
+	return master->size;
 }
 EXPORT_SYMBOL_GPL(mtd_get_device_size);
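
The hunks above replace the old flat, global mtd_partitions list with a per-master hierarchy: every mtd_info now carries its own partitions list, guarded by the master's partitions_lock, and deletion recurses through the children. A minimal standalone model of that recursion, in plain C rather than kernel code (the names and tree shape are illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct part {
	const char *name;
	struct part *child;	/* first sub-partition */
	struct part *sibling;	/* next partition on the same parent */
};

/* Children first, as in __mtd_del_partition()/__del_mtd_partitions(). */
static void del_partitions(struct part *parent)
{
	struct part *c = parent->child;

	while (c) {
		struct part *next = c->sibling;

		del_partitions(c);
		printf("Deleting %s MTD partition\n", c->name);
		free(c);
		c = next;
	}
	parent->child = NULL;
}

static struct part *add_part(struct part *parent, const char *name)
{
	struct part *p = calloc(1, sizeof(*p));	/* error handling omitted */

	p->name = name;
	p->sibling = parent->child;
	parent->child = p;
	return p;
}

int main(void)
{
	struct part master = { .name = "master" };
	struct part *p1 = add_part(&master, "p1");

	add_part(p1, "p1.0");
	add_part(&master, "p2");
	del_partitions(&master);
	return 0;
}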
diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c
new file mode 100644
index 0000000..a3ae877
--- /dev/null
+++ b/drivers/mtd/mtdpstore.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define dev_fmt(fmt) "mtdoops-pstore: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pstore_blk.h>
+#include <linux/mtd/mtd.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+static struct mtdpstore_context {
+	int index;
+	struct pstore_blk_config info;
+	struct pstore_device_info dev;
+	struct mtd_info *mtd;
+	unsigned long *rmmap;		/* removed bit map */
+	unsigned long *usedmap;		/* used bit map */
+	/*
+	 * Used for panic writes.
+	 * Since there is no block_isbad() in the panic path, we record this
+	 * status beforehand so that panic_write() does not fail.
+	 */
+	unsigned long *badmap;		/* bad block bit map */
+} oops_cxt;
+
+static int mtdpstore_block_isbad(struct mtdpstore_context *cxt, loff_t off)
+{
+	int ret;
+	struct mtd_info *mtd = cxt->mtd;
+	u64 blknum;
+
+	off = ALIGN_DOWN(off, mtd->erasesize);
+	blknum = div_u64(off, mtd->erasesize);
+
+	if (test_bit(blknum, cxt->badmap))
+		return true;
+	ret = mtd_block_isbad(mtd, off);
+	if (ret < 0) {
+		dev_err(&mtd->dev, "mtd_block_isbad failed, aborting\n");
+		return ret;
+	} else if (ret > 0) {
+		set_bit(blknum, cxt->badmap);
+		return true;
+	}
+	return false;
+}
+
+static inline int mtdpstore_panic_block_isbad(struct mtdpstore_context *cxt,
+		loff_t off)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	u64 blknum;
+
+	off = ALIGN_DOWN(off, mtd->erasesize);
+	blknum = div_u64(off, mtd->erasesize);
+	return test_bit(blknum, cxt->badmap);
+}
+
+static inline void mtdpstore_mark_used(struct mtdpstore_context *cxt,
+		loff_t off)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	u64 zonenum = div_u64(off, cxt->info.kmsg_size);
+
+	dev_dbg(&mtd->dev, "mark zone %llu used\n", zonenum);
+	set_bit(zonenum, cxt->usedmap);
+}
+
+static inline void mtdpstore_mark_unused(struct mtdpstore_context *cxt,
+		loff_t off)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	u64 zonenum = div_u64(off, cxt->info.kmsg_size);
+
+	dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
+	clear_bit(zonenum, cxt->usedmap);
+}
+
+static inline void mtdpstore_block_mark_unused(struct mtdpstore_context *cxt,
+		loff_t off)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
+	u64 zonenum;
+
+	off = ALIGN_DOWN(off, mtd->erasesize);
+	zonenum = div_u64(off, cxt->info.kmsg_size);
+	while (zonecnt > 0) {
+		dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
+		clear_bit(zonenum, cxt->usedmap);
+		zonenum++;
+		zonecnt--;
+	}
+}
+
+static inline int mtdpstore_is_used(struct mtdpstore_context *cxt, loff_t off)
+{
+	u64 zonenum = div_u64(off, cxt->info.kmsg_size);
+	u64 blknum = div_u64(off, cxt->mtd->erasesize);
+
+	if (test_bit(blknum, cxt->badmap))
+		return true;
+	return test_bit(zonenum, cxt->usedmap);
+}
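
All of the bitmap helpers above reduce an absolute flash offset to either a zone number (one pstore record of kmsg_size bytes) or an eraseblock number. A standalone sketch of that arithmetic with hypothetical geometry; the driver itself uses ALIGN_DOWN() and div_u64() so the 64-bit division is also safe on 32-bit hosts:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t erasesize = 128 * 1024;	/* one eraseblock */
	uint64_t kmsg_size = 64 * 1024;		/* one record zone */
	uint64_t off = 3 * erasesize + kmsg_size;

	uint64_t zonenum = off / kmsg_size;		/* div_u64(off, kmsg_size) */
	uint64_t blkoff = off - (off % erasesize);	/* ALIGN_DOWN(off, erasesize) */
	uint64_t blknum = blkoff / erasesize;		/* div_u64(blkoff, erasesize) */

	printf("off 0x%llx -> zone %llu, eraseblock %llu\n",
	       (unsigned long long)off, (unsigned long long)zonenum,
	       (unsigned long long)blknum);		/* zone 7, eraseblock 3 */
	return 0;
}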
+
+static int mtdpstore_block_is_used(struct mtdpstore_context *cxt,
+		loff_t off)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
+	u64 zonenum;
+
+	off = ALIGN_DOWN(off, mtd->erasesize);
+	zonenum = div_u64(off, cxt->info.kmsg_size);
+	while (zonecnt > 0) {
+		if (test_bit(zonenum, cxt->usedmap))
+			return true;
+		zonenum++;
+		zonecnt--;
+	}
+	return false;
+}
+
+static int mtdpstore_is_empty(struct mtdpstore_context *cxt, char *buf,
+		size_t size)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	size_t sz;
+	int i;
+
+	sz = min_t(uint32_t, size, mtd->writesize / 4);
+	for (i = 0; i < sz; i++) {
+		if (buf[i] != (char)0xFF)
+			return false;
+	}
+	return true;
+}
+
+static void mtdpstore_mark_removed(struct mtdpstore_context *cxt, loff_t off)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	u64 zonenum = div_u64(off, cxt->info.kmsg_size);
+
+	dev_dbg(&mtd->dev, "mark zone %llu removed\n", zonenum);
+	set_bit(zonenum, cxt->rmmap);
+}
+
+static void mtdpstore_block_clear_removed(struct mtdpstore_context *cxt,
+		loff_t off)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
+	u64 zonenum;
+
+	off = ALIGN_DOWN(off, mtd->erasesize);
+	zonenum = div_u64(off, cxt->info.kmsg_size);
+	while (zonecnt > 0) {
+		clear_bit(zonenum, cxt->rmmap);
+		zonenum++;
+		zonecnt--;
+	}
+}
+
+static int mtdpstore_block_is_removed(struct mtdpstore_context *cxt,
+		loff_t off)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
+	u64 zonenum;
+
+	off = ALIGN_DOWN(off, mtd->erasesize);
+	zonenum = div_u64(off, cxt->info.kmsg_size);
+	while (zonecnt > 0) {
+		if (test_bit(zonenum, cxt->rmmap))
+			return true;
+		zonenum++;
+		zonecnt--;
+	}
+	return false;
+}
+
+static int mtdpstore_erase_do(struct mtdpstore_context *cxt, loff_t off)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	struct erase_info erase;
+	int ret;
+
+	off = ALIGN_DOWN(off, cxt->mtd->erasesize);
+	dev_dbg(&mtd->dev, "try to erase off 0x%llx\n", off);
+	erase.len = cxt->mtd->erasesize;
+	erase.addr = off;
+	ret = mtd_erase(cxt->mtd, &erase);
+	if (!ret)
+		mtdpstore_block_clear_removed(cxt, off);
+	else
+		dev_err(&mtd->dev, "erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
+		       (unsigned long long)erase.addr,
+		       (unsigned long long)erase.len, cxt->info.device);
+	return ret;
+}
+
+/*
+ * Called when a file is removed.
+ *
+ * To avoid over-erasing, erase a block only when the whole block is unused.
+ * If the block still contains valid logs, erase it lazily from
+ * flush_removed() at unregister time.
+ */
+static ssize_t mtdpstore_erase(size_t size, loff_t off)
+{
+	struct mtdpstore_context *cxt = &oops_cxt;
+
+	if (mtdpstore_block_isbad(cxt, off))
+		return -EIO;
+
+	mtdpstore_mark_unused(cxt, off);
+
+	/* If the block still has valid data, mtdpstore does the erase lazily */
+	if (likely(mtdpstore_block_is_used(cxt, off))) {
+		mtdpstore_mark_removed(cxt, off);
+		return 0;
+	}
+
+	/* all zones are unused, erase it */
+	return mtdpstore_erase_do(cxt, off);
+}
+
+/*
+ * What does "security" mean for mtdpstore?
+ * Since there is no erase in the panic path, we must ensure that at least
+ * one zone is writable; otherwise a panic write will fail.
+ * If a zone is already used, the write operation returns -ENOMSG and
+ * pstore/blk tries the zones one by one until it finds an empty one. So we
+ * do not need the very next zone to be empty, just at least one of them.
+ */
+static int mtdpstore_security(struct mtdpstore_context *cxt, loff_t off)
+{
+	int ret = 0, i;
+	struct mtd_info *mtd = cxt->mtd;
+	u32 zonenum = (u32)div_u64(off, cxt->info.kmsg_size);
+	u32 zonecnt = (u32)div_u64(cxt->mtd->size, cxt->info.kmsg_size);
+	u32 blkcnt = (u32)div_u64(cxt->mtd->size, cxt->mtd->erasesize);
+	u32 erasesize = cxt->mtd->erasesize;
+
+	for (i = 0; i < zonecnt; i++) {
+		u32 num = (zonenum + i) % zonecnt;
+
+		/* found empty zone */
+		if (!test_bit(num, cxt->usedmap))
+			return 0;
+	}
+
+	/* If there is no empty zone at all, we have no choice but to erase */
+	while (blkcnt--) {
+		div64_u64_rem(off + erasesize, cxt->mtd->size, (u64 *)&off);
+
+		if (mtdpstore_block_isbad(cxt, off))
+			continue;
+
+		ret = mtdpstore_erase_do(cxt, off);
+		if (!ret) {
+			mtdpstore_block_mark_unused(cxt, off);
+			break;
+		}
+	}
+
+	if (ret)
+		dev_err(&mtd->dev, "all blocks bad!\n");
+	dev_dbg(&mtd->dev, "end security\n");
+	return ret;
+}
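
The div64_u64_rem() call above is what makes the fallback scan circular: the offset advances by one eraseblock and wraps to the start of the partition. A standalone model with four hypothetical blocks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t erasesize = 128 * 1024;
	uint64_t size = 4 * erasesize;		/* four eraseblocks */
	uint64_t off = 3 * erasesize;		/* start in the last block */
	int blkcnt = 4;

	while (blkcnt--) {
		/* div64_u64_rem(off + erasesize, size, &off) in the driver */
		off = (off + erasesize) % size;
		printf("visit eraseblock at 0x%llx\n", (unsigned long long)off);
	}
	return 0;
}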
+
+static ssize_t mtdpstore_write(const char *buf, size_t size, loff_t off)
+{
+	struct mtdpstore_context *cxt = &oops_cxt;
+	struct mtd_info *mtd = cxt->mtd;
+	size_t retlen;
+	int ret;
+
+	if (mtdpstore_block_isbad(cxt, off))
+		return -ENOMSG;
+
+	/* the zone is used, try the next one */
+	if (mtdpstore_is_used(cxt, off))
+		return -ENOMSG;
+
+	dev_dbg(&mtd->dev, "try to write off 0x%llx size %zu\n", off, size);
+	ret = mtd_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
+	if (ret < 0 || retlen != size) {
+		dev_err(&mtd->dev, "write failure at %lld (%zu of %zu written), err %d\n",
+				off, retlen, size, ret);
+		return -EIO;
+	}
+	mtdpstore_mark_used(cxt, off);
+
+	mtdpstore_security(cxt, off);
+	return retlen;
+}
+
+static inline bool mtdpstore_is_io_error(int ret)
+{
+	return ret < 0 && !mtd_is_bitflip(ret) && !mtd_is_eccerr(ret);
+}
+
+/*
+ * All zones will be read, as pstore/blk reads the zones one by one during
+ * recovery.
+ */
+static ssize_t mtdpstore_read(char *buf, size_t size, loff_t off)
+{
+	struct mtdpstore_context *cxt = &oops_cxt;
+	struct mtd_info *mtd = cxt->mtd;
+	size_t retlen, done;
+	int ret;
+
+	if (mtdpstore_block_isbad(cxt, off))
+		return -ENOMSG;
+
+	dev_dbg(&mtd->dev, "try to read off 0x%llx size %zu\n", off, size);
+	for (done = 0, retlen = 0; done < size; done += retlen) {
+		retlen = 0;
+
+		ret = mtd_read(cxt->mtd, off + done, size - done, &retlen,
+				(u_char *)buf + done);
+		if (mtdpstore_is_io_error(ret)) {
+			dev_err(&mtd->dev, "read failure at %lld (%zu of %zu read), err %d\n",
+					off + done, retlen, size - done, ret);
+			/* the zone may be broken, try next one */
+			return -ENOMSG;
+		}
+
+		/*
+		 * ECC error. The impact on the log data is usually small and
+		 * the log may still be readable, so mtdpstore just hands over
+		 * what it got and lets the user judge whether the data is
+		 * valid or not.
+		 */
+		if (mtd_is_eccerr(ret)) {
+			dev_err(&mtd->dev, "ecc error at %lld (%zu of %zu read), err %d\n",
+					off + done, retlen, size - done, ret);
+			/* the driver may not set retlen on an ECC error */
+			retlen = retlen == 0 ? size - done : retlen;
+		}
+	}
+
+	if (mtdpstore_is_empty(cxt, buf, size))
+		mtdpstore_mark_unused(cxt, off);
+	else
+		mtdpstore_mark_used(cxt, off);
+
+	mtdpstore_security(cxt, off);
+	return retlen;
+}
+
+static ssize_t mtdpstore_panic_write(const char *buf, size_t size, loff_t off)
+{
+	struct mtdpstore_context *cxt = &oops_cxt;
+	struct mtd_info *mtd = cxt->mtd;
+	size_t retlen;
+	int ret;
+
+	if (mtdpstore_panic_block_isbad(cxt, off))
+		return -ENOMSG;
+
+	/* the zone is used, try the next one */
+	if (mtdpstore_is_used(cxt, off))
+		return -ENOMSG;
+
+	ret = mtd_panic_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
+	if (ret < 0 || size != retlen) {
+		dev_err(&mtd->dev, "panic write failure at %lld (%zu of %zu read), err %d\n",
+				off, retlen, size, ret);
+		return -EIO;
+	}
+	mtdpstore_mark_used(cxt, off);
+
+	return retlen;
+}
+
+static void mtdpstore_notify_add(struct mtd_info *mtd)
+{
+	int ret;
+	struct mtdpstore_context *cxt = &oops_cxt;
+	struct pstore_blk_config *info = &cxt->info;
+	unsigned long longcnt;
+
+	if (!strcmp(mtd->name, info->device))
+		cxt->index = mtd->index;
+
+	if (mtd->index != cxt->index || cxt->index < 0)
+		return;
+
+	dev_dbg(&mtd->dev, "found matching MTD device %s\n", mtd->name);
+
+	if (mtd->size < info->kmsg_size * 2) {
+		dev_err(&mtd->dev, "MTD partition %d not big enough\n",
+				mtd->index);
+		return;
+	}
+	/*
+	 * kmsg_size must be aligned to 4096 bytes, a limit imposed by
+	 * pstore/blk. The default kmsg_size is 64 KB. kmsg_size must not be
+	 * larger than erasesize, since mtdpstore is designed around that
+	 * assumption.
+	 */
+	if (mtd->erasesize < info->kmsg_size) {
+		dev_err(&mtd->dev, "eraseblock size of MTD partition %d too small\n",
+				mtd->index);
+		return;
+	}
+	if (unlikely(info->kmsg_size % mtd->writesize)) {
+		dev_err(&mtd->dev, "record size %lu KB must align to write size %d KB\n",
+				info->kmsg_size / 1024,
+				mtd->writesize / 1024);
+		return;
+	}
+
+	longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
+	cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+	cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+
+	longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
+	cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+
+	cxt->dev.total_size = mtd->size;
+	/* just support dmesg right now */
+	cxt->dev.flags = PSTORE_FLAGS_DMESG;
+	cxt->dev.read = mtdpstore_read;
+	cxt->dev.write = mtdpstore_write;
+	cxt->dev.erase = mtdpstore_erase;
+	cxt->dev.panic_write = mtdpstore_panic_write;
+
+	ret = register_pstore_device(&cxt->dev);
+	if (ret) {
+		dev_err(&mtd->dev, "mtd%d register to psblk failed\n",
+				mtd->index);
+		return;
+	}
+	cxt->mtd = mtd;
+	dev_info(&mtd->dev, "Attached to MTD device %d\n", mtd->index);
+}
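
So a partition is accepted only if it can hold at least two records, a record fits inside one eraseblock, and a record is a whole number of NAND pages. The same three checks restated as a standalone sketch with hypothetical geometry:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mtd_size = 1024 * 1024;	/* 1 MiB partition */
	uint32_t erasesize = 128 * 1024;
	uint32_t writesize = 2048;		/* NAND page size */
	uint32_t kmsg_size = 64 * 1024;		/* one record zone */

	bool ok = mtd_size >= 2ull * kmsg_size &&	/* at least two records */
		  erasesize >= kmsg_size &&		/* record fits an eraseblock */
		  kmsg_size % writesize == 0;		/* record aligns to pages */

	printf("geometry %s\n", ok ? "accepted" : "rejected");
	return 0;
}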
+
+static int mtdpstore_flush_removed_do(struct mtdpstore_context *cxt,
+		loff_t off, size_t size)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	u_char *buf;
+	int ret;
+	size_t retlen;
+	struct erase_info erase;
+
+	buf = kmalloc(mtd->erasesize, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* 1st. read to cache */
+	ret = mtd_read(mtd, off, mtd->erasesize, &retlen, buf);
+	if (mtdpstore_is_io_error(ret))
+		goto free;
+
+	/* 2nd. erase block */
+	erase.len = mtd->erasesize;
+	erase.addr = off;
+	ret = mtd_erase(mtd, &erase);
+	if (ret)
+		goto free;
+
+	/* 3rd. write back */
+	while (size) {
+		unsigned int zonesize = cxt->info.kmsg_size;
+
+		/* this zone still holds valid data, write it back */
+		if (mtdpstore_is_used(cxt, off)) {
+			ret = mtd_write(mtd, off, zonesize, &retlen, buf);
+			if (ret)
+				dev_err(&mtd->dev, "write failure at %lld (%zu of %u written), err %d\n",
+						off, retlen, zonesize, ret);
+		}
+
+		off += zonesize;
+		size -= min_t(unsigned int, zonesize, size);
+	}
+
+free:
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * What does mtdpstore_flush_removed() do?
+ * When the user removes a log file from the pstore filesystem, mtdpstore
+ * must make sure the file is really gone. If the whole block is no longer
+ * used, the block is simply erased. If the block still contains valid logs,
+ * all mtdpstore can do is erase it and write the valid logs back.
+ */
+static int mtdpstore_flush_removed(struct mtdpstore_context *cxt)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	int ret;
+	loff_t off;
+	u32 blkcnt = (u32)div_u64(mtd->size, mtd->erasesize);
+
+	for (off = 0; blkcnt > 0; blkcnt--, off += mtd->erasesize) {
+		ret = mtdpstore_block_isbad(cxt, off);
+		if (ret)
+			continue;
+
+		ret = mtdpstore_block_is_removed(cxt, off);
+		if (!ret)
+			continue;
+
+		ret = mtdpstore_flush_removed_do(cxt, off, mtd->erasesize);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static void mtdpstore_notify_remove(struct mtd_info *mtd)
+{
+	struct mtdpstore_context *cxt = &oops_cxt;
+
+	if (mtd->index != cxt->index || cxt->index < 0)
+		return;
+
+	mtdpstore_flush_removed(cxt);
+
+	unregister_pstore_device(&cxt->dev);
+	kfree(cxt->badmap);
+	kfree(cxt->usedmap);
+	kfree(cxt->rmmap);
+	cxt->mtd = NULL;
+	cxt->index = -1;
+}
+
+static struct mtd_notifier mtdpstore_notifier = {
+	.add	= mtdpstore_notify_add,
+	.remove	= mtdpstore_notify_remove,
+};
+
+static int __init mtdpstore_init(void)
+{
+	int ret;
+	struct mtdpstore_context *cxt = &oops_cxt;
+	struct pstore_blk_config *info = &cxt->info;
+
+	ret = pstore_blk_get_config(info);
+	if (unlikely(ret))
+		return ret;
+
+	if (strlen(info->device) == 0) {
+		pr_err("mtd device must be supplied (device name is empty)\n");
+		return -EINVAL;
+	}
+	if (!info->kmsg_size) {
+		pr_err("no backend enabled (kmsg_size is 0)\n");
+		return -EINVAL;
+	}
+
+	/* Setup the MTD device to use */
+	ret = kstrtoint((char *)info->device, 0, &cxt->index);
+	if (ret)
+		cxt->index = -1;
+
+	register_mtd_user(&mtdpstore_notifier);
+	return 0;
+}
+module_init(mtdpstore_init);
+
+static void __exit mtdpstore_exit(void)
+{
+	unregister_mtd_user(&mtdpstore_notifier);
+}
+module_exit(mtdpstore_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>");
+MODULE_DESCRIPTION("MTD backend for pstore/blk");
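
Usage sketch, not taken from this patch: with CONFIG_MTD_PSTORE and CONFIG_PSTORE_BLK enabled, the backend is typically selected through the pstore/blk parameters, e.g. on the kernel command line something like

	pstore_blk.blkdev=pstore pstore_blk.kmsg_size=64

where "pstore" is assumed to be the name (or, alternatively, the decimal index) of the MTD partition to use. mtdpstore_init() above first tries to parse the device string as an integer index via kstrtoint() and otherwise matches it against MTD device names in mtdpstore_notify_add().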
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index f92414e..58eefa4 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1257,7 +1257,6 @@
 static int mtdswap_add_debugfs(struct mtdswap_dev *d)
 {
 	struct dentry *root = d->mtd->dbg.dfs_dir;
-	struct dentry *dent;
 
 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
 		return 0;
@@ -1265,12 +1264,7 @@
 	if (IS_ERR_OR_NULL(root))
 		return -1;
 
-	dent = debugfs_create_file("mtdswap_stats", S_IRUSR, root, d,
-				&mtdswap_fops);
-	if (!dent) {
-		dev_err(d->dev, "debugfs_create_file failed\n");
-		return -1;
-	}
+	debugfs_create_file("mtdswap_stats", S_IRUSR, root, d, &mtdswap_fops);
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index a5d8a21..4a9aed4 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,7 +1,20 @@
 # SPDX-License-Identifier: GPL-2.0-only
+
+menu "NAND"
+
 config MTD_NAND_CORE
 	tristate
 
 source "drivers/mtd/nand/onenand/Kconfig"
 source "drivers/mtd/nand/raw/Kconfig"
 source "drivers/mtd/nand/spi/Kconfig"
+
+menu "ECC engine support"
+
+config MTD_NAND_ECC
+	bool
+	depends on MTD_NAND_CORE
+
+endmenu
+
+endmenu
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 7ecd80c..9813729 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -6,3 +6,5 @@
 obj-y	+= onenand/
 obj-y	+= raw/
 obj-y	+= spi/
+
+nandcore-$(CONFIG_MTD_NAND_ECC) += ecc.o
diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c
index 044adf9..64af689 100644
--- a/drivers/mtd/nand/bbt.c
+++ b/drivers/mtd/nand/bbt.c
@@ -123,7 +123,7 @@
 		unsigned int rbits = bits_per_block + offs - BITS_PER_LONG;
 
 		pos[1] &= ~GENMASK(rbits - 1, 0);
-		pos[1] |= val >> rbits;
+		pos[1] |= val >> (bits_per_block - rbits);
 	}
 
 	return 0;
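
The one-line fix above matters when a block's entry straddles two bitmap words: rbits is the number of bits that spill into pos[1], so the value has to be shifted right by the bits_per_block - rbits bits that stayed in pos[0], not by rbits. A standalone model using 32-bit words (the kernel uses unsigned long and BITS_PER_LONG; the pos[0] update mirrors the context just above the hunk):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int word_bits = 32, bits_per_block = 2;
	unsigned int offs = 31;		/* entry starts at the last bit of word 0 */
	unsigned int rbits = bits_per_block + offs - word_bits;	/* 1 bit spills */
	uint32_t pos[2] = { 0, 0 };
	uint32_t val = 0x3;		/* 2-bit block status to store */

	pos[0] |= val << offs;				/* low bit lands in word 0 */
	pos[1] &= ~((1u << rbits) - 1);
	pos[1] |= val >> (bits_per_block - rbits);	/* the corrected shift */

	/* prints pos[0]=0x80000000 pos[1]=0x00000001 */
	printf("pos[0]=0x%08x pos[1]=0x%08x\n", pos[0], pos[1]);
	return 0;
}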
diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c
new file mode 100644
index 0000000..4a56e6c
--- /dev/null
+++ b/drivers/mtd/nand/ecc.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Generic Error-Correcting Code (ECC) engine
+ *
+ * Copyright (C) 2019 Macronix
+ * Author:
+ *     Miquèl RAYNAL <miquel.raynal@bootlin.com>
+ *
+ *
+ * This file describes the abstraction of any NAND ECC engine. It has been
+ * designed to fit most cases, including parallel NANDs and SPI-NANDs.
+ *
+ * There are three main situations where instantiating this ECC engine makes
+ * sense:
+ *   - external: The ECC engine is outside the NAND pipeline; typically this
+ *               is a software ECC engine, or a hardware engine that is
+ *               outside the NAND controller pipeline.
+ *   - pipelined: The ECC engine is inside the NAND pipeline, i.e. on the
+ *                controller's side. This is the case for most raw NAND
+ *                controllers. In the pipelined case, the ECC bytes are
+ *                generated and the data corrected on the fly as a page is
+ *                written or read.
+ *   - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
+ *            Some NAND chips can correct the data themselves.
+ *
+ * Besides the initial setup and final cleanups, the interfaces are rather
+ * simple:
+ *   - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
+ *              the I/O request type. In case of software correction or an
+ *              external engine, this step may involve deriving the ECC bytes
+ *              and placing them in the OOB area before a write.
+ *   - finish: Finish an I/O request. Correct the data in case of a read
+ *             request and report the number of corrected bits/uncorrectable
+ *             errors. Most likely empty for write operations, unless you have
+ *             hardware specific stuff to do, like shutting down the engine to
+ *             save power.
+ *
+ * The I/O request should be enclosed in a prepare()/finish() pair of calls
+ * and will behave differently depending on the requested I/O type:
+ *   - raw: Correction disabled
+ *   - ecc: Correction enabled
+ *
+ * The request direction impacts the logic as well:
+ *   - read: Load data from the NAND chip
+ *   - write: Store data in the NAND chip
+ *
+ * Mixing all these combinations together gives the following behaviors.
+ * Those are just examples; drivers are free to add custom steps in their
+ * prepare/finish hooks.
+ *
+ * [external ECC engine]
+ *   - external + prepare + raw + read: do nothing
+ *   - external + finish  + raw + read: do nothing
+ *   - external + prepare + raw + write: do nothing
+ *   - external + finish  + raw + write: do nothing
+ *   - external + prepare + ecc + read: do nothing
+ *   - external + finish  + ecc + read: calculate expected ECC bytes, extract
+ *                                      ECC bytes from OOB buffer, correct
+ *                                      and report any bitflip/error
+ *   - external + prepare + ecc + write: calculate ECC bytes and store them at
+ *                                       the right place in the OOB buffer based
+ *                                       on the OOB layout
+ *   - external + finish  + ecc + write: do nothing
+ *
+ * [pipelined ECC engine]
+ *   - pipelined + prepare + raw + read: disable the controller's ECC engine if
+ *                                       activated
+ *   - pipelined + finish  + raw + read: do nothing
+ *   - pipelined + prepare + raw + write: disable the controller's ECC engine if
+ *                                        activated
+ *   - pipelined + finish  + raw + write: do nothing
+ *   - pipelined + prepare + ecc + read: enable the controller's ECC engine if
+ *                                       deactivated
+ *   - pipelined + finish  + ecc + read: check the status, report any
+ *                                       error/bitflip
+ *   - pipelined + prepare + ecc + write: enable the controller's ECC engine if
+ *                                        deactivated
+ *   - pipelined + finish  + ecc + write: do nothing
+ *
+ * [ondie ECC engine]
+ *   - ondie + prepare + raw + read: send commands to disable the on-chip ECC
+ *                                   engine if activated
+ *   - ondie + finish  + raw + read: do nothing
+ *   - ondie + prepare + raw + write: send commands to disable the on-chip ECC
+ *                                    engine if activated
+ *   - ondie + finish  + raw + write: do nothing
+ *   - ondie + prepare + ecc + read: send commands to enable the on-chip ECC
+ *                                   engine if deactivated
+ *   - ondie + finish  + ecc + read: send commands to check the status, report
+ *                                   any error/bitflip
+ *   - ondie + prepare + ecc + write: send commands to enable the on-chip ECC
+ *                                    engine if deactivated
+ *   - ondie + finish  + ecc + write: do nothing
+ */
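
A minimal sketch of an engine provider sitting behind these hooks; the driver name and callback bodies are hypothetical, but the four callbacks are exactly the ones struct nand_ecc_engine_ops declares and that this file dispatches to:

static int my_ecc_init_ctx(struct nand_device *nand)
{
	/* allocate per-chip state, pick step size/strength, set an OOB layout */
	return 0;
}

static void my_ecc_cleanup_ctx(struct nand_device *nand)
{
	/* free whatever init_ctx() allocated */
}

static int my_ecc_prepare_io_req(struct nand_device *nand,
				 struct nand_page_io_req *req)
{
	/* e.g. for a pipelined engine: enable/disable the HW from req->mode */
	return 0;
}

static int my_ecc_finish_io_req(struct nand_device *nand,
				struct nand_page_io_req *req)
{
	/* on reads: check status, return max bitflips or -EBADMSG */
	return 0;
}

static const struct nand_ecc_engine_ops my_ecc_engine_ops = {
	.init_ctx = my_ecc_init_ctx,
	.cleanup_ctx = my_ecc_cleanup_ctx,
	.prepare_io_req = my_ecc_prepare_io_req,
	.finish_io_req = my_ecc_finish_io_req,
};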
+
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+
+/**
+ * nand_ecc_init_ctx - Init the ECC engine context
+ * @nand: the NAND device
+ *
+ * On success, the caller is responsible for calling @nand_ecc_cleanup_ctx().
+ */
+int nand_ecc_init_ctx(struct nand_device *nand)
+{
+	if (!nand->ecc.engine->ops->init_ctx)
+		return 0;
+
+	return nand->ecc.engine->ops->init_ctx(nand);
+}
+EXPORT_SYMBOL(nand_ecc_init_ctx);
+
+/**
+ * nand_ecc_cleanup_ctx - Cleanup the ECC engine context
+ * @nand: the NAND device
+ */
+void nand_ecc_cleanup_ctx(struct nand_device *nand)
+{
+	if (nand->ecc.engine->ops->cleanup_ctx)
+		nand->ecc.engine->ops->cleanup_ctx(nand);
+}
+EXPORT_SYMBOL(nand_ecc_cleanup_ctx);
+
+/**
+ * nand_ecc_prepare_io_req - Prepare an I/O request
+ * @nand: the NAND device
+ * @req: the I/O request
+ */
+int nand_ecc_prepare_io_req(struct nand_device *nand,
+			    struct nand_page_io_req *req)
+{
+	if (!nand->ecc.engine->ops->prepare_io_req)
+		return 0;
+
+	return nand->ecc.engine->ops->prepare_io_req(nand, req);
+}
+EXPORT_SYMBOL(nand_ecc_prepare_io_req);
+
+/**
+ * nand_ecc_finish_io_req - Finish an I/O request
+ * @nand: the NAND device
+ * @req: the I/O request
+ */
+int nand_ecc_finish_io_req(struct nand_device *nand,
+			   struct nand_page_io_req *req)
+{
+	if (!nand->ecc.engine->ops->finish_io_req)
+		return 0;
+
+	return nand->ecc.engine->ops->finish_io_req(nand, req);
+}
+EXPORT_SYMBOL(nand_ecc_finish_io_req);
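
A simplified sketch of how a core I/O path is expected to bracket a request with the two helpers above (real callers do more unwinding on error):

static int do_page_io(struct nand_device *nand, struct nand_page_io_req *req)
{
	int ret;

	ret = nand_ecc_prepare_io_req(nand, req);
	if (ret)
		return ret;

	/* ... issue the actual page access to the chip here ... */

	return nand_ecc_finish_io_req(nand, req);
}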
+
+/* Define default OOB placement schemes for large and small page devices */
+static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+
+	if (section > 1)
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 0;
+		if (mtd->oobsize == 16)
+			oobregion->length = 4;
+		else
+			oobregion->length = 3;
+	} else {
+		if (mtd->oobsize == 8)
+			return -ERANGE;
+
+		oobregion->offset = 6;
+		oobregion->length = total_ecc_bytes - 4;
+	}
+
+	return 0;
+}
+
+static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section > 1)
+		return -ERANGE;
+
+	if (mtd->oobsize == 16) {
+		if (section)
+			return -ERANGE;
+
+		oobregion->length = 8;
+		oobregion->offset = 8;
+	} else {
+		oobregion->length = 2;
+		if (!section)
+			oobregion->offset = 3;
+		else
+			oobregion->offset = 6;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
+	.ecc = nand_ooblayout_ecc_sp,
+	.free = nand_ooblayout_free_sp,
+};
+
+const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void)
+{
+	return &nand_ooblayout_sp_ops;
+}
+EXPORT_SYMBOL_GPL(nand_get_small_page_ooblayout);
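
Worked example: on a small-page chip with a 16-byte OOB area and 6 ECC bytes in total, the two callbacks above place ECC at bytes 0-3 (section 0) and 6-7 (section 1) and leave bytes 8-15 free, which is the classic small-page layout. A driver can walk any such layout through the generic accessor (a sketch; mtd is assumed to be an already-probed device):

static void my_dump_ecc_layout(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section = 0;

	while (!mtd_ooblayout_ecc(mtd, section, &region)) {
		pr_info("ECC section %d: offset %u, length %u\n",
			section, region.offset, region.length);
		section++;
	}
}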
+
+static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+
+	if (section || !total_ecc_bytes)
+		return -ERANGE;
+
+	oobregion->length = total_ecc_bytes;
+	oobregion->offset = mtd->oobsize - oobregion->length;
+
+	return 0;
+}
+
+static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = mtd->oobsize - total_ecc_bytes - 2;
+	oobregion->offset = 2;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
+	.ecc = nand_ooblayout_ecc_lp,
+	.free = nand_ooblayout_free_lp,
+};
+
+const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void)
+{
+	return &nand_ooblayout_lp_ops;
+}
+EXPORT_SYMBOL_GPL(nand_get_large_page_ooblayout);
+
+/*
+ * Support the old "large page" layout used for 1-bit Hamming ECC where the
+ * ECC bytes are placed at a fixed offset.
+ */
+static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
+					 struct mtd_oob_region *oobregion)
+{
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+
+	if (section)
+		return -ERANGE;
+
+	switch (mtd->oobsize) {
+	case 64:
+		oobregion->offset = 40;
+		break;
+	case 128:
+		oobregion->offset = 80;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	oobregion->length = total_ecc_bytes;
+	if (oobregion->offset + oobregion->length > mtd->oobsize)
+		return -ERANGE;
+
+	return 0;
+}
+
+static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
+					  struct mtd_oob_region *oobregion)
+{
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+	int ecc_offset = 0;
+
+	if (section < 0 || section > 1)
+		return -ERANGE;
+
+	switch (mtd->oobsize) {
+	case 64:
+		ecc_offset = 40;
+		break;
+	case 128:
+		ecc_offset = 80;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (section == 0) {
+		oobregion->offset = 2;
+		oobregion->length = ecc_offset - 2;
+	} else {
+		oobregion->offset = ecc_offset + total_ecc_bytes;
+		oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
+	.ecc = nand_ooblayout_ecc_lp_hamming,
+	.free = nand_ooblayout_free_lp_hamming,
+};
+
+const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void)
+{
+	return &nand_ooblayout_lp_hamming_ops;
+}
+EXPORT_SYMBOL_GPL(nand_get_large_page_hamming_ooblayout);
+
+static enum nand_ecc_engine_type
+of_get_nand_ecc_engine_type(struct device_node *np)
+{
+	struct device_node *eng_np;
+
+	if (of_property_read_bool(np, "nand-no-ecc-engine"))
+		return NAND_ECC_ENGINE_TYPE_NONE;
+
+	if (of_property_read_bool(np, "nand-use-soft-ecc-engine"))
+		return NAND_ECC_ENGINE_TYPE_SOFT;
+
+	eng_np = of_parse_phandle(np, "nand-ecc-engine", 0);
+	of_node_put(eng_np);
+
+	if (eng_np) {
+		if (eng_np == np)
+			return NAND_ECC_ENGINE_TYPE_ON_DIE;
+		else
+			return NAND_ECC_ENGINE_TYPE_ON_HOST;
+	}
+
+	return NAND_ECC_ENGINE_TYPE_INVALID;
+}
+
+static const char * const nand_ecc_placement[] = {
+	[NAND_ECC_PLACEMENT_OOB] = "oob",
+	[NAND_ECC_PLACEMENT_INTERLEAVED] = "interleaved",
+};
+
+static enum nand_ecc_placement of_get_nand_ecc_placement(struct device_node *np)
+{
+	enum nand_ecc_placement placement;
+	const char *pm;
+	int err;
+
+	err = of_property_read_string(np, "nand-ecc-placement", &pm);
+	if (!err) {
+		for (placement = NAND_ECC_PLACEMENT_OOB;
+		     placement < ARRAY_SIZE(nand_ecc_placement); placement++) {
+			if (!strcasecmp(pm, nand_ecc_placement[placement]))
+				return placement;
+		}
+	}
+
+	return NAND_ECC_PLACEMENT_UNKNOWN;
+}
+
+static const char * const nand_ecc_algos[] = {
+	[NAND_ECC_ALGO_HAMMING] = "hamming",
+	[NAND_ECC_ALGO_BCH] = "bch",
+	[NAND_ECC_ALGO_RS] = "rs",
+};
+
+static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
+{
+	enum nand_ecc_algo ecc_algo;
+	const char *pm;
+	int err;
+
+	err = of_property_read_string(np, "nand-ecc-algo", &pm);
+	if (!err) {
+		for (ecc_algo = NAND_ECC_ALGO_HAMMING;
+		     ecc_algo < ARRAY_SIZE(nand_ecc_algos);
+		     ecc_algo++) {
+			if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
+				return ecc_algo;
+		}
+	}
+
+	return NAND_ECC_ALGO_UNKNOWN;
+}
+
+static int of_get_nand_ecc_step_size(struct device_node *np)
+{
+	int ret;
+	u32 val;
+
+	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
+	return ret ? ret : val;
+}
+
+static int of_get_nand_ecc_strength(struct device_node *np)
+{
+	int ret;
+	u32 val;
+
+	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
+	return ret ? ret : val;
+}
+
+void of_get_nand_ecc_user_config(struct nand_device *nand)
+{
+	struct device_node *dn = nanddev_get_of_node(nand);
+	int strength, size;
+
+	nand->ecc.user_conf.engine_type = of_get_nand_ecc_engine_type(dn);
+	nand->ecc.user_conf.algo = of_get_nand_ecc_algo(dn);
+	nand->ecc.user_conf.placement = of_get_nand_ecc_placement(dn);
+
+	strength = of_get_nand_ecc_strength(dn);
+	if (strength >= 0)
+		nand->ecc.user_conf.strength = strength;
+
+	size = of_get_nand_ecc_step_size(dn);
+	if (size >= 0)
+		nand->ecc.user_conf.step_size = size;
+
+	if (of_property_read_bool(dn, "nand-ecc-maximize"))
+		nand->ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
+}
+EXPORT_SYMBOL(of_get_nand_ecc_user_config);
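
A hypothetical probe-time consumer, showing where the parsed device-tree preferences end up (the user_conf fields are the ones populated above):

static void my_report_user_ecc(struct nand_device *nand)
{
	of_get_nand_ecc_user_config(nand);

	if (nand->ecc.user_conf.engine_type == NAND_ECC_ENGINE_TYPE_SOFT)
		pr_info("DT requests soft ECC, %u bits per %u bytes\n",
			nand->ecc.user_conf.strength,
			nand->ecc.user_conf.step_size);
}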
+
+/**
+ * nand_ecc_is_strong_enough - Check if the chip configuration meets the
+ *                             datasheet requirements.
+ *
+ * @nand: Device to check
+ *
+ * If our configuration corrects A bits per B bytes and the minimum
+ * required correction level is X bits per Y bytes, then we must ensure
+ * both of the following are true:
+ *
+ * (1) A / B >= X / Y
+ * (2) A >= X
+ *
+ * Requirement (1) ensures we can correct for the required bitflip density.
+ * Requirement (2) ensures we can correct even when all bitflips are clumped
+ * in the same sector.
+ */
+bool nand_ecc_is_strong_enough(struct nand_device *nand)
+{
+	const struct nand_ecc_props *reqs = nanddev_get_ecc_requirements(nand);
+	const struct nand_ecc_props *conf = nanddev_get_ecc_conf(nand);
+	struct mtd_info *mtd = nanddev_to_mtd(nand);
+	int corr, ds_corr;
+
+	if (conf->step_size == 0 || reqs->step_size == 0)
+		/* Not enough information */
+		return true;
+
+	/*
+	 * We get the number of corrected bits per page to compare
+	 * the correction density.
+	 */
+	corr = (mtd->writesize * conf->strength) / conf->step_size;
+	ds_corr = (mtd->writesize * reqs->strength) / reqs->step_size;
+
+	return corr >= ds_corr && conf->strength >= reqs->strength;
+}
+EXPORT_SYMBOL(nand_ecc_is_strong_enough);
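
Worked example: with 4096-byte pages, a configuration correcting 8 bits per 512 bytes gives corr = 4096 * 8 / 512 = 64 bits per page, while a datasheet minimum of 4 bits per 512 bytes gives ds_corr = 32. Since 64 >= 32 and 8 >= 4, both requirements hold and the configuration is deemed strong enough.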
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Generic ECC engine");
diff --git a/drivers/mtd/nand/onenand/Kconfig b/drivers/mtd/nand/onenand/Kconfig
index ae0b8fe..1a0e65b 100644
--- a/drivers/mtd/nand/onenand/Kconfig
+++ b/drivers/mtd/nand/onenand/Kconfig
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 menuconfig MTD_ONENAND
 	tristate "OneNAND Device Support"
-	depends on MTD
 	depends on HAS_IOMEM
 	help
 	  This enables support for accessing all type of OneNAND flash
@@ -25,7 +24,7 @@
 
 config MTD_ONENAND_OMAP2
 	tristate "OneNAND on OMAP2/OMAP3 support"
-	depends on ARCH_OMAP2 || ARCH_OMAP3
+	depends on ARCH_OMAP2 || ARCH_OMAP3 || (COMPILE_TEST && ARM)
 	depends on OF || COMPILE_TEST
 	help
 	  Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
@@ -33,12 +32,12 @@
 	  Enable dmaengine and gpiolib for better performance.
 
 config MTD_ONENAND_SAMSUNG
-        tristate "OneNAND on Samsung SOC controller support"
-        depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4
-        help
-          Support for a OneNAND flash device connected to an Samsung SOC.
-          S3C64XX uses command mapping method.
-          S5PC110/S5PC210 use generic OneNAND method.
+	tristate "OneNAND on Samsung SOC controller support"
+	depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4 || COMPILE_TEST
+	help
+	  Support for a OneNAND flash device connected to a Samsung SOC.
+	  S3C64XX uses command mapping method.
+	  S5PC110/S5PC210 use generic OneNAND method.
 
 config MTD_ONENAND_OTP
 	bool "OneNAND OTP Support"
diff --git a/drivers/mtd/nand/onenand/Makefile b/drivers/mtd/nand/onenand/Makefile
index f8b624a..a0761c7 100644
--- a/drivers/mtd/nand/onenand/Makefile
+++ b/drivers/mtd/nand/onenand/Makefile
@@ -8,7 +8,7 @@
 
 # Board specific.
 obj-$(CONFIG_MTD_ONENAND_GENERIC)	+= generic.o
-obj-$(CONFIG_MTD_ONENAND_OMAP2)		+= omap2.o
-obj-$(CONFIG_MTD_ONENAND_SAMSUNG)       += samsung.o
+obj-$(CONFIG_MTD_ONENAND_OMAP2)		+= onenand_omap2.o
+obj-$(CONFIG_MTD_ONENAND_SAMSUNG)       += onenand_samsung.o
 
 onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index 9e81cd9..188b806 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -1052,16 +1052,11 @@
 				int thislen)
 {
 	struct onenand_chip *this = mtd->priv;
-	int ret;
 
 	this->read_bufferram(mtd, ONENAND_SPARERAM, this->oob_buf, 0,
 			     mtd->oobsize);
-	ret = mtd_ooblayout_get_databytes(mtd, buf, this->oob_buf,
-					  column, thislen);
-	if (ret)
-		return ret;
-
-	return 0;
+	return mtd_ooblayout_get_databytes(mtd, buf, this->oob_buf,
+					   column, thislen);
 }
 
 /**
@@ -2853,7 +2848,7 @@
 
 		/* Exit OTP access mode */
 		this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-		this->wait(mtd, FL_RESETING);
+		this->wait(mtd, FL_RESETTING);
 
 		status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
 		status &= 0x60;
@@ -2924,7 +2919,7 @@
 
 	/* Exit OTP access mode */
 	this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-	this->wait(mtd, FL_RESETING);
+	this->wait(mtd, FL_RESETTING);
 
 	return ret;
 }
@@ -2968,7 +2963,7 @@
 
 	/* Exit OTP access mode */
 	this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-	this->wait(mtd, FL_RESETING);
+	this->wait(mtd, FL_RESETTING);
 
 	return ret;
 }
@@ -3008,7 +3003,7 @@
 
 		/* Exit OTP access mode */
 		this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-		this->wait(mtd, FL_RESETING);
+		this->wait(mtd, FL_RESETTING);
 	} else {
 		ops.mode = MTD_OPS_PLACE_OOB;
 		ops.ooblen = len;
@@ -3259,7 +3254,7 @@
 	switch (density) {
 	case ONENAND_DEVICE_DENSITY_8Gb:
 		this->options |= ONENAND_HAS_NOP_1;
-		/* fall through */
+		fallthrough;
 	case ONENAND_DEVICE_DENSITY_4Gb:
 		if (ONENAND_IS_DDP(this))
 			this->options |= ONENAND_HAS_2PLANE;
@@ -3413,7 +3408,7 @@
 		this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
 
 		this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-		this->wait(mtd, FL_RESETING);
+		this->wait(mtd, FL_RESETTING);
 
 		printk(KERN_INFO "Die %d boundary: %d%s\n", die,
 		       this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
@@ -3635,7 +3630,7 @@
 	ret = this->wait(mtd, FL_WRITING);
 out:
 	this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
-	this->wait(mtd, FL_RESETING);
+	this->wait(mtd, FL_RESETTING);
 	if (!ret)
 		/* Recalculate device size on boundary change*/
 		flexonenand_get_size(mtd);
@@ -3671,7 +3666,7 @@
 	/* Reset OneNAND to read default register values */
 	this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM);
 	/* Wait reset */
-	this->wait(mtd, FL_RESETING);
+	this->wait(mtd, FL_RESETTING);
 
 	/* Restore system configuration 1 */
 	this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c
similarity index 97%
rename from drivers/mtd/nand/onenand/omap2.c
rename to drivers/mtd/nand/onenand/onenand_omap2.c
index 71a632b..d8c0bd0 100644
--- a/drivers/mtd/nand/onenand/omap2.c
+++ b/drivers/mtd/nand/onenand/onenand_omap2.c
@@ -148,13 +148,13 @@
 	unsigned long timeout;
 	u32 syscfg;
 
-	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
+	if (state == FL_RESETTING || state == FL_PREPARING_ERASE ||
 	    state == FL_VERIFYING_ERASE) {
 		int i = 21;
 		unsigned int intr_flags = ONENAND_INT_MASTER;
 
 		switch (state) {
-		case FL_RESETING:
+		case FL_RESETTING:
 			intr_flags |= ONENAND_INT_RESET;
 			break;
 		case FL_PREPARING_ERASE:
@@ -376,7 +376,7 @@
 	 * context fallback to PIO mode.
 	 */
 	if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
-	    count < 384 || in_interrupt() || oops_in_progress )
+	    count < 384 || in_interrupt() || oops_in_progress)
 		goto out_copy;
 
 	xtra = count & 3;
@@ -423,7 +423,7 @@
 	 * context fallback to PIO mode.
 	 */
 	if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
-	    count < 384 || in_interrupt() || oops_in_progress )
+	    count < 384 || in_interrupt() || oops_in_progress)
 		goto out_copy;
 
 	dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
@@ -494,11 +494,8 @@
 
 	c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
 	if (IS_ERR(c->int_gpiod)) {
-		r = PTR_ERR(c->int_gpiod);
 		/* Just try again if this happens */
-		if (r != -EPROBE_DEFER)
-			dev_err(dev, "error getting gpio: %d\n", r);
-		return r;
+		return dev_err_probe(dev, PTR_ERR(c->int_gpiod), "error getting gpio\n");
 	}
 
 	if (c->int_gpiod) {
@@ -529,7 +526,8 @@
 		 c->gpmc_cs, c->phys_base, c->onenand.base,
 		 c->dma_chan ? "DMA" : "PIO");
 
-	if ((r = onenand_scan(&c->mtd, 1)) < 0)
+	r = onenand_scan(&c->mtd, 1);
+	if (r < 0)
 		goto err_release_dma;
 
 	freq = omap2_onenand_get_freq(c->onenand.version_id);
diff --git a/drivers/mtd/nand/onenand/samsung.c b/drivers/mtd/nand/onenand/onenand_samsung.c
similarity index 98%
rename from drivers/mtd/nand/onenand/samsung.c
rename to drivers/mtd/nand/onenand/onenand_samsung.c
index 55e5536..87b28e3 100644
--- a/drivers/mtd/nand/onenand/samsung.c
+++ b/drivers/mtd/nand/onenand/onenand_samsung.c
@@ -248,7 +248,7 @@
 	}
 
 	/* BootRAM access control */
-	if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) {
+	if ((unsigned long)addr < ONENAND_DATARAM && onenand->bootram_command) {
 		if (word_addr == 0)
 			return s3c_read_reg(MANUFACT_ID_OFFSET);
 		if (word_addr == 1)
@@ -289,7 +289,7 @@
 	}
 
 	/* BootRAM access control */
-	if ((unsigned int)addr < ONENAND_DATARAM) {
+	if ((unsigned long)addr < ONENAND_DATARAM) {
 		if (value == ONENAND_CMD_READID) {
 			onenand->bootram_command = 1;
 			return;
@@ -658,7 +658,7 @@
 		dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
 	}
 	if (dma_mapping_error(dev, dma_dst)) {
-		dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count);
+		dev_err(dev, "Couldn't map a %zu byte buffer for DMA\n", count);
 		goto normal;
 	}
 	err = s5pc110_dma_ops(dma_dst, dma_src,
@@ -675,12 +675,12 @@
 normal:
 	if (count != mtd->writesize) {
 		/* Copy the bufferram to memory to prevent unaligned access */
-		memcpy(this->page_buf, p, mtd->writesize);
-		p = this->page_buf + offset;
+		memcpy_fromio(this->page_buf, p, mtd->writesize);
+		memcpy(buffer, this->page_buf + offset, count);
+	} else {
+		memcpy_fromio(buffer, p, count);
 	}
 
-	memcpy(buffer, p, count);
-
 	return 0;
 }
 
@@ -728,13 +728,12 @@
 	struct onenand_chip *this = mtd->priv;
 	struct device *dev = &onenand->pdev->dev;
 	unsigned int block, end;
-	int tmp;
 
 	end = this->chipsize >> this->erase_shift;
 
 	for (block = 0; block < end; block++) {
 		unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
-		tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
+		s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
 
 		if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
 			dev_err(dev, "block %d is write-protected!\n", block);
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index e59de3f..6c46f25 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -12,8 +12,8 @@
 
 menuconfig MTD_RAW_NAND
 	tristate "Raw/Parallel NAND Device Support"
-	depends on MTD
 	select MTD_NAND_CORE
+	select MTD_NAND_ECC
 	select MTD_NAND_ECC_SW_HAMMING
 	help
 	  This enables support for accessing all type of raw/parallel
@@ -213,10 +213,6 @@
 	  Please check the actual NAND chip connected and its support
 	  by the MLC NAND controller.
 
-config MTD_NAND_CM_X270
-	tristate "CM-X270 modules NAND controller"
-	depends on MACH_ARMCORE
-
 config MTD_NAND_PASEMI
 	tristate "PA Semi PWRficient NAND controller"
 	depends on PPC_PASEMI
@@ -419,6 +415,7 @@
 config MTD_NAND_STM32_FMC2
 	tristate "Support for NAND controller on STM32MP SoCs"
 	depends on MACH_STM32MP157 || COMPILE_TEST
+	select MFD_SYSCON
 	help
 	  Enables support for NAND Flash chips on SoCs containing the FMC2
 	  NAND controller. This controller is found on STM32MP SoCs.
@@ -450,6 +447,21 @@
 	  devices. You will need to provide platform-specific functions
 	  via platform_data.
 
+config MTD_NAND_CADENCE
+	tristate "Support Cadence NAND (HPNFC) controller"
+	depends on (OF || COMPILE_TEST) && HAS_IOMEM
+	help
+	  Enable the driver for NAND flash on platforms using a Cadence NAND
+	  controller.
+
+config MTD_NAND_ARASAN
+	tristate "Support for Arasan NAND flash controller"
+	depends on HAS_IOMEM && HAS_DMA
+	select BCH
+	help
+	  Enables the driver for the Arasan NAND flash controller on
+	  Zynq Ultrascale+ MPSoC.
+
 comment "Misc"
 
 config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index a987219..2930f5b 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -25,7 +25,6 @@
 omap2_nand-objs := omap2.o
 obj-$(CONFIG_MTD_NAND_OMAP2) 		+= omap2_nand.o
 obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD)	+= omap_elm.o
-obj-$(CONFIG_MTD_NAND_CM_X270)		+= cmx270_nand.o
 obj-$(CONFIG_MTD_NAND_MARVELL)		+= marvell_nand.o
 obj-$(CONFIG_MTD_NAND_TMIO)		+= tmio_nand.o
 obj-$(CONFIG_MTD_NAND_PLATFORM)		+= plat_nand.o
@@ -57,6 +56,8 @@
 obj-$(CONFIG_MTD_NAND_TEGRA)		+= tegra_nand.o
 obj-$(CONFIG_MTD_NAND_STM32_FMC2)	+= stm32_fmc2_nand.o
 obj-$(CONFIG_MTD_NAND_MESON)		+= meson_nand.o
+obj-$(CONFIG_MTD_NAND_CADENCE)		+= cadence-nand-controller.o
+obj-$(CONFIG_MTD_NAND_ARASAN)		+= arasan-nand-controller.o
 
 nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
 nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index 8312182..13de39a 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -19,15 +19,17 @@
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-gpio.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/sizes.h>
 
 /*
  * MTD structure for E3 (Delta)
  */
-struct ams_delta_nand {
+struct gpio_nand {
 	struct nand_controller	base;
 	struct nand_chip	nand_chip;
 	struct gpio_desc	*gpiod_rdy;
@@ -39,41 +41,20 @@
 	struct gpio_desc	*gpiod_cle;
 	struct gpio_descs	*data_gpiods;
 	bool			data_in;
+	unsigned int		tRP;
+	unsigned int		tWP;
+	u8			(*io_read)(struct gpio_nand *this);
+	void			(*io_write)(struct gpio_nand *this, u8 byte);
 };
 
-/*
- * Define partitions for flash devices
- */
-
-static const struct mtd_partition partition_info[] = {
-	{ .name		= "Kernel",
-	  .offset	= 0,
-	  .size		= 3 * SZ_1M + SZ_512K },
-	{ .name		= "u-boot",
-	  .offset	= 3 * SZ_1M + SZ_512K,
-	  .size		= SZ_256K },
-	{ .name		= "u-boot params",
-	  .offset	= 3 * SZ_1M + SZ_512K + SZ_256K,
-	  .size		= SZ_256K },
-	{ .name		= "Amstrad LDR",
-	  .offset	= 4 * SZ_1M,
-	  .size		= SZ_256K },
-	{ .name		= "File system",
-	  .offset	= 4 * SZ_1M + 1 * SZ_256K,
-	  .size		= 27 * SZ_1M },
-	{ .name		= "PBL reserved",
-	  .offset	= 32 * SZ_1M - 3 * SZ_256K,
-	  .size		=  3 * SZ_256K },
-};
-
-static void ams_delta_write_commit(struct ams_delta_nand *priv)
+static void gpio_nand_write_commit(struct gpio_nand *priv)
 {
-	gpiod_set_value(priv->gpiod_nwe, 0);
-	ndelay(40);
 	gpiod_set_value(priv->gpiod_nwe, 1);
+	ndelay(priv->tWP);
+	gpiod_set_value(priv->gpiod_nwe, 0);
 }
 
-static void ams_delta_io_write(struct ams_delta_nand *priv, u8 byte)
+static void gpio_nand_io_write(struct gpio_nand *priv, u8 byte)
 {
 	struct gpio_descs *data_gpiods = priv->data_gpiods;
 	DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
@@ -81,10 +62,10 @@
 	gpiod_set_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
 				  data_gpiods->info, values);
 
-	ams_delta_write_commit(priv);
+	gpio_nand_write_commit(priv);
 }
 
-static void ams_delta_dir_output(struct ams_delta_nand *priv, u8 byte)
+static void gpio_nand_dir_output(struct gpio_nand *priv, u8 byte)
 {
 	struct gpio_descs *data_gpiods = priv->data_gpiods;
 	DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
@@ -94,30 +75,30 @@
 		gpiod_direction_output_raw(data_gpiods->desc[i],
 					   test_bit(i, values));
 
-	ams_delta_write_commit(priv);
+	gpio_nand_write_commit(priv);
 
 	priv->data_in = false;
 }
 
-static u8 ams_delta_io_read(struct ams_delta_nand *priv)
+static u8 gpio_nand_io_read(struct gpio_nand *priv)
 {
 	u8 res;
 	struct gpio_descs *data_gpiods = priv->data_gpiods;
 	DECLARE_BITMAP(values, BITS_PER_TYPE(res)) = { 0, };
 
-	gpiod_set_value(priv->gpiod_nre, 0);
-	ndelay(40);
+	gpiod_set_value(priv->gpiod_nre, 1);
+	ndelay(priv->tRP);
 
 	gpiod_get_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
 				  data_gpiods->info, values);
 
-	gpiod_set_value(priv->gpiod_nre, 1);
+	gpiod_set_value(priv->gpiod_nre, 0);
 
 	res = values[0];
 	return res;
 }
 
-static void ams_delta_dir_input(struct ams_delta_nand *priv)
+static void gpio_nand_dir_input(struct gpio_nand *priv)
 {
 	struct gpio_descs *data_gpiods = priv->data_gpiods;
 	int i;
@@ -128,68 +109,67 @@
 	priv->data_in = true;
 }
 
-static void ams_delta_write_buf(struct ams_delta_nand *priv, const u8 *buf,
-				int len)
+static void gpio_nand_write_buf(struct gpio_nand *priv, const u8 *buf, int len)
 {
 	int i = 0;
 
 	if (len > 0 && priv->data_in)
-		ams_delta_dir_output(priv, buf[i++]);
+		gpio_nand_dir_output(priv, buf[i++]);
 
 	while (i < len)
-		ams_delta_io_write(priv, buf[i++]);
+		priv->io_write(priv, buf[i++]);
 }
 
-static void ams_delta_read_buf(struct ams_delta_nand *priv, u8 *buf, int len)
+static void gpio_nand_read_buf(struct gpio_nand *priv, u8 *buf, int len)
 {
 	int i;
 
-	if (!priv->data_in)
-		ams_delta_dir_input(priv);
+	if (priv->data_gpiods && !priv->data_in)
+		gpio_nand_dir_input(priv);
 
 	for (i = 0; i < len; i++)
-		buf[i] = ams_delta_io_read(priv);
+		buf[i] = priv->io_read(priv);
 }
 
-static void ams_delta_ctrl_cs(struct ams_delta_nand *priv, bool assert)
+static void gpio_nand_ctrl_cs(struct gpio_nand *priv, bool assert)
 {
-	gpiod_set_value(priv->gpiod_nce, assert ? 0 : 1);
+	gpiod_set_value(priv->gpiod_nce, assert);
 }
 
-static int ams_delta_exec_op(struct nand_chip *this,
+static int gpio_nand_exec_op(struct nand_chip *this,
 			     const struct nand_operation *op, bool check_only)
 {
-	struct ams_delta_nand *priv = nand_get_controller_data(this);
+	struct gpio_nand *priv = nand_get_controller_data(this);
 	const struct nand_op_instr *instr;
 	int ret = 0;
 
 	if (check_only)
 		return 0;
 
-	ams_delta_ctrl_cs(priv, 1);
+	gpio_nand_ctrl_cs(priv, 1);
 
 	for (instr = op->instrs; instr < op->instrs + op->ninstrs; instr++) {
 		switch (instr->type) {
 		case NAND_OP_CMD_INSTR:
 			gpiod_set_value(priv->gpiod_cle, 1);
-			ams_delta_write_buf(priv, &instr->ctx.cmd.opcode, 1);
+			gpio_nand_write_buf(priv, &instr->ctx.cmd.opcode, 1);
 			gpiod_set_value(priv->gpiod_cle, 0);
 			break;
 
 		case NAND_OP_ADDR_INSTR:
 			gpiod_set_value(priv->gpiod_ale, 1);
-			ams_delta_write_buf(priv, instr->ctx.addr.addrs,
+			gpio_nand_write_buf(priv, instr->ctx.addr.addrs,
 					    instr->ctx.addr.naddrs);
 			gpiod_set_value(priv->gpiod_ale, 0);
 			break;
 
 		case NAND_OP_DATA_IN_INSTR:
-			ams_delta_read_buf(priv, instr->ctx.data.buf.in,
+			gpio_nand_read_buf(priv, instr->ctx.data.buf.in,
 					   instr->ctx.data.len);
 			break;
 
 		case NAND_OP_DATA_OUT_INSTR:
-			ams_delta_write_buf(priv, instr->ctx.data.buf.out,
+			gpio_nand_write_buf(priv, instr->ctx.data.buf.out,
 					    instr->ctx.data.len);
 			break;
 
@@ -206,28 +186,71 @@
 			break;
 	}
 
-	ams_delta_ctrl_cs(priv, 0);
+	gpio_nand_ctrl_cs(priv, 0);
 
 	return ret;
 }
 
-static const struct nand_controller_ops ams_delta_ops = {
-	.exec_op = ams_delta_exec_op,
+static int gpio_nand_setup_interface(struct nand_chip *this, int csline,
+				     const struct nand_interface_config *cf)
+{
+	struct gpio_nand *priv = nand_get_controller_data(this);
+	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf);
+	struct device *dev = &nand_to_mtd(this)->dev;
+
+	if (IS_ERR(sdr))
+		return PTR_ERR(sdr);
+
+	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+		return 0;
+
+	if (priv->gpiod_nre) {
+		priv->tRP = DIV_ROUND_UP(sdr->tRP_min, 1000);
+		dev_dbg(dev, "using %u ns read pulse width\n", priv->tRP);
+	}
+
+	priv->tWP = DIV_ROUND_UP(sdr->tWP_min, 1000);
+	dev_dbg(dev, "using %u ns write pulse width\n", priv->tWP);
+
+	return 0;
+}
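
Worked example: the SDR timing fields are expressed in picoseconds, so for a chip requiring tWP_min = 50000 ps (ONFI timing mode 0), DIV_ROUND_UP(50000, 1000) yields a 50 ns write pulse for the ndelay() calls above.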
+
+static int gpio_nand_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+	return 0;
+}
+
+static const struct nand_controller_ops gpio_nand_ops = {
+	.exec_op = gpio_nand_exec_op,
+	.attach_chip = gpio_nand_attach_chip,
+	.setup_interface = gpio_nand_setup_interface,
 };
 
 /*
  * Main initialization routine
  */
-static int ams_delta_init(struct platform_device *pdev)
+static int gpio_nand_probe(struct platform_device *pdev)
 {
-	struct ams_delta_nand *priv;
+	struct gpio_nand_platdata *pdata = dev_get_platdata(&pdev->dev);
+	const struct mtd_partition *partitions = NULL;
+	int num_partitions = 0;
+	struct gpio_nand *priv;
 	struct nand_chip *this;
 	struct mtd_info *mtd;
-	struct gpio_descs *data_gpiods;
+	int (*probe)(struct platform_device *pdev, struct gpio_nand *priv);
 	int err = 0;
 
+	if (pdata) {
+		partitions = pdata->parts;
+		num_partitions = pdata->num_parts;
+	}
+
 	/* Allocate memory for MTD device structure and private data */
-	priv = devm_kzalloc(&pdev->dev, sizeof(struct ams_delta_nand),
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct gpio_nand),
 			    GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
@@ -238,6 +261,7 @@
 	mtd->dev.parent = &pdev->dev;
 
 	nand_set_controller_data(this, priv);
+	nand_set_flash_node(this, pdev->dev.of_node);
 
 	priv->gpiod_rdy = devm_gpiod_get_optional(&pdev->dev, "rdy", GPIOD_IN);
 	if (IS_ERR(priv->gpiod_rdy)) {
@@ -246,34 +270,35 @@
 		return err;
 	}
 
-	this->ecc.mode = NAND_ECC_SOFT;
-	this->ecc.algo = NAND_ECC_HAMMING;
-
 	platform_set_drvdata(pdev, priv);
 
-	/* Set chip enabled, but  */
-	priv->gpiod_nwp = devm_gpiod_get(&pdev->dev, "nwp", GPIOD_OUT_HIGH);
+	/* Set chip enabled but write protected */
+	priv->gpiod_nwp = devm_gpiod_get_optional(&pdev->dev, "nwp",
+						  GPIOD_OUT_HIGH);
 	if (IS_ERR(priv->gpiod_nwp)) {
 		err = PTR_ERR(priv->gpiod_nwp);
 		dev_err(&pdev->dev, "NWP GPIO request failed (%d)\n", err);
 		return err;
 	}
 
-	priv->gpiod_nce = devm_gpiod_get(&pdev->dev, "nce", GPIOD_OUT_HIGH);
+	priv->gpiod_nce = devm_gpiod_get_optional(&pdev->dev, "nce",
+						  GPIOD_OUT_LOW);
 	if (IS_ERR(priv->gpiod_nce)) {
 		err = PTR_ERR(priv->gpiod_nce);
 		dev_err(&pdev->dev, "NCE GPIO request failed (%d)\n", err);
 		return err;
 	}
 
-	priv->gpiod_nre = devm_gpiod_get(&pdev->dev, "nre", GPIOD_OUT_HIGH);
+	priv->gpiod_nre = devm_gpiod_get_optional(&pdev->dev, "nre",
+						  GPIOD_OUT_LOW);
 	if (IS_ERR(priv->gpiod_nre)) {
 		err = PTR_ERR(priv->gpiod_nre);
 		dev_err(&pdev->dev, "NRE GPIO request failed (%d)\n", err);
 		return err;
 	}
 
-	priv->gpiod_nwe = devm_gpiod_get(&pdev->dev, "nwe", GPIOD_OUT_HIGH);
+	priv->gpiod_nwe = devm_gpiod_get_optional(&pdev->dev, "nwe",
+						  GPIOD_OUT_LOW);
 	if (IS_ERR(priv->gpiod_nwe)) {
 		err = PTR_ERR(priv->gpiod_nwe);
 		dev_err(&pdev->dev, "NWE GPIO request failed (%d)\n", err);
@@ -295,28 +320,69 @@
 	}
 
 	/* Request array of data pins, initialize them as input */
-	data_gpiods = devm_gpiod_get_array(&pdev->dev, "data", GPIOD_IN);
-	if (IS_ERR(data_gpiods)) {
-		err = PTR_ERR(data_gpiods);
+	priv->data_gpiods = devm_gpiod_get_array_optional(&pdev->dev, "data",
+							  GPIOD_IN);
+	if (IS_ERR(priv->data_gpiods)) {
+		err = PTR_ERR(priv->data_gpiods);
 		dev_err(&pdev->dev, "data GPIO request failed: %d\n", err);
 		return err;
 	}
-	priv->data_gpiods = data_gpiods;
-	priv->data_in = true;
+	if (priv->data_gpiods) {
+		if (!priv->gpiod_nwe) {
+			dev_err(&pdev->dev,
+				"mandatory NWE pin not provided by platform\n");
+			return -ENODEV;
+		}
 
-	/* Initialize the NAND controller object embedded in ams_delta_nand. */
-	priv->base.ops = &ams_delta_ops;
+		priv->io_read = gpio_nand_io_read;
+		priv->io_write = gpio_nand_io_write;
+		priv->data_in = true;
+	}
+
+	if (pdev->id_entry)
+		probe = (void *) pdev->id_entry->driver_data;
+	else
+		probe = of_device_get_match_data(&pdev->dev);
+	if (probe)
+		err = probe(pdev, priv);
+	if (err)
+		return err;
+
+	if (!priv->io_read || !priv->io_write) {
+		dev_err(&pdev->dev, "incomplete device configuration\n");
+		return -ENODEV;
+	}
+
+	/* Initialize the NAND controller object embedded in gpio_nand. */
+	priv->base.ops = &gpio_nand_ops;
 	nand_controller_init(&priv->base);
 	this->controller = &priv->base;
 
+	/*
+	 * FIXME: We should release write protection only after nand_scan() to
+	 * be on the safe side but we can't do that until we have a generic way
+	 * to assert/deassert WP from the core.  Even if the core shouldn't
+	 * write things in the nand_scan() path, it should have control over this
+	 * pin just in case we ever need to disable write protection during
+	 * chip detection/initialization.
+	 */
+	/* Release write protection */
+	gpiod_set_value(priv->gpiod_nwp, 0);
+
+	/*
+	 * This driver assumes that the default ECC engine should be TYPE_SOFT.
+	 * Set ->engine_type before registering the NAND devices in order to
+	 * provide a driver specific default value.
+	 */
+	this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
 	/* Scan to find existence of the device */
 	err = nand_scan(this, 1);
 	if (err)
 		return err;
 
 	/* Register the partitions */
-	err = mtd_device_register(mtd, partition_info,
-				  ARRAY_SIZE(partition_info));
+	err = mtd_device_register(mtd, partitions, num_partitions);
 	if (err)
 		goto err_nand_cleanup;
 
@@ -331,26 +397,52 @@
 /*
  * Clean up routine
  */
-static int ams_delta_cleanup(struct platform_device *pdev)
+static int gpio_nand_remove(struct platform_device *pdev)
 {
-	struct ams_delta_nand *priv = platform_get_drvdata(pdev);
+	struct gpio_nand *priv = platform_get_drvdata(pdev);
 	struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
+	int ret;
+
+	/* Apply write protection */
+	gpiod_set_value(priv->gpiod_nwp, 1);
 
 	/* Unregister device */
-	nand_release(mtd_to_nand(mtd));
+	ret = mtd_device_unregister(mtd);
+	WARN_ON(ret);
+	nand_cleanup(mtd_to_nand(mtd));
 
 	return 0;
 }
 
-static struct platform_driver ams_delta_nand_driver = {
-	.probe		= ams_delta_init,
-	.remove		= ams_delta_cleanup,
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_of_id_table[] = {
+	{
+		/* sentinel */
+	},
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
+#endif
+
+static const struct platform_device_id gpio_nand_plat_id_table[] = {
+	{
+		.name	= "ams-delta-nand",
+	}, {
+		/* sentinel */
+	},
+};
+MODULE_DEVICE_TABLE(platform, gpio_nand_plat_id_table);
+
+static struct platform_driver gpio_nand_driver = {
+	.probe		= gpio_nand_probe,
+	.remove		= gpio_nand_remove,
+	.id_table	= gpio_nand_plat_id_table,
 	.driver		= {
 		.name	= "ams-delta-nand",
+		.of_match_table = of_match_ptr(gpio_nand_of_id_table),
 	},
 };
 
-module_platform_driver(ams_delta_nand_driver);
+module_platform_driver(gpio_nand_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
new file mode 100644
index 0000000..0ee3192
--- /dev/null
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -0,0 +1,1321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arasan NAND Flash Controller Driver
+ *
+ * Copyright (C) 2014 - 2020 Xilinx, Inc.
+ * Author:
+ *   Miquel Raynal <miquel.raynal@bootlin.com>
+ * Original work (fully rewritten):
+ *   Punnaiah Choudary Kalluri <punnaia@xilinx.com>
+ *   Naga Sureshkumar Relli <nagasure@xilinx.com>
+ */
+
+#include <linux/bch.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define PKT_REG				0x00
+#define   PKT_SIZE(x)			FIELD_PREP(GENMASK(10, 0), (x))
+#define   PKT_STEPS(x)			FIELD_PREP(GENMASK(23, 12), (x))
+
+#define MEM_ADDR1_REG			0x04
+
+#define MEM_ADDR2_REG			0x08
+#define   ADDR2_STRENGTH(x)		FIELD_PREP(GENMASK(27, 25), (x))
+#define   ADDR2_CS(x)			FIELD_PREP(GENMASK(31, 30), (x))
+
+#define CMD_REG				0x0C
+#define   CMD_1(x)			FIELD_PREP(GENMASK(7, 0), (x))
+#define   CMD_2(x)			FIELD_PREP(GENMASK(15, 8), (x))
+#define   CMD_PAGE_SIZE(x)		FIELD_PREP(GENMASK(25, 23), (x))
+#define   CMD_DMA_ENABLE		BIT(27)
+#define   CMD_NADDRS(x)			FIELD_PREP(GENMASK(30, 28), (x))
+#define   CMD_ECC_ENABLE		BIT(31)
+
+#define PROG_REG			0x10
+#define   PROG_PGRD			BIT(0)
+#define   PROG_ERASE			BIT(2)
+#define   PROG_STATUS			BIT(3)
+#define   PROG_PGPROG			BIT(4)
+#define   PROG_RDID			BIT(6)
+#define   PROG_RDPARAM			BIT(7)
+#define   PROG_RST			BIT(8)
+#define   PROG_GET_FEATURE		BIT(9)
+#define   PROG_SET_FEATURE		BIT(10)
+
+#define INTR_STS_EN_REG			0x14
+#define INTR_SIG_EN_REG			0x18
+#define INTR_STS_REG			0x1C
+#define   WRITE_READY			BIT(0)
+#define   READ_READY			BIT(1)
+#define   XFER_COMPLETE			BIT(2)
+#define   DMA_BOUNDARY			BIT(6)
+#define   EVENT_MASK			GENMASK(7, 0)
+
+#define READY_STS_REG			0x20
+
+#define DMA_ADDR0_REG			0x50
+#define DMA_ADDR1_REG			0x24
+
+#define FLASH_STS_REG			0x28
+
+#define DATA_PORT_REG			0x30
+
+#define ECC_CONF_REG			0x34
+#define   ECC_CONF_COL(x)		FIELD_PREP(GENMASK(15, 0), (x))
+#define   ECC_CONF_LEN(x)		FIELD_PREP(GENMASK(26, 16), (x))
+#define   ECC_CONF_BCH_EN		BIT(27)
+
+#define ECC_ERR_CNT_REG			0x38
+#define   GET_PKT_ERR_CNT(x)		FIELD_GET(GENMASK(7, 0), (x))
+#define   GET_PAGE_ERR_CNT(x)		FIELD_GET(GENMASK(16, 8), (x))
+
+#define ECC_SP_REG			0x3C
+#define   ECC_SP_CMD1(x)		FIELD_PREP(GENMASK(7, 0), (x))
+#define   ECC_SP_CMD2(x)		FIELD_PREP(GENMASK(15, 8), (x))
+#define   ECC_SP_ADDRS(x)		FIELD_PREP(GENMASK(30, 28), (x))
+
+#define ECC_1ERR_CNT_REG		0x40
+#define ECC_2ERR_CNT_REG		0x44
+
+#define DATA_INTERFACE_REG		0x6C
+#define   DIFACE_SDR_MODE(x)		FIELD_PREP(GENMASK(2, 0), (x))
+#define   DIFACE_DDR_MODE(x)		FIELD_PREP(GENMASK(5, 3), (x))
+#define   DIFACE_SDR			0
+#define   DIFACE_NVDDR			BIT(9)
+
+#define ANFC_MAX_CS			2
+#define ANFC_DFLT_TIMEOUT_US		1000000
+#define ANFC_MAX_CHUNK_SIZE		SZ_1M
+#define ANFC_MAX_PARAM_SIZE		SZ_4K
+#define ANFC_MAX_STEPS			SZ_2K
+#define ANFC_MAX_PKT_SIZE		(SZ_2K - 1)
+#define ANFC_MAX_ADDR_CYC		5U
+#define ANFC_RSVD_ECC_BYTES		21
+
+#define ANFC_XLNX_SDR_DFLT_CORE_CLK	100000000
+#define ANFC_XLNX_SDR_HS_CORE_CLK	80000000
+
+/**
+ * struct anfc_op - Defines how to execute an operation
+ * @pkt_reg: Packet register
+ * @addr1_reg: Memory address 1 register
+ * @addr2_reg: Memory address 2 register
+ * @cmd_reg: Command register
+ * @prog_reg: Program register
+ * @steps: Number of "packets" to read/write
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @len: Data transfer length
+ * @read: Data transfer direction from the controller's point of view
+ * @buf: Data buffer
+ */
+struct anfc_op {
+	u32 pkt_reg;
+	u32 addr1_reg;
+	u32 addr2_reg;
+	u32 cmd_reg;
+	u32 prog_reg;
+	int steps;
+	unsigned int rdy_timeout_ms;
+	unsigned int len;
+	bool read;
+	u8 *buf;
+};
+
+/**
+ * struct anand - Defines the NAND chip related information
+ * @node:		Used to store NAND chips into a list
+ * @chip:		NAND chip information structure
+ * @cs:			Chip select line
+ * @rb:			Ready-busy line
+ * @page_sz:		Register value of the page_sz field to use
+ * @clk:		Expected clock frequency to use
+ * @timings:		Data interface timing mode to use
+ * @ecc_conf:		Hardware ECC configuration value
+ * @strength:		Register value of the ECC strength
+ * @raddr_cycles:	Row address cycle information
+ * @caddr_cycles:	Column address cycle information
+ * @ecc_bits:		Exact number of ECC bits per syndrome
+ * @ecc_total:		Total number of ECC bytes
+ * @errloc:		Array of errors located with soft BCH
+ * @hw_ecc:		Buffer to store syndromes computed by hardware
+ * @bch:		BCH structure
+ */
+struct anand {
+	struct list_head node;
+	struct nand_chip chip;
+	unsigned int cs;
+	unsigned int rb;
+	unsigned int page_sz;
+	unsigned long clk;
+	u32 timings;
+	u32 ecc_conf;
+	u32 strength;
+	u16 raddr_cycles;
+	u16 caddr_cycles;
+	unsigned int ecc_bits;
+	unsigned int ecc_total;
+	unsigned int *errloc;
+	u8 *hw_ecc;
+	struct bch_control *bch;
+};
+
+/**
+ * struct arasan_nfc - Defines the Arasan NAND flash controller driver instance
+ * @dev:		Pointer to the device structure
+ * @base:		Remapped register area
+ * @controller_clk:	Pointer to the system clock
+ * @bus_clk:		Pointer to the flash clock
+ * @controller:		Base controller structure
+ * @chips:		List of all NAND chips attached to the controller
+ * @assigned_cs:	Bitmask describing already assigned CS lines
+ * @cur_clk:		Current clock rate
+ */
+struct arasan_nfc {
+	struct device *dev;
+	void __iomem *base;
+	struct clk *controller_clk;
+	struct clk *bus_clk;
+	struct nand_controller controller;
+	struct list_head chips;
+	unsigned long assigned_cs;
+	unsigned int cur_clk;
+};
+
+static struct anand *to_anand(struct nand_chip *nand)
+{
+	return container_of(nand, struct anand, chip);
+}
+
+static struct arasan_nfc *to_anfc(struct nand_controller *ctrl)
+{
+	return container_of(ctrl, struct arasan_nfc, controller);
+}
+
+static int anfc_wait_for_event(struct arasan_nfc *nfc, unsigned int event)
+{
+	u32 val;
+	int ret;
+
+	ret = readl_relaxed_poll_timeout(nfc->base + INTR_STS_REG, val,
+					 val & event, 0,
+					 ANFC_DFLT_TIMEOUT_US);
+	if (ret) {
+		dev_err(nfc->dev, "Timeout waiting for event 0x%x\n", event);
+		return -ETIMEDOUT;
+	}
+
+	writel_relaxed(event, nfc->base + INTR_STS_REG);
+
+	return 0;
+}
+
+static int anfc_wait_for_rb(struct arasan_nfc *nfc, struct nand_chip *chip,
+			    unsigned int timeout_ms)
+{
+	struct anand *anand = to_anand(chip);
+	u32 val;
+	int ret;
+
+	/* There is no R/B interrupt, we must poll a register */
+	ret = readl_relaxed_poll_timeout(nfc->base + READY_STS_REG, val,
+					 val & BIT(anand->rb),
+					 1, timeout_ms * 1000);
+	if (ret) {
+		dev_err(nfc->dev, "Timeout waiting for R/B 0x%x\n",
+			readl_relaxed(nfc->base + READY_STS_REG));
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void anfc_trigger_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
+{
+	writel_relaxed(nfc_op->pkt_reg, nfc->base + PKT_REG);
+	writel_relaxed(nfc_op->addr1_reg, nfc->base + MEM_ADDR1_REG);
+	writel_relaxed(nfc_op->addr2_reg, nfc->base + MEM_ADDR2_REG);
+	writel_relaxed(nfc_op->cmd_reg, nfc->base + CMD_REG);
+	writel_relaxed(nfc_op->prog_reg, nfc->base + PROG_REG);
+}
+
+static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
+			       unsigned int *pktsize)
+{
+	unsigned int nb, sz;
+
+	for (nb = 1; nb < ANFC_MAX_STEPS; nb *= 2) {
+		sz = len / nb;
+		if (sz <= ANFC_MAX_PKT_SIZE)
+			break;
+	}
+
+	if (sz * nb != len)
+		return -ENOTSUPP;
+
+	if (steps)
+		*steps = nb;
+
+	if (pktsize)
+		*pktsize = sz;
+
+	return 0;
+}
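+
+/*
+ * Worked example (illustrative): a 4096-byte transfer does not fit in one
+ * packet (4096 > ANFC_MAX_PKT_SIZE) nor in two (2048 > 2047), so the loop
+ * above settles on steps = 4 packets of pktsize = 1024 bytes. A length no
+ * power-of-two step count divides evenly, e.g. 4098 bytes, is rejected
+ * with -ENOTSUPP.
+ */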
+
+static int anfc_select_target(struct nand_chip *chip, int target)
+{
+	struct anand *anand = to_anand(chip);
+	struct arasan_nfc *nfc = to_anfc(chip->controller);
+	int ret;
+
+	/* Update the controller timings and the potential ECC configuration */
+	writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG);
+
+	/* Update clock frequency */
+	if (nfc->cur_clk != anand->clk) {
+		clk_disable_unprepare(nfc->controller_clk);
+		ret = clk_set_rate(nfc->controller_clk, anand->clk);
+		if (ret) {
+			dev_err(nfc->dev, "Failed to change clock rate\n");
+			return ret;
+		}
+
+		ret = clk_prepare_enable(nfc->controller_clk);
+		if (ret) {
+			dev_err(nfc->dev,
+				"Failed to re-enable the controller clock\n");
+			return ret;
+		}
+
+		nfc->cur_clk = anand->clk;
+	}
+
+	return 0;
+}
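+
+/*
+ * Note: the controller clock is gated around clk_set_rate() above,
+ * presumably so that the rate switch cannot glitch an ongoing bus access,
+ * and nfc->cur_clk caches the current rate so the sequence only runs when
+ * switching between chips that expect different core frequencies.
+ */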
+
+/*
+ * When using the embedded hardware ECC engine, the controller is in charge of
+ * feeding the engine with, first, the ECC residue present in the data array.
+ * A typical read operation is:
+ * 1/ Assert the read operation by sending the relevant command/address cycles
+ *    but targeting the column of the first ECC bytes in the OOB area instead of
+ *    the main data directly.
+ * 2/ After having read the relevant number of ECC bytes, the controller uses
+ *    the RNDOUT/RNDSTART commands which are set into the "ECC Spare Command
+ *    Register" to move the pointer back at the beginning of the main data.
+ * 3/ It will read the content of the main area for a given size (pktsize) and
+ *    will feed the ECC engine with this buffer again.
+ * 4/ The ECC engine derives the ECC bytes for the given data and compares
+ *    them with the ones already received. It eventually triggers status flags
+ *    and then sets the "Buffer Read Ready" flag.
+ * 5/ The corrected data is then available for reading from the data port
+ *    register.
+ *
+ * The hardware BCH ECC engine is known to be inconsistent in BCH mode and
+ * reports uncorrectable errors. Because of this bug, we have to use the
+ * software BCH implementation in the read path.
+ */
+static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+				 int oob_required, int page)
+{
+	struct arasan_nfc *nfc = to_anfc(chip->controller);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct anand *anand = to_anand(chip);
+	unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+	unsigned int max_bitflips = 0;
+	dma_addr_t dma_addr;
+	int step, ret;
+	struct anfc_op nfc_op = {
+		.pkt_reg =
+			PKT_SIZE(chip->ecc.size) |
+			PKT_STEPS(chip->ecc.steps),
+		.addr1_reg =
+			(page & 0xFF) << (8 * (anand->caddr_cycles)) |
+			(((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
+		.addr2_reg =
+			((page >> 16) & 0xFF) |
+			ADDR2_STRENGTH(anand->strength) |
+			ADDR2_CS(anand->cs),
+		.cmd_reg =
+			CMD_1(NAND_CMD_READ0) |
+			CMD_2(NAND_CMD_READSTART) |
+			CMD_PAGE_SIZE(anand->page_sz) |
+			CMD_DMA_ENABLE |
+			CMD_NADDRS(anand->caddr_cycles +
+				   anand->raddr_cycles),
+		.prog_reg = PROG_PGRD,
+	};
+
+	dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(nfc->dev, dma_addr)) {
+		dev_err(nfc->dev, "Buffer mapping error");
+		return -EIO;
+	}
+
+	writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
+	writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
+
+	anfc_trigger_op(nfc, &nfc_op);
+
+	ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+	dma_unmap_single(nfc->dev, dma_addr, len, DMA_FROM_DEVICE);
+	if (ret) {
+		dev_err(nfc->dev, "Error reading page %d\n", page);
+		return ret;
+	}
+
+	/* Store the raw OOB bytes as well */
+	ret = nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi,
+					 mtd->oobsize, 0);
+	if (ret)
+		return ret;
+
+	/*
+	 * For each step, compute the BCH syndrome over the raw data by
+	 * software and compare the resulting number of errors with the
+	 * hardware engine feedback.
+	 */
+	for (step = 0; step < chip->ecc.steps; step++) {
+		u8 *raw_buf = &buf[step * chip->ecc.size];
+		unsigned int bit, byte;
+		int bf, i;
+
+		/* Extract the syndrome, it is not necessarily aligned */
+		memset(anand->hw_ecc, 0, chip->ecc.bytes);
+		nand_extract_bits(anand->hw_ecc, 0,
+				  &chip->oob_poi[mtd->oobsize - anand->ecc_total],
+				  anand->ecc_bits * step, anand->ecc_bits);
+
+		bf = bch_decode(anand->bch, raw_buf, chip->ecc.size,
+				anand->hw_ecc, NULL, NULL, anand->errloc);
+		if (!bf) {
+			continue;
+		} else if (bf > 0) {
+			for (i = 0; i < bf; i++) {
+				/* Only correct the data, not the syndrome */
+				if (anand->errloc[i] < (chip->ecc.size * 8)) {
+					bit = BIT(anand->errloc[i] & 7);
+					byte = anand->errloc[i] >> 3;
+					raw_buf[byte] ^= bit;
+				}
+			}
+
+			mtd->ecc_stats.corrected += bf;
+			max_bitflips = max_t(unsigned int, max_bitflips, bf);
+
+			continue;
+		}
+
+		bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size,
+						 NULL, 0, NULL, 0,
+						 chip->ecc.strength);
+		if (bf > 0) {
+			mtd->ecc_stats.corrected += bf;
+			max_bitflips = max_t(unsigned int, max_bitflips, bf);
+			memset(raw_buf, 0xFF, chip->ecc.size);
+		} else if (bf < 0) {
+			mtd->ecc_stats.failed++;
+		}
+	}
+
+	return 0;
+}
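+
+/*
+ * Note on the syndrome extraction above (illustrative numbers): with
+ * 512-byte steps (GF(2^13)) and strength 12, each syndrome spans
+ * 13 * 12 = 156 bits, so step N starts at bit 156 * N of the ECC area and
+ * is generally not byte aligned, hence nand_extract_bits() rather than a
+ * plain memcpy().
+ */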
+
+static int anfc_sel_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+				     int oob_required, int page)
+{
+	int ret;
+
+	ret = anfc_select_target(chip, chip->cur_cs);
+	if (ret)
+		return ret;
+
+	return anfc_read_page_hw_ecc(chip, buf, oob_required, page);
+};
+
+static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+				  int oob_required, int page)
+{
+	struct anand *anand = to_anand(chip);
+	struct arasan_nfc *nfc = to_anfc(chip->controller);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+	dma_addr_t dma_addr;
+	int ret;
+	struct anfc_op nfc_op = {
+		.pkt_reg =
+			PKT_SIZE(chip->ecc.size) |
+			PKT_STEPS(chip->ecc.steps),
+		.addr1_reg =
+			(page & 0xFF) << (8 * (anand->caddr_cycles)) |
+			(((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
+		.addr2_reg =
+			((page >> 16) & 0xFF) |
+			ADDR2_STRENGTH(anand->strength) |
+			ADDR2_CS(anand->cs),
+		.cmd_reg =
+			CMD_1(NAND_CMD_SEQIN) |
+			CMD_2(NAND_CMD_PAGEPROG) |
+			CMD_PAGE_SIZE(anand->page_sz) |
+			CMD_DMA_ENABLE |
+			CMD_NADDRS(anand->caddr_cycles +
+				   anand->raddr_cycles) |
+			CMD_ECC_ENABLE,
+		.prog_reg = PROG_PGPROG,
+	};
+
+	writel_relaxed(anand->ecc_conf, nfc->base + ECC_CONF_REG);
+	writel_relaxed(ECC_SP_CMD1(NAND_CMD_RNDIN) |
+		       ECC_SP_ADDRS(anand->caddr_cycles),
+		       nfc->base + ECC_SP_REG);
+
+	dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(nfc->dev, dma_addr)) {
+		dev_err(nfc->dev, "Buffer mapping error");
+		return -EIO;
+	}
+
+	writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
+	writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
+
+	anfc_trigger_op(nfc, &nfc_op);
+	ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+	dma_unmap_single(nfc->dev, dma_addr, len, DMA_TO_DEVICE);
+	if (ret) {
+		dev_err(nfc->dev, "Error writing page %d\n", page);
+		return ret;
+	}
+
+	/* Spare data is not protected */
+	if (oob_required)
+		ret = nand_write_oob_std(chip, page);
+
+	return ret;
+}
+
+static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+				      int oob_required, int page)
+{
+	int ret;
+
+	ret = anfc_select_target(chip, chip->cur_cs);
+	if (ret)
+		return ret;
+
+	return anfc_write_page_hw_ecc(chip, buf, oob_required, page);
+};
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static int anfc_parse_instructions(struct nand_chip *chip,
+				   const struct nand_subop *subop,
+				   struct anfc_op *nfc_op)
+{
+	struct anand *anand = to_anand(chip);
+	const struct nand_op_instr *instr = NULL;
+	bool first_cmd = true;
+	unsigned int op_id;
+	int ret, i;
+
+	memset(nfc_op, 0, sizeof(*nfc_op));
+	nfc_op->addr2_reg = ADDR2_CS(anand->cs);
+	nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz);
+
+	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+		unsigned int offset, naddrs, pktsize;
+		const u8 *addrs;
+		u8 *buf;
+
+		instr = &subop->instrs[op_id];
+
+		switch (instr->type) {
+		case NAND_OP_CMD_INSTR:
+			if (first_cmd)
+				nfc_op->cmd_reg |= CMD_1(instr->ctx.cmd.opcode);
+			else
+				nfc_op->cmd_reg |= CMD_2(instr->ctx.cmd.opcode);
+
+			first_cmd = false;
+			break;
+
+		case NAND_OP_ADDR_INSTR:
+			offset = nand_subop_get_addr_start_off(subop, op_id);
+			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+			addrs = &instr->ctx.addr.addrs[offset];
+			nfc_op->cmd_reg |= CMD_NADDRS(naddrs);
+
+			for (i = 0; i < min(ANFC_MAX_ADDR_CYC, naddrs); i++) {
+				if (i < 4)
+					nfc_op->addr1_reg |= (u32)addrs[i] << i * 8;
+				else
+					nfc_op->addr2_reg |= addrs[i];
+			}
+
+			break;
+		case NAND_OP_DATA_IN_INSTR:
+			nfc_op->read = true;
+			fallthrough;
+		case NAND_OP_DATA_OUT_INSTR:
+			offset = nand_subop_get_data_start_off(subop, op_id);
+			buf = instr->ctx.data.buf.in;
+			nfc_op->buf = &buf[offset];
+			nfc_op->len = nand_subop_get_data_len(subop, op_id);
+			ret = anfc_pkt_len_config(nfc_op->len, &nfc_op->steps,
+						  &pktsize);
+			if (ret)
+				return ret;
+
+			/*
+			 * Number of DATA cycles must be aligned on 4, this
+			 * means the controller might read/write more than
+			 * requested. This is harmless most of the time as extra
+			 * DATA are discarded in the write path and read pointer
+			 * adjusted in the read path.
+			 *
+			 * FIXME: The core should mark operations where
+			 * reading/writing more is allowed so the exec_op()
+			 * implementation can take the right decision when the
+			 * alignment constraint is not met: adjust the number of
+			 * DATA cycles when it's allowed, reject the operation
+			 * otherwise.
+			 */
+			nfc_op->pkt_reg |= PKT_SIZE(round_up(pktsize, 4)) |
+					   PKT_STEPS(nfc_op->steps);
+			break;
+		case NAND_OP_WAITRDY_INSTR:
+			nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int anfc_rw_pio_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
+{
+	unsigned int dwords = (nfc_op->len / 4) / nfc_op->steps;
+	unsigned int last_len = nfc_op->len % 4;
+	unsigned int offset, dir;
+	u8 *buf = nfc_op->buf;
+	int ret, i;
+
+	for (i = 0; i < nfc_op->steps; i++) {
+		dir = nfc_op->read ? READ_READY : WRITE_READY;
+		ret = anfc_wait_for_event(nfc, dir);
+		if (ret) {
+			dev_err(nfc->dev, "PIO %s ready signal not received\n",
+				nfc_op->read ? "Read" : "Write");
+			return ret;
+		}
+
+		offset = i * (dwords * 4);
+		if (nfc_op->read)
+			ioread32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
+				     dwords);
+		else
+			iowrite32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
+				      dwords);
+	}
+
+	if (last_len) {
+		u32 remainder;
+
+		offset = nfc_op->len - last_len;
+
+		if (nfc_op->read) {
+			remainder = readl_relaxed(nfc->base + DATA_PORT_REG);
+			memcpy(&buf[offset], &remainder, last_len);
+		} else {
+			memcpy(&remainder, &buf[offset], last_len);
+			writel_relaxed(remainder, nfc->base + DATA_PORT_REG);
+		}
+	}
+
+	return anfc_wait_for_event(nfc, XFER_COMPLETE);
+}
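+
+/*
+ * Example (illustrative): an 18-byte read in a single step gives
+ * dwords = 4 (16 bytes moved by ioread32_rep()) and last_len = 2, so one
+ * extra word is read from the data port and only its two low bytes are
+ * copied out.
+ */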
+
+static int anfc_misc_data_type_exec(struct nand_chip *chip,
+				    const struct nand_subop *subop,
+				    u32 prog_reg)
+{
+	struct arasan_nfc *nfc = to_anfc(chip->controller);
+	struct anfc_op nfc_op = {};
+	int ret;
+
+	ret = anfc_parse_instructions(chip, subop, &nfc_op);
+	if (ret)
+		return ret;
+
+	nfc_op.prog_reg = prog_reg;
+	anfc_trigger_op(nfc, &nfc_op);
+
+	if (nfc_op.rdy_timeout_ms) {
+		ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+		if (ret)
+			return ret;
+	}
+
+	return anfc_rw_pio_op(nfc, &nfc_op);
+}
+
+static int anfc_param_read_type_exec(struct nand_chip *chip,
+				     const struct nand_subop *subop)
+{
+	return anfc_misc_data_type_exec(chip, subop, PROG_RDPARAM);
+}
+
+static int anfc_data_read_type_exec(struct nand_chip *chip,
+				    const struct nand_subop *subop)
+{
+	return anfc_misc_data_type_exec(chip, subop, PROG_PGRD);
+}
+
+static int anfc_param_write_type_exec(struct nand_chip *chip,
+				      const struct nand_subop *subop)
+{
+	return anfc_misc_data_type_exec(chip, subop, PROG_SET_FEATURE);
+}
+
+static int anfc_data_write_type_exec(struct nand_chip *chip,
+				     const struct nand_subop *subop)
+{
+	return anfc_misc_data_type_exec(chip, subop, PROG_PGPROG);
+}
+
+static int anfc_misc_zerolen_type_exec(struct nand_chip *chip,
+				       const struct nand_subop *subop,
+				       u32 prog_reg)
+{
+	struct arasan_nfc *nfc = to_anfc(chip->controller);
+	struct anfc_op nfc_op = {};
+	int ret;
+
+	ret = anfc_parse_instructions(chip, subop, &nfc_op);
+	if (ret)
+		return ret;
+
+	nfc_op.prog_reg = prog_reg;
+	anfc_trigger_op(nfc, &nfc_op);
+
+	ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+	if (ret)
+		return ret;
+
+	if (nfc_op.rdy_timeout_ms)
+		ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+
+	return ret;
+}
+
+static int anfc_status_type_exec(struct nand_chip *chip,
+				 const struct nand_subop *subop)
+{
+	struct arasan_nfc *nfc = to_anfc(chip->controller);
+	u32 tmp;
+	int ret;
+
+	/* See anfc_check_op() for details about this constraint */
+	if (subop->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS)
+		return -ENOTSUPP;
+
+	ret = anfc_misc_zerolen_type_exec(chip, subop, PROG_STATUS);
+	if (ret)
+		return ret;
+
+	tmp = readl_relaxed(nfc->base + FLASH_STS_REG);
+	memcpy(subop->instrs[1].ctx.data.buf.in, &tmp, 1);
+
+	return 0;
+}
+
+static int anfc_reset_type_exec(struct nand_chip *chip,
+				const struct nand_subop *subop)
+{
+	return anfc_misc_zerolen_type_exec(chip, subop, PROG_RST);
+}
+
+static int anfc_erase_type_exec(struct nand_chip *chip,
+				const struct nand_subop *subop)
+{
+	return anfc_misc_zerolen_type_exec(chip, subop, PROG_ERASE);
+}
+
+static int anfc_wait_type_exec(struct nand_chip *chip,
+			       const struct nand_subop *subop)
+{
+	struct arasan_nfc *nfc = to_anfc(chip->controller);
+	struct anfc_op nfc_op = {};
+	int ret;
+
+	ret = anfc_parse_instructions(chip, subop, &nfc_op);
+	if (ret)
+		return ret;
+
+	return anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+}
+
+static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
+	NAND_OP_PARSER_PATTERN(
+		anfc_param_read_type_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+	NAND_OP_PARSER_PATTERN(
+		anfc_param_write_type_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_PARAM_SIZE)),
+	NAND_OP_PARSER_PATTERN(
+		anfc_data_read_type_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
+	NAND_OP_PARSER_PATTERN(
+		anfc_data_write_type_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+		NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+	NAND_OP_PARSER_PATTERN(
+		anfc_reset_type_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+	NAND_OP_PARSER_PATTERN(
+		anfc_erase_type_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+	NAND_OP_PARSER_PATTERN(
+		anfc_status_type_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+	NAND_OP_PARSER_PATTERN(
+		anfc_wait_type_exec,
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+	);
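+
+/*
+ * For instance, a page read issued by the core (READ0 + address cycles +
+ * READSTART + optional wait + data in) matches the third pattern above and
+ * is dispatched to anfc_data_read_type_exec(), while a lone WAITRDY falls
+ * through to anfc_wait_type_exec().
+ */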
+
+static int anfc_check_op(struct nand_chip *chip,
+			 const struct nand_operation *op)
+{
+	const struct nand_op_instr *instr;
+	int op_id;
+
+	/*
+	 * The controller abstracts all the NAND operations and does not
+	 * support data-only operations.
+	 *
+	 * TODO: The nand_op_parser framework should be extended to
+	 * support custom checks on DATA instructions.
+	 */
+	for (op_id = 0; op_id < op->ninstrs; op_id++) {
+		instr = &op->instrs[op_id];
+
+		switch (instr->type) {
+		case NAND_OP_ADDR_INSTR:
+			if (instr->ctx.addr.naddrs > ANFC_MAX_ADDR_CYC)
+				return -ENOTSUPP;
+
+			break;
+		case NAND_OP_DATA_IN_INSTR:
+		case NAND_OP_DATA_OUT_INSTR:
+			if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE)
+				return -ENOTSUPP;
+
+			if (anfc_pkt_len_config(instr->ctx.data.len, NULL, NULL))
+				return -ENOTSUPP;
+
+			break;
+		default:
+			break;
+		}
+	}
+
+	/*
+	 * The controller does not allow a CMD+DATA_IN cycle to be performed
+	 * manually on the bus by reading data from the data register. Instead,
+	 * the controller abstracts a status read operation with its own status
+	 * register after ordering a read status operation. Hence, we cannot
+	 * support any CMD+DATA_IN operation other than a READ STATUS.
+	 *
+	 * TODO: The nand_op_parser() framework should be extended to describe
+	 * fixed patterns instead of open-coding this check here.
+	 */
+	if (op->ninstrs == 2 &&
+	    op->instrs[0].type == NAND_OP_CMD_INSTR &&
+	    op->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS &&
+	    op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
+		return -ENOTSUPP;
+
+	return nand_op_parser_exec_op(chip, &anfc_op_parser, op, true);
+}
+
+static int anfc_exec_op(struct nand_chip *chip,
+			const struct nand_operation *op,
+			bool check_only)
+{
+	int ret;
+
+	if (check_only)
+		return anfc_check_op(chip, op);
+
+	ret = anfc_select_target(chip, op->cs);
+	if (ret)
+		return ret;
+
+	return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only);
+}
+
+static int anfc_setup_interface(struct nand_chip *chip, int target,
+				const struct nand_interface_config *conf)
+{
+	struct anand *anand = to_anand(chip);
+	struct arasan_nfc *nfc = to_anfc(chip->controller);
+	struct device_node *np = nfc->dev->of_node;
+
+	if (target < 0)
+		return 0;
+
+	anand->timings = DIFACE_SDR | DIFACE_SDR_MODE(conf->timings.mode);
+	anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+
+	/*
+	 * Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
+	 * with f > 90MHz (default clock is 100MHz) but signals are unstable
+	 * with higher modes. Hence we slightly decrease the clock rate, to
+	 * 80MHz, when using modes 2-5 with this SoC.
+	 */
+	if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") &&
+	    conf->timings.mode >= 2)
+		anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK;
+
+	return 0;
+}
+
+static int anfc_calc_hw_ecc_bytes(int step_size, int strength)
+{
+	unsigned int bch_gf_mag, ecc_bits;
+
+	switch (step_size) {
+	case SZ_512:
+		bch_gf_mag = 13;
+		break;
+	case SZ_1K:
+		bch_gf_mag = 14;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ecc_bits = bch_gf_mag * strength;
+
+	return DIV_ROUND_UP(ecc_bits, 8);
+}
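+
+/*
+ * Example (illustrative): a 512-byte step with strength 8 relies on a
+ * GF(2^13) BCH code, i.e. 13 * 8 = 104 ECC bits, rounded up to 13 bytes
+ * per step.
+ */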
+
+static const int anfc_hw_ecc_512_strengths[] = {4, 8, 12};
+
+static const int anfc_hw_ecc_1024_strengths[] = {24};
+
+static const struct nand_ecc_step_info anfc_hw_ecc_step_infos[] = {
+	{
+		.stepsize = SZ_512,
+		.strengths = anfc_hw_ecc_512_strengths,
+		.nstrengths = ARRAY_SIZE(anfc_hw_ecc_512_strengths),
+	},
+	{
+		.stepsize = SZ_1K,
+		.strengths = anfc_hw_ecc_1024_strengths,
+		.nstrengths = ARRAY_SIZE(anfc_hw_ecc_1024_strengths),
+	},
+};
+
+static const struct nand_ecc_caps anfc_hw_ecc_caps = {
+	.stepinfos = anfc_hw_ecc_step_infos,
+	.nstepinfos = ARRAY_SIZE(anfc_hw_ecc_step_infos),
+	.calc_ecc_bytes = anfc_calc_hw_ecc_bytes,
+};
+
+static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
+				       struct nand_chip *chip)
+{
+	struct anand *anand = to_anand(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	unsigned int bch_prim_poly = 0, bch_gf_mag = 0, ecc_offset;
+	int ret;
+
+	switch (mtd->writesize) {
+	case SZ_512:
+	case SZ_2K:
+	case SZ_4K:
+	case SZ_8K:
+	case SZ_16K:
+		break;
+	default:
+		dev_err(nfc->dev, "Unsupported page size %d\n", mtd->writesize);
+		return -EINVAL;
+	}
+
+	ret = nand_ecc_choose_conf(chip, &anfc_hw_ecc_caps, mtd->oobsize);
+	if (ret)
+		return ret;
+
+	switch (ecc->strength) {
+	case 12:
+		anand->strength = 0x1;
+		break;
+	case 8:
+		anand->strength = 0x2;
+		break;
+	case 4:
+		anand->strength = 0x3;
+		break;
+	case 24:
+		anand->strength = 0x4;
+		break;
+	default:
+		dev_err(nfc->dev, "Unsupported strength %d\n", ecc->strength);
+		return -EINVAL;
+	}
+
+	switch (ecc->size) {
+	case SZ_512:
+		bch_gf_mag = 13;
+		bch_prim_poly = 0x201b;
+		break;
+	case SZ_1K:
+		bch_gf_mag = 14;
+		bch_prim_poly = 0x4443;
+		break;
+	default:
+		dev_err(nfc->dev, "Unsupported step size %d\n", ecc->strength);
+		return -EINVAL;
+	}
+
+	mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
+
+	ecc->steps = mtd->writesize / ecc->size;
+	ecc->algo = NAND_ECC_ALGO_BCH;
+	anand->ecc_bits = bch_gf_mag * ecc->strength;
+	ecc->bytes = DIV_ROUND_UP(anand->ecc_bits, 8);
+	anand->ecc_total = DIV_ROUND_UP(anand->ecc_bits * ecc->steps, 8);
+	ecc_offset = mtd->writesize + mtd->oobsize - anand->ecc_total;
+	anand->ecc_conf = ECC_CONF_COL(ecc_offset) |
+			  ECC_CONF_LEN(anand->ecc_total) |
+			  ECC_CONF_BCH_EN;
+
+	anand->errloc = devm_kmalloc_array(nfc->dev, ecc->strength,
+					   sizeof(*anand->errloc), GFP_KERNEL);
+	if (!anand->errloc)
+		return -ENOMEM;
+
+	anand->hw_ecc = devm_kmalloc(nfc->dev, ecc->bytes, GFP_KERNEL);
+	if (!anand->hw_ecc)
+		return -ENOMEM;
+
+	/* Enforce bit swapping to fit the hardware */
+	anand->bch = bch_init(bch_gf_mag, ecc->strength, bch_prim_poly, true);
+	if (!anand->bch)
+		return -EINVAL;
+
+	ecc->read_page = anfc_sel_read_page_hw_ecc;
+	ecc->write_page = anfc_sel_write_page_hw_ecc;
+
+	return 0;
+}
+
+static int anfc_attach_chip(struct nand_chip *chip)
+{
+	struct anand *anand = to_anand(chip);
+	struct arasan_nfc *nfc = to_anfc(chip->controller);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int ret = 0;
+
+	if (mtd->writesize <= SZ_512)
+		anand->caddr_cycles = 1;
+	else
+		anand->caddr_cycles = 2;
+
+	if (chip->options & NAND_ROW_ADDR_3)
+		anand->raddr_cycles = 3;
+	else
+		anand->raddr_cycles = 2;
+
+	switch (mtd->writesize) {
+	case 512:
+		anand->page_sz = 0;
+		break;
+	case 1024:
+		anand->page_sz = 5;
+		break;
+	case 2048:
+		anand->page_sz = 1;
+		break;
+	case 4096:
+		anand->page_sz = 2;
+		break;
+	case 8192:
+		anand->page_sz = 3;
+		break;
+	case 16384:
+		anand->page_sz = 4;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* These hooks are valid for all ECC providers */
+	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
+	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+
+	switch (chip->ecc.engine_type) {
+	case NAND_ECC_ENGINE_TYPE_NONE:
+	case NAND_ECC_ENGINE_TYPE_SOFT:
+	case NAND_ECC_ENGINE_TYPE_ON_DIE:
+		break;
+	case NAND_ECC_ENGINE_TYPE_ON_HOST:
+		ret = anfc_init_hw_ecc_controller(nfc, chip);
+		break;
+	default:
+		dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
+			chip->ecc.engine_type);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static void anfc_detach_chip(struct nand_chip *chip)
+{
+	struct anand *anand = to_anand(chip);
+
+	if (anand->bch)
+		bch_free(anand->bch);
+}
+
+static const struct nand_controller_ops anfc_ops = {
+	.exec_op = anfc_exec_op,
+	.setup_interface = anfc_setup_interface,
+	.attach_chip = anfc_attach_chip,
+	.detach_chip = anfc_detach_chip,
+};
+
+static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np)
+{
+	struct anand *anand;
+	struct nand_chip *chip;
+	struct mtd_info *mtd;
+	int cs, rb, ret;
+
+	anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL);
+	if (!anand)
+		return -ENOMEM;
+
+	/* We do not support multiple CS per chip yet */
+	if (of_property_count_elems_of_size(np, "reg", sizeof(u32)) != 1) {
+		dev_err(nfc->dev, "Invalid reg property\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(np, "reg", &cs);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(np, "nand-rb", &rb);
+	if (ret)
+		return ret;
+
+	if (cs >= ANFC_MAX_CS || rb >= ANFC_MAX_CS) {
+		dev_err(nfc->dev, "Wrong CS %d or RB %d\n", cs, rb);
+		return -EINVAL;
+	}
+
+	if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+		dev_err(nfc->dev, "Already assigned CS %d\n", cs);
+		return -EINVAL;
+	}
+
+	anand->cs = cs;
+	anand->rb = rb;
+
+	chip = &anand->chip;
+	mtd = nand_to_mtd(chip);
+	mtd->dev.parent = nfc->dev;
+	chip->controller = &nfc->controller;
+	chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
+			NAND_USES_DMA;
+
+	nand_set_flash_node(chip, np);
+	if (!mtd->name) {
+		dev_err(nfc->dev, "NAND label property is mandatory\n");
+		return -EINVAL;
+	}
+
+	ret = nand_scan(chip, 1);
+	if (ret) {
+		dev_err(nfc->dev, "Scan operation failed\n");
+		return ret;
+	}
+
+	ret = mtd_device_register(mtd, NULL, 0);
+	if (ret) {
+		nand_cleanup(chip);
+		return ret;
+	}
+
+	list_add_tail(&anand->node, &nfc->chips);
+
+	return 0;
+}
+
+static void anfc_chips_cleanup(struct arasan_nfc *nfc)
+{
+	struct anand *anand, *tmp;
+	struct nand_chip *chip;
+	int ret;
+
+	list_for_each_entry_safe(anand, tmp, &nfc->chips, node) {
+		chip = &anand->chip;
+		ret = mtd_device_unregister(nand_to_mtd(chip));
+		WARN_ON(ret);
+		nand_cleanup(chip);
+		list_del(&anand->node);
+	}
+}
+
+static int anfc_chips_init(struct arasan_nfc *nfc)
+{
+	struct device_node *np = nfc->dev->of_node, *nand_np;
+	int nchips = of_get_child_count(np);
+	int ret;
+
+	if (!nchips || nchips > ANFC_MAX_CS) {
+		dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
+			nchips);
+		return -EINVAL;
+	}
+
+	for_each_child_of_node(np, nand_np) {
+		ret = anfc_chip_init(nfc, nand_np);
+		if (ret) {
+			of_node_put(nand_np);
+			anfc_chips_cleanup(nfc);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void anfc_reset(struct arasan_nfc *nfc)
+{
+	/* Disable interrupt signals */
+	writel_relaxed(0, nfc->base + INTR_SIG_EN_REG);
+
+	/* Enable interrupt status */
+	writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG);
+}
+
+static int anfc_probe(struct platform_device *pdev)
+{
+	struct arasan_nfc *nfc;
+	int ret;
+
+	nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+	if (!nfc)
+		return -ENOMEM;
+
+	nfc->dev = &pdev->dev;
+	nand_controller_init(&nfc->controller);
+	nfc->controller.ops = &anfc_ops;
+	INIT_LIST_HEAD(&nfc->chips);
+
+	nfc->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(nfc->base))
+		return PTR_ERR(nfc->base);
+
+	anfc_reset(nfc);
+
+	nfc->controller_clk = devm_clk_get(&pdev->dev, "controller");
+	if (IS_ERR(nfc->controller_clk))
+		return PTR_ERR(nfc->controller_clk);
+
+	nfc->bus_clk = devm_clk_get(&pdev->dev, "bus");
+	if (IS_ERR(nfc->bus_clk))
+		return PTR_ERR(nfc->bus_clk);
+
+	ret = clk_prepare_enable(nfc->controller_clk);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(nfc->bus_clk);
+	if (ret)
+		goto disable_controller_clk;
+
+	ret = anfc_chips_init(nfc);
+	if (ret)
+		goto disable_bus_clk;
+
+	platform_set_drvdata(pdev, nfc);
+
+	return 0;
+
+disable_bus_clk:
+	clk_disable_unprepare(nfc->bus_clk);
+
+disable_controller_clk:
+	clk_disable_unprepare(nfc->controller_clk);
+
+	return ret;
+}
+
+static int anfc_remove(struct platform_device *pdev)
+{
+	struct arasan_nfc *nfc = platform_get_drvdata(pdev);
+
+	anfc_chips_cleanup(nfc);
+
+	clk_disable_unprepare(nfc->bus_clk);
+	clk_disable_unprepare(nfc->controller_clk);
+
+	return 0;
+}
+
+static const struct of_device_id anfc_ids[] = {
+	{
+		.compatible = "xlnx,zynqmp-nand-controller",
+	},
+	{
+		.compatible = "arasan,nfc-v3p10",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, anfc_ids);
+
+static struct platform_driver anfc_driver = {
+	.driver = {
+		.name = "arasan-nand-controller",
+		.of_match_table = anfc_ids,
+	},
+	.probe = anfc_probe,
+	.remove = anfc_remove,
+};
+module_platform_driver(anfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Punnaiah Choudary Kalluri <punnaia@xilinx.com>");
+MODULE_AUTHOR("Naga Sureshkumar Relli <nagasure@xilinx.com>");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 23d11e8..8aab101 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -200,8 +200,10 @@
 	void (*nand_init)(struct atmel_nand_controller *nc,
 			  struct atmel_nand *nand);
 	int (*ecc_init)(struct nand_chip *chip);
-	int (*setup_data_interface)(struct atmel_nand *nand, int csline,
-				    const struct nand_data_interface *conf);
+	int (*setup_interface)(struct atmel_nand *nand, int csline,
+			       const struct nand_interface_config *conf);
+	int (*exec_op)(struct atmel_nand *nand,
+		       const struct nand_operation *op, bool check_only);
 };
 
 struct atmel_nand_controller_caps {
@@ -259,6 +261,7 @@
 	struct regmap *io;
 	struct atmel_nfc_op op;
 	struct completion complete;
+	u32 cfg;
 	int irq;
 
 	/* Only used when instantiating from legacy DT bindings. */
@@ -414,138 +417,6 @@
 	return -EIO;
 }
 
-static u8 atmel_nand_read_byte(struct nand_chip *chip)
-{
-	struct atmel_nand *nand = to_atmel_nand(chip);
-
-	return ioread8(nand->activecs->io.virt);
-}
-
-static void atmel_nand_write_byte(struct nand_chip *chip, u8 byte)
-{
-	struct atmel_nand *nand = to_atmel_nand(chip);
-
-	if (chip->options & NAND_BUSWIDTH_16)
-		iowrite16(byte | (byte << 8), nand->activecs->io.virt);
-	else
-		iowrite8(byte, nand->activecs->io.virt);
-}
-
-static void atmel_nand_read_buf(struct nand_chip *chip, u8 *buf, int len)
-{
-	struct atmel_nand *nand = to_atmel_nand(chip);
-	struct atmel_nand_controller *nc;
-
-	nc = to_nand_controller(chip->controller);
-
-	/*
-	 * If the controller supports DMA, the buffer address is DMA-able and
-	 * len is long enough to make DMA transfers profitable, let's trigger
-	 * a DMA transfer. If it fails, fallback to PIO mode.
-	 */
-	if (nc->dmac && virt_addr_valid(buf) &&
-	    len >= MIN_DMA_LEN &&
-	    !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
-				     DMA_FROM_DEVICE))
-		return;
-
-	if (chip->options & NAND_BUSWIDTH_16)
-		ioread16_rep(nand->activecs->io.virt, buf, len / 2);
-	else
-		ioread8_rep(nand->activecs->io.virt, buf, len);
-}
-
-static void atmel_nand_write_buf(struct nand_chip *chip, const u8 *buf, int len)
-{
-	struct atmel_nand *nand = to_atmel_nand(chip);
-	struct atmel_nand_controller *nc;
-
-	nc = to_nand_controller(chip->controller);
-
-	/*
-	 * If the controller supports DMA, the buffer address is DMA-able and
-	 * len is long enough to make DMA transfers profitable, let's trigger
-	 * a DMA transfer. If it fails, fallback to PIO mode.
-	 */
-	if (nc->dmac && virt_addr_valid(buf) &&
-	    len >= MIN_DMA_LEN &&
-	    !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
-				     len, DMA_TO_DEVICE))
-		return;
-
-	if (chip->options & NAND_BUSWIDTH_16)
-		iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
-	else
-		iowrite8_rep(nand->activecs->io.virt, buf, len);
-}
-
-static int atmel_nand_dev_ready(struct nand_chip *chip)
-{
-	struct atmel_nand *nand = to_atmel_nand(chip);
-
-	return gpiod_get_value(nand->activecs->rb.gpio);
-}
-
-static void atmel_nand_select_chip(struct nand_chip *chip, int cs)
-{
-	struct atmel_nand *nand = to_atmel_nand(chip);
-
-	if (cs < 0 || cs >= nand->numcs) {
-		nand->activecs = NULL;
-		chip->legacy.dev_ready = NULL;
-		return;
-	}
-
-	nand->activecs = &nand->cs[cs];
-
-	if (nand->activecs->rb.type == ATMEL_NAND_GPIO_RB)
-		chip->legacy.dev_ready = atmel_nand_dev_ready;
-}
-
-static int atmel_hsmc_nand_dev_ready(struct nand_chip *chip)
-{
-	struct atmel_nand *nand = to_atmel_nand(chip);
-	struct atmel_hsmc_nand_controller *nc;
-	u32 status;
-
-	nc = to_hsmc_nand_controller(chip->controller);
-
-	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &status);
-
-	return status & ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
-}
-
-static void atmel_hsmc_nand_select_chip(struct nand_chip *chip, int cs)
-{
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	struct atmel_nand *nand = to_atmel_nand(chip);
-	struct atmel_hsmc_nand_controller *nc;
-
-	nc = to_hsmc_nand_controller(chip->controller);
-
-	atmel_nand_select_chip(chip, cs);
-
-	if (!nand->activecs) {
-		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
-			     ATMEL_HSMC_NFC_CTRL_DIS);
-		return;
-	}
-
-	if (nand->activecs->rb.type == ATMEL_NAND_NATIVE_RB)
-		chip->legacy.dev_ready = atmel_hsmc_nand_dev_ready;
-
-	regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
-			   ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
-			   ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
-			   ATMEL_HSMC_NFC_CFG_RSPARE |
-			   ATMEL_HSMC_NFC_CFG_WSPARE,
-			   ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
-			   ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
-			   ATMEL_HSMC_NFC_CFG_RSPARE);
-	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
-		     ATMEL_HSMC_NFC_CTRL_EN);
-}
-
 static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
 {
 	u8 *addrs = nc->op.addrs;
@@ -596,51 +467,250 @@
 	return ret;
 }
 
-static void atmel_hsmc_nand_cmd_ctrl(struct nand_chip *chip, int dat,
-				     unsigned int ctrl)
+static void atmel_nand_data_in(struct atmel_nand *nand, void *buf,
+			       unsigned int len, bool force_8bit)
+{
+	struct atmel_nand_controller *nc;
+
+	nc = to_nand_controller(nand->base.controller);
+
+	/*
+	 * If the controller supports DMA, the buffer address is DMA-able and
+	 * len is long enough to make DMA transfers profitable, let's trigger
+	 * a DMA transfer. If it fails, fall back to PIO mode.
+	 */
+	if (nc->dmac && virt_addr_valid(buf) &&
+	    len >= MIN_DMA_LEN && !force_8bit &&
+	    !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
+				     DMA_FROM_DEVICE))
+		return;
+
+	if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
+		ioread16_rep(nand->activecs->io.virt, buf, len / 2);
+	else
+		ioread8_rep(nand->activecs->io.virt, buf, len);
+}
+
+static void atmel_nand_data_out(struct atmel_nand *nand, const void *buf,
+				unsigned int len, bool force_8bit)
+{
+	struct atmel_nand_controller *nc;
+
+	nc = to_nand_controller(nand->base.controller);
+
+	/*
+	 * If the controller supports DMA, the buffer address is DMA-able and
+	 * len is long enough to make DMA transfers profitable, let's trigger
+	 * a DMA transfer. If it fails, fall back to PIO mode.
+	 */
+	if (nc->dmac && virt_addr_valid(buf) &&
+	    len >= MIN_DMA_LEN && !force_8bit &&
+	    !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
+				     len, DMA_TO_DEVICE))
+		return;
+
+	if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
+		iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
+	else
+		iowrite8_rep(nand->activecs->io.virt, buf, len);
+}
+
+static int atmel_nand_waitrdy(struct atmel_nand *nand, unsigned int timeout_ms)
+{
+	if (nand->activecs->rb.type == ATMEL_NAND_NO_RB)
+		return nand_soft_waitrdy(&nand->base, timeout_ms);
+
+	return nand_gpio_waitrdy(&nand->base, nand->activecs->rb.gpio,
+				 timeout_ms);
+}
+
+static int atmel_hsmc_nand_waitrdy(struct atmel_nand *nand,
+				   unsigned int timeout_ms)
+{
+	struct atmel_hsmc_nand_controller *nc;
+	u32 status, mask;
+
+	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
+		return atmel_nand_waitrdy(nand, timeout_ms);
+
+	nc = to_hsmc_nand_controller(nand->base.controller);
+	mask = ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
+	return regmap_read_poll_timeout_atomic(nc->base.smc, ATMEL_HSMC_NFC_SR,
+					       status, status & mask,
+					       10, timeout_ms * 1000);
+}
+
+static void atmel_nand_select_target(struct atmel_nand *nand,
+				     unsigned int cs)
+{
+	nand->activecs = &nand->cs[cs];
+}
+
+static void atmel_hsmc_nand_select_target(struct atmel_nand *nand,
+					  unsigned int cs)
+{
+	struct mtd_info *mtd = nand_to_mtd(&nand->base);
+	struct atmel_hsmc_nand_controller *nc;
+	u32 cfg = ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
+		  ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
+		  ATMEL_HSMC_NFC_CFG_RSPARE;
+
+	nand->activecs = &nand->cs[cs];
+	nc = to_hsmc_nand_controller(nand->base.controller);
+	if (nc->cfg == cfg)
+		return;
+
+	regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
+			   ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
+			   ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
+			   ATMEL_HSMC_NFC_CFG_RSPARE |
+			   ATMEL_HSMC_NFC_CFG_WSPARE,
+			   cfg);
+	nc->cfg = cfg;
+}
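+
+/*
+ * Caching the last programmed value in nc->cfg avoids a regmap
+ * read-modify-write on every target selection when consecutive accesses
+ * hit chips with identical page/spare geometry.
+ */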
+
+static int atmel_smc_nand_exec_instr(struct atmel_nand *nand,
+				     const struct nand_op_instr *instr)
+{
+	struct atmel_nand_controller *nc;
+	unsigned int i;
+
+	nc = to_nand_controller(nand->base.controller);
+	switch (instr->type) {
+	case NAND_OP_CMD_INSTR:
+		writeb(instr->ctx.cmd.opcode,
+		       nand->activecs->io.virt + nc->caps->cle_offs);
+		return 0;
+	case NAND_OP_ADDR_INSTR:
+		for (i = 0; i < instr->ctx.addr.naddrs; i++)
+			writeb(instr->ctx.addr.addrs[i],
+			       nand->activecs->io.virt + nc->caps->ale_offs);
+		return 0;
+	case NAND_OP_DATA_IN_INSTR:
+		atmel_nand_data_in(nand, instr->ctx.data.buf.in,
+				   instr->ctx.data.len,
+				   instr->ctx.data.force_8bit);
+		return 0;
+	case NAND_OP_DATA_OUT_INSTR:
+		atmel_nand_data_out(nand, instr->ctx.data.buf.out,
+				    instr->ctx.data.len,
+				    instr->ctx.data.force_8bit);
+		return 0;
+	case NAND_OP_WAITRDY_INSTR:
+		return atmel_nand_waitrdy(nand,
+					  instr->ctx.waitrdy.timeout_ms);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int atmel_smc_nand_exec_op(struct atmel_nand *nand,
+				  const struct nand_operation *op,
+				  bool check_only)
+{
+	unsigned int i;
+	int ret = 0;
+
+	if (check_only)
+		return 0;
+
+	atmel_nand_select_target(nand, op->cs);
+	gpiod_set_value(nand->activecs->csgpio, 0);
+	for (i = 0; i < op->ninstrs; i++) {
+		ret = atmel_smc_nand_exec_instr(nand, &op->instrs[i]);
+		if (ret)
+			break;
+	}
+	gpiod_set_value(nand->activecs->csgpio, 1);
+
+	return ret;
+}
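+
+/*
+ * The whole instruction sequence runs under a single chip-select
+ * assertion: the optional CS GPIO is toggled once before the loop and
+ * restored once all instructions have executed (or one of them failed).
+ */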
+
+static int atmel_hsmc_exec_cmd_addr(struct nand_chip *chip,
+				    const struct nand_subop *subop)
 {
 	struct atmel_nand *nand = to_atmel_nand(chip);
 	struct atmel_hsmc_nand_controller *nc;
+	unsigned int i, j;
 
 	nc = to_hsmc_nand_controller(chip->controller);
 
-	if (ctrl & NAND_ALE) {
-		if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
-			return;
+	nc->op.cs = nand->activecs->id;
+	for (i = 0; i < subop->ninstrs; i++) {
+		const struct nand_op_instr *instr = &subop->instrs[i];
 
-		nc->op.addrs[nc->op.naddrs++] = dat;
-	} else if (ctrl & NAND_CLE) {
-		if (nc->op.ncmds > 1)
-			return;
+		if (instr->type == NAND_OP_CMD_INSTR) {
+			nc->op.cmds[nc->op.ncmds++] = instr->ctx.cmd.opcode;
+			continue;
+		}
 
-		nc->op.cmds[nc->op.ncmds++] = dat;
+		for (j = nand_subop_get_addr_start_off(subop, i);
+		     j < nand_subop_get_num_addr_cyc(subop, i); j++) {
+			nc->op.addrs[nc->op.naddrs] = instr->ctx.addr.addrs[j];
+			nc->op.naddrs++;
+		}
 	}
 
-	if (dat == NAND_CMD_NONE) {
-		nc->op.cs = nand->activecs->id;
-		atmel_nfc_exec_op(nc, true);
-	}
+	return atmel_nfc_exec_op(nc, true);
 }
 
-static void atmel_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
-				unsigned int ctrl)
+static int atmel_hsmc_exec_rw(struct nand_chip *chip,
+			      const struct nand_subop *subop)
 {
+	const struct nand_op_instr *instr = subop->instrs;
 	struct atmel_nand *nand = to_atmel_nand(chip);
-	struct atmel_nand_controller *nc;
 
-	nc = to_nand_controller(chip->controller);
+	if (instr->type == NAND_OP_DATA_IN_INSTR)
+		atmel_nand_data_in(nand, instr->ctx.data.buf.in,
+				   instr->ctx.data.len,
+				   instr->ctx.data.force_8bit);
+	else
+		atmel_nand_data_out(nand, instr->ctx.data.buf.out,
+				    instr->ctx.data.len,
+				    instr->ctx.data.force_8bit);
 
-	if ((ctrl & NAND_CTRL_CHANGE) && nand->activecs->csgpio) {
-		if (ctrl & NAND_NCE)
-			gpiod_set_value(nand->activecs->csgpio, 0);
-		else
-			gpiod_set_value(nand->activecs->csgpio, 1);
-	}
+	return 0;
+}
 
-	if (ctrl & NAND_ALE)
-		writeb(cmd, nand->activecs->io.virt + nc->caps->ale_offs);
-	else if (ctrl & NAND_CLE)
-		writeb(cmd, nand->activecs->io.virt + nc->caps->cle_offs);
+static int atmel_hsmc_exec_waitrdy(struct nand_chip *chip,
+				   const struct nand_subop *subop)
+{
+	const struct nand_op_instr *instr = subop->instrs;
+	struct atmel_nand *nand = to_atmel_nand(chip);
+
+	return atmel_hsmc_nand_waitrdy(nand, instr->ctx.waitrdy.timeout_ms);
+}
+
+static const struct nand_op_parser atmel_hsmc_op_parser = NAND_OP_PARSER(
+	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_cmd_addr,
+		NAND_OP_PARSER_PAT_CMD_ELEM(true),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+		NAND_OP_PARSER_PAT_CMD_ELEM(true)),
+	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
+		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
+	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
+		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0)),
+	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_waitrdy,
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+);
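+
+/*
+ * With this parser, a typical page read decomposes into one cmd/addr/cmd
+ * subop dispatched to the NFC command engine via atmel_hsmc_exec_cmd_addr(),
+ * a wait handled by atmel_hsmc_exec_waitrdy() and a data transfer handled
+ * by atmel_hsmc_exec_rw().
+ */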
+
+static int atmel_hsmc_nand_exec_op(struct atmel_nand *nand,
+				   const struct nand_operation *op,
+				   bool check_only)
+{
+	int ret;
+
+	if (check_only)
+		return nand_op_parser_exec_op(&nand->base,
+					      &atmel_hsmc_op_parser, op, true);
+
+	atmel_hsmc_nand_select_target(nand, op->cs);
+	ret = nand_op_parser_exec_op(&nand->base, &atmel_hsmc_op_parser, op,
+				     false);
+
+	return ret;
 }
 
 static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
@@ -840,7 +910,7 @@
 	if (ret)
 		return ret;
 
-	atmel_nand_write_buf(chip, buf, mtd->writesize);
+	nand_write_data_op(chip, buf, mtd->writesize, false);
 
 	ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
 	if (ret) {
@@ -850,7 +920,7 @@
 
 	atmel_nand_pmecc_disable(chip, raw);
 
-	atmel_nand_write_buf(chip, chip->oob_poi, mtd->oobsize);
+	nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
 
 	return nand_prog_page_end_op(chip);
 }
@@ -880,11 +950,17 @@
 	if (ret)
 		return ret;
 
-	atmel_nand_read_buf(chip, buf, mtd->writesize);
-	atmel_nand_read_buf(chip, chip->oob_poi, mtd->oobsize);
+	ret = nand_read_data_op(chip, buf, mtd->writesize, false, false);
+	if (ret)
+		goto out_disable;
+
+	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, false);
+	if (ret)
+		goto out_disable;
 
 	ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
 
+out_disable:
 	atmel_nand_pmecc_disable(chip, raw);
 
 	return ret;
@@ -909,8 +985,9 @@
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct atmel_nand *nand = to_atmel_nand(chip);
 	struct atmel_hsmc_nand_controller *nc;
-	int ret, status;
+	int ret;
 
+	atmel_hsmc_nand_select_target(nand, chip->cur_cs);
 	nc = to_hsmc_nand_controller(chip->controller);
 
 	atmel_nfc_copy_to_sram(chip, buf, false);
@@ -941,21 +1018,9 @@
 	if (ret)
 		return ret;
 
-	atmel_nand_write_buf(chip, chip->oob_poi, mtd->oobsize);
+	nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
 
-	nc->op.cmds[0] = NAND_CMD_PAGEPROG;
-	nc->op.ncmds = 1;
-	nc->op.cs = nand->activecs->id;
-	ret = atmel_nfc_exec_op(nc, false);
-	if (ret)
-		dev_err(nc->base.dev, "Failed to program NAND page (err = %d)\n",
-			ret);
-
-	status = chip->legacy.waitfunc(chip);
-	if (status & NAND_STATUS_FAIL)
-		return -EIO;
-
-	return ret;
+	return nand_prog_page_end_op(chip);
 }
 
 static int atmel_hsmc_nand_pmecc_write_page(struct nand_chip *chip,
@@ -983,6 +1048,7 @@
 	struct atmel_hsmc_nand_controller *nc;
 	int ret;
 
+	atmel_hsmc_nand_select_target(nand, chip->cur_cs);
 	nc = to_hsmc_nand_controller(chip->controller);
 
 	/*
@@ -990,12 +1056,9 @@
 	 * connected to a native SoC R/B pin. If that's not the case, fallback
 	 * to the non-optimized one.
 	 */
-	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
-		nand_read_page_op(chip, page, 0, NULL, 0);
-
+	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
 		return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
 						raw);
-	}
 
 	nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;
 
@@ -1045,7 +1108,10 @@
 
 static int atmel_nand_pmecc_init(struct nand_chip *chip)
 {
+	const struct nand_ecc_props *requirements =
+		nanddev_get_ecc_requirements(&chip->base);
 	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct nand_device *nanddev = mtd_to_nanddev(mtd);
 	struct atmel_nand *nand = to_atmel_nand(chip);
 	struct atmel_nand_controller *nc;
 	struct atmel_pmecc_user_req req;
@@ -1070,19 +1136,19 @@
 			chip->ecc.size = val;
 	}
 
-	if (chip->ecc.options & NAND_ECC_MAXIMIZE)
+	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
 		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
 	else if (chip->ecc.strength)
 		req.ecc.strength = chip->ecc.strength;
-	else if (chip->base.eccreq.strength)
-		req.ecc.strength = chip->base.eccreq.strength;
+	else if (requirements->strength)
+		req.ecc.strength = requirements->strength;
 	else
 		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
 
 	if (chip->ecc.size)
 		req.ecc.sectorsize = chip->ecc.size;
-	else if (chip->base.eccreq.step_size)
-		req.ecc.sectorsize = chip->base.eccreq.step_size;
+	else if (requirements->step_size)
+		req.ecc.sectorsize = requirements->step_size;
 	else
 		req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;
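
The strength/sector-size selection above is a priority chain: an
explicit user request to maximize strength wins, then any value already
set on the chip (e.g. from the device tree), then the requirement
detected from the chip itself, and finally the maximize default. A
compact standalone model of the chain (all names illustrative):

#include <stdio.h>

#define MAXIMIZE -1	/* stands in for ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH */

static int pick_strength(int maximize_flag, int configured, int detected)
{
	if (maximize_flag)
		return MAXIMIZE;
	if (configured)
		return configured;
	if (detected)
		return detected;
	return MAXIMIZE;
}

int main(void)
{
	printf("%d\n", pick_strength(0, 0, 8));	/* detected value: 8 */
	printf("%d\n", pick_strength(0, 4, 8));	/* configured wins: 4 */
	printf("%d\n", pick_strength(1, 4, 8));	/* maximize wins: -1 */
	return 0;
}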
 
@@ -1101,14 +1167,14 @@
 	if (IS_ERR(nand->pmecc))
 		return PTR_ERR(nand->pmecc);
 
-	chip->ecc.algo = NAND_ECC_BCH;
+	chip->ecc.algo = NAND_ECC_ALGO_BCH;
 	chip->ecc.size = req.ecc.sectorsize;
 	chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
 	chip->ecc.strength = req.ecc.strength;
 
 	chip->options |= NAND_NO_SUBPAGE_WRITE;
 
-	mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+	mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
 
 	return 0;
 }
@@ -1120,15 +1186,15 @@
 
 	nc = to_nand_controller(chip->controller);
 
-	switch (chip->ecc.mode) {
-	case NAND_ECC_NONE:
-	case NAND_ECC_SOFT:
+	switch (chip->ecc.engine_type) {
+	case NAND_ECC_ENGINE_TYPE_NONE:
+	case NAND_ECC_ENGINE_TYPE_SOFT:
 		/*
 		 * Nothing to do, the core will initialize everything for us.
 		 */
 		break;
 
-	case NAND_ECC_HW:
+	case NAND_ECC_ENGINE_TYPE_ON_HOST:
 		ret = atmel_nand_pmecc_init(chip);
 		if (ret)
 			return ret;
@@ -1142,7 +1208,7 @@
 	default:
 		/* Other modes are not supported. */
 		dev_err(nc->dev, "Unsupported ECC mode: %d\n",
-			chip->ecc.mode);
+			chip->ecc.engine_type);
 		return -ENOTSUPP;
 	}
 
@@ -1157,7 +1223,7 @@
 	if (ret)
 		return ret;
 
-	if (chip->ecc.mode != NAND_ECC_HW)
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
 		return 0;
 
 	/* Adjust the ECC operations for the HSMC IP. */
@@ -1170,7 +1236,7 @@
 }
 
 static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
-					const struct nand_data_interface *conf,
+					const struct nand_interface_config *conf,
 					struct atmel_smc_cs_conf *smcconf)
 {
 	u32 ncycles, totalcycles, timeps, mckperiodps;
@@ -1399,9 +1465,9 @@
 	return 0;
 }
 
-static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand,
+static int atmel_smc_nand_setup_interface(struct atmel_nand *nand,
 					int csline,
-					const struct nand_data_interface *conf)
+					const struct nand_interface_config *conf)
 {
 	struct atmel_nand_controller *nc;
 	struct atmel_smc_cs_conf smcconf;
@@ -1424,9 +1490,9 @@
 	return 0;
 }
 
-static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand,
+static int atmel_hsmc_nand_setup_interface(struct atmel_nand *nand,
 					int csline,
-					const struct nand_data_interface *conf)
+					const struct nand_interface_config *conf)
 {
 	struct atmel_hsmc_nand_controller *nc;
 	struct atmel_smc_cs_conf smcconf;
@@ -1454,8 +1520,8 @@
 	return 0;
 }
 
-static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline,
-					const struct nand_data_interface *conf)
+static int atmel_nand_setup_interface(struct nand_chip *chip, int csline,
+				      const struct nand_interface_config *conf)
 {
 	struct atmel_nand *nand = to_atmel_nand(chip);
 	struct atmel_nand_controller *nc;
@@ -1466,7 +1532,19 @@
 	    (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
 		return -EINVAL;
 
-	return nc->caps->ops->setup_data_interface(nand, csline, conf);
+	return nc->caps->ops->setup_interface(nand, csline, conf);
+}
+
+static int atmel_nand_exec_op(struct nand_chip *chip,
+			      const struct nand_operation *op,
+			      bool check_only)
+{
+	struct atmel_nand *nand = to_atmel_nand(chip);
+	struct atmel_nand_controller *nc;
+
+	nc = to_nand_controller(nand->base.controller);
+
+	return nc->caps->ops->exec_op(nand, op, check_only);
 }
 
 static void atmel_nand_init(struct atmel_nand_controller *nc,
@@ -1478,29 +1556,19 @@
 	mtd->dev.parent = nc->dev;
 	nand->base.controller = &nc->base;
 
-	chip->legacy.cmd_ctrl = atmel_nand_cmd_ctrl;
-	chip->legacy.read_byte = atmel_nand_read_byte;
-	chip->legacy.write_byte = atmel_nand_write_byte;
-	chip->legacy.read_buf = atmel_nand_read_buf;
-	chip->legacy.write_buf = atmel_nand_write_buf;
-	chip->legacy.select_chip = atmel_nand_select_chip;
-
-	if (!nc->mck || !nc->caps->ops->setup_data_interface)
+	if (!nc->mck || !nc->caps->ops->setup_interface)
 		chip->options |= NAND_KEEP_TIMINGS;
 
-	/* Some NANDs require a longer delay than the default one (20us). */
-	chip->legacy.chip_delay = 40;
-
 	/*
 	 * Use a bounce buffer when the buffer passed by the MTD user is not
 	 * suitable for DMA.
 	 */
 	if (nc->dmac)
-		chip->options |= NAND_USE_BOUNCE_BUFFER;
+		chip->options |= NAND_USES_DMA;
 
 	/* Default to HW ECC if pmecc is available. */
 	if (nc->pmecc)
-		chip->ecc.mode = NAND_ECC_HW;
+		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 }
 
 static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
@@ -1529,18 +1597,6 @@
 				   smc_nc->ebi_csa->nfd0_on_d16);
 }
 
-static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
-				 struct atmel_nand *nand)
-{
-	struct nand_chip *chip = &nand->base;
-
-	atmel_nand_init(nc, nand);
-
-	/* Overload some methods for the HSMC controller. */
-	chip->legacy.cmd_ctrl = atmel_hsmc_nand_cmd_ctrl;
-	chip->legacy.select_chip = atmel_hsmc_nand_select_chip;
-}
-
 static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
 {
 	struct nand_chip *chip = &nand->base;
@@ -1580,9 +1636,8 @@
 
 	nand->numcs = numcs;
 
-	gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "det", 0,
-						      &np->fwnode, GPIOD_IN,
-						      "nand-det");
+	gpio = devm_fwnode_gpiod_get(nc->dev, of_fwnode_handle(np),
+				     "det", GPIOD_IN, "nand-det");
 	if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
 		dev_err(nc->dev,
 			"Failed to get detect gpio (err = %ld)\n",
@@ -1626,9 +1681,10 @@
 			nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
 			nand->cs[i].rb.id = val;
 		} else {
-			gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev,
-							"rb", i, &np->fwnode,
-							GPIOD_IN, "nand-rb");
+			gpio = devm_fwnode_gpiod_get_index(nc->dev,
+							   of_fwnode_handle(np),
+							   "rb", i, GPIOD_IN,
+							   "nand-rb");
 			if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
 				dev_err(nc->dev,
 					"Failed to get R/B gpio (err = %ld)\n",
@@ -1642,10 +1698,10 @@
 			}
 		}
 
-		gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "cs",
-							      i, &np->fwnode,
-							      GPIOD_OUT_HIGH,
-							      "nand-cs");
+		gpio = devm_fwnode_gpiod_get_index(nc->dev,
+						   of_fwnode_handle(np),
+						   "cs", i, GPIOD_OUT_HIGH,
+						   "nand-cs");
 		if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
 			dev_err(nc->dev,
 				"Failed to get CS gpio (err = %ld)\n",
@@ -1958,7 +2014,8 @@
 
 static const struct nand_controller_ops atmel_nand_controller_ops = {
 	.attach_chip = atmel_nand_attach_chip,
-	.setup_data_interface = atmel_nand_setup_data_interface,
+	.setup_interface = atmel_nand_setup_interface,
+	.exec_op = atmel_nand_exec_op,
 };
 
 static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
@@ -1978,13 +2035,9 @@
 	platform_set_drvdata(pdev, nc);
 
 	nc->pmecc = devm_atmel_pmecc_get(dev);
-	if (IS_ERR(nc->pmecc)) {
-		ret = PTR_ERR(nc->pmecc);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "Could not get PMECC object (err = %d)\n",
-				ret);
-		return ret;
-	}
+	if (IS_ERR(nc->pmecc))
+		return dev_err_probe(dev, PTR_ERR(nc->pmecc),
+				     "Could not get PMECC object\n");
 
 	if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
 		dma_cap_mask_t mask;
@@ -2250,6 +2303,9 @@
 		return ret;
 
 	hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
+	regmap_write(hsmc_nc->base.smc, ATMEL_HSMC_NFC_CTRL,
+		     ATMEL_HSMC_NFC_CTRL_DIS);
+
 	if (hsmc_nc->sram.pool)
 		gen_pool_free(hsmc_nc->sram.pool,
 			      (unsigned long)hsmc_nc->sram.virt,
@@ -2302,6 +2358,8 @@
 	/* Initial NFC configuration. */
 	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
 		     ATMEL_HSMC_NFC_CFG_DTO_MAX);
+	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
+		     ATMEL_HSMC_NFC_CTRL_EN);
 
 	ret = atmel_nand_controller_add_nands(&nc->base);
 	if (ret)
@@ -2319,8 +2377,9 @@
 	.probe = atmel_hsmc_nand_controller_probe,
 	.remove = atmel_hsmc_nand_controller_remove,
 	.ecc_init = atmel_hsmc_nand_ecc_init,
-	.nand_init = atmel_hsmc_nand_init,
-	.setup_data_interface = atmel_hsmc_nand_setup_data_interface,
+	.nand_init = atmel_nand_init,
+	.setup_interface = atmel_hsmc_nand_setup_interface,
+	.exec_op = atmel_hsmc_nand_exec_op,
 };
 
 static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
@@ -2377,16 +2436,17 @@
 
 /*
  * The SMC reg layout of at91rm9200 is completely different which prevents us
- * from re-using atmel_smc_nand_setup_data_interface() for the
- * ->setup_data_interface() hook.
+ * from re-using atmel_smc_nand_setup_interface() for the
+ * ->setup_interface() hook.
  * At this point, there's no support for the at91rm9200 SMC IP, so we leave
- * ->setup_data_interface() unassigned.
+ * ->setup_interface() unassigned.
  */
 static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
 	.probe = atmel_smc_nand_controller_probe,
 	.remove = atmel_smc_nand_controller_remove,
 	.ecc_init = atmel_nand_ecc_init,
 	.nand_init = atmel_smc_nand_init,
+	.exec_op = atmel_smc_nand_exec_op,
 };
 
 static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
@@ -2401,7 +2461,8 @@
 	.remove = atmel_smc_nand_controller_remove,
 	.ecc_init = atmel_nand_ecc_init,
 	.nand_init = atmel_smc_nand_init,
-	.setup_data_interface = atmel_smc_nand_setup_data_interface,
+	.setup_interface = atmel_smc_nand_setup_interface,
+	.exec_op = atmel_smc_nand_exec_op,
 };
 
 static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index e10b760..48901a1 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -16,63 +16,16 @@
 
 
 struct au1550nd_ctx {
+	struct nand_controller controller;
 	struct nand_chip chip;
 
 	int cs;
 	void __iomem *base;
-	void (*write_byte)(struct nand_chip *, u_char);
 };
 
-/**
- * au_read_byte -  read one byte from the chip
- * @this:	NAND chip object
- *
- * read function for 8bit buswidth
- */
-static u_char au_read_byte(struct nand_chip *this)
+static struct au1550nd_ctx *chip_to_au_ctx(struct nand_chip *this)
 {
-	u_char ret = readb(this->legacy.IO_ADDR_R);
-	wmb(); /* drain writebuffer */
-	return ret;
-}
-
-/**
- * au_write_byte -  write one byte to the chip
- * @this:	NAND chip object
- * @byte:	pointer to data byte to write
- *
- * write function for 8it buswidth
- */
-static void au_write_byte(struct nand_chip *this, u_char byte)
-{
-	writeb(byte, this->legacy.IO_ADDR_W);
-	wmb(); /* drain writebuffer */
-}
-
-/**
- * au_read_byte16 -  read one byte endianness aware from the chip
- * @this:	NAND chip object
- *
- * read function for 16bit buswidth with endianness conversion
- */
-static u_char au_read_byte16(struct nand_chip *this)
-{
-	u_char ret = (u_char) cpu_to_le16(readw(this->legacy.IO_ADDR_R));
-	wmb(); /* drain writebuffer */
-	return ret;
-}
-
-/**
- * au_write_byte16 -  write one byte endianness aware to the chip
- * @this:	NAND chip object
- * @byte:	pointer to data byte to write
- *
- * write function for 16bit buswidth with endianness conversion
- */
-static void au_write_byte16(struct nand_chip *this, u_char byte)
-{
-	writew(le16_to_cpu((u16) byte), this->legacy.IO_ADDR_W);
-	wmb(); /* drain writebuffer */
+	return container_of(this, struct au1550nd_ctx, chip);
 }
 
 /**
@@ -83,12 +36,15 @@
  *
  * write function for 8bit buswidth
  */
-static void au_write_buf(struct nand_chip *this, const u_char *buf, int len)
+static void au_write_buf(struct nand_chip *this, const void *buf,
+			 unsigned int len)
 {
+	struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+	const u8 *p = buf;
 	int i;
 
 	for (i = 0; i < len; i++) {
-		writeb(buf[i], this->legacy.IO_ADDR_W);
+		writeb(p[i], ctx->base + MEM_STNAND_DATA);
 		wmb(); /* drain writebuffer */
 	}
 }
@@ -101,12 +57,15 @@
  *
  * read function for 8bit buswidth
  */
-static void au_read_buf(struct nand_chip *this, u_char *buf, int len)
+static void au_read_buf(struct nand_chip *this, void *buf,
+			unsigned int len)
 {
+	struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+	u8 *p = buf;
 	int i;
 
 	for (i = 0; i < len; i++) {
-		buf[i] = readb(this->legacy.IO_ADDR_R);
+		p[i] = readb(ctx->base + MEM_STNAND_DATA);
 		wmb(); /* drain writebuffer */
 	}
 }
@@ -119,17 +78,18 @@
  *
  * write function for 16bit buswidth
  */
-static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len)
+static void au_write_buf16(struct nand_chip *this, const void *buf,
+			   unsigned int len)
 {
-	int i;
-	u16 *p = (u16 *) buf;
-	len >>= 1;
+	struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+	const u16 *p = buf;
+	unsigned int i;
 
+	len >>= 1;
 	for (i = 0; i < len; i++) {
-		writew(p[i], this->legacy.IO_ADDR_W);
+		writew(p[i], ctx->base + MEM_STNAND_DATA);
 		wmb(); /* drain writebuffer */
 	}
-
 }
 
 /**
@@ -140,218 +100,19 @@
  *
  * read function for 16bit buswidth
  */
-static void au_read_buf16(struct nand_chip *this, u_char *buf, int len)
+static void au_read_buf16(struct nand_chip *this, void *buf, unsigned int len)
 {
-	int i;
-	u16 *p = (u16 *) buf;
-	len >>= 1;
+	struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+	unsigned int i;
+	u16 *p = buf;
 
+	len >>= 1;
 	for (i = 0; i < len; i++) {
-		p[i] = readw(this->legacy.IO_ADDR_R);
+		p[i] = readw(ctx->base + MEM_STNAND_DATA);
 		wmb(); /* drain writebuffer */
 	}
 }
 
-/* Select the chip by setting nCE to low */
-#define NAND_CTL_SETNCE		1
-/* Deselect the chip by setting nCE to high */
-#define NAND_CTL_CLRNCE		2
-/* Select the command latch by setting CLE to high */
-#define NAND_CTL_SETCLE		3
-/* Deselect the command latch by setting CLE to low */
-#define NAND_CTL_CLRCLE		4
-/* Select the address latch by setting ALE to high */
-#define NAND_CTL_SETALE		5
-/* Deselect the address latch by setting ALE to low */
-#define NAND_CTL_CLRALE		6
-
-static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx,
-						chip);
-
-	switch (cmd) {
-
-	case NAND_CTL_SETCLE:
-		this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
-		break;
-
-	case NAND_CTL_CLRCLE:
-		this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
-		break;
-
-	case NAND_CTL_SETALE:
-		this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
-		break;
-
-	case NAND_CTL_CLRALE:
-		this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
-		/* FIXME: Nobody knows why this is necessary,
-		 * but it works only that way */
-		udelay(1);
-		break;
-
-	case NAND_CTL_SETNCE:
-		/* assert (force assert) chip enable */
-		alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
-		break;
-
-	case NAND_CTL_CLRNCE:
-		/* deassert chip enable */
-		alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
-		break;
-	}
-
-	this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W;
-
-	wmb(); /* Drain the writebuffer */
-}
-
-int au1550_device_ready(struct nand_chip *this)
-{
-	return (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) ? 1 : 0;
-}
-
-/**
- * au1550_select_chip - control -CE line
- *	Forbid driving -CE manually permitting the NAND controller to do this.
- *	Keeping -CE asserted during the whole sector reads interferes with the
- *	NOR flash and PCMCIA drivers as it causes contention on the static bus.
- *	We only have to hold -CE low for the NAND read commands since the flash
- *	chip needs it to be asserted during chip not ready time but the NAND
- *	controller keeps it released.
- *
- * @this:	NAND chip object
- * @chip:	chipnumber to select, -1 for deselect
- */
-static void au1550_select_chip(struct nand_chip *this, int chip)
-{
-}
-
-/**
- * au1550_command - Send command to NAND device
- * @this:	NAND chip object
- * @command:	the command to be sent
- * @column:	the column address for this command, -1 if none
- * @page_addr:	the page address for this command, -1 if none
- */
-static void au1550_command(struct nand_chip *this, unsigned command,
-			   int column, int page_addr)
-{
-	struct mtd_info *mtd = nand_to_mtd(this);
-	struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx,
-						chip);
-	int ce_override = 0, i;
-	unsigned long flags = 0;
-
-	/* Begin command latch cycle */
-	au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
-	/*
-	 * Write out the command to the device.
-	 */
-	if (command == NAND_CMD_SEQIN) {
-		int readcmd;
-
-		if (column >= mtd->writesize) {
-			/* OOB area */
-			column -= mtd->writesize;
-			readcmd = NAND_CMD_READOOB;
-		} else if (column < 256) {
-			/* First 256 bytes --> READ0 */
-			readcmd = NAND_CMD_READ0;
-		} else {
-			column -= 256;
-			readcmd = NAND_CMD_READ1;
-		}
-		ctx->write_byte(this, readcmd);
-	}
-	ctx->write_byte(this, command);
-
-	/* Set ALE and clear CLE to start address cycle */
-	au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
-
-	if (column != -1 || page_addr != -1) {
-		au1550_hwcontrol(mtd, NAND_CTL_SETALE);
-
-		/* Serially input address */
-		if (column != -1) {
-			/* Adjust columns for 16 bit buswidth */
-			if (this->options & NAND_BUSWIDTH_16 &&
-					!nand_opcode_8bits(command))
-				column >>= 1;
-			ctx->write_byte(this, column);
-		}
-		if (page_addr != -1) {
-			ctx->write_byte(this, (u8)(page_addr & 0xff));
-
-			if (command == NAND_CMD_READ0 ||
-			    command == NAND_CMD_READ1 ||
-			    command == NAND_CMD_READOOB) {
-				/*
-				 * NAND controller will release -CE after
-				 * the last address byte is written, so we'll
-				 * have to forcibly assert it. No interrupts
-				 * are allowed while we do this as we don't
-				 * want the NOR flash or PCMCIA drivers to
-				 * steal our precious bytes of data...
-				 */
-				ce_override = 1;
-				local_irq_save(flags);
-				au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
-			}
-
-			ctx->write_byte(this, (u8)(page_addr >> 8));
-
-			if (this->options & NAND_ROW_ADDR_3)
-				ctx->write_byte(this,
-						((page_addr >> 16) & 0x0f));
-		}
-		/* Latch in address */
-		au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
-	}
-
-	/*
-	 * Program and erase have their own busy handlers.
-	 * Status and sequential in need no delay.
-	 */
-	switch (command) {
-
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_ERASE2:
-	case NAND_CMD_SEQIN:
-	case NAND_CMD_STATUS:
-		return;
-
-	case NAND_CMD_RESET:
-		break;
-
-	case NAND_CMD_READ0:
-	case NAND_CMD_READ1:
-	case NAND_CMD_READOOB:
-		/* Check if we're really driving -CE low (just in case) */
-		if (unlikely(!ce_override))
-			break;
-
-		/* Apply a short delay always to ensure that we do wait tWB. */
-		ndelay(100);
-		/* Wait for a chip to become ready... */
-		for (i = this->legacy.chip_delay;
-		     !this->legacy.dev_ready(this) && i > 0; --i)
-			udelay(1);
-
-		/* Release -CE and re-enable interrupts. */
-		au1550_hwcontrol(mtd, NAND_CTL_CLRNCE);
-		local_irq_restore(flags);
-		return;
-	}
-	/* Apply this short delay always to ensure that we do wait tWB. */
-	ndelay(100);
-
-	while(!this->legacy.dev_ready(this));
-}
-
 static int find_nand_cs(unsigned long nand_base)
 {
 	void __iomem *base =
@@ -373,6 +134,122 @@
 	return -ENODEV;
 }
 
+static int au1550nd_waitrdy(struct nand_chip *this, unsigned int timeout_ms)
+{
+	unsigned long timeout_jiffies = jiffies;
+
+	timeout_jiffies += msecs_to_jiffies(timeout_ms) + 1;
+	do {
+		if (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1)
+			return 0;
+
+		usleep_range(10, 100);
+	} while (time_before(jiffies, timeout_jiffies));
+
+	return -ETIMEDOUT;
+}
+
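
The waitrdy helper above replaces the legacy fixed chip_delay with a
bounded poll on the controller's ready bit; the extra "+ 1" jiffy keeps
the deadline from rounding below timeout_ms. A runnable userspace model
of the same poll-with-deadline shape, using CLOCK_MONOTONIC in place of
jiffies (all names are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

/* Stand-in for the ready bit polled via alchemy_rdsmem() above. */
static bool device_ready(void)
{
	static int calls;
	return ++calls > 3;	/* pretend the device becomes ready */
}

/* Same shape as au1550nd_waitrdy(): poll, sleep a little, give up late. */
static int wait_ready(unsigned int timeout_ms)
{
	const int64_t deadline = now_ns() + (int64_t)timeout_ms * 1000000;

	do {
		if (device_ready())
			return 0;
		nanosleep(&(struct timespec){ .tv_nsec = 10000 }, NULL);
	} while (now_ns() < deadline);

	return -ETIMEDOUT;
}

int main(void)
{
	printf("wait_ready: %d\n", wait_ready(100));
	return 0;
}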
+static int au1550nd_exec_instr(struct nand_chip *this,
+			       const struct nand_op_instr *instr)
+{
+	struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+	unsigned int i;
+	int ret = 0;
+
+	switch (instr->type) {
+	case NAND_OP_CMD_INSTR:
+		writeb(instr->ctx.cmd.opcode,
+		       ctx->base + MEM_STNAND_CMD);
+		/* Drain the writebuffer */
+		wmb();
+		break;
+
+	case NAND_OP_ADDR_INSTR:
+		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+			writeb(instr->ctx.addr.addrs[i],
+			       ctx->base + MEM_STNAND_ADDR);
+			/* Drain the writebuffer */
+			wmb();
+		}
+		break;
+
+	case NAND_OP_DATA_IN_INSTR:
+		if ((this->options & NAND_BUSWIDTH_16) &&
+		    !instr->ctx.data.force_8bit)
+			au_read_buf16(this, instr->ctx.data.buf.in,
+				      instr->ctx.data.len);
+		else
+			au_read_buf(this, instr->ctx.data.buf.in,
+				    instr->ctx.data.len);
+		break;
+
+	case NAND_OP_DATA_OUT_INSTR:
+		if ((this->options & NAND_BUSWIDTH_16) &&
+		    !instr->ctx.data.force_8bit)
+			au_write_buf16(this, instr->ctx.data.buf.out,
+				       instr->ctx.data.len);
+		else
+			au_write_buf(this, instr->ctx.data.buf.out,
+				     instr->ctx.data.len);
+		break;
+
+	case NAND_OP_WAITRDY_INSTR:
+		ret = au1550nd_waitrdy(this, instr->ctx.waitrdy.timeout_ms);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (instr->delay_ns)
+		ndelay(instr->delay_ns);
+
+	return ret;
+}
+
+static int au1550nd_exec_op(struct nand_chip *this,
+			    const struct nand_operation *op,
+			    bool check_only)
+{
+	struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+	unsigned int i;
+	int ret;
+
+	if (check_only)
+		return 0;
+
+	/* assert (force assert) chip enable */
+	alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
+	/* Drain the writebuffer */
+	wmb();
+
+	for (i = 0; i < op->ninstrs; i++) {
+		ret = au1550nd_exec_instr(this, &op->instrs[i]);
+		if (ret)
+			break;
+	}
+
+	/* deassert chip enable */
+	alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
+	/* Drain the writebuffer */
+	wmb();
+
+	return ret;
+}
+
+static int au1550nd_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+	return 0;
+}
+
+static const struct nand_controller_ops au1550nd_ops = {
+	.exec_op = au1550nd_exec_op,
+	.attach_chip = au1550nd_attach_chip,
+};
+
 static int au1550nd_probe(struct platform_device *pdev)
 {
 	struct au1550nd_platdata *pd;
@@ -404,7 +281,7 @@
 		goto out1;
 	}
 
-	ctx->base = ioremap_nocache(r->start, 0x1000);
+	ctx->base = ioremap(r->start, 0x1000);
 	if (!ctx->base) {
 		dev_err(&pdev->dev, "cannot remap NAND memory area\n");
 		ret = -ENODEV;
@@ -424,22 +301,19 @@
 	}
 	ctx->cs = cs;
 
-	this->legacy.dev_ready = au1550_device_ready;
-	this->legacy.select_chip = au1550_select_chip;
-	this->legacy.cmdfunc = au1550_command;
-
-	/* 30 us command delay time */
-	this->legacy.chip_delay = 30;
-	this->ecc.mode = NAND_ECC_SOFT;
-	this->ecc.algo = NAND_ECC_HAMMING;
+	nand_controller_init(&ctx->controller);
+	ctx->controller.ops = &au1550nd_ops;
+	this->controller = &ctx->controller;
 
 	if (pd->devwidth)
 		this->options |= NAND_BUSWIDTH_16;
 
-	this->legacy.read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
-	ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
-	this->legacy.write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
-	this->legacy.read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
+	/*
+	 * This driver assumes that the default ECC engine should be TYPE_SOFT.
+	 * Set ->engine_type before registering the NAND devices in order to
+	 * provide a driver specific default value.
+	 */
+	this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
 
 	ret = nand_scan(this, 1);
 	if (ret) {
@@ -466,8 +340,12 @@
 {
 	struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
 	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	struct nand_chip *chip = &ctx->chip;
+	int ret;
 
-	nand_release(&ctx->chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 	iounmap(ctx->base);
 	release_mem_region(r->start, 0x1000);
 	kfree(ctx);
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/main.c b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
index 8dae97c..dcc70d9 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
@@ -60,8 +60,12 @@
 static int bcm47xxnflash_remove(struct platform_device *pdev)
 {
 	struct bcm47xxnflash *nflash = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &nflash->nand_chip;
+	int ret;
 
-	nand_release(&nflash->nand_chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index 5917751..8bb17c5 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -391,7 +391,8 @@
 
 	nand_chip->legacy.chip_delay = 50;
 	b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
-	b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */
+	/* TODO: implement ECC */
+	b47n->nand_chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_NONE;
 
 	/* Enable NAND flash access */
 	bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 0f3c09f..580b91c 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -4,7 +4,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
@@ -102,6 +101,45 @@
 #define NAND_CTRL_RDY			(INTFC_CTLR_READY | INTFC_FLASH_READY)
 #define NAND_POLL_STATUS_TIMEOUT_MS	100
 
+#define EDU_CMD_WRITE          0x00
+#define EDU_CMD_READ           0x01
+#define EDU_STATUS_ACTIVE      BIT(0)
+#define EDU_ERR_STATUS_ERRACK  BIT(0)
+#define EDU_DONE_MASK		GENMASK(1, 0)
+
+#define EDU_CONFIG_MODE_NAND   BIT(0)
+#define EDU_CONFIG_SWAP_BYTE   BIT(1)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define EDU_CONFIG_SWAP_CFG     EDU_CONFIG_SWAP_BYTE
+#else
+#define EDU_CONFIG_SWAP_CFG     0
+#endif
+
+/* edu registers */
+enum edu_reg {
+	EDU_CONFIG = 0,
+	EDU_DRAM_ADDR,
+	EDU_EXT_ADDR,
+	EDU_LENGTH,
+	EDU_CMD,
+	EDU_STOP,
+	EDU_STATUS,
+	EDU_DONE,
+	EDU_ERR_STATUS,
+};
+
+static const u16 edu_regs[] = {
+	[EDU_CONFIG] = 0x00,
+	[EDU_DRAM_ADDR] = 0x04,
+	[EDU_EXT_ADDR] = 0x08,
+	[EDU_LENGTH] = 0x0c,
+	[EDU_CMD] = 0x10,
+	[EDU_STOP] = 0x14,
+	[EDU_STATUS] = 0x18,
+	[EDU_DONE] = 0x1c,
+	[EDU_ERR_STATUS] = 0x20,
+};
+
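
edu_regs follows the same idiom this driver already uses for its other
register sets (flash_dma_regs_v*, brcmnand_regs_v*): one logical
register enum, one u16 offset table per IP revision, and accessors that
translate enum to offset, with 0 conventionally meaning "register
absent on this revision" (see the reg_offsets guard added further
below). A standalone sketch of the idiom, with illustrative names:

#include <stdint.h>
#include <stdio.h>

enum my_reg { MY_REVISION = 0, MY_CTRL, MY_STATUS };

static const uint16_t regs_v1[] = {
	[MY_REVISION]	= 0x00,
	[MY_CTRL]	= 0x08,
	[MY_STATUS]	= 0x10,
};

static const uint16_t regs_v4[] = {
	[MY_REVISION]	= 0x00,
	[MY_CTRL]	= 0x0c,
	[MY_STATUS]	= 0,	/* not implemented on this revision */
};

int main(void)
{
	const uint16_t *tables[] = { regs_v1, regs_v4 };

	for (unsigned int i = 0; i < 2; i++) {
		const uint16_t *regs = tables[i];	/* probe-time choice */

		if (!regs[MY_STATUS])
			puts("MY_STATUS absent on this revision");
		else
			printf("MY_STATUS at offset %#x\n", regs[MY_STATUS]);
	}
	return 0;
}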
 /* flash_dma registers */
 enum flash_dma_reg {
 	FLASH_DMA_REVISION = 0,
@@ -117,6 +155,18 @@
 	FLASH_DMA_CURRENT_DESC_EXT,
 };
 
+/* flash_dma registers v0 */
+static const u16 flash_dma_regs_v0[] = {
+	[FLASH_DMA_REVISION]		= 0x00,
+	[FLASH_DMA_FIRST_DESC]		= 0x04,
+	[FLASH_DMA_CTRL]		= 0x08,
+	[FLASH_DMA_MODE]		= 0x0c,
+	[FLASH_DMA_STATUS]		= 0x10,
+	[FLASH_DMA_INTERRUPT_DESC]	= 0x14,
+	[FLASH_DMA_ERROR_STATUS]	= 0x18,
+	[FLASH_DMA_CURRENT_DESC]	= 0x1c,
+};
+
 /* flash_dma registers v1*/
 static const u16 flash_dma_regs_v1[] = {
 	[FLASH_DMA_REVISION]		= 0x00,
@@ -155,6 +205,8 @@
 	BRCMNAND_HAS_WP				= BIT(3),
 };
 
+struct brcmnand_host;
+
 struct brcmnand_controller {
 	struct device		*dev;
 	struct nand_controller	controller;
@@ -173,17 +225,32 @@
 
 	int			cmd_pending;
 	bool			dma_pending;
+	bool                    edu_pending;
 	struct completion	done;
 	struct completion	dma_done;
+	struct completion       edu_done;
 
 	/* List of NAND hosts (one for each chip-select) */
 	struct list_head host_list;
 
+	/* EDU info, per-transaction */
+	const u16               *edu_offsets;
+	void __iomem            *edu_base;
+	int			edu_irq;
+	int                     edu_count;
+	u64                     edu_dram_addr;
+	u32                     edu_ext_addr;
+	u32                     edu_cmd;
+	u32                     edu_config;
+
 	/* flash_dma reg */
 	const u16		*flash_dma_offsets;
 	struct brcm_nand_dma_desc *dma_desc;
 	dma_addr_t		dma_pa;
 
+	int (*dma_trans)(struct brcmnand_host *host, u64 addr, u32 *buf,
+			 u32 len, u8 dma_cmd);
+
 	/* in-memory cache of the FLASH_CACHE, used only for some commands */
 	u8			flash_cache[FC_BYTES];
 
@@ -196,6 +263,7 @@
 	const unsigned int	*block_sizes;
 	unsigned int		max_page_size;
 	const unsigned int	*page_sizes;
+	unsigned int		page_size_shift;
 	unsigned int		max_oob;
 	u32			features;
 
@@ -204,6 +272,7 @@
 	u32			nand_cs_nand_xor;
 	u32			corr_stat_threshold;
 	u32			flash_dma_mode;
+	u32                     flash_edu_mode;
 	bool			pio_poll_mode;
 };
 
@@ -269,8 +338,38 @@
 	BRCMNAND_FC_BASE,
 };
 
-/* BRCMNAND v4.0 */
-static const u16 brcmnand_regs_v40[] = {
+/* BRCMNAND v2.1-v2.2 */
+static const u16 brcmnand_regs_v21[] = {
+	[BRCMNAND_CMD_START]		=  0x04,
+	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
+	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
+	[BRCMNAND_INTFC_STATUS]		=  0x5c,
+	[BRCMNAND_CS_SELECT]		=  0x14,
+	[BRCMNAND_CS_XOR]		=  0x18,
+	[BRCMNAND_LL_OP]		=     0,
+	[BRCMNAND_CS0_BASE]		=  0x40,
+	[BRCMNAND_CS1_BASE]		=     0,
+	[BRCMNAND_CORR_THRESHOLD]	=     0,
+	[BRCMNAND_CORR_THRESHOLD_EXT]	=     0,
+	[BRCMNAND_UNCORR_COUNT]		=     0,
+	[BRCMNAND_CORR_COUNT]		=     0,
+	[BRCMNAND_CORR_EXT_ADDR]	=  0x60,
+	[BRCMNAND_CORR_ADDR]		=  0x64,
+	[BRCMNAND_UNCORR_EXT_ADDR]	=  0x68,
+	[BRCMNAND_UNCORR_ADDR]		=  0x6c,
+	[BRCMNAND_SEMAPHORE]		=  0x50,
+	[BRCMNAND_ID]			=  0x54,
+	[BRCMNAND_ID_EXT]		=     0,
+	[BRCMNAND_LL_RDATA]		=     0,
+	[BRCMNAND_OOB_READ_BASE]	=  0x20,
+	[BRCMNAND_OOB_READ_10_BASE]	=     0,
+	[BRCMNAND_OOB_WRITE_BASE]	=  0x30,
+	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
+	[BRCMNAND_FC_BASE]		= 0x200,
+};
+
+/* BRCMNAND v3.3-v4.0 */
+static const u16 brcmnand_regs_v33[] = {
 	[BRCMNAND_CMD_START]		=  0x04,
 	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
 	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
@@ -467,6 +566,9 @@
 	CFG_BUS_WIDTH			= BIT(CFG_BUS_WIDTH_SHIFT),
 	CFG_DEVICE_SIZE_SHIFT		= 24,
 
+	/* Only for v2.1 */
+	CFG_PAGE_SIZE_SHIFT_v2_1	= 30,
+
 	/* Only for pre-v7.1 (with no CFG_EXT register) */
 	CFG_PAGE_SIZE_SHIFT		= 20,
 	CFG_BLK_SIZE_SHIFT		= 28,
@@ -502,12 +604,16 @@
 {
 	static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
 	static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
-	static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };
+	static const unsigned int block_sizes_v2_2[] = { 16, 128, 8, 512, 256, 0 };
+	static const unsigned int block_sizes_v2_1[] = { 16, 128, 8, 512, 0 };
+	static const unsigned int page_sizes_v3_4[] = { 512, 2048, 4096, 8192, 0 };
+	static const unsigned int page_sizes_v2_2[] = { 512, 2048, 4096, 0 };
+	static const unsigned int page_sizes_v2_1[] = { 512, 2048, 0 };
 
 	ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
 
-	/* Only support v4.0+? */
-	if (ctrl->nand_version < 0x0400) {
+	/* Only support v2.1+ */
+	if (ctrl->nand_version < 0x0201) {
 		dev_err(ctrl->dev, "version %#x not supported\n",
 			ctrl->nand_version);
 		return -ENODEV;
@@ -522,8 +628,10 @@
 		ctrl->reg_offsets = brcmnand_regs_v60;
 	else if (ctrl->nand_version >= 0x0500)
 		ctrl->reg_offsets = brcmnand_regs_v50;
-	else if (ctrl->nand_version >= 0x0400)
-		ctrl->reg_offsets = brcmnand_regs_v40;
+	else if (ctrl->nand_version >= 0x0303)
+		ctrl->reg_offsets = brcmnand_regs_v33;
+	else if (ctrl->nand_version >= 0x0201)
+		ctrl->reg_offsets = brcmnand_regs_v21;
 
 	/* Chip-select stride */
 	if (ctrl->nand_version >= 0x0701)
@@ -549,14 +657,32 @@
 		ctrl->max_page_size = 16 * 1024;
 		ctrl->max_block_size = 2 * 1024 * 1024;
 	} else {
-		ctrl->page_sizes = page_sizes;
+		if (ctrl->nand_version >= 0x0304)
+			ctrl->page_sizes = page_sizes_v3_4;
+		else if (ctrl->nand_version >= 0x0202)
+			ctrl->page_sizes = page_sizes_v2_2;
+		else
+			ctrl->page_sizes = page_sizes_v2_1;
+
+		if (ctrl->nand_version >= 0x0202)
+			ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT;
+		else
+			ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1;
+
 		if (ctrl->nand_version >= 0x0600)
 			ctrl->block_sizes = block_sizes_v6;
-		else
+		else if (ctrl->nand_version >= 0x0400)
 			ctrl->block_sizes = block_sizes_v4;
+		else if (ctrl->nand_version >= 0x0202)
+			ctrl->block_sizes = block_sizes_v2_2;
+		else
+			ctrl->block_sizes = block_sizes_v2_1;
 
 		if (ctrl->nand_version < 0x0400) {
-			ctrl->max_page_size = 4096;
+			if (ctrl->nand_version < 0x0202)
+				ctrl->max_page_size = 2048;
+			else
+				ctrl->max_page_size = 4096;
 			ctrl->max_block_size = 512 * 1024;
 		}
 	}
@@ -598,6 +724,8 @@
 	/* flash_dma register offsets */
 	if (ctrl->nand_version >= 0x0703)
 		ctrl->flash_dma_offsets = flash_dma_regs_v4;
+	else if (ctrl->nand_version == 0x0602)
+		ctrl->flash_dma_offsets = flash_dma_regs_v0;
 	else
 		ctrl->flash_dma_offsets = flash_dma_regs_v1;
 }
@@ -644,6 +772,22 @@
 	__raw_writel(val, ctrl->nand_fc + word * 4);
 }
 
+static inline void edu_writel(struct brcmnand_controller *ctrl,
+			      enum edu_reg reg, u32 val)
+{
+	u16 offs = ctrl->edu_offsets[reg];
+
+	brcmnand_writel(val, ctrl->edu_base + offs);
+}
+
+static inline u32 edu_readl(struct brcmnand_controller *ctrl,
+			    enum edu_reg reg)
+{
+	u16 offs = ctrl->edu_offsets[reg];
+
+	return brcmnand_readl(ctrl->edu_base + offs);
+}
+
 static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
 {
 
@@ -724,6 +868,9 @@
 	enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
 	int cs = host->cs;
 
+	if (!ctrl->reg_offsets[reg])
+		return;
+
 	if (ctrl->nand_version == 0x0702)
 		bits = 7;
 	else if (ctrl->nand_version >= 0x0600)
@@ -782,8 +929,10 @@
 		return GENMASK(7, 0);
 	else if (ctrl->nand_version >= 0x0600)
 		return GENMASK(6, 0);
-	else
+	else if (ctrl->nand_version >= 0x0303)
 		return GENMASK(5, 0);
+	else
+		return GENMASK(4, 0);
 }
 
 #define NAND_ACC_CONTROL_ECC_SHIFT	16
@@ -913,13 +1062,23 @@
 	return ctrl->flash_dma_base;
 }
 
+static inline bool has_edu(struct brcmnand_controller *ctrl)
+{
+	return ctrl->edu_base;
+}
+
+static inline bool use_dma(struct brcmnand_controller *ctrl)
+{
+	return has_flash_dma(ctrl) || has_edu(ctrl);
+}
+
 static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
 {
 	if (ctrl->pio_poll_mode)
 		return;
 
 	if (has_flash_dma(ctrl)) {
-		ctrl->flash_dma_base = 0;
+		ctrl->flash_dma_base = NULL;
 		disable_irq(ctrl->dma_irq);
 	}
 
@@ -1004,33 +1163,30 @@
 	struct brcmnand_cfg *cfg = &host->hwcfg;
 	int sas = cfg->spare_area_size << cfg->sector_size_1k;
 	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+	u32 next;
 
-	if (section >= sectors * 2)
+	if (section > sectors)
 		return -ERANGE;
 
-	oobregion->offset = (section / 2) * sas;
+	next = (section * sas);
+	if (section < sectors)
+		next += 6;
 
-	if (section & 1) {
-		oobregion->offset += 9;
-		oobregion->length = 7;
+	if (section) {
+		oobregion->offset = ((section - 1) * sas) + 9;
 	} else {
-		oobregion->length = 6;
-
-		/* First sector of each page may have BBI */
-		if (!section) {
-			/*
-			 * Small-page NAND use byte 6 for BBI while large-page
-			 * NAND use bytes 0 and 1.
-			 */
-			if (cfg->page_size > 512) {
-				oobregion->offset += 2;
-				oobregion->length -= 2;
-			} else {
-				oobregion->length--;
-			}
+		if (cfg->page_size > 512) {
+			/* Large page NAND uses first 2 bytes for BBI */
+			oobregion->offset = 2;
+		} else {
+			/* Small page NAND uses last byte before ECC for BBI */
+			oobregion->offset = 0;
+			next--;
 		}
 	}
 
+	oobregion->length = next - oobregion->offset;
+
 	return 0;
 }
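
As a sanity check on the reworked free-region math above, here is a
standalone program reproducing the regions it yields for a hypothetical
large-page chip (2048-byte page with 16 spare bytes per 512-byte
sector, so sas = 16 and sectors = 4; the Hamming ECC occupies bytes 6-8
of each 16-byte spare chunk and large-page parts keep the BBI in bytes
0-1):

#include <stdio.h>

int main(void)
{
	const int sas = 16, sectors = 4, large_page = 1;

	for (int section = 0; section <= sectors; section++) {
		int next = section * sas;
		int offset;

		if (section < sectors)
			next += 6;	/* stop where the ECC bytes begin */

		if (section) {
			offset = (section - 1) * sas + 9; /* past the ECC */
		} else if (large_page) {
			offset = 2;	/* bytes 0-1 carry the BBI */
		} else {
			offset = 0;	/* small page: last pre-ECC byte is BBI */
			next--;
		}

		printf("region %d: offset %2d length %2d\n",
		       section, offset, next - offset);
	}
	return 0;
}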
 
@@ -1289,6 +1445,52 @@
 	return tbytes;
 }
 
+static void brcmnand_edu_init(struct brcmnand_controller *ctrl)
+{
+	/* initialize edu */
+	edu_writel(ctrl, EDU_ERR_STATUS, 0);
+	edu_readl(ctrl, EDU_ERR_STATUS);
+	edu_writel(ctrl, EDU_DONE, 0);
+	edu_writel(ctrl, EDU_DONE, 0);
+	edu_writel(ctrl, EDU_DONE, 0);
+	edu_writel(ctrl, EDU_DONE, 0);
+	edu_readl(ctrl, EDU_DONE);
+}
+
+/* edu irq */
+static irqreturn_t brcmnand_edu_irq(int irq, void *data)
+{
+	struct brcmnand_controller *ctrl = data;
+
+	if (ctrl->edu_count) {
+		ctrl->edu_count--;
+		while (!(edu_readl(ctrl, EDU_DONE) & EDU_DONE_MASK))
+			udelay(1);
+		edu_writel(ctrl, EDU_DONE, 0);
+		edu_readl(ctrl, EDU_DONE);
+	}
+
+	if (ctrl->edu_count) {
+		ctrl->edu_dram_addr += FC_BYTES;
+		ctrl->edu_ext_addr += FC_BYTES;
+
+		edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
+		edu_readl(ctrl, EDU_DRAM_ADDR);
+		edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
+		edu_readl(ctrl, EDU_EXT_ADDR);
+
+		mb(); /* flush previous writes */
+		edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
+		edu_readl(ctrl, EDU_CMD);
+
+		return IRQ_HANDLED;
+	}
+
+	complete(&ctrl->edu_done);
+
+	return IRQ_HANDLED;
+}
+
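
Each EDU completion interrupt above retires one FC_BYTES chunk and,
while chunks remain, advances both the DRAM and flash addresses and
re-arms the engine; edu_done is completed only after the last chunk. A
userspace model of that chunk-chaining state machine (names and the
printed trace are illustrative):

#include <stdint.h>
#include <stdio.h>

#define FC_BYTES 512	/* flash-cache chunk size used by this driver */

struct edu_state {
	uint64_t dram_addr;
	uint32_t ext_addr;
	int count;
};

/* Returns 1 if the engine was re-armed for another chunk, 0 when done. */
static int fake_edu_irq(struct edu_state *s)
{
	if (s->count)
		s->count--;		/* retire the finished chunk */

	if (s->count) {
		s->dram_addr += FC_BYTES;
		s->ext_addr += FC_BYTES;
		printf("re-arm: dram=%#llx ext=%#x chunks left=%d\n",
		       (unsigned long long)s->dram_addr, s->ext_addr,
		       s->count);
		return 1;
	}

	puts("complete(&edu_done)");
	return 0;
}

int main(void)
{
	/* A 2048-byte page transfer split into four FC_BYTES chunks. */
	struct edu_state s = { .dram_addr = 0x1000, .count = 4 };

	while (fake_edu_irq(&s))
		;
	return 0;
}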
 static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
 {
 	struct brcmnand_controller *ctrl = data;
@@ -1297,6 +1499,16 @@
 	if (ctrl->dma_pending)
 		return IRQ_HANDLED;
 
+	/* check if we need to piggyback on the ctrlrdy irq */
+	if (ctrl->edu_pending) {
+		/* Discard interrupts while using the dedicated EDU irq */
+		if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0))
+			return IRQ_HANDLED;
+
+		/* no dedicated EDU irq registered, call the handler here */
+		return brcmnand_edu_irq(irq, data);
+	}
+
 	complete(&ctrl->done);
 	return IRQ_HANDLED;
 }
@@ -1635,6 +1847,97 @@
 }
 
 /**
+ * Kick the EDU engine
+ */
+static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
+			      u32 len, u8 cmd)
+{
+	struct brcmnand_controller *ctrl = host->ctrl;
+	unsigned long timeo = msecs_to_jiffies(200);
+	int ret = 0;
+	int dir = (cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+	u8 edu_cmd = (cmd == CMD_PAGE_READ ? EDU_CMD_READ : EDU_CMD_WRITE);
+	unsigned int trans = len >> FC_SHIFT;
+	dma_addr_t pa;
+
+	pa = dma_map_single(ctrl->dev, buf, len, dir);
+	if (dma_mapping_error(ctrl->dev, pa)) {
+		dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n");
+		return -ENOMEM;
+	}
+
+	ctrl->edu_pending = true;
+	ctrl->edu_dram_addr = pa;
+	ctrl->edu_ext_addr = addr;
+	ctrl->edu_cmd = edu_cmd;
+	ctrl->edu_count = trans;
+
+	edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
+	edu_readl(ctrl,  EDU_DRAM_ADDR);
+	edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
+	edu_readl(ctrl, EDU_EXT_ADDR);
+	edu_writel(ctrl, EDU_LENGTH, FC_BYTES);
+	edu_readl(ctrl, EDU_LENGTH);
+
+	/* Start edu engine */
+	mb(); /* flush previous writes */
+	edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
+	edu_readl(ctrl, EDU_CMD);
+
+	if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0) {
+		dev_err(ctrl->dev,
+			"timeout waiting for EDU; status %#x, error status %#x\n",
+			edu_readl(ctrl, EDU_STATUS),
+			edu_readl(ctrl, EDU_ERR_STATUS));
+	}
+
+	dma_unmap_single(ctrl->dev, pa, len, dir);
+
+	/* for program page check NAND status */
+	if (((brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+	      INTFC_FLASH_STATUS) & NAND_STATUS_FAIL) &&
+	    edu_cmd == EDU_CMD_WRITE) {
+		dev_info(ctrl->dev, "program failed at %llx\n",
+			 (unsigned long long)addr);
+		ret = -EIO;
+	}
+
+	/* Make sure the EDU status is clean */
+	if (edu_readl(ctrl, EDU_STATUS) & EDU_STATUS_ACTIVE)
+		dev_warn(ctrl->dev, "EDU still active: %#x\n",
+			 edu_readl(ctrl, EDU_STATUS));
+
+	if (unlikely(edu_readl(ctrl, EDU_ERR_STATUS) & EDU_ERR_STATUS_ERRACK)) {
+		dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n",
+			 (unsigned long long)addr);
+		ret = -EIO;
+	}
+
+	ctrl->edu_pending = false;
+	brcmnand_edu_init(ctrl);
+	edu_writel(ctrl, EDU_STOP, 0); /* force stop */
+	edu_readl(ctrl, EDU_STOP);
+
+	if (!ret && edu_cmd == EDU_CMD_READ) {
+		u64 err_addr = 0;
+
+		/*
+		 * check for ECC errors here, subpage ECC errors are
+		 * retained in ECC error address register
+		 */
+		err_addr = brcmnand_get_uncorrecc_addr(ctrl);
+		if (!err_addr) {
+			err_addr = brcmnand_get_correcc_addr(ctrl);
+			if (err_addr)
+				ret = -EUCLEAN;
+		} else
+			ret = -EBADMSG;
+	}
+
+	return ret;
+}
+
+/**
  * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
  * following ahead of time:
  *  - Is this descriptor the beginning or end of a linked list?
@@ -1677,8 +1980,11 @@
 
 	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
 	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
-	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
-	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+	if (ctrl->nand_version > 0x0602) {
+		flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
+				 upper_32_bits(desc));
+		(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+	}
 
 	/* Start FLASH_DMA engine */
 	ctrl->dma_pending = true;
@@ -1756,7 +2062,7 @@
 					mtd->oobsize / trans,
 					host->hwcfg.sector_size_1k);
 
-		if (!ret) {
+		if (ret != -EBADMSG) {
 			*err_addr = brcmnand_get_uncorrecc_addr(ctrl);
 
 			if (*err_addr)
@@ -1834,21 +2140,28 @@
 	u64 err_addr = 0;
 	int err;
 	bool retry = true;
+	bool edu_err = false;
 
 	dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
 
 try_dmaread:
 	brcmnand_clear_ecc_addr(ctrl);
 
-	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
-		err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
-					     CMD_PAGE_READ);
+	if (ctrl->dma_trans && !oob && flash_dma_buf_ok(buf)) {
+		err = ctrl->dma_trans(host, addr, buf,
+				      trans * FC_BYTES,
+				      CMD_PAGE_READ);
+
 		if (err) {
 			if (mtd_is_bitflip_or_eccerr(err))
 				err_addr = addr;
 			else
 				return -EIO;
 		}
+
+		if (has_edu(ctrl) && err_addr)
+			edu_err = true;
+
 	} else {
 		if (oob)
 			memset(oob, 0x99, mtd->oobsize);
@@ -1896,6 +2209,11 @@
 	if (mtd_is_bitflip(err)) {
 		unsigned int corrected = brcmnand_count_corrected(ctrl);
 
+		/* in case of EDU correctable error we read again using PIO */
+		if (edu_err)
+			err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
+						   oob, &err_addr);
+
 		dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
 			(unsigned long long)err_addr);
 		mtd->ecc_stats.corrected += corrected;
@@ -1978,10 +2296,12 @@
 	for (i = 0; i < ctrl->max_oob; i += 4)
 		oob_reg_write(ctrl, i, 0xffffffff);
 
-	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
-		if (brcmnand_dma_trans(host, addr, (u32 *)buf,
-					mtd->writesize, CMD_PROGRAM_PAGE))
+	if (use_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
+		if (ctrl->dma_trans(host, addr, (u32 *)buf, mtd->writesize,
+				    CMD_PROGRAM_PAGE))
 			ret = -EIO;
+
 		goto out;
 	}
 
@@ -2149,7 +2469,7 @@
 		(!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
 		(device_size << CFG_DEVICE_SIZE_SHIFT);
 	if (cfg_offs == cfg_ext_offs) {
-		tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) |
+		tmp |= (page_size << ctrl->page_size_shift) |
 		       (block_size << CFG_BLK_SIZE_SHIFT);
 		nand_writereg(ctrl, cfg_offs, tmp);
 	} else {
@@ -2161,9 +2481,11 @@
 
 	tmp = nand_readreg(ctrl, acc_control_offs);
 	tmp &= ~brcmnand_ecc_level_mask(ctrl);
-	tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
 	tmp &= ~brcmnand_spare_area_mask(ctrl);
-	tmp |= cfg->spare_area_size;
+	if (ctrl->nand_version >= 0x0302) {
+		tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
+		tmp |= cfg->spare_area_size;
+	}
 	nand_writereg(ctrl, acc_control_offs, tmp);
 
 	brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
@@ -2210,6 +2532,8 @@
 {
 	struct mtd_info *mtd = nand_to_mtd(&host->chip);
 	struct nand_chip *chip = &host->chip;
+	const struct nand_ecc_props *requirements =
+		nanddev_get_ecc_requirements(&chip->base);
 	struct brcmnand_controller *ctrl = host->ctrl;
 	struct brcmnand_cfg *cfg = &host->hwcfg;
 	char msg[128];
@@ -2243,34 +2567,34 @@
 	cfg->col_adr_bytes = 2;
 	cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
 
-	if (chip->ecc.mode != NAND_ECC_HW) {
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
 		dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
-			chip->ecc.mode);
+			chip->ecc.engine_type);
 		return -EINVAL;
 	}
 
-	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
+	if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
 		if (chip->ecc.strength == 1 && chip->ecc.size == 512)
 			/* Default to Hamming for 1-bit ECC, if unspecified */
-			chip->ecc.algo = NAND_ECC_HAMMING;
+			chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
 		else
 			/* Otherwise, BCH */
-			chip->ecc.algo = NAND_ECC_BCH;
+			chip->ecc.algo = NAND_ECC_ALGO_BCH;
 	}
 
-	if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
-						   chip->ecc.size != 512)) {
+	if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING &&
+	    (chip->ecc.strength != 1 || chip->ecc.size != 512)) {
 		dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
 			chip->ecc.strength, chip->ecc.size);
 		return -EINVAL;
 	}
 
-	if (chip->ecc.mode != NAND_ECC_NONE &&
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
 	    (!chip->ecc.size || !chip->ecc.strength)) {
-		if (chip->base.eccreq.step_size && chip->base.eccreq.strength) {
+		if (requirements->step_size && requirements->strength) {
 			/* use detected ECC parameters */
-			chip->ecc.size = chip->base.eccreq.step_size;
-			chip->ecc.strength = chip->base.eccreq.strength;
+			chip->ecc.size = requirements->step_size;
+			chip->ecc.strength = requirements->strength;
 			dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
 				chip->ecc.size, chip->ecc.strength);
 		}
@@ -2278,7 +2602,7 @@
 
 	switch (chip->ecc.size) {
 	case 512:
-		if (chip->ecc.algo == NAND_ECC_HAMMING)
+		if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
 			cfg->ecc_level = 15;
 		else
 			cfg->ecc_level = chip->ecc.strength;
@@ -2349,7 +2673,7 @@
 	 * to/from, and have nand_base pass us a bounce buffer instead, as
 	 * needed.
 	 */
-	chip->options |= NAND_USE_BOUNCE_BUFFER;
+	chip->options |= NAND_USES_DMA;
 
 	if (chip->bbt_options & NAND_BBT_USE_FLASH)
 		chip->bbt_options |= NAND_BBT_NO_OOB;
@@ -2412,7 +2736,7 @@
 	chip->legacy.read_buf = brcmnand_read_buf;
 	chip->legacy.write_buf = brcmnand_write_buf;
 
-	chip->ecc.mode = NAND_ECC_HW;
+	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 	chip->ecc.read_page = brcmnand_read_page;
 	chip->ecc.write_page = brcmnand_write_page;
 	chip->ecc.read_page_raw = brcmnand_read_page_raw;
@@ -2490,6 +2814,8 @@
 
 	if (has_flash_dma(ctrl))
 		ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
+	else if (has_edu(ctrl))
+		ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
 
 	return 0;
 }
@@ -2504,6 +2830,13 @@
 		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
 	}
 
+	if (has_edu(ctrl)) {
+		ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
+		edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
+		edu_readl(ctrl, EDU_CONFIG);
+		brcmnand_edu_init(ctrl);
+	}
+
 	brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
 	brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
 	brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
@@ -2533,6 +2866,8 @@
 EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
 
 static const struct of_device_id brcmnand_of_match[] = {
+	{ .compatible = "brcm,brcmnand-v2.1" },
+	{ .compatible = "brcm,brcmnand-v2.2" },
 	{ .compatible = "brcm,brcmnand-v4.0" },
 	{ .compatible = "brcm,brcmnand-v5.0" },
 	{ .compatible = "brcm,brcmnand-v6.0" },
@@ -2549,6 +2884,49 @@
 /***********************************************************************
  * Platform driver setup (per controller)
  ***********************************************************************/
+static int brcmnand_edu_setup(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
+	struct resource *res;
+	int ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-edu");
+	if (res) {
+		ctrl->edu_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(ctrl->edu_base))
+			return PTR_ERR(ctrl->edu_base);
+
+		ctrl->edu_offsets = edu_regs;
+
+		edu_writel(ctrl, EDU_CONFIG, EDU_CONFIG_MODE_NAND |
+			   EDU_CONFIG_SWAP_CFG);
+		edu_readl(ctrl, EDU_CONFIG);
+
+		/* initialize edu */
+		brcmnand_edu_init(ctrl);
+
+		ctrl->edu_irq = platform_get_irq_optional(pdev, 1);
+		if (ctrl->edu_irq < 0) {
+			dev_warn(dev,
+				 "FLASH EDU enabled, using ctlrdy irq\n");
+		} else {
+			ret = devm_request_irq(dev, ctrl->edu_irq,
+					       brcmnand_edu_irq, 0,
+					       "brcmnand-edu", ctrl);
+			if (ret < 0) {
+				dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n",
+					ctrl->edu_irq, ret);
+				return ret;
+			}
+
+			dev_info(dev, "FLASH EDU enabled using irq %u\n",
+				 ctrl->edu_irq);
+		}
+	}
+
+	return 0;
+}
 
 int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
 {
@@ -2574,6 +2952,7 @@
 
 	init_completion(&ctrl->done);
 	init_completion(&ctrl->dma_done);
+	init_completion(&ctrl->edu_done);
 	nand_controller_init(&ctrl->controller);
 	ctrl->controller.ops = &brcmnand_controller_ops;
 	INIT_LIST_HEAD(&ctrl->host_list);
@@ -2631,6 +3010,16 @@
 		/* initialize the dma version */
 		brcmnand_flash_dma_revision_init(ctrl);
 
+		ret = -EIO;
+		if (ctrl->nand_version >= 0x0700)
+			ret = dma_set_mask_and_coherent(&pdev->dev,
+							DMA_BIT_MASK(40));
+		if (ret)
+			ret = dma_set_mask_and_coherent(&pdev->dev,
+							DMA_BIT_MASK(32));
+		if (ret)
+			goto err;
+
 		/* linked-list and stop on error */
 		flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
 		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
@@ -2661,6 +3050,16 @@
 		}
 
 		dev_info(dev, "enabling FLASH_DMA\n");
+		/* set flash dma transfer function to call */
+		ctrl->dma_trans = brcmnand_dma_trans;
+	} else	{
+		ret = brcmnand_edu_setup(pdev);
+		if (ret < 0)
+			goto err;
+
+		if (has_edu(ctrl))
+			/* set edu transfer function to call */
+			ctrl->dma_trans = brcmnand_edu_trans;
 	}
 
 	/* Disable automatic device ID config, direct addressing */
@@ -2751,9 +3150,15 @@
 {
 	struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
 	struct brcmnand_host *host;
+	struct nand_chip *chip;
+	int ret;
 
-	list_for_each_entry(host, &ctrl->host_list, node)
-		nand_release(&host->chip);
+	list_for_each_entry(host, &ctrl->host_list, node) {
+		chip = &host->chip;
+		ret = mtd_device_unregister(nand_to_mtd(chip));
+		WARN_ON(ret);
+		nand_cleanup(chip);
+	}
 
 	clk_disable_unprepare(ctrl->clk);
 
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
new file mode 100644
index 0000000..b46786c
--- /dev/null
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -0,0 +1,3039 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Cadence NAND flash controller driver
+ *
+ * Copyright (C) 2019 Cadence
+ *
+ * Author: Piotr Sroka <piotrs@cadence.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_device.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+
+/*
+ * HPNFC can work in 3 modes:
+ * -  PIO - can work with either master or slave DMA.
+ * -  CDMA - needs master DMA for accessing command descriptors.
+ * -  Generic mode - can use only slave DMA.
+ * CDMA and PIO modes can be used to execute only base commands.
+ * Generic mode can be used to execute any command
+ * on the NAND flash memory. The driver uses CDMA mode for
+ * block erasing, page reading and page programming.
+ * Generic mode is used for executing the rest of the commands.
+ */
+
+#define MAX_ADDRESS_CYC		6
+#define MAX_ERASE_ADDRESS_CYC	3
+#define MAX_DATA_SIZE		0xFFFC
+#define DMA_DATA_SIZE_ALIGN	8
+
+/* Register definitions. */
+/*
+ * Command register 0.
+ * Writing data to this register will initiate a new transaction
+ * of the NF controller.
+ */
+#define CMD_REG0			0x0000
+/* Command type field mask. */
+#define		CMD_REG0_CT		GENMASK(31, 30)
+/* Command type CDMA. */
+#define		CMD_REG0_CT_CDMA	0uL
+/* Command type generic. */
+#define		CMD_REG0_CT_GEN		3uL
+/* Command thread number field mask. */
+#define		CMD_REG0_TN		GENMASK(27, 24)
+
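
These GENMASK() field masks are meant to be composed into register
words with FIELD_PREP() from linux/bitfield.h. A self-contained worked
example, with userspace stand-ins for the two macros, packing a
"generic command on thread 2" into CMD_REG0:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel macros (GCC/Clang builtins). */
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) (((val) << __builtin_ctz(mask)) & (mask))

#define CMD_REG0_CT	GENMASK(31, 30)	/* command type */
#define CMD_REG0_TN	GENMASK(27, 24)	/* thread number */
#define CMD_REG0_CT_GEN	3u

int main(void)
{
	uint32_t reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN) |
		       FIELD_PREP(CMD_REG0_TN, 2);

	printf("CMD_REG0 = %#010x\n", reg);	/* prints 0xc2000000 */
	return 0;
}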
+/* Command register 2. */
+#define CMD_REG2			0x0008
+/* Command register 3. */
+#define CMD_REG3			0x000C
+/* Pointer register selecting which thread's status is reported in CMD_STATUS. */
+#define CMD_STATUS_PTR			0x0010
+/* Command status register for selected thread. */
+#define CMD_STATUS			0x0014
+
+/* Interrupt status register. */
+#define INTR_STATUS			0x0110
+#define		INTR_STATUS_SDMA_ERR	BIT(22)
+#define		INTR_STATUS_SDMA_TRIGG	BIT(21)
+#define		INTR_STATUS_UNSUPP_CMD	BIT(19)
+#define		INTR_STATUS_DDMA_TERR	BIT(18)
+#define		INTR_STATUS_CDMA_TERR	BIT(17)
+#define		INTR_STATUS_CDMA_IDL	BIT(16)
+
+/* Interrupt enable register. */
+#define INTR_ENABLE				0x0114
+#define		INTR_ENABLE_INTR_EN		BIT(31)
+#define		INTR_ENABLE_SDMA_ERR_EN		BIT(22)
+#define		INTR_ENABLE_SDMA_TRIGG_EN	BIT(21)
+#define		INTR_ENABLE_UNSUPP_CMD_EN	BIT(19)
+#define		INTR_ENABLE_DDMA_TERR_EN	BIT(18)
+#define		INTR_ENABLE_CDMA_TERR_EN	BIT(17)
+#define		INTR_ENABLE_CDMA_IDLE_EN	BIT(16)
+
+/* Controller internal state. */
+#define CTRL_STATUS				0x0118
+#define		CTRL_STATUS_INIT_COMP		BIT(9)
+#define		CTRL_STATUS_CTRL_BUSY		BIT(8)
+
+/* Command Engine threads state. */
+#define TRD_STATUS				0x0120
+
+/* Command Engine interrupt thread error status. */
+#define TRD_ERR_INT_STATUS			0x0128
+/* Command Engine interrupt thread error enable. */
+#define TRD_ERR_INT_STATUS_EN			0x0130
+/* Command Engine interrupt thread complete status. */
+#define TRD_COMP_INT_STATUS			0x0138
+
+/*
+ * Transfer config 0 register.
+ * Configures data transfer parameters.
+ */
+#define TRAN_CFG_0				0x0400
+/* Offset value from the beginning of the page. */
+#define		TRAN_CFG_0_OFFSET		GENMASK(31, 16)
+/* Number of sectors to transfer within a single NF device's page. */
+#define		TRAN_CFG_0_SEC_CNT		GENMASK(7, 0)
+
+/*
+ * Transfer config 1 register.
+ * Configures data transfer parameters.
+ */
+#define TRAN_CFG_1				0x0404
+/* Size of last data sector. */
+#define		TRAN_CFG_1_LAST_SEC_SIZE	GENMASK(31, 16)
+/* Size of not-last data sector. */
+#define		TRAN_CFG_1_SECTOR_SIZE		GENMASK(15, 0)
+
+/* ECC engine configuration register 0. */
+#define ECC_CONFIG_0				0x0428
+/* Correction strength. */
+#define		ECC_CONFIG_0_CORR_STR		GENMASK(10, 8)
+/* Enable erased pages detection mechanism. */
+#define		ECC_CONFIG_0_ERASE_DET_EN	BIT(1)
+/* Enable controller ECC check bits generation and correction. */
+#define		ECC_CONFIG_0_ECC_EN		BIT(0)
+
+/* ECC engine configuration register 1. */
+#define ECC_CONFIG_1				0x042C
+
+/* Multiplane settings register. */
+#define MULTIPLANE_CFG				0x0434
+/* Cache operation settings. */
+#define CACHE_CFG				0x0438
+
+/* DMA settings register. */
+#define DMA_SETINGS				0x043C
+/* Enable SDMA error report on access unprepared slave DMA interface. */
+#define		DMA_SETINGS_SDMA_ERR_RSP	BIT(17)
+
+/* Transferred data block size for the slave DMA module. */
+#define SDMA_SIZE				0x0440
+
+/* Thread number associated with the transferred data block
+ * for the slave DMA module.
+ */
+#define SDMA_TRD_NUM				0x0444
+/* Thread number mask. */
+#define		SDMA_TRD_NUM_SDMA_TRD		GENMASK(2, 0)
+
+#define CONTROL_DATA_CTRL			0x0494
+/* Control data size mask. */
+#define		CONTROL_DATA_CTRL_SIZE		GENMASK(15, 0)
+
+#define CTRL_VERSION				0x800
+#define		CTRL_VERSION_REV		GENMASK(7, 0)
+
+/* Available hardware features of the controller. */
+#define CTRL_FEATURES				0x804
+/* Support for NV-DDR2/3 work mode. */
+#define		CTRL_FEATURES_NVDDR_2_3		BIT(28)
+/* Support for NV-DDR work mode. */
+#define		CTRL_FEATURES_NVDDR		BIT(27)
+/* Support for asynchronous work mode. */
+#define		CTRL_FEATURES_ASYNC		BIT(26)
+/* Number of banks supported by the controller. */
+#define		CTRL_FEATURES_N_BANKS		GENMASK(25, 24)
+/* Slave and Master DMA data width. */
+#define		CTRL_FEATURES_DMA_DWITH64	BIT(21)
+/* Availability of Control Data feature. */
+#define		CTRL_FEATURES_CONTROL_DATA	BIT(10)
+
+/* BCH Engine identification register 0 - correction strengths. */
+#define BCH_CFG_0				0x838
+#define		BCH_CFG_0_CORR_CAP_0		GENMASK(7, 0)
+#define		BCH_CFG_0_CORR_CAP_1		GENMASK(15, 8)
+#define		BCH_CFG_0_CORR_CAP_2		GENMASK(23, 16)
+#define		BCH_CFG_0_CORR_CAP_3		GENMASK(31, 24)
+
+/* BCH Engine identification register 1 - correction strengths. */
+#define BCH_CFG_1				0x83C
+#define		BCH_CFG_1_CORR_CAP_4		GENMASK(7, 0)
+#define		BCH_CFG_1_CORR_CAP_5		GENMASK(15, 8)
+#define		BCH_CFG_1_CORR_CAP_6		GENMASK(23, 16)
+#define		BCH_CFG_1_CORR_CAP_7		GENMASK(31, 24)
+
+/* BCH Engine identification register 2 - sector sizes. */
+#define BCH_CFG_2				0x840
+#define		BCH_CFG_2_SECT_0		GENMASK(15, 0)
+#define		BCH_CFG_2_SECT_1		GENMASK(31, 16)
+
+/* BCH Engine identification register 3. */
+#define BCH_CFG_3				0x844
+#define		BCH_CFG_3_METADATA_SIZE		GENMASK(23, 16)
+
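+/*
+ * Illustrative example (hypothetical values): a controller configured
+ * with correction strengths {2, 4, 8, 12, 16, 24, 32, 40} would report
+ * 2 in BCH_CFG_0_CORR_CAP_0, 4 in BCH_CFG_0_CORR_CAP_1, and so on up to
+ * 40 in BCH_CFG_1_CORR_CAP_7; cadence_nand_read_bch_caps() below
+ * collects these fields into ecc_strengths[].
+ */
+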
+/* Ready/Busy# line status. */
+#define RBN_SETINGS				0x1004
+
+/* Common settings. */
+#define COMMON_SET				0x1008
+/* 16 bit device connected to the NAND Flash interface. */
+#define		COMMON_SET_DEVICE_16BIT		BIT(8)
+
+/* Skip_bytes registers. */
+#define SKIP_BYTES_CONF				0x100C
+#define		SKIP_BYTES_MARKER_VALUE		GENMASK(31, 16)
+#define		SKIP_BYTES_NUM_OF_BYTES		GENMASK(7, 0)
+
+#define SKIP_BYTES_OFFSET			0x1010
+#define		 SKIP_BYTES_OFFSET_VALUE	GENMASK(23, 0)
+
+/* Timings configuration. */
+#define ASYNC_TOGGLE_TIMINGS			0x101c
+#define		ASYNC_TOGGLE_TIMINGS_TRH	GENMASK(28, 24)
+#define		ASYNC_TOGGLE_TIMINGS_TRP	GENMASK(20, 16)
+#define		ASYNC_TOGGLE_TIMINGS_TWH	GENMASK(12, 8)
+#define		ASYNC_TOGGLE_TIMINGS_TWP	GENMASK(4, 0)
+
+#define	TIMINGS0				0x1024
+#define		TIMINGS0_TADL			GENMASK(31, 24)
+#define		TIMINGS0_TCCS			GENMASK(23, 16)
+#define		TIMINGS0_TWHR			GENMASK(15, 8)
+#define		TIMINGS0_TRHW			GENMASK(7, 0)
+
+#define	TIMINGS1				0x1028
+#define		TIMINGS1_TRHZ			GENMASK(31, 24)
+#define		TIMINGS1_TWB			GENMASK(23, 16)
+#define		TIMINGS1_TVDLY			GENMASK(7, 0)
+
+#define	TIMINGS2				0x102c
+#define		TIMINGS2_TFEAT			GENMASK(25, 16)
+#define		TIMINGS2_CS_HOLD_TIME		GENMASK(13, 8)
+#define		TIMINGS2_CS_SETUP_TIME		GENMASK(5, 0)
+
+/* Configuration of the resynchronization of the slave DLL of the PHY. */
+#define DLL_PHY_CTRL				0x1034
+#define		DLL_PHY_CTRL_DLL_RST_N		BIT(24)
+#define		DLL_PHY_CTRL_EXTENDED_WR_MODE	BIT(17)
+#define		DLL_PHY_CTRL_EXTENDED_RD_MODE	BIT(16)
+#define		DLL_PHY_CTRL_RS_HIGH_WAIT_CNT	GENMASK(11, 8)
+#define		DLL_PHY_CTRL_RS_IDLE_CNT	GENMASK(7, 0)
+
+/* Register controlling DQ related timing. */
+#define PHY_DQ_TIMING				0x2000
+/* Register controlling DQS related timing. */
+#define PHY_DQS_TIMING				0x2004
+#define		PHY_DQS_TIMING_DQS_SEL_OE_END	GENMASK(3, 0)
+#define		PHY_DQS_TIMING_PHONY_DQS_SEL	BIT(16)
+#define		PHY_DQS_TIMING_USE_PHONY_DQS	BIT(20)
+
+/* Register controlling the gate and loopback control related timing. */
+#define PHY_GATE_LPBK_CTRL			0x2008
+#define		PHY_GATE_LPBK_CTRL_RDS		GENMASK(24, 19)
+
+/* Register holds the control for the master DLL logic. */
+#define PHY_DLL_MASTER_CTRL			0x200C
+#define		PHY_DLL_MASTER_CTRL_BYPASS_MODE	BIT(23)
+
+/* Register holds the control for the slave DLL logic. */
+#define PHY_DLL_SLAVE_CTRL			0x2010
+
+/* This register handles the global control settings for the PHY. */
+#define PHY_CTRL				0x2080
+#define		PHY_CTRL_SDR_DQS		BIT(14)
+#define		PHY_CTRL_PHONY_DQS		GENMASK(9, 4)
+
+/*
+ * This register handles the global control settings
+ * for the termination selects for reads.
+ */
+#define PHY_TSEL				0x2084
+
+/* Generic command layout. */
+#define GCMD_LAY_CS			GENMASK_ULL(11, 8)
+/*
+ * This bit informs the minicontroller if it has to wait for tWB
+ * after sending the last CMD/ADDR/DATA in the sequence.
+ */
+#define GCMD_LAY_TWB			BIT_ULL(6)
+/* Type of generic instruction. */
+#define GCMD_LAY_INSTR			GENMASK_ULL(5, 0)
+
+/* Generic CMD sequence type. */
+#define		GCMD_LAY_INSTR_CMD	0
+/* Generic ADDR sequence type. */
+#define		GCMD_LAY_INSTR_ADDR	1
+/* Generic data transfer sequence type. */
+#define		GCMD_LAY_INSTR_DATA	2
+
+/* Input part of the generic command when the instruction type is CMD. */
+#define GCMD_LAY_INPUT_CMD		GENMASK_ULL(23, 16)
+
+/* Generic command address sequence - address fields. */
+#define GCMD_LAY_INPUT_ADDR		GENMASK_ULL(63, 16)
+/* Generic command address sequence - address size. */
+#define GCMD_LAY_INPUT_ADDR_SIZE	GENMASK_ULL(13, 11)
+
+/* Transfer direction field of generic command data sequence. */
+#define GCMD_DIR			BIT_ULL(11)
+/* Read transfer direction of generic command data sequence. */
+#define		GCMD_DIR_READ		0
+/* Write transfer direction of generic command data sequence. */
+#define		GCMD_DIR_WRITE		1
+
+/* ECC enabled flag of generic command data sequence - ECC enabled. */
+#define GCMD_ECC_EN			BIT_ULL(12)
+/* Generic command data sequence - sector size. */
+#define GCMD_SECT_SIZE			GENMASK_ULL(31, 16)
+/* Generic command data sequence - sector count. */
+#define GCMD_SECT_CNT			GENMASK_ULL(39, 32)
+/* Generic command data sequence - last sector size. */
+#define GCMD_LAST_SIZE			GENMASK_ULL(55, 40)
+
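+/*
+ * Illustrative sketch (not part of the driver logic): a generic DATA-in
+ * sequence word for a single 256-byte read could be composed as
+ *
+ *	u64 cmd = FIELD_PREP(GCMD_LAY_INSTR, GCMD_LAY_INSTR_DATA) |
+ *		  FIELD_PREP(GCMD_DIR, GCMD_DIR_READ) |
+ *		  FIELD_PREP(GCMD_SECT_CNT, 1) |
+ *		  FIELD_PREP(GCMD_LAST_SIZE, 256);
+ *
+ * which mirrors what cadence_nand_cmd_data() does for real transfers.
+ */
+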
+/* CDMA descriptor fields. */
+/* Erase command type of CDMA descriptor. */
+#define CDMA_CT_ERASE		0x1000
+/* Program page command type of CDMA descriptor. */
+#define CDMA_CT_WR		0x2100
+/* Read page command type of CDMA descriptor. */
+#define CDMA_CT_RD		0x2200
+
+/* Flash pointer memory shift. */
+#define CDMA_CFPTR_MEM_SHIFT	24
+/* Flash pointer memory mask. */
+#define CDMA_CFPTR_MEM		GENMASK(26, 24)
+
+/*
+ * Command DMA descriptor flags. If set causes issue interrupt after
+ * the completion of descriptor processing.
+ */
+#define CDMA_CF_INT		BIT(8)
+/*
+ * Command DMA descriptor flags - the next descriptor
+ * address field is valid and descriptor processing should continue.
+ */
+#define CDMA_CF_CONT		BIT(9)
+/* DMA master flag of command DMA descriptor. */
+#define CDMA_CF_DMA_MASTER	BIT(10)
+
+/* Operation complete status of command descriptor. */
+#define CDMA_CS_COMP		BIT(15)
+/* Command descriptor status - operation fail. */
+#define CDMA_CS_FAIL		BIT(14)
+/* Command descriptor status - page erased. */
+#define CDMA_CS_ERP		BIT(11)
+/* Command descriptor status - timeout occurred. */
+#define CDMA_CS_TOUT		BIT(10)
+/*
+ * Maximum amount of correction applied to one ECC sector.
+ * It is part of command descriptor status.
+ */
+#define CDMA_CS_MAXERR		GENMASK(9, 2)
+/* Command descriptor status - uncorrectable ECC error. */
+#define CDMA_CS_UNCE		BIT(1)
+/* Command descriptor status - descriptor error. */
+#define CDMA_CS_ERR		BIT(0)
+
+/* Status of operation - OK. */
+#define STAT_OK			0
+/* Status of operation - FAIL. */
+#define STAT_FAIL		2
+/* Status of operation - uncorrectable ECC error. */
+#define STAT_ECC_UNCORR		3
+/* Status of operation - page erased. */
+#define STAT_ERASED		5
+/* Status of operation - correctable ECC error. */
+#define STAT_ECC_CORR		6
+/* Status of operation - unexpected state. */
+#define STAT_UNKNOWN		7
+/* Status of operation - operation is not completed yet. */
+#define STAT_BUSY		0xFF
+
+#define BCH_MAX_NUM_CORR_CAPS		8
+#define BCH_MAX_NUM_SECTOR_SIZES	2
+
+struct cadence_nand_timings {
+	u32 async_toggle_timings;
+	u32 timings0;
+	u32 timings1;
+	u32 timings2;
+	u32 dll_phy_ctrl;
+	u32 phy_ctrl;
+	u32 phy_dqs_timing;
+	u32 phy_gate_lpbk_ctrl;
+};
+
+/* Command DMA descriptor. */
+struct cadence_nand_cdma_desc {
+	/* Next descriptor address. */
+	u64 next_pointer;
+
+	/* Flash address is a 32-bit address comprising BANK and ROW ADDR. */
+	u32 flash_pointer;
+	/* Field appears in HPNFC version 13. */
+	u16 bank;
+	u16 rsvd0;
+
+	/* Operation the controller needs to perform. */
+	u16 command_type;
+	u16 rsvd1;
+	/* Flags for operation of this command. */
+	u16 command_flags;
+	u16 rsvd2;
+
+	/* System/host memory address required for data DMA commands. */
+	u64 memory_pointer;
+
+	/* Status of operation. */
+	u32 status;
+	u32 rsvd3;
+
+	/* Address pointer to sync buffer location. */
+	u64 sync_flag_pointer;
+
+	/* Controls the buffer sync mechanism. */
+	u32 sync_arguments;
+	u32 rsvd4;
+
+	/* Control data pointer. */
+	u64 ctrl_data_ptr;
+};
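+
+/*
+ * Note (for reference): this driver always builds a single descriptor in
+ * cadence_nand_cdma_desc_prepare(), with CDMA_CF_INT set and CDMA_CF_CONT
+ * clear. A chained list would set CDMA_CF_CONT and a valid next_pointer in
+ * every non-final descriptor; cadence_nand_cdma_finish() reports such a
+ * chain as unsupported and returns STAT_UNKNOWN.
+ */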
+
+/* Interrupt status. */
+struct cadence_nand_irq_status {
+	/* Thread operation complete status. */
+	u32 trd_status;
+	/* Thread operation error. */
+	u32 trd_error;
+	/* Controller status. */
+	u32 status;
+};
+
+/* Cadence NAND flash controller capabilities obtained from driver data. */
+struct cadence_nand_dt_devdata {
+	/* Skew value of the output signals of the NAND Flash interface. */
+	u32 if_skew;
+	/* Indicates whether the slave DMA interface is connected to a DMA engine. */
+	unsigned int has_dma:1;
+};
+
+/* Cadence NAND flash controller capabilities read from registers. */
+struct cdns_nand_caps {
+	/* Maximum number of banks supported by hardware. */
+	u8 max_banks;
+	/* Slave and Master DMA data width in bytes (4 or 8). */
+	u8 data_dma_width;
+	/* Control Data feature supported. */
+	bool data_control_supp;
+	/* Is PHY type DLL. */
+	bool is_phy_type_dll;
+};
+
+struct cdns_nand_ctrl {
+	struct device *dev;
+	struct nand_controller controller;
+	struct cadence_nand_cdma_desc *cdma_desc;
+	/* IP capability. */
+	const struct cadence_nand_dt_devdata *caps1;
+	struct cdns_nand_caps caps2;
+	u8 ctrl_rev;
+	dma_addr_t dma_cdma_desc;
+	u8 *buf;
+	u32 buf_size;
+	u8 curr_corr_str_idx;
+
+	/* Register interface. */
+	void __iomem *reg;
+
+	struct {
+		void __iomem *virt;
+		dma_addr_t dma;
+	} io;
+
+	int irq;
+	/* Interrupts that have happened. */
+	struct cadence_nand_irq_status irq_status;
+	/* Interrupts we are waiting for. */
+	struct cadence_nand_irq_status irq_mask;
+	struct completion complete;
+	/* Protect irq_mask and irq_status. */
+	spinlock_t irq_lock;
+
+	int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
+	struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
+	struct nand_ecc_caps ecc_caps;
+
+	int curr_trans_type;
+
+	struct dma_chan *dmac;
+
+	u32 nf_clk_rate;
+	/*
+	 * Estimated board delay. The value includes the total
+	 * round trip delay for the signals and is used for deciding on values
+	 * associated with data read capture.
+	 */
+	u32 board_delay;
+
+	struct nand_chip *selected_chip;
+
+	unsigned long assigned_cs;
+	struct list_head chips;
+	u8 bch_metadata_size;
+};
+
+struct cdns_nand_chip {
+	struct cadence_nand_timings timings;
+	struct nand_chip chip;
+	u8 nsels;
+	struct list_head node;
+
+	/*
+	 * Part of the OOB area of the NAND flash memory page.
+	 * This part is available for the user to read or write.
+	 */
+	u32 avail_oob_size;
+
+	/* Sector size. There are a few sectors per mtd->writesize. */
+	u32 sector_size;
+	u32 sector_count;
+
+	/* Offset of BBM. */
+	u8 bbm_offs;
+	/* Number of bytes reserved for BBM. */
+	u8 bbm_len;
+	/* ECC strength index. */
+	u8 corr_str_idx;
+
+	u8 cs[];
+};
+
+struct ecc_info {
+	int (*calc_ecc_bytes)(int step_size, int strength);
+	int max_step_size;
+};
+
+static inline struct
+cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
+{
+	return container_of(chip, struct cdns_nand_chip, chip);
+}
+
+static inline struct
+cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
+{
+	return container_of(controller, struct cdns_nand_ctrl, controller);
+}
+
+static bool
+cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
+			u32 buf_len)
+{
+	u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
+
+	return buf && virt_addr_valid(buf) &&
+		likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
+		likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
+}
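+
+/*
+ * Example (illustrative): with data_dma_width == 8, a buffer whose start
+ * address is not 8-byte aligned, whose length is not aligned to
+ * DMA_DATA_SIZE_ALIGN, or which fails virt_addr_valid() (e.g. a vmalloc
+ * buffer) is rejected here and the callers fall back to the controller's
+ * bounce buffer.
+ */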
+
+static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
+				       u32 reg_offset, u32 timeout_us,
+				       u32 mask, bool is_clear)
+{
+	u32 val;
+	int ret;
+
+	ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
+					 val, !(val & mask) == is_clear,
+					 10, timeout_us);
+
+	if (ret < 0) {
+		dev_err(cdns_ctrl->dev,
+			"Timeout while waiting for reg %x, mask %x, is_clear %d\n",
+			reg_offset, mask, is_clear);
+	}
+
+	return ret;
+}
+
+static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
+				       bool enable)
+{
+	u32 reg;
+
+	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+					1000000,
+					CTRL_STATUS_CTRL_BUSY, true))
+		return -ETIMEDOUT;
+
+	reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+
+	if (enable)
+		reg |= ECC_CONFIG_0_ECC_EN;
+	else
+		reg &= ~ECC_CONFIG_0_ECC_EN;
+
+	writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+
+	return 0;
+}
+
+static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
+					  u8 corr_str_idx)
+{
+	u32 reg;
+
+	if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
+		return;
+
+	reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+	reg &= ~ECC_CONFIG_0_CORR_STR;
+	reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
+	writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+
+	cdns_ctrl->curr_corr_str_idx = corr_str_idx;
+}
+
+static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
+					     u8 strength)
+{
+	int i, corr_str_idx = -1;
+
+	for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
+		if (cdns_ctrl->ecc_strengths[i] == strength) {
+			corr_str_idx = i;
+			break;
+		}
+	}
+
+	return corr_str_idx;
+}
+
+static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
+					    u16 marker_value)
+{
+	u32 reg;
+
+	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+					1000000,
+					CTRL_STATUS_CTRL_BUSY, true))
+		return -ETIMEDOUT;
+
+	reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
+	reg &= ~SKIP_BYTES_MARKER_VALUE;
+	reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
+			  marker_value);
+
+	writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
+
+	return 0;
+}
+
+static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
+					    u8 num_of_bytes,
+					    u32 offset_value,
+					    int enable)
+{
+	u32 reg, skip_bytes_offset;
+
+	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+					1000000,
+					CTRL_STATUS_CTRL_BUSY, true))
+		return -ETIMEDOUT;
+
+	if (!enable) {
+		num_of_bytes = 0;
+		offset_value = 0;
+	}
+
+	reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
+	reg &= ~SKIP_BYTES_NUM_OF_BYTES;
+	reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
+			  num_of_bytes);
+	skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
+				       offset_value);
+
+	writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
+	writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);
+
+	return 0;
+}
+
+/* Function enables/disables hardware detection of erased data. */
+static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
+					     bool enable,
+					     u8 bitflips_threshold)
+{
+	u32 reg;
+
+	reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
+
+	if (enable)
+		reg |= ECC_CONFIG_0_ERASE_DET_EN;
+	else
+		reg &= ~ECC_CONFIG_0_ERASE_DET_EN;
+
+	writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
+	writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
+}
+
+static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
+					   bool bit_bus16)
+{
+	u32 reg;
+
+	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+					1000000,
+					CTRL_STATUS_CTRL_BUSY, true))
+		return -ETIMEDOUT;
+
+	reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);
+
+	if (!bit_bus16)
+		reg &= ~COMMON_SET_DEVICE_16BIT;
+	else
+		reg |= COMMON_SET_DEVICE_16BIT;
+	writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);
+
+	return 0;
+}
+
+static void
+cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
+			     struct cadence_nand_irq_status *irq_status)
+{
+	writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
+	writel_relaxed(irq_status->trd_status,
+		       cdns_ctrl->reg + TRD_COMP_INT_STATUS);
+	writel_relaxed(irq_status->trd_error,
+		       cdns_ctrl->reg + TRD_ERR_INT_STATUS);
+}
+
+static void
+cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
+			     struct cadence_nand_irq_status *irq_status)
+{
+	irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
+	irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
+					       + TRD_COMP_INT_STATUS);
+	irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
+					      + TRD_ERR_INT_STATUS);
+}
+
+static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
+			struct cadence_nand_irq_status *irq_status)
+{
+	cadence_nand_read_int_status(cdns_ctrl, irq_status);
+
+	return irq_status->status || irq_status->trd_status ||
+		irq_status->trd_error;
+}
+
+static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
+	memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
+	memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
+	spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
+}
+
+/*
+ * This is the interrupt service routine. It handles all interrupts
+ * sent to this device.
+ */
+static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = dev_id;
+	struct cadence_nand_irq_status irq_status;
+	irqreturn_t result = IRQ_NONE;
+
+	spin_lock(&cdns_ctrl->irq_lock);
+
+	if (irq_detected(cdns_ctrl, &irq_status)) {
+		/* Handle interrupt. */
+		/* First acknowledge it. */
+		cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
+		/* Store the status in the device context for someone to read. */
+		cdns_ctrl->irq_status.status |= irq_status.status;
+		cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
+		cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
+		/* Notify anyone who cares that it happened. */
+		complete(&cdns_ctrl->complete);
+		/* Tell the OS that we've handled this. */
+		result = IRQ_HANDLED;
+	}
+	spin_unlock(&cdns_ctrl->irq_lock);
+
+	return result;
+}
+
+static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
+				      struct cadence_nand_irq_status *irq_mask)
+{
+	writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
+		       cdns_ctrl->reg + INTR_ENABLE);
+
+	writel_relaxed(irq_mask->trd_error,
+		       cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
+}
+
+static void
+cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
+			  struct cadence_nand_irq_status *irq_mask,
+			  struct cadence_nand_irq_status *irq_status)
+{
+	unsigned long timeout = msecs_to_jiffies(10000);
+	unsigned long time_left;
+
+	time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
+						timeout);
+
+	*irq_status = cdns_ctrl->irq_status;
+	if (time_left == 0) {
+		/* Timeout error. */
+		dev_err(cdns_ctrl->dev, "timeout occurred:\n");
+		dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
+			irq_status->status, irq_mask->status);
+		dev_err(cdns_ctrl->dev,
+			"\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
+			irq_status->trd_status, irq_mask->trd_status);
+		dev_err(cdns_ctrl->dev,
+			"\ttrd_error = 0x%x, trd_error mask = 0x%x\n",
+			irq_status->trd_error, irq_mask->trd_error);
+	}
+}
+
+/* Execute generic command on NAND controller. */
+static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
+					 u8 chip_nr,
+					 u64 mini_ctrl_cmd)
+{
+	u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;
+
+	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
+	mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
+	mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;
+
+	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+					1000000,
+					CTRL_STATUS_CTRL_BUSY, true))
+		return -ETIMEDOUT;
+
+	cadence_nand_reset_irq(cdns_ctrl);
+
+	writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
+	writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);
+
+	/* Select generic command. */
+	reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
+	/* Thread number. */
+	reg |= FIELD_PREP(CMD_REG0_TN, 0);
+
+	/* Issue command. */
+	writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
+
+	return 0;
+}
+
+/* Wait for data on slave DMA interface. */
+static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
+				     u8 *out_sdma_trd,
+				     u32 *out_sdma_size)
+{
+	struct cadence_nand_irq_status irq_mask, irq_status;
+
+	irq_mask.trd_status = 0;
+	irq_mask.trd_error = 0;
+	irq_mask.status = INTR_STATUS_SDMA_TRIGG
+		| INTR_STATUS_SDMA_ERR
+		| INTR_STATUS_UNSUPP_CMD;
+
+	cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
+	cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
+	if (irq_status.status == 0) {
+		dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
+		return -ETIMEDOUT;
+	}
+
+	if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
+		*out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
+		*out_sdma_trd  = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
+		*out_sdma_trd =
+			FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
+	} else {
+		dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
+			irq_status.status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
+{
+	u32 reg;
+
+	reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);
+
+	cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);
+
+	if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
+		cdns_ctrl->caps2.data_dma_width = 8;
+	else
+		cdns_ctrl->caps2.data_dma_width = 4;
+
+	if (reg & CTRL_FEATURES_CONTROL_DATA)
+		cdns_ctrl->caps2.data_control_supp = true;
+
+	if (reg & (CTRL_FEATURES_NVDDR_2_3
+		   | CTRL_FEATURES_NVDDR))
+		cdns_ctrl->caps2.is_phy_type_dll = true;
+}
+
+/* Prepare CDMA descriptor. */
+static void
+cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
+			       char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
+			       dma_addr_t ctrl_data_ptr, u16 ctype)
+{
+	struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
+
+	memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));
+
+	/* Set fields for one descriptor. */
+	cdma_desc->flash_pointer = flash_ptr;
+	if (cdns_ctrl->ctrl_rev >= 13)
+		cdma_desc->bank = nf_mem;
+	else
+		cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);
+
+	cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
+	cdma_desc->command_flags |= CDMA_CF_INT;
+
+	cdma_desc->memory_pointer = mem_ptr;
+	cdma_desc->status = 0;
+	cdma_desc->sync_flag_pointer = 0;
+	cdma_desc->sync_arguments = 0;
+
+	cdma_desc->command_type = ctype;
+	cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
+}
+
+static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
+					u32 desc_status)
+{
+	if (desc_status & CDMA_CS_ERP)
+		return STAT_ERASED;
+
+	if (desc_status & CDMA_CS_UNCE)
+		return STAT_ECC_UNCORR;
+
+	if (desc_status & CDMA_CS_ERR) {
+		dev_err(cdns_ctrl->dev, ":CDMA desc error flag detected.\n");
+		return STAT_FAIL;
+	}
+
+	if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
+		return STAT_ECC_CORR;
+
+	return STAT_FAIL;
+}
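+
+/*
+ * Decoding order above, for reference: an erased page takes precedence
+ * over an uncorrectable ECC error, which takes precedence over a
+ * descriptor error; a failed descriptor with a non-zero CDMA_CS_MAXERR
+ * field is reported as a correctable ECC error, anything else as
+ * STAT_FAIL.
+ */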
+
+static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
+{
+	struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
+	u8 status = STAT_BUSY;
+
+	if (desc_ptr->status & CDMA_CS_FAIL) {
+		status = cadence_nand_check_desc_error(cdns_ctrl,
+						       desc_ptr->status);
+		dev_err(cdns_ctrl->dev, ":CDMA error %x\n", desc_ptr->status);
+	} else if (desc_ptr->status & CDMA_CS_COMP) {
+		/* Descriptor finished with no errors. */
+		if (desc_ptr->command_flags & CDMA_CF_CONT) {
+			dev_info(cdns_ctrl->dev, "DMA unsupported flag is set\n");
+			status = STAT_UNKNOWN;
+		} else {
+			/* Last descriptor. */
+			status = STAT_OK;
+		}
+	}
+
+	return status;
+}
+
+static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
+				  u8 thread)
+{
+	u32 reg;
+	int status;
+
+	/* Wait for thread ready. */
+	status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
+					     1000000,
+					     BIT(thread), true);
+	if (status)
+		return status;
+
+	cadence_nand_reset_irq(cdns_ctrl);
+	reinit_completion(&cdns_ctrl->complete);
+
+	writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
+		       cdns_ctrl->reg + CMD_REG2);
+	writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);
+
+	/* Select CDMA mode. */
+	reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
+	/* Thread number. */
+	reg |= FIELD_PREP(CMD_REG0_TN, thread);
+	/* Issue command. */
+	writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
+
+	return 0;
+}
+
+/* Send CDMA command and wait for it to finish. */
+static u32
+cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
+				u8 thread)
+{
+	struct cadence_nand_irq_status irq_mask, irq_status = {0};
+	int status;
+
+	irq_mask.trd_status = BIT(thread);
+	irq_mask.trd_error = BIT(thread);
+	irq_mask.status = INTR_STATUS_CDMA_TERR;
+
+	cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
+
+	status = cadence_nand_cdma_send(cdns_ctrl, thread);
+	if (status)
+		return status;
+
+	cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
+
+	if (irq_status.status == 0 && irq_status.trd_status == 0 &&
+	    irq_status.trd_error == 0) {
+		dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
+		return -ETIMEDOUT;
+	}
+	if (irq_status.status & irq_mask.status) {
+		dev_err(cdns_ctrl->dev, "CDMA command failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * ECC size depends on configured ECC strength and on maximum supported
+ * ECC step size.
+ */
+static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
+{
+	int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);
+
+	return ALIGN(nbytes, 2);
+}
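+
+/*
+ * Worked example (illustrative): for max_step_size = 1024 and
+ * strength = 8, fls(8 * 1024) = 14, so
+ * nbytes = DIV_ROUND_UP(14 * 8, 8) = 14, and ALIGN(14, 2) keeps it at
+ * 14 ECC bytes per step.
+ */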
+
+#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
+	static int \
+	cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
+						    int strength)\
+	{\
+		return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
+	}
+
+CADENCE_NAND_CALC_ECC_BYTES(256)
+CADENCE_NAND_CALC_ECC_BYTES(512)
+CADENCE_NAND_CALC_ECC_BYTES(1024)
+CADENCE_NAND_CALC_ECC_BYTES(2048)
+CADENCE_NAND_CALC_ECC_BYTES(4096)
+
+/* Function reads BCH capabilities. */
+static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
+{
+	struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
+	int max_step_size = 0, nstrengths, i;
+	u32 reg;
+
+	reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
+	cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
+	if (cdns_ctrl->bch_metadata_size < 4) {
+		dev_err(cdns_ctrl->dev,
+			"Driver needs at least 4 bytes of BCH metadata\n");
+		return -EIO;
+	}
+
+	reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
+	cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
+	cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
+	cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
+	cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);
+
+	reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
+	cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
+	cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
+	cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
+	cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);
+
+	reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
+	cdns_ctrl->ecc_stepinfos[0].stepsize =
+		FIELD_GET(BCH_CFG_2_SECT_0, reg);
+
+	cdns_ctrl->ecc_stepinfos[1].stepsize =
+		FIELD_GET(BCH_CFG_2_SECT_1, reg);
+
+	nstrengths = 0;
+	for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
+		if (cdns_ctrl->ecc_strengths[i] != 0)
+			nstrengths++;
+	}
+
+	ecc_caps->nstepinfos = 0;
+	for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
+		/* ECC strengths are common for all step infos. */
+		cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
+		cdns_ctrl->ecc_stepinfos[i].strengths =
+			cdns_ctrl->ecc_strengths;
+
+		if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
+			ecc_caps->nstepinfos++;
+
+		if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
+			max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
+	}
+	ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];
+
+	switch (max_step_size) {
+	case 256:
+		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
+		break;
+	case 512:
+		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
+		break;
+	case 1024:
+		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
+		break;
+	case 2048:
+		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
+		break;
+	case 4096:
+		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
+		break;
+	default:
+		dev_err(cdns_ctrl->dev,
+			"Unsupported sector size (ECC step size) %d\n",
+			max_step_size);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Hardware initialization. */
+static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+	int status;
+	u32 reg;
+
+	status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+					     1000000,
+					     CTRL_STATUS_INIT_COMP, false);
+	if (status)
+		return status;
+
+	reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
+	cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);
+
+	dev_info(cdns_ctrl->dev,
+		 "%s: cadence nand controller version reg %x\n",
+		 __func__, reg);
+
+	/* Disable cache and multiplane. */
+	writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
+	writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);
+
+	/* Clear all interrupts. */
+	writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
+
+	cadence_nand_get_caps(cdns_ctrl);
+	if (cadence_nand_read_bch_caps(cdns_ctrl))
+		return -EIO;
+
+	/*
+	 * Set IO width access to 8 bit.
+	 * During SW device discovery the width access is expected
+	 * to be 8 bit.
+	 */
+	status = cadence_nand_set_access_width16(cdns_ctrl, false);
+
+	return status;
+}
+
+#define TT_MAIN_OOB_AREAS	2
+#define TT_RAW_PAGE		3
+#define TT_BBM			4
+#define TT_MAIN_OOB_AREA_EXT	5
+
+/* Prepare size of data to transfer. */
+static void
+cadence_nand_prepare_data_size(struct nand_chip *chip,
+			       int transfer_type)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	u32 sec_size = 0, offset = 0, sec_cnt = 1;
+	u32 last_sec_size = cdns_chip->sector_size;
+	u32 data_ctrl_size = 0;
+	u32 reg = 0;
+
+	if (cdns_ctrl->curr_trans_type == transfer_type)
+		return;
+
+	switch (transfer_type) {
+	case TT_MAIN_OOB_AREA_EXT:
+		sec_cnt = cdns_chip->sector_count;
+		sec_size = cdns_chip->sector_size;
+		data_ctrl_size = cdns_chip->avail_oob_size;
+		break;
+	case TT_MAIN_OOB_AREAS:
+		sec_cnt = cdns_chip->sector_count;
+		last_sec_size = cdns_chip->sector_size
+			+ cdns_chip->avail_oob_size;
+		sec_size = cdns_chip->sector_size;
+		break;
+	case TT_RAW_PAGE:
+		last_sec_size = mtd->writesize + mtd->oobsize;
+		break;
+	case TT_BBM:
+		offset = mtd->writesize + cdns_chip->bbm_offs;
+		last_sec_size = 8;
+		break;
+	}
+
+	reg = 0;
+	reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
+	reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
+	writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);
+
+	reg = 0;
+	reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
+	reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
+	writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);
+
+	if (cdns_ctrl->caps2.data_control_supp) {
+		reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
+		reg &= ~CONTROL_DATA_CTRL_SIZE;
+		reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
+		writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
+	}
+
+	cdns_ctrl->curr_trans_type = transfer_type;
+}
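+
+/*
+ * Concrete example (derived from the code above): for TT_BBM the
+ * controller is programmed with offset = mtd->writesize + bbm_offs and
+ * sec_cnt = 1 in TRAN_CFG_0, and last_sec_size = 8 in TRAN_CFG_1, so
+ * only an 8-byte window around the bad block marker is transferred.
+ */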
+
+static int
+cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
+			   int page, void *buf, void *ctrl_dat, u32 buf_size,
+			   u32 ctrl_dat_size, enum dma_data_direction dir,
+			   bool with_ecc)
+{
+	dma_addr_t dma_buf, dma_ctrl_dat = 0;
+	u8 thread_nr = chip_nr;
+	int status;
+	u16 ctype;
+
+	if (dir == DMA_FROM_DEVICE)
+		ctype = CDMA_CT_RD;
+	else
+		ctype = CDMA_CT_WR;
+
+	cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);
+
+	dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
+	if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
+		dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+		return -EIO;
+	}
+
+	if (ctrl_dat && ctrl_dat_size) {
+		dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
+					      ctrl_dat_size, dir);
+		if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
+			dma_unmap_single(cdns_ctrl->dev, dma_buf,
+					 buf_size, dir);
+			dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+			return -EIO;
+		}
+	}
+
+	cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
+				       dma_buf, dma_ctrl_dat, ctype);
+
+	status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
+
+	dma_unmap_single(cdns_ctrl->dev, dma_buf,
+			 buf_size, dir);
+
+	if (ctrl_dat && ctrl_dat_size)
+		dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
+				 ctrl_dat_size, dir);
+	if (status)
+		return status;
+
+	return cadence_nand_cdma_finish(cdns_ctrl);
+}
+
+static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
+				     struct cadence_nand_timings *t)
+{
+	writel_relaxed(t->async_toggle_timings,
+		       cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
+	writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
+	writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
+	writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);
+
+	if (cdns_ctrl->caps2.is_phy_type_dll)
+		writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);
+
+	writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);
+
+	if (cdns_ctrl->caps2.is_phy_type_dll) {
+		writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
+		writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
+		writel_relaxed(t->phy_dqs_timing,
+			       cdns_ctrl->reg + PHY_DQS_TIMING);
+		writel_relaxed(t->phy_gate_lpbk_ctrl,
+			       cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
+		writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
+			       cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
+		writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
+	}
+}
+
+static int cadence_nand_select_target(struct nand_chip *chip)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+	if (chip == cdns_ctrl->selected_chip)
+		return 0;
+
+	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+					1000000,
+					CTRL_STATUS_CTRL_BUSY, true))
+		return -ETIMEDOUT;
+
+	cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);
+
+	cadence_nand_set_ecc_strength(cdns_ctrl,
+				      cdns_chip->corr_str_idx);
+
+	cadence_nand_set_erase_detection(cdns_ctrl, true,
+					 chip->ecc.strength);
+
+	cdns_ctrl->curr_trans_type = -1;
+	cdns_ctrl->selected_chip = chip;
+
+	return 0;
+}
+
+static int cadence_nand_erase(struct nand_chip *chip, u32 page)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	int status;
+	u8 thread_nr = cdns_chip->cs[chip->cur_cs];
+
+	cadence_nand_cdma_desc_prepare(cdns_ctrl,
+				       cdns_chip->cs[chip->cur_cs],
+				       page, 0, 0,
+				       CDMA_CT_ERASE);
+	status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
+	if (status) {
+		dev_err(cdns_ctrl->dev, "erase operation failed\n");
+		return -EIO;
+	}
+
+	status = cadence_nand_cdma_finish(cdns_ctrl);
+	if (status)
+		return status;
+
+	return 0;
+}
+
+static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
+{
+	int status;
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+
+	cadence_nand_prepare_data_size(chip, TT_BBM);
+
+	cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+	/*
+	 * Read only the bad block marker, from the offset
+	 * defined by the memory manufacturer.
+	 */
+	status = cadence_nand_cdma_transfer(cdns_ctrl,
+					    cdns_chip->cs[chip->cur_cs],
+					    page, cdns_ctrl->buf, NULL,
+					    mtd->oobsize,
+					    0, DMA_FROM_DEVICE, false);
+	if (status) {
+		dev_err(cdns_ctrl->dev, "read BBM failed\n");
+		return -EIO;
+	}
+
+	memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);
+
+	return 0;
+}
+
+static int cadence_nand_write_page(struct nand_chip *chip,
+				   const u8 *buf, int oob_required,
+				   int page)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int status;
+	u16 marker_val = 0xFFFF;
+
+	status = cadence_nand_select_target(chip);
+	if (status)
+		return status;
+
+	cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
+					 mtd->writesize
+					 + cdns_chip->bbm_offs,
+					 1);
+
+	if (oob_required) {
+		marker_val = *(u16 *)(chip->oob_poi
+				      + cdns_chip->bbm_offs);
+	} else {
+		/* Set oob data to 0xFF. */
+		memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
+		       cdns_chip->avail_oob_size);
+	}
+
+	cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);
+
+	cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
+
+	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
+	    cdns_ctrl->caps2.data_control_supp) {
+		u8 *oob;
+
+		if (oob_required)
+			oob = chip->oob_poi;
+		else
+			oob = cdns_ctrl->buf + mtd->writesize;
+
+		status = cadence_nand_cdma_transfer(cdns_ctrl,
+						    cdns_chip->cs[chip->cur_cs],
+						    page, (void *)buf, oob,
+						    mtd->writesize,
+						    cdns_chip->avail_oob_size,
+						    DMA_TO_DEVICE, true);
+		if (status) {
+			dev_err(cdns_ctrl->dev, "write page failed\n");
+			return -EIO;
+		}
+
+		return 0;
+	}
+
+	if (oob_required) {
+		/* Transfer the data to the oob area. */
+		memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
+		       cdns_chip->avail_oob_size);
+	}
+
+	memcpy(cdns_ctrl->buf, buf, mtd->writesize);
+
+	cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
+
+	return cadence_nand_cdma_transfer(cdns_ctrl,
+					  cdns_chip->cs[chip->cur_cs],
+					  page, cdns_ctrl->buf, NULL,
+					  mtd->writesize
+					  + cdns_chip->avail_oob_size,
+					  0, DMA_TO_DEVICE, true);
+}
+
+static int cadence_nand_write_oob(struct nand_chip *chip, int page)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+
+	memset(cdns_ctrl->buf, 0xFF, mtd->writesize);
+
+	return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
+}
+
+static int cadence_nand_write_page_raw(struct nand_chip *chip,
+				       const u8 *buf, int oob_required,
+				       int page)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int writesize = mtd->writesize;
+	int oobsize = mtd->oobsize;
+	int ecc_steps = chip->ecc.steps;
+	int ecc_size = chip->ecc.size;
+	int ecc_bytes = chip->ecc.bytes;
+	void *tmp_buf = cdns_ctrl->buf;
+	int oob_skip = cdns_chip->bbm_len;
+	size_t size = writesize + oobsize;
+	int i, pos, len;
+	int status = 0;
+
+	status = cadence_nand_select_target(chip);
+	if (status)
+		return status;
+
+	/*
+	 * Fill the buffer with 0xff first, except for a full page transfer.
+	 * This simplifies the logic.
+	 */
+	if (!buf || !oob_required)
+		memset(tmp_buf, 0xff, size);
+
+	cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+	/* Arrange the buffer for syndrome payload/ecc layout. */
+	if (buf) {
+		for (i = 0; i < ecc_steps; i++) {
+			pos = i * (ecc_size + ecc_bytes);
+			len = ecc_size;
+
+			if (pos >= writesize)
+				pos += oob_skip;
+			else if (pos + len > writesize)
+				len = writesize - pos;
+
+			memcpy(tmp_buf + pos, buf, len);
+			buf += len;
+			if (len < ecc_size) {
+				len = ecc_size - len;
+				memcpy(tmp_buf + writesize + oob_skip, buf,
+				       len);
+				buf += len;
+			}
+		}
+	}
+
+	if (oob_required) {
+		const u8 *oob = chip->oob_poi;
+		u32 oob_data_offset = (cdns_chip->sector_count - 1) *
+			(cdns_chip->sector_size + chip->ecc.bytes)
+			+ cdns_chip->sector_size + oob_skip;
+
+		/* BBM at the beginning of the OOB area. */
+		memcpy(tmp_buf + writesize, oob, oob_skip);
+
+		/* OOB free. */
+		memcpy(tmp_buf + oob_data_offset, oob,
+		       cdns_chip->avail_oob_size);
+		oob += cdns_chip->avail_oob_size;
+
+		/* OOB ECC. */
+		for (i = 0; i < ecc_steps; i++) {
+			pos = ecc_size + i * (ecc_size + ecc_bytes);
+			if (i == (ecc_steps - 1))
+				pos += cdns_chip->avail_oob_size;
+
+			len = ecc_bytes;
+
+			if (pos >= writesize)
+				pos += oob_skip;
+			else if (pos + len > writesize)
+				len = writesize - pos;
+
+			memcpy(tmp_buf + pos, oob, len);
+			oob += len;
+			if (len < ecc_bytes) {
+				len = ecc_bytes - len;
+				memcpy(tmp_buf + writesize + oob_skip, oob,
+				       len);
+				oob += len;
+			}
+		}
+	}
+
+	cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
+
+	return cadence_nand_cdma_transfer(cdns_ctrl,
+					  cdns_chip->cs[chip->cur_cs],
+					  page, cdns_ctrl->buf, NULL,
+					  mtd->writesize +
+					  mtd->oobsize,
+					  0, DMA_TO_DEVICE, false);
+}
+
+static int cadence_nand_write_oob_raw(struct nand_chip *chip,
+				      int page)
+{
+	return cadence_nand_write_page_raw(chip, NULL, true, page);
+}
+
+static int cadence_nand_read_page(struct nand_chip *chip,
+				  u8 *buf, int oob_required, int page)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int status = 0;
+	int ecc_err_count = 0;
+
+	status = cadence_nand_select_target(chip);
+	if (status)
+		return status;
+
+	cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
+					 mtd->writesize
+					 + cdns_chip->bbm_offs, 1);
+
+	/*
+	 * If the data buffer can be accessed by DMA and the data_control
+	 * feature is supported, then transfer the data and OOB directly.
+	 */
+	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
+	    cdns_ctrl->caps2.data_control_supp) {
+		u8 *oob;
+
+		if (oob_required)
+			oob = chip->oob_poi;
+		else
+			oob = cdns_ctrl->buf + mtd->writesize;
+
+		cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
+		status = cadence_nand_cdma_transfer(cdns_ctrl,
+						    cdns_chip->cs[chip->cur_cs],
+						    page, buf, oob,
+						    mtd->writesize,
+						    cdns_chip->avail_oob_size,
+						    DMA_FROM_DEVICE, true);
+	/* Otherwise use bounce buffer. */
+	} else {
+		cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
+		status = cadence_nand_cdma_transfer(cdns_ctrl,
+						    cdns_chip->cs[chip->cur_cs],
+						    page, cdns_ctrl->buf,
+						    NULL, mtd->writesize
+						    + cdns_chip->avail_oob_size,
+						    0, DMA_FROM_DEVICE, true);
+
+		memcpy(buf, cdns_ctrl->buf, mtd->writesize);
+		if (oob_required)
+			memcpy(chip->oob_poi,
+			       cdns_ctrl->buf + mtd->writesize,
+			       mtd->oobsize);
+	}
+
+	switch (status) {
+	case STAT_ECC_UNCORR:
+		mtd->ecc_stats.failed++;
+		ecc_err_count++;
+		break;
+	case STAT_ECC_CORR:
+		ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
+					  cdns_ctrl->cdma_desc->status);
+		mtd->ecc_stats.corrected += ecc_err_count;
+		break;
+	case STAT_ERASED:
+	case STAT_OK:
+		break;
+	default:
+		dev_err(cdns_ctrl->dev, "read page failed\n");
+		return -EIO;
+	}
+
+	if (oob_required)
+		if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
+			return -EIO;
+
+	return ecc_err_count;
+}
+
+/* Reads OOB data from the device. */
+static int cadence_nand_read_oob(struct nand_chip *chip, int page)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+
+	return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
+}
+
+static int cadence_nand_read_page_raw(struct nand_chip *chip,
+				      u8 *buf, int oob_required, int page)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int oob_skip = cdns_chip->bbm_len;
+	int writesize = mtd->writesize;
+	int ecc_steps = chip->ecc.steps;
+	int ecc_size = chip->ecc.size;
+	int ecc_bytes = chip->ecc.bytes;
+	void *tmp_buf = cdns_ctrl->buf;
+	int i, pos, len;
+	int status = 0;
+
+	status = cadence_nand_select_target(chip);
+	if (status)
+		return status;
+
+	cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
+
+	cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
+	status = cadence_nand_cdma_transfer(cdns_ctrl,
+					    cdns_chip->cs[chip->cur_cs],
+					    page, cdns_ctrl->buf, NULL,
+					    mtd->writesize
+					    + mtd->oobsize,
+					    0, DMA_FROM_DEVICE, false);
+
+	switch (status) {
+	case STAT_ERASED:
+	case STAT_OK:
+		break;
+	default:
+		dev_err(cdns_ctrl->dev, "read raw page failed\n");
+		return -EIO;
+	}
+
+	/* Arrange the buffer for syndrome payload/ecc layout. */
+	if (buf) {
+		for (i = 0; i < ecc_steps; i++) {
+			pos = i * (ecc_size + ecc_bytes);
+			len = ecc_size;
+
+			if (pos >= writesize)
+				pos += oob_skip;
+			else if (pos + len > writesize)
+				len = writesize - pos;
+
+			memcpy(buf, tmp_buf + pos, len);
+			buf += len;
+			if (len < ecc_size) {
+				len = ecc_size - len;
+				memcpy(buf, tmp_buf + writesize + oob_skip,
+				       len);
+				buf += len;
+			}
+		}
+	}
+
+	if (oob_required) {
+		u8 *oob = chip->oob_poi;
+		u32 oob_data_offset = (cdns_chip->sector_count - 1) *
+			(cdns_chip->sector_size + chip->ecc.bytes)
+			+ cdns_chip->sector_size + oob_skip;
+
+		/* OOB free. */
+		memcpy(oob, tmp_buf + oob_data_offset,
+		       cdns_chip->avail_oob_size);
+
+		/* BBM at the beginning of the OOB area. */
+		memcpy(oob, tmp_buf + writesize, oob_skip);
+
+		oob += cdns_chip->avail_oob_size;
+
+		/* OOB ECC. */
+		for (i = 0; i < ecc_steps; i++) {
+			pos = ecc_size + i * (ecc_size + ecc_bytes);
+			len = ecc_bytes;
+
+			if (i == (ecc_steps - 1))
+				pos += cdns_chip->avail_oob_size;
+
+			if (pos >= writesize)
+				pos += oob_skip;
+			else if (pos + len > writesize)
+				len = writesize - pos;
+
+			memcpy(oob, tmp_buf + pos, len);
+			oob += len;
+			if (len < ecc_bytes) {
+				len = ecc_bytes - len;
+				memcpy(oob, tmp_buf + writesize + oob_skip,
+				       len);
+				oob += len;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int cadence_nand_read_oob_raw(struct nand_chip *chip,
+				     int page)
+{
+	return cadence_nand_read_page_raw(chip, NULL, true, page);
+}
+
+static void cadence_nand_slave_dma_transfer_finished(void *data)
+{
+	struct completion *finished = data;
+
+	complete(finished);
+}
+
+static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
+					   void *buf,
+					   dma_addr_t dev_dma, size_t len,
+					   enum dma_data_direction dir)
+{
+	DECLARE_COMPLETION_ONSTACK(finished);
+	struct dma_chan *chan;
+	struct dma_device *dma_dev;
+	dma_addr_t src_dma, dst_dma, buf_dma;
+	struct dma_async_tx_descriptor *tx;
+	dma_cookie_t cookie;
+
+	chan = cdns_ctrl->dmac;
+	dma_dev = chan->device;
+
+	buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
+	if (dma_mapping_error(dma_dev->dev, buf_dma)) {
+		dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+		goto err;
+	}
+
+	if (dir == DMA_FROM_DEVICE) {
+		src_dma = cdns_ctrl->io.dma;
+		dst_dma = buf_dma;
+	} else {
+		src_dma = buf_dma;
+		dst_dma = cdns_ctrl->io.dma;
+	}
+
+	tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
+				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+	if (!tx) {
+		dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
+		goto err_unmap;
+	}
+
+	tx->callback = cadence_nand_slave_dma_transfer_finished;
+	tx->callback_param = &finished;
+
+	cookie = dmaengine_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
+		goto err_unmap;
+	}
+
+	dma_async_issue_pending(cdns_ctrl->dmac);
+	wait_for_completion(&finished);
+
+	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+
+	return 0;
+
+err_unmap:
+	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+
+err:
+	dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
+
+	return -EIO;
+}
+
+static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
+				 u8 *buf, int len)
+{
+	u8 thread_nr = 0;
+	u32 sdma_size;
+	int status;
+
+	/* Wait until the slave DMA interface is ready for data transfer. */
+	status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+	if (status)
+		return status;
+
+	if (!cdns_ctrl->caps1->has_dma) {
+		int len_in_words = len >> 2;
+
+		/* Read the aligned part of the data. */
+		ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+		if (sdma_size > len) {
+			/* Read the remaining data from the slave DMA interface, if any. */
+			ioread32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
+				     sdma_size / 4 - len_in_words);
+			/* Copy the remaining bytes. */
+			memcpy(buf + (len_in_words << 2), cdns_ctrl->buf,
+			       len - (len_in_words << 2));
+		}
+		return 0;
+	}
+
+	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+		status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
+							 cdns_ctrl->io.dma,
+							 len, DMA_FROM_DEVICE);
+		if (status == 0)
+			return 0;
+
+		dev_warn(cdns_ctrl->dev,
+			 "Slave DMA transfer failed. Try again using bounce buffer.\n");
+	}
+
+	/* If DMA transfer is not possible or failed then use bounce buffer. */
+	status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+						 cdns_ctrl->io.dma,
+						 sdma_size, DMA_FROM_DEVICE);
+
+	if (status) {
+		dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+		return status;
+	}
+
+	memcpy(buf, cdns_ctrl->buf, len);
+
+	return 0;
+}
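+
+/*
+ * PIO path example (illustrative): for len = 6 and sdma_size = 8,
+ * len_in_words = 1, so 4 bytes are read directly into buf with
+ * ioread32_rep(), the remaining word lands in the bounce buffer, and the
+ * final memcpy() hands the 2 bytes still owed back to the caller.
+ */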
+
+static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
+				  const u8 *buf, int len)
+{
+	u8 thread_nr = 0;
+	u32 sdma_size;
+	int status;
+
+	/* Wait until the slave DMA interface is ready for data transfer. */
+	status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+	if (status)
+		return status;
+
+	if (!cdns_ctrl->caps1->has_dma) {
+		int len_in_words = len >> 2;
+
+		iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+		if (sdma_size > len) {
+			/* Copy the remaining bytes. */
+			memcpy(cdns_ctrl->buf, buf + (len_in_words << 2),
+			       len - (len_in_words << 2));
+			/* Write all the data expected by the NAND controller. */
+			iowrite32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
+				      sdma_size / 4 - len_in_words);
+		}
+
+		return 0;
+	}
+
+	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+		status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
+							 cdns_ctrl->io.dma,
+							 len, DMA_TO_DEVICE);
+		if (status == 0)
+			return 0;
+
+		dev_warn(cdns_ctrl->dev,
+			 "Slave DMA transfer failed. Try again using bounce buffer.\n");
+	}
+
+	/* If DMA transfer is not possible or failed then use bounce buffer. */
+	memcpy(cdns_ctrl->buf, buf, len);
+
+	status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+						 cdns_ctrl->io.dma,
+						 sdma_size, DMA_TO_DEVICE);
+
+	if (status)
+		dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+
+	return status;
+}
+
+static int cadence_nand_force_byte_access(struct nand_chip *chip,
+					  bool force_8bit)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	int status;
+
+	/*
+	 * Callers of this function do not verify if the NAND is using a 16-bit
+	 * or an 8-bit bus for normal operations, so we need to take care of that
+	 * here by leaving the configuration unchanged if the NAND does not have
+	 * the NAND_BUSWIDTH_16 flag set.
+	 */
+	if (!(chip->options & NAND_BUSWIDTH_16))
+		return 0;
+
+	status = cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
+
+	return status;
+}
+
+static int cadence_nand_cmd_opcode(struct nand_chip *chip,
+				   const struct nand_subop *subop)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	const struct nand_op_instr *instr;
+	unsigned int op_id = 0;
+	u64 mini_ctrl_cmd = 0;
+	int ret;
+
+	instr = &subop->instrs[op_id];
+
+	if (instr->delay_ns > 0)
+		mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+				    GCMD_LAY_INSTR_CMD);
+	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
+				    instr->ctx.cmd.opcode);
+
+	ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+					    cdns_chip->cs[chip->cur_cs],
+					    mini_ctrl_cmd);
+	if (ret)
+		dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
+			instr->ctx.cmd.opcode);
+
+	return ret;
+}
+
+static int cadence_nand_cmd_address(struct nand_chip *chip,
+				    const struct nand_subop *subop)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	const struct nand_op_instr *instr;
+	unsigned int op_id = 0;
+	u64 mini_ctrl_cmd = 0;
+	unsigned int offset, naddrs;
+	u64 address = 0;
+	const u8 *addrs;
+	int ret;
+	int i;
+
+	instr = &subop->instrs[op_id];
+
+	if (instr->delay_ns > 0)
+		mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+				    GCMD_LAY_INSTR_ADDR);
+
+	offset = nand_subop_get_addr_start_off(subop, op_id);
+	naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+	addrs = &instr->ctx.addr.addrs[offset];
+
+	for (i = 0; i < naddrs; i++)
+		address |= (u64)addrs[i] << (8 * i);
+
+	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
+				    address);
+	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
+				    naddrs - 1);
+
+	ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+					    cdns_chip->cs[chip->cur_cs],
+					    mini_ctrl_cmd);
+	if (ret)
+		dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
+
+	return ret;
+}
+
+static int cadence_nand_cmd_erase(struct nand_chip *chip,
+				  const struct nand_subop *subop)
+{
+	unsigned int op_id;
+
+	if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
+		int i;
+		const struct nand_op_instr *instr = NULL;
+		unsigned int offset, naddrs;
+		const u8 *addrs;
+		u32 page = 0;
+
+		instr = &subop->instrs[1];
+		offset = nand_subop_get_addr_start_off(subop, 1);
+		naddrs = nand_subop_get_num_addr_cyc(subop, 1);
+		addrs = &instr->ctx.addr.addrs[offset];
+
+		for (i = 0; i < naddrs; i++)
+			page |= (u32)addrs[i] << (8 * i);
+
+		return cadence_nand_erase(chip, page);
+	}
+
+	/*
+	 * If it is not an erase operation, handle the operation
+	 * by calling the exec_op function.
+	 */
+	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+		int ret;
+		const struct nand_operation nand_op = {
+			.cs = chip->cur_cs,
+			.instrs =  &subop->instrs[op_id],
+			.ninstrs = 1};
+		ret = chip->controller->ops->exec_op(chip, &nand_op, false);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int cadence_nand_cmd_data(struct nand_chip *chip,
+				 const struct nand_subop *subop)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	const struct nand_op_instr *instr;
+	unsigned int offset, op_id = 0;
+	u64 mini_ctrl_cmd = 0;
+	int len = 0;
+	int ret;
+
+	instr = &subop->instrs[op_id];
+
+	if (instr->delay_ns > 0)
+		mini_ctrl_cmd |= GCMD_LAY_TWB;
+
+	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
+				    GCMD_LAY_INSTR_DATA);
+
+	if (instr->type == NAND_OP_DATA_OUT_INSTR)
+		mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
+					    GCMD_DIR_WRITE);
+
+	len = nand_subop_get_data_len(subop, op_id);
+	offset = nand_subop_get_data_start_off(subop, op_id);
+	mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
+	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
+	if (instr->ctx.data.force_8bit) {
+		ret = cadence_nand_force_byte_access(chip, true);
+		if (ret) {
+			dev_err(cdns_ctrl->dev,
+				"cannot change byte access; generic data cmd failed\n");
+			return ret;
+		}
+	}
+
+	ret = cadence_nand_generic_cmd_send(cdns_ctrl,
+					    cdns_chip->cs[chip->cur_cs],
+					    mini_ctrl_cmd);
+	if (ret) {
+		dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
+		return ret;
+	}
+
+	if (instr->type == NAND_OP_DATA_IN_INSTR) {
+		void *buf = instr->ctx.data.buf.in + offset;
+
+		ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
+	} else {
+		const void *buf = instr->ctx.data.buf.out + offset;
+
+		ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
+	}
+
+	if (ret) {
+		dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
+		return ret;
+	}
+
+	if (instr->ctx.data.force_8bit) {
+		ret = cadence_nand_force_byte_access(chip, false);
+		if (ret) {
+			dev_err(cdns_ctrl->dev,
+				"cannot change byte access; generic data cmd failed\n");
+		}
+	}
+
+	return ret;
+}
+
+static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
+				    const struct nand_subop *subop)
+{
+	int status;
+	unsigned int op_id = 0;
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	const struct nand_op_instr *instr = &subop->instrs[op_id];
+	u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+
+	status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
+					     timeout_us,
+					     BIT(cdns_chip->cs[chip->cur_cs]),
+					     false);
+	return status;
+}
+
+static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
+	NAND_OP_PARSER_PATTERN(
+		cadence_nand_cmd_erase,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+	NAND_OP_PARSER_PATTERN(
+		cadence_nand_cmd_opcode,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+	NAND_OP_PARSER_PATTERN(
+		cadence_nand_cmd_address,
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
+	NAND_OP_PARSER_PATTERN(
+		cadence_nand_cmd_data,
+		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
+	NAND_OP_PARSER_PATTERN(
+		cadence_nand_cmd_data,
+		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
+	NAND_OP_PARSER_PATTERN(
+		cadence_nand_cmd_waitrdy,
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
+	);
+
+static int cadence_nand_exec_op(struct nand_chip *chip,
+				const struct nand_operation *op,
+				bool check_only)
+{
+	if (!check_only) {
+		int status = cadence_nand_select_target(chip);
+
+		if (status)
+			return status;
+	}
+
+	return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
+				      check_only);
+}
+
+static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
+				       struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = cdns_chip->bbm_len;
+	oobregion->length = cdns_chip->avail_oob_size
+		- cdns_chip->bbm_len;
+
+	return 0;
+}
+
+static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = cdns_chip->avail_oob_size;
+	oobregion->length = chip->ecc.total;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
+	.free = cadence_nand_ooblayout_free,
+	.ecc = cadence_nand_ooblayout_ecc,
+};
+
+static int calc_cycl(u32 timing, u32 clock)
+{
+	if (timing == 0 || clock == 0)
+		return 0;
+
+	if ((timing % clock) > 0)
+		return timing / clock;
+	else
+		return timing / clock - 1;
+}
+
+/* Calculate max data valid window. */
+static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
+				u32 board_delay_skew_min, u32 ext_mode)
+{
+	if (ext_mode == 0)
+		clk_period /= 2;
+
+	return (trp_cnt + 1) * clk_period + trhoh_min +
+		board_delay_skew_min;
+}
+
+/* Calculate data valid window. */
+static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
+			    u32 trea_max, u32 ext_mode)
+{
+	if (ext_mode == 0)
+		clk_period /= 2;
+
+	return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
+}
+
+static int
+cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
+			     const struct nand_interface_config *conf)
+{
+	const struct nand_sdr_timings *sdr;
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	struct cadence_nand_timings *t = &cdns_chip->timings;
+	u32 reg;
+	u32 board_delay = cdns_ctrl->board_delay;
+	u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
+					    cdns_ctrl->nf_clk_rate);
+	u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
+	u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
+	u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
+	u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
+	u32 if_skew = cdns_ctrl->caps1->if_skew;
+	u32 board_delay_skew_min = board_delay - if_skew;
+	u32 board_delay_skew_max = board_delay + if_skew;
+	u32 dqs_sampl_res, phony_dqs_mod;
+	u32 tdvw, tdvw_min, tdvw_max;
+	u32 ext_rd_mode, ext_wr_mode;
+	u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
+	u32 sampling_point;
+
+	sdr = nand_get_sdr_timings(conf);
+	if (IS_ERR(sdr))
+		return PTR_ERR(sdr);
+
+	memset(t, 0, sizeof(*t));
+	/* Sampling point calculation. */
+
+	if (cdns_ctrl->caps2.is_phy_type_dll)
+		phony_dqs_mod = 2;
+	else
+		phony_dqs_mod = 1;
+
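+	/* clk_period is in picoseconds, so this is the sampling resolution in ps. */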
+	dqs_sampl_res = clk_period / phony_dqs_mod;
+
+	tdvw_min = sdr->tREA_max + board_delay_skew_max;
+	/*
+	 * The idea of these calculations is to get the optimum values
+	 * for the tRP and tRH timings. If it is NOT possible to sample data
+	 * with optimal tRP/tRH settings, the parameters will be extended.
+	 * If clk_period is 50ns (the lowest value) this condition is met
+	 * for asynchronous timing modes 1, 2, 3, 4 and 5.
+	 * If clk_period is 20ns the condition is met only
+	 * for asynchronous timing mode 5.
+	 */
+	if (sdr->tRC_min <= clk_period &&
+	    sdr->tRP_min <= (clk_period / 2) &&
+	    sdr->tREH_min <= (clk_period / 2)) {
+		/* Performance mode. */
+		ext_rd_mode = 0;
+		tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+				 sdr->tREA_max, ext_rd_mode);
+		tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
+					 board_delay_skew_min,
+					 ext_rd_mode);
+		/*
+		 * Check if a data valid window and sampling point can be found
+		 * and that the point is not on the edge (i.e. we have hold
+		 * margin). If not, extend the tRP timing.
+		 */
+		if (tdvw > 0) {
+			if (tdvw_max <= tdvw_min ||
+			    (tdvw_max % dqs_sampl_res) == 0) {
+				/*
+				 * No valid sampling point, so the RE pulse
+				 * needs to be widened by half a clock cycle.
+				 */
+				ext_rd_mode = 1;
+			}
+		} else {
+			/*
+			 * There is no valid window in which to sample data,
+			 * so tRP needs to be widened.
+			 * Very conservative calculations are performed here.
+			 */
+			trp_cnt = (sdr->tREA_max + board_delay_skew_max
+				   + dqs_sampl_res) / clk_period;
+			ext_rd_mode = 1;
+		}
+
+	} else {
+		/* Extended read mode. */
+		u32 trh;
+
+		ext_rd_mode = 1;
+		trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
+		trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
+		if (sdr->tREH_min >= trh)
+			trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
+		else
+			trh_cnt = calc_cycl(trh, clk_period);
+
+		tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+				 sdr->tREA_max, ext_rd_mode);
+		/*
+		 * Check if a data valid window and sampling point can be
+		 * found; if the point is at the edge, check whether the
+		 * previous one is valid. If not, extend the tRP timing.
+		 */
+		if (tdvw > 0) {
+			tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+						 sdr->tRHOH_min,
+						 board_delay_skew_min,
+						 ext_rd_mode);
+
+			if ((((tdvw_max / dqs_sampl_res)
+			      * dqs_sampl_res) <= tdvw_min) ||
+			    (((tdvw_max % dqs_sampl_res) == 0) &&
+			     (((tdvw_max / dqs_sampl_res - 1)
+			       * dqs_sampl_res) <= tdvw_min))) {
+				/*
+				 * The data valid window is narrower than the
+				 * sampling resolution and does not hit any
+				 * sampling point. To make sure a sampling
+				 * point is found, the RE low pulse width is
+				 * extended by one clock cycle.
+				 */
+				trp_cnt = trp_cnt + 1;
+			}
+		} else {
+			/*
+			 * There is no valid window in which to sample data,
+			 * so tRP needs to be widened.
+			 * Very conservative calculations are performed here.
+			 */
+			trp_cnt = (sdr->tREA_max + board_delay_skew_max
+				   + dqs_sampl_res) / clk_period;
+		}
+	}
+
+	tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+				 sdr->tRHOH_min,
+				 board_delay_skew_min, ext_rd_mode);
+
+	if (sdr->tWC_min <= clk_period &&
+	    (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
+	    (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
+		ext_wr_mode = 0;
+	} else {
+		u32 twh;
+
+		ext_wr_mode = 1;
+		twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
+		if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
+			twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
+					    clk_period);
+
+		twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
+		if (sdr->tWH_min >= twh)
+			twh = sdr->tWH_min;
+
+		twh_cnt = calc_cycl(twh + if_skew, clk_period);
+	}
+
+	reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
+	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
+	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
+	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
+	t->async_toggle_timings = reg;
+	dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
+
+	tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
+	tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
+	twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
+	trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
+	reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
+
+	/*
+	 * If the timing exceeds the delay field in the timing register,
+	 * use the maximum value.
+	 */
+	if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
+		reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
+	else
+		reg |= TIMINGS0_TCCS;
+
+	reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
+	reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
+	t->timings0 = reg;
+	dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
+
+	/* The following is related to a single signal, so skew is not needed. */
+	trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
+	trhz_cnt = trhz_cnt + 1;
+	twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
+	/*
+	 * Because of the two-stage syncflop the value must be increased:
+	 * the first increment (3) accounts for synchronization, the second
+	 * (5) for the output interface delay.
+	 */
+	twb_cnt = twb_cnt + 3 + 5;
+	/*
+	 * The following is related to the WE edge of the random data input
+	 * sequence, so skew is not needed.
+	 */
+	tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
+	reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
+	reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
+	reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
+	t->timings1 = reg;
+	dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
+
+	tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
+	if (tfeat_cnt < twb_cnt)
+		tfeat_cnt = twb_cnt;
+
+	tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
+	tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
+
+	reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
+	reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
+	reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
+	t->timings2 = reg;
+	dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
+
+	if (cdns_ctrl->caps2.is_phy_type_dll) {
+		reg = DLL_PHY_CTRL_DLL_RST_N;
+		if (ext_wr_mode)
+			reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
+		if (ext_rd_mode)
+			reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
+
+		reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
+		reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
+		t->dll_phy_ctrl = reg;
+		dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
+	}
+
+	/* Sampling point calculation. */
+	if ((tdvw_max % dqs_sampl_res) > 0)
+		sampling_point = tdvw_max / dqs_sampl_res;
+	else
+		sampling_point = (tdvw_max / dqs_sampl_res - 1);
+
+	if (sampling_point * dqs_sampl_res > tdvw_min) {
+		dll_phy_dqs_timing =
+			FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
+		dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
+		phony_dqs_timing = sampling_point / phony_dqs_mod;
+
+		if ((sampling_point % 2) > 0) {
+			dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
+			if ((tdvw_max % dqs_sampl_res) == 0)
+				/*
+				 * Calculation for a sampling point that lies
+				 * at the edge of the data window and is odd.
+				 */
+				phony_dqs_timing = (tdvw_max / dqs_sampl_res)
+					/ phony_dqs_mod - 1;
+
+			if (!cdns_ctrl->caps2.is_phy_type_dll)
+				phony_dqs_timing--;
+
+		} else {
+			phony_dqs_timing--;
+		}
+		rd_del_sel = phony_dqs_timing + 3;
+	} else {
+		dev_warn(cdns_ctrl->dev,
+			 "ERROR: cannot find a valid sampling point\n");
+	}
+
+	reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
+	if (cdns_ctrl->caps2.is_phy_type_dll)
+		reg  |= PHY_CTRL_SDR_DQS;
+	t->phy_ctrl = reg;
+	dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
+
+	if (cdns_ctrl->caps2.is_phy_type_dll) {
+		dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
+		dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
+		dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
+			dll_phy_dqs_timing);
+		t->phy_dqs_timing = dll_phy_dqs_timing;
+
+		reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
+		dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
+			reg);
+		t->phy_gate_lpbk_ctrl = reg;
+
+		dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
+			PHY_DLL_MASTER_CTRL_BYPASS_MODE);
+		dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
+	}
+
+	return 0;
+}
+
+static int cadence_nand_attach_chip(struct nand_chip *chip)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
+	u32 ecc_size;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int ret;
+
+	if (chip->options & NAND_BUSWIDTH_16) {
+		ret = cadence_nand_set_access_width16(cdns_ctrl, true);
+		if (ret)
+			return ret;
+	}
+
+	chip->bbt_options |= NAND_BBT_USE_FLASH;
+	chip->bbt_options |= NAND_BBT_NO_OOB;
+	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+	chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+	cdns_chip->bbm_offs = chip->badblockpos;
+	cdns_chip->bbm_offs &= ~0x01;
+	/* This value should be an even number. */
+	cdns_chip->bbm_len = 2;
+
+	ret = nand_ecc_choose_conf(chip,
+				   &cdns_ctrl->ecc_caps,
+				   mtd->oobsize - cdns_chip->bbm_len);
+	if (ret) {
+		dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
+		return ret;
+	}
+
+	dev_dbg(cdns_ctrl->dev,
+		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
+		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
+
+	/* Error correction configuration. */
+	cdns_chip->sector_size = chip->ecc.size;
+	cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
+	ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
+
+	cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
+
+	if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
+		cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
+
+	if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
+	    > mtd->oobsize)
+		cdns_chip->avail_oob_size -= 4;
+
+	ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
+	if (ret < 0)
+		return -EINVAL;
+
+	cdns_chip->corr_str_idx = (u8)ret;
+
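+	/* The controller must be idle before new ECC settings are applied. */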
+	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
+					1000000,
+					CTRL_STATUS_CTRL_BUSY, true))
+		return -ETIMEDOUT;
+
+	cadence_nand_set_ecc_strength(cdns_ctrl,
+				      cdns_chip->corr_str_idx);
+
+	cadence_nand_set_erase_detection(cdns_ctrl, true,
+					 chip->ecc.strength);
+
+	/* Override the default read/write operations. */
+	chip->ecc.read_page = cadence_nand_read_page;
+	chip->ecc.read_page_raw = cadence_nand_read_page_raw;
+	chip->ecc.write_page = cadence_nand_write_page;
+	chip->ecc.write_page_raw = cadence_nand_write_page_raw;
+	chip->ecc.read_oob = cadence_nand_read_oob;
+	chip->ecc.write_oob = cadence_nand_write_oob;
+	chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
+	chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
+
+	if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
+		cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
+
+	/* Is 32-bit DMA supported? */
+	ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
+		return ret;
+	}
+
+	mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
+
+	return 0;
+}
+
+static const struct nand_controller_ops cadence_nand_controller_ops = {
+	.attach_chip = cadence_nand_attach_chip,
+	.exec_op = cadence_nand_exec_op,
+	.setup_interface = cadence_nand_setup_interface,
+};
+
+static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
+				  struct device_node *np)
+{
+	struct cdns_nand_chip *cdns_chip;
+	struct mtd_info *mtd;
+	struct nand_chip *chip;
+	int nsels, ret, i;
+	u32 cs;
+
+	nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+	if (nsels <= 0) {
+		dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
+		return -EINVAL;
+	}
+
+	/* Allocate the nand chip structure. */
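+	/* cdns_chip ends with a flexible array holding one CS id per entry. */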
+	cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
+				 (nsels * sizeof(u8)),
+				 GFP_KERNEL);
+	if (!cdns_chip) {
+		dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
+		return -ENOMEM;
+	}
+
+	cdns_chip->nsels = nsels;
+
+	for (i = 0; i < nsels; i++) {
+		/* Retrieve CS id. */
+		ret = of_property_read_u32_index(np, "reg", i, &cs);
+		if (ret) {
+			dev_err(cdns_ctrl->dev,
+				"could not retrieve reg property: %d\n",
+				ret);
+			return ret;
+		}
+
+		if (cs >= cdns_ctrl->caps2.max_banks) {
+			dev_err(cdns_ctrl->dev,
+				"invalid reg value: %u (max CS = %d)\n",
+				cs, cdns_ctrl->caps2.max_banks);
+			return -EINVAL;
+		}
+
+		if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
+			dev_err(cdns_ctrl->dev,
+				"CS %d already assigned\n", cs);
+			return -EINVAL;
+		}
+
+		cdns_chip->cs[i] = cs;
+	}
+
+	chip = &cdns_chip->chip;
+	chip->controller = &cdns_ctrl->controller;
+	nand_set_flash_node(chip, np);
+
+	mtd = nand_to_mtd(chip);
+	mtd->dev.parent = cdns_ctrl->dev;
+
+	/*
+	 * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+	 * in the DT node, this entry will be overwritten in nand_scan_ident().
+	 */
+	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+	ret = nand_scan(chip, cdns_chip->nsels);
+	if (ret) {
+		dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
+		return ret;
+	}
+
+	ret = mtd_device_register(mtd, NULL, 0);
+	if (ret) {
+		dev_err(cdns_ctrl->dev,
+			"failed to register mtd device: %d\n", ret);
+		nand_cleanup(chip);
+		return ret;
+	}
+
+	list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);
+
+	return 0;
+}
+
+static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
+{
+	struct cdns_nand_chip *entry, *temp;
+	struct nand_chip *chip;
+	int ret;
+
+	list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
+		chip = &entry->chip;
+		ret = mtd_device_unregister(nand_to_mtd(chip));
+		WARN_ON(ret);
+		nand_cleanup(chip);
+		list_del(&entry->node);
+	}
+}
+
+static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+	struct device_node *np = cdns_ctrl->dev->of_node;
+	struct device_node *nand_np;
+	int max_cs = cdns_ctrl->caps2.max_banks;
+	int nchips, ret;
+
+	nchips = of_get_child_count(np);
+
+	if (nchips > max_cs) {
+		dev_err(cdns_ctrl->dev,
+			"too many NAND chips: %d (max = %d CS)\n",
+			nchips, max_cs);
+		return -EINVAL;
+	}
+
+	for_each_child_of_node(np, nand_np) {
+		ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
+		if (ret) {
+			of_node_put(nand_np);
+			cadence_nand_chips_cleanup(cdns_ctrl);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
+{
+	/* Disable interrupts. */
+	writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
+}
+
+static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+{
+	dma_cap_mask_t mask;
+	int ret;
+
+	cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
+						  sizeof(*cdns_ctrl->cdma_desc),
+						  &cdns_ctrl->dma_cdma_desc,
+						  GFP_KERNEL);
+	if (!cdns_ctrl->dma_cdma_desc)
+		return -ENOMEM;
+
+	cdns_ctrl->buf_size = SZ_16K;
+	cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+	if (!cdns_ctrl->buf) {
+		ret = -ENOMEM;
+		goto free_buf_desc;
+	}
+
+	if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
+			     IRQF_SHARED, "cadence-nand-controller",
+			     cdns_ctrl)) {
+		dev_err(cdns_ctrl->dev, "Unable to request IRQ\n");
+		ret = -ENODEV;
+		goto free_buf;
+	}
+
+	spin_lock_init(&cdns_ctrl->irq_lock);
+	init_completion(&cdns_ctrl->complete);
+
+	ret = cadence_nand_hw_init(cdns_ctrl);
+	if (ret)
+		goto disable_irq;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	if (cdns_ctrl->caps1->has_dma) {
+		cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
+		if (!cdns_ctrl->dmac) {
+			dev_err(cdns_ctrl->dev,
+				"Unable to get a DMA channel\n");
+			ret = -EBUSY;
+			goto disable_irq;
+		}
+	}
+
+	nand_controller_init(&cdns_ctrl->controller);
+	INIT_LIST_HEAD(&cdns_ctrl->chips);
+
+	cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
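+	/* 0xFF: no ECC correction strength has been programmed yet. */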
+	cdns_ctrl->curr_corr_str_idx = 0xFF;
+
+	ret = cadence_nand_chips_init(cdns_ctrl);
+	if (ret) {
+		dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
+			ret);
+		goto dma_release_chnl;
+	}
+
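+	/* Chip attach may have grown buf_size; reallocate the buffer, zeroed, at its final size. */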
+	kfree(cdns_ctrl->buf);
+	cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+	if (!cdns_ctrl->buf) {
+		ret = -ENOMEM;
+		goto dma_release_chnl;
+	}
+
+	return 0;
+
+dma_release_chnl:
+	if (cdns_ctrl->dmac)
+		dma_release_channel(cdns_ctrl->dmac);
+
+disable_irq:
+	cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+
+free_buf:
+	kfree(cdns_ctrl->buf);
+
+free_buf_desc:
+	dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+			  cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
+
+	return ret;
+}
+
+/* Driver exit point. */
+static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
+{
+	cadence_nand_chips_cleanup(cdns_ctrl);
+	cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+	kfree(cdns_ctrl->buf);
+	dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+			  cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
+
+	if (cdns_ctrl->dmac)
+		dma_release_channel(cdns_ctrl->dmac);
+}
+
+struct cadence_nand_dt {
+	struct cdns_nand_ctrl cdns_ctrl;
+	struct clk *clk;
+};
+
+static const struct cadence_nand_dt_devdata cadence_nand_default = {
+	.if_skew = 0,
+	.has_dma = 1,
+};
+
+static const struct of_device_id cadence_nand_dt_ids[] = {
+	{
+		.compatible = "cdns,hp-nfc",
+		.data = &cadence_nand_default
+	}, {}
+};
+
+MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);
+
+static int cadence_nand_dt_probe(struct platform_device *ofdev)
+{
+	struct resource *res;
+	struct cadence_nand_dt *dt;
+	struct cdns_nand_ctrl *cdns_ctrl;
+	int ret;
+	const struct of_device_id *of_id;
+	const struct cadence_nand_dt_devdata *devdata;
+	u32 val;
+
+	of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
+	if (of_id) {
+		ofdev->id_entry = of_id->data;
+		devdata = of_id->data;
+	} else {
+		pr_err("Failed to find a matching device id.\n");
+		return -ENODEV;
+	}
+
+	dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
+	if (!dt)
+		return -ENOMEM;
+
+	cdns_ctrl = &dt->cdns_ctrl;
+	cdns_ctrl->caps1 = devdata;
+
+	cdns_ctrl->dev = &ofdev->dev;
+	cdns_ctrl->irq = platform_get_irq(ofdev, 0);
+	if (cdns_ctrl->irq < 0)
+		return cdns_ctrl->irq;
+
+	dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);
+
+	cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
+	if (IS_ERR(cdns_ctrl->reg))
+		return PTR_ERR(cdns_ctrl->reg);
+
+	res = platform_get_resource(ofdev, IORESOURCE_MEM, 1);
+	if (!res)
+		return -EINVAL;
+
+	cdns_ctrl->io.dma = res->start;
+	cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res);
+	if (IS_ERR(cdns_ctrl->io.virt))
+		return PTR_ERR(cdns_ctrl->io.virt);
+
+	dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
+	if (IS_ERR(dt->clk))
+		return PTR_ERR(dt->clk);
+
+	cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);
+
+	ret = of_property_read_u32(ofdev->dev.of_node,
+				   "cdns,board-delay-ps", &val);
+	if (ret) {
+		val = 4830;
+		dev_info(cdns_ctrl->dev,
+			 "missing cdns,board-delay-ps property, defaulting to %d\n",
+			 val);
+	}
+	cdns_ctrl->board_delay = val;
+
+	ret = cadence_nand_init(cdns_ctrl);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(ofdev, dt);
+	return 0;
+}
+
+static int cadence_nand_dt_remove(struct platform_device *ofdev)
+{
+	struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);
+
+	cadence_nand_remove(&dt->cdns_ctrl);
+
+	return 0;
+}
+
+static struct platform_driver cadence_nand_dt_driver = {
+	.probe		= cadence_nand_dt_probe,
+	.remove		= cadence_nand_dt_remove,
+	.driver		= {
+		.name	= "cadence-nand-controller",
+		.of_match_table = cadence_nand_dt_ids,
+	},
+};
+
+module_platform_driver(cadence_nand_dt_driver);
+
+MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");
+
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index cc50092..04502d2 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -546,11 +546,6 @@
 	return nand_prog_page_end_op(chip);
 }
 
-static int cafe_nand_block_bad(struct nand_chip *chip, loff_t ofs)
-{
-	return 0;
-}
-
 /* F_2[X]/(X**6+X+1)  */
 static unsigned short gf64_mul(u8 a, u8 b)
 {
@@ -634,7 +629,8 @@
 		goto out_free_dma;
 	}
 
-	cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+	cafe->nand.ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+	cafe->nand.ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
 	cafe->nand.ecc.size = mtd->writesize;
 	cafe->nand.ecc.bytes = 14;
 	cafe->nand.ecc.strength = 4;
@@ -718,10 +714,8 @@
 	/* Enable the following for a flash based bad block table */
 	cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
 
-	if (skipbbt) {
-		cafe->nand.options |= NAND_SKIP_BBTSCAN;
-		cafe->nand.legacy.block_bad = cafe_nand_block_bad;
-	}
+	if (skipbbt)
+		cafe->nand.options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
 
 	if (numtimings && numtimings != 3) {
 		dev_warn(&cafe->pdev->dev, "%d timing register values ignored; precisely three are required\n", numtimings);
@@ -816,11 +810,14 @@
 	struct mtd_info *mtd = pci_get_drvdata(pdev);
 	struct nand_chip *chip = mtd_to_nand(mtd);
 	struct cafe_priv *cafe = nand_get_controller_data(chip);
+	int ret;
 
 	/* Disable NAND IRQ in global IRQ mask register */
 	cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
 	free_irq(pdev->irq, mtd);
-	nand_release(chip);
+	ret = mtd_device_unregister(mtd);
+	WARN_ON(ret);
+	nand_cleanup(chip);
 	free_rs(cafe->rs);
 	pci_iounmap(pdev, cafe->mmio);
 	dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
diff --git a/drivers/mtd/nand/raw/cmx270_nand.c b/drivers/mtd/nand/raw/cmx270_nand.c
deleted file mode 100644
index 045b617..0000000
--- a/drivers/mtd/nand/raw/cmx270_nand.c
+++ /dev/null
@@ -1,236 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *  Copyright (C) 2006 Compulab, Ltd.
- *  Mike Rapoport <mike@compulab.co.il>
- *
- *  Derived from drivers/mtd/nand/h1910.c (removed in v3.10)
- *       Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
- *       Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- *  Overview:
- *   This is a device driver for the NAND flash device found on the
- *   CM-X270 board.
- */
-
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/module.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-
-#include <mach/pxa2xx-regs.h>
-
-#define GPIO_NAND_CS	(11)
-#define GPIO_NAND_RB	(89)
-
-/* MTD structure for CM-X270 board */
-static struct mtd_info *cmx270_nand_mtd;
-
-/* remaped IO address of the device */
-static void __iomem *cmx270_nand_io;
-
-/*
- * Define static partitions for flash device
- */
-static const struct mtd_partition partition_info[] = {
-	[0] = {
-		.name	= "cmx270-0",
-		.offset	= 0,
-		.size	= MTDPART_SIZ_FULL
-	}
-};
-#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
-
-static u_char cmx270_read_byte(struct nand_chip *this)
-{
-	return (readl(this->legacy.IO_ADDR_R) >> 16);
-}
-
-static void cmx270_write_buf(struct nand_chip *this, const u_char *buf,
-			     int len)
-{
-	int i;
-
-	for (i=0; i<len; i++)
-		writel((*buf++ << 16), this->legacy.IO_ADDR_W);
-}
-
-static void cmx270_read_buf(struct nand_chip *this, u_char *buf, int len)
-{
-	int i;
-
-	for (i=0; i<len; i++)
-		*buf++ = readl(this->legacy.IO_ADDR_R) >> 16;
-}
-
-static inline void nand_cs_on(void)
-{
-	gpio_set_value(GPIO_NAND_CS, 0);
-}
-
-static void nand_cs_off(void)
-{
-	dsb();
-
-	gpio_set_value(GPIO_NAND_CS, 1);
-}
-
-/*
- *	hardware specific access to control-lines
- */
-static void cmx270_hwcontrol(struct nand_chip *this, int dat,
-			     unsigned int ctrl)
-{
-	unsigned int nandaddr = (unsigned int)this->legacy.IO_ADDR_W;
-
-	dsb();
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		if ( ctrl & NAND_ALE )
-			nandaddr |=  (1 << 3);
-		else
-			nandaddr &= ~(1 << 3);
-		if ( ctrl & NAND_CLE )
-			nandaddr |=  (1 << 2);
-		else
-			nandaddr &= ~(1 << 2);
-		if ( ctrl & NAND_NCE )
-			nand_cs_on();
-		else
-			nand_cs_off();
-	}
-
-	dsb();
-	this->legacy.IO_ADDR_W = (void __iomem*)nandaddr;
-	if (dat != NAND_CMD_NONE)
-		writel((dat << 16), this->legacy.IO_ADDR_W);
-
-	dsb();
-}
-
-/*
- *	read device ready pin
- */
-static int cmx270_device_ready(struct nand_chip *this)
-{
-	dsb();
-
-	return (gpio_get_value(GPIO_NAND_RB));
-}
-
-/*
- * Main initialization routine
- */
-static int __init cmx270_init(void)
-{
-	struct nand_chip *this;
-	int ret;
-
-	if (!(machine_is_armcore() && cpu_is_pxa27x()))
-		return -ENODEV;
-
-	ret = gpio_request(GPIO_NAND_CS, "NAND CS");
-	if (ret) {
-		pr_warn("CM-X270: failed to request NAND CS gpio\n");
-		return ret;
-	}
-
-	gpio_direction_output(GPIO_NAND_CS, 1);
-
-	ret = gpio_request(GPIO_NAND_RB, "NAND R/B");
-	if (ret) {
-		pr_warn("CM-X270: failed to request NAND R/B gpio\n");
-		goto err_gpio_request;
-	}
-
-	gpio_direction_input(GPIO_NAND_RB);
-
-	/* Allocate memory for MTD device structure and private data */
-	this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
-	if (!this) {
-		ret = -ENOMEM;
-		goto err_kzalloc;
-	}
-
-	cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12);
-	if (!cmx270_nand_io) {
-		pr_debug("Unable to ioremap NAND device\n");
-		ret = -EINVAL;
-		goto err_ioremap;
-	}
-
-	cmx270_nand_mtd = nand_to_mtd(this);
-
-	/* Link the private data with the MTD structure */
-	cmx270_nand_mtd->owner = THIS_MODULE;
-
-	/* insert callbacks */
-	this->legacy.IO_ADDR_R = cmx270_nand_io;
-	this->legacy.IO_ADDR_W = cmx270_nand_io;
-	this->legacy.cmd_ctrl = cmx270_hwcontrol;
-	this->legacy.dev_ready = cmx270_device_ready;
-
-	/* 15 us command delay time */
-	this->legacy.chip_delay = 20;
-	this->ecc.mode = NAND_ECC_SOFT;
-	this->ecc.algo = NAND_ECC_HAMMING;
-
-	/* read/write functions */
-	this->legacy.read_byte = cmx270_read_byte;
-	this->legacy.read_buf = cmx270_read_buf;
-	this->legacy.write_buf = cmx270_write_buf;
-
-	/* Scan to find existence of the device */
-	ret = nand_scan(this, 1);
-	if (ret) {
-		pr_notice("No NAND device\n");
-		goto err_scan;
-	}
-
-	/* Register the partitions */
-	ret = mtd_device_register(cmx270_nand_mtd, partition_info,
-				  NUM_PARTITIONS);
-	if (ret)
-		goto err_scan;
-
-	/* Return happy */
-	return 0;
-
-err_scan:
-	iounmap(cmx270_nand_io);
-err_ioremap:
-	kfree(this);
-err_kzalloc:
-	gpio_free(GPIO_NAND_RB);
-err_gpio_request:
-	gpio_free(GPIO_NAND_CS);
-
-	return ret;
-
-}
-module_init(cmx270_init);
-
-/*
- * Clean up routine
- */
-static void __exit cmx270_cleanup(void)
-{
-	/* Release resources, unregister device */
-	nand_release(mtd_to_nand(cmx270_nand_mtd));
-
-	gpio_free(GPIO_NAND_RB);
-	gpio_free(GPIO_NAND_CS);
-
-	iounmap(cmx270_nand_io);
-
-	kfree(mtd_to_nand(cmx270_nand_mtd));
-}
-module_exit(cmx270_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
-MODULE_DESCRIPTION("NAND flash driver for Compulab CM-X270 Module");
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index e2322ce..282203d 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -21,9 +21,9 @@
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/nand_ecc.h>
 #include <linux/mtd/partitions.h>
+#include <linux/iopoll.h>
 
 #include <asm/msr.h>
-#include <asm/io.h>
 
 #define NR_CS553X_CONTROLLERS	4
 
@@ -89,76 +89,151 @@
 #define CS_NAND_ECC_CLRECC	(1<<1)
 #define CS_NAND_ECC_ENECC	(1<<0)
 
-static void cs553x_read_buf(struct nand_chip *this, u_char *buf, int len)
+struct cs553x_nand_controller {
+	struct nand_controller base;
+	struct nand_chip chip;
+	void __iomem *mmio;
+};
+
+static struct cs553x_nand_controller *
+to_cs553x(struct nand_controller *controller)
 {
+	return container_of(controller, struct cs553x_nand_controller, base);
+}
+
+static int cs553x_write_ctrl_byte(struct cs553x_nand_controller *cs553x,
+				  u32 ctl, u8 data)
+{
+	u8 status;
+	int ret;
+
+	writeb(ctl, cs553x->mmio + MM_NAND_CTL);
+	writeb(data, cs553x->mmio + MM_NAND_IO);
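+	/* Wait until the controller has finished processing the byte. */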
+	ret = readb_poll_timeout_atomic(cs553x->mmio + MM_NAND_STS, status,
+					!(status & CS_NAND_CTLR_BUSY), 1,
+					100000);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void cs553x_data_in(struct cs553x_nand_controller *cs553x, void *buf,
+			   unsigned int len)
+{
+	writeb(0, cs553x->mmio + MM_NAND_CTL);
 	while (unlikely(len > 0x800)) {
-		memcpy_fromio(buf, this->legacy.IO_ADDR_R, 0x800);
+		memcpy_fromio(buf, cs553x->mmio, 0x800);
 		buf += 0x800;
 		len -= 0x800;
 	}
-	memcpy_fromio(buf, this->legacy.IO_ADDR_R, len);
+	memcpy_fromio(buf, cs553x->mmio, len);
 }
 
-static void cs553x_write_buf(struct nand_chip *this, const u_char *buf, int len)
+static void cs553x_data_out(struct cs553x_nand_controller *cs553x,
+			    const void *buf, unsigned int len)
 {
+	writeb(0, cs553x->mmio + MM_NAND_CTL);
 	while (unlikely(len > 0x800)) {
-		memcpy_toio(this->legacy.IO_ADDR_R, buf, 0x800);
+		memcpy_toio(cs553x->mmio, buf, 0x800);
 		buf += 0x800;
 		len -= 0x800;
 	}
-	memcpy_toio(this->legacy.IO_ADDR_R, buf, len);
+	memcpy_toio(cs553x->mmio, buf, len);
 }
 
-static unsigned char cs553x_read_byte(struct nand_chip *this)
+static int cs553x_wait_ready(struct cs553x_nand_controller *cs553x,
+			     unsigned int timeout_ms)
 {
-	return readb(this->legacy.IO_ADDR_R);
+	u8 mask = CS_NAND_CTLR_BUSY | CS_NAND_STS_FLASH_RDY;
+	u8 status;
+
+	return readb_poll_timeout(cs553x->mmio + MM_NAND_STS, status,
+				  (status & mask) == CS_NAND_STS_FLASH_RDY, 100,
+				  timeout_ms * 1000);
 }
 
-static void cs553x_write_byte(struct nand_chip *this, u_char byte)
+static int cs553x_exec_instr(struct cs553x_nand_controller *cs553x,
+			     const struct nand_op_instr *instr)
 {
-	int i = 100000;
+	unsigned int i;
+	int ret = 0;
 
-	while (i && readb(this->legacy.IO_ADDR_R + MM_NAND_STS) & CS_NAND_CTLR_BUSY) {
-		udelay(1);
-		i--;
+	switch (instr->type) {
+	case NAND_OP_CMD_INSTR:
+		ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_CLE,
+					     instr->ctx.cmd.opcode);
+		break;
+
+	case NAND_OP_ADDR_INSTR:
+		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+			ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_ALE,
+						     instr->ctx.addr.addrs[i]);
+			if (ret)
+				break;
+		}
+		break;
+
+	case NAND_OP_DATA_IN_INSTR:
+		cs553x_data_in(cs553x, instr->ctx.data.buf.in,
+			       instr->ctx.data.len);
+		break;
+
+	case NAND_OP_DATA_OUT_INSTR:
+		cs553x_data_out(cs553x, instr->ctx.data.buf.out,
+				instr->ctx.data.len);
+		break;
+
+	case NAND_OP_WAITRDY_INSTR:
+		ret = cs553x_wait_ready(cs553x, instr->ctx.waitrdy.timeout_ms);
+		break;
 	}
-	writeb(byte, this->legacy.IO_ADDR_W + 0x801);
+
+	if (instr->delay_ns)
+		ndelay(instr->delay_ns);
+
+	return ret;
 }
 
-static void cs553x_hwcontrol(struct nand_chip *this, int cmd,
-			     unsigned int ctrl)
+static int cs553x_exec_op(struct nand_chip *this,
+			  const struct nand_operation *op,
+			  bool check_only)
 {
-	void __iomem *mmio_base = this->legacy.IO_ADDR_R;
-	if (ctrl & NAND_CTRL_CHANGE) {
-		unsigned char ctl = (ctrl & ~NAND_CTRL_CHANGE ) ^ 0x01;
-		writeb(ctl, mmio_base + MM_NAND_CTL);
+	struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
+	unsigned int i;
+	int ret;
+
+	if (check_only)
+		return 0;
+
+	/* De-assert the CE pin. */
+	writeb(0, cs553x->mmio + MM_NAND_CTL);
+	for (i = 0; i < op->ninstrs; i++) {
+		ret = cs553x_exec_instr(cs553x, &op->instrs[i]);
+		if (ret)
+			break;
 	}
-	if (cmd != NAND_CMD_NONE)
-		cs553x_write_byte(this, cmd);
-}
 
-static int cs553x_device_ready(struct nand_chip *this)
-{
-	void __iomem *mmio_base = this->legacy.IO_ADDR_R;
-	unsigned char foo = readb(mmio_base + MM_NAND_STS);
+	/* Re-assert the CE pin. */
+	writeb(CS_NAND_CTL_CE, cs553x->mmio + MM_NAND_CTL);
 
-	return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY);
+	return ret;
 }
 
 static void cs_enable_hwecc(struct nand_chip *this, int mode)
 {
-	void __iomem *mmio_base = this->legacy.IO_ADDR_R;
+	struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
 
-	writeb(0x07, mmio_base + MM_NAND_ECC_CTL);
+	writeb(0x07, cs553x->mmio + MM_NAND_ECC_CTL);
 }
 
 static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
 			    u_char *ecc_code)
 {
+	struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
 	uint32_t ecc;
-	void __iomem *mmio_base = this->legacy.IO_ADDR_R;
 
-	ecc = readl(mmio_base + MM_NAND_STS);
+	ecc = readl(cs553x->mmio + MM_NAND_STS);
 
 	ecc_code[1] = ecc >> 8;
 	ecc_code[0] = ecc >> 16;
@@ -166,10 +241,31 @@
 	return 0;
 }
 
-static struct mtd_info *cs553x_mtd[4];
+static struct cs553x_nand_controller *controllers[4];
+
+static int cs553x_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+		return 0;
+
+	chip->ecc.size = 256;
+	chip->ecc.bytes = 3;
+	chip->ecc.hwctl  = cs_enable_hwecc;
+	chip->ecc.calculate = cs_calculate_ecc;
+	chip->ecc.correct  = nand_correct_data;
+	chip->ecc.strength = 1;
+
+	return 0;
+}
+
+static const struct nand_controller_ops cs553x_nand_controller_ops = {
+	.exec_op = cs553x_exec_op,
+	.attach_chip = cs553x_attach_chip,
+};
 
 static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
 {
+	struct cs553x_nand_controller *controller;
 	int err = 0;
 	struct nand_chip *this;
 	struct mtd_info *new_mtd;
@@ -183,41 +279,29 @@
 	}
 
 	/* Allocate memory for MTD device structure and private data */
-	this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
-	if (!this) {
+	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+	if (!controller) {
 		err = -ENOMEM;
 		goto out;
 	}
 
+	this = &controller->chip;
+	nand_controller_init(&controller->base);
+	controller->base.ops = &cs553x_nand_controller_ops;
+	this->controller = &controller->base;
 	new_mtd = nand_to_mtd(this);
 
 	/* Link the private data with the MTD structure */
 	new_mtd->owner = THIS_MODULE;
 
 	/* map physical address */
-	this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = ioremap(adr, 4096);
-	if (!this->legacy.IO_ADDR_R) {
+	controller->mmio = ioremap(adr, 4096);
+	if (!controller->mmio) {
 		pr_warn("ioremap cs553x NAND @0x%08lx failed\n", adr);
 		err = -EIO;
 		goto out_mtd;
 	}
 
-	this->legacy.cmd_ctrl = cs553x_hwcontrol;
-	this->legacy.dev_ready = cs553x_device_ready;
-	this->legacy.read_byte = cs553x_read_byte;
-	this->legacy.read_buf = cs553x_read_buf;
-	this->legacy.write_buf = cs553x_write_buf;
-
-	this->legacy.chip_delay = 0;
-
-	this->ecc.mode = NAND_ECC_HW;
-	this->ecc.size = 256;
-	this->ecc.bytes = 3;
-	this->ecc.hwctl  = cs_enable_hwecc;
-	this->ecc.calculate = cs_calculate_ecc;
-	this->ecc.correct  = nand_correct_data;
-	this->ecc.strength = 1;
-
 	/* Enable the following for a flash based bad block table */
 	this->bbt_options = NAND_BBT_USE_FLASH;
 
@@ -232,15 +316,15 @@
 	if (err)
 		goto out_free;
 
-	cs553x_mtd[cs] = new_mtd;
+	controllers[cs] = controller;
 	goto out;
 
 out_free:
 	kfree(new_mtd->name);
 out_ior:
-	iounmap(this->legacy.IO_ADDR_R);
+	iounmap(controller->mmio);
 out_mtd:
-	kfree(this);
+	kfree(controller);
 out:
 	return err;
 }
@@ -295,9 +379,10 @@
 	/* Register all devices together here. This means we can easily hack it to
 	   do mtdconcat etc. if we want to. */
 	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
-		if (cs553x_mtd[i]) {
+		if (controllers[i]) {
 			/* If any devices registered, return success. Else the last error. */
-			mtd_device_register(cs553x_mtd[i], NULL, 0);
+			mtd_device_register(nand_to_mtd(&controllers[i]->chip),
+					    NULL, 0);
 			err = 0;
 		}
 	}
@@ -312,26 +397,26 @@
 	int i;
 
 	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
-		struct mtd_info *mtd = cs553x_mtd[i];
-		struct nand_chip *this;
-		void __iomem *mmio_base;
+		struct cs553x_nand_controller *controller = controllers[i];
+		struct nand_chip *this = &controller->chip;
+		struct mtd_info *mtd = nand_to_mtd(this);
+		int ret;
 
 		if (!mtd)
 			continue;
 
-		this = mtd_to_nand(mtd);
-		mmio_base = this->legacy.IO_ADDR_R;
-
 		/* Release resources, unregister device */
-		nand_release(this);
+		ret = mtd_device_unregister(mtd);
+		WARN_ON(ret);
+		nand_cleanup(this);
 		kfree(mtd->name);
-		cs553x_mtd[i] = NULL;
+		controllers[i] = NULL;
 
 		/* unmap physical address */
-		iounmap(mmio_base);
+		iounmap(controller->mmio);
 
 		/* Free the MTD device structure */
-		kfree(this);
+		kfree(controller);
 	}
 }
 
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 25c185b..bfd3f44 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -14,7 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
-#include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/slab.h>
@@ -38,6 +38,7 @@
  * outputs in a "wire-AND" configuration, with no per-chip signals.
  */
 struct davinci_nand_info {
+	struct nand_controller	controller;
 	struct nand_chip	chip;
 
 	struct platform_device	*pdev;
@@ -81,46 +82,6 @@
 /*----------------------------------------------------------------------*/
 
 /*
- * Access to hardware control lines:  ALE, CLE, secondary chipselect.
- */
-
-static void nand_davinci_hwcontrol(struct nand_chip *nand, int cmd,
-				   unsigned int ctrl)
-{
-	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand));
-	void __iomem			*addr = info->current_cs;
-
-	/* Did the control lines change? */
-	if (ctrl & NAND_CTRL_CHANGE) {
-		if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
-			addr += info->mask_cle;
-		else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
-			addr += info->mask_ale;
-
-		nand->legacy.IO_ADDR_W = addr;
-	}
-
-	if (cmd != NAND_CMD_NONE)
-		iowrite8(cmd, nand->legacy.IO_ADDR_W);
-}
-
-static void nand_davinci_select_chip(struct nand_chip *nand, int chip)
-{
-	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand));
-
-	info->current_cs = info->vaddr;
-
-	/* maybe kick in a second chipselect */
-	if (chip > 0)
-		info->current_cs += info->mask_chipsel;
-
-	info->chip.legacy.IO_ADDR_W = info->current_cs;
-	info->chip.legacy.IO_ADDR_R = info->chip.legacy.IO_ADDR_W;
-}
-
-/*----------------------------------------------------------------------*/
-
-/*
  * 1-bit hardware ECC ... context maintained for each core chipselect
  */
 
@@ -207,7 +168,7 @@
 /*
  * 4-bit hardware ECC ... context maintained over entire AEMIF
  *
- * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
+ * This is a syndrome engine, but we avoid NAND_ECC_PLACEMENT_INTERLEAVED
  * since that forces use of a problematic "infix OOB" layout.
  * Among other things, it trashes manufacturer bad block markers.
  * Also, and specific to this hardware, it ECC-protects the "prepad"
@@ -410,48 +371,71 @@
 	return corrected;
 }
 
-/*----------------------------------------------------------------------*/
-
-/*
- * NOTE:  NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
- * how these chips are normally wired.  This translates to both 8 and 16
- * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
+/**
+ * nand_davinci_read_page_hwecc_oob_first - Hardware ECC page read with ECC
+ *                                          data read from OOB area
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
  *
- * For now we assume that configuration, or any other one which ignores
- * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
- * and have that transparently morphed into multiple NAND operations.
+ * Hardware ECC for large page chips, which requires the ECC data to be
+ * extracted from the OOB before the actual data is read.
  */
-static void nand_davinci_read_buf(struct nand_chip *chip, uint8_t *buf,
-				  int len)
+static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip,
+						  uint8_t *buf,
+						  int oob_required, int page)
 {
-	if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
-		ioread32_rep(chip->legacy.IO_ADDR_R, buf, len >> 2);
-	else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
-		ioread16_rep(chip->legacy.IO_ADDR_R, buf, len >> 1);
-	else
-		ioread8_rep(chip->legacy.IO_ADDR_R, buf, len);
-}
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int i, eccsize = chip->ecc.size, ret;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	uint8_t *p = buf;
+	uint8_t *ecc_code = chip->ecc.code_buf;
+	unsigned int max_bitflips = 0;
 
-static void nand_davinci_write_buf(struct nand_chip *chip, const uint8_t *buf,
-				   int len)
-{
-	if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
-		iowrite32_rep(chip->legacy.IO_ADDR_R, buf, len >> 2);
-	else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
-		iowrite16_rep(chip->legacy.IO_ADDR_R, buf, len >> 1);
-	else
-		iowrite8_rep(chip->legacy.IO_ADDR_R, buf, len);
-}
+	/* Read the OOB area first */
+	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+	if (ret)
+		return ret;
 
-/*
- * Check hardware register for wait status. Returns 1 if device is ready,
- * 0 if it is still busy.
- */
-static int nand_davinci_dev_ready(struct nand_chip *chip)
-{
-	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+	/* Move read cursor to start of page */
+	ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
+	if (ret)
+		return ret;
 
-	return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
+	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		int stat;
+
+		chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+		ret = nand_read_data_op(chip, p, eccsize, false, false);
+		if (ret)
+			return ret;
+
+		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
+		if (stat == -EBADMSG &&
+		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+			/* check for empty pages with bitflips */
+			stat = nand_check_erased_ecc_chunk(p, eccsize,
+							   &ecc_code[i],
+							   eccbytes, NULL, 0,
+							   chip->ecc.strength);
+		}
+
+		if (stat < 0) {
+			mtd->ecc_stats.failed++;
+		} else {
+			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
+	}
+	return max_bitflips;
 }
 
 /*----------------------------------------------------------------------*/
@@ -542,11 +526,11 @@
 		if (!of_property_read_string(pdev->dev.of_node,
 			"ti,davinci-ecc-mode", &mode)) {
 			if (!strncmp("none", mode, 4))
-				pdata->ecc_mode = NAND_ECC_NONE;
+				pdata->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
 			if (!strncmp("soft", mode, 4))
-				pdata->ecc_mode = NAND_ECC_SOFT;
+				pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
 			if (!strncmp("hw", mode, 2))
-				pdata->ecc_mode = NAND_ECC_HW;
+				pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 		}
 		if (!of_property_read_u32(pdev->dev.of_node,
 			"ti,davinci-ecc-bits", &prop))
@@ -597,22 +581,33 @@
 	if (IS_ERR(pdata))
 		return PTR_ERR(pdata);
 
-	switch (info->chip.ecc.mode) {
-	case NAND_ECC_NONE:
+	/* Use board-specific ECC config */
+	info->chip.ecc.engine_type = pdata->engine_type;
+	info->chip.ecc.placement = pdata->ecc_placement;
+
+	switch (info->chip.ecc.engine_type) {
+	case NAND_ECC_ENGINE_TYPE_NONE:
 		pdata->ecc_bits = 0;
 		break;
-	case NAND_ECC_SOFT:
+	case NAND_ECC_ENGINE_TYPE_SOFT:
 		pdata->ecc_bits = 0;
 		/*
-		 * This driver expects Hamming based ECC when ecc_mode is set
-		 * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
-		 * avoid adding an extra ->ecc_algo field to
-		 * davinci_nand_pdata.
+		 * This driver expects Hamming based ECC when engine_type is set
+		 * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
+		 * NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo
+		 * field to davinci_nand_pdata.
 		 */
-		info->chip.ecc.algo = NAND_ECC_HAMMING;
+		info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
 		break;
-	case NAND_ECC_HW:
+	case NAND_ECC_ENGINE_TYPE_ON_HOST:
 		if (pdata->ecc_bits == 4) {
+			int chunks = mtd->writesize / 512;
+
+			if (!chunks || mtd->oobsize < 16) {
+				dev_dbg(&info->pdev->dev, "too small\n");
+				return -EINVAL;
+			}
+
 			/*
 			 * No sanity checks:  CPUs must support this,
 			 * and the chips may not use NAND_BUSWIDTH_16.
@@ -634,14 +629,35 @@
 			info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
 			info->chip.ecc.bytes = 10;
 			info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
-			info->chip.ecc.algo = NAND_ECC_BCH;
+			info->chip.ecc.algo = NAND_ECC_ALGO_BCH;
+
+			/*
+			 * Update ECC layout if needed ... for 1-bit HW ECC, the
+			 * default is OK, but it allocates 6 bytes when only 3
+			 * are needed (for each 512 bytes). For 4-bit HW ECC,
+			 * the default is not usable: 10 bytes needed, not 6.
+			 *
+			 * For small page chips, preserve the manufacturer's
+			 * badblock marking data ... and make sure a flash BBT
+			 * table marker fits in the free bytes.
+			 */
+			if (chunks == 1) {
+				mtd_set_ooblayout(mtd,
+						  &hwecc4_small_ooblayout_ops);
+			} else if (chunks == 4 || chunks == 8) {
+				mtd_set_ooblayout(mtd,
+						  nand_get_large_page_ooblayout());
+				info->chip.ecc.read_page = nand_davinci_read_page_hwecc_oob_first;
+			} else {
+				return -EIO;
+			}
 		} else {
 			/* 1bit ecc hamming */
 			info->chip.ecc.calculate = nand_davinci_calculate_1bit;
 			info->chip.ecc.correct = nand_davinci_correct_1bit;
 			info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
 			info->chip.ecc.bytes = 3;
-			info->chip.ecc.algo = NAND_ECC_HAMMING;
+			info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
 		}
 		info->chip.ecc.size = 512;
 		info->chip.ecc.strength = pdata->ecc_bits;
@@ -650,39 +666,111 @@
 		return -EINVAL;
 	}
 
-	/*
-	 * Update ECC layout if needed ... for 1-bit HW ECC, the default
-	 * is OK, but it allocates 6 bytes when only 3 are needed (for
-	 * each 512 bytes).  For the 4-bit HW ECC, that default is not
-	 * usable:  10 bytes are needed, not 6.
-	 */
-	if (pdata->ecc_bits == 4) {
-		int chunks = mtd->writesize / 512;
+	return ret;
+}
 
-		if (!chunks || mtd->oobsize < 16) {
-			dev_dbg(&info->pdev->dev, "too small\n");
-			return -EINVAL;
-		}
+static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf,
+				 unsigned int len, bool force_8bit)
+{
+	u32 alignment = ((uintptr_t)buf | len) & 3;
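+	/* Use the widest access width that the buffer address and length allow. */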
 
-		/* For small page chips, preserve the manufacturer's
-		 * badblock marking data ... and make sure a flash BBT
-		 * table marker fits in the free bytes.
-		 */
-		if (chunks == 1) {
-			mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
-		} else if (chunks == 4 || chunks == 8) {
-			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
-			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
-		} else {
-			return -EIO;
+	if (force_8bit || (alignment & 1))
+		ioread8_rep(info->current_cs, buf, len);
+	else if (alignment & 3)
+		ioread16_rep(info->current_cs, buf, len >> 1);
+	else
+		ioread32_rep(info->current_cs, buf, len >> 2);
+}
+
+static void nand_davinci_data_out(struct davinci_nand_info *info,
+				  const void *buf, unsigned int len,
+				  bool force_8bit)
+{
+	u32 alignment = ((uintptr_t)buf | len) & 3;
+
+	if (force_8bit || (alignment & 1))
+		iowrite8_rep(info->current_cs, buf, len);
+	else if (alignment & 3)
+		iowrite16_rep(info->current_cs, buf, len >> 1);
+	else
+		iowrite32_rep(info->current_cs, buf, len >> 2);
+}
+
+static int davinci_nand_exec_instr(struct davinci_nand_info *info,
+				   const struct nand_op_instr *instr)
+{
+	unsigned int i, timeout_us;
+	u32 status;
+	int ret;
+
+	switch (instr->type) {
+	case NAND_OP_CMD_INSTR:
+		iowrite8(instr->ctx.cmd.opcode,
+			 info->current_cs + info->mask_cle);
+		break;
+
+	case NAND_OP_ADDR_INSTR:
+		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+			iowrite8(instr->ctx.addr.addrs[i],
+				 info->current_cs + info->mask_ale);
 		}
+		break;
+
+	case NAND_OP_DATA_IN_INSTR:
+		nand_davinci_data_in(info, instr->ctx.data.buf.in,
+				     instr->ctx.data.len,
+				     instr->ctx.data.force_8bit);
+		break;
+
+	case NAND_OP_DATA_OUT_INSTR:
+		nand_davinci_data_out(info, instr->ctx.data.buf.out,
+				      instr->ctx.data.len,
+				      instr->ctx.data.force_8bit);
+		break;
+
+	case NAND_OP_WAITRDY_INSTR:
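+		/* NANDFSR bit 0 is the device ready flag. */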
+		timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+		ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
+						 status, status & BIT(0), 100,
+						 timeout_us);
+		if (ret)
+			return ret;
+
+		break;
 	}
 
-	return ret;
+	if (instr->delay_ns)
+		ndelay(instr->delay_ns);
+
+	return 0;
+}
+
+static int davinci_nand_exec_op(struct nand_chip *chip,
+				const struct nand_operation *op,
+				bool check_only)
+{
+	struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+	unsigned int i;
+
+	if (check_only)
+		return 0;
+
+	info->current_cs = info->vaddr + (op->cs * info->mask_chipsel);
+
+	for (i = 0; i < op->ninstrs; i++) {
+		int ret;
+
+		ret = davinci_nand_exec_instr(info, &op->instrs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static const struct nand_controller_ops davinci_nand_controller_ops = {
 	.attach_chip = davinci_nand_attach_chip,
+	.exec_op = davinci_nand_exec_op,
 };
 
 static int nand_davinci_probe(struct platform_device *pdev)
@@ -746,11 +834,6 @@
 	mtd->dev.parent		= &pdev->dev;
 	nand_set_flash_node(&info->chip, pdev->dev.of_node);
 
-	info->chip.legacy.IO_ADDR_R	= vaddr;
-	info->chip.legacy.IO_ADDR_W	= vaddr;
-	info->chip.legacy.chip_delay	= 0;
-	info->chip.legacy.select_chip	= nand_davinci_select_chip;
-
 	/* options such as NAND_BBT_USE_FLASH */
 	info->chip.bbt_options	= pdata->bbt_options;
 	/* options such as 16-bit widths */
@@ -767,17 +850,6 @@
 	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
 	info->mask_cle		= pdata->mask_cle ? : MASK_CLE;
 
-	/* Set address of hardware control function */
-	info->chip.legacy.cmd_ctrl	= nand_davinci_hwcontrol;
-	info->chip.legacy.dev_ready	= nand_davinci_dev_ready;
-
-	/* Speed up buffer I/O */
-	info->chip.legacy.read_buf     = nand_davinci_read_buf;
-	info->chip.legacy.write_buf    = nand_davinci_write_buf;
-
-	/* Use board-specific ECC config */
-	info->chip.ecc.mode	= pdata->ecc_mode;
-
 	spin_lock_irq(&davinci_nand_lock);
 
 	/* put CSxNAND into NAND mode */
@@ -788,7 +860,9 @@
 	spin_unlock_irq(&davinci_nand_lock);
 
 	/* Scan to find existence of the device(s) */
-	info->chip.legacy.dummy_controller.ops = &davinci_nand_controller_ops;
+	nand_controller_init(&info->controller);
+	info->controller.ops = &davinci_nand_controller_ops;
+	info->chip.controller = &info->controller;
 	ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
 	if (ret < 0) {
 		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
@@ -817,13 +891,17 @@
 static int nand_davinci_remove(struct platform_device *pdev)
 {
 	struct davinci_nand_info *info = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &info->chip;
+	int ret;
 
 	spin_lock_irq(&davinci_nand_lock);
-	if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
+	if (info->chip.ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED)
 		ecc4_busy = false;
 	spin_unlock_irq(&davinci_nand_lock);
 
-	nand_release(&info->chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 3102ddb..fa2439c 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -21,7 +21,6 @@
 #include "denali.h"
 
 #define DENALI_NAND_NAME    "denali-nand"
-#define DENALI_DEFAULT_OOB_SKIP_BYTES	8
 
 /* for Indexed Addressing */
 #define DENALI_INDEXED_CTRL	0x00
@@ -762,9 +761,10 @@
 	return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true);
 }
 
-static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
-				       const struct nand_data_interface *conf)
+static int denali_setup_interface(struct nand_chip *chip, int chipnr,
+				  const struct nand_interface_config *conf)
 {
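+	/* Extra data setup time on the host side, in picoseconds. */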
+	static const unsigned int data_setup_on_host = 10000;
 	struct denali_controller *denali = to_denali_controller(chip);
 	struct denali_chip_sel *sel;
 	const struct nand_sdr_timings *timings;
@@ -797,15 +797,6 @@
 
 	sel = &to_denali_chip(chip)->sels[chipnr];
 
-	/* tREA -> ACC_CLKS */
-	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
-	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
-
-	tmp = ioread32(denali->reg + ACC_CLKS);
-	tmp &= ~ACC_CLKS__VALUE;
-	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
-	sel->acc_clks = tmp;
-
 	/* tRWH -> RE_2_WE */
 	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
 	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
@@ -863,14 +854,45 @@
 	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
 	sel->rdwr_en_hi_cnt = tmp;
 
-	/* tRP, tWP -> RDWR_EN_LO_CNT */
+	/*
+	 * tREA -> ACC_CLKS
+	 * tRP, tWP, tRHOH, tRC, tWC -> RDWR_EN_LO_CNT
+	 */
+
+	/*
+	 * Determine the minimum of acc_clks to meet the setup timing when
+	 * capturing the incoming data.
+	 *
+	 * The delay on the chip side is well-defined as tREA, but we need to
+	 * take additional delay into account. This includes a certain degree
+	 * of uncertainty, such as signal propagation delays on the PCB and
+	 * in the SoC, load capacity of the I/O pins, etc.
+	 */
+	acc_clks = DIV_ROUND_UP(timings->tREA_max + data_setup_on_host, t_x);
+
+	/* Determine the minimum of rdwr_en_lo_cnt from RE#/WE# pulse width */
 	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
+
+	/* Extend rdwr_en_lo to meet the data hold timing */
+	rdwr_en_lo = max_t(int, rdwr_en_lo,
+			   acc_clks - timings->tRHOH_min / t_x);
+
+	/* Extend rdwr_en_lo to meet the requirement for RE#/WE# cycle time */
 	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
 				     t_x);
-	rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
 	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
 	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
 
+	/* Center the data latch timing for extra safety */
+	acc_clks = (acc_clks + rdwr_en_lo +
+		    DIV_ROUND_UP(timings->tRHOH_min, t_x)) / 2;
+	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
+
+	tmp = ioread32(denali->reg + ACC_CLKS);
+	tmp &= ~ACC_CLKS__VALUE;
+	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
+	sel->acc_clks = tmp;
+
 	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
 	tmp &= ~RDWR_EN_LO_CNT__VALUE;
 	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
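
The reworked derivation is plain integer arithmetic and can be checked offline. A standalone sketch with hypothetical timing values (none of these numbers come from a datasheet):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	const int t_x = 10000;			/* ps per bus clock, hypothetical */
	const int tREA_max = 20000;		/* ps, chip-side read access delay */
	const int tRHOH_min = 15000;		/* ps, data hold from RE# rising */
	const int data_setup_on_host = 10000;	/* ps, board/SoC margin, as in the hunk */
	int acc_clks, rdwr_en_lo = 5;		/* assume tRP/tWP yielded 5 clocks */

	/* Minimum clocks before the host may latch incoming data. */
	acc_clks = DIV_ROUND_UP(tREA_max + data_setup_on_host, t_x);

	/* Extend the RE#/WE# low phase so the data hold time covers it. */
	if (rdwr_en_lo < acc_clks - tRHOH_min / t_x)
		rdwr_en_lo = acc_clks - tRHOH_min / t_x;

	/* Center the latch point between earliest and latest valid clock,
	 * exactly as the new code does before clamping to ACC_CLKS__VALUE. */
	acc_clks = (acc_clks + rdwr_en_lo + DIV_ROUND_UP(tRHOH_min, t_x)) / 2;

	printf("acc_clks=%d rdwr_en_lo=%d\n", acc_clks, rdwr_en_lo); /* 5 and 5 */
	return 0;
}
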
@@ -1151,7 +1173,7 @@
 static const struct nand_controller_ops denali_controller_ops = {
 	.attach_chip = denali_attach_chip,
 	.exec_op = denali_exec_op,
-	.setup_data_interface = denali_setup_data_interface,
+	.setup_interface = denali_setup_interface,
 };
 
 int denali_chip_init(struct denali_controller *denali,
@@ -1204,18 +1226,19 @@
 		mtd->name = "denali-nand";
 
 	if (denali->dma_avail) {
-		chip->options |= NAND_USE_BOUNCE_BUFFER;
+		chip->options |= NAND_USES_DMA;
 		chip->buf_align = 16;
 	}
 
-	/* clk rate info is needed for setup_data_interface */
+	/* clk rate info is needed for setup_interface */
 	if (!denali->clk_rate || !denali->clk_x_rate)
 		chip->options |= NAND_KEEP_TIMINGS;
 
 	chip->bbt_options |= NAND_BBT_USE_FLASH;
 	chip->bbt_options |= NAND_BBT_NO_OOB;
 	chip->options |= NAND_NO_SUBPAGE_WRITE;
-	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+	chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
 	chip->ecc.read_page = denali_read_page;
 	chip->ecc.write_page = denali_write_page;
 	chip->ecc.read_page_raw = denali_read_page_raw;
@@ -1302,21 +1325,23 @@
 
 	/*
 	 * Set how many bytes should be skipped before writing data in OOB.
-	 * If a non-zero value has already been set (by firmware or something),
-	 * just use it. Otherwise, set the driver's default.
+	 * If a platform requests a non-zero value, write it to the register.
+	 * Otherwise, read the value back, on the assumption that firmware has
+	 * already set it up.
 	 */
-	denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
-	if (!denali->oob_skip_bytes) {
-		denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
+	if (denali->oob_skip_bytes)
 		iowrite32(denali->oob_skip_bytes,
 			  denali->reg + SPARE_AREA_SKIP_BYTES);
-	}
+	else
+		denali->oob_skip_bytes = ioread32(denali->reg +
+						  SPARE_AREA_SKIP_BYTES);
 
 	iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
 	iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED);
 	iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
 	iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
 	iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
+	iowrite32(WRITE_PROTECT__FLAG, denali->reg + WRITE_PROTECT);
 
 	denali_clear_irq_all(denali);
 
@@ -1335,10 +1360,17 @@
 
 void denali_remove(struct denali_controller *denali)
 {
-	struct denali_chip *dchip;
+	struct denali_chip *dchip, *tmp;
+	struct nand_chip *chip;
+	int ret;
 
-	list_for_each_entry(dchip, &denali->chips, node)
-		nand_release(&dchip->chip);
+	list_for_each_entry_safe(dchip, tmp, &denali->chips, node) {
+		chip = &dchip->chip;
+		ret = mtd_device_unregister(nand_to_mtd(chip));
+		WARN_ON(ret);
+		nand_cleanup(chip);
+		list_del(&dchip->node);
+	}
 
 	denali_disable_irq(denali);
 }
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index e5cdcda..ac46eb7 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -328,7 +328,7 @@
 	struct nand_chip chip;
 	struct list_head node;
 	unsigned int nsels;
-	struct denali_chip_sel sels[0];
+	struct denali_chip_sel sels[];
 };
 
 /**
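
The sels[0] -> sels[] change swaps the old GNU zero-length-array idiom for a C99 flexible array member. Allocations pair it with struct_size(), which sizes the trailing array with overflow checking, as the (removed) legacy init in denali_dt.c already did. A minimal sketch:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Sketch only: overflow-checked allocation for 'nsels' trailing entries. */
static struct denali_chip *example_alloc_dchip(unsigned int nsels)
{
	struct denali_chip *dchip;

	dchip = kzalloc(struct_size(dchip, sels, nsels), GFP_KERNEL);
	if (dchip)
		dchip->nsels = nsels;

	return dchip;
}
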
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 5e14836..f08740a 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
@@ -14,6 +15,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/reset.h>
 
 #include "denali.h"
 
@@ -22,11 +24,14 @@
 	struct clk *clk;	/* core clock */
 	struct clk *clk_x;	/* bus interface clock */
 	struct clk *clk_ecc;	/* ECC circuit clock */
+	struct reset_control *rst;	/* core reset */
+	struct reset_control *rst_reg;	/* register reset */
 };
 
 struct denali_dt_data {
 	unsigned int revision;
 	unsigned int caps;
+	unsigned int oob_skip_bytes;
 	const struct nand_ecc_caps *ecc_caps;
 };
 
@@ -34,6 +39,7 @@
 		     512, 8, 15);
 static const struct denali_dt_data denali_socfpga_data = {
 	.caps = DENALI_CAP_HW_ECC_FIXUP,
+	.oob_skip_bytes = 2,
 	.ecc_caps = &denali_socfpga_ecc_caps,
 };
 
@@ -42,6 +48,7 @@
 static const struct denali_dt_data denali_uniphier_v5a_data = {
 	.caps = DENALI_CAP_HW_ECC_FIXUP |
 		DENALI_CAP_DMA_64BIT,
+	.oob_skip_bytes = 8,
 	.ecc_caps = &denali_uniphier_v5a_ecc_caps,
 };
 
@@ -51,6 +58,7 @@
 	.revision = 0x0501,
 	.caps = DENALI_CAP_HW_ECC_FIXUP |
 		DENALI_CAP_DMA_64BIT,
+	.oob_skip_bytes = 8,
 	.ecc_caps = &denali_uniphier_v5b_ecc_caps,
 };
 
@@ -102,47 +110,6 @@
 	return denali_chip_init(denali, dchip);
 }
 
-/* Backward compatibility for old platforms */
-static int denali_dt_legacy_chip_init(struct denali_controller *denali)
-{
-	struct denali_chip *dchip;
-	int nsels, i;
-
-	nsels = denali->nbanks;
-
-	dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
-			     GFP_KERNEL);
-	if (!dchip)
-		return -ENOMEM;
-
-	dchip->nsels = nsels;
-
-	for (i = 0; i < nsels; i++)
-		dchip->sels[i].bank = i;
-
-	nand_set_flash_node(&dchip->chip, denali->dev->of_node);
-
-	return denali_chip_init(denali, dchip);
-}
-
-/*
- * Check the DT binding.
- * The new binding expects chip subnodes in the controller node.
- * So, #address-cells = <1>; #size-cells = <0>; are required.
- * Check the #size-cells to distinguish the binding.
- */
-static bool denali_dt_is_legacy_binding(struct device_node *np)
-{
-	u32 cells;
-	int ret;
-
-	ret = of_property_read_u32(np, "#size-cells", &cells);
-	if (ret)
-		return true;
-
-	return cells != 0;
-}
-
 static int denali_dt_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -159,18 +126,18 @@
 	denali = &dt->controller;
 
 	data = of_device_get_match_data(dev);
-	if (data) {
-		denali->revision = data->revision;
-		denali->caps = data->caps;
-		denali->ecc_caps = data->ecc_caps;
-	}
+	if (WARN_ON(!data))
+		return -EINVAL;
+
+	denali->revision = data->revision;
+	denali->caps = data->caps;
+	denali->oob_skip_bytes = data->oob_skip_bytes;
+	denali->ecc_caps = data->ecc_caps;
 
 	denali->dev = dev;
 	denali->irq = platform_get_irq(pdev, 0);
-	if (denali->irq < 0) {
-		dev_err(dev, "no irq defined\n");
+	if (denali->irq < 0)
 		return denali->irq;
-	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "denali_reg");
 	denali->reg = devm_ioremap_resource(dev, res);
@@ -194,6 +161,14 @@
 	if (IS_ERR(dt->clk_ecc))
 		return PTR_ERR(dt->clk_ecc);
 
+	dt->rst = devm_reset_control_get_optional_shared(dev, "nand");
+	if (IS_ERR(dt->rst))
+		return PTR_ERR(dt->rst);
+
+	dt->rst_reg = devm_reset_control_get_optional_shared(dev, "reg");
+	if (IS_ERR(dt->rst_reg))
+		return PTR_ERR(dt->rst_reg);
+
 	ret = clk_prepare_enable(dt->clk);
 	if (ret)
 		return ret;
@@ -209,21 +184,35 @@
 	denali->clk_rate = clk_get_rate(dt->clk);
 	denali->clk_x_rate = clk_get_rate(dt->clk_x);
 
-	ret = denali_init(denali);
+	/*
+	 * Deassert the register reset, and the core reset in this order.
+	 * Deasserting the core reset while the register reset is asserted
+	 * will cause unpredictable behavior in the controller.
+	 */
+	ret = reset_control_deassert(dt->rst_reg);
 	if (ret)
 		goto out_disable_clk_ecc;
 
-	if (denali_dt_is_legacy_binding(dev->of_node)) {
-		ret = denali_dt_legacy_chip_init(denali);
-		if (ret)
+	ret = reset_control_deassert(dt->rst);
+	if (ret)
+		goto out_assert_rst_reg;
+
+	/*
+	 * When the reset is deasserted, the initialization (bootstrap) sequence
+	 * is kicked off. The driver must wait until it has finished; accessing
+	 * the controller earlier results in unpredictable behavior.
+	 */
+	usleep_range(200, 1000);
+
+	ret = denali_init(denali);
+	if (ret)
+		goto out_assert_rst;
+
+	for_each_child_of_node(dev->of_node, np) {
+		ret = denali_dt_chip_init(denali, np);
+		if (ret) {
+			of_node_put(np);
 			goto out_remove_denali;
-	} else {
-		for_each_child_of_node(dev->of_node, np) {
-			ret = denali_dt_chip_init(denali, np);
-			if (ret) {
-				of_node_put(np);
-				goto out_remove_denali;
-			}
 		}
 	}
 
@@ -233,6 +222,10 @@
 
 out_remove_denali:
 	denali_remove(denali);
+out_assert_rst:
+	reset_control_assert(dt->rst);
+out_assert_rst_reg:
+	reset_control_assert(dt->rst_reg);
 out_disable_clk_ecc:
 	clk_disable_unprepare(dt->clk_ecc);
 out_disable_clk_x:
@@ -248,6 +241,8 @@
 	struct denali_dt *dt = platform_get_drvdata(pdev);
 
 	denali_remove(&dt->controller);
+	reset_control_assert(dt->rst);
+	reset_control_assert(dt->rst_reg);
 	clk_disable_unprepare(dt->clk_ecc);
 	clk_disable_unprepare(dt->clk_x);
 	clk_disable_unprepare(dt->clk);
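
Both reset lines are requested as optional: devm_reset_control_get_optional_shared() returns NULL rather than an error when the DT omits the reset, and the reset API treats a NULL control as a no-op, so one probe path serves SoCs with and without these lines. A condensed sketch (helper name hypothetical):

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/reset.h>

static int example_denali_resets(struct device *dev)
{
	struct reset_control *rst_reg, *rst;
	int ret;

	rst_reg = devm_reset_control_get_optional_shared(dev, "reg");
	if (IS_ERR(rst_reg))
		return PTR_ERR(rst_reg);

	rst = devm_reset_control_get_optional_shared(dev, "nand");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Register reset strictly before core reset, per the comment above. */
	ret = reset_control_deassert(rst_reg);
	if (ret)
		return ret;

	ret = reset_control_deassert(rst);
	if (ret) {
		reset_control_assert(rst_reg);
		return ret;
	}

	/* Give the controller's bootstrap sequence time to finish. */
	usleep_range(200, 1000);
	return 0;
}
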
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index d62aa52..20c085a 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -74,15 +74,15 @@
 		return ret;
 	}
 
-	denali->reg = ioremap_nocache(csr_base, csr_len);
+	denali->reg = ioremap(csr_base, csr_len);
 	if (!denali->reg) {
 		dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
 		return -ENOMEM;
 	}
 
-	denali->host = ioremap_nocache(mem_base, mem_len);
+	denali->host = ioremap(mem_base, mem_len);
 	if (!denali->host) {
-		dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
+		dev_err(&dev->dev, "Spectra: ioremap failed!\n");
 		ret = -ENOMEM;
 		goto out_unmap_reg;
 	}
@@ -100,7 +100,7 @@
 		goto out_remove_denali;
 	}
 
-	dchip->chip.ecc.options |= NAND_ECC_MAXIMIZE;
+	dchip->chip.base.ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
 
 	dchip->nsels = nsels;
 
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 522390b..26b265e 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -58,6 +58,7 @@
 static struct mtd_info *doclist = NULL;
 
 struct doc_priv {
+	struct nand_controller base;
 	void __iomem *virtadr;
 	unsigned long physadr;
 	u_char ChipID;
@@ -69,6 +70,7 @@
 	int mh1_page;
 	struct rs_control *rs_decoder;
 	struct mtd_info *nextdoc;
+	bool supports_32b_reads;
 
 	/* Handle the last stage of initialization (BBT scan, partitioning) */
 	int (*late_init)(struct mtd_info *mtd);
@@ -84,10 +86,6 @@
 #define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
 #define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
 
-static void doc200x_hwcontrol(struct nand_chip *this, int cmd,
-			      unsigned int bitmask);
-static void doc200x_select_chip(struct nand_chip *this, int chip);
-
 static int debug = 0;
 module_param(debug, int, 0);
 
@@ -302,20 +300,6 @@
 	WriteDOC(datum, docptr, 2k_CDSN_IO);
 }
 
-static u_char doc2000_read_byte(struct nand_chip *this)
-{
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	u_char ret;
-
-	ReadDOC(docptr, CDSNSlowIO);
-	DoC_Delay(doc, 2);
-	ret = ReadDOC(docptr, 2k_CDSN_IO);
-	if (debug)
-		printk("read_byte returns %02x\n", ret);
-	return ret;
-}
-
 static void doc2000_writebuf(struct nand_chip *this, const u_char *buf,
 			     int len)
 {
@@ -337,33 +321,42 @@
 {
 	struct doc_priv *doc = nand_get_controller_data(this);
 	void __iomem *docptr = doc->virtadr;
+	u32 *buf32 = (u32 *)buf;
 	int i;
 
 	if (debug)
 		printk("readbuf of %d bytes: ", len);
 
-	for (i = 0; i < len; i++)
-		buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+	if (!doc->supports_32b_reads ||
+	    ((((unsigned long)buf) | len) & 3)) {
+		for (i = 0; i < len; i++)
+			buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+	} else {
+		for (i = 0; i < len / 4; i++)
+			buf32[i] = readl(docptr + DoC_2k_CDSN_IO + i);
+	}
 }
 
-static void doc2000_readbuf_dword(struct nand_chip *this, u_char *buf, int len)
+/*
+ * We need our own readid() here because it's called before the NAND chip
+ * has been initialized, and calling nand_op_readid() would lead to a NULL
+ * pointer dereference when accessing the NAND timings.
+ */
+static void doc200x_readid(struct nand_chip *this, unsigned int cs, u8 *id)
 {
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int i;
+	u8 addr = 0;
+	struct nand_op_instr instrs[] = {
+		NAND_OP_CMD(NAND_CMD_READID, 0),
+		NAND_OP_ADDR(1, &addr, 50),
+		NAND_OP_8BIT_DATA_IN(2, id, 0),
+	};
 
-	if (debug)
-		printk("readbuf_dword of %d bytes: ", len);
+	struct nand_operation op = NAND_OPERATION(cs, instrs);
 
-	if (unlikely((((unsigned long)buf) | len) & 3)) {
-		for (i = 0; i < len; i++) {
-			*(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
-		}
-	} else {
-		for (i = 0; i < len; i += 4) {
-			*(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
-		}
-	}
+	if (!id)
+		op.ninstrs--;
+
+	this->controller->ops->exec_op(this, &op, false);
 }
 
 static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
@@ -371,20 +364,11 @@
 	struct nand_chip *this = mtd_to_nand(mtd);
 	struct doc_priv *doc = nand_get_controller_data(this);
 	uint16_t ret;
+	u8 id[2];
 
-	doc200x_select_chip(this, nr);
-	doc200x_hwcontrol(this, NAND_CMD_READID,
-			  NAND_CTRL_CLE | NAND_CTRL_CHANGE);
-	doc200x_hwcontrol(this, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
-	doc200x_hwcontrol(this, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+	doc200x_readid(this, nr, id);
 
-	/* We can't use dev_ready here, but at least we wait for the
-	 * command to complete
-	 */
-	udelay(50);
-
-	ret = this->legacy.read_byte(this) << 8;
-	ret |= this->legacy.read_byte(this);
+	ret = ((u16)id[0] << 8) | id[1];
 
 	if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
 		/* First chip probe. See if we get same results by 32-bit access */
@@ -394,18 +378,12 @@
 		} ident;
 		void __iomem *docptr = doc->virtadr;
 
-		doc200x_hwcontrol(this, NAND_CMD_READID,
-				  NAND_CTRL_CLE | NAND_CTRL_CHANGE);
-		doc200x_hwcontrol(this, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
-		doc200x_hwcontrol(this, NAND_CMD_NONE,
-				  NAND_NCE | NAND_CTRL_CHANGE);
-
-		udelay(50);
+		doc200x_readid(this, nr, NULL);
 
 		ident.dword = readl(docptr + DoC_2k_CDSN_IO);
 		if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
 			pr_info("DiskOnChip 2000 responds to DWORD access\n");
-			this->legacy.read_buf = &doc2000_readbuf_dword;
+			doc->supports_32b_reads = true;
 		}
 	}
 
@@ -434,20 +412,6 @@
 	pr_debug("Detected %d chips per floor.\n", i);
 }
 
-static int doc200x_wait(struct nand_chip *this)
-{
-	struct doc_priv *doc = nand_get_controller_data(this);
-
-	int status;
-
-	DoC_WaitReady(doc);
-	nand_status_op(this, NULL);
-	DoC_WaitReady(doc);
-	status = (int)this->legacy.read_byte(this);
-
-	return status;
-}
-
 static void doc2001_write_byte(struct nand_chip *this, u_char datum)
 {
 	struct doc_priv *doc = nand_get_controller_data(this);
@@ -458,19 +422,6 @@
 	WriteDOC(datum, docptr, WritePipeTerm);
 }
 
-static u_char doc2001_read_byte(struct nand_chip *this)
-{
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-
-	//ReadDOC(docptr, CDSNSlowIO);
-	/* 11.4.5 -- delay twice to allow extended length cycle */
-	DoC_Delay(doc, 2);
-	ReadDOC(docptr, ReadPipeInit);
-	//return ReadDOC(docptr, Mil_CDSN_IO);
-	return ReadDOC(docptr, LastDataRead);
-}
-
 static void doc2001_writebuf(struct nand_chip *this, const u_char *buf, int len)
 {
 	struct doc_priv *doc = nand_get_controller_data(this);
@@ -499,20 +450,6 @@
 	buf[i] = ReadDOC(docptr, LastDataRead);
 }
 
-static u_char doc2001plus_read_byte(struct nand_chip *this)
-{
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	u_char ret;
-
-	ReadDOC(docptr, Mplus_ReadPipeInit);
-	ReadDOC(docptr, Mplus_ReadPipeInit);
-	ret = ReadDOC(docptr, Mplus_LastDataRead);
-	if (debug)
-		printk("read_byte returns %02x\n", ret);
-	return ret;
-}
-
 static void doc2001plus_writebuf(struct nand_chip *this, const u_char *buf, int len)
 {
 	struct doc_priv *doc = nand_get_controller_data(this);
@@ -550,9 +487,12 @@
 	}
 
 	/* Terminate read pipeline */
-	buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
-	if (debug && i < 16)
-		printk("%02x ", buf[len - 2]);
+	if (len >= 2) {
+		buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
+		if (debug && i < 16)
+			printk("%02x ", buf[len - 2]);
+	}
+
 	buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
 	if (debug && i < 16)
 		printk("%02x ", buf[len - 1]);
@@ -560,226 +500,163 @@
 		printk("\n");
 }
 
-static void doc2001plus_select_chip(struct nand_chip *this, int chip)
+static void doc200x_write_control(struct doc_priv *doc, u8 value)
+{
+	WriteDOC(value, doc->virtadr, CDSNControl);
+	/* 11.4.3 -- 4 NOPs after CSDNControl write */
+	DoC_Delay(doc, 4);
+}
+
+static void doc200x_exec_instr(struct nand_chip *this,
+			       const struct nand_op_instr *instr)
 {
 	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int floor = 0;
+	unsigned int i;
 
-	if (debug)
-		printk("select chip (%d)\n", chip);
+	switch (instr->type) {
+	case NAND_OP_CMD_INSTR:
+		doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_CLE);
+		doc2000_write_byte(this, instr->ctx.cmd.opcode);
+		break;
 
-	if (chip == -1) {
-		/* Disable flash internally */
-		WriteDOC(0, docptr, Mplus_FlashSelect);
-		return;
+	case NAND_OP_ADDR_INSTR:
+		doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_ALE);
+		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+			u8 addr = instr->ctx.addr.addrs[i];
+
+			if (DoC_is_2000(doc))
+				doc2000_write_byte(this, addr);
+			else
+				doc2001_write_byte(this, addr);
+		}
+		break;
+
+	case NAND_OP_DATA_IN_INSTR:
+		doc200x_write_control(doc, CDSN_CTRL_CE);
+		if (DoC_is_2000(doc))
+			doc2000_readbuf(this, instr->ctx.data.buf.in,
+					instr->ctx.data.len);
+		else
+			doc2001_readbuf(this, instr->ctx.data.buf.in,
+					instr->ctx.data.len);
+		break;
+
+	case NAND_OP_DATA_OUT_INSTR:
+		doc200x_write_control(doc, CDSN_CTRL_CE);
+		if (DoC_is_2000(doc))
+			doc2000_writebuf(this, instr->ctx.data.buf.out,
+					 instr->ctx.data.len);
+		else
+			doc2001_writebuf(this, instr->ctx.data.buf.out,
+					 instr->ctx.data.len);
+		break;
+
+	case NAND_OP_WAITRDY_INSTR:
+		DoC_WaitReady(doc);
+		break;
 	}
 
-	floor = chip / doc->chips_per_floor;
-	chip -= (floor * doc->chips_per_floor);
+	if (instr->delay_ns)
+		ndelay(instr->delay_ns);
+}
+
+static int doc200x_exec_op(struct nand_chip *this,
+			   const struct nand_operation *op,
+			   bool check_only)
+{
+	struct doc_priv *doc = nand_get_controller_data(this);
+	unsigned int i;
+
+	if (check_only)
+		return true;
+
+	doc->curchip = op->cs % doc->chips_per_floor;
+	doc->curfloor = op->cs / doc->chips_per_floor;
+
+	WriteDOC(doc->curfloor, doc->virtadr, FloorSelect);
+	WriteDOC(doc->curchip, doc->virtadr, CDSNDeviceSelect);
+
+	/* Assert CE pin */
+	doc200x_write_control(doc, CDSN_CTRL_CE);
+
+	for (i = 0; i < op->ninstrs; i++)
+		doc200x_exec_instr(this, &op->instrs[i]);
+
+	/* De-assert CE pin */
+	doc200x_write_control(doc, 0);
+
+	return 0;
+}
+
+static void doc2001plus_write_pipe_term(struct doc_priv *doc)
+{
+	WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm);
+	WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm);
+}
+
+static void doc2001plus_exec_instr(struct nand_chip *this,
+				   const struct nand_op_instr *instr)
+{
+	struct doc_priv *doc = nand_get_controller_data(this);
+	unsigned int i;
+
+	switch (instr->type) {
+	case NAND_OP_CMD_INSTR:
+		WriteDOC(instr->ctx.cmd.opcode, doc->virtadr, Mplus_FlashCmd);
+		doc2001plus_write_pipe_term(doc);
+		break;
+
+	case NAND_OP_ADDR_INSTR:
+		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+			u8 addr = instr->ctx.addr.addrs[i];
+
+			WriteDOC(addr, doc->virtadr, Mplus_FlashAddress);
+		}
+		doc2001plus_write_pipe_term(doc);
+		/* deassert ALE */
+		WriteDOC(0, doc->virtadr, Mplus_FlashControl);
+		break;
+
+	case NAND_OP_DATA_IN_INSTR:
+		doc2001plus_readbuf(this, instr->ctx.data.buf.in,
+				    instr->ctx.data.len);
+		break;
+	case NAND_OP_DATA_OUT_INSTR:
+		doc2001plus_writebuf(this, instr->ctx.data.buf.out,
+				     instr->ctx.data.len);
+		doc2001plus_write_pipe_term(doc);
+		break;
+	case NAND_OP_WAITRDY_INSTR:
+		DoC_WaitReady(doc);
+		break;
+	}
+
+	if (instr->delay_ns)
+		ndelay(instr->delay_ns);
+}
+
+static int doc2001plus_exec_op(struct nand_chip *this,
+			       const struct nand_operation *op,
+			       bool check_only)
+{
+	struct doc_priv *doc = nand_get_controller_data(this);
+	unsigned int i;
+
+	if (check_only)
+		return true;
+
+	doc->curchip = op->cs % doc->chips_per_floor;
+	doc->curfloor = op->cs / doc->chips_per_floor;
 
 	/* Assert ChipEnable and deassert WriteProtect */
-	WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
-	nand_reset_op(this);
+	WriteDOC(DOC_FLASH_CE, doc->virtadr, Mplus_FlashSelect);
 
-	doc->curchip = chip;
-	doc->curfloor = floor;
-}
+	for (i = 0; i < op->ninstrs; i++)
+		doc2001plus_exec_instr(this, &op->instrs[i]);
 
-static void doc200x_select_chip(struct nand_chip *this, int chip)
-{
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int floor = 0;
+	/* De-assert ChipEnable */
+	WriteDOC(0, doc->virtadr, Mplus_FlashSelect);
 
-	if (debug)
-		printk("select chip (%d)\n", chip);
-
-	if (chip == -1)
-		return;
-
-	floor = chip / doc->chips_per_floor;
-	chip -= (floor * doc->chips_per_floor);
-
-	/* 11.4.4 -- deassert CE before changing chip */
-	doc200x_hwcontrol(this, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
-
-	WriteDOC(floor, docptr, FloorSelect);
-	WriteDOC(chip, docptr, CDSNDeviceSelect);
-
-	doc200x_hwcontrol(this, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
-
-	doc->curchip = chip;
-	doc->curfloor = floor;
-}
-
-#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)
-
-static void doc200x_hwcontrol(struct nand_chip *this, int cmd,
-			      unsigned int ctrl)
-{
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		doc->CDSNControl &= ~CDSN_CTRL_MSK;
-		doc->CDSNControl |= ctrl & CDSN_CTRL_MSK;
-		if (debug)
-			printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
-		WriteDOC(doc->CDSNControl, docptr, CDSNControl);
-		/* 11.4.3 -- 4 NOPs after CSDNControl write */
-		DoC_Delay(doc, 4);
-	}
-	if (cmd != NAND_CMD_NONE) {
-		if (DoC_is_2000(doc))
-			doc2000_write_byte(this, cmd);
-		else
-			doc2001_write_byte(this, cmd);
-	}
-}
-
-static void doc2001plus_command(struct nand_chip *this, unsigned command,
-				int column, int page_addr)
-{
-	struct mtd_info *mtd = nand_to_mtd(this);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-
-	/*
-	 * Must terminate write pipeline before sending any commands
-	 * to the device.
-	 */
-	if (command == NAND_CMD_PAGEPROG) {
-		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-	}
-
-	/*
-	 * Write out the command to the device.
-	 */
-	if (command == NAND_CMD_SEQIN) {
-		int readcmd;
-
-		if (column >= mtd->writesize) {
-			/* OOB area */
-			column -= mtd->writesize;
-			readcmd = NAND_CMD_READOOB;
-		} else if (column < 256) {
-			/* First 256 bytes --> READ0 */
-			readcmd = NAND_CMD_READ0;
-		} else {
-			column -= 256;
-			readcmd = NAND_CMD_READ1;
-		}
-		WriteDOC(readcmd, docptr, Mplus_FlashCmd);
-	}
-	WriteDOC(command, docptr, Mplus_FlashCmd);
-	WriteDOC(0, docptr, Mplus_WritePipeTerm);
-	WriteDOC(0, docptr, Mplus_WritePipeTerm);
-
-	if (column != -1 || page_addr != -1) {
-		/* Serially input address */
-		if (column != -1) {
-			/* Adjust columns for 16 bit buswidth */
-			if (this->options & NAND_BUSWIDTH_16 &&
-					!nand_opcode_8bits(command))
-				column >>= 1;
-			WriteDOC(column, docptr, Mplus_FlashAddress);
-		}
-		if (page_addr != -1) {
-			WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
-			WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
-			if (this->options & NAND_ROW_ADDR_3) {
-				WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
-				printk("high density\n");
-			}
-		}
-		WriteDOC(0, docptr, Mplus_WritePipeTerm);
-		WriteDOC(0, docptr, Mplus_WritePipeTerm);
-		/* deassert ALE */
-		if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
-		    command == NAND_CMD_READOOB || command == NAND_CMD_READID)
-			WriteDOC(0, docptr, Mplus_FlashControl);
-	}
-
-	/*
-	 * program and erase have their own busy handlers
-	 * status and sequential in needs no delay
-	 */
-	switch (command) {
-
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_ERASE2:
-	case NAND_CMD_SEQIN:
-	case NAND_CMD_STATUS:
-		return;
-
-	case NAND_CMD_RESET:
-		if (this->legacy.dev_ready)
-			break;
-		udelay(this->legacy.chip_delay);
-		WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
-		WriteDOC(0, docptr, Mplus_WritePipeTerm);
-		WriteDOC(0, docptr, Mplus_WritePipeTerm);
-		while (!(this->legacy.read_byte(this) & 0x40)) ;
-		return;
-
-		/* This applies to read commands */
-	default:
-		/*
-		 * If we don't have access to the busy pin, we apply the given
-		 * command delay
-		 */
-		if (!this->legacy.dev_ready) {
-			udelay(this->legacy.chip_delay);
-			return;
-		}
-	}
-
-	/* Apply this short delay always to ensure that we do wait tWB in
-	 * any case on any machine. */
-	ndelay(100);
-	/* wait until command is processed */
-	while (!this->legacy.dev_ready(this)) ;
-}
-
-static int doc200x_dev_ready(struct nand_chip *this)
-{
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-
-	if (DoC_is_MillenniumPlus(doc)) {
-		/* 11.4.2 -- must NOP four times before checking FR/B# */
-		DoC_Delay(doc, 4);
-		if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
-			if (debug)
-				printk("not ready\n");
-			return 0;
-		}
-		if (debug)
-			printk("was ready\n");
-		return 1;
-	} else {
-		/* 11.4.2 -- must NOP four times before checking FR/B# */
-		DoC_Delay(doc, 4);
-		if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
-			if (debug)
-				printk("not ready\n");
-			return 0;
-		}
-		/* 11.4.2 -- Must NOP twice if it's ready */
-		DoC_Delay(doc, 2);
-		if (debug)
-			printk("was ready\n");
-		return 1;
-	}
-}
-
-static int doc200x_block_bad(struct nand_chip *this, loff_t ofs)
-{
-	/* This is our last resort if we couldn't find or create a BBT.  Just
-	   pretend all blocks are good. */
 	return 0;
 }
 
@@ -1169,7 +1046,7 @@
 		"    NoOfBootImageBlocks   = %d\n"
 		"    NoOfBinaryPartitions  = %d\n"
 		"    NoOfBDTLPartitions    = %d\n"
-		"    BlockMultiplerBits    = %d\n"
+		"    BlockMultiplierBits   = %d\n"
 		"    FormatFlgs            = %d\n"
 		"    OsakVersion           = %d.%d.%d.%d\n"
 		"    PercentUsed           = %d\n",
@@ -1344,9 +1221,6 @@
 	struct nand_chip *this = mtd_to_nand(mtd);
 	struct doc_priv *doc = nand_get_controller_data(this);
 
-	this->legacy.read_byte = doc2000_read_byte;
-	this->legacy.write_buf = doc2000_writebuf;
-	this->legacy.read_buf = doc2000_readbuf;
 	doc->late_init = nftl_scan_bbt;
 
 	doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
@@ -1360,10 +1234,6 @@
 	struct nand_chip *this = mtd_to_nand(mtd);
 	struct doc_priv *doc = nand_get_controller_data(this);
 
-	this->legacy.read_byte = doc2001_read_byte;
-	this->legacy.write_buf = doc2001_writebuf;
-	this->legacy.read_buf = doc2001_readbuf;
-
 	ReadDOC(doc->virtadr, ChipID);
 	ReadDOC(doc->virtadr, ChipID);
 	ReadDOC(doc->virtadr, ChipID);
@@ -1390,13 +1260,7 @@
 	struct nand_chip *this = mtd_to_nand(mtd);
 	struct doc_priv *doc = nand_get_controller_data(this);
 
-	this->legacy.read_byte = doc2001plus_read_byte;
-	this->legacy.write_buf = doc2001plus_writebuf;
-	this->legacy.read_buf = doc2001plus_readbuf;
 	doc->late_init = inftl_scan_bbt;
-	this->legacy.cmd_ctrl = NULL;
-	this->legacy.select_chip = doc2001plus_select_chip;
-	this->legacy.cmdfunc = doc2001plus_command;
 	this->ecc.hwctl = doc2001plus_enable_hwecc;
 
 	doc->chips_per_floor = 1;
@@ -1405,6 +1269,33 @@
 	return 1;
 }
 
+static int doc200x_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+		return 0;
+
+	chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+	chip->ecc.size = 512;
+	chip->ecc.bytes = 6;
+	chip->ecc.strength = 2;
+	chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
+	chip->ecc.hwctl = doc200x_enable_hwecc;
+	chip->ecc.calculate = doc200x_calculate_ecc;
+	chip->ecc.correct = doc200x_correct_data;
+
+	return 0;
+}
+
+static const struct nand_controller_ops doc200x_ops = {
+	.exec_op = doc200x_exec_op,
+	.attach_chip = doc200x_attach_chip,
+};
+
+static const struct nand_controller_ops doc2001plus_ops = {
+	.exec_op = doc2001plus_exec_op,
+	.attach_chip = doc200x_attach_chip,
+};
+
 static int __init doc_probe(unsigned long physadr)
 {
 	struct nand_chip *nand = NULL;
@@ -1482,7 +1373,7 @@
 			break;
 		case DOC_ChipID_DocMilPlus32:
 			pr_err("DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
-			/* fall through */
+			fallthrough;
 		default:
 			ret = -ENODEV;
 			goto notfound;
@@ -1548,7 +1439,6 @@
 		goto fail;
 	}
 
-
 	/*
 	 * Allocate a RS codec instance
 	 *
@@ -1566,6 +1456,12 @@
 		goto fail;
 	}
 
+	nand_controller_init(&doc->base);
+	if (ChipID == DOC_ChipID_DocMilPlus16)
+		doc->base.ops = &doc2001plus_ops;
+	else
+		doc->base.ops = &doc200x_ops;
+
 	mtd			= nand_to_mtd(nand);
 	nand->bbt_td		= (struct nand_bbt_descr *) (doc + 1);
 	nand->bbt_md		= nand->bbt_td + 1;
@@ -1573,24 +1469,11 @@
 	mtd->owner		= THIS_MODULE;
 	mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops);
 
+	nand->controller	= &doc->base;
 	nand_set_controller_data(nand, doc);
-	nand->legacy.select_chip	= doc200x_select_chip;
-	nand->legacy.cmd_ctrl		= doc200x_hwcontrol;
-	nand->legacy.dev_ready	= doc200x_dev_ready;
-	nand->legacy.waitfunc	= doc200x_wait;
-	nand->legacy.block_bad	= doc200x_block_bad;
-	nand->ecc.hwctl		= doc200x_enable_hwecc;
-	nand->ecc.calculate	= doc200x_calculate_ecc;
-	nand->ecc.correct	= doc200x_correct_data;
-
-	nand->ecc.mode		= NAND_ECC_HW_SYNDROME;
-	nand->ecc.size		= 512;
-	nand->ecc.bytes		= 6;
-	nand->ecc.strength	= 2;
-	nand->ecc.options	= NAND_ECC_GENERIC_ERASED_CHECK;
 	nand->bbt_options	= NAND_BBT_USE_FLASH;
 	/* Skip the automatic BBT scan so we can run it manually */
-	nand->options		|= NAND_SKIP_BBTSCAN;
+	nand->options		|= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
 
 	doc->physadr		= physadr;
 	doc->virtadr		= virtadr;
@@ -1641,13 +1524,16 @@
 	struct mtd_info *mtd, *nextmtd;
 	struct nand_chip *nand;
 	struct doc_priv *doc;
+	int ret;
 
 	for (mtd = doclist; mtd; mtd = nextmtd) {
 		nand = mtd_to_nand(mtd);
 		doc = nand_get_controller_data(nand);
 
 		nextmtd = doc->nextdoc;
-		nand_release(nand);
+		ret = mtd_device_unregister(mtd);
+		WARN_ON(ret);
+		nand_cleanup(nand);
 		iounmap(doc->virtadr);
 		release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
 		free_rs(doc->rs_decoder);
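
With ->exec_op(), every access is described as a flat instruction stream instead of cmd_ctrl()/read_byte() callbacks, and the controller driver only has to replay the instructions, as doc200x_readid() above demonstrates. For illustration, a READ STATUS expressed against this interface (helper name hypothetical; real callers go through core helpers such as nand_status_op()):

#include <linux/mtd/rawnand.h>

static int example_read_status(struct nand_chip *chip, u8 *status)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_STATUS, 0),
		NAND_OP_8BIT_DATA_IN(1, status, 0),
	};
	struct nand_operation op = NAND_OPERATION(0, instrs);

	/* Lands in doc200x_exec_op() or doc2001plus_exec_op() above. */
	return chip->controller->ops->exec_op(chip, &op, false);
}
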
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 634c550..b2af7f8 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -244,7 +244,7 @@
 		return -EIO;
 	}
 
-	if (chip->ecc.mode != NAND_ECC_HW)
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
 		return 0;
 
 	elbc_fcm_ctrl->max_bitflips = 0;
@@ -324,8 +324,7 @@
 	/* READ0 and READ1 read the entire buffer to use hardware ECC. */
 	case NAND_CMD_READ1:
 		column += 256;
-
-	/* fall-through */
+		fallthrough;
 	case NAND_CMD_READ0:
 		dev_dbg(priv->dev,
 		        "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
@@ -728,12 +727,12 @@
 	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
 	unsigned int al;
 
-	switch (chip->ecc.mode) {
+	switch (chip->ecc.engine_type) {
 	/*
 	 * if ECC was not chosen in DT, decide whether to use HW or SW ECC from
 	 * CS Base Register
 	 */
-	case NAND_ECC_NONE:
+	case NAND_ECC_ENGINE_TYPE_NONE:
 		/* If CS Base Register selects full hardware ECC then use it */
 		if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
 		    BR_DECC_CHK_GEN) {
@@ -741,23 +740,23 @@
 			chip->ecc.write_page = fsl_elbc_write_page;
 			chip->ecc.write_subpage = fsl_elbc_write_subpage;
 
-			chip->ecc.mode = NAND_ECC_HW;
+			chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 			mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
 			chip->ecc.size = 512;
 			chip->ecc.bytes = 3;
 			chip->ecc.strength = 1;
 		} else {
 			/* otherwise fall back to default software ECC */
-			chip->ecc.mode = NAND_ECC_SOFT;
-			chip->ecc.algo = NAND_ECC_HAMMING;
+			chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+			chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
 		}
 		break;
 
 	/* if SW ECC was chosen in DT, we do not need to set anything here */
-	case NAND_ECC_SOFT:
+	case NAND_ECC_ENGINE_TYPE_SOFT:
 		break;
 
-	/* should we also implement NAND_ECC_HW to do as the code above? */
+	/* should we also implement *_ECC_ENGINE_CONTROLLER to do as above? */
 	default:
 		return -EINVAL;
 	}
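
For readers tracking the rename: the old single-valued ecc.mode is split into ecc.engine_type (where correction runs) plus ecc.placement and ecc.algo. The correspondence applied throughout this series, as a reference comment:

/*
 * ecc.mode (old)           new fields
 * NAND_ECC_NONE         -> engine_type = NAND_ECC_ENGINE_TYPE_NONE
 * NAND_ECC_SOFT         -> engine_type = NAND_ECC_ENGINE_TYPE_SOFT
 * NAND_ECC_HW           -> engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST
 * NAND_ECC_ON_DIE       -> engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE
 * NAND_ECC_HW_SYNDROME  -> engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST
 *                          + placement = NAND_ECC_PLACEMENT_INTERLEAVED
 * NAND_ECC_HAMMING      -> algo = NAND_ECC_ALGO_HAMMING
 * NAND_ECC_BCH          -> algo = NAND_ECC_ALGO_BCH
 */
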
@@ -787,8 +786,8 @@
 	        chip->page_shift);
 	dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
 	        chip->phys_erase_shift);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
-	        chip->ecc.mode);
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.engine_type = %d\n",
+		chip->ecc.engine_type);
 	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
 	        chip->ecc.steps);
 	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
@@ -957,8 +956,13 @@
 {
 	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
 	struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
+	struct nand_chip *chip = &priv->chip;
+	int ret;
 
-	nand_release(&priv->chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
+
 	fsl_elbc_chip_remove(priv);
 
 	mutex_lock(&fsl_elbc_nand_mutex);
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 2af09ed..e345f9d 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -309,7 +309,7 @@
 		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
 		ifc_nand_ctrl->index += column;
 
-		if (chip->ecc.mode == NAND_ECC_HW)
+		if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
 			ifc_nand_ctrl->eccread = 1;
 
 		fsl_ifc_do_read(chip, 0, mtd);
@@ -707,6 +707,30 @@
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+	u32 csor;
+
+	csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
+
+	/* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
+	if (csor & CSOR_NAND_ECC_DEC_EN) {
+		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+		mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
+
+		/* Hardware generates ECC per 512 Bytes */
+		chip->ecc.size = 512;
+		if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
+			chip->ecc.bytes = 8;
+			chip->ecc.strength = 4;
+		} else {
+			chip->ecc.bytes = 16;
+			chip->ecc.strength = 8;
+		}
+	} else {
+		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+	}
 
 	dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
 		nanddev_ntargets(&chip->base));
@@ -724,8 +748,8 @@
 							chip->page_shift);
 	dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
 							chip->phys_erase_shift);
-	dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
-							chip->ecc.mode);
+	dev_dbg(priv->dev, "%s: nand->ecc.engine_type = %d\n", __func__,
+							chip->ecc.engine_type);
 	dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
 							chip->ecc.steps);
 	dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
@@ -910,25 +934,6 @@
 		return -ENODEV;
 	}
 
-	/* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
-	if (csor & CSOR_NAND_ECC_DEC_EN) {
-		chip->ecc.mode = NAND_ECC_HW;
-		mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
-
-		/* Hardware generates ECC per 512 Bytes */
-		chip->ecc.size = 512;
-		if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
-			chip->ecc.bytes = 8;
-			chip->ecc.strength = 4;
-		} else {
-			chip->ecc.bytes = 16;
-			chip->ecc.strength = 8;
-		}
-	} else {
-		chip->ecc.mode = NAND_ECC_SOFT;
-		chip->ecc.algo = NAND_ECC_HAMMING;
-	}
-
 	ret = fsl_ifc_sram_init(priv);
 	if (ret)
 		return ret;
@@ -1093,8 +1098,13 @@
 static int fsl_ifc_nand_remove(struct platform_device *dev)
 {
 	struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
+	struct nand_chip *chip = &priv->chip;
+	int ret;
 
-	nand_release(&priv->chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
+
 	fsl_ifc_chip_remove(priv);
 
 	mutex_lock(&fsl_ifc_nand_mutex);
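
The CSOR-based ECC setup moves from probe into the ->attach_chip() hook because of when that hook runs: nand_scan() identifies the chip first, then calls attach_chip(), then finalizes the ECC layout, so the chip geometry and mtd->oobsize are valid by the time the ECC parameters are chosen. Roughly, as a sketch of the core's call order (not literal nand_base.c code):

/* Inside nand_scan(), in outline: */
ret = nand_scan_ident(chip, maxchips, NULL);	/* geometry known after this */
if (!ret && chip->controller->ops->attach_chip)
	ret = chip->controller->ops->attach_chip(chip);	/* ECC chosen here */
if (!ret)
	ret = nand_scan_tail(chip);	/* ECC layout and steps finalized here */
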
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index 20b0ee1..d5813b9 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -14,32 +14,23 @@
 #include <linux/mtd/nand_ecc.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/mtd.h>
-#include <linux/of_address.h>
 #include <linux/of_platform.h>
-#include <linux/of_gpio.h>
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <asm/fsl_lbc.h>
 
-#define FSL_UPM_WAIT_RUN_PATTERN  0x1
-#define FSL_UPM_WAIT_WRITE_BYTE   0x2
-#define FSL_UPM_WAIT_WRITE_BUFFER 0x4
-
 struct fsl_upm_nand {
+	struct nand_controller base;
 	struct device *dev;
 	struct nand_chip chip;
-	int last_ctrl;
-	struct mtd_partition *parts;
 	struct fsl_upm upm;
 	uint8_t upm_addr_offset;
 	uint8_t upm_cmd_offset;
 	void __iomem *io_base;
-	int rnb_gpio[NAND_MAX_CHIPS];
+	struct gpio_desc *rnb_gpio[NAND_MAX_CHIPS];
 	uint32_t mchip_offsets[NAND_MAX_CHIPS];
 	uint32_t mchip_count;
 	uint32_t mchip_number;
-	int chip_delay;
-	uint32_t wait_flags;
 };
 
 static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
@@ -48,105 +39,6 @@
 			    chip);
 }
 
-static int fun_chip_ready(struct nand_chip *chip)
-{
-	struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
-
-	if (gpio_get_value(fun->rnb_gpio[fun->mchip_number]))
-		return 1;
-
-	dev_vdbg(fun->dev, "busy\n");
-	return 0;
-}
-
-static void fun_wait_rnb(struct fsl_upm_nand *fun)
-{
-	if (fun->rnb_gpio[fun->mchip_number] >= 0) {
-		int cnt = 1000000;
-
-		while (--cnt && !fun_chip_ready(&fun->chip))
-			cpu_relax();
-		if (!cnt)
-			dev_err(fun->dev, "tired waiting for RNB\n");
-	} else {
-		ndelay(100);
-	}
-}
-
-static void fun_cmd_ctrl(struct nand_chip *chip, int cmd, unsigned int ctrl)
-{
-	struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
-	u32 mar;
-
-	if (!(ctrl & fun->last_ctrl)) {
-		fsl_upm_end_pattern(&fun->upm);
-
-		if (cmd == NAND_CMD_NONE)
-			return;
-
-		fun->last_ctrl = ctrl & (NAND_ALE | NAND_CLE);
-	}
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		if (ctrl & NAND_ALE)
-			fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
-		else if (ctrl & NAND_CLE)
-			fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
-	}
-
-	mar = (cmd << (32 - fun->upm.width)) |
-		fun->mchip_offsets[fun->mchip_number];
-	fsl_upm_run_pattern(&fun->upm, chip->legacy.IO_ADDR_R, mar);
-
-	if (fun->wait_flags & FSL_UPM_WAIT_RUN_PATTERN)
-		fun_wait_rnb(fun);
-}
-
-static void fun_select_chip(struct nand_chip *chip, int mchip_nr)
-{
-	struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
-
-	if (mchip_nr == -1) {
-		chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
-	} else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) {
-		fun->mchip_number = mchip_nr;
-		chip->legacy.IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr];
-		chip->legacy.IO_ADDR_W = chip->legacy.IO_ADDR_R;
-	} else {
-		BUG();
-	}
-}
-
-static uint8_t fun_read_byte(struct nand_chip *chip)
-{
-	struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
-
-	return in_8(fun->chip.legacy.IO_ADDR_R);
-}
-
-static void fun_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
-{
-	struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
-	int i;
-
-	for (i = 0; i < len; i++)
-		buf[i] = in_8(fun->chip.legacy.IO_ADDR_R);
-}
-
-static void fun_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
-{
-	struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
-	int i;
-
-	for (i = 0; i < len; i++) {
-		out_8(fun->chip.legacy.IO_ADDR_W, buf[i]);
-		if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BYTE)
-			fun_wait_rnb(fun);
-	}
-	if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BUFFER)
-		fun_wait_rnb(fun);
-}
-
 static int fun_chip_init(struct fsl_upm_nand *fun,
 			 const struct device_node *upm_np,
 			 const struct resource *io_res)
@@ -155,21 +47,9 @@
 	int ret;
 	struct device_node *flash_np;
 
-	fun->chip.legacy.IO_ADDR_R = fun->io_base;
-	fun->chip.legacy.IO_ADDR_W = fun->io_base;
-	fun->chip.legacy.cmd_ctrl = fun_cmd_ctrl;
-	fun->chip.legacy.chip_delay = fun->chip_delay;
-	fun->chip.legacy.read_byte = fun_read_byte;
-	fun->chip.legacy.read_buf = fun_read_buf;
-	fun->chip.legacy.write_buf = fun_write_buf;
-	fun->chip.ecc.mode = NAND_ECC_SOFT;
-	fun->chip.ecc.algo = NAND_ECC_HAMMING;
-	if (fun->mchip_count > 1)
-		fun->chip.legacy.select_chip = fun_select_chip;
-
-	if (fun->rnb_gpio[0] >= 0)
-		fun->chip.legacy.dev_ready = fun_chip_ready;
-
+	fun->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+	fun->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
+	fun->chip.controller = &fun->base;
 	mtd->dev.parent = fun->dev;
 
 	flash_np = of_get_next_child(upm_np, NULL);
@@ -177,8 +57,9 @@
 		return -ENODEV;
 
 	nand_set_flash_node(&fun->chip, flash_np);
-	mtd->name = kasprintf(GFP_KERNEL, "0x%llx.%pOFn", (u64)io_res->start,
-			      flash_np);
+	mtd->name = devm_kasprintf(fun->dev, GFP_KERNEL, "0x%llx.%pOFn",
+				   (u64)io_res->start,
+				   flash_np);
 	if (!mtd->name) {
 		ret = -ENOMEM;
 		goto err;
@@ -191,51 +72,130 @@
 	ret = mtd_device_register(mtd, NULL, 0);
 err:
 	of_node_put(flash_np);
-	if (ret)
-		kfree(mtd->name);
 	return ret;
 }
 
+static int func_exec_instr(struct nand_chip *chip,
+			   const struct nand_op_instr *instr)
+{
+	struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
+	u32 mar, reg_offs = fun->mchip_offsets[fun->mchip_number];
+	unsigned int i;
+	const u8 *out;
+	u8 *in;
+
+	switch (instr->type) {
+	case NAND_OP_CMD_INSTR:
+		fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
+		mar = (instr->ctx.cmd.opcode << (32 - fun->upm.width)) |
+		      reg_offs;
+		fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar);
+		fsl_upm_end_pattern(&fun->upm);
+		return 0;
+
+	case NAND_OP_ADDR_INSTR:
+		fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
+		for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+			mar = (instr->ctx.addr.addrs[i] << (32 - fun->upm.width)) |
+			      reg_offs;
+			fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar);
+		}
+		fsl_upm_end_pattern(&fun->upm);
+		return 0;
+
+	case NAND_OP_DATA_IN_INSTR:
+		in = instr->ctx.data.buf.in;
+		for (i = 0; i < instr->ctx.data.len; i++)
+			in[i] = in_8(fun->io_base + reg_offs);
+		return 0;
+
+	case NAND_OP_DATA_OUT_INSTR:
+		out = instr->ctx.data.buf.out;
+		for (i = 0; i < instr->ctx.data.len; i++)
+			out_8(fun->io_base + reg_offs, out[i]);
+		return 0;
+
+	case NAND_OP_WAITRDY_INSTR:
+		if (!fun->rnb_gpio[fun->mchip_number])
+			return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
+
+		return nand_gpio_waitrdy(chip, fun->rnb_gpio[fun->mchip_number],
+					 instr->ctx.waitrdy.timeout_ms);
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+		       bool check_only)
+{
+	struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
+	unsigned int i;
+	int ret;
+
+	if (op->cs > NAND_MAX_CHIPS)
+		return -EINVAL;
+
+	if (check_only)
+		return 0;
+
+	fun->mchip_number = op->cs;
+
+	for (i = 0; i < op->ninstrs; i++) {
+		ret = func_exec_instr(chip, &op->instrs[i]);
+		if (ret)
+			return ret;
+
+		if (op->instrs[i].delay_ns)
+			ndelay(op->instrs[i].delay_ns);
+	}
+
+	return 0;
+}
+
+static const struct nand_controller_ops fun_ops = {
+	.exec_op = fun_exec_op,
+};
+
 static int fun_probe(struct platform_device *ofdev)
 {
 	struct fsl_upm_nand *fun;
-	struct resource io_res;
+	struct resource *io_res;
 	const __be32 *prop;
-	int rnb_gpio;
 	int ret;
 	int size;
 	int i;
 
-	fun = kzalloc(sizeof(*fun), GFP_KERNEL);
+	fun = devm_kzalloc(&ofdev->dev, sizeof(*fun), GFP_KERNEL);
 	if (!fun)
 		return -ENOMEM;
 
-	ret = of_address_to_resource(ofdev->dev.of_node, 0, &io_res);
-	if (ret) {
-		dev_err(&ofdev->dev, "can't get IO base\n");
-		goto err1;
-	}
+	io_res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+	fun->io_base = devm_ioremap_resource(&ofdev->dev, io_res);
+	if (IS_ERR(fun->io_base))
+		return PTR_ERR(fun->io_base);
 
-	ret = fsl_upm_find(io_res.start, &fun->upm);
+	ret = fsl_upm_find(io_res->start, &fun->upm);
 	if (ret) {
 		dev_err(&ofdev->dev, "can't find UPM\n");
-		goto err1;
+		return ret;
 	}
 
 	prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset",
 			       &size);
 	if (!prop || size != sizeof(uint32_t)) {
 		dev_err(&ofdev->dev, "can't get UPM address offset\n");
-		ret = -EINVAL;
-		goto err1;
+		return -EINVAL;
 	}
 	fun->upm_addr_offset = *prop;
 
 	prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size);
 	if (!prop || size != sizeof(uint32_t)) {
 		dev_err(&ofdev->dev, "can't get UPM command offset\n");
-		ret = -EINVAL;
-		goto err1;
+		return -EINVAL;
 	}
 	fun->upm_cmd_offset = *prop;
 
@@ -245,7 +205,7 @@
 		fun->mchip_count = size / sizeof(uint32_t);
 		if (fun->mchip_count >= NAND_MAX_CHIPS) {
 			dev_err(&ofdev->dev, "too many chips\n");
-			goto err1;
+			return -EINVAL;
 		}
 		for (i = 0; i < fun->mchip_count; i++)
 			fun->mchip_offsets[i] = be32_to_cpu(prop[i]);
@@ -254,81 +214,38 @@
 	}
 
 	for (i = 0; i < fun->mchip_count; i++) {
-		fun->rnb_gpio[i] = -1;
-		rnb_gpio = of_get_gpio(ofdev->dev.of_node, i);
-		if (rnb_gpio >= 0) {
-			ret = gpio_request(rnb_gpio, dev_name(&ofdev->dev));
-			if (ret) {
-				dev_err(&ofdev->dev,
-					"can't request RNB gpio #%d\n", i);
-				goto err2;
-			}
-			gpio_direction_input(rnb_gpio);
-			fun->rnb_gpio[i] = rnb_gpio;
-		} else if (rnb_gpio == -EINVAL) {
+		fun->rnb_gpio[i] = devm_gpiod_get_index_optional(&ofdev->dev,
+								 NULL, i,
+								 GPIOD_IN);
+		if (IS_ERR(fun->rnb_gpio[i])) {
 			dev_err(&ofdev->dev, "RNB gpio #%d is invalid\n", i);
-			goto err2;
+			return PTR_ERR(fun->rnb_gpio[i]);
 		}
 	}
 
-	prop = of_get_property(ofdev->dev.of_node, "chip-delay", NULL);
-	if (prop)
-		fun->chip_delay = be32_to_cpup(prop);
-	else
-		fun->chip_delay = 50;
-
-	prop = of_get_property(ofdev->dev.of_node, "fsl,upm-wait-flags", &size);
-	if (prop && size == sizeof(uint32_t))
-		fun->wait_flags = be32_to_cpup(prop);
-	else
-		fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN |
-				  FSL_UPM_WAIT_WRITE_BYTE;
-
-	fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
-					    resource_size(&io_res));
-	if (!fun->io_base) {
-		ret = -ENOMEM;
-		goto err2;
-	}
-
+	nand_controller_init(&fun->base);
+	fun->base.ops = &fun_ops;
 	fun->dev = &ofdev->dev;
-	fun->last_ctrl = NAND_CLE;
 
-	ret = fun_chip_init(fun, ofdev->dev.of_node, &io_res);
+	ret = fun_chip_init(fun, ofdev->dev.of_node, io_res);
 	if (ret)
-		goto err2;
+		return ret;
 
 	dev_set_drvdata(&ofdev->dev, fun);
 
 	return 0;
-err2:
-	for (i = 0; i < fun->mchip_count; i++) {
-		if (fun->rnb_gpio[i] < 0)
-			break;
-		gpio_free(fun->rnb_gpio[i]);
-	}
-err1:
-	kfree(fun);
-
-	return ret;
 }
 
 static int fun_remove(struct platform_device *ofdev)
 {
 	struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
-	struct mtd_info *mtd = nand_to_mtd(&fun->chip);
-	int i;
+	struct nand_chip *chip = &fun->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int ret;
 
-	nand_release(&fun->chip);
-	kfree(mtd->name);
-
-	for (i = 0; i < fun->mchip_count; i++) {
-		if (fun->rnb_gpio[i] < 0)
-			break;
-		gpio_free(fun->rnb_gpio[i]);
-	}
-
-	kfree(fun);
+	ret = mtd_device_unregister(mtd);
+	WARN_ON(ret);
+	nand_cleanup(chip);
 
 	return 0;
 }
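
The integer-GPIO API (of_get_gpio()/gpio_request()) is replaced by descriptors, and the open-coded busy-wait by the core helpers. The R/B# handling reduces to the following (helper name hypothetical), where a missing pin falls back to polling the status register because the *_optional request returned NULL:

#include <linux/gpio/consumer.h>
#include <linux/mtd/rawnand.h>

static int example_waitrdy(struct nand_chip *chip, struct gpio_desc *rnb,
			   unsigned int timeout_ms)
{
	if (!rnb)
		return nand_soft_waitrdy(chip, timeout_ms);	/* READ STATUS poll */

	return nand_gpio_waitrdy(chip, rnb, timeout_ms);	/* poll R/B# pin */
}
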
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 81e4b0f..663ff53 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -15,6 +15,7 @@
 
 #include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
@@ -93,6 +94,14 @@
 
 #define FSMC_BUSY_WAIT_TIMEOUT	(1 * HZ)
 
+/*
+ * According to SPEAr300 Reference Manual (RM0082)
+ *  TOUDEL = 7ns (Output delay from the flip-flops to the board)
+ *  TINDEL = 5ns (Input delay from the board to the flipflop)
+ */
+#define TOUTDEL	7000
+#define TINDEL	5000
+
 struct fsmc_nand_timings {
 	u8 tclr;
 	u8 tar;
@@ -277,7 +286,7 @@
 {
 	unsigned long hclk = clk_get_rate(host->clk);
 	unsigned long hclkn = NSEC_PER_SEC / hclk;
-	u32 thiz, thold, twait, tset;
+	u32 thiz, thold, twait, tset, twait_min;
 
 	if (sdrt->tRC_min < 30000)
 		return -EOPNOTSUPP;
@@ -309,13 +318,6 @@
 	else if (tims->thold > FSMC_THOLD_MASK)
 		tims->thold = FSMC_THOLD_MASK;
 
-	twait = max(sdrt->tRP_min, sdrt->tWP_min);
-	tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
-	if (tims->twait == 0)
-		tims->twait = 1;
-	else if (tims->twait > FSMC_TWAIT_MASK)
-		tims->twait = FSMC_TWAIT_MASK;
-
 	tset = max(sdrt->tCS_min - sdrt->tWP_min,
 		   sdrt->tCEA_max - sdrt->tREA_max);
 	tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
@@ -324,11 +326,26 @@
 	else if (tims->tset > FSMC_TSET_MASK)
 		tims->tset = FSMC_TSET_MASK;
 
+	/*
+	 * According to SPEAr300 Reference Manual (RM0082) which gives more
+	 * information related to FSMSC timings than the SPEAr600 one (RM0305),
+	 *   twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL
+	 */
+	twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000)
+		    + TOUTDEL + TINDEL;
+	twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min);
+
+	tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
+	if (tims->twait == 0)
+		tims->twait = 1;
+	else if (tims->twait > FSMC_TWAIT_MASK)
+		tims->twait = FSMC_TWAIT_MASK;
+
 	return 0;
 }
 
-static int fsmc_setup_data_interface(struct nand_chip *nand, int csline,
-				     const struct nand_data_interface *conf)
+static int fsmc_setup_interface(struct nand_chip *nand, int csline,
+				const struct nand_interface_config *conf)
 {
 	struct fsmc_nand_data *host = nand_to_fsmc(nand);
 	struct fsmc_nand_timings tims;
@@ -608,6 +625,9 @@
 	unsigned int op_id;
 	int i;
 
+	if (check_only)
+		return 0;
+
 	pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
 
 	for (op_id = 0; op_id < op->ninstrs; op_id++) {
@@ -650,6 +670,9 @@
 						instr->ctx.waitrdy.timeout_ms);
 			break;
 		}
+
+		if (instr->delay_ns)
+			ndelay(instr->delay_ns);
 	}
 
 	return ret;
@@ -691,7 +714,7 @@
 	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
 		nand_read_page_op(chip, page, s * eccsize, NULL, 0);
 		chip->ecc.hwctl(chip, NAND_ECC_READ);
-		ret = nand_read_data_op(chip, p, eccsize, false);
+		ret = nand_read_data_op(chip, p, eccsize, false, false);
 		if (ret)
 			return ret;
 
@@ -809,11 +832,12 @@
 
 	i = 0;
 	while (num_err--) {
-		change_bit(0, (unsigned long *)&err_idx[i]);
-		change_bit(1, (unsigned long *)&err_idx[i]);
+		err_idx[i] ^= 3;
 
 		if (err_idx[i] < chip->ecc.size * 8) {
-			change_bit(err_idx[i], (unsigned long *)dat);
+			int err = err_idx[i];
+
+			dat[err >> 3] ^= BIT(err & 7);
 			i++;
 		}
 	}
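
The replaced change_bit() calls operated on u8/u16 data recast as unsigned long, which is both endianness- and alignment-sensitive; plain XOR is equivalent and portable. Concretely, err_idx[i] ^= 3 toggles bits 0 and 1 in one step (the two change_bit(0, ...)/change_bit(1, ...) calls it replaces), and flipping bit N of the data buffer becomes (illustrative helper):

#include <linux/bits.h>
#include <linux/types.h>

/* Flip bit 'err' (0-based, counted across the whole buffer) in 'dat'. */
static inline void example_flip_bit(u8 *dat, unsigned int err)
{
	dat[err >> 3] ^= BIT(err & 7);	/* err / 8 = byte, err % 8 = bit */
}
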
@@ -876,6 +900,20 @@
 	struct mtd_info *mtd = nand_to_mtd(nand);
 	struct fsmc_nand_data *host = nand_to_fsmc(nand);
 
+	if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+		nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+	if (!nand->ecc.size)
+		nand->ecc.size = 512;
+
+	if (AMBA_REV_BITS(host->pid) >= 8) {
+		nand->ecc.read_page = fsmc_read_page_hwecc;
+		nand->ecc.calculate = fsmc_read_hwecc_ecc4;
+		nand->ecc.correct = fsmc_bch8_correct_data;
+		nand->ecc.bytes = 13;
+		nand->ecc.strength = 8;
+	}
+
 	if (AMBA_REV_BITS(host->pid) >= 8) {
 		switch (mtd->oobsize) {
 		case 16:
@@ -896,24 +934,25 @@
 		return 0;
 	}
 
-	switch (nand->ecc.mode) {
-	case NAND_ECC_HW:
+	switch (nand->ecc.engine_type) {
+	case NAND_ECC_ENGINE_TYPE_ON_HOST:
 		dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
 		nand->ecc.calculate = fsmc_read_hwecc_ecc1;
 		nand->ecc.correct = nand_correct_data;
+		nand->ecc.hwctl = fsmc_enable_hwecc;
 		nand->ecc.bytes = 3;
 		nand->ecc.strength = 1;
 		nand->ecc.options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
 		break;
 
-	case NAND_ECC_SOFT:
-		if (nand->ecc.algo == NAND_ECC_BCH) {
+	case NAND_ECC_ENGINE_TYPE_SOFT:
+		if (nand->ecc.algo == NAND_ECC_ALGO_BCH) {
 			dev_info(host->dev,
 				 "Using 4-bit SW BCH ECC scheme\n");
 			break;
 		}
 
-	case NAND_ECC_ON_DIE:
+	case NAND_ECC_ENGINE_TYPE_ON_DIE:
 		break;
 
 	default:
@@ -925,7 +964,7 @@
 	 * Don't set layout for BCH4 SW ECC. This will be
 	 * generated later in nand_bch_init() later.
 	 */
-	if (nand->ecc.mode == NAND_ECC_HW) {
+	if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
 		switch (mtd->oobsize) {
 		case 16:
 		case 64:
@@ -947,7 +986,7 @@
 static const struct nand_controller_ops fsmc_nand_controller_ops = {
 	.attach_chip = fsmc_nand_attach_chip,
 	.exec_op = fsmc_exec_op,
-	.setup_data_interface = fsmc_setup_data_interface,
+	.setup_interface = fsmc_setup_interface,
 };
 
 /**
@@ -1051,13 +1090,6 @@
 
 	mtd->dev.parent = &pdev->dev;
 
-	/*
-	 * Setup default ECC mode. nand_dt_init() called from nand_scan_ident()
-	 * can overwrite this value if the DT provides a different value.
-	 */
-	nand->ecc.mode = NAND_ECC_HW;
-	nand->ecc.hwctl = fsmc_enable_hwecc;
-	nand->ecc.size = 512;
 	nand->badblockbits = 7;
 
 	if (host->mode == USE_DMA_ACCESS) {
@@ -1082,14 +1114,6 @@
 		nand->options |= NAND_KEEP_TIMINGS;
 	}
 
-	if (AMBA_REV_BITS(host->pid) >= 8) {
-		nand->ecc.read_page = fsmc_read_page_hwecc;
-		nand->ecc.calculate = fsmc_read_hwecc_ecc4;
-		nand->ecc.correct = fsmc_bch8_correct_data;
-		nand->ecc.bytes = 13;
-		nand->ecc.strength = 8;
-	}
-
 	nand_controller_init(&host->base);
 	host->base.ops = &fsmc_nand_controller_ops;
 	nand->controller = &host->base;
@@ -1134,7 +1158,12 @@
 	struct fsmc_nand_data *host = platform_get_drvdata(pdev);
 
 	if (host) {
-		nand_release(&host->nand);
+		struct nand_chip *chip = &host->nand;
+		int ret;
+
+		ret = mtd_device_unregister(nand_to_mtd(chip));
+		WARN_ON(ret);
+		nand_cleanup(chip);
 		fsmc_nand_disable(host);
 
 		if (host->mode == USE_DMA_ACCESS) {
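
The relocated twait computation now also honours the chip-enable-to-output path: per the cited manual, twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL. With hypothetical numbers the effect is easy to check in a standalone program:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TOUTDEL	7000	/* ps, flip-flops to board (RM0082) */
#define TINDEL	5000	/* ps, board to flip-flop (RM0082) */

int main(void)
{
	const unsigned int hclkn = 6;		/* ns per HCLK; hypothetical ~166 MHz */
	const unsigned int tCEA_max = 25000;	/* ps, hypothetical */
	const unsigned int tRP_min = 15000;	/* ps, hypothetical */
	const unsigned int tWP_min = 15000;	/* ps, hypothetical */
	const unsigned int tset = 0;		/* clocks, assume already derived */
	unsigned int twait, twait_min, twait_clks;

	/* twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL */
	twait_min = tCEA_max - (tset + 1) * hclkn * 1000 + TOUTDEL + TINDEL;

	/* max3(tRP_min, tWP_min, twait_min), spelled out */
	twait = tRP_min > tWP_min ? tRP_min : tWP_min;
	if (twait_min > twait)
		twait = twait_min;

	/* ps -> ns -> clocks; the driver then clamps to [1, FSMC_TWAIT_MASK] */
	twait_clks = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
	printf("twait = %u ps -> %u clocks\n", twait, twait_clks); /* 31000 ps -> 5 */
	return 0;
}
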
diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c
index f6b1235..fdf073d 100644
--- a/drivers/mtd/nand/raw/gpio.c
+++ b/drivers/mtd/nand/raw/gpio.c
@@ -25,8 +25,11 @@
 #include <linux/mtd/nand-gpio.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/delay.h>
 
 struct gpiomtd {
+	struct nand_controller	base;
+	void __iomem		*io;
 	void __iomem		*io_sync;
 	struct nand_chip	nand_chip;
 	struct gpio_nand_platdata plat;
@@ -69,34 +72,109 @@
 static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
 #endif
 
-static void gpio_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
-			       unsigned int ctrl)
+static int gpio_nand_exec_instr(struct nand_chip *chip,
+				const struct nand_op_instr *instr)
 {
 	struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
+	unsigned int i;
 
-	gpio_nand_dosync(gpiomtd);
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		if (gpiomtd->nce)
-			gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE));
-		gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE));
-		gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE));
+	switch (instr->type) {
+	case NAND_OP_CMD_INSTR:
 		gpio_nand_dosync(gpiomtd);
-	}
-	if (cmd == NAND_CMD_NONE)
-		return;
+		gpiod_set_value(gpiomtd->cle, 1);
+		gpio_nand_dosync(gpiomtd);
+		writeb(instr->ctx.cmd.opcode, gpiomtd->io);
+		gpio_nand_dosync(gpiomtd);
+		gpiod_set_value(gpiomtd->cle, 0);
+		return 0;
 
-	writeb(cmd, gpiomtd->nand_chip.legacy.IO_ADDR_W);
-	gpio_nand_dosync(gpiomtd);
+	case NAND_OP_ADDR_INSTR:
+		gpio_nand_dosync(gpiomtd);
+		gpiod_set_value(gpiomtd->ale, 1);
+		gpio_nand_dosync(gpiomtd);
+		for (i = 0; i < instr->ctx.addr.naddrs; i++)
+			writeb(instr->ctx.addr.addrs[i], gpiomtd->io);
+		gpio_nand_dosync(gpiomtd);
+		gpiod_set_value(gpiomtd->ale, 0);
+		return 0;
+
+	case NAND_OP_DATA_IN_INSTR:
+		gpio_nand_dosync(gpiomtd);
+		if ((chip->options & NAND_BUSWIDTH_16) &&
+		    !instr->ctx.data.force_8bit)
+			ioread16_rep(gpiomtd->io, instr->ctx.data.buf.in,
+				     instr->ctx.data.len / 2);
+		else
+			ioread8_rep(gpiomtd->io, instr->ctx.data.buf.in,
+				    instr->ctx.data.len);
+		return 0;
+
+	case NAND_OP_DATA_OUT_INSTR:
+		gpio_nand_dosync(gpiomtd);
+		if ((chip->options & NAND_BUSWIDTH_16) &&
+		    !instr->ctx.data.force_8bit)
+			iowrite16_rep(gpiomtd->io, instr->ctx.data.buf.out,
+				      instr->ctx.data.len / 2);
+		else
+			iowrite8_rep(gpiomtd->io, instr->ctx.data.buf.out,
+				     instr->ctx.data.len);
+		return 0;
+
+	case NAND_OP_WAITRDY_INSTR:
+		if (!gpiomtd->rdy)
+			return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
+
+		return nand_gpio_waitrdy(chip, gpiomtd->rdy,
+					 instr->ctx.waitrdy.timeout_ms);
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
-static int gpio_nand_devready(struct nand_chip *chip)
+static int gpio_nand_exec_op(struct nand_chip *chip,
+			     const struct nand_operation *op,
+			     bool check_only)
 {
 	struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
+	unsigned int i;
+	int ret = 0;
 
-	return gpiod_get_value(gpiomtd->rdy);
+	if (check_only)
+		return 0;
+
+	gpio_nand_dosync(gpiomtd);
+	gpiod_set_value(gpiomtd->nce, 0);
+	for (i = 0; i < op->ninstrs; i++) {
+		ret = gpio_nand_exec_instr(chip, &op->instrs[i]);
+		if (ret)
+			break;
+
+		if (op->instrs[i].delay_ns)
+			ndelay(op->instrs[i].delay_ns);
+	}
+	gpio_nand_dosync(gpiomtd);
+	gpiod_set_value(gpiomtd->nce, 1);
+
+	return ret;
 }
 
+static int gpio_nand_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+	return 0;
+}
+
+static const struct nand_controller_ops gpio_nand_ops = {
+	.exec_op = gpio_nand_exec_op,
+	.attach_chip = gpio_nand_attach_chip,
+};
+
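
For context on the conversion above: with ->exec_op(), the NAND core hands the controller a flat array of instructions instead of calling the legacy cmd_ctrl/dev_ready hooks. A hedged sketch of roughly what the core builds for a STATUS read (cf. nand_status_op() in nand_base.c); the fragment assumes a `struct nand_chip *chip` in scope, and the delay is whatever the chip's SDR timings mandate:

```c
/* Sketch: an operation as consumed by gpio_nand_exec_op() above. */
const struct nand_sdr_timings *sdr =
	nand_get_sdr_timings(nand_get_interface_config(chip));
u8 status;
struct nand_op_instr instrs[] = {
	NAND_OP_CMD(NAND_CMD_STATUS, PSEC_TO_NSEC(sdr->tADL_min)),
	NAND_OP_8BIT_DATA_IN(1, &status, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

return nand_exec_op(chip, &op);
```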
 #ifdef CONFIG_OF
 static const struct of_device_id gpio_nand_id_table[] = {
 	{ .compatible = "gpio-control-nand" },
@@ -190,8 +268,12 @@
 static int gpio_nand_remove(struct platform_device *pdev)
 {
 	struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &gpiomtd->nand_chip;
+	int ret;
 
-	nand_release(&gpiomtd->nand_chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 
 	/* Enable write protection and disable the chip */
 	if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
@@ -221,9 +303,9 @@
 	chip = &gpiomtd->nand_chip;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	chip->legacy.IO_ADDR_R = devm_ioremap_resource(dev, res);
-	if (IS_ERR(chip->legacy.IO_ADDR_R))
-		return PTR_ERR(chip->legacy.IO_ADDR_R);
+	gpiomtd->io = devm_ioremap_resource(dev, res);
+	if (IS_ERR(gpiomtd->io))
+		return PTR_ERR(gpiomtd->io);
 
 	res = gpio_nand_get_io_sync(pdev);
 	if (res) {
@@ -265,17 +347,13 @@
 		ret = PTR_ERR(gpiomtd->rdy);
 		goto out_ce;
 	}
-	/* Using RDY pin */
-	if (gpiomtd->rdy)
-		chip->legacy.dev_ready = gpio_nand_devready;
+
+	nand_controller_init(&gpiomtd->base);
+	gpiomtd->base.ops = &gpio_nand_ops;
 
 	nand_set_flash_node(chip, pdev->dev.of_node);
-	chip->legacy.IO_ADDR_W	= chip->legacy.IO_ADDR_R;
-	chip->ecc.mode		= NAND_ECC_SOFT;
-	chip->ecc.algo		= NAND_ECC_HAMMING;
 	chip->options		= gpiomtd->plat.options;
-	chip->legacy.chip_delay	= gpiomtd->plat.chip_delay;
-	chip->legacy.cmd_ctrl	= gpio_nand_cmd_ctrl;
+	chip->controller	= &gpiomtd->base;
 
 	mtd			= nand_to_mtd(chip);
 	mtd->dev.parent		= dev;
@@ -286,6 +364,13 @@
 	if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
 		gpiod_direction_output(gpiomtd->nwp, 1);
 
+	/*
+	 * This driver assumes that the default ECC engine should be TYPE_SOFT.
+	 * Set ->engine_type before registering the NAND devices in order to
+	 * provide a driver specific default value.
+	 */
+	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
 	ret = nand_scan(chip, 1);
 	if (ret)
 		goto err_wp;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 60f1469..cb76311 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -274,8 +274,8 @@
 	default:
 		dev_err(this->dev,
 			"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
-			chip->base.eccreq.strength,
-			chip->base.eccreq.step_size);
+			nanddev_get_ecc_requirements(&chip->base)->strength,
+			nanddev_get_ecc_requirements(&chip->base)->step_size);
 		return -EINVAL;
 	}
 	geo->ecc_chunk_size = ecc_step;
@@ -512,6 +512,8 @@
 static int common_nfc_set_geometry(struct gpmi_nand_data *this)
 {
 	struct nand_chip *chip = &this->nand;
+	const struct nand_ecc_props *requirements =
+		nanddev_get_ecc_requirements(&chip->base);
 
 	if (chip->ecc.strength > 0 && chip->ecc.size > 0)
 		return set_geometry_by_ecc_info(this, chip->ecc.strength,
@@ -519,13 +521,12 @@
 
 	if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
 				|| legacy_set_geometry(this)) {
-		if (!(chip->base.eccreq.strength > 0 &&
-		      chip->base.eccreq.step_size > 0))
+		if (!(requirements->strength > 0 && requirements->step_size > 0))
 			return -EINVAL;
 
 		return set_geometry_by_ecc_info(this,
-						chip->base.eccreq.strength,
-						chip->base.eccreq.step_size);
+						requirements->strength,
+						requirements->step_size);
 	}
 
 	return 0;
@@ -710,14 +711,32 @@
 			      (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
 }
 
-static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
+static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
 {
 	struct gpmi_nfc_hardware_timing *hw = &this->hw;
 	struct resources *r = &this->resources;
 	void __iomem *gpmi_regs = r->gpmi_regs;
 	unsigned int dll_wait_time_us;
+	int ret;
 
-	clk_set_rate(r->clock[0], hw->clk_rate);
+	/* Clock dividers do NOT guarantee a clean clock signal on their
+	 * outputs during a change of the divide factor on i.MX6Q/UL/SX.
+	 * On i.MX7/8, all clock dividers do provide this guarantee.
+	 */
+	if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
+		clk_disable_unprepare(r->clock[0]);
+
+	ret = clk_set_rate(r->clock[0], hw->clk_rate);
+	if (ret) {
+		dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
+		return ret;
+	}
+
+	if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
+		ret = clk_prepare_enable(r->clock[0]);
+		if (ret)
+			return ret;
+	}
 
 	writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
 	writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
@@ -736,10 +755,12 @@
 
 	/* Wait for the DLL to settle. */
 	udelay(dll_wait_time_us);
+
+	return 0;
 }
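
The comment explains the constraint; reduced to a standalone helper, the hedged pattern is (clk handle and error policy as in the hunk above):

```c
/* Sketch: glitch-free rate change on dividers that are not
 * guaranteed glitch-free while the divide factor changes.
 */
static int gated_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	clk_disable_unprepare(clk);	/* gate the divider output */
	ret = clk_set_rate(clk, rate);	/* change the factor while gated */
	if (ret)
		return ret;

	return clk_prepare_enable(clk);	/* ungate a clean signal */
}
```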
 
-static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
-				     const struct nand_data_interface *conf)
+static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
+				const struct nand_interface_config *conf)
 {
 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
 	const struct nand_sdr_timings *sdr;
@@ -838,158 +859,6 @@
 	return false;
 }
 
-/**
- * gpmi_copy_bits - copy bits from one memory region to another
- * @dst: destination buffer
- * @dst_bit_off: bit offset we're starting to write at
- * @src: source buffer
- * @src_bit_off: bit offset we're starting to read from
- * @nbits: number of bits to copy
- *
- * This functions copies bits from one memory region to another, and is used by
- * the GPMI driver to copy ECC sections which are not guaranteed to be byte
- * aligned.
- *
- * src and dst should not overlap.
- *
- */
-static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src,
-			   size_t src_bit_off, size_t nbits)
-{
-	size_t i;
-	size_t nbytes;
-	u32 src_buffer = 0;
-	size_t bits_in_src_buffer = 0;
-
-	if (!nbits)
-		return;
-
-	/*
-	 * Move src and dst pointers to the closest byte pointer and store bit
-	 * offsets within a byte.
-	 */
-	src += src_bit_off / 8;
-	src_bit_off %= 8;
-
-	dst += dst_bit_off / 8;
-	dst_bit_off %= 8;
-
-	/*
-	 * Initialize the src_buffer value with bits available in the first
-	 * byte of data so that we end up with a byte aligned src pointer.
-	 */
-	if (src_bit_off) {
-		src_buffer = src[0] >> src_bit_off;
-		if (nbits >= (8 - src_bit_off)) {
-			bits_in_src_buffer += 8 - src_bit_off;
-		} else {
-			src_buffer &= GENMASK(nbits - 1, 0);
-			bits_in_src_buffer += nbits;
-		}
-		nbits -= bits_in_src_buffer;
-		src++;
-	}
-
-	/* Calculate the number of bytes that can be copied from src to dst. */
-	nbytes = nbits / 8;
-
-	/* Try to align dst to a byte boundary. */
-	if (dst_bit_off) {
-		if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
-			src_buffer |= src[0] << bits_in_src_buffer;
-			bits_in_src_buffer += 8;
-			src++;
-			nbytes--;
-		}
-
-		if (bits_in_src_buffer >= (8 - dst_bit_off)) {
-			dst[0] &= GENMASK(dst_bit_off - 1, 0);
-			dst[0] |= src_buffer << dst_bit_off;
-			src_buffer >>= (8 - dst_bit_off);
-			bits_in_src_buffer -= (8 - dst_bit_off);
-			dst_bit_off = 0;
-			dst++;
-			if (bits_in_src_buffer > 7) {
-				bits_in_src_buffer -= 8;
-				dst[0] = src_buffer;
-				dst++;
-				src_buffer >>= 8;
-			}
-		}
-	}
-
-	if (!bits_in_src_buffer && !dst_bit_off) {
-		/*
-		 * Both src and dst pointers are byte aligned, thus we can
-		 * just use the optimized memcpy function.
-		 */
-		if (nbytes)
-			memcpy(dst, src, nbytes);
-	} else {
-		/*
-		 * src buffer is not byte aligned, hence we have to copy each
-		 * src byte to the src_buffer variable before extracting a byte
-		 * to store in dst.
-		 */
-		for (i = 0; i < nbytes; i++) {
-			src_buffer |= src[i] << bits_in_src_buffer;
-			dst[i] = src_buffer;
-			src_buffer >>= 8;
-		}
-	}
-	/* Update dst and src pointers */
-	dst += nbytes;
-	src += nbytes;
-
-	/*
-	 * nbits is the number of remaining bits. It should not exceed 8 as
-	 * we've already copied as much bytes as possible.
-	 */
-	nbits %= 8;
-
-	/*
-	 * If there's no more bits to copy to the destination and src buffer
-	 * was already byte aligned, then we're done.
-	 */
-	if (!nbits && !bits_in_src_buffer)
-		return;
-
-	/* Copy the remaining bits to src_buffer */
-	if (nbits)
-		src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
-			      bits_in_src_buffer;
-	bits_in_src_buffer += nbits;
-
-	/*
-	 * In case there were not enough bits to get a byte aligned dst buffer
-	 * prepare the src_buffer variable to match the dst organization (shift
-	 * src_buffer by dst_bit_off and retrieve the least significant bits
-	 * from dst).
-	 */
-	if (dst_bit_off)
-		src_buffer = (src_buffer << dst_bit_off) |
-			     (*dst & GENMASK(dst_bit_off - 1, 0));
-	bits_in_src_buffer += dst_bit_off;
-
-	/*
-	 * Keep most significant bits from dst if we end up with an unaligned
-	 * number of bits.
-	 */
-	nbytes = bits_in_src_buffer / 8;
-	if (bits_in_src_buffer % 8) {
-		src_buffer |= (dst[nbytes] &
-			       GENMASK(7, bits_in_src_buffer % 8)) <<
-			      (nbytes * 8);
-		nbytes++;
-	}
-
-	/* Copy the remaining bytes to dst */
-	for (i = 0; i < nbytes; i++) {
-		dst[i] = src_buffer;
-		src_buffer >>= 8;
-	}
-}
-
 /* add our own bbt descriptor */
 static uint8_t scan_ff_pattern[] = { 0xff };
 static struct nand_bbt_descr gpmi_bbt_descr = {
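
The ~150 deleted lines are replaced by the core helper nand_extract_bits(), which performs the same arbitrary-bit-offset copy. Its signature and a hedged usage sketch (offsets are in bits, values illustrative):

```c
/* void nand_extract_bits(u8 *dst, unsigned int dst_off,
 *                        const u8 *src, unsigned int src_off,
 *                        unsigned int nbits);
 *
 * e.g. copy a 13-bit ECC field that starts 3 bits into src to the
 * front of dst:
 */
nand_extract_bits(dst, 0, src, 3, 13);
```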
@@ -1152,20 +1021,19 @@
 {
 	struct platform_device *pdev = this->pdev;
 	struct dma_chan *dma_chan;
+	int ret = 0;
 
 	/* request dma channel */
-	dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
-	if (!dma_chan) {
-		dev_err(this->dev, "Failed to request DMA channel.\n");
-		goto acquire_err;
+	dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
+	if (IS_ERR(dma_chan)) {
+		ret = dev_err_probe(this->dev, PTR_ERR(dma_chan),
+				    "DMA channel request failed\n");
+		release_dma_channels(this);
+	} else {
+		this->dma_chans[0] = dma_chan;
 	}
 
-	this->dma_chans[0] = dma_chan;
-	return 0;
-
-acquire_err:
-	release_dma_channels(this);
-	return -EINVAL;
+	return ret;
 }
 
 static int gpmi_get_clks(struct gpmi_nand_data *this)
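
dma_request_slave_channel() returned NULL on any failure, losing -EPROBE_DEFER; dma_request_chan() returns an ERR_PTR, and dev_err_probe() both propagates the error and stays silent on probe deferral. The idiom in isolation (channel name as in the hunk):

```c
/* Sketch: ERR_PTR-based DMA channel request with deferral-aware
 * logging; dev_err_probe() returns the error code unchanged.
 */
struct dma_chan *chan;

chan = dma_request_chan(dev, "rx-tx");
if (IS_ERR(chan))
	return dev_err_probe(dev, PTR_ERR(chan),
			     "DMA channel request failed\n");
```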
@@ -1184,15 +1052,6 @@
 		r->clock[i] = clk;
 	}
 
-	if (GPMI_IS_MX6(this))
-		/*
-		 * Set the default value for the gpmi clock.
-		 *
-		 * If you want to use the ONFI nand which is in the
-		 * Synchronous Mode, you should change the clock as you need.
-		 */
-		clk_set_rate(r->clock[0], 22000000);
-
 	return 0;
 
 err_clock:
@@ -1716,7 +1575,7 @@
  * inline (interleaved with payload DATA), and do not align data chunk on
  * byte boundaries.
  * We thus need to take care moving the payload data and ECC bits stored in the
- * page into the provided buffers, which is why we're using gpmi_copy_bits.
+ * page into the provided buffers, which is why we're using nand_extract_bits().
  *
  * See set_geometry_by_ecc_info inline comments to have a full description
  * of the layout used by the GPMI controller.
@@ -1765,9 +1624,8 @@
 	/* Extract interleaved payload data and ECC bits */
 	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
 		if (buf)
-			gpmi_copy_bits(buf, step * eccsize * 8,
-				       tmp_buf, src_bit_off,
-				       eccsize * 8);
+			nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
+					  src_bit_off, eccsize * 8);
 		src_bit_off += eccsize * 8;
 
 		/* Align last ECC block to align a byte boundary */
@@ -1776,9 +1634,8 @@
 			eccbits += 8 - ((oob_bit_off + eccbits) % 8);
 
 		if (oob_required)
-			gpmi_copy_bits(oob, oob_bit_off,
-				       tmp_buf, src_bit_off,
-				       eccbits);
+			nand_extract_bits(oob, oob_bit_off, tmp_buf,
+					  src_bit_off, eccbits);
 
 		src_bit_off += eccbits;
 		oob_bit_off += eccbits;
@@ -1803,7 +1660,7 @@
  * inline (interleaved with payload DATA), and do not align data chunk on
  * byte boundaries.
  * We thus need to take care moving the OOB area at the right place in the
- * final page, which is why we're using gpmi_copy_bits.
+ * final page, which is why we're using nand_extract_bits().
  *
  * See set_geometry_by_ecc_info inline comments to have a full description
  * of the layout used by the GPMI controller.
@@ -1842,8 +1699,8 @@
 	/* Interleave payload data and ECC bits */
 	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
 		if (buf)
-			gpmi_copy_bits(tmp_buf, dst_bit_off,
-				       buf, step * eccsize * 8, eccsize * 8);
+			nand_extract_bits(tmp_buf, dst_bit_off, buf,
+					  step * eccsize * 8, eccsize * 8);
 		dst_bit_off += eccsize * 8;
 
 		/* Align last ECC block to align a byte boundary */
@@ -1852,8 +1709,8 @@
 			eccbits += 8 - ((oob_bit_off + eccbits) % 8);
 
 		if (oob_required)
-			gpmi_copy_bits(tmp_buf, dst_bit_off,
-				       oob, oob_bit_off, eccbits);
+			nand_extract_bits(tmp_buf, dst_bit_off, oob,
+					  oob_bit_off, eccbits);
 
 		dst_bit_off += eccbits;
 		oob_bit_off += eccbits;
@@ -2187,7 +2044,7 @@
 	ecc->write_page_raw = gpmi_ecc_write_page_raw;
 	ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
 	ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
-	ecc->mode	= NAND_ECC_HW;
+	ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 	ecc->size	= bch_geo->ecc_chunk_size;
 	ecc->strength	= bch_geo->ecc_strength;
 	mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
@@ -2411,6 +2268,9 @@
 	struct completion *dma_completion, *bch_completion;
 	unsigned long to;
 
+	if (check_only)
+		return 0;
+
 	this->ntransfers = 0;
 	for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
 		this->transfers[i].direction = DMA_NONE;
@@ -2429,7 +2289,9 @@
 	 */
 	if (this->hw.must_apply_timings) {
 		this->hw.must_apply_timings = false;
-		gpmi_nfc_apply_timings(this);
+		ret = gpmi_nfc_apply_timings(this);
+		if (ret)
+			goto out_pm;
 	}
 
 	dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
@@ -2558,6 +2420,7 @@
 
 	this->bch = false;
 
+out_pm:
 	pm_runtime_mark_last_busy(this->dev);
 	pm_runtime_put_autosuspend(this->dev);
 
@@ -2566,7 +2429,7 @@
 
 static const struct nand_controller_ops gpmi_nand_controller_ops = {
 	.attach_chip = gpmi_nand_attach_chip,
-	.setup_data_interface = gpmi_setup_data_interface,
+	.setup_interface = gpmi_setup_interface,
 	.exec_op = gpmi_nfc_exec_op,
 };
 
@@ -2675,7 +2538,7 @@
 
 	ret = __gpmi_enable_clk(this, true);
 	if (ret)
-		goto exit_nfc_init;
+		goto exit_acquire_resources;
 
 	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
 	pm_runtime_use_autosuspend(&pdev->dev);
@@ -2710,11 +2573,15 @@
 static int gpmi_nand_remove(struct platform_device *pdev)
 {
 	struct gpmi_nand_data *this = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &this->nand;
+	int ret;
 
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
-	nand_release(&this->nand);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 	gpmi_free_dma_buffer(this);
 	release_resources(this);
 	return 0;
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index 6a4626a..8b2122c 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -186,7 +186,7 @@
 	hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA);
 	hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB);
 
-	if (chip->ecc.mode == NAND_ECC_NONE) {
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
 		hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK)
 			<< HINFC504_DMA_LEN_OOB_SHIFT), HINFC504_DMA_LEN);
 
@@ -468,7 +468,7 @@
 
 	case NAND_CMD_STATUS:
 		flag = hinfc_read(host, HINFC504_CON);
-		if (chip->ecc.mode == NAND_ECC_HW)
+		if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
 			hinfc_write(host,
 				    flag & ~(HINFC504_CON_ECCTYPE_MASK <<
 				    HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON);
@@ -721,7 +721,7 @@
 	}
 	hinfc_write(host, flag, HINFC504_CON);
 
-	if (chip->ecc.mode == NAND_ECC_HW)
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
 		hisi_nfc_ecc_probe(host);
 
 	return 0;
@@ -751,10 +751,8 @@
 	mtd  = nand_to_mtd(chip);
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "no IRQ resource defined\n");
+	if (irq < 0)
 		return -ENXIO;
-	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	host->iobase = devm_ioremap_resource(dev, res);
@@ -808,8 +806,12 @@
 static int hisi_nfc_remove(struct platform_device *pdev)
 {
 	struct hinfc_host *host = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &host->chip;
+	int ret;
 
-	nand_release(&host->chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig
index e30feb5..96c5ae8 100644
--- a/drivers/mtd/nand/raw/ingenic/Kconfig
+++ b/drivers/mtd/nand/raw/ingenic/Kconfig
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config MTD_NAND_JZ4780
 	tristate "JZ4780 NAND controller"
+	depends on MIPS || COMPILE_TEST
 	depends on JZ4780_NEMC
 	help
 	  Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index c954189..8e22cd6 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -124,7 +124,6 @@
 {
 	struct device *dev = &pdev->dev;
 	struct ingenic_ecc *ecc;
-	struct resource *res;
 
 	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
 	if (!ecc)
@@ -134,8 +133,7 @@
 	if (!ecc->ops)
 		return -EINVAL;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	ecc->base = devm_ioremap_resource(dev, res);
+	ecc->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(ecc->base))
 		return PTR_ERR(ecc->base);
 
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index 4b7c399..0e9d426 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -27,9 +27,6 @@
 
 #define DRV_NAME	"ingenic-nand"
 
-/* Command delay when there is no R/B pin. */
-#define RB_DELAY_US	100
-
 struct jz_soc_info {
 	unsigned long data_offset;
 	unsigned long addr_offset;
@@ -49,7 +46,6 @@
 	struct nand_controller controller;
 	unsigned int num_banks;
 	struct list_head chips;
-	int selected;
 	struct ingenic_nand_cs cs[];
 };
 
@@ -102,7 +98,7 @@
 	return 0;
 }
 
-const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
+static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
 	.ecc = qi_lb60_ooblayout_ecc,
 	.free = qi_lb60_ooblayout_free,
 };
@@ -142,51 +138,6 @@
 	.free = jz4725b_ooblayout_free,
 };
 
-static void ingenic_nand_select_chip(struct nand_chip *chip, int chipnr)
-{
-	struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
-	struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
-	struct ingenic_nand_cs *cs;
-
-	/* Ensure the currently selected chip is deasserted. */
-	if (chipnr == -1 && nfc->selected >= 0) {
-		cs = &nfc->cs[nfc->selected];
-		jz4780_nemc_assert(nfc->dev, cs->bank, false);
-	}
-
-	nfc->selected = chipnr;
-}
-
-static void ingenic_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
-				  unsigned int ctrl)
-{
-	struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
-	struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
-	struct ingenic_nand_cs *cs;
-
-	if (WARN_ON(nfc->selected < 0))
-		return;
-
-	cs = &nfc->cs[nfc->selected];
-
-	jz4780_nemc_assert(nfc->dev, cs->bank, ctrl & NAND_NCE);
-
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	if (ctrl & NAND_ALE)
-		writeb(cmd, cs->base + nfc->soc_info->addr_offset);
-	else if (ctrl & NAND_CLE)
-		writeb(cmd, cs->base + nfc->soc_info->cmd_offset);
-}
-
-static int ingenic_nand_dev_ready(struct nand_chip *chip)
-{
-	struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
-
-	return !gpiod_get_value_cansleep(nand->busy_gpio);
-}
-
 static void ingenic_nand_ecc_hwctl(struct nand_chip *chip, int mode)
 {
 	struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
@@ -243,8 +194,8 @@
 				  (chip->ecc.strength / 8);
 	}
 
-	switch (chip->ecc.mode) {
-	case NAND_ECC_HW:
+	switch (chip->ecc.engine_type) {
+	case NAND_ECC_ENGINE_TYPE_ON_HOST:
 		if (!nfc->ecc) {
 			dev_err(nfc->dev, "HW ECC selected, but ECC controller not found\n");
 			return -ENODEV;
@@ -253,23 +204,23 @@
 		chip->ecc.hwctl = ingenic_nand_ecc_hwctl;
 		chip->ecc.calculate = ingenic_nand_ecc_calculate;
 		chip->ecc.correct = ingenic_nand_ecc_correct;
-		/* fall through */
-	case NAND_ECC_SOFT:
+		fallthrough;
+	case NAND_ECC_ENGINE_TYPE_SOFT:
 		dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
 			 (nfc->ecc) ? "hardware ECC" : "software ECC",
 			 chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
 		break;
-	case NAND_ECC_NONE:
+	case NAND_ECC_ENGINE_TYPE_NONE:
 		dev_info(nfc->dev, "not using ECC\n");
 		break;
 	default:
 		dev_err(nfc->dev, "ECC mode %d not supported\n",
-			chip->ecc.mode);
+			chip->ecc.engine_type);
 		return -EINVAL;
 	}
 
 	/* The NAND core will generate the ECC layout for SW ECC */
-	if (chip->ecc.mode != NAND_ECC_HW)
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
 		return 0;
 
 	/* Generate ECC layout. ECC codes are right aligned in the OOB area. */
@@ -292,14 +243,99 @@
 	/* For legacy reasons we use a different layout on the qi,lb60 board. */
 	if (of_machine_is_compatible("qi,lb60"))
 		mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops);
-	else
+	else if (nfc->soc_info->oob_layout)
 		mtd_set_ooblayout(mtd, nfc->soc_info->oob_layout);
+	else
+		mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
 
 	return 0;
 }
 
+static int ingenic_nand_exec_instr(struct nand_chip *chip,
+				   struct ingenic_nand_cs *cs,
+				   const struct nand_op_instr *instr)
+{
+	struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+	struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller);
+	unsigned int i;
+
+	switch (instr->type) {
+	case NAND_OP_CMD_INSTR:
+		writeb(instr->ctx.cmd.opcode,
+		       cs->base + nfc->soc_info->cmd_offset);
+		return 0;
+	case NAND_OP_ADDR_INSTR:
+		for (i = 0; i < instr->ctx.addr.naddrs; i++)
+			writeb(instr->ctx.addr.addrs[i],
+			       cs->base + nfc->soc_info->addr_offset);
+		return 0;
+	case NAND_OP_DATA_IN_INSTR:
+		if (instr->ctx.data.force_8bit ||
+		    !(chip->options & NAND_BUSWIDTH_16))
+			ioread8_rep(cs->base + nfc->soc_info->data_offset,
+				    instr->ctx.data.buf.in,
+				    instr->ctx.data.len);
+		else
+			ioread16_rep(cs->base + nfc->soc_info->data_offset,
+				     instr->ctx.data.buf.in,
+				     instr->ctx.data.len);
+		return 0;
+	case NAND_OP_DATA_OUT_INSTR:
+		if (instr->ctx.data.force_8bit ||
+		    !(chip->options & NAND_BUSWIDTH_16))
+			iowrite8_rep(cs->base + nfc->soc_info->data_offset,
+				     instr->ctx.data.buf.out,
+				     instr->ctx.data.len);
+		else
+			iowrite16_rep(cs->base + nfc->soc_info->data_offset,
+				      instr->ctx.data.buf.out,
+				      instr->ctx.data.len);
+		return 0;
+	case NAND_OP_WAITRDY_INSTR:
+		if (!nand->busy_gpio)
+			return nand_soft_waitrdy(chip,
+						 instr->ctx.waitrdy.timeout_ms);
+
+		return nand_gpio_waitrdy(chip, nand->busy_gpio,
+					 instr->ctx.waitrdy.timeout_ms);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int ingenic_nand_exec_op(struct nand_chip *chip,
+				const struct nand_operation *op,
+				bool check_only)
+{
+	struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+	struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+	struct ingenic_nand_cs *cs;
+	unsigned int i;
+	int ret = 0;
+
+	if (check_only)
+		return 0;
+
+	cs = &nfc->cs[op->cs];
+	jz4780_nemc_assert(nfc->dev, cs->bank, true);
+	for (i = 0; i < op->ninstrs; i++) {
+		ret = ingenic_nand_exec_instr(chip, cs, &op->instrs[i]);
+		if (ret)
+			break;
+
+		if (op->instrs[i].delay_ns)
+			ndelay(op->instrs[i].delay_ns);
+	}
+	jz4780_nemc_assert(nfc->dev, cs->bank, false);
+
+	return ret;
+}
+
 static const struct nand_controller_ops ingenic_nand_controller_ops = {
 	.attach_chip = ingenic_nand_attach_chip,
+	.exec_op = ingenic_nand_exec_op,
 };
 
 static int ingenic_nand_init_chip(struct platform_device *pdev,
@@ -339,10 +375,20 @@
 		ret = PTR_ERR(nand->busy_gpio);
 		dev_err(dev, "failed to request busy GPIO: %d\n", ret);
 		return ret;
-	} else if (nand->busy_gpio) {
-		nand->chip.legacy.dev_ready = ingenic_nand_dev_ready;
 	}
 
+	/*
+	 * The rb-gpios semantics were undocumented and qi,lb60 (along with
+	 * the ingenic driver) got it wrong. The active state encodes the
+	 * NAND ready state, which is high level. Since there's no signal
+	 * inverter on this board, it should be active-high. Let's fix that
+	 * here for older DTs so we can re-use the generic nand_gpio_waitrdy()
+	 * helper, and be consistent with what other drivers do.
+	 */
+	if (of_machine_is_compatible("qi,lb60") &&
+	    gpiod_is_active_low(nand->busy_gpio))
+		gpiod_toggle_active_low(nand->busy_gpio);
+
 	nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
 
 	if (IS_ERR(nand->wp_gpio)) {
@@ -359,13 +405,8 @@
 		return -ENOMEM;
 	mtd->dev.parent = dev;
 
-	chip->legacy.IO_ADDR_R = cs->base + nfc->soc_info->data_offset;
-	chip->legacy.IO_ADDR_W = cs->base + nfc->soc_info->data_offset;
-	chip->legacy.chip_delay = RB_DELAY_US;
 	chip->options = NAND_NO_SUBPAGE_WRITE;
-	chip->legacy.select_chip = ingenic_nand_select_chip;
-	chip->legacy.cmd_ctrl = ingenic_nand_cmd_ctrl;
-	chip->ecc.mode = NAND_ECC_HW;
+	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 	chip->controller = &nfc->controller;
 	nand_set_flash_node(chip, np);
 
@@ -387,13 +428,18 @@
 
 static void ingenic_nand_cleanup_chips(struct ingenic_nfc *nfc)
 {
-	struct ingenic_nand *chip;
+	struct ingenic_nand *ingenic_chip;
+	struct nand_chip *chip;
+	int ret;
 
 	while (!list_empty(&nfc->chips)) {
-		chip = list_first_entry(&nfc->chips,
-					struct ingenic_nand, chip_list);
-		nand_release(&chip->chip);
-		list_del(&chip->chip_list);
+		ingenic_chip = list_first_entry(&nfc->chips,
+						struct ingenic_nand, chip_list);
+		chip = &ingenic_chip->chip;
+		ret = mtd_device_unregister(nand_to_mtd(chip));
+		WARN_ON(ret);
+		nand_cleanup(chip);
+		list_del(&ingenic_chip->chip_list);
 	}
 }
 
@@ -488,7 +534,6 @@
 	.data_offset = 0x00000000,
 	.cmd_offset = 0x00008000,
 	.addr_offset = 0x00010000,
-	.oob_layout = &nand_ooblayout_lp_ops,
 };
 
 static const struct jz_soc_info jz4725b_soc_info = {
@@ -502,7 +547,6 @@
 	.data_offset = 0x00000000,
 	.cmd_offset = 0x00400000,
 	.addr_offset = 0x00800000,
-	.oob_layout = &nand_ooblayout_lp_ops,
 };
 
 static const struct of_device_id ingenic_nand_dt_match[] = {
diff --git a/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
index 6c852ea..2d0e0a2 100644
--- a/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
@@ -145,10 +145,10 @@
 	switch (size8) {
 	case 3:
 		dest8[2] = (val >> 16) & 0xff;
-		/* fall-through */
+		fallthrough;
 	case 2:
 		dest8[1] = (val >> 8) & 0xff;
-		/* fall-through */
+		fallthrough;
 	case 1:
 		dest8[0] = val & 0xff;
 		break;
diff --git a/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c b/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c
index 13fea64..54e3777 100644
--- a/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c
@@ -90,8 +90,8 @@
 	 * If the written data is completely 0xff, we also want to write 0xff as
 	 * ECC, otherwise we will get in trouble when doing subpage writes.
 	 */
-	if (memcmp(ecc_code, empty_block_ecc, ARRAY_SIZE(empty_block_ecc)) == 0)
-		memset(ecc_code, 0xff, ARRAY_SIZE(empty_block_ecc));
+	if (memcmp(ecc_code, empty_block_ecc, sizeof(empty_block_ecc)) == 0)
+		memset(ecc_code, 0xff, sizeof(empty_block_ecc));
 
 	return 0;
 }
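
The jz4740_ecc change above fixes a classic ARRAY_SIZE()/sizeof() mix-up: for a u8 array the two coincide, which is why the bug was latent. A minimal illustration (names and contents hypothetical):

```c
static void ecc_size_demo(u8 *ecc_code)
{
	/* hypothetical stand-in for jz4740's empty_block_ecc[] */
	static const u8 empty[9] = { 0 };	/* illustrative contents */

	/* memcmp()/memset() take a *byte* count: sizeof(empty) == 9.
	 * ARRAY_SIZE(empty) is also 9 only because the elements are u8;
	 * for a u16/u32 table the two diverge and the original
	 * ARRAY_SIZE() call would compare/fill too few bytes.
	 */
	if (!memcmp(ecc_code, empty, sizeof(empty)))
		memset(ecc_code, 0xff, sizeof(empty));
}
```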
diff --git a/drivers/mtd/nand/raw/ingenic/jz4780_bch.c b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
index 079266a..d67dbff 100644
--- a/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
@@ -123,10 +123,10 @@
 	switch (size8) {
 	case 3:
 		dest8[2] = (val >> 16) & 0xff;
-		/* fall through */
+		fallthrough;
 	case 2:
 		dest8[1] = (val >> 8) & 0xff;
-		/* fall through */
+		fallthrough;
 	case 1:
 		dest8[0] = val & 0xff;
 		break;
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
index cba6fe7..012876e 100644
--- a/drivers/mtd/nand/raw/internals.h
+++ b/drivers/mtd/nand/raw/internals.h
@@ -30,6 +30,7 @@
 #define NAND_MFR_SAMSUNG	0xec
 #define NAND_MFR_SANDISK	0x45
 #define NAND_MFR_STMICRO	0x20
+/* Kioxia is the new name of Toshiba Memory. */
 #define NAND_MFR_TOSHIBA	0x98
 #define NAND_MFR_WINBOND	0xef
 
@@ -52,12 +53,12 @@
 };
 
 /**
- * struct nand_manufacturer - NAND Flash Manufacturer structure
+ * struct nand_manufacturer_desc - NAND Flash Manufacturer descriptor
  * @name: Manufacturer name
  * @id: manufacturer ID code of device.
  * @ops: manufacturer operations
  */
-struct nand_manufacturer {
+struct nand_manufacturer_desc {
 	int id;
 	char *name;
 	const struct nand_manufacturer_ops *ops;
@@ -74,15 +75,25 @@
 extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
 extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
 
+/* MLC pairing schemes */
+extern const struct mtd_pairing_scheme dist3_pairing_scheme;
+
 /* Core functions */
-const struct nand_manufacturer *nand_get_manufacturer(u8 id);
+const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id);
 int nand_bbm_get_next_page(struct nand_chip *chip, int page);
 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs);
 int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
 		    int allowbbt);
-int onfi_fill_data_interface(struct nand_chip *chip,
-			     enum nand_data_interface_type type,
-			     int timing_mode);
+void onfi_fill_interface_config(struct nand_chip *chip,
+				struct nand_interface_config *iface,
+				enum nand_interface_type type,
+				unsigned int timing_mode);
+unsigned int
+onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings);
+int nand_choose_best_sdr_timings(struct nand_chip *chip,
+				 struct nand_interface_config *iface,
+				 struct nand_sdr_timings *spec_timings);
+const struct nand_interface_config *nand_get_reset_interface_config(void);
 int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
 int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
@@ -105,6 +116,15 @@
 	return true;
 }
 
+static inline int nand_check_op(struct nand_chip *chip,
+				const struct nand_operation *op)
+{
+	if (!nand_has_exec_op(chip))
+		return 0;
+
+	return chip->controller->ops->exec_op(chip, op, true);
+}
+
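
nand_check_op() is the dry-run counterpart of nand_exec_op() below: the controller's ->exec_op() is invoked with check_only set and must only validate that it can replay the operation, without touching the bus. A hedged sketch of a caller probing support before depending on an optional command (construction of `op` elided):

```c
/* Sketch: validate first, execute only if the controller can
 * actually replay the operation.
 */
if (nand_check_op(chip, &op))
	return -ENOTSUPP;

return nand_exec_op(chip, &op);
```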
 static inline int nand_exec_op(struct nand_chip *chip,
 			       const struct nand_operation *op)
 {
@@ -117,10 +137,10 @@
 	return chip->controller->ops->exec_op(chip, op, false);
 }
 
-static inline bool nand_has_setup_data_iface(struct nand_chip *chip)
+static inline bool nand_controller_can_setup_interface(struct nand_chip *chip)
 {
 	if (!chip->controller || !chip->controller->ops ||
-	    !chip->controller->ops->setup_data_interface)
+	    !chip->controller->ops->setup_interface)
 		return false;
 
 	if (chip->options & NAND_KEEP_TIMINGS)
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 78b31f8..9e728c7 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -648,6 +648,9 @@
 	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 	struct device *dev = &host->pdev->dev;
 
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+		return 0;
+
 	host->dma_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
 	if (!host->dma_buf)
 		return -ENOMEM;
@@ -656,8 +659,17 @@
 	if (!host->dummy_buf)
 		return -ENOMEM;
 
-	chip->ecc.mode = NAND_ECC_HW;
 	chip->ecc.size = 512;
+	chip->ecc.hwctl = lpc32xx_ecc_enable;
+	chip->ecc.read_page_raw = lpc32xx_read_page;
+	chip->ecc.read_page = lpc32xx_read_page;
+	chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
+	chip->ecc.write_page = lpc32xx_write_page_lowlevel;
+	chip->ecc.write_oob = lpc32xx_write_oob;
+	chip->ecc.read_oob = lpc32xx_read_oob;
+	chip->ecc.strength = 4;
+	chip->ecc.bytes = 10;
+
 	mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
 	host->mlcsubpages = mtd->writesize / 512;
 
@@ -741,15 +753,6 @@
 	platform_set_drvdata(pdev, host);
 
 	/* Initialize function pointers */
-	nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
-	nand_chip->ecc.read_page_raw = lpc32xx_read_page;
-	nand_chip->ecc.read_page = lpc32xx_read_page;
-	nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
-	nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
-	nand_chip->ecc.write_oob = lpc32xx_write_oob;
-	nand_chip->ecc.read_oob = lpc32xx_read_oob;
-	nand_chip->ecc.strength = 4;
-	nand_chip->ecc.bytes = 10;
 	nand_chip->legacy.waitfunc = lpc32xx_waitfunc;
 
 	nand_chip->options = NAND_NO_SUBPAGE_WRITE;
@@ -773,7 +776,6 @@
 
 	host->irq = platform_get_irq(pdev, 0);
 	if (host->irq < 0) {
-		dev_err(&pdev->dev, "failed to get platform irq\n");
 		res = -EINVAL;
 		goto release_dma_chan;
 	}
@@ -827,8 +829,13 @@
 static int lpc32xx_nand_remove(struct platform_device *pdev)
 {
 	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &host->nand_chip;
+	int ret;
 
-	nand_release(&host->nand_chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
+
 	free_irq(host->irq, host);
 	if (use_dma)
 		dma_release_channel(host->dma_chan);
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 163f976..dc7785e 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -775,6 +775,9 @@
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+		return 0;
+
 	/* OOB and ECC CPU and DMA work areas */
 	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
 
@@ -786,11 +789,22 @@
 	if (mtd->writesize <= 512)
 		mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
 
+	chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
 	/* These sizes remain the same regardless of page size */
 	chip->ecc.size = 256;
+	chip->ecc.strength = 1;
 	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
 	chip->ecc.prepad = 0;
 	chip->ecc.postpad = 0;
+	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
+	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
+	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
+	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
+	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
+	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
+	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
+	chip->ecc.correct = nand_correct_data;
+	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
 
 	/*
 	 * Use a custom BBT marker setup for small page FLASH that
@@ -881,20 +895,9 @@
 	platform_set_drvdata(pdev, host);
 
 	/* NAND callbacks for LPC32xx SLC hardware */
-	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
 	chip->legacy.read_byte = lpc32xx_nand_read_byte;
 	chip->legacy.read_buf = lpc32xx_nand_read_buf;
 	chip->legacy.write_buf = lpc32xx_nand_write_buf;
-	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
-	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
-	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
-	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
-	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
-	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
-	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
-	chip->ecc.correct = nand_correct_data;
-	chip->ecc.strength = 1;
-	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
 
 	/*
 	 * Allocate a large enough buffer for a single huge page plus
@@ -947,8 +950,12 @@
 {
 	uint32_t tmp;
 	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &host->nand_chip;
+	int ret;
 
-	nand_release(&host->nand_chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 	dma_release_channel(host->dma_chan);
 
 	/* Force CE high */
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index ef163ff..d00c916 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -227,6 +227,8 @@
 #define XTYPE_MASK		7
 
 /**
+ * struct marvell_hw_ecc_layout - layout of Marvell ECC
+ *
  * The Marvell ECC engine works differently than the others: in order to
  * limit the size of the IP, hardware engineers chose to set a fixed strength
  * at 16 bits per subpage, and depending on the desired strength needed by the NAND chip,
@@ -292,6 +294,8 @@
 };
 
 /**
+ * struct marvell_nand_chip_sel - CS line description
+ *
  * The Nand Flash Controller has up to 4 CE and 2 RB pins. The CE selection
  * is made by a field in NDCB0 register, and in another field in NDCB2 register.
  * The datasheet describes the logic with an error: ADDR5 field is once
@@ -312,14 +316,15 @@
 };
 
 /**
- * NAND chip structure: stores NAND chip device related information
+ * struct marvell_nand_chip - stores NAND chip device related information
  *
  * @chip:		Base NAND chip structure
  * @node:		Used to store NAND chips into a list
- * @layout		NAND layout when using hardware ECC
+ * @layout:		NAND layout when using hardware ECC
  * @ndcr:		Controller register value for this NAND chip
  * @ndtr0:		Timing registers 0 value for this NAND chip
  * @ndtr1:		Timing registers 1 value for this NAND chip
+ * @addr_cyc:		Number of cycles needed to pass the column address
  * @selected_die:	Current active CS
  * @nsels:		Number of CS lines required by the NAND chip
  * @sels:		Array of CS lines descriptions
@@ -334,7 +339,7 @@
 	int addr_cyc;
 	int selected_die;
 	unsigned int nsels;
-	struct marvell_nand_chip_sel sels[0];
+	struct marvell_nand_chip_sel sels[];
 };
 
 static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
@@ -349,7 +354,8 @@
 }
 
 /**
- * NAND controller capabilities for distinction between compatible strings
+ * struct marvell_nfc_caps - NAND controller capabilities for distinction
+ *                           between compatible strings
  *
  * @max_cs_nb:		Number of Chip Select lines available
  * @max_rb_nb:		Number of Ready/Busy lines available
@@ -372,7 +378,7 @@
 };
 
 /**
- * NAND controller structure: stores Marvell NAND controller information
+ * struct marvell_nfc - stores Marvell NAND controller information
  *
  * @controller:		Base controller structure
  * @dev:		Parent device (used to print error messages)
@@ -383,7 +389,9 @@
  * @assigned_cs:	Bitmask describing already assigned CS lines
  * @chips:		List containing all the NAND chips attached to
  *			this NAND controller
+ * @selected_chip:	Currently selected target chip
  * @caps:		NAND controller capabilities for each compatible string
+ * @use_dma:		Whether DMA is used
  * @dma_chan:		DMA channel (NFCv1 only)
  * @dma_buf:		32-bit aligned buffer for DMA transfers (NFCv1 only)
  */
@@ -411,7 +419,8 @@
 }
 
 /**
- * NAND controller timings expressed in NAND Controller clock cycles
+ * struct marvell_nfc_timings - NAND controller timings expressed in NAND
+ *                              Controller clock cycles
  *
  * @tRP:		ND_nRE pulse width
  * @tRH:		ND_nRE high duration
@@ -455,8 +464,8 @@
 						     period_ns))
 
 /**
- * NAND driver structure filled during the parsing of the ->exec_op() subop
- * subset of instructions.
+ * struct marvell_nfc_op - filled during the parsing of the ->exec_op()
+ *                         subop subset of instructions.
  *
  * @ndcb:		Array of values written to NDCBx registers
  * @cle_ale_delay_ns:	Optional delay after the last CMD or ADDR cycle
@@ -685,9 +694,31 @@
 	return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
 }
 
+static int marvell_nfc_poll_status(struct marvell_nfc *nfc, u32 mask,
+				   u32 expected_val, unsigned long timeout_ms)
+{
+	unsigned long limit;
+	u32 st;
+
+	limit = jiffies + msecs_to_jiffies(timeout_ms);
+	do {
+		st = readl_relaxed(nfc->regs + NDSR);
+		if (st & NDSR_RDY(1))
+			st |= NDSR_RDY(0);
+
+		if ((st & mask) == expected_val)
+			return 0;
+
+		cpu_relax();
+	} while (time_after(limit, jiffies));
+
+	return -ETIMEDOUT;
+}
+
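
marvell_nfc_poll_status() exists because a panic write (mtd->oops_panic_write, used below) runs in atomic context, where the interrupt-driven completion cannot be relied on. Setting aside the RDY(1)-to-RDY(0) folding, the same busy-wait could arguably be phrased with the generic iopoll helper; a hedged sketch reusing the names above (`nfc`, `mask`, `expected_val`, `timeout_ms`), with a 1 µs poll interval assumed:

```c
/* Sketch: equivalent atomic-context poll via <linux/iopoll.h>. */
u32 st;

return readl_relaxed_poll_timeout_atomic(nfc->regs + NDSR, st,
					 (st & mask) == expected_val,
					 1, timeout_ms * USEC_PER_MSEC);
```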
 static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
 {
 	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+	struct mtd_info *mtd = nand_to_mtd(chip);
 	u32 pending;
 	int ret;
 
@@ -695,12 +726,18 @@
 	if (!timeout_ms)
 		timeout_ms = IRQ_TIMEOUT;
 
-	init_completion(&nfc->complete);
+	if (mtd->oops_panic_write) {
+		ret = marvell_nfc_poll_status(nfc, NDSR_RDY(0),
+					      NDSR_RDY(0),
+					      timeout_ms);
+	} else {
+		init_completion(&nfc->complete);
 
-	marvell_nfc_enable_int(nfc, NDCR_RDYM);
-	ret = wait_for_completion_timeout(&nfc->complete,
-					  msecs_to_jiffies(timeout_ms));
-	marvell_nfc_disable_int(nfc, NDCR_RDYM);
+		marvell_nfc_enable_int(nfc, NDCR_RDYM);
+		ret = wait_for_completion_timeout(&nfc->complete,
+						  msecs_to_jiffies(timeout_ms));
+		marvell_nfc_disable_int(nfc, NDCR_RDYM);
+	}
 	pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
 
 	/*
@@ -780,7 +817,7 @@
 		 * When enabling BCH, set threshold to 0 to always know the
 		 * number of corrected bitflips.
 		 */
-		if (chip->ecc.algo == NAND_ECC_BCH)
+		if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
 			writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
 	}
 }
@@ -792,7 +829,7 @@
 
 	if (ndcr & NDCR_ECC_EN) {
 		writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
-		if (chip->ecc.algo == NAND_ECC_BCH)
+		if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
 			writel_relaxed(0, nfc->regs + NDECCCTRL);
 	}
 }
@@ -932,14 +969,14 @@
 }
 
 /*
- * Check a chunk is correct or not according to hardware ECC engine.
+ * Check if a chunk is correct or not according to the hardware ECC engine.
  * mtd->ecc_stats.corrected is updated, as well as max_bitflips; however,
  * mtd->ecc_stats.failed is not. The function will instead return a non-zero
  * value indicating that a check on the emptiness of the subpage must be
- * performed before declaring the subpage corrupted.
+ * performed before actually declaring the subpage as "corrupted".
  */
-static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip,
-				      unsigned int *max_bitflips)
+static int marvell_nfc_hw_ecc_check_bitflips(struct nand_chip *chip,
+					     unsigned int *max_bitflips)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
@@ -966,7 +1003,7 @@
 	if (ndsr & NDSR_CORERR) {
 		writel_relaxed(ndsr, nfc->regs + NDSR);
 
-		if (chip->ecc.algo == NAND_ECC_BCH)
+		if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
 			bf = NDSR_ERRCNT(ndsr);
 		else
 			bf = 1;
@@ -1053,7 +1090,7 @@
 	marvell_nfc_enable_hw_ecc(chip);
 	marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
 					    page);
-	ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+	ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
 	marvell_nfc_disable_hw_ecc(chip);
 
 	if (!ret)
@@ -1096,6 +1133,8 @@
 						const u8 *oob_buf, bool raw,
 						int page)
 {
+	const struct nand_sdr_timings *sdr =
+		nand_get_sdr_timings(nand_get_interface_config(chip));
 	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
 	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
 	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
@@ -1141,7 +1180,7 @@
 		return ret;
 
 	ret = marvell_nfc_wait_op(chip,
-				  PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
+				  PSEC_TO_MSEC(sdr->tPROG_max));
 	return ret;
 }
 
@@ -1224,12 +1263,12 @@
 
 		/* Read spare bytes */
 		nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
-				  spare_len, false);
+				  spare_len, false, false);
 
 		/* Read ECC bytes */
 		nand_read_data_op(chip, oob + ecc_offset +
 				  (ALIGN(lt->ecc_bytes, 32) * chunk),
-				  ecc_len, false);
+				  ecc_len, false, false);
 	}
 
 	return 0;
@@ -1336,7 +1375,7 @@
 		/* Read the chunk and detect number of bitflips */
 		marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
 						  spare, spare_len, page);
-		ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+		ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
 		if (ret)
 			failure_mask |= BIT(chunk);
 
@@ -1358,10 +1397,9 @@
 	 */
 
 	/*
-	 * In case there is any subpage read error reported by ->correct(), we
-	 * usually re-read only ECC bytes in raw mode and check if the whole
-	 * page is empty. In this case, it is normal that the ECC check failed
-	 * and we just ignore the error.
+	 * In case there is any subpage read error, we usually re-read only ECC
+	 * bytes in raw mode and check if the whole page is empty. In this case,
+	 * it is normal that the ECC check failed and we just ignore the error.
 	 *
 	 * However, it has been empirically observed that for some layouts (e.g
 	 * 2k page, 8b strength per 512B chunk), the controller tries to correct
@@ -1563,6 +1601,8 @@
 					     const u8 *buf,
 					     int oob_required, int page)
 {
+	const struct nand_sdr_timings *sdr =
+		nand_get_sdr_timings(nand_get_interface_config(chip));
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
 	const u8 *data = buf;
@@ -1599,8 +1639,7 @@
 		marvell_nfc_wait_ndrun(chip);
 	}
 
-	ret = marvell_nfc_wait_op(chip,
-				  PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
+	ret = marvell_nfc_wait_op(chip, PSEC_TO_MSEC(sdr->tPROG_max));
 
 	marvell_nfc_disable_hw_ecc(chip);
 
@@ -2107,7 +2146,8 @@
 {
 	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
 
-	marvell_nfc_select_target(chip, op->cs);
+	if (!check_only)
+		marvell_nfc_select_target(chip, op->cs);
 
 	if (nfc->caps->is_nfcv2)
 		return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
@@ -2166,8 +2206,8 @@
 	.free = marvell_nand_ooblayout_free,
 };
 
-static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
-					 struct nand_ecc_ctrl *ecc)
+static int marvell_nand_hw_ecc_controller_init(struct mtd_info *mtd,
+					       struct nand_ecc_ctrl *ecc)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
 	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
@@ -2215,7 +2255,7 @@
 	ecc->size = l->data_bytes;
 
 	if (ecc->strength == 1) {
-		chip->ecc.algo = NAND_ECC_HAMMING;
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
 		ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
 		ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
 		ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
@@ -2225,7 +2265,7 @@
 		ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
 		ecc->write_oob = ecc->write_oob_raw;
 	} else {
-		chip->ecc.algo = NAND_ECC_BCH;
+		chip->ecc.algo = NAND_ECC_ALGO_BCH;
 		ecc->strength = 16;
 		ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
 		ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
@@ -2244,13 +2284,16 @@
 				 struct nand_ecc_ctrl *ecc)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
+	const struct nand_ecc_props *requirements =
+		nanddev_get_ecc_requirements(&chip->base);
 	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
 	int ret;
 
-	if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
-		if (chip->base.eccreq.step_size && chip->base.eccreq.strength) {
-			ecc->size = chip->base.eccreq.step_size;
-			ecc->strength = chip->base.eccreq.strength;
+	if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
+	    (!ecc->size || !ecc->strength)) {
+		if (requirements->step_size && requirements->strength) {
+			ecc->size = requirements->step_size;
+			ecc->strength = requirements->strength;
 		} else {
 			dev_info(nfc->dev,
 				 "No minimum ECC strength, using 1b/512B\n");
@@ -2259,15 +2302,15 @@
 		}
 	}
 
-	switch (ecc->mode) {
-	case NAND_ECC_HW:
-		ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc);
+	switch (ecc->engine_type) {
+	case NAND_ECC_ENGINE_TYPE_ON_HOST:
+		ret = marvell_nand_hw_ecc_controller_init(mtd, ecc);
 		if (ret)
 			return ret;
 		break;
-	case NAND_ECC_NONE:
-	case NAND_ECC_SOFT:
-	case NAND_ECC_ON_DIE:
+	case NAND_ECC_ENGINE_TYPE_NONE:
+	case NAND_ECC_ENGINE_TYPE_SOFT:
+	case NAND_ECC_ENGINE_TYPE_ON_DIE:
 		if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
 		    mtd->writesize != SZ_2K) {
 			dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
@@ -2305,9 +2348,8 @@
 	.pattern = bbt_mirror_pattern
 };
 
-static int marvell_nfc_setup_data_interface(struct nand_chip *chip, int chipnr,
-					    const struct nand_data_interface
-					    *conf)
+static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+				       const struct nand_interface_config *conf)
 {
 	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
 	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
@@ -2465,7 +2507,7 @@
 		return ret;
 	}
 
-	if (chip->ecc.mode == NAND_ECC_HW) {
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
 		/*
 		 * Subpage write not available with hardware ECC, prohibit also
 		 * subpage read as in userspace subpage access would still be
@@ -2508,7 +2550,7 @@
 static const struct nand_controller_ops marvell_nand_controller_ops = {
 	.attach_chip = marvell_nand_attach_chip,
 	.exec_op = marvell_nfc_exec_op,
-	.setup_data_interface = marvell_nfc_setup_data_interface,
+	.setup_interface = marvell_nfc_setup_interface,
 };
 
 static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
@@ -2640,11 +2682,11 @@
 	 * Default to HW ECC engine mode. If the nand-ecc-mode property is given
 	 * in the DT node, this entry will be overwritten in nand_scan_ident().
 	 */
-	chip->ecc.mode = NAND_ECC_HW;
+	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 
 	/*
 	 * Save a reference value for timing registers before
-	 * ->setup_data_interface() is called.
+	 * ->setup_interface() is called.
 	 */
 	marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
 	marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
@@ -2676,9 +2718,14 @@
 static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
 {
 	struct marvell_nand_chip *entry, *temp;
+	struct nand_chip *chip;
+	int ret;
 
 	list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
-		nand_release(&entry->chip);
+		chip = &entry->chip;
+		ret = mtd_device_unregister(nand_to_mtd(chip));
+		WARN_ON(ret);
+		nand_cleanup(chip);
 		list_del(&entry->node);
 	}
 }
@@ -2748,16 +2795,18 @@
 	if (ret)
 		return ret;
 
-	nfc->dma_chan =	dma_request_slave_channel(nfc->dev, "data");
-	if (!nfc->dma_chan) {
-		dev_err(nfc->dev,
-			"Unable to request data DMA channel\n");
-		return -ENODEV;
+	nfc->dma_chan =	dma_request_chan(nfc->dev, "data");
+	if (IS_ERR(nfc->dma_chan)) {
+		ret = PTR_ERR(nfc->dma_chan);
+		nfc->dma_chan = NULL;
+		return dev_err_probe(nfc->dev, ret, "DMA channel request failed\n");
 	}
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!r)
-		return -ENXIO;
+	if (!r) {
+		ret = -ENXIO;
+		goto release_channel;
+	}
 
 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -2768,7 +2817,7 @@
 	ret = dmaengine_slave_config(nfc->dma_chan, &config);
 	if (ret < 0) {
 		dev_err(nfc->dev, "Failed to configure DMA channel\n");
-		return ret;
+		goto release_channel;
 	}
 
 	/*
@@ -2778,12 +2827,20 @@
 	 * the provided buffer.
 	 */
 	nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
-	if (!nfc->dma_buf)
-		return -ENOMEM;
+	if (!nfc->dma_buf) {
+		ret = -ENOMEM;
+		goto release_channel;
+	}
 
 	nfc->use_dma = true;
 
 	return 0;
+
+release_channel:
+	dma_release_channel(nfc->dma_chan);
+	nfc->dma_chan = NULL;
+
+	return ret;
 }
 
 static void marvell_nfc_reset(struct marvell_nfc *nfc)
@@ -2846,7 +2903,6 @@
 static int marvell_nfc_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct resource *r;
 	struct marvell_nfc *nfc;
 	int ret;
 	int irq;
@@ -2861,16 +2917,13 @@
 	nfc->controller.ops = &marvell_nand_controller_ops;
 	INIT_LIST_HEAD(&nfc->chips);
 
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	nfc->regs = devm_ioremap_resource(dev, r);
+	nfc->regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(nfc->regs))
 		return PTR_ERR(nfc->regs);
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "failed to retrieve irq\n");
+	if (irq < 0)
 		return irq;
-	}
 
 	nfc->core_clk = devm_clk_get(&pdev->dev, "core");
 
@@ -2927,10 +2980,13 @@
 
 	ret = marvell_nand_chips_init(dev, nfc);
 	if (ret)
-		goto unprepare_reg_clk;
+		goto release_dma;
 
 	return 0;
 
+release_dma:
+	if (nfc->use_dma)
+		dma_release_channel(nfc->dma_chan);
 unprepare_reg_clk:
 	clk_disable_unprepare(nfc->reg_clk);
 unprepare_core_clk:
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index ab7ab6a..817bddc 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -118,7 +118,7 @@
 	u8 *data_buf;
 	__le64 *info_buf;
 	u32 nsels;
-	u8 sels[0];
+	u8 sels[];
 };
 
 struct meson_nand_ecc {
@@ -573,10 +573,10 @@
 static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
 						int page, bool in)
 {
+	const struct nand_sdr_timings *sdr =
+		nand_get_sdr_timings(nand_get_interface_config(nand));
 	struct mtd_info *mtd = nand_to_mtd(nand);
 	struct meson_nfc *nfc = nand_get_controller_data(nand);
-	const struct nand_sdr_timings *sdr =
-		nand_get_sdr_timings(&nand->data_interface);
 	u32 *addrs = nfc->cmdfifo.rw.addrs;
 	u32 cs = nfc->param.chip_select;
 	u32 cmd0, cmd_num, row_start;
@@ -626,9 +626,9 @@
 static int meson_nfc_write_page_sub(struct nand_chip *nand,
 				    int page, int raw)
 {
-	struct mtd_info *mtd = nand_to_mtd(nand);
 	const struct nand_sdr_timings *sdr =
-		nand_get_sdr_timings(&nand->data_interface);
+		nand_get_sdr_timings(nand_get_interface_config(nand));
+	struct mtd_info *mtd = nand_to_mtd(nand);
 	struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
 	struct meson_nfc *nfc = nand_get_controller_data(nand);
 	int data_len, info_len;
@@ -899,6 +899,9 @@
 	u32 op_id, delay_idle, cmd;
 	int i;
 
+	if (check_only)
+		return 0;
+
 	meson_nfc_select_chip(nand, op->cs);
 	for (op_id = 0; op_id < op->ninstrs; op_id++) {
 		instr = &op->instrs[op_id];
@@ -1097,8 +1100,8 @@
 }
 
 static
-int meson_nfc_setup_data_interface(struct nand_chip *nand, int csline,
-				   const struct nand_data_interface *conf)
+int meson_nfc_setup_interface(struct nand_chip *nand, int csline,
+			      const struct nand_interface_config *conf)
 {
 	struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
 	const struct nand_sdr_timings *timings;
@@ -1197,7 +1200,7 @@
 	if (ret)
 		return -EINVAL;
 
-	nand->ecc.mode = NAND_ECC_HW;
+	nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 	nand->ecc.write_page_raw = meson_nfc_write_page_raw;
 	nand->ecc.write_page = meson_nfc_write_page_hwecc;
 	nand->ecc.write_oob_raw = nand_write_oob_std;
@@ -1222,7 +1225,7 @@
 static const struct nand_controller_ops meson_nand_controller_ops = {
 	.attach_chip = meson_nand_attach_chip,
 	.detach_chip = meson_nand_detach_chip,
-	.setup_data_interface = meson_nfc_setup_data_interface,
+	.setup_interface = meson_nfc_setup_interface,
 	.exec_op = meson_nfc_exec_op,
 };
 
@@ -1269,7 +1272,7 @@
 	nand_set_flash_node(nand, np);
 	nand_set_controller_data(nand, nfc);
 
-	nand->options |= NAND_USE_BOUNCE_BUFFER;
+	nand->options |= NAND_USES_DMA;
 	mtd = nand_to_mtd(nand);
 	mtd->owner = THIS_MODULE;
 	mtd->dev.parent = dev;
@@ -1402,10 +1405,8 @@
 	}
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "no NFC IRQ resource\n");
+	if (irq < 0)
 		return -EINVAL;
-	}
 
 	ret = meson_nfc_clk_init(nfc);
 	if (ret) {
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index 8b90def..5b9271b 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -104,6 +104,7 @@
 #define NFC_TIMEOUT		(HZ / 10)	/* 1/10 s */
 
 struct mpc5121_nfc_prv {
+	struct nand_controller	controller;
 	struct nand_chip	chip;
 	int			irq;
 	void __iomem		*regs;
@@ -290,7 +291,6 @@
 /* Control chips select signal on ADS5121 board */
 static void ads5121_select_chip(struct nand_chip *nand, int chip)
 {
-	struct mtd_info *mtd = nand_to_mtd(nand);
 	struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
 	u8 v;
 
@@ -438,7 +438,7 @@
 		buffer += blksize;
 		offset += blksize;
 		size -= blksize;
-	};
+	}
 }
 
 /* Copy data from/to NFC main and spare buffers */
@@ -602,6 +602,19 @@
 		iounmap(prv->csreg);
 }
 
+static int mpc5121_nfc_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+	return 0;
+}
+
+static const struct nand_controller_ops mpc5121_nfc_ops = {
+	.attach_chip = mpc5121_nfc_attach_chip,
+};
+
 static int mpc5121_nfc_probe(struct platform_device *op)
 {
 	struct device_node *dn = op->dev.of_node;
@@ -634,6 +647,10 @@
 	chip = &prv->chip;
 	mtd = nand_to_mtd(chip);
 
+	nand_controller_init(&prv->controller);
+	prv->controller.ops = &mpc5121_nfc_ops;
+	chip->controller = &prv->controller;
+
 	mtd->dev.parent = dev;
 	nand_set_controller_data(chip, prv);
 	nand_set_flash_node(chip, dn);
@@ -688,8 +705,6 @@
 	chip->legacy.set_features = nand_get_set_features_notsupp;
 	chip->legacy.get_features = nand_get_set_features_notsupp;
 	chip->bbt_options = NAND_BBT_USE_FLASH;
-	chip->ecc.mode = NAND_ECC_SOFT;
-	chip->ecc.algo = NAND_ECC_HAMMING;
 
 	/* Support external chip-select logic on ADS5121 board */
 	if (of_machine_is_compatible("fsl,mpc5121ads")) {
@@ -755,6 +770,13 @@
 		goto error;
 	}
 
+	/*
+	 * This driver assumes that the default ECC engine should be TYPE_SOFT.
+	 * Set ->engine_type before registering the NAND devices in order to
+	 * provide a driver-specific default value.
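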
+	 */
+	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
 	/* Detect NAND chips */
 	retval = nand_scan(chip, be32_to_cpup(chips_no));
 	if (retval) {
@@ -805,8 +827,11 @@
 {
 	struct device *dev = &op->dev;
 	struct mtd_info *mtd = dev_get_drvdata(dev);
+	int ret;
 
-	nand_release(mtd_to_nand(mtd));
+	ret = mtd_device_unregister(mtd);
+	WARN_ON(ret);
+	nand_cleanup(mtd_to_nand(mtd));
 	mpc5121_nfc_free(dev, mtd);
 
 	return 0;
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index 74595b6..75f1fa3 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -527,10 +527,8 @@
 	}
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "failed to get irq: %d\n", irq);
+	if (irq < 0)
 		return irq;
-	}
 
 	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
 	if (ret) {
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 08008c8..5c5c921 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -131,7 +131,7 @@
 	u32 spare_per_sector;
 
 	int nsels;
-	u8 sels[0];
+	u8 sels[];
 	/* nothing after this field */
 };
 
@@ -387,44 +387,6 @@
 	return 0;
 }
 
-static void mtk_nfc_select_chip(struct nand_chip *nand, int chip)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(nand);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
-
-	if (chip < 0)
-		return;
-
-	mtk_nfc_hw_runtime_config(nand_to_mtd(nand));
-
-	nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
-}
-
-static int mtk_nfc_dev_ready(struct nand_chip *nand)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(nand);
-
-	if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
-		return 0;
-
-	return 1;
-}
-
-static void mtk_nfc_cmd_ctrl(struct nand_chip *chip, int dat,
-			     unsigned int ctrl)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-
-	if (ctrl & NAND_ALE) {
-		mtk_nfc_send_address(nfc, dat);
-	} else if (ctrl & NAND_CLE) {
-		mtk_nfc_hw_reset(nfc);
-
-		nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
-		mtk_nfc_send_command(nfc, dat);
-	}
-}
-
 static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
 {
 	int rc;
@@ -501,8 +463,76 @@
 		mtk_nfc_write_byte(chip, buf[i]);
 }
 
-static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
-					const struct nand_data_interface *conf)
+static int mtk_nfc_exec_instr(struct nand_chip *chip,
+			      const struct nand_op_instr *instr)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	unsigned int i;
+	u32 status;
+
+	switch (instr->type) {
+	case NAND_OP_CMD_INSTR:
+		mtk_nfc_send_command(nfc, instr->ctx.cmd.opcode);
+		return 0;
+	case NAND_OP_ADDR_INSTR:
+		for (i = 0; i < instr->ctx.addr.naddrs; i++)
+			mtk_nfc_send_address(nfc, instr->ctx.addr.addrs[i]);
+		return 0;
+	case NAND_OP_DATA_IN_INSTR:
+		mtk_nfc_read_buf(chip, instr->ctx.data.buf.in,
+				 instr->ctx.data.len);
+		return 0;
+	case NAND_OP_DATA_OUT_INSTR:
+		mtk_nfc_write_buf(chip, instr->ctx.data.buf.out,
+				  instr->ctx.data.len);
+		return 0;
+	case NAND_OP_WAITRDY_INSTR:
+		return readl_poll_timeout(nfc->regs + NFI_STA, status,
+					  !(status & STA_BUSY), 20,
+					  instr->ctx.waitrdy.timeout_ms * 1000);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static void mtk_nfc_select_target(struct nand_chip *nand, unsigned int cs)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(nand);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
+
+	mtk_nfc_hw_runtime_config(nand_to_mtd(nand));
+
+	nfi_writel(nfc, mtk_nand->sels[cs], NFI_CSEL);
+}
+
+static int mtk_nfc_exec_op(struct nand_chip *chip,
+			   const struct nand_operation *op,
+			   bool check_only)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	unsigned int i;
+	int ret = 0;
+
+	if (check_only)
+		return 0;
+
+	mtk_nfc_hw_reset(nfc);
+	nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
+	mtk_nfc_select_target(chip, op->cs);
+
+	for (i = 0; i < op->ninstrs; i++) {
+		ret = mtk_nfc_exec_instr(chip, &op->instrs[i]);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
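
mtk_nfc_exec_op() above walks the instruction list of a struct nand_operation. For reference, this is the shape of operation the core builds with the NAND_OP_* helpers (the same helpers appear in the nand_base.c hunks later in this patch); a hedged sketch, with the delay arguments zeroed for brevity rather than taken from a datasheet:

/* Illustrative STATUS-read operation; not tied to any real timing mode. */
u8 status;
struct nand_op_instr instrs[] = {
	NAND_OP_CMD(NAND_CMD_STATUS, 0),
	NAND_OP_8BIT_DATA_IN(1, &status, 0),
};
struct nand_operation op = NAND_OPERATION(0 /* cs */, instrs);

/*
 * The core then calls chip->controller->ops->exec_op(chip, &op, false),
 * which lands in mtk_nfc_exec_op() and dispatches each instruction
 * through mtk_nfc_exec_instr().
 */
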
+
+static int mtk_nfc_setup_interface(struct nand_chip *chip, int csline,
+				   const struct nand_interface_config *conf)
 {
 	struct mtk_nfc *nfc = nand_get_controller_data(chip);
 	const struct nand_sdr_timings *timings;
@@ -803,6 +833,7 @@
 	u32 reg;
 	int ret;
 
+	mtk_nfc_select_target(chip, chip->cur_cs);
 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
 
 	if (!raw) {
@@ -920,6 +951,7 @@
 	u8 *buf;
 	int rc;
 
+	mtk_nfc_select_target(chip, chip->cur_cs);
 	start = data_offs / chip->ecc.size;
 	end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
 
@@ -1221,21 +1253,23 @@
 static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
 {
 	struct nand_chip *nand = mtd_to_nand(mtd);
+	const struct nand_ecc_props *requirements =
+		nanddev_get_ecc_requirements(&nand->base);
 	struct mtk_nfc *nfc = nand_get_controller_data(nand);
 	u32 spare;
 	int free, ret;
 
 	/* support only ecc hw mode */
-	if (nand->ecc.mode != NAND_ECC_HW) {
-		dev_err(dev, "ecc.mode not supported\n");
+	if (nand->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
+		dev_err(dev, "ecc.engine_type not supported\n");
 		return -EINVAL;
 	}
 
 	/* if optional dt settings not present */
 	if (!nand->ecc.size || !nand->ecc.strength) {
 		/* use datasheet requirements */
-		nand->ecc.strength = nand->base.eccreq.strength;
-		nand->ecc.size = nand->base.eccreq.step_size;
+		nand->ecc.strength = requirements->strength;
+		nand->ecc.size = requirements->step_size;
 
 		/*
 		 * align eccstrength and eccsize
@@ -1325,7 +1359,8 @@
 
 static const struct nand_controller_ops mtk_nfc_controller_ops = {
 	.attach_chip = mtk_nfc_attach_chip,
-	.setup_data_interface = mtk_nfc_setup_data_interface,
+	.setup_interface = mtk_nfc_setup_interface,
+	.exec_op = mtk_nfc_exec_op,
 };
 
 static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
@@ -1380,17 +1415,10 @@
 	nand_set_flash_node(nand, np);
 	nand_set_controller_data(nand, nfc);
 
-	nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
-	nand->legacy.dev_ready = mtk_nfc_dev_ready;
-	nand->legacy.select_chip = mtk_nfc_select_chip;
-	nand->legacy.write_byte = mtk_nfc_write_byte;
-	nand->legacy.write_buf = mtk_nfc_write_buf;
-	nand->legacy.read_byte = mtk_nfc_read_byte;
-	nand->legacy.read_buf = mtk_nfc_read_buf;
-	nand->legacy.cmd_ctrl = mtk_nfc_cmd_ctrl;
+	nand->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
 
 	/* set default mode in case dt entry is missing */
-	nand->ecc.mode = NAND_ECC_HW;
+	nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 
 	nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
 	nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
@@ -1540,7 +1568,6 @@
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
-		dev_err(dev, "no nfi irq resource\n");
 		ret = -EINVAL;
 		goto clk_disable;
 	}
@@ -1579,13 +1606,18 @@
 static int mtk_nfc_remove(struct platform_device *pdev)
 {
 	struct mtk_nfc *nfc = platform_get_drvdata(pdev);
-	struct mtk_nfc_nand_chip *chip;
+	struct mtk_nfc_nand_chip *mtk_chip;
+	struct nand_chip *chip;
+	int ret;
 
 	while (!list_empty(&nfc->chips)) {
-		chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
-					node);
-		nand_release(&chip->nand);
-		list_del(&chip->node);
+		mtk_chip = list_first_entry(&nfc->chips,
+					    struct mtk_nfc_nand_chip, node);
+		chip = &mtk_chip->nand;
+		ret = mtd_device_unregister(nand_to_mtd(chip));
+		WARN_ON(ret);
+		nand_cleanup(chip);
+		list_del(&mtk_chip->node);
 	}
 
 	mtk_ecc_release(nfc->ecc);
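
A pattern recurring throughout this patch: nand_release() calls in ->remove() are split into mtd_device_unregister() plus nand_cleanup(), so a failed unregister is reported via WARN_ON() instead of silently freeing a still-registered device. Condensed into one hypothetical helper:

/* Hypothetical wrapper; mirrors the marvell, mpc5121 and mtk conversions. */
static int example_remove_chip(struct nand_chip *chip)
{
	int ret;

	/* Unregister the MTD layer first; this step can fail... */
	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);

	/* ...then free the raw-NAND resources unconditionally. */
	nand_cleanup(chip);

	return ret;
}
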
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 59554c1..684c51e 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -137,8 +137,8 @@
 	u32 (*get_ecc_status)(struct mxc_nand_host *);
 	const struct mtd_ooblayout_ops *ooblayout;
 	void (*select_chip)(struct nand_chip *chip, int cs);
-	int (*setup_data_interface)(struct nand_chip *chip, int csline,
-				    const struct nand_data_interface *conf);
+	int (*setup_interface)(struct nand_chip *chip, int csline,
+			       const struct nand_interface_config *conf);
 	void (*enable_hwecc)(struct nand_chip *chip, bool enable);
 
 	/*
@@ -669,7 +669,7 @@
 	struct mxc_nand_host *host = nand_get_controller_data(chip);
 	uint16_t config1;
 
-	if (chip->ecc.mode != NAND_ECC_HW)
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
 		return;
 
 	config1 = readw(NFC_V1_V2_CONFIG1);
@@ -687,7 +687,7 @@
 	struct mxc_nand_host *host = nand_get_controller_data(chip);
 	uint32_t config2;
 
-	if (chip->ecc.mode != NAND_ECC_HW)
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
 		return;
 
 	config2 = readl(NFC_V3_CONFIG2);
@@ -1117,7 +1117,8 @@
 	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
 	uint16_t config1 = 0;
 
-	if (nand_chip->ecc.mode == NAND_ECC_HW && mtd->writesize)
+	if (nand_chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
+	    mtd->writesize)
 		config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
 
 	if (!host->devtype_data->irqpending_quirk)
@@ -1139,8 +1140,8 @@
 	writew(0x4, NFC_V1_V2_WRPROT);
 }
 
-static int mxc_nand_v2_setup_data_interface(struct nand_chip *chip, int csline,
-					const struct nand_data_interface *conf)
+static int mxc_nand_v2_setup_interface(struct nand_chip *chip, int csline,
+				       const struct nand_interface_config *conf)
 {
 	struct mxc_nand_host *host = nand_get_controller_data(chip);
 	int tRC_min_ns, tRC_ps, ret;
@@ -1227,7 +1228,7 @@
 	if (mtd->writesize) {
 		uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
 
-		if (nand_chip->ecc.mode == NAND_ECC_HW)
+		if (nand_chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
 			config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
 
 		host->eccsize = get_eccsize(mtd);
@@ -1303,7 +1304,7 @@
 	}
 
 	if (mtd->writesize) {
-		if (chip->ecc.mode == NAND_ECC_HW)
+		if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
 			config2 |= NFC_V3_CONFIG2_ECC_EN;
 
 		config2 |= NFC_V3_CONFIG2_PPB(
@@ -1432,7 +1433,7 @@
 }
 
 /*
- * The generic flash bbt decriptors overlap with our ecc
+ * The generic flash bbt descriptors overlap with our ecc
  * hardware, so define some i.MX specific ones.
  */
 static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
@@ -1521,7 +1522,7 @@
 	.get_ecc_status = get_ecc_status_v2,
 	.ooblayout = &mxc_v2_ooblayout_ops,
 	.select_chip = mxc_nand_select_chip_v2,
-	.setup_data_interface = mxc_nand_v2_setup_data_interface,
+	.setup_interface = mxc_nand_v2_setup_interface,
 	.enable_hwecc = mxc_nand_enable_hwecc_v1_v2,
 	.irqpending_quirk = 0,
 	.needs_ip = 0,
@@ -1680,8 +1681,13 @@
 	struct mxc_nand_host *host = nand_get_controller_data(chip);
 	struct device *dev = mtd->dev.parent;
 
-	switch (chip->ecc.mode) {
-	case NAND_ECC_HW:
+	chip->ecc.bytes = host->devtype_data->eccbytes;
+	host->eccsize = host->devtype_data->eccsize;
+	chip->ecc.size = 512;
+	mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
+
+	switch (chip->ecc.engine_type) {
+	case NAND_ECC_ENGINE_TYPE_ON_HOST:
 		chip->ecc.read_page = mxc_nand_read_page;
 		chip->ecc.read_page_raw = mxc_nand_read_page_raw;
 		chip->ecc.read_oob = mxc_nand_read_oob;
@@ -1690,7 +1696,7 @@
 		chip->ecc.write_oob = mxc_nand_write_oob;
 		break;
 
-	case NAND_ECC_SOFT:
+	case NAND_ECC_ENGINE_TYPE_SOFT:
 		break;
 
 	default:
@@ -1728,7 +1734,7 @@
 	 */
 	host->used_oobsize = min(mtd->oobsize, 218U);
 
-	if (chip->ecc.mode == NAND_ECC_HW) {
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
 		if (is_imx21_nfc(host) || is_imx27_nfc(host))
 			chip->ecc.strength = 1;
 		else
@@ -1738,17 +1744,17 @@
 	return 0;
 }
 
-static int mxcnd_setup_data_interface(struct nand_chip *chip, int chipnr,
-				      const struct nand_data_interface *conf)
+static int mxcnd_setup_interface(struct nand_chip *chip, int chipnr,
+				 const struct nand_interface_config *conf)
 {
 	struct mxc_nand_host *host = nand_get_controller_data(chip);
 
-	return host->devtype_data->setup_data_interface(chip, chipnr, conf);
+	return host->devtype_data->setup_interface(chip, chipnr, conf);
 }
 
 static const struct nand_controller_ops mxcnd_controller_ops = {
 	.attach_chip = mxcnd_attach_chip,
-	.setup_data_interface = mxcnd_setup_data_interface,
+	.setup_interface = mxcnd_setup_interface,
 };
 
 static int mxcnd_probe(struct platform_device *pdev)
@@ -1809,7 +1815,7 @@
 	if (err < 0)
 		return err;
 
-	if (!host->devtype_data->setup_data_interface)
+	if (!host->devtype_data->setup_interface)
 		this->options |= NAND_KEEP_TIMINGS;
 
 	if (host->devtype_data->needs_ip) {
@@ -1835,19 +1841,7 @@
 	if (host->devtype_data->axi_offset)
 		host->regs_axi = host->base + host->devtype_data->axi_offset;
 
-	this->ecc.bytes = host->devtype_data->eccbytes;
-	host->eccsize = host->devtype_data->eccsize;
-
 	this->legacy.select_chip = host->devtype_data->select_chip;
-	this->ecc.size = 512;
-	mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
-
-	if (host->pdata.hw_ecc) {
-		this->ecc.mode = NAND_ECC_HW;
-	} else {
-		this->ecc.mode = NAND_ECC_SOFT;
-		this->ecc.algo = NAND_ECC_HAMMING;
-	}
 
 	/* NAND bus width determines access functions used by upper layer */
 	if (host->pdata.width == 2)
@@ -1919,8 +1913,12 @@
 static int mxcnd_remove(struct platform_device *pdev)
 {
 	struct mxc_nand_host *host = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &host->nand;
+	int ret;
 
-	nand_release(&host->nand);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 	if (host->clk_act)
 		clk_disable_unprepare(host->clk);
 
diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c
index 9d49e6c..d66b5b0 100644
--- a/drivers/mtd/nand/raw/mxic_nand.c
+++ b/drivers/mtd/nand/raw/mxic_nand.c
@@ -393,6 +393,9 @@
 	int ret = 0;
 	unsigned int op_id;
 
+	if (check_only)
+		return 0;
+
 	mxic_nfc_cs_enable(nfc);
 	init_completion(&nfc->complete);
 	for (op_id = 0; op_id < op->ninstrs; op_id++) {
@@ -448,8 +451,8 @@
 	return ret;
 }
 
-static int mxic_nfc_setup_data_interface(struct nand_chip *chip, int chipnr,
-					 const struct nand_data_interface *conf)
+static int mxic_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+				    const struct nand_interface_config *conf)
 {
 	struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip);
 	const struct nand_sdr_timings *sdr;
@@ -477,7 +480,7 @@
 
 static const struct nand_controller_ops mxic_nand_controller_ops = {
 	.exec_op = mxic_nfc_exec_op,
-	.setup_data_interface = mxic_nfc_setup_data_interface,
+	.setup_interface = mxic_nfc_setup_interface,
 };
 
 static int mxic_nfc_probe(struct platform_device *pdev)
@@ -524,10 +527,8 @@
 	nand_chip->controller = &nfc->controller;
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "failed to retrieve irq\n");
+	if (irq < 0)
 		return irq;
-	}
 
 	mxic_nfc_hw_init(nfc);
 
@@ -555,8 +556,13 @@
 static int mxic_nfc_remove(struct platform_device *pdev)
 {
 	struct mxic_nand_ctlr *nfc = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &nfc->chip;
+	int ret;
 
-	nand_release(&nfc->chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
+
 	mxic_nfc_clk_disable(nfc);
 	return 0;
 }
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index db66c1b..1f0d542 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -34,6 +34,7 @@
 #include <linux/mm.h>
 #include <linux/types.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
 #include <linux/mtd/nand_ecc.h>
 #include <linux/mtd/nand_bch.h>
 #include <linux/interrupt.h>
@@ -45,164 +46,54 @@
 
 #include "internals.h"
 
-/* Define default oob placement schemes for large and small page devices */
-static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
+static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
+				       struct mtd_pairing_info *info)
 {
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int lastpage = (mtd->erasesize / mtd->writesize) - 1;
+	int dist = 3;
 
-	if (section > 1)
-		return -ERANGE;
+	if (page == lastpage)
+		dist = 2;
 
-	if (!section) {
-		oobregion->offset = 0;
-		if (mtd->oobsize == 16)
-			oobregion->length = 4;
-		else
-			oobregion->length = 3;
+	if (!page || (page & 1)) {
+		info->group = 0;
+		info->pair = (page + 1) / 2;
 	} else {
-		if (mtd->oobsize == 8)
-			return -ERANGE;
-
-		oobregion->offset = 6;
-		oobregion->length = ecc->total - 4;
+		info->group = 1;
+		info->pair = (page + 1 - dist) / 2;
 	}
 
 	return 0;
 }
 
-static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
+static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
+					const struct mtd_pairing_info *info)
 {
-	if (section > 1)
-		return -ERANGE;
+	int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
+	int page = info->pair * 2;
+	int dist = 3;
 
-	if (mtd->oobsize == 16) {
-		if (section)
-			return -ERANGE;
+	if (!info->group && !info->pair)
+		return 0;
 
-		oobregion->length = 8;
-		oobregion->offset = 8;
-	} else {
-		oobregion->length = 2;
-		if (!section)
-			oobregion->offset = 3;
-		else
-			oobregion->offset = 6;
-	}
+	if (info->pair == lastpair && info->group)
+		dist = 2;
 
-	return 0;
-}
+	if (!info->group)
+		page--;
+	else if (info->pair)
+		page += dist - 1;
 
-const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
-	.ecc = nand_ooblayout_ecc_sp,
-	.free = nand_ooblayout_free_sp,
-};
-EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
-
-static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-
-	if (section || !ecc->total)
-		return -ERANGE;
-
-	oobregion->length = ecc->total;
-	oobregion->offset = mtd->oobsize - oobregion->length;
-
-	return 0;
-}
-
-static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-
-	if (section)
-		return -ERANGE;
-
-	oobregion->length = mtd->oobsize - ecc->total - 2;
-	oobregion->offset = 2;
-
-	return 0;
-}
-
-const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
-	.ecc = nand_ooblayout_ecc_lp,
-	.free = nand_ooblayout_free_lp,
-};
-EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
-
-/*
- * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
- * are placed at a fixed offset.
- */
-static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
-					 struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-
-	if (section)
-		return -ERANGE;
-
-	switch (mtd->oobsize) {
-	case 64:
-		oobregion->offset = 40;
-		break;
-	case 128:
-		oobregion->offset = 80;
-		break;
-	default:
+	if (page >= mtd->erasesize / mtd->writesize)
 		return -EINVAL;
-	}
 
-	oobregion->length = ecc->total;
-	if (oobregion->offset + oobregion->length > mtd->oobsize)
-		return -ERANGE;
-
-	return 0;
+	return page;
 }
 
-static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
-					  struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int ecc_offset = 0;
-
-	if (section < 0 || section > 1)
-		return -ERANGE;
-
-	switch (mtd->oobsize) {
-	case 64:
-		ecc_offset = 40;
-		break;
-	case 128:
-		ecc_offset = 80;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (section == 0) {
-		oobregion->offset = 2;
-		oobregion->length = ecc_offset - 2;
-	} else {
-		oobregion->offset = ecc_offset + ecc->total;
-		oobregion->length = mtd->oobsize - oobregion->offset;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
-	.ecc = nand_ooblayout_ecc_lp_hamming,
-	.free = nand_ooblayout_free_lp_hamming,
+const struct mtd_pairing_scheme dist3_pairing_scheme = {
+	.ngroups = 2,
+	.get_info = nand_pairing_dist3_get_info,
+	.get_wunit = nand_pairing_dist3_get_wunit,
 };
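
A worked example makes the distance-3 pairing concrete. With a 64-page block, page 0 and all odd pages land in group 0 while the remaining even pages land in group 1, giving pairs (0,2), (1,4), (3,6), (5,8), and so on. A self-contained userspace check of that mapping (kernel types replaced, block size assumed):

#include <stdio.h>

/* Standalone copy of nand_pairing_dist3_get_info() for a 64-page block. */
static void dist3_get_info(int page, int lastpage, int *group, int *pair)
{
	int dist = (page == lastpage) ? 2 : 3;

	if (!page || (page & 1)) {
		*group = 0;
		*pair = (page + 1) / 2;
	} else {
		*group = 1;
		*pair = (page + 1 - dist) / 2;
	}
}

int main(void)
{
	int group, pair, page;

	for (page = 0; page < 8; page++) {
		dist3_get_info(page, 63, &group, &pair);
		printf("page %d -> group %d, pair %d\n", page, group, pair);
	}
	/* Output shows pair 0 = {0, 2}, pair 1 = {1, 4}, pair 2 = {3, 6}, ... */
	return 0;
}
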
 
 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
@@ -225,6 +116,50 @@
 }
 
 /**
+ * nand_extract_bits - Copy unaligned bits from one buffer to another one
+ * @dst: destination buffer
+ * @dst_off: bit offset at which the writing starts
+ * @src: source buffer
+ * @src_off: bit offset at which the reading starts
+ * @nbits: number of bits to copy from @src to @dst
+ *
+ * Copy bits from one memory region to another (the regions may overlap).
+ */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+		       unsigned int src_off, unsigned int nbits)
+{
+	unsigned int tmp, n;
+
+	dst += dst_off / 8;
+	dst_off %= 8;
+	src += src_off / 8;
+	src_off %= 8;
+
+	while (nbits) {
+		n = min3(8 - dst_off, 8 - src_off, nbits);
+
+		tmp = (*src >> src_off) & GENMASK(n - 1, 0);
+		*dst &= ~GENMASK(n - 1 + dst_off, dst_off);
+		*dst |= tmp << dst_off;
+
+		dst_off += n;
+		if (dst_off >= 8) {
+			dst++;
+			dst_off -= 8;
+		}
+
+		src_off += n;
+		if (src_off >= 8) {
+			src++;
+			src_off -= 8;
+		}
+
+		nbits -= n;
+	}
+}
+EXPORT_SYMBOL_GPL(nand_extract_bits);
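
For intuition, here is a tiny userspace harness around the same bit-copy loop (logic copied verbatim from the function above, kernel macros expanded). Bits are consumed LSB-first within each byte, so copying 8 bits starting at source bit 4 of { 0xAB, 0xCD } yields 0xDA:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;

	return m < c ? m : c;
}

/* Same algorithm as nand_extract_bits(), usable outside the kernel. */
static void extract_bits(uint8_t *dst, unsigned int dst_off,
			 const uint8_t *src, unsigned int src_off,
			 unsigned int nbits)
{
	unsigned int tmp, n;

	dst += dst_off / 8;
	dst_off %= 8;
	src += src_off / 8;
	src_off %= 8;

	while (nbits) {
		n = min3u(8 - dst_off, 8 - src_off, nbits);

		tmp = (*src >> src_off) & GENMASK(n - 1, 0);
		*dst &= ~GENMASK(n - 1 + dst_off, dst_off);
		*dst |= tmp << dst_off;

		dst_off += n;
		if (dst_off >= 8) {
			dst++;
			dst_off -= 8;
		}
		src_off += n;
		if (src_off >= 8) {
			src++;
			src_off -= 8;
		}
		nbits -= n;
	}
}

int main(void)
{
	const uint8_t src[2] = { 0xAB, 0xCD };
	uint8_t dst[1] = { 0 };

	extract_bits(dst, 0, src, 4, 8);
	/* 0xDA: 0xAB's high nibble in bits 0-3, 0xCD's low nibble in bits 4-7 */
	printf("0x%02X\n", dst[0]);
	return 0;
}
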
+
+/**
  * nand_select_target() - Select a NAND target (A.K.A. die)
  * @chip: NAND chip object
  * @cs: the CS line to select. Note that this CS id is always from the chip
@@ -345,6 +280,9 @@
 
 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
 {
+	if (chip->options & NAND_NO_BBM_QUIRK)
+		return 0;
+
 	if (chip->legacy.block_bad)
 		return chip->legacy.block_bad(chip, ofs);
 
@@ -676,16 +614,22 @@
 		return -ENOTSUPP;
 
 	/* Wait tWB before polling the STATUS reg. */
-	timings = nand_get_sdr_timings(&chip->data_interface);
+	timings = nand_get_sdr_timings(nand_get_interface_config(chip));
 	ndelay(PSEC_TO_NSEC(timings->tWB_max));
 
 	ret = nand_status_op(chip, NULL);
 	if (ret)
 		return ret;
 
-	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+	/*
+	 * The +1 below is necessary because if we are now in the last
+	 * fraction of a jiffy and msecs_to_jiffies(timeout_ms) is 1, we
+	 * would wait only that small fraction of a jiffy, possibly leading
+	 * to a false timeout.
+	 */
+	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
 	do {
-		ret = nand_read_data_op(chip, &status, sizeof(status), true);
+		ret = nand_read_data_op(chip, &status, sizeof(status), true,
+					false);
 		if (ret)
 			break;
 
@@ -771,7 +715,7 @@
 			u8 status;
 
 			ret = nand_read_data_op(chip, &status, sizeof(status),
-						true);
+						true, false);
 			if (ret)
 				return;
 
@@ -795,7 +739,7 @@
 }
 
 /**
- * nand_reset_data_interface - Reset data interface and timings
+ * nand_reset_interface - Reset data interface and timings
  * @chip: The NAND chip
  * @chipnr: Internal die id
  *
@@ -803,11 +747,12 @@
  *
  * Returns 0 for success or negative error code otherwise.
  */
-static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
+static int nand_reset_interface(struct nand_chip *chip, int chipnr)
 {
+	const struct nand_controller_ops *ops = chip->controller->ops;
 	int ret;
 
-	if (!nand_has_setup_data_iface(chip))
+	if (!nand_controller_can_setup_interface(chip))
 		return 0;
 
 	/*
@@ -824,9 +769,9 @@
 	 * timings to timing mode 0.
 	 */
 
-	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
-	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
-							&chip->data_interface);
+	chip->current_interface_config = nand_get_reset_interface_config();
+	ret = ops->setup_interface(chip, chipnr,
+				   chip->current_interface_config);
 	if (ret)
 		pr_err("Failed to configure data interface to SDR timing mode 0\n");
 
@@ -834,28 +779,36 @@
 }
 
 /**
- * nand_setup_data_interface - Setup the best data interface and timings
+ * nand_setup_interface - Setup the best data interface and timings
  * @chip: The NAND chip
  * @chipnr: Internal die id
  *
- * Find and configure the best data interface and NAND timings supported by
- * the chip and the driver.
- * First tries to retrieve supported timing modes from ONFI information,
- * and if the NAND chip does not support ONFI, relies on the
- * ->onfi_timing_mode_default specified in the nand_ids table.
+ * Configure what has been reported to be the best data interface and NAND
+ * timings supported by the chip and the driver.
  *
  * Returns 0 for success or negative error code otherwise.
  */
-static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
+static int nand_setup_interface(struct nand_chip *chip, int chipnr)
 {
-	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
-		chip->onfi_timing_mode_default,
-	};
+	const struct nand_controller_ops *ops = chip->controller->ops;
+	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { };
 	int ret;
 
-	if (!nand_has_setup_data_iface(chip))
+	if (!nand_controller_can_setup_interface(chip))
 		return 0;
 
+	/*
+	 * A nand_reset_interface() puts both the NAND chip and the NAND
+	 * controller in timing mode 0. If the default mode for this chip is
+	 * also 0, there is no need to apply the change again. Plus, at probe
+	 * time, nand_setup_interface() uses ->set/get_features() which would
+	 * fail anyway as the parameter page is not available yet.
+	 */
+	if (!chip->best_interface_config)
+		return 0;
+
+	tmode_param[0] = chip->best_interface_config->timings.mode;
+
 	/* Change the mode on the chip side (if supported by the NAND chip) */
 	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
 		nand_select_target(chip, chipnr);
@@ -867,14 +820,13 @@
 	}
 
 	/* Change the mode on the controller side */
-	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
-							&chip->data_interface);
+	ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
 	if (ret)
 		return ret;
 
 	/* Check the mode has been accepted by the chip, if supported */
 	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
-		return 0;
+		goto update_interface_config;
 
 	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
 	nand_select_target(chip, chipnr);
@@ -884,12 +836,15 @@
 	if (ret)
 		goto err_reset_chip;
 
-	if (tmode_param[0] != chip->onfi_timing_mode_default) {
+	if (tmode_param[0] != chip->best_interface_config->timings.mode) {
 		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
-			chip->onfi_timing_mode_default);
+			chip->best_interface_config->timings.mode);
 		goto err_reset_chip;
 	}
 
+update_interface_config:
+	chip->current_interface_config = chip->best_interface_config;
+
 	return 0;
 
 err_reset_chip:
@@ -897,7 +852,7 @@
 	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
 	 * timing mode.
 	 */
-	nand_reset_data_interface(chip, chipnr);
+	nand_reset_interface(chip, chipnr);
 	nand_select_target(chip, chipnr);
 	nand_reset_op(chip);
 	nand_deselect_target(chip);
@@ -906,59 +861,90 @@
 }
 
 /**
- * nand_init_data_interface - find the best data interface and timings
+ * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
+ *                                NAND controller and the NAND chip support
+ * @chip: the NAND chip
+ * @iface: the interface configuration (can eventually be updated)
+ * @spec_timings: specific timings, when not fitting the ONFI specification
+ *
+ * If specific timings are provided, use them. Otherwise, retrieve supported
+ * timing modes from ONFI information.
+ */
+int nand_choose_best_sdr_timings(struct nand_chip *chip,
+				 struct nand_interface_config *iface,
+				 struct nand_sdr_timings *spec_timings)
+{
+	const struct nand_controller_ops *ops = chip->controller->ops;
+	int best_mode = 0, mode, ret;
+
+	iface->type = NAND_SDR_IFACE;
+
+	if (spec_timings) {
+		iface->timings.sdr = *spec_timings;
+		iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);
+
+		/* Verify the controller supports the requested interface */
+		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+					   iface);
+		if (!ret) {
+			chip->best_interface_config = iface;
+			return ret;
+		}
+
+		/* Fallback to slower modes */
+		best_mode = iface->timings.mode;
+	} else if (chip->parameters.onfi) {
+		best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1;
+	}
+
+	for (mode = best_mode; mode >= 0; mode--) {
+		onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);
+
+		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+					   iface);
+		if (!ret)
+			break;
+	}
+
+	chip->best_interface_config = iface;
+
+	return 0;
+}
+
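The fallback loop in nand_choose_best_sdr_timings() walks down from the best advertised mode until the controller accepts one. A small standalone sketch of that negotiation, with a fake controller that only supports modes 0-3 and an assumed ONFI bitmask:

#include <stdio.h>

/* Pretend controller: accepts SDR timing modes 0..3 only. */
static int fake_setup_interface(int mode)
{
	return mode <= 3 ? 0 : -1; /* -ENOTSUPP in the kernel */
}

int main(void)
{
	/* ONFI async_timing_mode bitmask 0x1F advertises modes 0..4. */
	unsigned int modes = 0x1F;
	int best_mode = 31 - __builtin_clz(modes); /* fls(modes) - 1 */
	int mode;

	for (mode = best_mode; mode >= 0; mode--)
		if (!fake_setup_interface(mode))
			break;

	printf("negotiated SDR timing mode %d\n", mode); /* prints 3 */
	return 0;
}
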
+/**
+ * nand_choose_interface_config - find the best data interface and timings
  * @chip: The NAND chip
  *
  * Find the best data interface and NAND timings supported by the chip
- * and the driver.
- * First tries to retrieve supported timing modes from ONFI information,
- * and if the NAND chip does not support ONFI, relies on the
- * ->onfi_timing_mode_default specified in the nand_ids table. After this
- * function nand_chip->data_interface is initialized with the best timing mode
- * available.
+ * and the driver. Optionally, let the NAND manufacturer driver propose its
+ * own set of timings.
+ *
+ * After this function, nand_chip->interface_config is initialized with the
+ * best timing mode available.
  *
  * Returns 0 for success or negative error code otherwise.
  */
-static int nand_init_data_interface(struct nand_chip *chip)
+static int nand_choose_interface_config(struct nand_chip *chip)
 {
-	int modes, mode, ret;
+	struct nand_interface_config *iface;
+	int ret;
 
-	if (!nand_has_setup_data_iface(chip))
+	if (!nand_controller_can_setup_interface(chip))
 		return 0;
 
-	/*
-	 * First try to identify the best timings from ONFI parameters and
-	 * if the NAND does not support ONFI, fallback to the default ONFI
-	 * timing mode.
-	 */
-	if (chip->parameters.onfi) {
-		modes = chip->parameters.onfi->async_timing_mode;
-	} else {
-		if (!chip->onfi_timing_mode_default)
-			return 0;
+	iface = kzalloc(sizeof(*iface), GFP_KERNEL);
+	if (!iface)
+		return -ENOMEM;
 
-		modes = GENMASK(chip->onfi_timing_mode_default, 0);
-	}
+	if (chip->ops.choose_interface_config)
+		ret = chip->ops.choose_interface_config(chip, iface);
+	else
+		ret = nand_choose_best_sdr_timings(chip, iface, NULL);
 
-	for (mode = fls(modes) - 1; mode >= 0; mode--) {
-		ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
-		if (ret)
-			continue;
+	if (ret)
+		kfree(iface);
 
-		/*
-		 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
-		 * controller supports the requested timings.
-		 */
-		ret = chip->controller->ops->setup_data_interface(chip,
-						 NAND_DATA_IFACE_CHECK_ONLY,
-						 &chip->data_interface);
-		if (!ret) {
-			chip->onfi_timing_mode_default = mode;
-			break;
-		}
-	}
-
-	return 0;
+	return ret;
 }
 
 /**
@@ -1019,9 +1005,9 @@
 				     unsigned int offset_in_page, void *buf,
 				     unsigned int len)
 {
-	struct mtd_info *mtd = nand_to_mtd(chip);
 	const struct nand_sdr_timings *sdr =
-		nand_get_sdr_timings(&chip->data_interface);
+		nand_get_sdr_timings(nand_get_interface_config(chip));
+	struct mtd_info *mtd = nand_to_mtd(chip);
 	u8 addrs[4];
 	struct nand_op_instr instrs[] = {
 		NAND_OP_CMD(NAND_CMD_READ0, 0),
@@ -1063,7 +1049,7 @@
 				     unsigned int len)
 {
 	const struct nand_sdr_timings *sdr =
-		nand_get_sdr_timings(&chip->data_interface);
+		nand_get_sdr_timings(nand_get_interface_config(chip));
 	u8 addrs[5];
 	struct nand_op_instr instrs[] = {
 		NAND_OP_CMD(NAND_CMD_READ0, 0),
@@ -1160,7 +1146,7 @@
 
 	if (nand_has_exec_op(chip)) {
 		const struct nand_sdr_timings *sdr =
-			nand_get_sdr_timings(&chip->data_interface);
+			nand_get_sdr_timings(nand_get_interface_config(chip));
 		struct nand_op_instr instrs[] = {
 			NAND_OP_CMD(NAND_CMD_PARAM, 0),
 			NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
@@ -1215,7 +1201,7 @@
 
 	if (nand_has_exec_op(chip)) {
 		const struct nand_sdr_timings *sdr =
-			nand_get_sdr_timings(&chip->data_interface);
+			nand_get_sdr_timings(nand_get_interface_config(chip));
 		u8 addrs[2] = {};
 		struct nand_op_instr instrs[] = {
 			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
@@ -1289,9 +1275,9 @@
 				  unsigned int offset_in_page, const void *buf,
 				  unsigned int len, bool prog)
 {
-	struct mtd_info *mtd = nand_to_mtd(chip);
 	const struct nand_sdr_timings *sdr =
-		nand_get_sdr_timings(&chip->data_interface);
+		nand_get_sdr_timings(nand_get_interface_config(chip));
+	struct mtd_info *mtd = nand_to_mtd(chip);
 	u8 addrs[5] = {};
 	struct nand_op_instr instrs[] = {
 		/*
@@ -1414,7 +1400,7 @@
 
 	if (nand_has_exec_op(chip)) {
 		const struct nand_sdr_timings *sdr =
-			nand_get_sdr_timings(&chip->data_interface);
+			nand_get_sdr_timings(nand_get_interface_config(chip));
 		struct nand_op_instr instrs[] = {
 			NAND_OP_CMD(NAND_CMD_PAGEPROG,
 				    PSEC_TO_NSEC(sdr->tWB_max)),
@@ -1521,7 +1507,7 @@
 
 	if (nand_has_exec_op(chip)) {
 		const struct nand_sdr_timings *sdr =
-			nand_get_sdr_timings(&chip->data_interface);
+			nand_get_sdr_timings(nand_get_interface_config(chip));
 		u8 addrs[2];
 		struct nand_op_instr instrs[] = {
 			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
@@ -1576,7 +1562,7 @@
 
 	if (nand_has_exec_op(chip)) {
 		const struct nand_sdr_timings *sdr =
-			nand_get_sdr_timings(&chip->data_interface);
+			nand_get_sdr_timings(nand_get_interface_config(chip));
 		struct nand_op_instr instrs[] = {
 			NAND_OP_CMD(NAND_CMD_READID, 0),
 			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
@@ -1615,7 +1601,7 @@
 {
 	if (nand_has_exec_op(chip)) {
 		const struct nand_sdr_timings *sdr =
-			nand_get_sdr_timings(&chip->data_interface);
+			nand_get_sdr_timings(nand_get_interface_config(chip));
 		struct nand_op_instr instrs[] = {
 			NAND_OP_CMD(NAND_CMD_STATUS,
 				    PSEC_TO_NSEC(sdr->tADL_min)),
@@ -1684,7 +1670,7 @@
 
 	if (nand_has_exec_op(chip)) {
 		const struct nand_sdr_timings *sdr =
-			nand_get_sdr_timings(&chip->data_interface);
+			nand_get_sdr_timings(nand_get_interface_config(chip));
 		u8 addrs[3] = {	page, page >> 8, page >> 16 };
 		struct nand_op_instr instrs[] = {
 			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
@@ -1743,7 +1729,7 @@
 
 	if (nand_has_exec_op(chip)) {
 		const struct nand_sdr_timings *sdr =
-			nand_get_sdr_timings(&chip->data_interface);
+			nand_get_sdr_timings(nand_get_interface_config(chip));
 		struct nand_op_instr instrs[] = {
 			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
 			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
@@ -1790,7 +1776,7 @@
 
 	if (nand_has_exec_op(chip)) {
 		const struct nand_sdr_timings *sdr =
-			nand_get_sdr_timings(&chip->data_interface);
+			nand_get_sdr_timings(nand_get_interface_config(chip));
 		struct nand_op_instr instrs[] = {
 			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
 			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
@@ -1847,7 +1833,7 @@
 {
 	if (nand_has_exec_op(chip)) {
 		const struct nand_sdr_timings *sdr =
-			nand_get_sdr_timings(&chip->data_interface);
+			nand_get_sdr_timings(nand_get_interface_config(chip));
 		struct nand_op_instr instrs[] = {
 			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
 			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
@@ -1869,6 +1855,8 @@
  * @buf: buffer used to store the data
  * @len: length of the buffer
  * @force_8bit: force 8-bit bus access
+ * @check_only: do not actually run the command, only check whether the
+ *              controller driver supports it
  *
  * This function does a raw data read on the bus. Usually used after launching
  * another NAND operation like nand_read_page_op().
@@ -1877,7 +1865,7 @@
  * Returns 0 on success, a negative error code otherwise.
  */
 int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
-		      bool force_8bit)
+		      bool force_8bit, bool check_only)
 {
 	if (!len || !buf)
 		return -EINVAL;
@@ -1890,9 +1878,15 @@
 
 		instrs[0].ctx.data.force_8bit = force_8bit;
 
+		if (check_only)
+			return nand_check_op(chip, &op);
+
 		return nand_exec_op(chip, &op);
 	}
 
+	if (check_only)
+		return 0;
+
 	if (force_8bit) {
 		u8 *p = buf;
 		unsigned int i;
@@ -2113,7 +2107,7 @@
 	char *prefix = "      ";
 	unsigned int i;
 
-	pr_debug("executing subop:\n");
+	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);
 
 	for (i = 0; i < ctx->ninstrs; i++) {
 		instr = &ctx->instrs[i];
@@ -2177,6 +2171,7 @@
 			   const struct nand_operation *op, bool check_only)
 {
 	struct nand_op_parser_ctx ctx = {
+		.subop.cs = op->cs,
 		.subop.instrs = op->instrs,
 		.instrs = op->instrs,
 		.ninstrs = op->ninstrs,
@@ -2368,17 +2363,16 @@
  * @chipnr: Internal die id
  *
  * Save the timings data structure, then apply SDR timings mode 0 (see
- * nand_reset_data_interface for details), do the reset operation, and
- * apply back the previous timings.
+ * nand_reset_interface for details), do the reset operation, and apply
+ * back the previous timings.
  *
  * Returns 0 on success, a negative error code otherwise.
  */
 int nand_reset(struct nand_chip *chip, int chipnr)
 {
-	struct nand_data_interface saved_data_intf = chip->data_interface;
 	int ret;
 
-	ret = nand_reset_data_interface(chip, chipnr);
+	ret = nand_reset_interface(chip, chipnr);
 	if (ret)
 		return ret;
 
@@ -2393,18 +2387,7 @@
 	if (ret)
 		return ret;
 
-	/*
-	 * A nand_reset_data_interface() put both the NAND chip and the NAND
-	 * controller in timings mode 0. If the default mode for this chip is
-	 * also 0, no need to proceed to the change again. Plus, at probe time,
-	 * nand_setup_data_interface() uses ->set/get_features() which would
-	 * fail anyway as the parameter page is not available yet.
-	 */
-	if (!chip->onfi_timing_mode_default)
-		return 0;
-
-	chip->data_interface = saved_data_intf;
-	ret = nand_setup_data_interface(chip, chipnr);
+	ret = nand_setup_interface(chip, chipnr);
 	if (ret)
 		return ret;
 
@@ -2621,7 +2604,7 @@
 
 	if (oob_required) {
 		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
-					false);
+					false, false);
 		if (ret)
 			return ret;
 	}
@@ -2631,6 +2614,47 @@
 EXPORT_SYMBOL(nand_read_page_raw);
 
 /**
+ * nand_monolithic_read_page_raw - Monolithic page read in raw mode
+ * @chip: NAND chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * This is a raw page read, i.e. without any error detection/correction.
+ * Monolithic means we are requesting all the relevant data (main plus
+ * possibly OOB) to be loaded in the NAND cache and sent over the
+ * bus (from the NAND chip to the NAND controller) in a single
+ * operation. This is an alternative to nand_read_page_raw(), which
+ * first reads the main data, and if the OOB data is requested too,
+ * then reads more data on the bus.
+ */
+int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
+				  int oob_required, int page)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	unsigned int size = mtd->writesize;
+	u8 *read_buf = buf;
+	int ret;
+
+	if (oob_required) {
+		size += mtd->oobsize;
+
+		if (buf != chip->data_buf)
+			read_buf = nand_get_data_buf(chip);
+	}
+
+	ret = nand_read_page_op(chip, page, 0, read_buf, size);
+	if (ret)
+		return ret;
+
+	if (buf != chip->data_buf)
+		memcpy(buf, read_buf, mtd->writesize);
+
+	return 0;
+}
+EXPORT_SYMBOL(nand_monolithic_read_page_raw);
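
A controller driver whose ->exec_op() can clock out a full page plus OOB in one transaction would typically wire this helper (and its write counterpart, added further down) from its attach_chip hook; a hedged sketch with a hypothetical hook name:

/* Hypothetical attach_chip hook opting in to the monolithic raw accessors. */
static int example_attach_chip(struct nand_chip *chip)
{
	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;

	return 0;
}
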
+
+/**
  * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
  * @chip: nand chip info structure
  * @buf: buffer to store read data
@@ -2653,7 +2677,7 @@
 		return ret;
 
 	for (steps = chip->ecc.steps; steps > 0; steps--) {
-		ret = nand_read_data_op(chip, buf, eccsize, false);
+		ret = nand_read_data_op(chip, buf, eccsize, false, false);
 		if (ret)
 			return ret;
 
@@ -2661,14 +2685,14 @@
 
 		if (chip->ecc.prepad) {
 			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
-						false);
+						false, false);
 			if (ret)
 				return ret;
 
 			oob += chip->ecc.prepad;
 		}
 
-		ret = nand_read_data_op(chip, oob, eccbytes, false);
+		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
 		if (ret)
 			return ret;
 
@@ -2676,7 +2700,7 @@
 
 		if (chip->ecc.postpad) {
 			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
-						false);
+						false, false);
 			if (ret)
 				return ret;
 
@@ -2686,7 +2710,7 @@
 
 	size = mtd->oobsize - (oob - chip->oob_poi);
 	if (size) {
-		ret = nand_read_data_op(chip, oob, size, false);
+		ret = nand_read_data_op(chip, oob, size, false, false);
 		if (ret)
 			return ret;
 	}
@@ -2879,14 +2903,15 @@
 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
 		chip->ecc.hwctl(chip, NAND_ECC_READ);
 
-		ret = nand_read_data_op(chip, p, eccsize, false);
+		ret = nand_read_data_op(chip, p, eccsize, false, false);
 		if (ret)
 			return ret;
 
 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
 	}
 
-	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+				false);
 	if (ret)
 		return ret;
 
@@ -2922,76 +2947,6 @@
 }
 
 /**
- * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
- *
- * Hardware ECC for large page chips, require OOB to be read first. For this
- * ECC mode, the write_page method is re-used from ECC_HW. These methods
- * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
- * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
- * the data area, by overwriting the NAND manufacturer bad block markings.
- */
-static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
-					  int oob_required, int page)
-{
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	int i, eccsize = chip->ecc.size, ret;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	uint8_t *p = buf;
-	uint8_t *ecc_code = chip->ecc.code_buf;
-	uint8_t *ecc_calc = chip->ecc.calc_buf;
-	unsigned int max_bitflips = 0;
-
-	/* Read the OOB area first */
-	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
-	if (ret)
-		return ret;
-
-	ret = nand_read_page_op(chip, page, 0, NULL, 0);
-	if (ret)
-		return ret;
-
-	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
-					 chip->ecc.total);
-	if (ret)
-		return ret;
-
-	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-		int stat;
-
-		chip->ecc.hwctl(chip, NAND_ECC_READ);
-
-		ret = nand_read_data_op(chip, p, eccsize, false);
-		if (ret)
-			return ret;
-
-		chip->ecc.calculate(chip, p, &ecc_calc[i]);
-
-		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
-		if (stat == -EBADMSG &&
-		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
-			/* check for empty pages with bitflips */
-			stat = nand_check_erased_ecc_chunk(p, eccsize,
-						&ecc_code[i], eccbytes,
-						NULL, 0,
-						chip->ecc.strength);
-		}
-
-		if (stat < 0) {
-			mtd->ecc_stats.failed++;
-		} else {
-			mtd->ecc_stats.corrected += stat;
-			max_bitflips = max_t(unsigned int, max_bitflips, stat);
-		}
-	}
-	return max_bitflips;
-}
-
-/**
  * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
  * @chip: nand chip info structure
  * @buf: buffer to store read data
@@ -3022,13 +2977,13 @@
 
 		chip->ecc.hwctl(chip, NAND_ECC_READ);
 
-		ret = nand_read_data_op(chip, p, eccsize, false);
+		ret = nand_read_data_op(chip, p, eccsize, false, false);
 		if (ret)
 			return ret;
 
 		if (chip->ecc.prepad) {
 			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
-						false);
+						false, false);
 			if (ret)
 				return ret;
 
@@ -3037,7 +2992,7 @@
 
 		chip->ecc.hwctl(chip, NAND_ECC_READSYN);
 
-		ret = nand_read_data_op(chip, oob, eccbytes, false);
+		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
 		if (ret)
 			return ret;
 
@@ -3047,7 +3002,7 @@
 
 		if (chip->ecc.postpad) {
 			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
-						false);
+						false, false);
 			if (ret)
 				return ret;
 
@@ -3075,7 +3030,7 @@
 	/* Calculate remaining oob bytes */
 	i = mtd->oobsize - (oob - chip->oob_poi);
 	if (i) {
-		ret = nand_read_data_op(chip, oob, i, false);
+		ret = nand_read_data_op(chip, oob, i, false, false);
 		if (ret)
 			return ret;
 	}
@@ -3131,10 +3086,10 @@
 	if (retry_mode >= chip->read_retries)
 		return -EINVAL;
 
-	if (!chip->setup_read_retry)
+	if (!chip->ops.setup_read_retry)
 		return -EOPNOTSUPP;
 
-	return chip->setup_read_retry(chip, retry_mode);
+	return chip->ops.setup_read_retry(chip, retry_mode);
 }
 
 static void nand_wait_readrdy(struct nand_chip *chip)
@@ -3144,7 +3099,7 @@
 	if (!(chip->options & NAND_NEED_READRDY))
 		return;
 
-	sdr = nand_get_sdr_timings(&chip->data_interface);
+	sdr = nand_get_sdr_timings(nand_get_interface_config(chip));
 	WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
 }
 
@@ -3167,7 +3122,7 @@
 	uint32_t max_oobsize = mtd_oobavail(mtd, ops);
 
 	uint8_t *bufpoi, *oob, *buf;
-	int use_bufpoi;
+	int use_bounce_buf;
 	unsigned int max_bitflips = 0;
 	int retry_mode = 0;
 	bool ecc_fail = false;
@@ -3185,25 +3140,25 @@
 	oob_required = oob ? 1 : 0;
 
 	while (1) {
-		unsigned int ecc_failures = mtd->ecc_stats.failed;
+		struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
 
 		bytes = min(mtd->writesize - col, readlen);
 		aligned = (bytes == mtd->writesize);
 
 		if (!aligned)
-			use_bufpoi = 1;
-		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
-			use_bufpoi = !virt_addr_valid(buf) ||
-				     !IS_ALIGNED((unsigned long)buf,
-						 chip->buf_align);
+			use_bounce_buf = 1;
+		else if (chip->options & NAND_USES_DMA)
+			use_bounce_buf = !virt_addr_valid(buf) ||
+					 !IS_ALIGNED((unsigned long)buf,
+						     chip->buf_align);
 		else
-			use_bufpoi = 0;
+			use_bounce_buf = 0;
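
Spelled out, the renamed predicate is: always bounce for partial-page reads; for full pages, bounce only when the controller uses DMA (NAND_USES_DMA) and the caller's buffer is either not DMA-addressable or misaligned. A condensed sketch, kernel context assumed (virt_addr_valid(), IS_ALIGNED() and chip->buf_align are used as elsewhere in this file):

/* Condensed form of the decision above; illustrative only. */
static bool needs_bounce_buf(struct nand_chip *chip, const void *buf,
			     bool full_page)
{
	if (!full_page)
		return true;	/* partial page: always go through data_buf */

	if (!(chip->options & NAND_USES_DMA))
		return false;	/* PIO controllers can use any buffer */

	return !virt_addr_valid(buf) ||
	       !IS_ALIGNED((unsigned long)buf, chip->buf_align);
}
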
 
 		/* Is the current page in the buffer? */
 		if (realpage != chip->pagecache.page || oob) {
-			bufpoi = use_bufpoi ? chip->data_buf : buf;
+			bufpoi = use_bounce_buf ? chip->data_buf : buf;
 
-			if (use_bufpoi && aligned)
+			if (use_bounce_buf && aligned)
 				pr_debug("%s: using read bounce buffer for buf@%p\n",
 						 __func__, buf);
 
@@ -3224,16 +3179,19 @@
 				ret = chip->ecc.read_page(chip, bufpoi,
 							  oob_required, page);
 			if (ret < 0) {
-				if (use_bufpoi)
+				if (use_bounce_buf)
 					/* Invalidate page cache */
 					chip->pagecache.page = -1;
 				break;
 			}
 
-			/* Transfer not aligned data */
-			if (use_bufpoi) {
+			/*
+			 * Copy the data back into the initial buffer when reading
+			 * partial pages or when a bounce buffer is required.
+			 */
+			if (use_bounce_buf) {
 				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
-				    !(mtd->ecc_stats.failed - ecc_failures) &&
+				    !(mtd->ecc_stats.failed - ecc_stats.failed) &&
 				    (ops->mode != MTD_OPS_RAW)) {
 					chip->pagecache.page = realpage;
 					chip->pagecache.bitflips = ret;
@@ -3241,7 +3199,7 @@
 					/* Invalidate page cache */
 					chip->pagecache.page = -1;
 				}
-				memcpy(buf, chip->data_buf + col, bytes);
+				memcpy(buf, bufpoi + col, bytes);
 			}
 
 			if (unlikely(oob)) {
@@ -3256,7 +3214,7 @@
 
 			nand_wait_readrdy(chip);
 
-			if (mtd->ecc_stats.failed - ecc_failures) {
+			if (mtd->ecc_stats.failed - ecc_stats.failed) {
 				if (retry_mode + 1 < chip->read_retries) {
 					retry_mode++;
 					ret = nand_setup_read_retry(chip,
@@ -3264,8 +3222,8 @@
 					if (ret < 0)
 						break;
 
-					/* Reset failures; retry */
-					mtd->ecc_stats.failed = ecc_failures;
+					/* Reset ecc_stats; retry */
+					mtd->ecc_stats = ecc_stats;
 					goto read_retry;
 				} else {
 					/* No more retry modes; real failure */
@@ -3374,7 +3332,7 @@
 			sndrnd = 1;
 		toread = min_t(int, length, chunk);
 
-		ret = nand_read_data_op(chip, bufpoi, toread, false);
+		ret = nand_read_data_op(chip, bufpoi, toread, false, false);
 		if (ret)
 			return ret;
 
@@ -3382,7 +3340,7 @@
 		length -= toread;
 	}
 	if (length > 0) {
-		ret = nand_read_data_op(chip, bufpoi, length, false);
+		ret = nand_read_data_op(chip, bufpoi, length, false, false);
 		if (ret)
 			return ret;
 	}
@@ -3635,6 +3593,42 @@
 EXPORT_SYMBOL(nand_write_page_raw);
 
 /**
+ * nand_monolithic_write_page_raw - Monolithic page write in raw mode
+ * @chip: NAND chip info structure
+ * @buf: data buffer to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * This is a raw page write, i.e. without any error detection/correction.
+ * Monolithic means we are requesting all the relevant data (main plus
+ * possibly OOB) to be sent over the bus and effectively programmed
+ * into the NAND chip arrays in a single operation. This is an
+ * alternative to nand_write_page_raw(), which first sends the main
+ * data, then possibly sends the OOB data by latching more data
+ * cycles on the NAND bus, and finally sends the program command to
+ * synchronize the NAND chip cache.
+ */
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
+				   int oob_required, int page)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	unsigned int size = mtd->writesize;
+	u8 *write_buf = (u8 *)buf;
+
+	if (oob_required) {
+		size += mtd->oobsize;
+
+		if (buf != chip->data_buf) {
+			write_buf = nand_get_data_buf(chip);
+			memcpy(write_buf, buf, mtd->writesize);
+		}
+	}
+
+	return nand_prog_page_op(chip, page, 0, write_buf, size);
+}
+EXPORT_SYMBOL(nand_monolithic_write_page_raw);
+
+/**
  * nand_write_page_raw_syndrome - [INTERN] raw page write function
  * @chip: nand chip info structure
  * @buf: data buffer
@@ -4013,20 +4007,23 @@
 	while (1) {
 		int bytes = mtd->writesize;
 		uint8_t *wbuf = buf;
-		int use_bufpoi;
+		int use_bounce_buf;
 		int part_pagewr = (column || writelen < mtd->writesize);
 
 		if (part_pagewr)
-			use_bufpoi = 1;
-		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
-			use_bufpoi = !virt_addr_valid(buf) ||
-				     !IS_ALIGNED((unsigned long)buf,
-						 chip->buf_align);
+			use_bounce_buf = 1;
+		else if (chip->options & NAND_USES_DMA)
+			use_bounce_buf = !virt_addr_valid(buf) ||
+					 !IS_ALIGNED((unsigned long)buf,
+						     chip->buf_align);
 		else
-			use_bufpoi = 0;
+			use_bounce_buf = 0;
 
-		/* Partial page write?, or need to use bounce buffer */
-		if (use_bufpoi) {
+		/*
+		 * Copy the data from the initial buffer when doing partial page
+		 * writes or when a bounce buffer is required.
+		 */
+		if (use_bounce_buf) {
 			pr_debug("%s: using write bounce buffer for buf@%p\n",
 					 __func__, buf);
 			if (part_pagewr)
@@ -4327,16 +4324,22 @@
 /**
  * nand_suspend - [MTD Interface] Suspend the NAND flash
  * @mtd: MTD device structure
+ *
+ * Returns 0 for success or negative error code otherwise.
  */
 static int nand_suspend(struct mtd_info *mtd)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
+	int ret = 0;
 
 	mutex_lock(&chip->lock);
-	chip->suspended = 1;
+	if (chip->ops.suspend)
+		ret = chip->ops.suspend(chip);
+	if (!ret)
+		chip->suspended = 1;
 	mutex_unlock(&chip->lock);
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -4348,11 +4351,14 @@
 	struct nand_chip *chip = mtd_to_nand(mtd);
 
 	mutex_lock(&chip->lock);
-	if (chip->suspended)
+	if (chip->suspended) {
+		if (chip->ops.resume)
+			chip->ops.resume(chip);
 		chip->suspended = 0;
-	else
+	} else {
 		pr_err("%s called for a chip which is not in suspended state\n",
 			__func__);
+	}
 	mutex_unlock(&chip->lock);
 }
 
@@ -4366,6 +4372,38 @@
 	nand_suspend(mtd);
 }
 
+/**
+ * nand_lock - [MTD Interface] Lock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to lock (must be a multiple of block/page size)
+ */
+static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (!chip->ops.lock_area)
+		return -ENOTSUPP;
+
+	return chip->ops.lock_area(chip, ofs, len);
+}
+
+/**
+ * nand_unlock - [MTD Interface] Unlock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to unlock (must be a multiple of block/page size)
+ */
+static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (!chip->ops.unlock_area)
+		return -ENOTSUPP;
+
+	return chip->ops.unlock_area(chip, ofs, len);
+}
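
These wrappers only forward to chip->ops.lock_area/unlock_area, so block protection becomes reachable through mtd_lock()/mtd_unlock() once a manufacturer or controller driver fills in those hooks. A hedged sketch; the callback name and its body are assumptions, not an existing driver:

/* Hypothetical vendor hook; the protection mechanism is device-specific. */
static int example_lock_area(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
	/* e.g. issue a vendor-specific block-protection command here */
	return 0;
}

static void example_init(struct nand_chip *chip)
{
	chip->ops.lock_area = example_lock_area;
	/* mtd_lock(mtd, ofs, len) now reaches example_lock_area() */
}
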
+
 /* Set default functions */
 static void nand_set_defaults(struct nand_chip *chip)
 {
@@ -4553,6 +4591,8 @@
 static bool find_full_id_nand(struct nand_chip *chip,
 			      struct nand_flash_dev *type)
 {
+	struct nand_device *base = &chip->base;
+	struct nand_ecc_props requirements;
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct nand_memory_organization *memorg;
 	u8 *id_data = chip->id.data;
@@ -4574,10 +4614,9 @@
 					   memorg->pagesize *
 					   memorg->pages_per_eraseblock);
 		chip->options |= type->options;
-		chip->base.eccreq.strength = NAND_ECC_STRENGTH(type);
-		chip->base.eccreq.step_size = NAND_ECC_STEP(type);
-		chip->onfi_timing_mode_default =
-					type->onfi_timing_mode_default;
+		requirements.strength = NAND_ECC_STRENGTH(type);
+		requirements.step_size = NAND_ECC_STEP(type);
+		nanddev_set_ecc_requirements(base, &requirements);
 
 		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
 		if (!chip->parameters.model)
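
The conversion pattern used here recurs throughout the vendor files below: fill a local nand_ecc_props and commit it through the new accessor instead of poking chip->base.eccreq directly. A condensed sketch (the strength/step values are example numbers):

struct nand_ecc_props requirements = {
	.strength = 4,		/* example: a 4-bit/512-byte chip */
	.step_size = 512,
};

nanddev_set_ecc_requirements(&chip->base, &requirements);
/* read back later with nanddev_get_ecc_requirements(&chip->base) */
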
@@ -4643,9 +4682,9 @@
 }
 
 static const char *
-nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
+nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
 {
-	return manufacturer ? manufacturer->name : "Unknown";
+	return manufacturer_desc ? manufacturer_desc->name : "Unknown";
 }
 
 /*
@@ -4653,7 +4692,7 @@
  */
 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
 {
-	const struct nand_manufacturer *manufacturer;
+	const struct nand_manufacturer_desc *manufacturer_desc;
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct nand_memory_organization *memorg;
 	int busw, ret;
@@ -4710,8 +4749,8 @@
 	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
 
 	/* Try to identify manufacturer */
-	manufacturer = nand_get_manufacturer(maf_id);
-	chip->manufacturer.desc = manufacturer;
+	manufacturer_desc = nand_get_manufacturer_desc(maf_id);
+	chip->manufacturer.desc = manufacturer_desc;
 
 	if (!type)
 		type = nand_flash_ids;
@@ -4790,7 +4829,7 @@
 		 */
 		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
 			maf_id, dev_id);
-		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
+		pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
 			mtd->name);
 		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
 			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
@@ -4825,7 +4864,7 @@
 
 	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
 		maf_id, dev_id);
-	pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
+	pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
 		chip->parameters.model);
 	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
 		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
@@ -4838,90 +4877,101 @@
 	return ret;
 }
 
-static const char * const nand_ecc_modes[] = {
-	[NAND_ECC_NONE]		= "none",
-	[NAND_ECC_SOFT]		= "soft",
-	[NAND_ECC_HW]		= "hw",
-	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
-	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
-	[NAND_ECC_ON_DIE]	= "on-die",
-};
-
-static int of_get_nand_ecc_mode(struct device_node *np)
+static enum nand_ecc_engine_type
+of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
 {
+	enum nand_ecc_legacy_mode {
+		NAND_ECC_INVALID,
+		NAND_ECC_NONE,
+		NAND_ECC_SOFT,
+		NAND_ECC_SOFT_BCH,
+		NAND_ECC_HW,
+		NAND_ECC_HW_SYNDROME,
+		NAND_ECC_ON_DIE,
+	};
+	const char * const nand_ecc_legacy_modes[] = {
+		[NAND_ECC_NONE]		= "none",
+		[NAND_ECC_SOFT]		= "soft",
+		[NAND_ECC_SOFT_BCH]	= "soft_bch",
+		[NAND_ECC_HW]		= "hw",
+		[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
+		[NAND_ECC_ON_DIE]	= "on-die",
+	};
+	enum nand_ecc_legacy_mode eng_type;
 	const char *pm;
-	int err, i;
+	int err;
 
 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
-	if (err < 0)
-		return err;
+	if (err)
+		return NAND_ECC_ENGINE_TYPE_INVALID;
 
-	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
-		if (!strcasecmp(pm, nand_ecc_modes[i]))
-			return i;
-
-	/*
-	 * For backward compatibility we support few obsoleted values that don't
-	 * have their mappings into nand_ecc_modes_t anymore (they were merged
-	 * with other enums).
-	 */
-	if (!strcasecmp(pm, "soft_bch"))
-		return NAND_ECC_SOFT;
-
-	return -ENODEV;
-}
-
-static const char * const nand_ecc_algos[] = {
-	[NAND_ECC_HAMMING]	= "hamming",
-	[NAND_ECC_BCH]		= "bch",
-	[NAND_ECC_RS]		= "rs",
-};
-
-static int of_get_nand_ecc_algo(struct device_node *np)
-{
-	const char *pm;
-	int err, i;
-
-	err = of_property_read_string(np, "nand-ecc-algo", &pm);
-	if (!err) {
-		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
-			if (!strcasecmp(pm, nand_ecc_algos[i]))
-				return i;
-		return -ENODEV;
+	for (eng_type = NAND_ECC_NONE;
+	     eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
+		if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
+			switch (eng_type) {
+			case NAND_ECC_NONE:
+				return NAND_ECC_ENGINE_TYPE_NONE;
+			case NAND_ECC_SOFT:
+			case NAND_ECC_SOFT_BCH:
+				return NAND_ECC_ENGINE_TYPE_SOFT;
+			case NAND_ECC_HW:
+			case NAND_ECC_HW_SYNDROME:
+				return NAND_ECC_ENGINE_TYPE_ON_HOST;
+			case NAND_ECC_ON_DIE:
+				return NAND_ECC_ENGINE_TYPE_ON_DIE;
+			default:
+				break;
+			}
+		}
 	}
 
-	/*
-	 * For backward compatibility we also read "nand-ecc-mode" checking
-	 * for some obsoleted values that were specifying ECC algorithm.
-	 */
+	return NAND_ECC_ENGINE_TYPE_INVALID;
+}
+
+static enum nand_ecc_placement
+of_get_rawnand_ecc_placement_legacy(struct device_node *np)
+{
+	const char *pm;
+	int err;
+
 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
-	if (err < 0)
-		return err;
+	if (!err) {
+		if (!strcasecmp(pm, "hw_syndrome"))
+			return NAND_ECC_PLACEMENT_INTERLEAVED;
+	}
 
-	if (!strcasecmp(pm, "soft"))
-		return NAND_ECC_HAMMING;
-	else if (!strcasecmp(pm, "soft_bch"))
-		return NAND_ECC_BCH;
-
-	return -ENODEV;
+	return NAND_ECC_PLACEMENT_UNKNOWN;
 }
 
-static int of_get_nand_ecc_step_size(struct device_node *np)
+static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
 {
-	int ret;
-	u32 val;
+	const char *pm;
+	int err;
 
-	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
-	return ret ? ret : val;
+	err = of_property_read_string(np, "nand-ecc-mode", &pm);
+	if (!err) {
+		if (!strcasecmp(pm, "soft"))
+			return NAND_ECC_ALGO_HAMMING;
+		else if (!strcasecmp(pm, "soft_bch"))
+			return NAND_ECC_ALGO_BCH;
+	}
+
+	return NAND_ECC_ALGO_UNKNOWN;
 }
 
-static int of_get_nand_ecc_strength(struct device_node *np)
+static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
 {
-	int ret;
-	u32 val;
+	struct device_node *dn = nand_get_flash_node(chip);
+	struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
 
-	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
-	return ret ? ret : val;
+	if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+		user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
+
+	if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
+		user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
+
+	if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
+		user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
 }
 
 static int of_get_nand_bus_width(struct device_node *np)
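
Taken together, the three legacy parsers above map the old "nand-ecc-mode" strings as follows (a summary of the code above, not additional kernel code):

/*
 * "none"        -> NAND_ECC_ENGINE_TYPE_NONE
 * "soft"        -> NAND_ECC_ENGINE_TYPE_SOFT,    algo HAMMING
 * "soft_bch"    -> NAND_ECC_ENGINE_TYPE_SOFT,    algo BCH
 * "hw"          -> NAND_ECC_ENGINE_TYPE_ON_HOST
 * "hw_syndrome" -> NAND_ECC_ENGINE_TYPE_ON_HOST, placement INTERLEAVED
 * "on-die"      -> NAND_ECC_ENGINE_TYPE_ON_DIE
 */
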
@@ -4945,10 +4995,10 @@
 	return of_property_read_bool(np, "nand-on-flash-bbt");
 }
 
-static int nand_dt_init(struct nand_chip *chip)
+static int rawnand_dt_init(struct nand_chip *chip)
 {
+	struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
 	struct device_node *dn = nand_get_flash_node(chip);
-	int ecc_mode, ecc_algo, ecc_strength, ecc_step;
 
 	if (!dn)
 		return 0;
@@ -4962,25 +5012,29 @@
 	if (of_get_nand_on_flash_bbt(dn))
 		chip->bbt_options |= NAND_BBT_USE_FLASH;
 
-	ecc_mode = of_get_nand_ecc_mode(dn);
-	ecc_algo = of_get_nand_ecc_algo(dn);
-	ecc_strength = of_get_nand_ecc_strength(dn);
-	ecc_step = of_get_nand_ecc_step_size(dn);
+	of_get_nand_ecc_user_config(nand);
+	of_get_nand_ecc_legacy_user_config(chip);
 
-	if (ecc_mode >= 0)
-		chip->ecc.mode = ecc_mode;
+	/*
+	 * If neither the user nor the NAND controller has requested a specific
+	 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
+	 */
+	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 
-	if (ecc_algo >= 0)
-		chip->ecc.algo = ecc_algo;
+	/*
+	 * Use the user-requested engine type if one was given; otherwise
+	 * default to the NAND controller's choice, and fall back to the raw
+	 * NAND default as a last resort.
+	 */
+	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
+		chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+		chip->ecc.engine_type = nand->ecc.defaults.engine_type;
 
-	if (ecc_strength >= 0)
-		chip->ecc.strength = ecc_strength;
-
-	if (ecc_step > 0)
-		chip->ecc.size = ecc_step;
-
-	if (of_property_read_bool(dn, "nand-ecc-maximize"))
-		chip->ecc.options |= NAND_ECC_MAXIMIZE;
+	chip->ecc.placement = nand->ecc.user_conf.placement;
+	chip->ecc.algo = nand->ecc.user_conf.algo;
+	chip->ecc.strength = nand->ecc.user_conf.strength;
+	chip->ecc.size = nand->ecc.user_conf.step_size;
 
 	return 0;
 }
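
The resolution order implemented above condenses into one helper; an illustrative sketch, not a function this patch adds:

static enum nand_ecc_engine_type
resolve_engine_type(struct nand_chip *chip, struct nand_device *nand)
{
	/* 1. an explicit user/DT request wins */
	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		return nand->ecc.user_conf.engine_type;
	/* 2. then whatever the controller driver pre-selected */
	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		return chip->ecc.engine_type;
	/* 3. finally the raw NAND default (the on-host engine) */
	return nand->ecc.defaults.engine_type;
}
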
@@ -5016,9 +5070,9 @@
 	mutex_init(&chip->lock);
 
 	/* Enforce the right timings for reset/detection */
-	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
+	chip->current_interface_config = nand_get_reset_interface_config();
 
-	ret = nand_dt_init(chip);
+	ret = rawnand_dt_init(chip);
 	if (ret)
 		return ret;
 
@@ -5085,23 +5139,85 @@
 	kfree(chip->parameters.onfi);
 }
 
+static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
+{
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	switch (ecc->placement) {
+	case NAND_ECC_PLACEMENT_UNKNOWN:
+	case NAND_ECC_PLACEMENT_OOB:
+		/* Use standard hwecc read page function? */
+		if (!ecc->read_page)
+			ecc->read_page = nand_read_page_hwecc;
+		if (!ecc->write_page)
+			ecc->write_page = nand_write_page_hwecc;
+		if (!ecc->read_page_raw)
+			ecc->read_page_raw = nand_read_page_raw;
+		if (!ecc->write_page_raw)
+			ecc->write_page_raw = nand_write_page_raw;
+		if (!ecc->read_oob)
+			ecc->read_oob = nand_read_oob_std;
+		if (!ecc->write_oob)
+			ecc->write_oob = nand_write_oob_std;
+		if (!ecc->read_subpage)
+			ecc->read_subpage = nand_read_subpage;
+		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
+			ecc->write_subpage = nand_write_subpage_hwecc;
+		fallthrough;
+
+	case NAND_ECC_PLACEMENT_INTERLEAVED:
+		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
+		    (!ecc->read_page ||
+		     ecc->read_page == nand_read_page_hwecc ||
+		     !ecc->write_page ||
+		     ecc->write_page == nand_write_page_hwecc)) {
+			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
+			return -EINVAL;
+		}
+		/* Use standard syndrome read/write page function? */
+		if (!ecc->read_page)
+			ecc->read_page = nand_read_page_syndrome;
+		if (!ecc->write_page)
+			ecc->write_page = nand_write_page_syndrome;
+		if (!ecc->read_page_raw)
+			ecc->read_page_raw = nand_read_page_raw_syndrome;
+		if (!ecc->write_page_raw)
+			ecc->write_page_raw = nand_write_page_raw_syndrome;
+		if (!ecc->read_oob)
+			ecc->read_oob = nand_read_oob_syndrome;
+		if (!ecc->write_oob)
+			ecc->write_oob = nand_write_oob_syndrome;
+		break;
+
+	default:
+		pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
+			ecc->placement);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int nand_set_ecc_soft_ops(struct nand_chip *chip)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct nand_device *nanddev = mtd_to_nanddev(mtd);
 	struct nand_ecc_ctrl *ecc = &chip->ecc;
 
-	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
+	if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
 		return -EINVAL;
 
 	switch (ecc->algo) {
-	case NAND_ECC_HAMMING:
+	case NAND_ECC_ALGO_HAMMING:
 		ecc->calculate = nand_calculate_ecc;
 		ecc->correct = nand_correct_data;
 		ecc->read_page = nand_read_page_swecc;
 		ecc->read_subpage = nand_read_subpage;
 		ecc->write_page = nand_write_page_swecc;
-		ecc->read_page_raw = nand_read_page_raw;
-		ecc->write_page_raw = nand_write_page_raw;
+		if (!ecc->read_page_raw)
+			ecc->read_page_raw = nand_read_page_raw;
+		if (!ecc->write_page_raw)
+			ecc->write_page_raw = nand_write_page_raw;
 		ecc->read_oob = nand_read_oob_std;
 		ecc->write_oob = nand_write_oob_std;
 		if (!ecc->size)
@@ -5113,7 +5229,7 @@
 			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
 
 		return 0;
-	case NAND_ECC_BCH:
+	case NAND_ECC_ALGO_BCH:
 		if (!mtd_nand_has_bch()) {
 			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
 			return -EINVAL;
@@ -5123,8 +5239,10 @@
 		ecc->read_page = nand_read_page_swecc;
 		ecc->read_subpage = nand_read_subpage;
 		ecc->write_page = nand_write_page_swecc;
-		ecc->read_page_raw = nand_read_page_raw;
-		ecc->write_page_raw = nand_write_page_raw;
+		if (!ecc->read_page_raw)
+			ecc->read_page_raw = nand_read_page_raw;
+		if (!ecc->write_page_raw)
+			ecc->write_page_raw = nand_write_page_raw;
 		ecc->read_oob = nand_read_oob_std;
 		ecc->write_oob = nand_write_oob_std;
 
@@ -5149,7 +5267,7 @@
 				return -EINVAL;
 			}
 
-			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+			mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
 
 		}
 
@@ -5158,8 +5276,8 @@
 		 * used, otherwise we don't know how many bytes can really be
 		 * used.
 		 */
-		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
-		    ecc->options & NAND_ECC_MAXIMIZE) {
+		if (mtd->ooblayout == nand_get_large_page_ooblayout() &&
+		    nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
 			int steps, bytes;
 
 			/* Always prefer 1k blocks over 512bytes ones */
@@ -5253,10 +5371,12 @@
 nand_match_ecc_req(struct nand_chip *chip,
 		   const struct nand_ecc_caps *caps, int oobavail)
 {
+	const struct nand_ecc_props *requirements =
+		nanddev_get_ecc_requirements(&chip->base);
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	const struct nand_ecc_step_info *stepinfo;
-	int req_step = chip->base.eccreq.step_size;
-	int req_strength = chip->base.eccreq.strength;
+	int req_step = requirements->step_size;
+	int req_strength = requirements->strength;
 	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
 	int best_step, best_strength, best_ecc_bytes;
 	int best_ecc_bytes_total = INT_MAX;
@@ -5397,11 +5517,12 @@
  * @caps: ECC engine caps info structure
  * @oobavail: OOB size that the ECC engine can use
  *
- * Choose the ECC configuration according to following logic
+ * Choose the ECC configuration according to the following logic.
  *
  * 1. If both ECC step size and ECC strength are already set (usually by DT)
  *    then check if it is supported by this controller.
- * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
+ * 2. If the user provided the nand-ecc-maximize property, then select maximum
+ *    ECC strength.
  * 3. Otherwise, try to match the ECC step size and ECC strength closest
  *    to the chip's requirement. If available OOB size can't fit the chip
  *    requirement then fallback to the maximum ECC step size and ECC strength.
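
A typical call site in a controller driver, with the caps structure and the reserved OOB count as hypothetical driver-specific values:

/* my_ctrl_ecc_caps and reserved_oob are placeholders */
ret = nand_ecc_choose_conf(chip, &my_ctrl_ecc_caps,
			   mtd->oobsize - reserved_oob);
if (ret)
	return ret;	/* no configuration satisfied steps 1-3 above */
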
@@ -5412,6 +5533,7 @@
 			 const struct nand_ecc_caps *caps, int oobavail)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct nand_device *nanddev = mtd_to_nanddev(mtd);
 
 	if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
 		return -EINVAL;
@@ -5419,7 +5541,7 @@
 	if (chip->ecc.size && chip->ecc.strength)
 		return nand_check_ecc_caps(chip, caps, oobavail);
 
-	if (chip->ecc.options & NAND_ECC_MAXIMIZE)
+	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
 		return nand_maximize_ecc(chip, caps, oobavail);
 
 	if (!nand_match_ecc_req(chip, caps, oobavail))
@@ -5429,41 +5551,6 @@
 }
 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
 
-/*
- * Check if the chip configuration meet the datasheet requirements.
-
- * If our configuration corrects A bits per B bytes and the minimum
- * required correction level is X bits per Y bytes, then we must ensure
- * both of the following are true:
- *
- * (1) A / B >= X / Y
- * (2) A >= X
- *
- * Requirement (1) ensures we can correct for the required bitflip density.
- * Requirement (2) ensures we can correct even when all bitflips are clumped
- * in the same sector.
- */
-static bool nand_ecc_strength_good(struct nand_chip *chip)
-{
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int corr, ds_corr;
-
-	if (ecc->size == 0 || chip->base.eccreq.step_size == 0)
-		/* Not enough information */
-		return true;
-
-	/*
-	 * We get the number of corrected bits per page to compare
-	 * the correction density.
-	 */
-	corr = (mtd->writesize * ecc->strength) / ecc->size;
-	ds_corr = (mtd->writesize * chip->base.eccreq.strength) /
-		  chip->base.eccreq.step_size;
-
-	return corr >= ds_corr && ecc->strength >= chip->base.eccreq.strength;
-}
-
 static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
 {
 	struct nand_chip *chip = container_of(nand, struct nand_chip,
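
The removed check lives on as nand_ecc_is_strong_enough() in the generic NAND core; a worked instance of its two conditions:

/*
 * On a 4096-byte page, a configuration correcting A = 8 bits per
 * B = 512 bytes yields 8 * (4096 / 512) = 64 correctable bits per page.
 * A chip requiring X = 4 bits per Y = 512 bytes needs 32, so
 * (1) 64 >= 32 and (2) 8 >= 4 both hold and the configuration passes;
 * swapping the configured and required strengths would fail both.
 */
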
@@ -5551,15 +5638,17 @@
 	 * If no default placement scheme is given, select an appropriate one.
 	 */
 	if (!mtd->ooblayout &&
-	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
+	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+	      ecc->algo == NAND_ECC_ALGO_BCH)) {
 		switch (mtd->oobsize) {
 		case 8:
 		case 16:
-			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
+			mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
 			break;
 		case 64:
 		case 128:
-			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
+			mtd_set_ooblayout(mtd,
+					  nand_get_large_page_hamming_ooblayout());
 			break;
 		default:
 			/*
@@ -5569,9 +5658,9 @@
 			 * page with ECC layout when ->oobsize <= 128 for
 			 * compatibility reasons.
 			 */
-			if (ecc->mode == NAND_ECC_NONE) {
+			if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
 				mtd_set_ooblayout(mtd,
-						&nand_ooblayout_lp_ops);
+						  nand_get_large_page_ooblayout());
 				break;
 			}
 
@@ -5587,61 +5676,11 @@
 	 * selected and we have 256 byte pagesize fallback to software ECC
 	 */
 
-	switch (ecc->mode) {
-	case NAND_ECC_HW_OOB_FIRST:
-		/* Similar to NAND_ECC_HW, but a separate read_page handle */
-		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
-			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
-			ret = -EINVAL;
+	switch (ecc->engine_type) {
+	case NAND_ECC_ENGINE_TYPE_ON_HOST:
+		ret = nand_set_ecc_on_host_ops(chip);
+		if (ret)
 			goto err_nand_manuf_cleanup;
-		}
-		if (!ecc->read_page)
-			ecc->read_page = nand_read_page_hwecc_oob_first;
-		/* fall through */
-
-	case NAND_ECC_HW:
-		/* Use standard hwecc read page function? */
-		if (!ecc->read_page)
-			ecc->read_page = nand_read_page_hwecc;
-		if (!ecc->write_page)
-			ecc->write_page = nand_write_page_hwecc;
-		if (!ecc->read_page_raw)
-			ecc->read_page_raw = nand_read_page_raw;
-		if (!ecc->write_page_raw)
-			ecc->write_page_raw = nand_write_page_raw;
-		if (!ecc->read_oob)
-			ecc->read_oob = nand_read_oob_std;
-		if (!ecc->write_oob)
-			ecc->write_oob = nand_write_oob_std;
-		if (!ecc->read_subpage)
-			ecc->read_subpage = nand_read_subpage;
-		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
-			ecc->write_subpage = nand_write_subpage_hwecc;
-		/* fall through */
-
-	case NAND_ECC_HW_SYNDROME:
-		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
-		    (!ecc->read_page ||
-		     ecc->read_page == nand_read_page_hwecc ||
-		     !ecc->write_page ||
-		     ecc->write_page == nand_write_page_hwecc)) {
-			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
-			ret = -EINVAL;
-			goto err_nand_manuf_cleanup;
-		}
-		/* Use standard syndrome read/write page function? */
-		if (!ecc->read_page)
-			ecc->read_page = nand_read_page_syndrome;
-		if (!ecc->write_page)
-			ecc->write_page = nand_write_page_syndrome;
-		if (!ecc->read_page_raw)
-			ecc->read_page_raw = nand_read_page_raw_syndrome;
-		if (!ecc->write_page_raw)
-			ecc->write_page_raw = nand_write_page_raw_syndrome;
-		if (!ecc->read_oob)
-			ecc->read_oob = nand_read_oob_syndrome;
-		if (!ecc->write_oob)
-			ecc->write_oob = nand_write_oob_syndrome;
 
 		if (mtd->writesize >= ecc->size) {
 			if (!ecc->strength) {
@@ -5653,19 +5692,17 @@
 		}
 		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
 			ecc->size, mtd->writesize);
-		ecc->mode = NAND_ECC_SOFT;
-		ecc->algo = NAND_ECC_HAMMING;
-		/* fall through */
+		ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+		ecc->algo = NAND_ECC_ALGO_HAMMING;
+		fallthrough;
 
-	case NAND_ECC_SOFT:
+	case NAND_ECC_ENGINE_TYPE_SOFT:
 		ret = nand_set_ecc_soft_ops(chip);
-		if (ret) {
-			ret = -EINVAL;
+		if (ret)
 			goto err_nand_manuf_cleanup;
-		}
 		break;
 
-	case NAND_ECC_ON_DIE:
+	case NAND_ECC_ENGINE_TYPE_ON_DIE:
 		if (!ecc->read_page || !ecc->write_page) {
 			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
 			ret = -EINVAL;
@@ -5677,8 +5714,8 @@
 			ecc->write_oob = nand_write_oob_std;
 		break;
 
-	case NAND_ECC_NONE:
-		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
+	case NAND_ECC_ENGINE_TYPE_NONE:
+		pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
 		ecc->read_page = nand_read_page_raw;
 		ecc->write_page = nand_write_page_raw;
 		ecc->read_oob = nand_read_oob_std;
@@ -5691,7 +5728,7 @@
 		break;
 
 	default:
-		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
+		WARN(1, "Invalid ECC engine type %d\n", ecc->engine_type);
 		ret = -EINVAL;
 		goto err_nand_manuf_cleanup;
 	}
@@ -5725,7 +5762,10 @@
 		ret = -EINVAL;
 		goto err_nand_manuf_cleanup;
 	}
+
 	ecc->total = ecc->steps * ecc->bytes;
+	chip->base.ecc.ctx.total = ecc->total;
+
 	if (ecc->total > mtd->oobsize) {
 		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
 		ret = -EINVAL;
@@ -5743,9 +5783,11 @@
 	mtd->oobavail = ret;
 
 	/* ECC sanity check: warn if it's too weak */
-	if (!nand_ecc_strength_good(chip))
-		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
-			mtd->name);
+	if (!nand_ecc_is_strong_enough(&chip->base))
+		pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
+			mtd->name, chip->ecc.strength, chip->ecc.size,
+			nanddev_get_ecc_requirements(&chip->base)->strength,
+			nanddev_get_ecc_requirements(&chip->base)->step_size);
 
 	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
@@ -5766,8 +5808,8 @@
 	chip->pagecache.page = -1;
 
 	/* Large page NAND with SOFT_ECC should support subpage reads */
-	switch (ecc->mode) {
-	case NAND_ECC_SOFT:
+	switch (ecc->engine_type) {
+	case NAND_ECC_ENGINE_TYPE_SOFT:
 		if (chip->page_shift > 9)
 			chip->options |= NAND_SUBPAGE_READ;
 		break;
@@ -5792,8 +5834,8 @@
 	mtd->_read_oob = nand_read_oob;
 	mtd->_write_oob = nand_write_oob;
 	mtd->_sync = nand_sync;
-	mtd->_lock = NULL;
-	mtd->_unlock = NULL;
+	mtd->_lock = nand_lock;
+	mtd->_unlock = nand_unlock;
 	mtd->_suspend = nand_suspend;
 	mtd->_resume = nand_resume;
 	mtd->_reboot = nand_shutdown;
@@ -5810,16 +5852,16 @@
 	if (!mtd->bitflip_threshold)
 		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
 
-	/* Initialize the ->data_interface field. */
-	ret = nand_init_data_interface(chip);
+	/* Find the fastest data interface for this chip */
+	ret = nand_choose_interface_config(chip);
 	if (ret)
 		goto err_nanddev_cleanup;
 
 	/* Enter fastest possible mode on all dies. */
 	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
-		ret = nand_setup_data_interface(chip, i);
+		ret = nand_setup_interface(chip, i);
 		if (ret)
-			goto err_nanddev_cleanup;
+			goto err_free_interface_config;
 	}
 
 	/* Check, if we should skip the bad block table scan */
@@ -5829,10 +5871,12 @@
 	/* Build bad block table */
 	ret = nand_create_bbt(chip);
 	if (ret)
-		goto err_nanddev_cleanup;
+		goto err_free_interface_config;
 
 	return 0;
 
+err_free_interface_config:
+	kfree(chip->best_interface_config);
 
 err_nanddev_cleanup:
 	nanddev_cleanup(&chip->base);
@@ -5909,8 +5953,8 @@
  */
 void nand_cleanup(struct nand_chip *chip)
 {
-	if (chip->ecc.mode == NAND_ECC_SOFT &&
-	    chip->ecc.algo == NAND_ECC_BCH)
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+	    chip->ecc.algo == NAND_ECC_ALGO_BCH)
 		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
 
 	nanddev_cleanup(&chip->base);
@@ -5926,6 +5970,9 @@
 			& NAND_BBT_DYNAMICSTRUCT)
 		kfree(chip->badblock_pattern);
 
+	/* Free the data interface */
+	kfree(chip->best_interface_config);
+
 	/* Free manufacturer priv data. */
 	nand_manufacturer_cleanup(chip);
 
@@ -5938,18 +5985,6 @@
 
 EXPORT_SYMBOL_GPL(nand_cleanup);
 
-/**
- * nand_release - [NAND Interface] Unregister the MTD device and free resources
- *		  held by the NAND device
- * @chip: NAND chip object
- */
-void nand_release(struct nand_chip *chip)
-{
-	mtd_device_unregister(nand_to_mtd(chip));
-	nand_cleanup(chip);
-}
-EXPORT_SYMBOL_GPL(nand_release);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index 96045d6..344a24f 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -1226,7 +1226,7 @@
 		return -ENOMEM;
 
 	/*
-	 * If no primary table decriptor is given, scan the device to build a
+	 * If no primary table descriptor is given, scan the device to build a
 	 * memory based bad block table.
 	 */
 	if (!td) {
diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c
index 1752731..9d19ac1 100644
--- a/drivers/mtd/nand/raw/nand_bch.c
+++ b/drivers/mtd/nand/raw/nand_bch.c
@@ -41,7 +41,7 @@
 	unsigned int i;
 
 	memset(code, 0, chip->ecc.bytes);
-	encode_bch(nbc->bch, buf, chip->ecc.size, code);
+	bch_encode(nbc->bch, buf, chip->ecc.size, code);
 
 	/* apply mask so that an erased page is a valid codeword */
 	for (i = 0; i < chip->ecc.bytes; i++)
@@ -67,7 +67,7 @@
 	unsigned int *errloc = nbc->errloc;
 	int i, count;
 
-	count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
+	count = bch_decode(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
 			   NULL, errloc);
 	if (count > 0) {
 		for (i = 0; i < count; i++) {
@@ -130,7 +130,7 @@
 	if (!nbc)
 		goto fail;
 
-	nbc->bch = init_bch(m, t, 0);
+	nbc->bch = bch_init(m, t, 0, false);
 	if (!nbc->bch)
 		goto fail;
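
The lib/bch entry points were renamed (init_bch, encode_bch, decode_bch and free_bch became bch_init, bch_encode, bch_decode and bch_free) and bch_init() gained a swap_bits argument; passing false preserves the historical bit ordering. A minimal sketch of the renamed API (the values are illustrative):

#include <linux/bch.h>

static int bch_demo(const u8 *data)	/* data: 512 bytes */
{
	/* m = 13, t = 4 -> DIV_ROUND_UP(13 * 4, 8) = 7 ECC bytes */
	struct bch_control *bch = bch_init(13, 4, 0, false);
	u8 code[7] = { 0 };

	if (!bch)
		return -EINVAL;

	bch_encode(bch, data, 512, code);
	bch_free(bch);

	return 0;
}
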
 
@@ -165,6 +165,7 @@
 	 */
 	nand->ecc.steps = eccsteps;
 	nand->ecc.total = eccsteps * eccbytes;
+	nand->base.ecc.ctx.total = nand->ecc.total;
 	if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
 		pr_warn("invalid ecc layout\n");
 		goto fail;
@@ -182,7 +183,7 @@
 		goto fail;
 
 	memset(erased_page, 0xff, eccsize);
-	encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
+	bch_encode(nbc->bch, erased_page, eccsize, nbc->eccmask);
 	kfree(erased_page);
 
 	for (i = 0; i < eccbytes; i++)
@@ -205,7 +206,7 @@
 void nand_bch_free(struct nand_bch_control *nbc)
 {
 	if (nbc) {
-		free_bch(nbc->bch);
+		bch_free(nbc->bch);
 		kfree(nbc->errloc);
 		kfree(nbc->eccmask);
 		kfree(nbc);
diff --git a/drivers/mtd/nand/raw/nand_ecc.c b/drivers/mtd/nand/raw/nand_ecc.c
index 09fdced..b6a46b1 100644
--- a/drivers/mtd/nand/raw/nand_ecc.c
+++ b/drivers/mtd/nand/raw/nand_ecc.c
@@ -131,7 +131,7 @@
 	/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
 	uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
 	uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
-	uint32_t uninitialized_var(rp17);	/* to make compiler happy */
+	uint32_t rp17;
 	uint32_t par;		/* the cumulative parity for all data */
 	uint32_t tmppar;	/* the cumulative parity for this iteration;
 				   for rp12, rp14 and rp16 at the end of the
diff --git a/drivers/mtd/nand/raw/nand_esmt.c b/drivers/mtd/nand/raw/nand_esmt.c
index 3338c68..4412c40 100644
--- a/drivers/mtd/nand/raw/nand_esmt.c
+++ b/drivers/mtd/nand/raw/nand_esmt.c
@@ -10,27 +10,32 @@
 
 static void esmt_nand_decode_id(struct nand_chip *chip)
 {
+	struct nand_device *base = &chip->base;
+	struct nand_ecc_props requirements = {};
+
 	nand_decode_ext_id(chip);
 
 	/* Extract ECC requirements from 5th id byte. */
 	if (chip->id.len >= 5 && nand_is_slc(chip)) {
-		chip->base.eccreq.step_size = 512;
+		requirements.step_size = 512;
 		switch (chip->id.data[4] & 0x3) {
 		case 0x0:
-			chip->base.eccreq.strength = 4;
+			requirements.strength = 4;
 			break;
 		case 0x1:
-			chip->base.eccreq.strength = 2;
+			requirements.strength = 2;
 			break;
 		case 0x2:
-			chip->base.eccreq.strength = 1;
+			requirements.strength = 1;
 			break;
 		default:
 			WARN(1, "Could not get ECC info");
-			chip->base.eccreq.step_size = 0;
+			requirements.step_size = 0;
 			break;
 		}
 	}
+
+	nanddev_set_ecc_requirements(base, &requirements);
 }
 
 static int esmt_nand_init(struct nand_chip *chip)
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index 194e422..a9f50c9 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -26,7 +26,7 @@
 struct hynix_read_retry {
 	int nregs;
 	const u8 *regs;
-	u8 values[0];
+	u8 values[];
 };
 
 /**
@@ -337,7 +337,7 @@
 	rr->nregs = nregs;
 	rr->regs = hynix_1xnm_mlc_read_retry_regs;
 	hynix->read_retry = rr;
-	chip->setup_read_retry = hynix_nand_setup_read_retry;
+	chip->ops.setup_read_retry = hynix_nand_setup_read_retry;
 	chip->read_retries = nmodes;
 
 out:
@@ -495,34 +495,36 @@
 static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
 						bool valid_jedecid)
 {
+	struct nand_device *base = &chip->base;
+	struct nand_ecc_props requirements = {};
 	u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;
 
 	if (valid_jedecid) {
 		/* Reference: H27UCG8T2E datasheet */
-		chip->base.eccreq.step_size = 1024;
+		requirements.step_size = 1024;
 
 		switch (ecc_level) {
 		case 0:
-			chip->base.eccreq.step_size = 0;
-			chip->base.eccreq.strength = 0;
+			requirements.step_size = 0;
+			requirements.strength = 0;
 			break;
 		case 1:
-			chip->base.eccreq.strength = 4;
+			requirements.strength = 4;
 			break;
 		case 2:
-			chip->base.eccreq.strength = 24;
+			requirements.strength = 24;
 			break;
 		case 3:
-			chip->base.eccreq.strength = 32;
+			requirements.strength = 32;
 			break;
 		case 4:
-			chip->base.eccreq.strength = 40;
+			requirements.strength = 40;
 			break;
 		case 5:
-			chip->base.eccreq.strength = 50;
+			requirements.strength = 50;
 			break;
 		case 6:
-			chip->base.eccreq.strength = 60;
+			requirements.strength = 60;
 			break;
 		default:
 			/*
@@ -543,14 +545,14 @@
 		if (nand_tech < 3) {
 			/* > 26nm, reference: H27UBG8T2A datasheet */
 			if (ecc_level < 5) {
-				chip->base.eccreq.step_size = 512;
-				chip->base.eccreq.strength = 1 << ecc_level;
+				requirements.step_size = 512;
+				requirements.strength = 1 << ecc_level;
 			} else if (ecc_level < 7) {
 				if (ecc_level == 5)
-					chip->base.eccreq.step_size = 2048;
+					requirements.step_size = 2048;
 				else
-					chip->base.eccreq.step_size = 1024;
-				chip->base.eccreq.strength = 24;
+					requirements.step_size = 1024;
+				requirements.strength = 24;
 			} else {
 				/*
 				 * We should never reach this case, but if that
@@ -563,18 +565,20 @@
 		} else {
 			/* <= 26nm, reference: H27UBG8T2B datasheet */
 			if (!ecc_level) {
-				chip->base.eccreq.step_size = 0;
-				chip->base.eccreq.strength = 0;
+				requirements.step_size = 0;
+				requirements.strength = 0;
 			} else if (ecc_level < 5) {
-				chip->base.eccreq.step_size = 512;
-				chip->base.eccreq.strength = 1 << (ecc_level - 1);
+				requirements.step_size = 512;
+				requirements.strength = 1 << (ecc_level - 1);
 			} else {
-				chip->base.eccreq.step_size = 1024;
-				chip->base.eccreq.strength = 24 +
+				requirements.step_size = 1024;
+				requirements.strength = 24 +
 							(8 * (ecc_level - 5));
 			}
 		}
 	}
+
+	nanddev_set_ecc_requirements(base, &requirements);
 }
 
 static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
@@ -673,6 +677,15 @@
 	nand_set_manufacturer_data(chip, NULL);
 }
 
+static int
+h27ucg8t2atrbc_choose_interface_config(struct nand_chip *chip,
+				       struct nand_interface_config *iface)
+{
+	onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);
+
+	return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
 static int hynix_nand_init(struct nand_chip *chip)
 {
 	struct hynix_nand *hynix;
@@ -689,6 +702,11 @@
 
 	nand_set_manufacturer_data(chip, hynix);
 
+	if (!strncmp("H27UCG8T2ATR-BC", chip->parameters.model,
+		     sizeof("H27UCG8T2ATR-BC") - 1))
+		chip->ops.choose_interface_config =
+			h27ucg8t2atrbc_choose_interface_config;
+
 	ret = hynix_nand_rr_init(chip);
 	if (ret)
 		hynix_nand_cleanup(chip);
diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c
index ba27902..b994579 100644
--- a/drivers/mtd/nand/raw/nand_ids.c
+++ b/drivers/mtd/nand/raw/nand_ids.c
@@ -28,8 +28,7 @@
 	 */
 	{"TC58NVG0S3E 1G 3.3V 8-bit",
 		{ .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
-		  SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512),
-		  2 },
+		  SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), },
 	{"TC58NVG2S0F 4G 3.3V 8-bit",
 		{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
 		  SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
@@ -51,7 +50,10 @@
 	{"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
 		{ .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
 		  SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
-		  NAND_ECC_INFO(40, SZ_1K), 4 },
+		  NAND_ECC_INFO(40, SZ_1K) },
+	{"TH58NVG2S3HBAI4 4G 3.3V 8-bit",
+		{ .id = {0x98, 0xdc, 0x91, 0x15, 0x76} },
+		  SZ_2K, SZ_512, SZ_128K, 0, 5, 128, NAND_ECC_INFO(8, SZ_512) },
 
 	LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),
 	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
@@ -166,7 +168,7 @@
 };
 
 /* Manufacturer IDs */
-static const struct nand_manufacturer nand_manufacturers[] = {
+static const struct nand_manufacturer_desc nand_manufacturer_descs[] = {
 	{NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops},
 	{NAND_MFR_ATO, "ATO"},
 	{NAND_MFR_EON, "Eon"},
@@ -186,20 +188,20 @@
 };
 
 /**
- * nand_get_manufacturer - Get manufacturer information from the manufacturer
- *			   ID
+ * nand_get_manufacturer_desc - Get manufacturer information from the
+ *                              manufacturer ID
  * @id: manufacturer ID
  *
- * Returns a pointer a nand_manufacturer object if the manufacturer is defined
+ * Returns a nand_manufacturer_desc object if the manufacturer is defined
  * in the NAND manufacturers database, NULL otherwise.
  */
-const struct nand_manufacturer *nand_get_manufacturer(u8 id)
+const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(nand_manufacturers); i++)
-		if (nand_manufacturers[i].id == id)
-			return &nand_manufacturers[i];
+	for (i = 0; i < ARRAY_SIZE(nand_manufacturer_descs); i++)
+		if (nand_manufacturer_descs[i].id == id)
+			return &nand_manufacturer_descs[i];
 
 	return NULL;
 }
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index 9b540e7..85b6d93 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -16,18 +16,23 @@
 
 #include "internals.h"
 
+#define JEDEC_PARAM_PAGES 3
+
 /*
  * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
  */
 int nand_jedec_detect(struct nand_chip *chip)
 {
+	struct nand_device *base = &chip->base;
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct nand_memory_organization *memorg;
 	struct nand_jedec_params *p;
 	struct jedec_ecc_info *ecc;
+	bool use_datain = false;
 	int jedec_version = 0;
 	char id[5];
 	int i, val, ret;
+	u16 crc;
 
 	memorg = nanddev_get_memorg(&chip->base);
 
@@ -41,25 +46,31 @@
 	if (!p)
 		return -ENOMEM;
 
-	ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
-	if (ret) {
-		ret = 0;
-		goto free_jedec_param_page;
-	}
+	if (!nand_has_exec_op(chip) ||
+	    !nand_read_data_op(chip, p, sizeof(*p), true, true))
+		use_datain = true;
 
-	for (i = 0; i < 3; i++) {
-		ret = nand_read_data_op(chip, p, sizeof(*p), true);
+	for (i = 0; i < JEDEC_PARAM_PAGES; i++) {
+		if (!i)
+			ret = nand_read_param_page_op(chip, 0x40, p,
+						      sizeof(*p));
+		else if (use_datain)
+			ret = nand_read_data_op(chip, p, sizeof(*p), true,
+						false);
+		else
+			ret = nand_change_read_column_op(chip, sizeof(*p) * i,
+							 p, sizeof(*p), true);
 		if (ret) {
 			ret = 0;
 			goto free_jedec_param_page;
 		}
 
-		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
-				le16_to_cpu(p->crc))
+		crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 510);
+		if (crc == le16_to_cpu(p->crc))
 			break;
 	}
 
-	if (i == 3) {
+	if (i == JEDEC_PARAM_PAGES) {
 		pr_err("Could not find valid JEDEC parameter page; aborting\n");
 		goto free_jedec_param_page;
 	}
@@ -110,8 +121,12 @@
 	ecc = &p->ecc_info[0];
 
 	if (ecc->codeword_size >= 9) {
-		chip->base.eccreq.strength = ecc->ecc_bits;
-		chip->base.eccreq.step_size = 1 << ecc->codeword_size;
+		struct nand_ecc_props requirements = {
+			.strength = ecc->ecc_bits,
+			.step_size = 1 << ecc->codeword_size,
+		};
+
+		nanddev_set_ecc_requirements(base, &requirements);
 	} else {
 		pr_warn("Invalid codeword size\n");
 	}
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
index f2526ec..2bcc037 100644
--- a/drivers/mtd/nand/raw/nand_legacy.c
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -225,7 +225,8 @@
 	do {
 		u8 status;
 
-		ret = nand_read_data_op(chip, &status, sizeof(status), true);
+		ret = nand_read_data_op(chip, &status, sizeof(status), true,
+					false);
 		if (ret)
 			return;
 
@@ -331,8 +332,7 @@
 		 */
 		if (column == -1 && page_addr == -1)
 			return;
-		/* fall through */
-
+		fallthrough;
 	default:
 		/*
 		 * If we don't have access to the busy pin, we apply the given
@@ -354,6 +354,9 @@
 
 static void nand_ccs_delay(struct nand_chip *chip)
 {
+	const struct nand_sdr_timings *sdr =
+		nand_get_sdr_timings(nand_get_interface_config(chip));
+
 	/*
 	 * The controller already takes care of waiting for tCCS when the RNDIN
 	 * or RNDOUT command is sent, return directly.
@@ -365,8 +368,8 @@
 	 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
 	 * (which should be safe for all NANDs).
 	 */
-	if (nand_has_setup_data_iface(chip))
-		ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
+	if (nand_controller_can_setup_interface(chip))
+		ndelay(sdr->tCCS_min / 1000);
 	else
 		ndelay(500);
 }
@@ -483,8 +486,7 @@
 				      NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
 		chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
 				      NAND_NCE | NAND_CTRL_CHANGE);
-
-		/* fall through - This applies to read commands */
+		fallthrough;	/* This applies to read commands */
 	default:
 		/*
 		 * If we don't have access to the busy pin, we apply the given
@@ -554,7 +556,8 @@
 					break;
 			} else {
 				ret = nand_read_data_op(chip, &status,
-							sizeof(status), true);
+							sizeof(status), true,
+							false);
 				if (ret)
 					return ret;
 
@@ -565,7 +568,7 @@
 		} while (time_before(jiffies, timeo));
 	}
 
-	ret = nand_read_data_op(chip, &status, sizeof(status), true);
+	ret = nand_read_data_op(chip, &status, sizeof(status), true, false);
 	if (ret)
 		return ret;
 
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 58511ae..1472f92 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -6,11 +6,31 @@
  * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
  */
 
+#include <linux/delay.h>
 #include "internals.h"
 
 #define MACRONIX_READ_RETRY_BIT BIT(0)
 #define MACRONIX_NUM_READ_RETRY_MODES 6
 
+#define ONFI_FEATURE_ADDR_MXIC_PROTECTION 0xA0
+#define MXIC_BLOCK_PROTECTION_ALL_LOCK 0x38
+#define MXIC_BLOCK_PROTECTION_ALL_UNLOCK 0x0
+
+#define ONFI_FEATURE_ADDR_MXIC_RANDOMIZER 0xB0
+#define MACRONIX_RANDOMIZER_BIT BIT(1)
+#define MACRONIX_RANDOMIZER_ENPGM BIT(0)
+#define MACRONIX_RANDOMIZER_RANDEN BIT(1)
+#define MACRONIX_RANDOMIZER_RANDOPT BIT(2)
+#define MACRONIX_RANDOMIZER_MODE_ENTER	\
+	(MACRONIX_RANDOMIZER_ENPGM |	\
+	 MACRONIX_RANDOMIZER_RANDEN |	\
+	 MACRONIX_RANDOMIZER_RANDOPT)
+#define MACRONIX_RANDOMIZER_MODE_EXIT	\
+	(MACRONIX_RANDOMIZER_RANDEN |	\
+	 MACRONIX_RANDOMIZER_RANDOPT)
+
+#define MXIC_CMD_POWER_DOWN 0xB9
+
 struct nand_onfi_vendor_macronix {
 	u8 reserved;
 	u8 reliability_func;
@@ -29,20 +49,88 @@
 	return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
 }
 
+static int macronix_nand_randomizer_check_enable(struct nand_chip *chip)
+{
+	u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+	int ret;
+
+	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+				feature);
+	if (ret < 0)
+		return ret;
+
+	if (feature[0])
+		return feature[0];
+
+	feature[0] = MACRONIX_RANDOMIZER_MODE_ENTER;
+	ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+				feature);
+	if (ret < 0)
+		return ret;
+
+	/* RANDEN and RANDOPT OTP bits are programmed */
+	feature[0] = 0x0;
+	ret = nand_prog_page_op(chip, 0, 0, feature, 1);
+	if (ret < 0)
+		return ret;
+
+	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+				feature);
+	if (ret < 0)
+		return ret;
+
+	feature[0] &= MACRONIX_RANDOMIZER_MODE_EXIT;
+	ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+				feature);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
 static void macronix_nand_onfi_init(struct nand_chip *chip)
 {
 	struct nand_parameters *p = &chip->parameters;
 	struct nand_onfi_vendor_macronix *mxic;
+	struct device_node *dn = nand_get_flash_node(chip);
+	int rand_otp = 0;
+	int ret;
 
 	if (!p->onfi)
 		return;
 
+	if (of_find_property(dn, "mxic,enable-randomizer-otp", NULL))
+		rand_otp = 1;
+
 	mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
+	/* Subpage write is prohibited in randomizer operation */
+	if (rand_otp && chip->options & NAND_NO_SUBPAGE_WRITE &&
+	    mxic->reliability_func & MACRONIX_RANDOMIZER_BIT) {
+		if (p->supports_set_get_features) {
+			bitmap_set(p->set_feature_list,
+				   ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+			bitmap_set(p->get_feature_list,
+				   ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+			ret = macronix_nand_randomizer_check_enable(chip);
+			if (ret < 0) {
+				bitmap_clear(p->set_feature_list,
+					     ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+					     1);
+				bitmap_clear(p->get_feature_list,
+					     ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+					     1);
+				pr_info("Macronix NAND randomizer failed\n");
+			} else {
+				pr_info("Macronix NAND randomizer enabled\n");
+			}
+		}
+	}
+
 	if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0)
 		return;
 
 	chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES;
-	chip->setup_read_retry = macronix_nand_setup_read_retry;
+	chip->ops.setup_read_retry = macronix_nand_setup_read_retry;
 
 	if (p->supports_set_get_features) {
 		bitmap_set(p->set_feature_list,
@@ -59,7 +147,7 @@
  */
 static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
 {
-	unsigned int i;
+	int i;
 	static const char * const broken_get_timings[] = {
 		"MX30LF1G18AC",
 		"MX30LF1G28AC",
@@ -80,12 +168,9 @@
 	if (!chip->parameters.supports_set_get_features)
 		return;
 
-	for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
-		if (!strcmp(broken_get_timings[i], chip->parameters.model))
-			break;
-	}
-
-	if (i == ARRAY_SIZE(broken_get_timings))
+	i = match_string(broken_get_timings, ARRAY_SIZE(broken_get_timings),
+			 chip->parameters.model);
+	if (i < 0)
 		return;
 
 	bitmap_clear(chip->parameters.get_feature_list,
@@ -94,6 +179,143 @@
 		     ONFI_FEATURE_ADDR_TIMING_MODE, 1);
 }
 
+/*
+ * Macronix NAND supports Block Protection by a Protection (PT) pin;
+ * active high at power-on, it protects the entire chip even when #WP is
+ * disabled. The lock/unlock protection area can be partitioned according to
+ * the protection bits, i.e. upper 1/2 locked, upper 1/4 locked and so on.
+ */
+static int mxic_nand_lock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+	u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+	int ret;
+
+	feature[0] = MXIC_BLOCK_PROTECTION_ALL_LOCK;
+	nand_select_target(chip, 0);
+	ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+				feature);
+	nand_deselect_target(chip);
+	if (ret)
+		pr_err("%s all blocks failed\n", __func__);
+
+	return ret;
+}
+
+static int mxic_nand_unlock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+	u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+	int ret;
+
+	feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+	nand_select_target(chip, 0);
+	ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+				feature);
+	nand_deselect_target(chip);
+	if (ret)
+		pr_err("%s all blocks failed\n", __func__);
+
+	return ret;
+}
+
+static void macronix_nand_block_protection_support(struct nand_chip *chip)
+{
+	u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+	int ret;
+
+	bitmap_set(chip->parameters.get_feature_list,
+		   ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+	feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+	nand_select_target(chip, 0);
+	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+				feature);
+	nand_deselect_target(chip);
+	if (ret || feature[0] != MXIC_BLOCK_PROTECTION_ALL_LOCK) {
+		if (ret)
+			pr_err("Block protection check failed\n");
+
+		bitmap_clear(chip->parameters.get_feature_list,
+			     ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+		return;
+	}
+
+	bitmap_set(chip->parameters.set_feature_list,
+		   ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+	chip->ops.lock_area = mxic_nand_lock;
+	chip->ops.unlock_area = mxic_nand_unlock;
+}
+
+static int nand_power_down_op(struct nand_chip *chip)
+{
+	int ret;
+
+	if (nand_has_exec_op(chip)) {
+		struct nand_op_instr instrs[] = {
+			NAND_OP_CMD(MXIC_CMD_POWER_DOWN, 0),
+		};
+
+		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+		ret = nand_exec_op(chip, &op);
+		if (ret)
+			return ret;
+
+	} else {
+		chip->legacy.cmdfunc(chip, MXIC_CMD_POWER_DOWN, -1, -1);
+	}
+
+	return 0;
+}
+
+static int mxic_nand_suspend(struct nand_chip *chip)
+{
+	int ret;
+
+	nand_select_target(chip, 0);
+	ret = nand_power_down_op(chip);
+	if (ret < 0)
+		pr_err("Suspending MXIC NAND chip failed (%d)\n", ret);
+	nand_deselect_target(chip);
+
+	return ret;
+}
+
+static void mxic_nand_resume(struct nand_chip *chip)
+{
+	/*
+	 * Toggle the #CS pin to resume the NAND device, regardless of the
+	 * state of the other CLE, #WE and #RE pins. A NAND controller
+	 * asserts/de-asserts #CS whenever it sends any byte over the NAND
+	 * bus, e.g. a power-down command or a reset command issued without
+	 * R/B# status checking.
+	 */
+	nand_select_target(chip, 0);
+	nand_power_down_op(chip);
+	/* The minimum recovery time (tRDP) is 35 us */
+	usleep_range(35, 100);
+	nand_deselect_target(chip);
+}
+
+static void macronix_nand_deep_power_down_support(struct nand_chip *chip)
+{
+	int i;
+	static const char * const deep_power_down_dev[] = {
+		"MX30UF1G28AD",
+		"MX30UF2G28AD",
+		"MX30UF4G28AD",
+	};
+
+	i = match_string(deep_power_down_dev, ARRAY_SIZE(deep_power_down_dev),
+			 chip->parameters.model);
+	if (i < 0)
+		return;
+
+	chip->ops.suspend = mxic_nand_suspend;
+	chip->ops.resume = mxic_nand_resume;
+}
+
 static int macronix_nand_init(struct nand_chip *chip)
 {
 	if (nand_is_slc(chip))
@@ -101,6 +323,8 @@
 
 	macronix_nand_fix_broken_get_timings(chip);
 	macronix_nand_onfi_init(chip);
+	macronix_nand_block_protection_support(chip);
+	macronix_nand_deep_power_down_support(chip);
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 5665403..c019288 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -84,7 +84,7 @@
 		struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor;
 
 		chip->read_retries = micron->read_retry_options;
-		chip->setup_read_retry = micron_nand_setup_read_retry;
+		chip->ops.setup_read_retry = micron_nand_setup_read_retry;
 	}
 
 	if (p->supports_set_get_features) {
@@ -192,6 +192,7 @@
 	struct micron_nand *micron = nand_get_manufacturer_data(chip);
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	unsigned int step, max_bitflips = 0;
+	bool use_datain = false;
 	int ret;
 
 	if (!(status & NAND_ECC_STATUS_WRITE_RECOMMENDED)) {
@@ -211,8 +212,27 @@
 	 * in non-raw mode, even if the user did not request those bytes.
 	 */
 	if (!oob_required) {
-		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
-					false);
+		/*
+		 * We first check which operation is supported by the controller
+		 * before running it. This trick makes it possible to support
+		 * all controllers, even the most constrained ones, with almost
+		 * no performance hit.
+		 *
+		 * TODO: could be enhanced to avoid repeating the same check
+		 * over and over in the fast path.
+		 */
+		if (!nand_has_exec_op(chip) ||
+		    !nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+				       true))
+			use_datain = true;
+
+		if (use_datain)
+			ret = nand_read_data_op(chip, chip->oob_poi,
+						mtd->oobsize, false, false);
+		else
+			ret = nand_change_read_column_op(chip, mtd->writesize,
+							 chip->oob_poi,
+							 mtd->oobsize, false);
 		if (ret)
 			return ret;
 	}
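
The probe relies on the new last parameter of nand_read_data_op(): when check_only is true the call is a dry run that only reports whether the controller could execute the operation. A condensed sketch of the pattern, with buf, len and offset as placeholders:

/* dry run: force_8bit = false, check_only = true */
if (!nand_has_exec_op(chip) ||
    !nand_read_data_op(chip, buf, len, false, true))
	use_datain = true;	/* controller can stream the data out */

if (use_datain)
	ret = nand_read_data_op(chip, buf, len, false, false);
else
	ret = nand_change_read_column_op(chip, offset, buf, len, false);
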
@@ -285,6 +305,7 @@
 				 int oob_required, int page)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
+	bool use_datain = false;
 	u8 status;
 	int ret, max_bitflips = 0;
 
@@ -300,14 +321,36 @@
 	if (ret)
 		goto out;
 
-	ret = nand_exit_status_op(chip);
-	if (ret)
-		goto out;
+	/*
+	 * We first check which operation is supported by the controller before
+	 * running it. This trick makes it possible to support all controllers,
+	 * even the most constrained ones, with almost no performance hit.
+	 *
+	 * TODO: could be enhanced to avoid repeating the same check over and
+	 * over in the fast path.
+	 */
+	if (!nand_has_exec_op(chip) ||
+	    !nand_read_data_op(chip, buf, mtd->writesize, false, true))
+		use_datain = true;
 
-	ret = nand_read_data_op(chip, buf, mtd->writesize, false);
-	if (!ret && oob_required)
-		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+	if (use_datain) {
+		ret = nand_exit_status_op(chip);
+		if (ret)
+			goto out;
+
+		ret = nand_read_data_op(chip, buf, mtd->writesize, false,
 					false);
+		if (!ret && oob_required)
+			ret = nand_read_data_op(chip, chip->oob_poi,
+						mtd->oobsize, false, false);
+	} else {
+		ret = nand_change_read_column_op(chip, 0, buf, mtd->writesize,
+						 false);
+		if (!ret && oob_required)
+			ret = nand_change_read_column_op(chip, mtd->writesize,
+							 chip->oob_poi,
+							 mtd->oobsize, false);
+	}
 
 	if (chip->ecc.strength == 4)
 		max_bitflips = micron_nand_on_die_ecc_status_4(chip, status,
@@ -370,6 +413,8 @@
  */
 static int micron_supports_on_die_ecc(struct nand_chip *chip)
 {
+	const struct nand_ecc_props *requirements =
+		nanddev_get_ecc_requirements(&chip->base);
 	u8 id[5];
 	int ret;
 
@@ -382,7 +427,7 @@
 	/*
 	 * We only support on-die ECC of 4/512 or 8/512
 	 */
-	if  (chip->base.eccreq.strength != 4 && chip->base.eccreq.strength != 8)
+	if  (requirements->strength != 4 && requirements->strength != 8)
 		return MICRON_ON_DIE_UNSUPPORTED;
 
 	/* 0x2 means on-die ECC is available. */
@@ -423,7 +468,7 @@
 	/*
 	 * We only support on-die ECC of 4/512 or 8/512
 	 */
-	if  (chip->base.eccreq.strength != 4 && chip->base.eccreq.strength != 8)
+	if  (requirements->strength != 4 && requirements->strength != 8)
 		return MICRON_ON_DIE_UNSUPPORTED;
 
 	return MICRON_ON_DIE_SUPPORTED;
@@ -431,6 +476,9 @@
 
 static int micron_nand_init(struct nand_chip *chip)
 {
+	struct nand_device *base = &chip->base;
+	const struct nand_ecc_props *requirements =
+		nanddev_get_ecc_requirements(base);
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct micron_nand *micron;
 	int ondie;
@@ -454,13 +502,13 @@
 	ondie = micron_supports_on_die_ecc(chip);
 
 	if (ondie == MICRON_ON_DIE_MANDATORY &&
-	    chip->ecc.mode != NAND_ECC_ON_DIE) {
+	    chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_DIE) {
 		pr_err("On-die ECC forcefully enabled, not supported\n");
 		ret = -EINVAL;
 		goto err_free_manuf_data;
 	}
 
-	if (chip->ecc.mode == NAND_ECC_ON_DIE) {
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE) {
 		if (ondie == MICRON_ON_DIE_UNSUPPORTED) {
 			pr_err("On-die ECC selected but not supported\n");
 			ret = -EINVAL;
@@ -480,7 +528,7 @@
 		 * That's not needed for 8-bit ECC, because the status expose
 		 * a better approximation of the number of bitflips in a page.
 		 */
-		if (chip->base.eccreq.strength == 4) {
+		if (requirements->strength == 4) {
 			micron->ecc.rawbuf = kmalloc(mtd->writesize +
 						     mtd->oobsize,
 						     GFP_KERNEL);
@@ -490,17 +538,17 @@
 			}
 		}
 
-		if (chip->base.eccreq.strength == 4)
+		if (requirements->strength == 4)
 			mtd_set_ooblayout(mtd,
 					  &micron_nand_on_die_4_ooblayout_ops);
 		else
 			mtd_set_ooblayout(mtd,
 					  &micron_nand_on_die_8_ooblayout_ops);
 
-		chip->ecc.bytes = chip->base.eccreq.strength * 2;
+		chip->ecc.bytes = requirements->strength * 2;
 		chip->ecc.size = 512;
-		chip->ecc.strength = chip->base.eccreq.strength;
-		chip->ecc.algo = NAND_ECC_BCH;
+		chip->ecc.strength = requirements->strength;
+		chip->ecc.algo = NAND_ECC_ALGO_BCH;
 		chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
 		chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
 
@@ -508,8 +556,10 @@
 			chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
 			chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
 		} else {
-			chip->ecc.read_page_raw = nand_read_page_raw;
-			chip->ecc.write_page_raw = nand_write_page_raw;
+			if (!chip->ecc.read_page_raw)
+				chip->ecc.read_page_raw = nand_read_page_raw;
+			if (!chip->ecc.write_page_raw)
+				chip->ecc.write_page_raw = nand_write_page_raw;
 		}
 	}
 
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index 8fe8d7b..45649e0 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -16,6 +16,8 @@
 
 #include "internals.h"
 
+#define ONFI_PARAM_PAGES 3
+
 u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
 {
 	int i;
@@ -32,6 +34,8 @@
 static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
 					    struct nand_onfi_params *p)
 {
+	struct nand_device *base = &chip->base;
+	struct nand_ecc_props requirements;
 	struct onfi_ext_param_page *ep;
 	struct onfi_ext_section *s;
 	struct onfi_ext_ecc_info *ecc;
@@ -45,12 +49,10 @@
 	if (!ep)
 		return -ENOMEM;
 
-	/* Send our own NAND_CMD_PARAM. */
-	ret = nand_read_param_page_op(chip, 0, NULL, 0);
-	if (ret)
-		goto ext_out;
-
-	/* Use the Change Read Column command to skip the ONFI param pages. */
+	/*
+	 * Use the Change Read Column command to skip the ONFI param pages and
+	 * ensure we read at the right location.
+	 */
 	ret = nand_change_read_column_op(chip,
 					 sizeof(*p) * p->num_of_param_pages,
 					 ep, len, true);
@@ -94,8 +96,10 @@
 		goto ext_out;
 	}
 
-	chip->base.eccreq.strength = ecc->ecc_bits;
-	chip->base.eccreq.step_size = 1 << ecc->codeword_size;
+	requirements.strength = ecc->ecc_bits;
+	requirements.step_size = 1 << ecc->codeword_size;
+	nanddev_set_ecc_requirements(base, &requirements);
+
 	ret = 0;
 
 ext_out:
@@ -139,13 +143,16 @@
  */
 int nand_onfi_detect(struct nand_chip *chip)
 {
+	struct nand_device *base = &chip->base;
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct nand_memory_organization *memorg;
-	struct nand_onfi_params *p;
+	struct nand_onfi_params *p = NULL, *pbuf;
 	struct onfi_params *onfi;
+	bool use_datain = false;
 	int onfi_version = 0;
 	char id[4];
 	int i, ret, val;
+	u16 crc;
 
 	memorg = nanddev_get_memorg(&chip->base);
 
@@ -155,43 +162,54 @@
 		return 0;
 
 	/* ONFI chip: allocate a buffer to hold its parameter page */
-	p = kzalloc((sizeof(*p) * 3), GFP_KERNEL);
-	if (!p)
+	pbuf = kzalloc((sizeof(*pbuf) * ONFI_PARAM_PAGES), GFP_KERNEL);
+	if (!pbuf)
 		return -ENOMEM;
 
-	ret = nand_read_param_page_op(chip, 0, NULL, 0);
-	if (ret) {
-		ret = 0;
-		goto free_onfi_param_page;
-	}
+	if (!nand_has_exec_op(chip) ||
+	    !nand_read_data_op(chip, &pbuf[0], sizeof(*pbuf), true, true))
+		use_datain = true;
 
-	for (i = 0; i < 3; i++) {
-		ret = nand_read_data_op(chip, &p[i], sizeof(*p), true);
+	for (i = 0; i < ONFI_PARAM_PAGES; i++) {
+		if (!i)
+			ret = nand_read_param_page_op(chip, 0, &pbuf[i],
+						      sizeof(*pbuf));
+		else if (use_datain)
+			ret = nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf),
+						true, false);
+		else
+			ret = nand_change_read_column_op(chip, sizeof(*pbuf) * i,
+							 &pbuf[i], sizeof(*pbuf),
+							 true);
 		if (ret) {
 			ret = 0;
 			goto free_onfi_param_page;
 		}
 
-		if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) ==
-		    le16_to_cpu(p[i].crc)) {
-			if (i)
-				memcpy(p, &p[i], sizeof(*p));
+		crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)&pbuf[i], 254);
+		if (crc == le16_to_cpu(pbuf[i].crc)) {
+			p = &pbuf[i];
 			break;
 		}
 	}
 
-	if (i == 3) {
-		const void *srcbufs[3] = {p, p + 1, p + 2};
+	if (i == ONFI_PARAM_PAGES) {
+		const void *srcbufs[ONFI_PARAM_PAGES];
+		unsigned int j;
+
+		for (j = 0; j < ONFI_PARAM_PAGES; j++)
+			srcbufs[j] = pbuf + j;
 
 		pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
-		nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p,
-				       sizeof(*p));
+		nand_bit_wise_majority(srcbufs, ONFI_PARAM_PAGES, pbuf,
+				       sizeof(*pbuf));
 
-		if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) !=
-				le16_to_cpu(p->crc)) {
+		crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)pbuf, 254);
+		if (crc != le16_to_cpu(pbuf->crc)) {
 			pr_err("ONFI parameter recovery failed, aborting\n");
 			goto free_onfi_param_page;
 		}
+		p = pbuf;
 	}
 
 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
@@ -252,8 +270,12 @@
 		chip->options |= NAND_BUSWIDTH_16;
 
 	if (p->ecc_bits != 0xff) {
-		chip->base.eccreq.strength = p->ecc_bits;
-		chip->base.eccreq.step_size = 512;
+		struct nand_ecc_props requirements = {
+			.strength = p->ecc_bits,
+			.step_size = 512,
+		};
+
+		nanddev_set_ecc_requirements(base, &requirements);
 	} else if (onfi_version >= 21 &&
 		(le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
 
@@ -299,14 +321,14 @@
 	chip->parameters.onfi = onfi;
 
 	/* Identification done, free the full ONFI parameter page and exit */
-	kfree(p);
+	kfree(pbuf);
 
 	return 1;
 
 free_model:
 	kfree(chip->parameters.model);
 free_onfi_param_page:
-	kfree(p);
+	kfree(pbuf);
 
 	return ret;
 }
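The recovery path above relies on two redundancy mechanisms: a CRC-16 over the
first 254 bytes of each parameter-page copy and, if every copy fails the
check, a bit-wise majority vote across the copies. A sketch of the voting
idea (the driver's own nand_bit_wise_majority() is the authoritative
implementation):

static void bit_wise_majority(const void **srcbufs, unsigned int nsrcbufs,
			      void *dst, unsigned int bufsize)
{
	int i, j, k;

	for (i = 0; i < bufsize; i++) {
		u8 val = 0;

		/* For each bit, keep the value seen in most source buffers */
		for (j = 0; j < 8; j++) {
			unsigned int cnt = 0;

			for (k = 0; k < nsrcbufs; k++) {
				const u8 *srcbuf = srcbufs[k];

				if (srcbuf[i] & BIT(j))
					cnt++;
			}

			if (cnt > nsrcbufs / 2)
				val |= BIT(j);
		}

		((u8 *)dst)[i] = val;
	}
}
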
diff --git a/drivers/mtd/nand/raw/nand_samsung.c b/drivers/mtd/nand/raw/nand_samsung.c
index 3a4a19e..0be6b75 100644
--- a/drivers/mtd/nand/raw/nand_samsung.c
+++ b/drivers/mtd/nand/raw/nand_samsung.c
@@ -10,6 +10,8 @@
 
 static void samsung_nand_decode_id(struct nand_chip *chip)
 {
+	struct nand_device *base = &chip->base;
+	struct nand_ecc_props requirements = {};
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct nand_memory_organization *memorg;
 
@@ -71,23 +73,23 @@
 		/* Extract ECC requirements from 5th id byte*/
 		extid = (chip->id.data[4] >> 4) & 0x07;
 		if (extid < 5) {
-			chip->base.eccreq.step_size = 512;
-			chip->base.eccreq.strength = 1 << extid;
+			requirements.step_size = 512;
+			requirements.strength = 1 << extid;
 		} else {
-			chip->base.eccreq.step_size = 1024;
+			requirements.step_size = 1024;
 			switch (extid) {
 			case 5:
-				chip->base.eccreq.strength = 24;
+				requirements.strength = 24;
 				break;
 			case 6:
-				chip->base.eccreq.strength = 40;
+				requirements.strength = 40;
 				break;
 			case 7:
-				chip->base.eccreq.strength = 60;
+				requirements.strength = 60;
 				break;
 			default:
 				WARN(1, "Could not decode ECC info");
-				chip->base.eccreq.step_size = 0;
+				requirements.step_size = 0;
 			}
 		}
 	} else {
@@ -97,8 +99,8 @@
 			switch (chip->id.data[1]) {
 			/* K9F4G08U0D-S[I|C]B0(T00) */
 			case 0xDC:
-				chip->base.eccreq.step_size = 512;
-				chip->base.eccreq.strength = 1;
+				requirements.step_size = 512;
+				requirements.strength = 1;
 				break;
 
 			/* K9F1G08U0E 21nm chips do not support subpage write */
@@ -112,6 +114,8 @@
 			}
 		}
 	}
+
+	nanddev_set_ecc_requirements(base, &requirements);
 }
 
 static int samsung_nand_init(struct nand_chip *chip)
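The conversion applied here is the same as in the other vendor files: instead
of writing chip->base.eccreq fields piecemeal, the decoder fills a local
struct nand_ecc_props and commits it once. Reduced to its skeleton (the
ID-byte decoding below is illustrative, not Samsung's actual encoding):

static void vendor_decode_ecc_requirements(struct nand_chip *chip)
{
	struct nand_ecc_props requirements = {
		.step_size = 512,
		.strength = 1 << ((chip->id.data[4] >> 4) & 0x3),
	};

	nanddev_set_ecc_requirements(&chip->base, &requirements);
}
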
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
index f12b7a7..94d8326 100644
--- a/drivers/mtd/nand/raw/nand_timings.c
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -12,13 +12,23 @@
 
 #define ONFI_DYN_TIMING_MAX U16_MAX
 
-static const struct nand_data_interface onfi_sdr_timings[] = {
+/*
+ * For non-ONFI chips we use the highest possible value for tPROG and tBERS.
+ * tR and tCCS take the default values given in the ONFI specification for
+ * timing mode 0: 200us and 500ns, respectively.
+ *
+ * These four values are tweaked to be more accurate in the case of ONFI chips.
+ */
+static const struct nand_interface_config onfi_sdr_timings[] = {
 	/* Mode 0 */
 	{
 		.type = NAND_SDR_IFACE,
+		.timings.mode = 0,
 		.timings.sdr = {
 			.tCCS_min = 500000,
 			.tR_max = 200000000,
+			.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+			.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
 			.tADL_min = 400000,
 			.tALH_min = 20000,
 			.tALS_min = 50000,
@@ -58,9 +68,12 @@
 	/* Mode 1 */
 	{
 		.type = NAND_SDR_IFACE,
+		.timings.mode = 1,
 		.timings.sdr = {
 			.tCCS_min = 500000,
 			.tR_max = 200000000,
+			.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+			.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
 			.tADL_min = 400000,
 			.tALH_min = 10000,
 			.tALS_min = 25000,
@@ -100,9 +113,12 @@
 	/* Mode 2 */
 	{
 		.type = NAND_SDR_IFACE,
+		.timings.mode = 2,
 		.timings.sdr = {
 			.tCCS_min = 500000,
 			.tR_max = 200000000,
+			.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+			.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
 			.tADL_min = 400000,
 			.tALH_min = 10000,
 			.tALS_min = 15000,
@@ -142,9 +158,12 @@
 	/* Mode 3 */
 	{
 		.type = NAND_SDR_IFACE,
+		.timings.mode = 3,
 		.timings.sdr = {
 			.tCCS_min = 500000,
 			.tR_max = 200000000,
+			.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+			.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
 			.tADL_min = 400000,
 			.tALH_min = 5000,
 			.tALS_min = 10000,
@@ -184,9 +203,12 @@
 	/* Mode 4 */
 	{
 		.type = NAND_SDR_IFACE,
+		.timings.mode = 4,
 		.timings.sdr = {
 			.tCCS_min = 500000,
 			.tR_max = 200000000,
+			.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+			.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
 			.tADL_min = 400000,
 			.tALH_min = 5000,
 			.tALS_min = 10000,
@@ -226,9 +248,12 @@
 	/* Mode 5 */
 	{
 		.type = NAND_SDR_IFACE,
+		.timings.mode = 5,
 		.timings.sdr = {
 			.tCCS_min = 500000,
 			.tR_max = 200000000,
+			.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+			.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
 			.tADL_min = 400000,
 			.tALH_min = 5000,
 			.tALS_min = 10000,
@@ -267,23 +292,79 @@
 	},
 };
 
-/**
- * onfi_fill_data_interface - [NAND Interface] Initialize a data interface from
- * given ONFI mode
- * @mode: The ONFI timing mode
- */
-int onfi_fill_data_interface(struct nand_chip *chip,
-			     enum nand_data_interface_type type,
-			     int timing_mode)
+/* All NAND chips share the same reset data interface: SDR mode 0 */
+const struct nand_interface_config *nand_get_reset_interface_config(void)
 {
-	struct nand_data_interface *iface = &chip->data_interface;
+	return &onfi_sdr_timings[0];
+}
+
+/**
+ * onfi_find_closest_sdr_mode - Derive the closest ONFI SDR timing mode given a
+ *                              set of timings
+ * @spec_timings: the timings to compare against the ONFI modes
+ */
+unsigned int
+onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings)
+{
+	const struct nand_sdr_timings *onfi_timings;
+	int mode;
+
+	for (mode = ARRAY_SIZE(onfi_sdr_timings) - 1; mode > 0; mode--) {
+		onfi_timings = &onfi_sdr_timings[mode].timings.sdr;
+
+		if (spec_timings->tCCS_min <= onfi_timings->tCCS_min &&
+		    spec_timings->tADL_min <= onfi_timings->tADL_min &&
+		    spec_timings->tALH_min <= onfi_timings->tALH_min &&
+		    spec_timings->tALS_min <= onfi_timings->tALS_min &&
+		    spec_timings->tAR_min <= onfi_timings->tAR_min &&
+		    spec_timings->tCEH_min <= onfi_timings->tCEH_min &&
+		    spec_timings->tCH_min <= onfi_timings->tCH_min &&
+		    spec_timings->tCLH_min <= onfi_timings->tCLH_min &&
+		    spec_timings->tCLR_min <= onfi_timings->tCLR_min &&
+		    spec_timings->tCLS_min <= onfi_timings->tCLS_min &&
+		    spec_timings->tCOH_min <= onfi_timings->tCOH_min &&
+		    spec_timings->tCS_min <= onfi_timings->tCS_min &&
+		    spec_timings->tDH_min <= onfi_timings->tDH_min &&
+		    spec_timings->tDS_min <= onfi_timings->tDS_min &&
+		    spec_timings->tIR_min <= onfi_timings->tIR_min &&
+		    spec_timings->tRC_min <= onfi_timings->tRC_min &&
+		    spec_timings->tREH_min <= onfi_timings->tREH_min &&
+		    spec_timings->tRHOH_min <= onfi_timings->tRHOH_min &&
+		    spec_timings->tRHW_min <= onfi_timings->tRHW_min &&
+		    spec_timings->tRLOH_min <= onfi_timings->tRLOH_min &&
+		    spec_timings->tRP_min <= onfi_timings->tRP_min &&
+		    spec_timings->tRR_min <= onfi_timings->tRR_min &&
+		    spec_timings->tWC_min <= onfi_timings->tWC_min &&
+		    spec_timings->tWH_min <= onfi_timings->tWH_min &&
+		    spec_timings->tWHR_min <= onfi_timings->tWHR_min &&
+		    spec_timings->tWP_min <= onfi_timings->tWP_min &&
+		    spec_timings->tWW_min <= onfi_timings->tWW_min)
+			return mode;
+	}
+
+	return 0;
+}
+
+/**
+ * onfi_fill_interface_config - Initialize an interface config from a given
+ *                              ONFI mode
+ * @chip: The NAND chip
+ * @iface: The interface configuration to fill
+ * @type: The interface type
+ * @timing_mode: The ONFI timing mode
+ */
+void onfi_fill_interface_config(struct nand_chip *chip,
+				struct nand_interface_config *iface,
+				enum nand_interface_type type,
+				unsigned int timing_mode)
+{
 	struct onfi_params *onfi = chip->parameters.onfi;
 
-	if (type != NAND_SDR_IFACE)
-		return -EINVAL;
+	if (WARN_ON(type != NAND_SDR_IFACE))
+		return;
 
-	if (timing_mode < 0 || timing_mode >= ARRAY_SIZE(onfi_sdr_timings))
-		return -EINVAL;
+	if (WARN_ON(timing_mode >= ARRAY_SIZE(onfi_sdr_timings)))
+		return;
 
 	*iface = onfi_sdr_timings[timing_mode];
 
@@ -302,22 +383,5 @@
 
 		/* nanoseconds -> picoseconds */
 		timings->tCCS_min = 1000UL * onfi->tCCS;
-	} else {
-		struct nand_sdr_timings *timings = &iface->timings.sdr;
-		/*
-		 * For non-ONFI chips we use the highest possible value for
-		 * tPROG and tBERS. tR and tCCS will take the default values
-		 * precised in the ONFI specification for timing mode 0,
-		 * respectively 200us and 500ns.
-		 */
-
-		/* microseconds -> picoseconds */
-		timings->tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
-		timings->tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
-
-		timings->tR_max = 200000000;
-		timings->tCCS_min = 500000;
 	}
-
-	return 0;
 }
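Vendor code whose datasheet timings match no mode exactly can now start from
the closest standard mode and patch the deltas, as the Toshiba changes below
do. A hedged sketch of that flow (the numeric values are placeholders, in
picoseconds):

static int vendor_choose_interface_config(struct nand_chip *chip,
					  struct nand_interface_config *iface)
{
	struct nand_sdr_timings spec = {};
	unsigned int mode;

	/* Fill the datasheet timings (placeholder values, in ps) */
	spec.tCCS_min = 500000;
	spec.tADL_min = 400000;

	mode = onfi_find_closest_sdr_mode(&spec);
	onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);

	return 0;
}
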
diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c
index 9c03fbb..cf4f379 100644
--- a/drivers/mtd/nand/raw/nand_toshiba.c
+++ b/drivers/mtd/nand/raw/nand_toshiba.c
@@ -14,14 +14,68 @@
 /* Recommended to rewrite for BENAND */
 #define TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED	BIT(3)
 
+/* ECC Status Read Command for BENAND */
+#define TOSHIBA_NAND_CMD_ECC_STATUS_READ	0x7A
+
+/* ECC Status Mask for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_MASK		0x0F
+
+/* Uncorrectable Error for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_UNCORR		0x0F
+
+/* Max ECC Steps for BENAND */
+#define TOSHIBA_NAND_MAX_ECC_STEPS		8
+
+static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip,
+						 u8 *buf)
+{
+	u8 *ecc_status = buf;
+
+	if (nand_has_exec_op(chip)) {
+		const struct nand_sdr_timings *sdr =
+			nand_get_sdr_timings(nand_get_interface_config(chip));
+		struct nand_op_instr instrs[] = {
+			NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ,
+				    PSEC_TO_NSEC(sdr->tADL_min)),
+			NAND_OP_8BIT_DATA_IN(chip->ecc.steps, ecc_status, 0),
+		};
+		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+		return nand_exec_op(chip, &op);
+	}
+
+	return -ENOTSUPP;
+}
+
 static int toshiba_nand_benand_eccstatus(struct nand_chip *chip)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	int ret;
 	unsigned int max_bitflips = 0;
-	u8 status;
+	u8 status, ecc_status[TOSHIBA_NAND_MAX_ECC_STEPS];
 
 	/* Check Status */
+	ret = toshiba_nand_benand_read_eccstatus_op(chip, ecc_status);
+	if (!ret) {
+		unsigned int i, bitflips = 0;
+
+		for (i = 0; i < chip->ecc.steps; i++) {
+			bitflips = ecc_status[i] & TOSHIBA_NAND_ECC_STATUS_MASK;
+			if (bitflips == TOSHIBA_NAND_ECC_STATUS_UNCORR) {
+				mtd->ecc_stats.failed++;
+			} else {
+				mtd->ecc_stats.corrected += bitflips;
+				max_bitflips = max(max_bitflips, bitflips);
+			}
+		}
+
+		return max_bitflips;
+	}
+
+	/*
+	 * Fall back to the regular status check if
+	 * toshiba_nand_benand_read_eccstatus_op() failed.
+	 */
 	ret = nand_status_op(chip, &status);
 	if (ret)
 		return ret;
@@ -86,11 +140,13 @@
 
 	chip->options |= NAND_SUBPAGE_READ;
 
-	mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+	mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
 }
 
 static void toshiba_nand_decode_id(struct nand_chip *chip)
 {
+	struct nand_device *base = &chip->base;
+	struct nand_ecc_props requirements = {};
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct nand_memory_organization *memorg;
 
@@ -108,7 +164,7 @@
 	 */
 	if (chip->id.len >= 6 && nand_is_slc(chip) &&
 	    (chip->id.data[5] & 0x7) == 0x6 /* 24nm */ &&
-	    !(chip->id.data[4] & 0x80) /* !BENAND */) {
+	    !(chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND) /* !BENAND */) {
 		memorg->oobsize = 32 * memorg->pagesize >> 9;
 		mtd->oobsize = memorg->oobsize;
 	}
@@ -121,23 +177,98 @@
 	 *  - 24nm: 8 bit ECC for each 512Byte is required.
 	 */
 	if (chip->id.len >= 6 && nand_is_slc(chip)) {
-		chip->base.eccreq.step_size = 512;
+		requirements.step_size = 512;
 		switch (chip->id.data[5] & 0x7) {
 		case 0x4:
-			chip->base.eccreq.strength = 1;
+			requirements.strength = 1;
 			break;
 		case 0x5:
-			chip->base.eccreq.strength = 4;
+			requirements.strength = 4;
 			break;
 		case 0x6:
-			chip->base.eccreq.strength = 8;
+			requirements.strength = 8;
 			break;
 		default:
 			WARN(1, "Could not get ECC info");
-			chip->base.eccreq.step_size = 0;
+			requirements.step_size = 0;
 			break;
 		}
 	}
+
+	nanddev_set_ecc_requirements(base, &requirements);
+}
+
+static int
+tc58teg5dclta00_choose_interface_config(struct nand_chip *chip,
+					struct nand_interface_config *iface)
+{
+	onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 5);
+
+	return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int
+tc58nvg0s3e_choose_interface_config(struct nand_chip *chip,
+				    struct nand_interface_config *iface)
+{
+	onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 2);
+
+	return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int
+th58nvg2s3hbai4_choose_interface_config(struct nand_chip *chip,
+					struct nand_interface_config *iface)
+{
+	struct nand_sdr_timings *sdr = &iface->timings.sdr;
+
+	/* Start with timings from the closest timing mode, mode 4. */
+	onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);
+
+	/* Patch timings that differ from mode 4. */
+	sdr->tALS_min = 12000;
+	sdr->tCHZ_max = 20000;
+	sdr->tCLS_min = 12000;
+	sdr->tCOH_min = 0;
+	sdr->tDS_min = 12000;
+	sdr->tRHOH_min = 25000;
+	sdr->tRHW_min = 30000;
+	sdr->tRHZ_max = 60000;
+	sdr->tWHR_min = 60000;
+
+	/* Patch timings that are not part of the ONFI timing mode. */
+	sdr->tPROG_max = 700000000;
+	sdr->tBERS_max = 5000000000;
+
+	return nand_choose_best_sdr_timings(chip, iface, sdr);
+}
+
+static int tc58teg5dclta00_init(struct nand_chip *chip)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+
+	chip->ops.choose_interface_config =
+		&tc58teg5dclta00_choose_interface_config;
+	chip->options |= NAND_NEED_SCRAMBLING;
+	mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme);
+
+	return 0;
+}
+
+static int tc58nvg0s3e_init(struct nand_chip *chip)
+{
+	chip->ops.choose_interface_config =
+		&tc58nvg0s3e_choose_interface_config;
+
+	return 0;
+}
+
+static int th58nvg2s3hbai4_init(struct nand_chip *chip)
+{
+	chip->ops.choose_interface_config =
+		&th58nvg2s3hbai4_choose_interface_config;
+
+	return 0;
 }
 
 static int toshiba_nand_init(struct nand_chip *chip)
@@ -146,10 +277,20 @@
 		chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
 
 	/* Check that chip is BENAND and ECC mode is on-die */
-	if (nand_is_slc(chip) && chip->ecc.mode == NAND_ECC_ON_DIE &&
+	if (nand_is_slc(chip) &&
+	    chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
 	    chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND)
 		toshiba_nand_benand_init(chip);
 
+	if (!strcmp("TC58TEG5DCLTA00", chip->parameters.model))
+		tc58teg5dclta00_init(chip);
+	if (!strncmp("TC58NVG0S3E", chip->parameters.model,
+		     sizeof("TC58NVG0S3E") - 1))
+		tc58nvg0s3e_init(chip);
+	if (!strncmp("TH58NVG2S3HBAI4", chip->parameters.model,
+		     sizeof("TH58NVG2S3HBAI4") - 1))
+		th58nvg2s3hbai4_init(chip);
+
 	return 0;
 }
 
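The BENAND status read above shows the general shape of a vendor-specific
->exec_op() command: an instruction array of one command cycle plus a data-in
phase, wrapped in a nand_operation. Stripped down to that shape
(VENDOR_CMD_OPCODE is a placeholder, not a real opcode):

static int vendor_read_op(struct nand_chip *chip, u8 *buf, unsigned int len)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(VENDOR_CMD_OPCODE, 0),
		NAND_OP_8BIT_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

	if (!nand_has_exec_op(chip))
		return -ENOTSUPP;	/* legacy controllers need another path */

	return nand_exec_op(chip, &op);
}
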
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 9a70754..9a9f1c2 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -353,6 +353,9 @@
 	void *file_buf;
 	struct page *held_pages[NS_MAX_HELD_PAGES];
 	int held_cnt;
+
+	/* debugfs entry */
+	struct dentry *dent;
 };
 
 /*
@@ -432,7 +435,7 @@
 /* MTD structure for NAND controller */
 static struct mtd_info *nsmtd;
 
-static int nandsim_show(struct seq_file *m, void *private)
+static int ns_show(struct seq_file *m, void *private)
 {
 	unsigned long wmin = -1, wmax = 0, avg;
 	unsigned long deciles[10], decile_max[10], tot = 0;
@@ -483,19 +486,18 @@
 
 	return 0;
 }
-DEFINE_SHOW_ATTRIBUTE(nandsim);
+DEFINE_SHOW_ATTRIBUTE(ns);
 
 /**
- * nandsim_debugfs_create - initialize debugfs
- * @dev: nandsim device description object
+ * ns_debugfs_create - initialize debugfs
+ * @ns: nandsim device description object
  *
  * This function creates all debugfs files for nandsim device @ns. Returns zero in
  * case of success and a negative error code in case of failure.
  */
-static int nandsim_debugfs_create(struct nandsim *dev)
+static int ns_debugfs_create(struct nandsim *ns)
 {
 	struct dentry *root = nsmtd->dbg.dfs_dir;
-	struct dentry *dent;
 
 	/*
 	 * Just skip debugfs initialization when the debugfs directory is
@@ -508,9 +510,9 @@
 		return 0;
 	}
 
-	dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
-				   root, dev, &nandsim_fops);
-	if (IS_ERR_OR_NULL(dent)) {
+	ns->dent = debugfs_create_file("nandsim_wear_report", 0400, root, ns,
+				       &ns_fops);
+	if (IS_ERR_OR_NULL(ns->dent)) {
 		NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n");
 		return -1;
 	}
@@ -518,13 +520,18 @@
 	return 0;
 }
 
+static void ns_debugfs_remove(struct nandsim *ns)
+{
+	debugfs_remove_recursive(ns->dent);
+}
+
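DEFINE_SHOW_ATTRIBUTE(ns) above generates ns_fops from ns_show(), and stashing
the dentry in struct nandsim is what makes the ns_debugfs_remove() teardown
possible. The boilerplate the macro replaces, sketched with illustrative
demo_* names:

static int demo_show(struct seq_file *m, void *private)
{
	struct nandsim *ns = m->private;	/* the data passed at creation */

	seq_printf(m, "held pages: %d\n", ns->held_cnt);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);	/* emits demo_open() and demo_fops */

/* registration, typically from a *_debugfs_create() helper: */
/* ns->dent = debugfs_create_file("demo", 0400, root, ns, &demo_fops); */
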
 /*
  * Allocate array of page pointers, create slab allocation for an array
  * and initialize the array by NULL pointers.
  *
  * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
  */
-static int __init alloc_device(struct nandsim *ns)
+static int __init ns_alloc_device(struct nandsim *ns)
 {
 	struct file *cfile;
 	int i, err;
@@ -536,12 +543,12 @@
 		if (!(cfile->f_mode & FMODE_CAN_READ)) {
 			NS_ERR("alloc_device: cache file not readable\n");
 			err = -EINVAL;
-			goto err_close;
+			goto err_close_filp;
 		}
 		if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
 			NS_ERR("alloc_device: cache file not writeable\n");
 			err = -EINVAL;
-			goto err_close;
+			goto err_close_filp;
 		}
 		ns->pages_written =
 			vzalloc(array_size(sizeof(unsigned long),
@@ -549,16 +556,24 @@
 		if (!ns->pages_written) {
 			NS_ERR("alloc_device: unable to allocate pages written array\n");
 			err = -ENOMEM;
-			goto err_close;
+			goto err_close_filp;
 		}
 		ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
 		if (!ns->file_buf) {
 			NS_ERR("alloc_device: unable to allocate file buf\n");
 			err = -ENOMEM;
-			goto err_free;
+			goto err_free_pw;
 		}
 		ns->cfile = cfile;
+
 		return 0;
+
+err_free_pw:
+		vfree(ns->pages_written);
+err_close_filp:
+		filp_close(cfile, NULL);
+
+		return err;
 	}
 
 	ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum));
@@ -573,22 +588,22 @@
 						ns->geom.pgszoob, 0, 0, NULL);
 	if (!ns->nand_pages_slab) {
 		NS_ERR("cache_create: unable to create kmem_cache\n");
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto err_free_pg;
 	}
 
 	return 0;
 
-err_free:
-	vfree(ns->pages_written);
-err_close:
-	filp_close(cfile, NULL);
+err_free_pg:
+	vfree(ns->pages);
+
 	return err;
 }
 
 /*
  * Free any allocated pages, and free the array of page pointers.
  */
-static void free_device(struct nandsim *ns)
+static void ns_free_device(struct nandsim *ns)
 {
 	int i;
 
@@ -610,7 +625,7 @@
 	}
 }
 
-static char __init *get_partition_name(int i)
+static char __init *ns_get_partition_name(int i)
 {
 	return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
 }
@@ -620,7 +635,7 @@
  *
  * RETURNS: 0 if success, -ERRNO if failure.
  */
-static int __init init_nandsim(struct mtd_info *mtd)
+static int __init ns_init(struct mtd_info *mtd)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
 	struct nandsim   *ns   = nand_get_controller_data(chip);
@@ -693,7 +708,7 @@
 			NS_ERR("bad partition size.\n");
 			return -EINVAL;
 		}
-		ns->partitions[i].name   = get_partition_name(i);
+		ns->partitions[i].name = ns_get_partition_name(i);
 		if (!ns->partitions[i].name) {
 			NS_ERR("unable to allocate memory.\n");
 			return -ENOMEM;
@@ -707,12 +722,14 @@
 	if (remains) {
 		if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
 			NS_ERR("too many partitions.\n");
-			return -EINVAL;
+			ret = -EINVAL;
+			goto free_partition_names;
 		}
-		ns->partitions[i].name   = get_partition_name(i);
+		ns->partitions[i].name = ns_get_partition_name(i);
 		if (!ns->partitions[i].name) {
 			NS_ERR("unable to allocate memory.\n");
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto free_partition_names;
 		}
 		ns->partitions[i].offset = next_offset;
 		ns->partitions[i].size   = remains;
@@ -739,33 +756,48 @@
 	printk("sector address bytes: %u\n",    ns->geom.secaddrbytes);
 	printk("options: %#x\n",                ns->options);
 
-	if ((ret = alloc_device(ns)) != 0)
-		return ret;
+	ret = ns_alloc_device(ns);
+	if (ret)
+		goto free_partition_names;
 
 	/* Allocate / initialize the internal buffer */
 	ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
 	if (!ns->buf.byte) {
 		NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
 			ns->geom.pgszoob);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto free_device;
 	}
 	memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
 
 	return 0;
+
+free_device:
+	ns_free_device(ns);
+free_partition_names:
+	for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+		kfree(ns->partitions[i].name);
+
+	return ret;
 }
 
 /*
  * Free the nandsim structure.
  */
-static void free_nandsim(struct nandsim *ns)
+static void ns_free(struct nandsim *ns)
 {
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+		kfree(ns->partitions[i].name);
+
 	kfree(ns->buf.byte);
-	free_device(ns);
+	ns_free_device(ns);
 
 	return;
 }
 
-static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
+static int ns_parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
 {
 	char *w;
 	int zero_ok;
@@ -793,7 +825,7 @@
 	return 0;
 }
 
-static int parse_weakblocks(void)
+static int ns_parse_weakblocks(void)
 {
 	char *w;
 	int zero_ok;
@@ -830,7 +862,7 @@
 	return 0;
 }
 
-static int erase_error(unsigned int erase_block_no)
+static int ns_erase_error(unsigned int erase_block_no)
 {
 	struct weak_block *wb;
 
@@ -844,7 +876,7 @@
 	return 0;
 }
 
-static int parse_weakpages(void)
+static int ns_parse_weakpages(void)
 {
 	char *w;
 	int zero_ok;
@@ -881,7 +913,7 @@
 	return 0;
 }
 
-static int write_error(unsigned int page_no)
+static int ns_write_error(unsigned int page_no)
 {
 	struct weak_page *wp;
 
@@ -895,7 +927,7 @@
 	return 0;
 }
 
-static int parse_gravepages(void)
+static int ns_parse_gravepages(void)
 {
 	char *g;
 	int zero_ok;
@@ -932,7 +964,7 @@
 	return 0;
 }
 
-static int read_error(unsigned int page_no)
+static int ns_read_error(unsigned int page_no)
 {
 	struct grave_page *gp;
 
@@ -946,25 +978,7 @@
 	return 0;
 }
 
-static void free_lists(void)
-{
-	struct list_head *pos, *n;
-	list_for_each_safe(pos, n, &weak_blocks) {
-		list_del(pos);
-		kfree(list_entry(pos, struct weak_block, list));
-	}
-	list_for_each_safe(pos, n, &weak_pages) {
-		list_del(pos);
-		kfree(list_entry(pos, struct weak_page, list));
-	}
-	list_for_each_safe(pos, n, &grave_pages) {
-		list_del(pos);
-		kfree(list_entry(pos, struct grave_page, list));
-	}
-	kfree(erase_block_wear);
-}
-
-static int setup_wear_reporting(struct mtd_info *mtd)
+static int ns_setup_wear_reporting(struct mtd_info *mtd)
 {
 	size_t mem;
 
@@ -982,7 +996,7 @@
 	return 0;
 }
 
-static void update_wear(unsigned int erase_block_no)
+static void ns_update_wear(unsigned int erase_block_no)
 {
 	if (!erase_block_wear)
 		return;
@@ -1001,7 +1015,7 @@
 /*
  * Returns the string representation of 'state' state.
  */
-static char *get_state_name(uint32_t state)
+static char *ns_get_state_name(uint32_t state)
 {
 	switch (NS_STATE(state)) {
 		case STATE_CMD_READ0:
@@ -1061,7 +1075,7 @@
  *
  * RETURNS: 1 if wrong command, 0 if right.
  */
-static int check_command(int cmd)
+static int ns_check_command(int cmd)
 {
 	switch (cmd) {
 
@@ -1088,7 +1102,7 @@
 /*
  * Returns state after command is accepted by command number.
  */
-static uint32_t get_state_by_command(unsigned command)
+static uint32_t ns_get_state_by_command(unsigned command)
 {
 	switch (command) {
 		case NAND_CMD_READ0:
@@ -1126,7 +1140,7 @@
 /*
  * Move an address byte to the correspondent internal register.
  */
-static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
+static inline void ns_accept_addr_byte(struct nandsim *ns, u_char bt)
 {
 	uint byte = (uint)bt;
 
@@ -1144,9 +1158,10 @@
 /*
  * Switch to STATE_READY state.
  */
-static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
+static inline void ns_switch_to_ready_state(struct nandsim *ns, u_char status)
 {
-	NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
+	NS_DBG("switch_to_ready_state: switch to %s state\n",
+	       ns_get_state_name(STATE_READY));
 
 	ns->state       = STATE_READY;
 	ns->nxstate     = STATE_UNKNOWN;
@@ -1203,7 +1218,7 @@
  *          -1 - several matches.
  *           0 - operation is found.
  */
-static int find_operation(struct nandsim *ns, uint32_t flag)
+static int ns_find_operation(struct nandsim *ns, uint32_t flag)
 {
 	int opsfound = 0;
 	int i, j, idx = 0;
@@ -1256,7 +1271,8 @@
 		ns->state = ns->op[ns->stateidx];
 		ns->nxstate = ns->op[ns->stateidx + 1];
 		NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
-				idx, get_state_name(ns->state), get_state_name(ns->nxstate));
+		       idx, ns_get_state_name(ns->state),
+		       ns_get_state_name(ns->nxstate));
 		return 0;
 	}
 
@@ -1264,13 +1280,13 @@
 		/* Nothing was found. Try to ignore previous commands (if any) and search again */
 		if (ns->npstates != 0) {
 			NS_DBG("find_operation: no operation found, try again with state %s\n",
-					get_state_name(ns->state));
+			       ns_get_state_name(ns->state));
 			ns->npstates = 0;
-			return find_operation(ns, 0);
+			return ns_find_operation(ns, 0);
 
 		}
 		NS_DBG("find_operation: no operations found\n");
-		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+		ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
 		return -2;
 	}
 
@@ -1287,7 +1303,7 @@
 	return -1;
 }
 
-static void put_pages(struct nandsim *ns)
+static void ns_put_pages(struct nandsim *ns)
 {
 	int i;
 
@@ -1296,7 +1312,8 @@
 }
 
 /* Get page cache pages in advance to provide NOFS memory allocation */
-static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
+static int ns_get_pages(struct nandsim *ns, struct file *file, size_t count,
+			loff_t pos)
 {
 	pgoff_t index, start_index, end_index;
 	struct page *page;
@@ -1316,7 +1333,7 @@
 				page = find_or_create_page(mapping, index, GFP_NOFS);
 			}
 			if (page == NULL) {
-				put_pages(ns);
+				ns_put_pages(ns);
 				return -ENOMEM;
 			}
 			unlock_page(page);
@@ -1326,35 +1343,37 @@
 	return 0;
 }
 
-static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
+static ssize_t ns_read_file(struct nandsim *ns, struct file *file, void *buf,
+			    size_t count, loff_t pos)
 {
 	ssize_t tx;
 	int err;
 	unsigned int noreclaim_flag;
 
-	err = get_pages(ns, file, count, pos);
+	err = ns_get_pages(ns, file, count, pos);
 	if (err)
 		return err;
 	noreclaim_flag = memalloc_noreclaim_save();
 	tx = kernel_read(file, buf, count, &pos);
 	memalloc_noreclaim_restore(noreclaim_flag);
-	put_pages(ns);
+	ns_put_pages(ns);
 	return tx;
 }
 
-static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
+static ssize_t ns_write_file(struct nandsim *ns, struct file *file, void *buf,
+			     size_t count, loff_t pos)
 {
 	ssize_t tx;
 	int err;
 	unsigned int noreclaim_flag;
 
-	err = get_pages(ns, file, count, pos);
+	err = ns_get_pages(ns, file, count, pos);
 	if (err)
 		return err;
 	noreclaim_flag = memalloc_noreclaim_save();
 	tx = kernel_write(file, buf, count, &pos);
 	memalloc_noreclaim_restore(noreclaim_flag);
-	put_pages(ns);
+	ns_put_pages(ns);
 	return tx;
 }
 
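ns_read_file()/ns_write_file() above pair two mechanisms: ns_get_pages()
pre-faults the cache-file pages with GFP_NOFS, and the reclaim-save bracket
keeps the subsequent kernel_read()/kernel_write() from recursing into the
filesystem the simulated device may be sitting under. The bracket in
isolation:

static ssize_t nofs_file_read(struct file *file, void *buf, size_t count,
			      loff_t pos)
{
	unsigned int noreclaim_flag;
	ssize_t tx;

	/* Forbid reclaim (and thus FS writeback) during the file access */
	noreclaim_flag = memalloc_noreclaim_save();
	tx = kernel_read(file, buf, count, &pos);
	memalloc_noreclaim_restore(noreclaim_flag);

	return tx;
}
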
@@ -1374,11 +1393,11 @@
 	return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
 }
 
-static int do_read_error(struct nandsim *ns, int num)
+static int ns_do_read_error(struct nandsim *ns, int num)
 {
 	unsigned int page_no = ns->regs.row;
 
-	if (read_error(page_no)) {
+	if (ns_read_error(page_no)) {
 		prandom_bytes(ns->buf.byte, num);
 		NS_WARN("simulating read error in page %u\n", page_no);
 		return 1;
@@ -1386,7 +1405,7 @@
 	return 0;
 }
 
-static void do_bit_flips(struct nandsim *ns, int num)
+static void ns_do_bit_flips(struct nandsim *ns, int num)
 {
 	if (bitflips && prandom_u32() < (1 << 22)) {
 		int flips = 1;
@@ -1406,7 +1425,7 @@
 /*
  * Fill the NAND buffer with data read from the specified page.
  */
-static void read_page(struct nandsim *ns, int num)
+static void ns_read_page(struct nandsim *ns, int num)
 {
 	union ns_mem *mypage;
 
@@ -1420,15 +1439,16 @@
 
 			NS_DBG("read_page: page %d written, reading from %d\n",
 				ns->regs.row, ns->regs.column + ns->regs.off);
-			if (do_read_error(ns, num))
+			if (ns_do_read_error(ns, num))
 				return;
 			pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
-			tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
+			tx = ns_read_file(ns, ns->cfile, ns->buf.byte, num,
+					  pos);
 			if (tx != num) {
 				NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
 				return;
 			}
-			do_bit_flips(ns, num);
+			ns_do_bit_flips(ns, num);
 		}
 		return;
 	}
@@ -1440,17 +1460,17 @@
 	} else {
 		NS_DBG("read_page: page %d allocated, reading from %d\n",
 			ns->regs.row, ns->regs.column + ns->regs.off);
-		if (do_read_error(ns, num))
+		if (ns_do_read_error(ns, num))
 			return;
 		memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
-		do_bit_flips(ns, num);
+		ns_do_bit_flips(ns, num);
 	}
 }
 
 /*
  * Erase all pages in the specified sector.
  */
-static void erase_sector(struct nandsim *ns)
+static void ns_erase_sector(struct nandsim *ns)
 {
 	union ns_mem *mypage;
 	int i;
@@ -1478,7 +1498,7 @@
 /*
  * Program the specified page with the contents from the NAND buffer.
  */
-static int prog_page(struct nandsim *ns, int num)
+static int ns_prog_page(struct nandsim *ns, int num)
 {
 	int i;
 	union ns_mem *mypage;
@@ -1497,7 +1517,7 @@
 			memset(ns->file_buf, 0xff, ns->geom.pgszoob);
 		} else {
 			all = 0;
-			tx = read_file(ns, ns->cfile, pg_off, num, off);
+			tx = ns_read_file(ns, ns->cfile, pg_off, num, off);
 			if (tx != num) {
 				NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
 				return -1;
@@ -1507,14 +1527,15 @@
 			pg_off[i] &= ns->buf.byte[i];
 		if (all) {
 			loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
-			tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
+			tx = ns_write_file(ns, ns->cfile, ns->file_buf,
+					   ns->geom.pgszoob, pos);
 			if (tx != ns->geom.pgszoob) {
 				NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
 				return -1;
 			}
 			__set_bit(ns->regs.row, ns->pages_written);
 		} else {
-			tx = write_file(ns, ns->cfile, pg_off, num, off);
+			tx = ns_write_file(ns, ns->cfile, pg_off, num, off);
 			if (tx != num) {
 				NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
 				return -1;
@@ -1552,7 +1573,7 @@
  *
  * RETURNS: 0 if success, -1 if error.
  */
-static int do_state_action(struct nandsim *ns, uint32_t action)
+static int ns_do_state_action(struct nandsim *ns, uint32_t action)
 {
 	int num;
 	int busdiv = ns->busw == 8 ? 1 : 2;
@@ -1579,7 +1600,7 @@
 			break;
 		}
 		num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
-		read_page(ns, num);
+		ns_read_page(ns, num);
 
 		NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
 			num, NS_RAW_OFFSET(ns) + ns->regs.off);
@@ -1622,14 +1643,14 @@
 				ns->regs.row, NS_RAW_OFFSET(ns));
 		NS_LOG("erase sector %u\n", erase_block_no);
 
-		erase_sector(ns);
+		ns_erase_sector(ns);
 
 		NS_MDELAY(erase_delay);
 
 		if (erase_block_wear)
-			update_wear(erase_block_no);
+			ns_update_wear(erase_block_no);
 
-		if (erase_error(erase_block_no)) {
+		if (ns_erase_error(erase_block_no)) {
 			NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
 			return -1;
 		}
@@ -1653,7 +1674,7 @@
 			return -1;
 		}
 
-		if (prog_page(ns, num) == -1)
+		if (ns_prog_page(ns, num) == -1)
 			return -1;
 
 		page_no = ns->regs.row;
@@ -1665,7 +1686,7 @@
 		NS_UDELAY(programm_delay);
 		NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
 
-		if (write_error(page_no)) {
+		if (ns_write_error(page_no)) {
 			NS_WARN("simulating write failure in page %u\n", page_no);
 			return -1;
 		}
@@ -1702,7 +1723,7 @@
 /*
  * Switch simulator's state.
  */
-static void switch_state(struct nandsim *ns)
+static void ns_switch_state(struct nandsim *ns)
 {
 	if (ns->op) {
 		/*
@@ -1716,11 +1737,13 @@
 
 		NS_DBG("switch_state: operation is known, switch to the next state, "
 			"state: %s, nxstate: %s\n",
-			get_state_name(ns->state), get_state_name(ns->nxstate));
+		       ns_get_state_name(ns->state),
+		       ns_get_state_name(ns->nxstate));
 
 		/* See, whether we need to do some action */
-		if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
-			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+		if ((ns->state & ACTION_MASK) &&
+		    ns_do_state_action(ns, ns->state) < 0) {
+			ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
 			return;
 		}
 
@@ -1734,15 +1757,16 @@
 		 *  The only event causing the switch_state function to
 		 *  be called with yet unknown operation is new command.
 		 */
-		ns->state = get_state_by_command(ns->regs.command);
+		ns->state = ns_get_state_by_command(ns->regs.command);
 
 		NS_DBG("switch_state: operation is unknown, try to find it\n");
 
-		if (find_operation(ns, 0) != 0)
+		if (ns_find_operation(ns, 0))
 			return;
 
-		if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
-			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+		if ((ns->state & ACTION_MASK) &&
+		    ns_do_state_action(ns, ns->state) < 0) {
+			ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
 			return;
 		}
 	}
@@ -1770,7 +1794,7 @@
 
 		NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
 
-		switch_to_ready_state(ns, status);
+		ns_switch_to_ready_state(ns, status);
 
 		return;
 	} else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
@@ -1784,7 +1808,8 @@
 
 		NS_DBG("switch_state: the next state is data I/O, switch, "
 			"state: %s, nxstate: %s\n",
-			get_state_name(ns->state), get_state_name(ns->nxstate));
+		       ns_get_state_name(ns->state),
+		       ns_get_state_name(ns->nxstate));
 
 		/*
 		 * Set the internal register to the count of bytes which
@@ -1862,8 +1887,8 @@
 		return outb;
 	}
 	if (!(ns->state & STATE_DATAOUT_MASK)) {
-		NS_WARN("read_byte: unexpected data output cycle, state is %s "
-			"return %#x\n", get_state_name(ns->state), (uint)outb);
+		NS_WARN("read_byte: unexpected data output cycle, state is %s return %#x\n",
+			ns_get_state_name(ns->state), (uint)outb);
 		return outb;
 	}
 
@@ -1902,7 +1927,7 @@
 		NS_DBG("read_byte: all bytes were read\n");
 
 		if (NS_STATE(ns->nxstate) == STATE_READY)
-			switch_state(ns);
+			ns_switch_state(ns);
 	}
 
 	return outb;
@@ -1929,12 +1954,12 @@
 
 		if (byte == NAND_CMD_RESET) {
 			NS_LOG("reset chip\n");
-			switch_to_ready_state(ns, NS_STATUS_OK(ns));
+			ns_switch_to_ready_state(ns, NS_STATUS_OK(ns));
 			return;
 		}
 
 		/* Check that the command byte is correct */
-		if (check_command(byte)) {
+		if (ns_check_command(byte)) {
 			NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
 			return;
 		}
@@ -1943,7 +1968,7 @@
 			|| NS_STATE(ns->state) == STATE_DATAOUT) {
 			int row = ns->regs.row;
 
-			switch_state(ns);
+			ns_switch_state(ns);
 			if (byte == NAND_CMD_RNDOUT)
 				ns->regs.row = row;
 		}
@@ -1958,16 +1983,17 @@
 				 * was expected but command was input. In this case ignore
 				 * previous command(s)/state(s) and accept the last one.
 				 */
-				NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
-					"ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+				NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, ignore previous states\n",
+					(uint)byte,
+					ns_get_state_name(ns->nxstate));
 			}
-			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+			ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
 		}
 
 		NS_DBG("command byte corresponding to %s state accepted\n",
-			get_state_name(get_state_by_command(byte)));
+			ns_get_state_name(ns_get_state_by_command(byte)));
 		ns->regs.command = byte;
-		switch_state(ns);
+		ns_switch_state(ns);
 
 	} else if (ns->lines.ale == 1) {
 		/*
@@ -1978,11 +2004,13 @@
 
 			NS_DBG("write_byte: operation isn't known yet, identify it\n");
 
-			if (find_operation(ns, 1) < 0)
+			if (ns_find_operation(ns, 1) < 0)
 				return;
 
-			if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
-				switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+			if ((ns->state & ACTION_MASK) &&
+			    ns_do_state_action(ns, ns->state) < 0) {
+				ns_switch_to_ready_state(ns,
+							 NS_STATUS_FAILED(ns));
 				return;
 			}
 
@@ -2004,20 +2032,20 @@
 
 		/* Check that chip is expecting address */
 		if (!(ns->nxstate & STATE_ADDR_MASK)) {
-			NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
-				"switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
-			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+			NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, switch to STATE_READY\n",
+			       (uint)byte, ns_get_state_name(ns->nxstate));
+			ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
 			return;
 		}
 
 		/* Check if this is expected byte */
 		if (ns->regs.count == ns->regs.num) {
 			NS_ERR("write_byte: no more address bytes expected\n");
-			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+			ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
 			return;
 		}
 
-		accept_addr_byte(ns, byte);
+		ns_accept_addr_byte(ns, byte);
 
 		ns->regs.count += 1;
 
@@ -2026,7 +2054,7 @@
 
 		if (ns->regs.count == ns->regs.num) {
 			NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
-			switch_state(ns);
+			ns_switch_state(ns);
 		}
 
 	} else {
@@ -2036,10 +2064,10 @@
 
 		/* Check that chip is expecting data input */
 		if (!(ns->state & STATE_DATAIN_MASK)) {
-			NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
-				"switch to %s\n", (uint)byte,
-				get_state_name(ns->state), get_state_name(STATE_READY));
-			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+			NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, switch to %s\n",
+			       (uint)byte, ns_get_state_name(ns->state),
+			       ns_get_state_name(STATE_READY));
+			ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
 			return;
 		}
 
@@ -2069,16 +2097,16 @@
 
 	/* Check that chip is expecting data input */
 	if (!(ns->state & STATE_DATAIN_MASK)) {
-		NS_ERR("write_buf: data input isn't expected, state is %s, "
-			"switch to STATE_READY\n", get_state_name(ns->state));
-		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+		NS_ERR("write_buf: data input isn't expected, state is %s, switch to STATE_READY\n",
+		       ns_get_state_name(ns->state));
+		ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
 		return;
 	}
 
 	/* Check if these are expected bytes */
 	if (ns->regs.count + len > ns->regs.num) {
 		NS_ERR("write_buf: too many input bytes\n");
-		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+		ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
 		return;
 	}
 
@@ -2105,7 +2133,7 @@
 	}
 	if (!(ns->state & STATE_DATAOUT_MASK)) {
 		NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
-			get_state_name(ns->state));
+			ns_get_state_name(ns->state));
 		return;
 	}
 
@@ -2121,7 +2149,7 @@
 	/* Check if these are expected bytes */
 	if (ns->regs.count + len > ns->regs.num) {
 		NS_ERR("read_buf: too many bytes to read\n");
-		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+		ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
 		return;
 	}
 
@@ -2130,7 +2158,7 @@
 
 	if (ns->regs.count == ns->regs.num) {
 		if (NS_STATE(ns->nxstate) == STATE_READY)
-			switch_state(ns);
+			ns_switch_state(ns);
 	}
 
 	return;
@@ -2144,6 +2172,9 @@
 	const struct nand_op_instr *instr = NULL;
 	struct nandsim *ns = nand_get_controller_data(chip);
 
+	if (check_only)
+		return 0;
+
 	ns->lines.ce = 1;
 
 	for (op_id = 0; op_id < op->ninstrs; op_id++) {
@@ -2180,6 +2211,9 @@
 {
 	unsigned int eccsteps, eccbytes;
 
+	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+	chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
+
 	if (!bch)
 		return 0;
 
@@ -2203,8 +2237,6 @@
 		return -EINVAL;
 	}
 
-	chip->ecc.mode = NAND_ECC_SOFT;
-	chip->ecc.algo = NAND_ECC_BCH;
 	chip->ecc.size = 512;
 	chip->ecc.strength = bch;
 	chip->ecc.bytes = eccbytes;
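The eccbytes value consumed here follows from the soft-BCH geometry: with
512-byte steps the code words fit in GF(2^13), so each bit of correction
strength costs 13 parity bits. A sketch of the sizing check this implies (an
assumption about the elided computation, not a copy of it):

static int soft_bch_fits(struct mtd_info *mtd, unsigned int strength)
{
	/* GF(2^13) for 512B steps: 13 parity bits per correctable bit */
	unsigned int eccbytes = DIV_ROUND_UP(13 * strength, 8);
	unsigned int eccsteps = mtd->writesize / 512;

	if (eccsteps * eccbytes > mtd->oobsize)
		return -EINVAL;	/* OOB area cannot hold the parity */

	return 0;
}
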
@@ -2224,9 +2256,10 @@
  */
 static int __init ns_init_module(void)
 {
+	struct list_head *pos, *n;
 	struct nand_chip *chip;
 	struct nandsim *ns;
-	int retval = -ENOMEM, i;
+	int ret;
 
 	if (bus_width != 8 && bus_width != 16) {
 		NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
@@ -2242,8 +2275,6 @@
 	nsmtd       = nand_to_mtd(chip);
 	nand_set_controller_data(chip, (void *)ns);
 
-	chip->ecc.mode   = NAND_ECC_SOFT;
-	chip->ecc.algo   = NAND_ECC_HAMMING;
 	/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
 	/* and 'badblocks' parameters to work */
 	chip->options   |= NAND_SKIP_BBTSCAN;
@@ -2251,16 +2282,16 @@
 	switch (bbt) {
 	case 2:
 		chip->bbt_options |= NAND_BBT_NO_OOB;
-		/* fall through */
+		fallthrough;
 	case 1:
 		chip->bbt_options |= NAND_BBT_USE_FLASH;
-		/* fall through */
+		fallthrough;
 	case 0:
 		break;
 	default:
 		NS_ERR("bbt has to be 0..2\n");
-		retval = -EINVAL;
-		goto error;
+		ret = -EINVAL;
+		goto free_ns_struct;
 	}
 	/*
 	 * Perform minimum nandsim structure initialization to handle
@@ -2285,23 +2316,26 @@
 
 	nsmtd->owner = THIS_MODULE;
 
-	if ((retval = parse_weakblocks()) != 0)
-		goto error;
+	ret = ns_parse_weakblocks();
+	if (ret)
+		goto free_ns_struct;
 
-	if ((retval = parse_weakpages()) != 0)
-		goto error;
+	ret = ns_parse_weakpages();
+	if (ret)
+		goto free_wb_list;
 
-	if ((retval = parse_gravepages()) != 0)
-		goto error;
+	ret = ns_parse_gravepages();
+	if (ret)
+		goto free_wp_list;
 
 	nand_controller_init(&ns->base);
 	ns->base.ops = &ns_controller_ops;
 	chip->controller = &ns->base;
 
-	retval = nand_scan(chip, 1);
-	if (retval) {
+	ret = nand_scan(chip, 1);
+	if (ret) {
 		NS_ERR("Could not scan NAND Simulator device\n");
-		goto error;
+		goto free_gp_list;
 	}
 
 	if (overridesize) {
@@ -2313,8 +2347,8 @@
 
 		if (new_size >> overridesize != nsmtd->erasesize) {
 			NS_ERR("overridesize is too big\n");
-			retval = -EINVAL;
-			goto err_exit;
+			ret = -EINVAL;
+			goto cleanup_nand;
 		}
 
 		/* N.B. This relies on nand_scan not doing anything with the size before we change it */
@@ -2325,39 +2359,60 @@
 		chip->pagemask = (targetsize >> chip->page_shift) - 1;
 	}
 
-	if ((retval = setup_wear_reporting(nsmtd)) != 0)
-		goto err_exit;
+	ret = ns_setup_wear_reporting(nsmtd);
+	if (ret)
+		goto cleanup_nand;
 
-	if ((retval = init_nandsim(nsmtd)) != 0)
-		goto err_exit;
+	ret = ns_init(nsmtd);
+	if (ret)
+		goto free_ebw;
 
-	if ((retval = nand_create_bbt(chip)) != 0)
-		goto err_exit;
+	ret = nand_create_bbt(chip);
+	if (ret)
+		goto free_ns_object;
 
-	if ((retval = parse_badblocks(ns, nsmtd)) != 0)
-		goto err_exit;
+	ret = ns_parse_badblocks(ns, nsmtd);
+	if (ret)
+		goto free_ns_object;
 
 	/* Register NAND partitions */
-	retval = mtd_device_register(nsmtd, &ns->partitions[0],
-				     ns->nbparts);
-	if (retval != 0)
-		goto err_exit;
+	ret = mtd_device_register(nsmtd, &ns->partitions[0], ns->nbparts);
+	if (ret)
+		goto free_ns_object;
 
-	if ((retval = nandsim_debugfs_create(ns)) != 0)
-		goto err_exit;
+	ret = ns_debugfs_create(ns);
+	if (ret)
+		goto unregister_mtd;
 
         return 0;
 
-err_exit:
-	free_nandsim(ns);
-	nand_release(chip);
-	for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
-		kfree(ns->partitions[i].name);
-error:
+unregister_mtd:
+	WARN_ON(mtd_device_unregister(nsmtd));
+free_ns_object:
+	ns_free(ns);
+free_ebw:
+	kfree(erase_block_wear);
+cleanup_nand:
+	nand_cleanup(chip);
+free_gp_list:
+	list_for_each_safe(pos, n, &grave_pages) {
+		list_del(pos);
+		kfree(list_entry(pos, struct grave_page, list));
+	}
+free_wp_list:
+	list_for_each_safe(pos, n, &weak_pages) {
+		list_del(pos);
+		kfree(list_entry(pos, struct weak_page, list));
+	}
+free_wb_list:
+	list_for_each_safe(pos, n, &weak_blocks) {
+		list_del(pos);
+		kfree(list_entry(pos, struct weak_block, list));
+	}
+free_ns_struct:
 	kfree(ns);
-	free_lists();
 
-	return retval;
+	return ret;
 }
 
 module_init(ns_init_module);
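The error handling above replaces a single catch-all label with the usual
kernel unwinding ladder: one label per acquired resource, laid out in reverse
acquisition order, so a failure at step N releases exactly steps N-1..1. The
skeleton of the idiom (acquire_*/release_* are placeholders):

static int demo_init(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto err_release_a;

	ret = acquire_c();
	if (ret)
		goto err_release_b;

	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();

	return ret;
}
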
@@ -2369,14 +2424,30 @@
 {
 	struct nand_chip *chip = mtd_to_nand(nsmtd);
 	struct nandsim *ns = nand_get_controller_data(chip);
-	int i;
+	struct list_head *pos, *n;
 
-	free_nandsim(ns);    /* Free nandsim private resources */
-	nand_release(chip); /* Unregister driver */
-	for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
-		kfree(ns->partitions[i].name);
-	kfree(ns);        /* Free other structures */
-	free_lists();
+	ns_debugfs_remove(ns);
+	WARN_ON(mtd_device_unregister(nsmtd));
+	ns_free(ns);
+	kfree(erase_block_wear);
+	nand_cleanup(chip);
+
+	list_for_each_safe(pos, n, &grave_pages) {
+		list_del(pos);
+		kfree(list_entry(pos, struct grave_page, list));
+	}
+
+	list_for_each_safe(pos, n, &weak_pages) {
+		list_del(pos);
+		kfree(list_entry(pos, struct weak_page, list));
+	}
+
+	list_for_each_safe(pos, n, &weak_blocks) {
+		list_del(pos);
+		kfree(list_entry(pos, struct weak_block, list));
+	}
+
+	kfree(ns);
 }
 
 module_exit(ns_cleanup_module);
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index d324396..0fb4ba9 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -149,7 +149,7 @@
 	chip->ecc.correct = nand_correct_data;
 	chip->ecc.hwctl = ndfc_enable_hwecc;
 	chip->ecc.calculate = ndfc_calculate_ecc;
-	chip->ecc.mode = NAND_ECC_HW;
+	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 	chip->ecc.size = 256;
 	chip->ecc.bytes = 3;
 	chip->ecc.strength = 1;
@@ -244,9 +244,13 @@
 static int ndfc_remove(struct platform_device *ofdev)
 {
 	struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
-	struct mtd_info *mtd = nand_to_mtd(&ndfc->chip);
+	struct nand_chip *chip = &ndfc->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int ret;
 
-	nand_release(&ndfc->chip);
+	ret = mtd_device_unregister(mtd);
+	WARN_ON(ret);
+	nand_cleanup(chip);
 	kfree(mtd->name);
 
 	return 0;
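This is the first of several hunks in this patch making the same
substitution: nand_release() bundled MTD unregistration with NAND core
cleanup, and the open-coded form lets drivers check the unregister result.
The resulting remove() shape (demo_remove is illustrative):

static int demo_remove(struct platform_device *pdev)
{
	struct nand_chip *chip = platform_get_drvdata(pdev);
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);		/* unregistering should not fail here */
	nand_cleanup(chip);	/* releases the NAND core resources */

	return 0;
}
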
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 6ec65f4..512f607 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -884,8 +884,8 @@
 	int stat = 0;
 
 	/* Ex NAND_ECC_HW12_2048 */
-	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
-			(info->nand.ecc.size  == 2048))
+	if (info->nand.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
+	    info->nand.ecc.size == 2048)
 		blockCnt = 4;
 	else
 		blockCnt = 1;
@@ -1967,10 +1967,8 @@
 
 	case NAND_OMAP_PREFETCH_IRQ:
 		info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
-		if (info->gpmc_irq_fifo <= 0) {
-			dev_err(dev, "Error getting fifo IRQ\n");
+		if (info->gpmc_irq_fifo <= 0)
 			return -ENODEV;
-		}
 		err = devm_request_irq(dev, info->gpmc_irq_fifo,
 				       omap_nand_irq, IRQF_SHARED,
 				       "gpmc-nand-fifo", info);
@@ -1982,10 +1980,8 @@
 		}
 
 		info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
-		if (info->gpmc_irq_count <= 0) {
-			dev_err(dev, "Error getting IRQ count\n");
+		if (info->gpmc_irq_count <= 0)
 			return -ENODEV;
-		}
 		err = devm_request_irq(dev, info->gpmc_irq_count,
 				       omap_nand_irq, IRQF_SHARED,
 				       "gpmc-nand-count", info);
@@ -2010,12 +2006,12 @@
 		return -EINVAL;
 
 	/*
-	 * Bail out earlier to let NAND_ECC_SOFT code create its own
+	 * Bail out earlier to let NAND_ECC_ENGINE_TYPE_SOFT code create its own
 	 * ooblayout instead of using ours.
 	 */
 	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
-		chip->ecc.mode = NAND_ECC_SOFT;
-		chip->ecc.algo = NAND_ECC_HAMMING;
+		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
 		return 0;
 	}
 
@@ -2023,7 +2019,7 @@
 	switch (info->ecc_opt) {
 	case OMAP_ECC_HAM1_CODE_HW:
 		dev_info(dev, "nand: using OMAP_ECC_HAM1_CODE_HW\n");
-		chip->ecc.mode		= NAND_ECC_HW;
+		chip->ecc.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST;
 		chip->ecc.bytes		= 3;
 		chip->ecc.size		= 512;
 		chip->ecc.strength	= 1;
@@ -2040,7 +2036,7 @@
 
 	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
 		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
-		chip->ecc.mode		= NAND_ECC_HW;
+		chip->ecc.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST;
 		chip->ecc.size		= 512;
 		chip->ecc.bytes		= 7;
 		chip->ecc.strength	= 4;
@@ -2060,7 +2056,7 @@
 
 	case OMAP_ECC_BCH4_CODE_HW:
 		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
-		chip->ecc.mode		= NAND_ECC_HW;
+		chip->ecc.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST;
 		chip->ecc.size		= 512;
 		/* 14th bit is kept reserved for ROM-code compatibility */
 		chip->ecc.bytes		= 7 + 1;
@@ -2082,7 +2078,7 @@
 
 	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
 		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
-		chip->ecc.mode		= NAND_ECC_HW;
+		chip->ecc.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST;
 		chip->ecc.size		= 512;
 		chip->ecc.bytes		= 13;
 		chip->ecc.strength	= 8;
@@ -2102,7 +2098,7 @@
 
 	case OMAP_ECC_BCH8_CODE_HW:
 		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
-		chip->ecc.mode		= NAND_ECC_HW;
+		chip->ecc.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST;
 		chip->ecc.size		= 512;
 		/* 14th bit is kept reserved for ROM-code compatibility */
 		chip->ecc.bytes		= 13 + 1;
@@ -2125,7 +2121,7 @@
 
 	case OMAP_ECC_BCH16_CODE_HW:
 		pr_info("Using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
-		chip->ecc.mode		= NAND_ECC_HW;
+		chip->ecc.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST;
 		chip->ecc.size		= 512;
 		chip->ecc.bytes		= 26;
 		chip->ecc.strength	= 16;
@@ -2287,14 +2283,18 @@
 	struct mtd_info *mtd = platform_get_drvdata(pdev);
 	struct nand_chip *nand_chip = mtd_to_nand(mtd);
 	struct omap_nand_info *info = mtd_to_omap(mtd);
+	int ret;
+
 	if (nand_chip->ecc.priv) {
 		nand_bch_free(nand_chip->ecc.priv);
 		nand_chip->ecc.priv = NULL;
 	}
 	if (info->dma)
 		dma_release_channel(info->dma);
-	nand_release(nand_chip);
-	return 0;
+	ret = mtd_device_unregister(mtd);
+	WARN_ON(ret);
+	nand_cleanup(nand_chip);
+	return ret;
 }
 
 static const struct of_device_id omap_nand_ids[] = {
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index 6e0e31e..4b79952 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -2,7 +2,7 @@
 /*
  * Error Location Module
  *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
  */
 
 #define DRIVER_NAME	"omap-elm"
@@ -456,13 +456,13 @@
 					ELM_SYNDROME_FRAGMENT_5 + offset);
 			regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
 					ELM_SYNDROME_FRAGMENT_4 + offset);
-			/* fall through */
+			fallthrough;
 		case BCH8_ECC:
 			regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
 					ELM_SYNDROME_FRAGMENT_3 + offset);
 			regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
 					ELM_SYNDROME_FRAGMENT_2 + offset);
-			/* fall through */
+			fallthrough;
 		case BCH4_ECC:
 			regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
 					ELM_SYNDROME_FRAGMENT_1 + offset);
@@ -504,13 +504,13 @@
 					regs->elm_syndrome_fragment_5[i]);
 			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
 					regs->elm_syndrome_fragment_4[i]);
-			/* fall through */
+			fallthrough;
 		case BCH8_ECC:
 			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
 					regs->elm_syndrome_fragment_3[i]);
 			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
 					regs->elm_syndrome_fragment_2[i]);
-			/* fall through */
+			fallthrough;
 		case BCH4_ECC:
 			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
 					regs->elm_syndrome_fragment_1[i]);
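The comment-to-keyword conversions here and earlier in this patch rely on the
fallthrough pseudo-keyword (include/linux/compiler_attributes.h), which
compiles to __attribute__((__fallthrough__)) where supported, so
-Wimplicit-fallthrough can tell deliberate fall-through from a forgotten
break. Illustrative shape, with save_bch*_fragments() as placeholders:

switch (bch_type) {
case BCH16_ECC:
	save_bch16_fragments();
	fallthrough;	/* BCH16 also needs the BCH8 fragments */
case BCH8_ECC:
	save_bch8_fragments();
	fallthrough;	/* ... and the BCH4 fragment */
case BCH4_ECC:
	save_bch4_fragments();
	break;
}
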
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index a3dcdf2..2c87c7d 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -22,6 +22,7 @@
 #include <linux/platform_data/mtd-orion_nand.h>
 
 struct orion_nand_info {
+	struct nand_controller controller;
 	struct nand_chip chip;
 	struct clk *clk;
 };
@@ -82,6 +83,19 @@
 		buf[i++] = readb(io_base);
 }
 
+static int orion_nand_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+	return 0;
+}
+
+static const struct nand_controller_ops orion_nand_ops = {
+	.attach_chip = orion_nand_attach_chip,
+};
+
 static int __init orion_nand_probe(struct platform_device *pdev)
 {
 	struct orion_nand_info *info;
@@ -101,6 +115,10 @@
 	nc = &info->chip;
 	mtd = nand_to_mtd(nc);
 
+	nand_controller_init(&info->controller);
+	info->controller.ops = &orion_nand_ops;
+	nc->controller = &info->controller;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	io_base = devm_ioremap_resource(&pdev->dev, res);
 
@@ -139,8 +157,6 @@
 	nc->legacy.IO_ADDR_R = nc->legacy.IO_ADDR_W = io_base;
 	nc->legacy.cmd_ctrl = orion_nand_cmd_ctrl;
 	nc->legacy.read_buf = orion_nand_read_buf;
-	nc->ecc.mode = NAND_ECC_SOFT;
-	nc->ecc.algo = NAND_ECC_HAMMING;
 
 	if (board->chip_delay)
 		nc->legacy.chip_delay = board->chip_delay;
@@ -173,6 +189,13 @@
 		return ret;
 	}
 
+	/*
+	 * This driver assumes that the default ECC engine should be TYPE_SOFT.
+	 * Set ->engine_type before registering the NAND devices in order to
+	 * provide a driver specific default value.
+	 */
+	nc->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
 	ret = nand_scan(nc, 1);
 	if (ret)
 		goto no_dev;
@@ -195,8 +218,12 @@
 {
 	struct orion_nand_info *info = platform_get_drvdata(pdev);
 	struct nand_chip *chip = &info->chip;
+	int ret;
 
-	nand_release(chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+
+	nand_cleanup(chip);
 
 	clk_disable_unprepare(info->clk);
 
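orion_nand, pasemi_nand and plat_nand all gain the same scaffolding in this
patch: a minimal nand_controller whose ->attach_chip() hook supplies a
Hamming default only when soft ECC was requested without an explicit
algorithm. The recurring pattern, condensed (demo_* names are illustrative):

static int demo_attach_chip(struct nand_chip *chip)
{
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;

	return 0;
}

static const struct nand_controller_ops demo_ops = {
	.attach_chip = demo_attach_chip,
};

/* in probe(), before nand_scan(): */
/* nand_controller_init(&info->controller); */
/* info->controller.ops = &demo_ops;        */
/* chip->controller = &info->controller;    */
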
diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c
index 23c222b..f449470 100644
--- a/drivers/mtd/nand/raw/oxnas_nand.c
+++ b/drivers/mtd/nand/raw/oxnas_nand.c
@@ -144,8 +144,7 @@
 		if (err)
 			goto err_cleanup_nand;
 
-		oxnas->chips[oxnas->nchips] = chip;
-		++oxnas->nchips;
+		oxnas->chips[oxnas->nchips++] = chip;
 	}
 
 	/* Exit if no chips found */
@@ -182,7 +181,8 @@
 
 	for (i = 0; i < oxnas->nchips; i++) {
 		chip = oxnas->chips[i];
-		nand_release(chip);
+		WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
+		nand_cleanup(chip);
 	}
 
 	clk_disable_unprepare(oxnas->clk);
diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c
index 066ff6d..b0ba1fd 100644
--- a/drivers/mtd/nand/raw/pasemi_nand.c
+++ b/drivers/mtd/nand/raw/pasemi_nand.c
@@ -29,6 +29,7 @@
 
 static unsigned int lpcctl;
 static struct mtd_info *pasemi_nand_mtd;
+static struct nand_controller controller;
 static const char driver_name[] = "pasemi-nand";
 
 static void pasemi_read_buf(struct nand_chip *chip, u_char *buf, int len)
@@ -68,11 +69,24 @@
 	inl(lpcctl);
 }
 
-int pasemi_device_ready(struct nand_chip *chip)
+static int pasemi_device_ready(struct nand_chip *chip)
 {
 	return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
 }
 
+static int pasemi_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+	return 0;
+}
+
+static const struct nand_controller_ops pasemi_ops = {
+	.attach_chip = pasemi_attach_chip,
+};
+
 static int pasemi_nand_probe(struct platform_device *ofdev)
 {
 	struct device *dev = &ofdev->dev;
@@ -100,6 +114,10 @@
 		goto out;
 	}
 
+	controller.ops = &pasemi_ops;
+	nand_controller_init(&controller);
+	chip->controller = &controller;
+
 	pasemi_nand_mtd = nand_to_mtd(chip);
 
 	/* Link the private data with the MTD structure */
@@ -132,12 +150,17 @@
 	chip->legacy.read_buf = pasemi_read_buf;
 	chip->legacy.write_buf = pasemi_write_buf;
 	chip->legacy.chip_delay = 0;
-	chip->ecc.mode = NAND_ECC_SOFT;
-	chip->ecc.algo = NAND_ECC_HAMMING;
 
 	/* Enable the following for a flash based bad block table */
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 
+	/*
+	 * This driver assumes that the default ECC engine should be TYPE_SOFT.
+	 * Set ->engine_type before registering the NAND devices in order to
+	 * provide a driver specific default value.
+	 */
+	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
 	/* Scan to find existence of the device */
 	err = nand_scan(chip, 1);
 	if (err)
@@ -169,6 +192,7 @@
 static int pasemi_nand_remove(struct platform_device *ofdev)
 {
 	struct nand_chip *chip;
+	int ret;
 
 	if (!pasemi_nand_mtd)
 		return 0;
@@ -176,7 +200,9 @@
 	chip = mtd_to_nand(pasemi_nand_mtd);
 
 	/* Release resources, unregister device */
-	nand_release(chip);
+	ret = mtd_device_unregister(pasemi_nand_mtd);
+	WARN_ON(ret);
+	nand_cleanup(chip);
 
 	release_region(lpcctl, 4);
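
The same conversion pattern appears in orion_nand above and in plat_nand,
socrates_nand and sharpsl below: instead of hard-wiring soft Hamming ECC
before nand_scan(), the probe path only embeds a nand_controller with an
->attach_chip() hook, and the hook upgrades NAND_ECC_ALGO_UNKNOWN to
NAND_ECC_ALGO_HAMMING once the chip has been detected, leaving any algorithm
chosen via device tree intact. A condensed sketch with hypothetical names:

#include <linux/mtd/rawnand.h>

/* Called from nand_scan() after detection, before ECC initialization */
static int example_attach_chip(struct nand_chip *chip)
{
	/* Only pick Hamming when nothing else was requested */
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;

	return 0;
}

static const struct nand_controller_ops example_ops = {
	.attach_chip = example_attach_chip,
};

/* In probe, before nand_scan():
 *	nand_controller_init(&info->controller);
 *	info->controller.ops = &example_ops;
 *	chip->controller = &info->controller;
 *	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
 */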
 
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
index 3a495b2..0ee08c4 100644
--- a/drivers/mtd/nand/raw/plat_nand.c
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -14,10 +14,24 @@
 #include <linux/mtd/platnand.h>
 
 struct plat_nand_data {
+	struct nand_controller	controller;
 	struct nand_chip	chip;
 	void __iomem		*io_base;
 };
 
+static int plat_nand_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+	return 0;
+}
+
+static const struct nand_controller_ops plat_nand_ops = {
+	.attach_chip = plat_nand_attach_chip,
+};
+
 /*
  * Probe for the NAND device.
  */
@@ -46,6 +60,10 @@
 	if (!data)
 		return -ENOMEM;
 
+	data->controller.ops = &plat_nand_ops;
+	nand_controller_init(&data->controller);
+	data->chip.controller = &data->controller;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	data->io_base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(data->io_base))
@@ -66,9 +84,6 @@
 	data->chip.options |= pdata->chip.options;
 	data->chip.bbt_options |= pdata->chip.bbt_options;
 
-	data->chip.ecc.mode = NAND_ECC_SOFT;
-	data->chip.ecc.algo = NAND_ECC_HAMMING;
-
 	platform_set_drvdata(pdev, data);
 
 	/* Handle any platform specific setup */
@@ -78,6 +93,13 @@
 			goto out;
 	}
 
+	/*
+	 * This driver assumes that the default ECC engine should be TYPE_SOFT.
+	 * Set ->engine_type before registering the NAND devices in order to
+	 * provide a driver specific default value.
+	 */
+	data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
 	/* Scan to find existence of the device */
 	err = nand_scan(&data->chip, pdata->chip.nr_chips);
 	if (err)
@@ -106,8 +128,12 @@
 {
 	struct plat_nand_data *data = platform_get_drvdata(pdev);
 	struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
+	struct nand_chip *chip = &data->chip;
+	int ret;
 
-	nand_release(&data->chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 	if (pdata->ctrl.remove)
 		pdata->ctrl.remove(pdev);
 
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index c10995c..bb181e1 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2,7 +2,6 @@
 /*
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  */
-
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/bitops.h>
@@ -2552,7 +2551,7 @@
 	ecc->write_page_raw	= qcom_nandc_write_page_raw;
 	ecc->write_oob		= qcom_nandc_write_oob;
 
-	ecc->mode = NAND_ECC_HW;
+	ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 
 	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
 
@@ -2632,6 +2631,29 @@
 	.attach_chip = qcom_nand_attach_chip,
 };
 
+static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+{
+	if (nandc->props->is_bam) {
+		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+					 MAX_REG_RD *
+					 sizeof(*nandc->reg_read_buf),
+					 DMA_FROM_DEVICE);
+
+		if (nandc->tx_chan)
+			dma_release_channel(nandc->tx_chan);
+
+		if (nandc->rx_chan)
+			dma_release_channel(nandc->rx_chan);
+
+		if (nandc->cmd_chan)
+			dma_release_channel(nandc->cmd_chan);
+	} else {
+		if (nandc->chan)
+			dma_release_channel(nandc->chan);
+	}
+}
+
 static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
 {
 	int ret;
@@ -2677,22 +2699,31 @@
 			return -EIO;
 		}
 
-		nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
-		if (!nandc->tx_chan) {
-			dev_err(nandc->dev, "failed to request tx channel\n");
-			return -ENODEV;
+		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+		if (IS_ERR(nandc->tx_chan)) {
+			ret = PTR_ERR(nandc->tx_chan);
+			nandc->tx_chan = NULL;
+			dev_err_probe(nandc->dev, ret,
+				      "tx DMA channel request failed\n");
+			goto unalloc;
 		}
 
-		nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
-		if (!nandc->rx_chan) {
-			dev_err(nandc->dev, "failed to request rx channel\n");
-			return -ENODEV;
+		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+		if (IS_ERR(nandc->rx_chan)) {
+			ret = PTR_ERR(nandc->rx_chan);
+			nandc->rx_chan = NULL;
+			dev_err_probe(nandc->dev, ret,
+				      "rx DMA channel request failed\n");
+			goto unalloc;
 		}
 
-		nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
-		if (!nandc->cmd_chan) {
-			dev_err(nandc->dev, "failed to request cmd channel\n");
-			return -ENODEV;
+		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+		if (IS_ERR(nandc->cmd_chan)) {
+			ret = PTR_ERR(nandc->cmd_chan);
+			nandc->cmd_chan = NULL;
+			dev_err_probe(nandc->dev, ret,
+				      "cmd DMA channel request failed\n");
+			goto unalloc;
 		}
 
 		/*
@@ -2706,14 +2737,17 @@
 		if (!nandc->bam_txn) {
 			dev_err(nandc->dev,
 				"failed to allocate bam transaction\n");
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto unalloc;
 		}
 	} else {
-		nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
-		if (!nandc->chan) {
-			dev_err(nandc->dev,
-				"failed to request slave channel\n");
-			return -ENODEV;
+		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+		if (IS_ERR(nandc->chan)) {
+			ret = PTR_ERR(nandc->chan);
+			nandc->chan = NULL;
+			dev_err_probe(nandc->dev, ret,
+				      "rxtx DMA channel request failed\n");
+			return ret;
 		}
 	}
 
@@ -2724,29 +2758,9 @@
 	nandc->controller.ops = &qcom_nandc_ops;
 
 	return 0;
-}
-
-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
-{
-	if (nandc->props->is_bam) {
-		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
-			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
-					 MAX_REG_RD *
-					 sizeof(*nandc->reg_read_buf),
-					 DMA_FROM_DEVICE);
-
-		if (nandc->tx_chan)
-			dma_release_channel(nandc->tx_chan);
-
-		if (nandc->rx_chan)
-			dma_release_channel(nandc->rx_chan);
-
-		if (nandc->cmd_chan)
-			dma_release_channel(nandc->cmd_chan);
-	} else {
-		if (nandc->chan)
-			dma_release_channel(nandc->chan);
-	}
+unalloc:
+	qcom_nandc_unalloc(nandc);
+	return ret;
 }
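
The switch from dma_request_slave_channel() to dma_request_chan() above is
not cosmetic: the former returns NULL for every failure, silently turning
-EPROBE_DEFER into -ENODEV, while the latter returns an ERR_PTR that can be
propagated. dev_err_probe() logs the failure, stays quiet for probe
deferral, and returns the error code it was given. A minimal sketch of the
request pattern, with a hypothetical wrapper:

#include <linux/device.h>
#include <linux/dmaengine.h>

static int example_request_dma(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, "tx");
	if (IS_ERR(*chan)) {
		int ret = PTR_ERR(*chan);

		*chan = NULL;	/* lets the teardown path stay unconditional */
		/* silent for -EPROBE_DEFER, logged otherwise; returns ret */
		return dev_err_probe(dev, ret,
				     "tx DMA channel request failed\n");
	}

	return 0;
}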
 
 /* one time setup of a few nand controller registers */
@@ -2763,7 +2777,16 @@
 	/* enable ADM or BAM DMA */
 	if (nandc->props->is_bam) {
 		nand_ctrl = nandc_read(nandc, NAND_CTRL);
-		nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
+
+		/*
+		 * NAND_CTRL is an operational register, and CPU
+		 * accesses to operational registers are read-only
+		 * in BAM mode. So update the NAND_CTRL register
+		 * only if it is not in BAM mode. In most cases BAM
+		 * mode will be enabled in the bootloader.
+		 */
+		if (!(nand_ctrl & BAM_MODE_EN))
+			nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
 	} else {
 		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
 	}
@@ -2818,7 +2841,7 @@
 	chip->legacy.block_markbad	= qcom_nandc_block_markbad;
 
 	chip->controller = &nandc->controller;
-	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
+	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
 			 NAND_SKIP_BBTSCAN;
 
 	/* set up initial status value */
@@ -2944,10 +2967,6 @@
 	if (!nandc->base_dma)
 		return -ENXIO;
 
-	ret = qcom_nandc_alloc(nandc);
-	if (ret)
-		goto err_nandc_alloc;
-
 	ret = clk_prepare_enable(nandc->core_clk);
 	if (ret)
 		goto err_core_clk;
@@ -2956,6 +2975,10 @@
 	if (ret)
 		goto err_aon_clk;
 
+	ret = qcom_nandc_alloc(nandc);
+	if (ret)
+		goto err_nandc_alloc;
+
 	ret = qcom_nandc_setup(nandc);
 	if (ret)
 		goto err_setup;
@@ -2967,15 +2990,14 @@
 	return 0;
 
 err_setup:
+	qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
 	clk_disable_unprepare(nandc->aon_clk);
 err_aon_clk:
 	clk_disable_unprepare(nandc->core_clk);
 err_core_clk:
-	qcom_nandc_unalloc(nandc);
-err_nandc_alloc:
 	dma_unmap_resource(dev, res->start, resource_size(res),
 			   DMA_BIDIRECTIONAL, 0);
-
 	return ret;
 }
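
Moving qcom_nandc_alloc() after the clock enables also reorders the error
labels, preserving the invariant that unwinding releases resources in
exactly the reverse order of acquisition, each label undoing only the steps
that had already succeeded. A schematic sketch of the discipline (the
step/undo names are placeholders, not driver functions):

/* placeholder operations; any acquire/release pairs work here */
static int step_a(void), step_b(void), step_c(void);
static void undo_a(void), undo_b(void);

static int example_probe(void)
{
	int ret;

	ret = step_a();		/* e.g. clk_prepare_enable() */
	if (ret)
		return ret;

	ret = step_b();		/* e.g. DMA channel allocation */
	if (ret)
		goto err_a;

	ret = step_c();		/* e.g. controller setup */
	if (ret)
		goto err_b;

	return 0;

err_b:
	undo_b();
err_a:
	undo_a();
	return ret;
}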
 
@@ -2984,10 +3006,15 @@
 	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	struct qcom_nand_host *host;
+	struct nand_chip *chip;
+	int ret;
 
-	list_for_each_entry(host, &nandc->host_list, node)
-		nand_release(&host->chip);
-
+	list_for_each_entry(host, &nandc->host_list, node) {
+		chip = &host->chip;
+		ret = mtd_device_unregister(nand_to_mtd(chip));
+		WARN_ON(ret);
+		nand_cleanup(chip);
+	}
 
 	qcom_nandc_unalloc(nandc);
 
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index 7777425..c742354 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -651,7 +651,8 @@
 	dev->card_registered = 1;
 	return 0;
 error3:
-	nand_release(dev->chip);
+	WARN_ON(mtd_device_unregister(nand_to_mtd(dev->chip)));
+	nand_cleanup(dev->chip);
 error1:
 	/* Force card redetect */
 	dev->card_detected = 0;
@@ -670,7 +671,8 @@
 		return;
 
 	device_remove_file(&mtd->dev, &dev_attr_media_type);
-	nand_release(dev->chip);
+	WARN_ON(mtd_device_unregister(mtd));
+	nand_cleanup(dev->chip);
 	r852_engine_disable(dev);
 	dev->card_registered = 0;
 }
@@ -815,6 +817,29 @@
 	return ret;
 }
 
+static int r852_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+		return 0;
+
+	chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
+	chip->ecc.size = R852_DMA_LEN;
+	chip->ecc.bytes = SM_OOB_SIZE;
+	chip->ecc.strength = 2;
+	chip->ecc.hwctl = r852_ecc_hwctl;
+	chip->ecc.calculate = r852_ecc_calculate;
+	chip->ecc.correct = r852_ecc_correct;
+
+	/* TODO: hack */
+	chip->ecc.read_oob = r852_read_oob;
+
+	return 0;
+}
+
+static const struct nand_controller_ops r852_ops = {
+	.attach_chip = r852_attach_chip,
+};
+
 static int  r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 {
 	int error;
@@ -856,18 +881,6 @@
 	chip->legacy.read_buf = r852_read_buf;
 	chip->legacy.write_buf = r852_write_buf;
 
-	/* ecc */
-	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
-	chip->ecc.size = R852_DMA_LEN;
-	chip->ecc.bytes = SM_OOB_SIZE;
-	chip->ecc.strength = 2;
-	chip->ecc.hwctl = r852_ecc_hwctl;
-	chip->ecc.calculate = r852_ecc_calculate;
-	chip->ecc.correct = r852_ecc_correct;
-
-	/* TODO: hack */
-	chip->ecc.read_oob = r852_read_oob;
-
 	/* init our device structure */
 	dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
 
@@ -879,6 +892,10 @@
 	dev->pci_dev = pci_dev;
 	pci_set_drvdata(pci_dev, dev);
 
+	nand_controller_init(&dev->controller);
+	dev->controller.ops = &r852_ops;
+	chip->controller = &dev->controller;
+
 	dev->bounce_buffer = dma_alloc_coherent(&pci_dev->dev, R852_DMA_LEN,
 		&dev->phys_bounce_buffer, GFP_KERNEL);
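
The r852 hunks above show the last piece of the ecc.mode migration: the old
NAND_ECC_HW_SYNDROME mode splits into an engine type plus a placement. As a
reference for the conversions in this patch, the mapping looks like this
(common cases only, summarized from the hunks rather than from authoritative
documentation):

/*
 * Old chip->ecc.mode         New chip->ecc fields
 * NAND_ECC_NONE         ->   engine_type = NAND_ECC_ENGINE_TYPE_NONE
 * NAND_ECC_SOFT         ->   engine_type = NAND_ECC_ENGINE_TYPE_SOFT
 * NAND_ECC_HW           ->   engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST
 * NAND_ECC_HW_SYNDROME  ->   engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST
 *                            placement   = NAND_ECC_PLACEMENT_INTERLEAVED
 * NAND_ECC_ON_DIE       ->   engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE
 */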
 
diff --git a/drivers/mtd/nand/raw/r852.h b/drivers/mtd/nand/raw/r852.h
index e9ce299..96fe301 100644
--- a/drivers/mtd/nand/raw/r852.h
+++ b/drivers/mtd/nand/raw/r852.h
@@ -104,6 +104,7 @@
 #define DMA_MEMORY	1
 
 struct r852_device {
+	struct nand_controller		controller;
 	void __iomem *mmio;		/* mmio */
 	struct nand_chip *chip;		/* nand chip backpointer */
 	struct pci_dev *pci_dev;	/* pci backpointer */
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index 0009c18..fbd0fa4 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -291,7 +291,7 @@
 	int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
 	int tacls, twrph0, twrph1;
 	unsigned long clkrate = clk_get_rate(info->clk);
-	unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
+	unsigned long set, cfg, mask;
 	unsigned long flags;
 
 	/* calculate the timing information for the controller */
@@ -779,7 +779,8 @@
 
 		for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
 			pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
-			nand_release(&ptr->chip);
+			WARN_ON(mtd_device_unregister(nand_to_mtd(&ptr->chip)));
+			nand_cleanup(&ptr->chip);
 		}
 	}
 
@@ -807,8 +808,8 @@
 	return -ENODEV;
 }
 
-static int s3c2410_nand_setup_data_interface(struct nand_chip *chip, int csline,
-					const struct nand_data_interface *conf)
+static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline,
+					const struct nand_interface_config *conf)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
@@ -903,7 +904,7 @@
 	nmtd->info	   = info;
 	nmtd->set	   = set;
 
-	chip->ecc.mode = info->platform->ecc_mode;
+	chip->ecc.engine_type = info->platform->engine_type;
 
 	/*
 	 * If you use u-boot BBT creation code, specifying this flag will
@@ -928,24 +929,24 @@
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
 
-	switch (chip->ecc.mode) {
+	switch (chip->ecc.engine_type) {
 
-	case NAND_ECC_NONE:
+	case NAND_ECC_ENGINE_TYPE_NONE:
 		dev_info(info->device, "ECC disabled\n");
 		break;
 
-	case NAND_ECC_SOFT:
+	case NAND_ECC_ENGINE_TYPE_SOFT:
 		/*
-		 * This driver expects Hamming based ECC when ecc_mode is set
-		 * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
-		 * avoid adding an extra ecc_algo field to
-		 * s3c2410_platform_nand.
+		 * This driver expects Hamming based ECC when engine_type is set
+		 * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
+		 * NAND_ECC_ALGO_HAMMING to avoid adding an extra ecc_algo field
+		 * to s3c2410_platform_nand.
 		 */
-		chip->ecc.algo = NAND_ECC_HAMMING;
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
 		dev_info(info->device, "soft ECC\n");
 		break;
 
-	case NAND_ECC_HW:
+	case NAND_ECC_ENGINE_TYPE_ON_HOST:
 		chip->ecc.calculate = s3c2410_nand_calculate_ecc;
 		chip->ecc.correct   = s3c2410_nand_correct_data;
 		chip->ecc.strength  = 1;
@@ -998,7 +999,7 @@
 
 static const struct nand_controller_ops s3c24xx_nand_controller_ops = {
 	.attach_chip = s3c2410_nand_attach_chip,
-	.setup_data_interface = s3c2410_nand_setup_data_interface,
+	.setup_interface = s3c2410_nand_setup_interface,
 };
 
 static const struct of_device_id s3c24xx_nand_dt_ids[] = {
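
The ->setup_data_interface() to ->setup_interface() rename above (and the
matching nand_data_interface to nand_interface_config rename) is mechanical;
the hook's contract is unchanged: the core calls it once with csline set to
NAND_DATA_IFACE_CHECK_ONLY so the driver can veto unreachable timings, then
again to actually program them. A minimal sketch, where the two example_*
helpers are hypothetical stand-ins for controller-specific code:

#include <linux/mtd/rawnand.h>

static bool example_timings_ok(const struct nand_sdr_timings *sdr);
static int example_apply_timings(struct nand_chip *chip,
				 const struct nand_sdr_timings *sdr);

static int example_setup_interface(struct nand_chip *chip, int csline,
				   const struct nand_interface_config *conf)
{
	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/* First pass: only report whether the timings can be met */
	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return example_timings_ok(sdr) ? 0 : -ENOTSUPP;

	/* Second pass: program the controller registers */
	return example_apply_timings(chip, sdr);
}
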
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index e509c93..13df4bd 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1039,13 +1039,13 @@
 		chip->ecc.strength = 4;
 		chip->ecc.read_page = flctl_read_page_hwecc;
 		chip->ecc.write_page = flctl_write_page_hwecc;
-		chip->ecc.mode = NAND_ECC_HW;
+		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 
 		/* 4 symbols ECC enabled */
 		flctl->flcmncr_base |= _4ECCEN;
 	} else {
-		chip->ecc.mode = NAND_ECC_SOFT;
-		chip->ecc.algo = NAND_ECC_HAMMING;
+		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
 	}
 
 	return 0;
@@ -1129,10 +1129,8 @@
 	flctl->fifo = res->start + 0x24; /* FLDTFIFO */
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "failed to get flste irq data: %d\n", irq);
+	if (irq < 0)
 		return irq;
-	}
 
 	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
 			       "flste", flctl);
@@ -1206,9 +1204,13 @@
 static int flctl_remove(struct platform_device *pdev)
 {
 	struct sh_flctl *flctl = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &flctl->chip;
+	int ret;
 
 	flctl_release_dma(flctl);
-	nand_release(&flctl->chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 	pm_runtime_disable(&pdev->dev);
 
 	return 0;
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index d8c52a0..af98bcc 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 
 struct sharpsl_nand {
+	struct nand_controller	controller;
 	struct nand_chip	chip;
 
 	void __iomem		*io;
@@ -96,6 +97,25 @@
 	return readb(sharpsl->io + ECCCNTR) != 0;
 }
 
+static int sharpsl_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+		return 0;
+
+	chip->ecc.size = 256;
+	chip->ecc.bytes = 3;
+	chip->ecc.strength = 1;
+	chip->ecc.hwctl = sharpsl_nand_enable_hwecc;
+	chip->ecc.calculate = sharpsl_nand_calculate_ecc;
+	chip->ecc.correct = nand_correct_data;
+
+	return 0;
+}
+
+static const struct nand_controller_ops sharpsl_ops = {
+	.attach_chip = sharpsl_attach_chip,
+};
+
 /*
  * Main initialization routine
  */
@@ -136,6 +156,10 @@
 	/* Get pointer to private data */
 	this = (struct nand_chip *)(&sharpsl->chip);
 
+	nand_controller_init(&sharpsl->controller);
+	sharpsl->controller.ops = &sharpsl_ops;
+	this->controller = &sharpsl->controller;
+
 	/* Link the private data with the MTD structure */
 	mtd = nand_to_mtd(this);
 	mtd->dev.parent = &pdev->dev;
@@ -156,15 +180,7 @@
 	this->legacy.dev_ready = sharpsl_nand_dev_ready;
 	/* 15 us command delay time */
 	this->legacy.chip_delay = 15;
-	/* set eccmode using hardware ECC */
-	this->ecc.mode = NAND_ECC_HW;
-	this->ecc.size = 256;
-	this->ecc.bytes = 3;
-	this->ecc.strength = 1;
 	this->badblock_pattern = data->badblock_pattern;
-	this->ecc.hwctl = sharpsl_nand_enable_hwecc;
-	this->ecc.calculate = sharpsl_nand_calculate_ecc;
-	this->ecc.correct = nand_correct_data;
 
 	/* Scan to find existence of the device */
 	err = nand_scan(this, 1);
@@ -199,13 +215,19 @@
 static int sharpsl_nand_remove(struct platform_device *pdev)
 {
 	struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &sharpsl->chip;
+	int ret;
 
-	/* Release resources, unregister device */
-	nand_release(&sharpsl->chip);
+	/* Unregister device */
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+
+	/* Release resources */
+	nand_cleanup(chip);
 
 	iounmap(sharpsl->io);
 
-	/* Free the MTD device structure */
+	/* Free the driver's structure */
 	kfree(sharpsl);
 
 	return 0;
diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
index 7c94fc5..fb39cc7 100644
--- a/drivers/mtd/nand/raw/socrates_nand.c
+++ b/drivers/mtd/nand/raw/socrates_nand.c
@@ -22,6 +22,7 @@
 #define FPGA_NAND_DATA_SHIFT		16
 
 struct socrates_nand_host {
+	struct nand_controller	controller;
 	struct nand_chip	nand_chip;
 	void __iomem		*io_base;
 	struct device		*dev;
@@ -116,6 +117,19 @@
 	return 1;
 }
 
+static int socrates_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+	return 0;
+}
+
+static const struct nand_controller_ops socrates_ops = {
+	.attach_chip = socrates_attach_chip,
+};
+
 /*
  * Probe for the NAND device.
  */
@@ -141,6 +155,10 @@
 	mtd = nand_to_mtd(nand_chip);
 	host->dev = &ofdev->dev;
 
+	nand_controller_init(&host->controller);
+	host->controller.ops = &socrates_ops;
+	nand_chip->controller = &host->controller;
+
 	/* link the private data structures */
 	nand_set_controller_data(nand_chip, host);
 	nand_set_flash_node(nand_chip, ofdev->dev.of_node);
@@ -153,12 +171,16 @@
 	nand_chip->legacy.read_buf = socrates_nand_read_buf;
 	nand_chip->legacy.dev_ready = socrates_nand_device_ready;
 
-	nand_chip->ecc.mode = NAND_ECC_SOFT;	/* enable ECC */
-	nand_chip->ecc.algo = NAND_ECC_HAMMING;
-
 	/* TODO: I have no idea what real delay is. */
 	nand_chip->legacy.chip_delay = 20;	/* 20us command delay time */
 
+	/*
+	 * This driver assumes that the default ECC engine should be TYPE_SOFT.
+	 * Set ->engine_type before registering the NAND devices in order to
+	 * provide a driver specific default value.
+	 */
+	nand_chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
 	dev_set_drvdata(&ofdev->dev, host);
 
 	res = nand_scan(nand_chip, 1);
@@ -182,8 +204,12 @@
 static int socrates_nand_remove(struct platform_device *ofdev)
 {
 	struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
+	struct nand_chip *chip = &host->nand_chip;
+	int ret;
 
-	nand_release(&host->nand_chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 
 	iounmap(host->io_base);
 
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 5c06e0b..550bda4 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -4,16 +4,20 @@
  * Author: Christophe Kerello <christophe.kerello@st.com>
  */
 
+#include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/mtd/rawnand.h>
+#include <linux/of_address.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/reset.h>
 
 /* Bad block marker length */
@@ -37,8 +41,7 @@
 /* Max ECC buffer length */
 #define FMC2_MAX_ECC_BUF_LEN		(FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
 
-#define FMC2_TIMEOUT_US			1000
-#define FMC2_TIMEOUT_MS			1000
+#define FMC2_TIMEOUT_MS			5000
 
 /* Timings */
 #define FMC2_THIZ			1
@@ -85,20 +88,16 @@
 /* Register: FMC2_PCR */
 #define FMC2_PCR_PWAITEN		BIT(1)
 #define FMC2_PCR_PBKEN			BIT(2)
-#define FMC2_PCR_PWID_MASK		GENMASK(5, 4)
-#define FMC2_PCR_PWID(x)		(((x) & 0x3) << 4)
+#define FMC2_PCR_PWID			GENMASK(5, 4)
 #define FMC2_PCR_PWID_BUSWIDTH_8	0
 #define FMC2_PCR_PWID_BUSWIDTH_16	1
 #define FMC2_PCR_ECCEN			BIT(6)
 #define FMC2_PCR_ECCALG			BIT(8)
-#define FMC2_PCR_TCLR_MASK		GENMASK(12, 9)
-#define FMC2_PCR_TCLR(x)		(((x) & 0xf) << 9)
+#define FMC2_PCR_TCLR			GENMASK(12, 9)
 #define FMC2_PCR_TCLR_DEFAULT		0xf
-#define FMC2_PCR_TAR_MASK		GENMASK(16, 13)
-#define FMC2_PCR_TAR(x)			(((x) & 0xf) << 13)
+#define FMC2_PCR_TAR			GENMASK(16, 13)
 #define FMC2_PCR_TAR_DEFAULT		0xf
-#define FMC2_PCR_ECCSS_MASK		GENMASK(19, 17)
-#define FMC2_PCR_ECCSS(x)		(((x) & 0x7) << 17)
+#define FMC2_PCR_ECCSS			GENMASK(19, 17)
 #define FMC2_PCR_ECCSS_512		1
 #define FMC2_PCR_ECCSS_2048		3
 #define FMC2_PCR_BCHECC			BIT(24)
@@ -108,17 +107,17 @@
 #define FMC2_SR_NWRF			BIT(6)
 
 /* Register: FMC2_PMEM */
-#define FMC2_PMEM_MEMSET(x)		(((x) & 0xff) << 0)
-#define FMC2_PMEM_MEMWAIT(x)		(((x) & 0xff) << 8)
-#define FMC2_PMEM_MEMHOLD(x)		(((x) & 0xff) << 16)
-#define FMC2_PMEM_MEMHIZ(x)		(((x) & 0xff) << 24)
+#define FMC2_PMEM_MEMSET		GENMASK(7, 0)
+#define FMC2_PMEM_MEMWAIT		GENMASK(15, 8)
+#define FMC2_PMEM_MEMHOLD		GENMASK(23, 16)
+#define FMC2_PMEM_MEMHIZ		GENMASK(31, 24)
 #define FMC2_PMEM_DEFAULT		0x0a0a0a0a
 
 /* Register: FMC2_PATT */
-#define FMC2_PATT_ATTSET(x)		(((x) & 0xff) << 0)
-#define FMC2_PATT_ATTWAIT(x)		(((x) & 0xff) << 8)
-#define FMC2_PATT_ATTHOLD(x)		(((x) & 0xff) << 16)
-#define FMC2_PATT_ATTHIZ(x)		(((x) & 0xff) << 24)
+#define FMC2_PATT_ATTSET		GENMASK(7, 0)
+#define FMC2_PATT_ATTWAIT		GENMASK(15, 8)
+#define FMC2_PATT_ATTHOLD		GENMASK(23, 16)
+#define FMC2_PATT_ATTHIZ		GENMASK(31, 24)
 #define FMC2_PATT_DEFAULT		0x0a0a0a0a
 
 /* Register: FMC2_ISR */
@@ -133,9 +132,9 @@
 /* Register: FMC2_CSQCFGR1 */
 #define FMC2_CSQCFGR1_CMD2EN		BIT(1)
 #define FMC2_CSQCFGR1_DMADEN		BIT(2)
-#define FMC2_CSQCFGR1_ACYNBR(x)		(((x) & 0x7) << 4)
-#define FMC2_CSQCFGR1_CMD1(x)		(((x) & 0xff) << 8)
-#define FMC2_CSQCFGR1_CMD2(x)		(((x) & 0xff) << 16)
+#define FMC2_CSQCFGR1_ACYNBR		GENMASK(6, 4)
+#define FMC2_CSQCFGR1_CMD1		GENMASK(15, 8)
+#define FMC2_CSQCFGR1_CMD2		GENMASK(23, 16)
 #define FMC2_CSQCFGR1_CMD1T		BIT(24)
 #define FMC2_CSQCFGR1_CMD2T		BIT(25)
 
@@ -143,13 +142,13 @@
 #define FMC2_CSQCFGR2_SQSDTEN		BIT(0)
 #define FMC2_CSQCFGR2_RCMD2EN		BIT(1)
 #define FMC2_CSQCFGR2_DMASEN		BIT(2)
-#define FMC2_CSQCFGR2_RCMD1(x)		(((x) & 0xff) << 8)
-#define FMC2_CSQCFGR2_RCMD2(x)		(((x) & 0xff) << 16)
+#define FMC2_CSQCFGR2_RCMD1		GENMASK(15, 8)
+#define FMC2_CSQCFGR2_RCMD2		GENMASK(23, 16)
 #define FMC2_CSQCFGR2_RCMD1T		BIT(24)
 #define FMC2_CSQCFGR2_RCMD2T		BIT(25)
 
 /* Register: FMC2_CSQCFGR3 */
-#define FMC2_CSQCFGR3_SNBR(x)		(((x) & 0x1f) << 8)
+#define FMC2_CSQCFGR3_SNBR		GENMASK(13, 8)
 #define FMC2_CSQCFGR3_AC1T		BIT(16)
 #define FMC2_CSQCFGR3_AC2T		BIT(17)
 #define FMC2_CSQCFGR3_AC3T		BIT(18)
@@ -160,15 +159,15 @@
 #define FMC2_CSQCFGR3_RAC2T		BIT(23)
 
 /* Register: FMC2_CSQCAR1 */
-#define FMC2_CSQCAR1_ADDC1(x)		(((x) & 0xff) << 0)
-#define FMC2_CSQCAR1_ADDC2(x)		(((x) & 0xff) << 8)
-#define FMC2_CSQCAR1_ADDC3(x)		(((x) & 0xff) << 16)
-#define FMC2_CSQCAR1_ADDC4(x)		(((x) & 0xff) << 24)
+#define FMC2_CSQCAR1_ADDC1		GENMASK(7, 0)
+#define FMC2_CSQCAR1_ADDC2		GENMASK(15, 8)
+#define FMC2_CSQCAR1_ADDC3		GENMASK(23, 16)
+#define FMC2_CSQCAR1_ADDC4		GENMASK(31, 24)
 
 /* Register: FMC2_CSQCAR2 */
-#define FMC2_CSQCAR2_ADDC5(x)		(((x) & 0xff) << 0)
-#define FMC2_CSQCAR2_NANDCEN(x)		(((x) & 0x3) << 10)
-#define FMC2_CSQCAR2_SAO(x)		(((x) & 0xffff) << 16)
+#define FMC2_CSQCAR2_ADDC5		GENMASK(7, 0)
+#define FMC2_CSQCAR2_NANDCEN		GENMASK(11, 10)
+#define FMC2_CSQCAR2_SAO		GENMASK(31, 16)
 
 /* Register: FMC2_CSQIER */
 #define FMC2_CSQIER_TCIE		BIT(0)
@@ -189,28 +188,23 @@
 /* Register: FMC2_BCHDSR0 */
 #define FMC2_BCHDSR0_DUE		BIT(0)
 #define FMC2_BCHDSR0_DEF		BIT(1)
-#define FMC2_BCHDSR0_DEN_MASK		GENMASK(7, 4)
-#define FMC2_BCHDSR0_DEN_SHIFT		4
+#define FMC2_BCHDSR0_DEN		GENMASK(7, 4)
 
 /* Register: FMC2_BCHDSR1 */
-#define FMC2_BCHDSR1_EBP1_MASK		GENMASK(12, 0)
-#define FMC2_BCHDSR1_EBP2_MASK		GENMASK(28, 16)
-#define FMC2_BCHDSR1_EBP2_SHIFT		16
+#define FMC2_BCHDSR1_EBP1		GENMASK(12, 0)
+#define FMC2_BCHDSR1_EBP2		GENMASK(28, 16)
 
 /* Register: FMC2_BCHDSR2 */
-#define FMC2_BCHDSR2_EBP3_MASK		GENMASK(12, 0)
-#define FMC2_BCHDSR2_EBP4_MASK		GENMASK(28, 16)
-#define FMC2_BCHDSR2_EBP4_SHIFT		16
+#define FMC2_BCHDSR2_EBP3		GENMASK(12, 0)
+#define FMC2_BCHDSR2_EBP4		GENMASK(28, 16)
 
 /* Register: FMC2_BCHDSR3 */
-#define FMC2_BCHDSR3_EBP5_MASK		GENMASK(12, 0)
-#define FMC2_BCHDSR3_EBP6_MASK		GENMASK(28, 16)
-#define FMC2_BCHDSR3_EBP6_SHIFT		16
+#define FMC2_BCHDSR3_EBP5		GENMASK(12, 0)
+#define FMC2_BCHDSR3_EBP6		GENMASK(28, 16)
 
 /* Register: FMC2_BCHDSR4 */
-#define FMC2_BCHDSR4_EBP7_MASK		GENMASK(12, 0)
-#define FMC2_BCHDSR4_EBP8_MASK		GENMASK(28, 16)
-#define FMC2_BCHDSR4_EBP8_SHIFT		16
+#define FMC2_BCHDSR4_EBP7		GENMASK(12, 0)
+#define FMC2_BCHDSR4_EBP8		GENMASK(28, 16)
 
 enum stm32_fmc2_ecc {
 	FMC2_ECC_HAM = 1,
@@ -251,7 +245,8 @@
 	struct nand_controller base;
 	struct stm32_fmc2_nand nand;
 	struct device *dev;
-	void __iomem *io_base;
+	struct device *cdev;
+	struct regmap *regmap;
 	void __iomem *data_base[FMC2_MAX_CE];
 	void __iomem *cmd_base[FMC2_MAX_CE];
 	void __iomem *addr_base[FMC2_MAX_CE];
@@ -281,47 +276,42 @@
 	return container_of(base, struct stm32_fmc2_nfc, base);
 }
 
-/* Timings configuration */
-static void stm32_fmc2_timings_init(struct nand_chip *chip)
+static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
 	struct stm32_fmc2_timings *timings = &nand->timings;
-	u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
 	u32 pmem, patt;
 
 	/* Set tclr/tar timings */
-	pcr &= ~FMC2_PCR_TCLR_MASK;
-	pcr |= FMC2_PCR_TCLR(timings->tclr);
-	pcr &= ~FMC2_PCR_TAR_MASK;
-	pcr |= FMC2_PCR_TAR(timings->tar);
+	regmap_update_bits(nfc->regmap, FMC2_PCR,
+			   FMC2_PCR_TCLR | FMC2_PCR_TAR,
+			   FIELD_PREP(FMC2_PCR_TCLR, timings->tclr) |
+			   FIELD_PREP(FMC2_PCR_TAR, timings->tar));
 
 	/* Set tset/twait/thold/thiz timings in common bank */
-	pmem = FMC2_PMEM_MEMSET(timings->tset_mem);
-	pmem |= FMC2_PMEM_MEMWAIT(timings->twait);
-	pmem |= FMC2_PMEM_MEMHOLD(timings->thold_mem);
-	pmem |= FMC2_PMEM_MEMHIZ(timings->thiz);
+	pmem = FIELD_PREP(FMC2_PMEM_MEMSET, timings->tset_mem);
+	pmem |= FIELD_PREP(FMC2_PMEM_MEMWAIT, timings->twait);
+	pmem |= FIELD_PREP(FMC2_PMEM_MEMHOLD, timings->thold_mem);
+	pmem |= FIELD_PREP(FMC2_PMEM_MEMHIZ, timings->thiz);
+	regmap_write(nfc->regmap, FMC2_PMEM, pmem);
 
 	/* Set tset/twait/thold/thiz timings in attribute bank */
-	patt = FMC2_PATT_ATTSET(timings->tset_att);
-	patt |= FMC2_PATT_ATTWAIT(timings->twait);
-	patt |= FMC2_PATT_ATTHOLD(timings->thold_att);
-	patt |= FMC2_PATT_ATTHIZ(timings->thiz);
-
-	writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
-	writel_relaxed(pmem, fmc2->io_base + FMC2_PMEM);
-	writel_relaxed(patt, fmc2->io_base + FMC2_PATT);
+	patt = FIELD_PREP(FMC2_PATT_ATTSET, timings->tset_att);
+	patt |= FIELD_PREP(FMC2_PATT_ATTWAIT, timings->twait);
+	patt |= FIELD_PREP(FMC2_PATT_ATTHOLD, timings->thold_att);
+	patt |= FIELD_PREP(FMC2_PATT_ATTHIZ, timings->thiz);
+	regmap_write(nfc->regmap, FMC2_PATT, patt);
 }
 
-/* Controller configuration */
-static void stm32_fmc2_setup(struct nand_chip *chip)
+static void stm32_fmc2_nfc_setup(struct nand_chip *chip)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
-	u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+	u32 pcr = 0, pcr_mask;
 
 	/* Configure ECC algorithm (default configuration is Hamming) */
-	pcr &= ~FMC2_PCR_ECCALG;
-	pcr &= ~FMC2_PCR_BCHECC;
+	pcr_mask = FMC2_PCR_ECCALG;
+	pcr_mask |= FMC2_PCR_BCHECC;
 	if (chip->ecc.strength == FMC2_ECC_BCH8) {
 		pcr |= FMC2_PCR_ECCALG;
 		pcr |= FMC2_PCR_BCHECC;
@@ -330,195 +320,159 @@
 	}
 
 	/* Set buswidth */
-	pcr &= ~FMC2_PCR_PWID_MASK;
+	pcr_mask |= FMC2_PCR_PWID;
 	if (chip->options & NAND_BUSWIDTH_16)
-		pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16);
+		pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16);
 
 	/* Set ECC sector size */
-	pcr &= ~FMC2_PCR_ECCSS_MASK;
-	pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_512);
+	pcr_mask |= FMC2_PCR_ECCSS;
+	pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_512);
 
-	writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+	regmap_update_bits(nfc->regmap, FMC2_PCR, pcr_mask, pcr);
 }
 
-/* Select target */
-static int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr)
+static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
 	struct dma_slave_config dma_cfg;
 	int ret;
 
-	if (nand->cs_used[chipnr] == fmc2->cs_sel)
+	if (nand->cs_used[chipnr] == nfc->cs_sel)
 		return 0;
 
-	fmc2->cs_sel = nand->cs_used[chipnr];
+	nfc->cs_sel = nand->cs_used[chipnr];
+	stm32_fmc2_nfc_setup(chip);
+	stm32_fmc2_nfc_timings_init(chip);
 
-	/* FMC2 setup routine */
-	stm32_fmc2_setup(chip);
-
-	/* Apply timings */
-	stm32_fmc2_timings_init(chip);
-
-	if (fmc2->dma_tx_ch && fmc2->dma_rx_ch) {
+	if (nfc->dma_tx_ch && nfc->dma_rx_ch) {
 		memset(&dma_cfg, 0, sizeof(dma_cfg));
-		dma_cfg.src_addr = fmc2->data_phys_addr[fmc2->cs_sel];
-		dma_cfg.dst_addr = fmc2->data_phys_addr[fmc2->cs_sel];
+		dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
+		dma_cfg.dst_addr = nfc->data_phys_addr[nfc->cs_sel];
 		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 		dma_cfg.src_maxburst = 32;
 		dma_cfg.dst_maxburst = 32;
 
-		ret = dmaengine_slave_config(fmc2->dma_tx_ch, &dma_cfg);
+		ret = dmaengine_slave_config(nfc->dma_tx_ch, &dma_cfg);
 		if (ret) {
-			dev_err(fmc2->dev, "tx DMA engine slave config failed\n");
+			dev_err(nfc->dev, "tx DMA engine slave config failed\n");
 			return ret;
 		}
 
-		ret = dmaengine_slave_config(fmc2->dma_rx_ch, &dma_cfg);
+		ret = dmaengine_slave_config(nfc->dma_rx_ch, &dma_cfg);
 		if (ret) {
-			dev_err(fmc2->dev, "rx DMA engine slave config failed\n");
+			dev_err(nfc->dev, "rx DMA engine slave config failed\n");
 			return ret;
 		}
 	}
 
-	if (fmc2->dma_ecc_ch) {
+	if (nfc->dma_ecc_ch) {
 		/*
 		 * Hamming: we read HECCR register
 		 * BCH4/BCH8: we read BCHDSRSx registers
 		 */
 		memset(&dma_cfg, 0, sizeof(dma_cfg));
-		dma_cfg.src_addr = fmc2->io_phys_addr;
+		dma_cfg.src_addr = nfc->io_phys_addr;
 		dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ?
 				    FMC2_HECCR : FMC2_BCHDSR0;
 		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
-		ret = dmaengine_slave_config(fmc2->dma_ecc_ch, &dma_cfg);
+		ret = dmaengine_slave_config(nfc->dma_ecc_ch, &dma_cfg);
 		if (ret) {
-			dev_err(fmc2->dev, "ECC DMA engine slave config failed\n");
+			dev_err(nfc->dev, "ECC DMA engine slave config failed\n");
 			return ret;
 		}
 
 		/* Calculate ECC length needed for one sector */
-		fmc2->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
-				    FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
+		nfc->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
+				   FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
 	}
 
 	return 0;
 }
 
-/* Set bus width to 16-bit or 8-bit */
-static void stm32_fmc2_set_buswidth_16(struct stm32_fmc2_nfc *fmc2, bool set)
+static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc, bool set)
 {
-	u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+	u32 pcr;
 
-	pcr &= ~FMC2_PCR_PWID_MASK;
-	if (set)
-		pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16);
-	writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+	pcr = set ? FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16) :
+		    FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_8);
+
+	regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_PWID, pcr);
 }
 
-/* Enable/disable ECC */
-static void stm32_fmc2_set_ecc(struct stm32_fmc2_nfc *fmc2, bool enable)
+static void stm32_fmc2_nfc_set_ecc(struct stm32_fmc2_nfc *nfc, bool enable)
 {
-	u32 pcr = readl(fmc2->io_base + FMC2_PCR);
-
-	pcr &= ~FMC2_PCR_ECCEN;
-	if (enable)
-		pcr |= FMC2_PCR_ECCEN;
-	writel(pcr, fmc2->io_base + FMC2_PCR);
+	regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_ECCEN,
+			   enable ? FMC2_PCR_ECCEN : 0);
 }
 
-/* Enable irq sources in case of the sequencer is used */
-static inline void stm32_fmc2_enable_seq_irq(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc)
 {
-	u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER);
+	nfc->irq_state = FMC2_IRQ_SEQ;
 
-	csqier |= FMC2_CSQIER_TCIE;
-
-	fmc2->irq_state = FMC2_IRQ_SEQ;
-
-	writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER);
+	regmap_update_bits(nfc->regmap, FMC2_CSQIER,
+			   FMC2_CSQIER_TCIE, FMC2_CSQIER_TCIE);
 }
 
-/* Disable irq sources in case of the sequencer is used */
-static inline void stm32_fmc2_disable_seq_irq(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc)
 {
-	u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER);
+	regmap_update_bits(nfc->regmap, FMC2_CSQIER, FMC2_CSQIER_TCIE, 0);
 
-	csqier &= ~FMC2_CSQIER_TCIE;
-
-	writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER);
-
-	fmc2->irq_state = FMC2_IRQ_UNKNOWN;
+	nfc->irq_state = FMC2_IRQ_UNKNOWN;
 }
 
-/* Clear irq sources in case of the sequencer is used */
-static inline void stm32_fmc2_clear_seq_irq(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc)
 {
-	writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, fmc2->io_base + FMC2_CSQICR);
+	regmap_write(nfc->regmap, FMC2_CSQICR, FMC2_CSQICR_CLEAR_IRQ);
 }
 
-/* Enable irq sources in case of bch is used */
-static inline void stm32_fmc2_enable_bch_irq(struct stm32_fmc2_nfc *fmc2,
-					     int mode)
+static void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc, int mode)
 {
-	u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER);
+	nfc->irq_state = FMC2_IRQ_BCH;
 
 	if (mode == NAND_ECC_WRITE)
-		bchier |= FMC2_BCHIER_EPBRIE;
+		regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+				   FMC2_BCHIER_EPBRIE, FMC2_BCHIER_EPBRIE);
 	else
-		bchier |= FMC2_BCHIER_DERIE;
-
-	fmc2->irq_state = FMC2_IRQ_BCH;
-
-	writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER);
+		regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+				   FMC2_BCHIER_DERIE, FMC2_BCHIER_DERIE);
 }
 
-/* Disable irq sources in case of bch is used */
-static inline void stm32_fmc2_disable_bch_irq(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc)
 {
-	u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER);
+	regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+			   FMC2_BCHIER_DERIE | FMC2_BCHIER_EPBRIE, 0);
 
-	bchier &= ~FMC2_BCHIER_DERIE;
-	bchier &= ~FMC2_BCHIER_EPBRIE;
-
-	writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER);
-
-	fmc2->irq_state = FMC2_IRQ_UNKNOWN;
+	nfc->irq_state = FMC2_IRQ_UNKNOWN;
 }
 
-/* Clear irq sources in case of bch is used */
-static inline void stm32_fmc2_clear_bch_irq(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc)
 {
-	writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, fmc2->io_base + FMC2_BCHICR);
+	regmap_write(nfc->regmap, FMC2_BCHICR, FMC2_BCHICR_CLEAR_IRQ);
 }
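
All of the surrounding read-modify-write sequences on io_base collapse into
regmap calls: regmap_update_bits(map, reg, mask, val) performs the
read/mask/write cycle internally (and skips the write when nothing changes),
while plain regmap_read()/regmap_write() cover whole-register accesses such
as the write-only interrupt-clear registers. A sketch of the two shapes,
with illustrative register offsets and bit positions:

#include <linux/bits.h>
#include <linux/regmap.h>

/* flip one enable bit without disturbing the rest of the register */
static void example_set_enable(struct regmap *map, bool enable)
{
	regmap_update_bits(map, 0x80 /* offset */, BIT(6),
			   enable ? BIT(6) : 0);
}

/* write-only clear register: no read-modify-write cycle needed */
static void example_clear_irq(struct regmap *map)
{
	regmap_write(map, 0x8c /* offset */, GENMASK(1, 0));
}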
 
 /*
  * Enable ECC logic and reset syndrome/parity bits previously calculated
  * Syndrome/parity bits are cleared by setting the ECCEN bit to 0
  */
-static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode)
+static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 
-	stm32_fmc2_set_ecc(fmc2, false);
+	stm32_fmc2_nfc_set_ecc(nfc, false);
 
 	if (chip->ecc.strength != FMC2_ECC_HAM) {
-		u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+		regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN,
+				   mode == NAND_ECC_WRITE ? FMC2_PCR_WEN : 0);
 
-		if (mode == NAND_ECC_WRITE)
-			pcr |= FMC2_PCR_WEN;
-		else
-			pcr &= ~FMC2_PCR_WEN;
-		writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
-
-		reinit_completion(&fmc2->complete);
-		stm32_fmc2_clear_bch_irq(fmc2);
-		stm32_fmc2_enable_bch_irq(fmc2, mode);
+		reinit_completion(&nfc->complete);
+		stm32_fmc2_nfc_clear_bch_irq(nfc);
+		stm32_fmc2_nfc_enable_bch_irq(nfc, mode);
 	}
 
-	stm32_fmc2_set_ecc(fmc2, true);
+	stm32_fmc2_nfc_set_ecc(nfc, true);
 }
 
 /*
@@ -526,40 +480,37 @@
  * ECC is 3 bytes for 512 bytes of data (supports error correction up to
  * max of 1-bit)
  */
-static inline void stm32_fmc2_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
+static void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
 {
 	ecc[0] = ecc_sta;
 	ecc[1] = ecc_sta >> 8;
 	ecc[2] = ecc_sta >> 16;
 }
 
-static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data,
-				    u8 *ecc)
+static int stm32_fmc2_nfc_ham_calculate(struct nand_chip *chip, const u8 *data,
+					u8 *ecc)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	u32 sr, heccr;
 	int ret;
 
-	ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR,
-					 sr, sr & FMC2_SR_NWRF, 10,
-					 FMC2_TIMEOUT_MS);
+	ret = regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr,
+				       sr & FMC2_SR_NWRF, 1,
+				       1000 * FMC2_TIMEOUT_MS);
 	if (ret) {
-		dev_err(fmc2->dev, "ham timeout\n");
+		dev_err(nfc->dev, "ham timeout\n");
 		return ret;
 	}
 
-	heccr = readl_relaxed(fmc2->io_base + FMC2_HECCR);
-
-	stm32_fmc2_ham_set_ecc(heccr, ecc);
-
-	/* Disable ECC */
-	stm32_fmc2_set_ecc(fmc2, false);
+	regmap_read(nfc->regmap, FMC2_HECCR, &heccr);
+	stm32_fmc2_nfc_ham_set_ecc(heccr, ecc);
+	stm32_fmc2_nfc_set_ecc(nfc, false);
 
 	return 0;
 }
 
-static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat,
-				  u8 *read_ecc, u8 *calc_ecc)
+static int stm32_fmc2_nfc_ham_correct(struct nand_chip *chip, u8 *dat,
+				      u8 *read_ecc, u8 *calc_ecc)
 {
 	u8 bit_position = 0, b0, b1, b2;
 	u32 byte_addr = 0, b;
@@ -615,28 +566,28 @@
  * ECC is 7/13 bytes for 512 bytes of data (supports error correction up to
  * max of 4-bit/8-bit)
  */
-static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data,
-				    u8 *ecc)
+static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data,
+					u8 *ecc)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	u32 bchpbr;
 
 	/* Wait until the BCH code is ready */
-	if (!wait_for_completion_timeout(&fmc2->complete,
+	if (!wait_for_completion_timeout(&nfc->complete,
 					 msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
-		dev_err(fmc2->dev, "bch timeout\n");
-		stm32_fmc2_disable_bch_irq(fmc2);
+		dev_err(nfc->dev, "bch timeout\n");
+		stm32_fmc2_nfc_disable_bch_irq(nfc);
 		return -ETIMEDOUT;
 	}
 
 	/* Read parity bits */
-	bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR1);
+	regmap_read(nfc->regmap, FMC2_BCHPBR1, &bchpbr);
 	ecc[0] = bchpbr;
 	ecc[1] = bchpbr >> 8;
 	ecc[2] = bchpbr >> 16;
 	ecc[3] = bchpbr >> 24;
 
-	bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR2);
+	regmap_read(nfc->regmap, FMC2_BCHPBR2, &bchpbr);
 	ecc[4] = bchpbr;
 	ecc[5] = bchpbr >> 8;
 	ecc[6] = bchpbr >> 16;
@@ -644,24 +595,22 @@
 	if (chip->ecc.strength == FMC2_ECC_BCH8) {
 		ecc[7] = bchpbr >> 24;
 
-		bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR3);
+		regmap_read(nfc->regmap, FMC2_BCHPBR3, &bchpbr);
 		ecc[8] = bchpbr;
 		ecc[9] = bchpbr >> 8;
 		ecc[10] = bchpbr >> 16;
 		ecc[11] = bchpbr >> 24;
 
-		bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR4);
+		regmap_read(nfc->regmap, FMC2_BCHPBR4, &bchpbr);
 		ecc[12] = bchpbr;
 	}
 
-	/* Disable ECC */
-	stm32_fmc2_set_ecc(fmc2, false);
+	stm32_fmc2_nfc_set_ecc(nfc, false);
 
 	return 0;
 }
 
-/* BCH algorithm correction */
-static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
+static int stm32_fmc2_nfc_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
 {
 	u32 bchdsr0 = ecc_sta[0];
 	u32 bchdsr1 = ecc_sta[1];
@@ -680,16 +629,16 @@
 	if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE))
 		return -EBADMSG;
 
-	pos[0] = bchdsr1 & FMC2_BCHDSR1_EBP1_MASK;
-	pos[1] = (bchdsr1 & FMC2_BCHDSR1_EBP2_MASK) >> FMC2_BCHDSR1_EBP2_SHIFT;
-	pos[2] = bchdsr2 & FMC2_BCHDSR2_EBP3_MASK;
-	pos[3] = (bchdsr2 & FMC2_BCHDSR2_EBP4_MASK) >> FMC2_BCHDSR2_EBP4_SHIFT;
-	pos[4] = bchdsr3 & FMC2_BCHDSR3_EBP5_MASK;
-	pos[5] = (bchdsr3 & FMC2_BCHDSR3_EBP6_MASK) >> FMC2_BCHDSR3_EBP6_SHIFT;
-	pos[6] = bchdsr4 & FMC2_BCHDSR4_EBP7_MASK;
-	pos[7] = (bchdsr4 & FMC2_BCHDSR4_EBP8_MASK) >> FMC2_BCHDSR4_EBP8_SHIFT;
+	pos[0] = FIELD_GET(FMC2_BCHDSR1_EBP1, bchdsr1);
+	pos[1] = FIELD_GET(FMC2_BCHDSR1_EBP2, bchdsr1);
+	pos[2] = FIELD_GET(FMC2_BCHDSR2_EBP3, bchdsr2);
+	pos[3] = FIELD_GET(FMC2_BCHDSR2_EBP4, bchdsr2);
+	pos[4] = FIELD_GET(FMC2_BCHDSR3_EBP5, bchdsr3);
+	pos[5] = FIELD_GET(FMC2_BCHDSR3_EBP6, bchdsr3);
+	pos[6] = FIELD_GET(FMC2_BCHDSR4_EBP7, bchdsr4);
+	pos[7] = FIELD_GET(FMC2_BCHDSR4_EBP8, bchdsr4);
 
-	den = (bchdsr0 & FMC2_BCHDSR0_DEN_MASK) >> FMC2_BCHDSR0_DEN_SHIFT;
+	den = FIELD_GET(FMC2_BCHDSR0_DEN, bchdsr0);
 	for (i = 0; i < den; i++) {
 		if (pos[i] < eccsize * 8) {
 			change_bit(pos[i], (unsigned long *)dat);
@@ -700,34 +649,29 @@
 	return nb_errs;
 }
 
-static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat,
-				  u8 *read_ecc, u8 *calc_ecc)
+static int stm32_fmc2_nfc_bch_correct(struct nand_chip *chip, u8 *dat,
+				      u8 *read_ecc, u8 *calc_ecc)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	u32 ecc_sta[5];
 
 	/* Wait until the decoding error is ready */
-	if (!wait_for_completion_timeout(&fmc2->complete,
+	if (!wait_for_completion_timeout(&nfc->complete,
 					 msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
-		dev_err(fmc2->dev, "bch timeout\n");
-		stm32_fmc2_disable_bch_irq(fmc2);
+		dev_err(nfc->dev, "bch timeout\n");
+		stm32_fmc2_nfc_disable_bch_irq(nfc);
 		return -ETIMEDOUT;
 	}
 
-	ecc_sta[0] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR0);
-	ecc_sta[1] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR1);
-	ecc_sta[2] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR2);
-	ecc_sta[3] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR3);
-	ecc_sta[4] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR4);
+	regmap_bulk_read(nfc->regmap, FMC2_BCHDSR0, ecc_sta, 5);
 
-	/* Disable ECC */
-	stm32_fmc2_set_ecc(fmc2, false);
+	stm32_fmc2_nfc_set_ecc(nfc, false);
 
-	return stm32_fmc2_bch_decode(chip->ecc.size, dat, ecc_sta);
+	return stm32_fmc2_nfc_bch_decode(chip->ecc.size, dat, ecc_sta);
 }
 
-static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf,
-				int oob_required, int page)
+static int stm32_fmc2_nfc_read_page(struct nand_chip *chip, u8 *buf,
+				    int oob_required, int page)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	int ret, i, s, stat, eccsize = chip->ecc.size;
@@ -789,35 +733,34 @@
 }
 
 /* Sequencer read/write configuration */
-static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
-				    int raw, bool write_data)
+static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page,
+					int raw, bool write_data)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	struct mtd_info *mtd = nand_to_mtd(chip);
-	u32 csqcfgr1, csqcfgr2, csqcfgr3;
-	u32 csqar1, csqar2;
 	u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN;
-	u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+	/*
+	 * cfg[0] => csqcfgr1, cfg[1] => csqcfgr2, cfg[2] => csqcfgr3
+	 * cfg[3] => csqar1, cfg[4] => csqar2
+	 */
+	u32 cfg[5];
 
-	if (write_data)
-		pcr |= FMC2_PCR_WEN;
-	else
-		pcr &= ~FMC2_PCR_WEN;
-	writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+	regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN,
+			   write_data ? FMC2_PCR_WEN : 0);
 
 	/*
 	 * - Set Program Page/Page Read command
 	 * - Enable DMA request data
 	 * - Set timings
 	 */
-	csqcfgr1 = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
+	cfg[0] = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
 	if (write_data)
-		csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_SEQIN);
+		cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN);
 	else
-		csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_READ0) |
-			    FMC2_CSQCFGR1_CMD2EN |
-			    FMC2_CSQCFGR1_CMD2(NAND_CMD_READSTART) |
-			    FMC2_CSQCFGR1_CMD2T;
+		cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) |
+			  FMC2_CSQCFGR1_CMD2EN |
+			  FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) |
+			  FMC2_CSQCFGR1_CMD2T;
 
 	/*
 	 * - Set Random Data Input/Random Data Read command
@@ -826,29 +769,29 @@
 	 * - Set timings
 	 */
 	if (write_data)
-		csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDIN);
+		cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN);
 	else
-		csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDOUT) |
-			   FMC2_CSQCFGR2_RCMD2EN |
-			   FMC2_CSQCFGR2_RCMD2(NAND_CMD_RNDOUTSTART) |
-			   FMC2_CSQCFGR2_RCMD1T |
-			   FMC2_CSQCFGR2_RCMD2T;
+		cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) |
+			 FMC2_CSQCFGR2_RCMD2EN |
+			 FIELD_PREP(FMC2_CSQCFGR2_RCMD2, NAND_CMD_RNDOUTSTART) |
+			 FMC2_CSQCFGR2_RCMD1T |
+			 FMC2_CSQCFGR2_RCMD2T;
 	if (!raw) {
-		csqcfgr2 |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN;
-		csqcfgr2 |= FMC2_CSQCFGR2_SQSDTEN;
+		cfg[1] |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN;
+		cfg[1] |= FMC2_CSQCFGR2_SQSDTEN;
 	}
 
 	/*
 	 * - Set the number of sectors to be written
 	 * - Set timings
 	 */
-	csqcfgr3 = FMC2_CSQCFGR3_SNBR(chip->ecc.steps - 1);
+	cfg[2] = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1);
 	if (write_data) {
-		csqcfgr3 |= FMC2_CSQCFGR3_RAC2T;
+		cfg[2] |= FMC2_CSQCFGR3_RAC2T;
 		if (chip->options & NAND_ROW_ADDR_3)
-			csqcfgr3 |= FMC2_CSQCFGR3_AC5T;
+			cfg[2] |= FMC2_CSQCFGR3_AC5T;
 		else
-			csqcfgr3 |= FMC2_CSQCFGR3_AC4T;
+			cfg[2] |= FMC2_CSQCFGR3_AC4T;
 	}
 
 	/*
@@ -856,8 +799,8 @@
 	 * Byte 1 and byte 2 => column, we start at 0x0
 	 * Byte 3 and byte 4 => page
 	 */
-	csqar1 = FMC2_CSQCAR1_ADDC3(page);
-	csqar1 |= FMC2_CSQCAR1_ADDC4(page >> 8);
+	cfg[3] = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page);
+	cfg[3] |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8);
 
 	/*
 	 * - Set chip enable number
@@ -865,43 +808,39 @@
 	 * - Calculate the number of address cycles to be issued
 	 * - Set byte 5 of address cycle if needed
 	 */
-	csqar2 = FMC2_CSQCAR2_NANDCEN(fmc2->cs_sel);
+	cfg[4] = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel);
 	if (chip->options & NAND_BUSWIDTH_16)
-		csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset >> 1);
+		cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1);
 	else
-		csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset);
+		cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset);
 	if (chip->options & NAND_ROW_ADDR_3) {
-		csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(5);
-		csqar2 |= FMC2_CSQCAR2_ADDC5(page >> 16);
+		cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5);
+		cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16);
 	} else {
-		csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(4);
+		cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4);
 	}
 
-	writel_relaxed(csqcfgr1, fmc2->io_base + FMC2_CSQCFGR1);
-	writel_relaxed(csqcfgr2, fmc2->io_base + FMC2_CSQCFGR2);
-	writel_relaxed(csqcfgr3, fmc2->io_base + FMC2_CSQCFGR3);
-	writel_relaxed(csqar1, fmc2->io_base + FMC2_CSQAR1);
-	writel_relaxed(csqar2, fmc2->io_base + FMC2_CSQAR2);
+	regmap_bulk_write(nfc->regmap, FMC2_CSQCFGR1, cfg, 5);
 }
 
-static void stm32_fmc2_dma_callback(void *arg)
+static void stm32_fmc2_nfc_dma_callback(void *arg)
 {
 	complete((struct completion *)arg);
 }
 
 /* Read/write data from/to a page */
-static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
-			   int raw, bool write_data)
+static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+			       int raw, bool write_data)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	struct dma_async_tx_descriptor *desc_data, *desc_ecc;
 	struct scatterlist *sg;
-	struct dma_chan *dma_ch = fmc2->dma_rx_ch;
+	struct dma_chan *dma_ch = nfc->dma_rx_ch;
 	enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE;
 	enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM;
-	u32 csqcr = readl_relaxed(fmc2->io_base + FMC2_CSQCR);
 	int eccsteps = chip->ecc.steps;
 	int eccsize = chip->ecc.size;
+	unsigned long timeout = msecs_to_jiffies(FMC2_TIMEOUT_MS);
 	const u8 *p = buf;
 	int s, ret;
 
@@ -909,20 +848,20 @@
 	if (write_data) {
 		dma_data_dir = DMA_TO_DEVICE;
 		dma_transfer_dir = DMA_MEM_TO_DEV;
-		dma_ch = fmc2->dma_tx_ch;
+		dma_ch = nfc->dma_tx_ch;
 	}
 
-	for_each_sg(fmc2->dma_data_sg.sgl, sg, eccsteps, s) {
+	for_each_sg(nfc->dma_data_sg.sgl, sg, eccsteps, s) {
 		sg_set_buf(sg, p, eccsize);
 		p += eccsize;
 	}
 
-	ret = dma_map_sg(fmc2->dev, fmc2->dma_data_sg.sgl,
+	ret = dma_map_sg(nfc->dev, nfc->dma_data_sg.sgl,
 			 eccsteps, dma_data_dir);
 	if (ret < 0)
 		return ret;
 
-	desc_data = dmaengine_prep_slave_sg(dma_ch, fmc2->dma_data_sg.sgl,
+	desc_data = dmaengine_prep_slave_sg(dma_ch, nfc->dma_data_sg.sgl,
 					    eccsteps, dma_transfer_dir,
 					    DMA_PREP_INTERRUPT);
 	if (!desc_data) {
@@ -930,10 +869,10 @@
 		goto err_unmap_data;
 	}
 
-	reinit_completion(&fmc2->dma_data_complete);
-	reinit_completion(&fmc2->complete);
-	desc_data->callback = stm32_fmc2_dma_callback;
-	desc_data->callback_param = &fmc2->dma_data_complete;
+	reinit_completion(&nfc->dma_data_complete);
+	reinit_completion(&nfc->complete);
+	desc_data->callback = stm32_fmc2_nfc_dma_callback;
+	desc_data->callback_param = &nfc->dma_data_complete;
 	ret = dma_submit_error(dmaengine_submit(desc_data));
 	if (ret)
 		goto err_unmap_data;
@@ -942,19 +881,19 @@
 
 	if (!write_data && !raw) {
 		/* Configure DMA ECC status */
-		p = fmc2->ecc_buf;
-		for_each_sg(fmc2->dma_ecc_sg.sgl, sg, eccsteps, s) {
-			sg_set_buf(sg, p, fmc2->dma_ecc_len);
-			p += fmc2->dma_ecc_len;
+		p = nfc->ecc_buf;
+		for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
+			sg_set_buf(sg, p, nfc->dma_ecc_len);
+			p += nfc->dma_ecc_len;
 		}
 
-		ret = dma_map_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl,
+		ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
 				 eccsteps, dma_data_dir);
 		if (ret < 0)
 			goto err_unmap_data;
 
-		desc_ecc = dmaengine_prep_slave_sg(fmc2->dma_ecc_ch,
-						   fmc2->dma_ecc_sg.sgl,
+		desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
+						   nfc->dma_ecc_sg.sgl,
 						   eccsteps, dma_transfer_dir,
 						   DMA_PREP_INTERRUPT);
 		if (!desc_ecc) {
@@ -962,76 +901,73 @@
 			goto err_unmap_ecc;
 		}
 
-		reinit_completion(&fmc2->dma_ecc_complete);
-		desc_ecc->callback = stm32_fmc2_dma_callback;
-		desc_ecc->callback_param = &fmc2->dma_ecc_complete;
+		reinit_completion(&nfc->dma_ecc_complete);
+		desc_ecc->callback = stm32_fmc2_nfc_dma_callback;
+		desc_ecc->callback_param = &nfc->dma_ecc_complete;
 		ret = dma_submit_error(dmaengine_submit(desc_ecc));
 		if (ret)
 			goto err_unmap_ecc;
 
-		dma_async_issue_pending(fmc2->dma_ecc_ch);
+		dma_async_issue_pending(nfc->dma_ecc_ch);
 	}
 
-	stm32_fmc2_clear_seq_irq(fmc2);
-	stm32_fmc2_enable_seq_irq(fmc2);
+	stm32_fmc2_nfc_clear_seq_irq(nfc);
+	stm32_fmc2_nfc_enable_seq_irq(nfc);
 
 	/* Start the transfer */
-	csqcr |= FMC2_CSQCR_CSQSTART;
-	writel_relaxed(csqcr, fmc2->io_base + FMC2_CSQCR);
+	regmap_update_bits(nfc->regmap, FMC2_CSQCR,
+			   FMC2_CSQCR_CSQSTART, FMC2_CSQCR_CSQSTART);
 
 	/* Wait end of sequencer transfer */
-	if (!wait_for_completion_timeout(&fmc2->complete,
-					 msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
-		dev_err(fmc2->dev, "seq timeout\n");
-		stm32_fmc2_disable_seq_irq(fmc2);
+	if (!wait_for_completion_timeout(&nfc->complete, timeout)) {
+		dev_err(nfc->dev, "seq timeout\n");
+		stm32_fmc2_nfc_disable_seq_irq(nfc);
 		dmaengine_terminate_all(dma_ch);
 		if (!write_data && !raw)
-			dmaengine_terminate_all(fmc2->dma_ecc_ch);
+			dmaengine_terminate_all(nfc->dma_ecc_ch);
 		ret = -ETIMEDOUT;
 		goto err_unmap_ecc;
 	}
 
 	/* Wait DMA data transfer completion */
-	if (!wait_for_completion_timeout(&fmc2->dma_data_complete,
-					 msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
-		dev_err(fmc2->dev, "data DMA timeout\n");
+	if (!wait_for_completion_timeout(&nfc->dma_data_complete, timeout)) {
+		dev_err(nfc->dev, "data DMA timeout\n");
 		dmaengine_terminate_all(dma_ch);
 		ret = -ETIMEDOUT;
 	}
 
 	/* Wait DMA ECC transfer completion */
 	if (!write_data && !raw) {
-		if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete,
-					msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
-			dev_err(fmc2->dev, "ECC DMA timeout\n");
-			dmaengine_terminate_all(fmc2->dma_ecc_ch);
+		if (!wait_for_completion_timeout(&nfc->dma_ecc_complete,
+						 timeout)) {
+			dev_err(nfc->dev, "ECC DMA timeout\n");
+			dmaengine_terminate_all(nfc->dma_ecc_ch);
 			ret = -ETIMEDOUT;
 		}
 	}
 
 err_unmap_ecc:
 	if (!write_data && !raw)
-		dma_unmap_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl,
+		dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
 			     eccsteps, dma_data_dir);
 
 err_unmap_data:
-	dma_unmap_sg(fmc2->dev, fmc2->dma_data_sg.sgl, eccsteps, dma_data_dir);
+	dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
 
 	return ret;
 }
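
stm32_fmc2_nfc_xfer() above follows the canonical dmaengine slave flow:
build a scatterlist over the data, map it, prepare a slave_sg descriptor
with a completion callback, submit, kick the channel, then bound the wait
and terminate the channel on timeout so a wedged transfer cannot leak. A
condensed sketch of that flow, assuming the channel and sg_table were set
up elsewhere:

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>

static void example_dma_done(void *arg)
{
	complete(arg);
}

static int example_dma_read(struct device *dev, struct dma_chan *ch,
			    struct sg_table *sgt, struct completion *done)
{
	struct dma_async_tx_descriptor *desc;
	int nents, ret;

	nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
	if (nents <= 0)
		return -EIO;

	desc = dmaengine_prep_slave_sg(ch, sgt->sgl, nents,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -ENODEV;
		goto unmap;
	}

	reinit_completion(done);
	desc->callback = example_dma_done;
	desc->callback_param = done;
	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret)
		goto unmap;

	dma_async_issue_pending(ch);

	if (!wait_for_completion_timeout(done, msecs_to_jiffies(5000))) {
		dmaengine_terminate_all(ch);	/* abort a wedged transfer */
		ret = -ETIMEDOUT;
	}

unmap:
	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
	return ret;
}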
 
-static int stm32_fmc2_sequencer_write(struct nand_chip *chip,
-				      const u8 *buf, int oob_required,
-				      int page, int raw)
+static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
+				    int oob_required, int page, int raw)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	int ret;
 
 	/* Configure the sequencer */
-	stm32_fmc2_rw_page_init(chip, page, raw, true);
+	stm32_fmc2_nfc_rw_page_init(chip, page, raw, true);
 
 	/* Write the page */
-	ret = stm32_fmc2_xfer(chip, buf, raw, true);
+	ret = stm32_fmc2_nfc_xfer(chip, buf, raw, true);
 	if (ret)
 		return ret;
 
@@ -1047,55 +983,52 @@
 	return nand_prog_page_end_op(chip);
 }
 
-static int stm32_fmc2_sequencer_write_page(struct nand_chip *chip,
-					   const u8 *buf,
-					   int oob_required,
-					   int page)
+static int stm32_fmc2_nfc_seq_write_page(struct nand_chip *chip, const u8 *buf,
+					 int oob_required, int page)
 {
 	int ret;
 
-	/* Select the target */
-	ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+	ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
 	if (ret)
 		return ret;
 
-	return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, false);
+	return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, false);
 }
 
-static int stm32_fmc2_sequencer_write_page_raw(struct nand_chip *chip,
-					       const u8 *buf,
-					       int oob_required,
-					       int page)
+static int stm32_fmc2_nfc_seq_write_page_raw(struct nand_chip *chip,
+					     const u8 *buf, int oob_required,
+					     int page)
 {
 	int ret;
 
-	/* Select the target */
-	ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+	ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
 	if (ret)
 		return ret;
 
-	return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, true);
+	return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, true);
 }
 
 /* Get a status indicating which sectors have errors */
-static inline u16 stm32_fmc2_get_mapping_status(struct stm32_fmc2_nfc *fmc2)
+static u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc)
 {
-	u32 csqemsr = readl_relaxed(fmc2->io_base + FMC2_CSQEMSR);
+	u32 csqemsr;
 
-	return csqemsr & FMC2_CSQEMSR_SEM;
+	regmap_read(nfc->regmap, FMC2_CSQEMSR, &csqemsr);
+
+	return FIELD_GET(FMC2_CSQEMSR_SEM, csqemsr);
 }
 
-static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
-					u8 *read_ecc, u8 *calc_ecc)
+static int stm32_fmc2_nfc_seq_correct(struct nand_chip *chip, u8 *dat,
+				      u8 *read_ecc, u8 *calc_ecc)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	int eccbytes = chip->ecc.bytes;
 	int eccsteps = chip->ecc.steps;
 	int eccstrength = chip->ecc.strength;
 	int i, s, eccsize = chip->ecc.size;
-	u32 *ecc_sta = (u32 *)fmc2->ecc_buf;
-	u16 sta_map = stm32_fmc2_get_mapping_status(fmc2);
+	u32 *ecc_sta = (u32 *)nfc->ecc_buf;
+	u16 sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
 	unsigned int max_bitflips = 0;
 
 	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, dat += eccsize) {
@@ -1104,10 +1037,11 @@
 		if (eccstrength == FMC2_ECC_HAM) {
 			/* Ecc_sta = FMC2_HECCR */
 			if (sta_map & BIT(s)) {
-				stm32_fmc2_ham_set_ecc(*ecc_sta, &calc_ecc[i]);
-				stat = stm32_fmc2_ham_correct(chip, dat,
-							      &read_ecc[i],
-							      &calc_ecc[i]);
+				stm32_fmc2_nfc_ham_set_ecc(*ecc_sta,
+							   &calc_ecc[i]);
+				stat = stm32_fmc2_nfc_ham_correct(chip, dat,
+								  &read_ecc[i],
+								  &calc_ecc[i]);
 			}
 			ecc_sta++;
 		} else {
@@ -1119,8 +1053,8 @@
 			 * Ecc_sta[4] = FMC2_BCHDSR4
 			 */
 			if (sta_map & BIT(s))
-				stat = stm32_fmc2_bch_decode(eccsize, dat,
-							     ecc_sta);
+				stat = stm32_fmc2_nfc_bch_decode(eccsize, dat,
+								 ecc_sta);
 			ecc_sta += 5;
 		}
 
@@ -1143,30 +1077,29 @@
 	return max_bitflips;
 }
 
-static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf,
-					  int oob_required, int page)
+static int stm32_fmc2_nfc_seq_read_page(struct nand_chip *chip, u8 *buf,
+					int oob_required, int page)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	u8 *ecc_calc = chip->ecc.calc_buf;
 	u8 *ecc_code = chip->ecc.code_buf;
 	u16 sta_map;
 	int ret;
 
-	/* Select the target */
-	ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+	ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
 	if (ret)
 		return ret;
 
 	/* Configure the sequencer */
-	stm32_fmc2_rw_page_init(chip, page, 0, false);
+	stm32_fmc2_nfc_rw_page_init(chip, page, 0, false);
 
 	/* Read the page */
-	ret = stm32_fmc2_xfer(chip, buf, 0, false);
+	ret = stm32_fmc2_nfc_xfer(chip, buf, 0, false);
 	if (ret)
 		return ret;
 
-	sta_map = stm32_fmc2_get_mapping_status(fmc2);
+	sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
 
 	/* Check if errors happen */
 	if (likely(!sta_map)) {
@@ -1193,22 +1126,21 @@
 	return chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
 }
 
-static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf,
-					      int oob_required, int page)
+static int stm32_fmc2_nfc_seq_read_page_raw(struct nand_chip *chip, u8 *buf,
+					    int oob_required, int page)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	int ret;
 
-	/* Select the target */
-	ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+	ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
 	if (ret)
 		return ret;
 
 	/* Configure the sequencer */
-	stm32_fmc2_rw_page_init(chip, page, 1, false);
+	stm32_fmc2_nfc_rw_page_init(chip, page, 1, false);
 
 	/* Read the page */
-	ret = stm32_fmc2_xfer(chip, buf, 1, false);
+	ret = stm32_fmc2_nfc_xfer(chip, buf, 1, false);
 	if (ret)
 		return ret;
 
@@ -1221,31 +1153,31 @@
 	return 0;
 }
 
-static irqreturn_t stm32_fmc2_irq(int irq, void *dev_id)
+static irqreturn_t stm32_fmc2_nfc_irq(int irq, void *dev_id)
 {
-	struct stm32_fmc2_nfc *fmc2 = (struct stm32_fmc2_nfc *)dev_id;
+	struct stm32_fmc2_nfc *nfc = (struct stm32_fmc2_nfc *)dev_id;
 
-	if (fmc2->irq_state == FMC2_IRQ_SEQ)
+	if (nfc->irq_state == FMC2_IRQ_SEQ)
 		/* Sequencer is used */
-		stm32_fmc2_disable_seq_irq(fmc2);
-	else if (fmc2->irq_state == FMC2_IRQ_BCH)
+		stm32_fmc2_nfc_disable_seq_irq(nfc);
+	else if (nfc->irq_state == FMC2_IRQ_BCH)
 		/* BCH is used */
-		stm32_fmc2_disable_bch_irq(fmc2);
+		stm32_fmc2_nfc_disable_bch_irq(nfc);
 
-	complete(&fmc2->complete);
+	complete(&nfc->complete);
 
 	return IRQ_HANDLED;
 }
 
-static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf,
-				 unsigned int len, bool force_8bit)
+static void stm32_fmc2_nfc_read_data(struct nand_chip *chip, void *buf,
+				     unsigned int len, bool force_8bit)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
-	void __iomem *io_addr_r = fmc2->data_base[fmc2->cs_sel];
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+	void __iomem *io_addr_r = nfc->data_base[nfc->cs_sel];
 
 	if (force_8bit && chip->options & NAND_BUSWIDTH_16)
 		/* Reconfigure bus width to 8-bit */
-		stm32_fmc2_set_buswidth_16(fmc2, false);
+		stm32_fmc2_nfc_set_buswidth_16(nfc, false);
 
 	if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
 		if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
@@ -1281,18 +1213,18 @@
 
 	if (force_8bit && chip->options & NAND_BUSWIDTH_16)
 		/* Reconfigure bus width to 16-bit */
-		stm32_fmc2_set_buswidth_16(fmc2, true);
+		stm32_fmc2_nfc_set_buswidth_16(nfc, true);
 }
 
-static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
-				  unsigned int len, bool force_8bit)
+static void stm32_fmc2_nfc_write_data(struct nand_chip *chip, const void *buf,
+				      unsigned int len, bool force_8bit)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
-	void __iomem *io_addr_w = fmc2->data_base[fmc2->cs_sel];
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+	void __iomem *io_addr_w = nfc->data_base[nfc->cs_sel];
 
 	if (force_8bit && chip->options & NAND_BUSWIDTH_16)
 		/* Reconfigure bus width to 8-bit */
-		stm32_fmc2_set_buswidth_16(fmc2, false);
+		stm32_fmc2_nfc_set_buswidth_16(nfc, false);
 
 	if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
 		if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
@@ -1328,48 +1260,49 @@
 
 	if (force_8bit && chip->options & NAND_BUSWIDTH_16)
 		/* Reconfigure bus width to 16-bit */
-		stm32_fmc2_set_buswidth_16(fmc2, true);
+		stm32_fmc2_nfc_set_buswidth_16(nfc, true);
 }
 
-static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip,
+				  unsigned long timeout_ms)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	const struct nand_sdr_timings *timings;
 	u32 isr, sr;
 
 	/* Check that there are no pending requests to the NAND flash */
-	if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr,
-					      sr & FMC2_SR_NWRF, 1,
-					      FMC2_TIMEOUT_US))
-		dev_warn(fmc2->dev, "Waitrdy timeout\n");
+	if (regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr,
+				     sr & FMC2_SR_NWRF, 1,
+				     1000 * FMC2_TIMEOUT_MS))
+		dev_warn(nfc->dev, "Waitrdy timeout\n");
 
 	/* Wait tWB before R/B# signal is low */
-	timings = nand_get_sdr_timings(&chip->data_interface);
+	timings = nand_get_sdr_timings(nand_get_interface_config(chip));
 	ndelay(PSEC_TO_NSEC(timings->tWB_max));
 
 	/* R/B# signal is low, clear high level flag */
-	writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR);
+	regmap_write(nfc->regmap, FMC2_ICR, FMC2_ICR_CIHLF);
 
 	/* Wait R/B# signal is high */
-	return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR,
-						 isr, isr & FMC2_ISR_IHLF,
-						 5, 1000 * timeout_ms);
+	return regmap_read_poll_timeout(nfc->regmap, FMC2_ISR, isr,
+					isr & FMC2_ISR_IHLF, 5,
+					1000 * FMC2_TIMEOUT_MS);
 }
 
-static int stm32_fmc2_exec_op(struct nand_chip *chip,
-			      const struct nand_operation *op,
-			      bool check_only)
+static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip,
+				  const struct nand_operation *op,
+				  bool check_only)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	const struct nand_op_instr *instr = NULL;
-	unsigned int op_id, i;
+	unsigned int op_id, i, timeout;
 	int ret;
 
-	ret = stm32_fmc2_select_chip(chip, op->cs);
-	if (ret)
-		return ret;
-
 	if (check_only)
+		return 0;
+
+	ret = stm32_fmc2_nfc_select_chip(chip, op->cs);
+	if (ret)
 		return ret;
 
 	for (op_id = 0; op_id < op->ninstrs; op_id++) {
@@ -1378,30 +1311,30 @@
 		switch (instr->type) {
 		case NAND_OP_CMD_INSTR:
 			writeb_relaxed(instr->ctx.cmd.opcode,
-				       fmc2->cmd_base[fmc2->cs_sel]);
+				       nfc->cmd_base[nfc->cs_sel]);
 			break;
 
 		case NAND_OP_ADDR_INSTR:
 			for (i = 0; i < instr->ctx.addr.naddrs; i++)
 				writeb_relaxed(instr->ctx.addr.addrs[i],
-					       fmc2->addr_base[fmc2->cs_sel]);
+					       nfc->addr_base[nfc->cs_sel]);
 			break;
 
 		case NAND_OP_DATA_IN_INSTR:
-			stm32_fmc2_read_data(chip, instr->ctx.data.buf.in,
-					     instr->ctx.data.len,
-					     instr->ctx.data.force_8bit);
+			stm32_fmc2_nfc_read_data(chip, instr->ctx.data.buf.in,
+						 instr->ctx.data.len,
+						 instr->ctx.data.force_8bit);
 			break;
 
 		case NAND_OP_DATA_OUT_INSTR:
-			stm32_fmc2_write_data(chip, instr->ctx.data.buf.out,
-					      instr->ctx.data.len,
-					      instr->ctx.data.force_8bit);
+			stm32_fmc2_nfc_write_data(chip, instr->ctx.data.buf.out,
+						  instr->ctx.data.len,
+						  instr->ctx.data.force_8bit);
 			break;
 
 		case NAND_OP_WAITRDY_INSTR:
-			ret = stm32_fmc2_waitrdy(chip,
-						 instr->ctx.waitrdy.timeout_ms);
+			timeout = instr->ctx.waitrdy.timeout_ms;
+			ret = stm32_fmc2_nfc_waitrdy(chip, timeout);
 			break;
 		}
 	}
@@ -1409,21 +1342,21 @@
 	return ret;
 }
 
-/* Controller initialization */
-static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc)
 {
-	u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
-	u32 bcr1 = readl_relaxed(fmc2->io_base + FMC2_BCR1);
+	u32 pcr;
+
+	regmap_read(nfc->regmap, FMC2_PCR, &pcr);
 
 	/* Set CS used to undefined */
-	fmc2->cs_sel = -1;
+	nfc->cs_sel = -1;
 
 	/* Enable wait feature and nand flash memory bank */
 	pcr |= FMC2_PCR_PWAITEN;
 	pcr |= FMC2_PCR_PBKEN;
 
 	/* Set bus width to 8-bit mode for identification */
-	pcr &= ~FMC2_PCR_PWID_MASK;
+	pcr &= ~FMC2_PCR_PWID;
 
 	/* ECC logic is disabled */
 	pcr &= ~FMC2_PCR_ECCEN;
@@ -1434,32 +1367,32 @@
 	pcr &= ~FMC2_PCR_WEN;
 
 	/* Set default ECC sector size */
-	pcr &= ~FMC2_PCR_ECCSS_MASK;
-	pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_2048);
+	pcr &= ~FMC2_PCR_ECCSS;
+	pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_2048);
 
 	/* Set default tclr/tar timings */
-	pcr &= ~FMC2_PCR_TCLR_MASK;
-	pcr |= FMC2_PCR_TCLR(FMC2_PCR_TCLR_DEFAULT);
-	pcr &= ~FMC2_PCR_TAR_MASK;
-	pcr |= FMC2_PCR_TAR(FMC2_PCR_TAR_DEFAULT);
+	pcr &= ~FMC2_PCR_TCLR;
+	pcr |= FIELD_PREP(FMC2_PCR_TCLR, FMC2_PCR_TCLR_DEFAULT);
+	pcr &= ~FMC2_PCR_TAR;
+	pcr |= FIELD_PREP(FMC2_PCR_TAR, FMC2_PCR_TAR_DEFAULT);
 
 	/* Enable FMC2 controller */
-	bcr1 |= FMC2_BCR1_FMC2EN;
+	if (nfc->dev == nfc->cdev)
+		regmap_update_bits(nfc->regmap, FMC2_BCR1,
+				   FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN);
 
-	writel_relaxed(bcr1, fmc2->io_base + FMC2_BCR1);
-	writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
-	writel_relaxed(FMC2_PMEM_DEFAULT, fmc2->io_base + FMC2_PMEM);
-	writel_relaxed(FMC2_PATT_DEFAULT, fmc2->io_base + FMC2_PATT);
+	regmap_write(nfc->regmap, FMC2_PCR, pcr);
+	regmap_write(nfc->regmap, FMC2_PMEM, FMC2_PMEM_DEFAULT);
+	regmap_write(nfc->regmap, FMC2_PATT, FMC2_PATT_DEFAULT);
 }
 
-/* Controller timings */
-static void stm32_fmc2_calc_timings(struct nand_chip *chip,
-				    const struct nand_sdr_timings *sdrt)
+static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip,
+					const struct nand_sdr_timings *sdrt)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
 	struct stm32_fmc2_timings *tims = &nand->timings;
-	unsigned long hclk = clk_get_rate(fmc2->clk);
+	unsigned long hclk = clk_get_rate(nfc->clk);
 	unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
 	unsigned long timing, tar, tclr, thiz, twait;
 	unsigned long tset_mem, tset_att, thold_mem, thold_att;
@@ -1583,8 +1516,8 @@
 	tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
 }
 
-static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
-				      const struct nand_data_interface *conf)
+static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+					  const struct nand_interface_config *conf)
 {
 	const struct nand_sdr_timings *sdrt;
 
@@ -1595,77 +1528,102 @@
 	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
 		return 0;
 
-	stm32_fmc2_calc_timings(chip, sdrt);
-
-	/* Apply timings */
-	stm32_fmc2_timings_init(chip);
+	stm32_fmc2_nfc_calc_timings(chip, sdrt);
+	stm32_fmc2_nfc_timings_init(chip);
 
 	return 0;
 }
 
-/* DMA configuration */
-static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
+static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
 {
-	int ret;
+	int ret = 0;
 
-	fmc2->dma_tx_ch = dma_request_slave_channel(fmc2->dev, "tx");
-	fmc2->dma_rx_ch = dma_request_slave_channel(fmc2->dev, "rx");
-	fmc2->dma_ecc_ch = dma_request_slave_channel(fmc2->dev, "ecc");
-
-	if (!fmc2->dma_tx_ch || !fmc2->dma_rx_ch || !fmc2->dma_ecc_ch) {
-		dev_warn(fmc2->dev, "DMAs not defined in the device tree, polling mode is used\n");
-		return 0;
+	nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx");
+	if (IS_ERR(nfc->dma_tx_ch)) {
+		ret = PTR_ERR(nfc->dma_tx_ch);
+		if (ret != -ENODEV && ret != -EPROBE_DEFER)
+			dev_err(nfc->dev,
+				"failed to request tx DMA channel: %d\n", ret);
+		nfc->dma_tx_ch = NULL;
+		goto err_dma;
 	}
 
-	ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
+	nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx");
+	if (IS_ERR(nfc->dma_rx_ch)) {
+		ret = PTR_ERR(nfc->dma_rx_ch);
+		if (ret != -ENODEV && ret != -EPROBE_DEFER)
+			dev_err(nfc->dev,
+				"failed to request rx DMA channel: %d\n", ret);
+		nfc->dma_rx_ch = NULL;
+		goto err_dma;
+	}
+
+	nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc");
+	if (IS_ERR(nfc->dma_ecc_ch)) {
+		ret = PTR_ERR(nfc->dma_ecc_ch);
+		if (ret != -ENODEV && ret != -EPROBE_DEFER)
+			dev_err(nfc->dev,
+				"failed to request ecc DMA channel: %d\n", ret);
+		nfc->dma_ecc_ch = NULL;
+		goto err_dma;
+	}
+
+	ret = sg_alloc_table(&nfc->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
 	if (ret)
 		return ret;
 
 	/* Allocate a buffer to store ECC status registers */
-	fmc2->ecc_buf = devm_kzalloc(fmc2->dev, FMC2_MAX_ECC_BUF_LEN,
-				     GFP_KERNEL);
-	if (!fmc2->ecc_buf)
+	nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
+	if (!nfc->ecc_buf)
 		return -ENOMEM;
 
-	ret = sg_alloc_table(&fmc2->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
+	ret = sg_alloc_table(&nfc->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
 	if (ret)
 		return ret;
 
-	init_completion(&fmc2->dma_data_complete);
-	init_completion(&fmc2->dma_ecc_complete);
+	init_completion(&nfc->dma_data_complete);
+	init_completion(&nfc->dma_ecc_complete);
 
 	return 0;
+
+err_dma:
+	if (ret == -ENODEV) {
+		dev_warn(nfc->dev,
+			 "DMAs not defined in the DT, polling mode is used\n");
+		ret = 0;
+	}
+
+	return ret;
 }
 
-/* NAND callbacks setup */
-static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip)
+static void stm32_fmc2_nfc_nand_callbacks_setup(struct nand_chip *chip)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 
 	/*
 	 * Specific callbacks to read/write a page depending on
 	 * the mode (polling/sequencer) and the algo used (Hamming, BCH).
 	 */
-	if (fmc2->dma_tx_ch && fmc2->dma_rx_ch && fmc2->dma_ecc_ch) {
+	if (nfc->dma_tx_ch && nfc->dma_rx_ch && nfc->dma_ecc_ch) {
 		/* DMA => use sequencer mode callbacks */
-		chip->ecc.correct = stm32_fmc2_sequencer_correct;
-		chip->ecc.write_page = stm32_fmc2_sequencer_write_page;
-		chip->ecc.read_page = stm32_fmc2_sequencer_read_page;
-		chip->ecc.write_page_raw = stm32_fmc2_sequencer_write_page_raw;
-		chip->ecc.read_page_raw = stm32_fmc2_sequencer_read_page_raw;
+		chip->ecc.correct = stm32_fmc2_nfc_seq_correct;
+		chip->ecc.write_page = stm32_fmc2_nfc_seq_write_page;
+		chip->ecc.read_page = stm32_fmc2_nfc_seq_read_page;
+		chip->ecc.write_page_raw = stm32_fmc2_nfc_seq_write_page_raw;
+		chip->ecc.read_page_raw = stm32_fmc2_nfc_seq_read_page_raw;
 	} else {
 		/* No DMA => use polling mode callbacks */
-		chip->ecc.hwctl = stm32_fmc2_hwctl;
+		chip->ecc.hwctl = stm32_fmc2_nfc_hwctl;
 		if (chip->ecc.strength == FMC2_ECC_HAM) {
 			/* Hamming is used */
-			chip->ecc.calculate = stm32_fmc2_ham_calculate;
-			chip->ecc.correct = stm32_fmc2_ham_correct;
+			chip->ecc.calculate = stm32_fmc2_nfc_ham_calculate;
+			chip->ecc.correct = stm32_fmc2_nfc_ham_correct;
 			chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK;
 		} else {
 			/* BCH is used */
-			chip->ecc.calculate = stm32_fmc2_bch_calculate;
-			chip->ecc.correct = stm32_fmc2_bch_correct;
-			chip->ecc.read_page = stm32_fmc2_read_page;
+			chip->ecc.calculate = stm32_fmc2_nfc_bch_calculate;
+			chip->ecc.correct = stm32_fmc2_nfc_bch_correct;
+			chip->ecc.read_page = stm32_fmc2_nfc_read_page;
 		}
 	}
 
@@ -1678,9 +1636,8 @@
 		chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 8 : 7;
 }
 
-/* FMC2 layout */
-static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
-					 struct mtd_oob_region *oobregion)
+static int stm32_fmc2_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+					struct mtd_oob_region *oobregion)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
 	struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -1694,8 +1651,8 @@
 	return 0;
 }
 
-static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section,
-					  struct mtd_oob_region *oobregion)
+static int stm32_fmc2_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+					 struct mtd_oob_region *oobregion)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
 	struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -1709,13 +1666,12 @@
 	return 0;
 }
 
-static const struct mtd_ooblayout_ops stm32_fmc2_nand_ooblayout_ops = {
-	.ecc = stm32_fmc2_nand_ooblayout_ecc,
-	.free = stm32_fmc2_nand_ooblayout_free,
+static const struct mtd_ooblayout_ops stm32_fmc2_nfc_ooblayout_ops = {
+	.ecc = stm32_fmc2_nfc_ooblayout_ecc,
+	.free = stm32_fmc2_nfc_ooblayout_free,
 };
 
-/* FMC2 caps */
-static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength)
+static int stm32_fmc2_nfc_calc_ecc_bytes(int step_size, int strength)
 {
 	/* Hamming */
 	if (strength == FMC2_ECC_HAM)
@@ -1729,68 +1685,70 @@
 	return 8;
 }
 
-NAND_ECC_CAPS_SINGLE(stm32_fmc2_ecc_caps, stm32_fmc2_calc_ecc_bytes,
+NAND_ECC_CAPS_SINGLE(stm32_fmc2_nfc_ecc_caps, stm32_fmc2_nfc_calc_ecc_bytes,
 		     FMC2_ECC_STEP_SIZE,
 		     FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8);
 
-/* FMC2 controller ops */
-static int stm32_fmc2_attach_chip(struct nand_chip *chip)
+static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip)
 {
-	struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+	struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	int ret;
 
 	/*
-	 * Only NAND_ECC_HW mode is actually supported
+	 * Only NAND_ECC_ENGINE_TYPE_ON_HOST mode is actually supported
 	 * Hamming => ecc.strength = 1
 	 * BCH4 => ecc.strength = 4
 	 * BCH8 => ecc.strength = 8
 	 * ECC sector size = 512
 	 */
-	if (chip->ecc.mode != NAND_ECC_HW) {
-		dev_err(fmc2->dev, "nand_ecc_mode is not well defined in the DT\n");
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
+		dev_err(nfc->dev,
+			"nand_ecc_engine_type is not well defined in the DT\n");
 		return -EINVAL;
 	}
 
-	ret = nand_ecc_choose_conf(chip, &stm32_fmc2_ecc_caps,
+	/* Default ECC settings in case they are not set in the device tree */
+	if (!chip->ecc.size)
+		chip->ecc.size = FMC2_ECC_STEP_SIZE;
+
+	if (!chip->ecc.strength)
+		chip->ecc.strength = FMC2_ECC_BCH8;
+
+	ret = nand_ecc_choose_conf(chip, &stm32_fmc2_nfc_ecc_caps,
 				   mtd->oobsize - FMC2_BBM_LEN);
 	if (ret) {
-		dev_err(fmc2->dev, "no valid ECC settings set\n");
+		dev_err(nfc->dev, "no valid ECC settings set\n");
 		return ret;
 	}
 
 	if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) {
-		dev_err(fmc2->dev, "nand page size is not supported\n");
+		dev_err(nfc->dev, "nand page size is not supported\n");
 		return -EINVAL;
 	}
 
 	if (chip->bbt_options & NAND_BBT_USE_FLASH)
 		chip->bbt_options |= NAND_BBT_NO_OOB;
 
-	/* NAND callbacks setup */
-	stm32_fmc2_nand_callbacks_setup(chip);
+	stm32_fmc2_nfc_nand_callbacks_setup(chip);
 
-	/* Define ECC layout */
-	mtd_set_ooblayout(mtd, &stm32_fmc2_nand_ooblayout_ops);
+	mtd_set_ooblayout(mtd, &stm32_fmc2_nfc_ooblayout_ops);
 
-	/* Configure bus width to 16-bit */
-	if (chip->options & NAND_BUSWIDTH_16)
-		stm32_fmc2_set_buswidth_16(fmc2, true);
+	stm32_fmc2_nfc_setup(chip);
 
 	return 0;
 }
 
-static const struct nand_controller_ops stm32_fmc2_nand_controller_ops = {
-	.attach_chip = stm32_fmc2_attach_chip,
-	.exec_op = stm32_fmc2_exec_op,
-	.setup_data_interface = stm32_fmc2_setup_interface,
+static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = {
+	.attach_chip = stm32_fmc2_nfc_attach_chip,
+	.exec_op = stm32_fmc2_nfc_exec_op,
+	.setup_interface = stm32_fmc2_nfc_setup_interface,
 };
 
-/* FMC2 probe */
-static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2,
-				  struct device_node *dn)
+static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
+				      struct device_node *dn)
 {
-	struct stm32_fmc2_nand *nand = &fmc2->nand;
+	struct stm32_fmc2_nand *nand = &nfc->nand;
 	u32 cs;
 	int ret, i;
 
@@ -1799,29 +1757,29 @@
 
 	nand->ncs /= sizeof(u32);
 	if (!nand->ncs) {
-		dev_err(fmc2->dev, "invalid reg property size\n");
+		dev_err(nfc->dev, "invalid reg property size\n");
 		return -EINVAL;
 	}
 
 	for (i = 0; i < nand->ncs; i++) {
 		ret = of_property_read_u32_index(dn, "reg", i, &cs);
 		if (ret) {
-			dev_err(fmc2->dev, "could not retrieve reg property: %d\n",
+			dev_err(nfc->dev, "could not retrieve reg property: %d\n",
 				ret);
 			return ret;
 		}
 
-		if (cs > FMC2_MAX_CE) {
-			dev_err(fmc2->dev, "invalid reg value: %d\n", cs);
+		if (cs >= FMC2_MAX_CE) {
+			dev_err(nfc->dev, "invalid reg value: %d\n", cs);
 			return -EINVAL;
 		}
 
-		if (fmc2->cs_assigned & BIT(cs)) {
-			dev_err(fmc2->dev, "cs already assigned: %d\n", cs);
+		if (nfc->cs_assigned & BIT(cs)) {
+			dev_err(nfc->dev, "cs already assigned: %d\n", cs);
 			return -EINVAL;
 		}
 
-		fmc2->cs_assigned |= BIT(cs);
+		nfc->cs_assigned |= BIT(cs);
 		nand->cs_used[i] = cs;
 	}
 
@@ -1830,25 +1788,25 @@
 	return 0;
 }
 
-static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2)
+static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
 {
-	struct device_node *dn = fmc2->dev->of_node;
+	struct device_node *dn = nfc->dev->of_node;
 	struct device_node *child;
 	int nchips = of_get_child_count(dn);
 	int ret = 0;
 
 	if (!nchips) {
-		dev_err(fmc2->dev, "NAND chip not defined\n");
+		dev_err(nfc->dev, "NAND chip not defined\n");
 		return -EINVAL;
 	}
 
 	if (nchips > 1) {
-		dev_err(fmc2->dev, "too many NAND chips defined\n");
+		dev_err(nfc->dev, "too many NAND chips defined\n");
 		return -EINVAL;
 	}
 
 	for_each_child_of_node(dn, child) {
-		ret = stm32_fmc2_parse_child(fmc2, child);
+		ret = stm32_fmc2_nfc_parse_child(nfc, child);
 		if (ret < 0) {
 			of_node_put(child);
 			return ret;
@@ -1858,198 +1816,236 @@
 	return ret;
 }
 
-static int stm32_fmc2_probe(struct platform_device *pdev)
+static int stm32_fmc2_nfc_set_cdev(struct stm32_fmc2_nfc *nfc)
+{
+	struct device *dev = nfc->dev;
+	bool ebi_found = false;
+
+	if (dev->parent && of_device_is_compatible(dev->parent->of_node,
+						   "st,stm32mp1-fmc2-ebi"))
+		ebi_found = true;
+
+	if (of_device_is_compatible(dev->of_node, "st,stm32mp1-fmc2-nfc")) {
+		if (ebi_found) {
+			nfc->cdev = dev->parent;
+
+			return 0;
+		}
+
+		return -EINVAL;
+	}
+
+	if (ebi_found)
+		return -EINVAL;
+
+	nfc->cdev = dev;
+
+	return 0;
+}
+
+static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct reset_control *rstc;
-	struct stm32_fmc2_nfc *fmc2;
+	struct stm32_fmc2_nfc *nfc;
 	struct stm32_fmc2_nand *nand;
 	struct resource *res;
 	struct mtd_info *mtd;
 	struct nand_chip *chip;
+	struct resource cres;
 	int chip_cs, mem_region, ret, irq;
+	int start_region = 0;
 
-	fmc2 = devm_kzalloc(dev, sizeof(*fmc2), GFP_KERNEL);
-	if (!fmc2)
+	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+	if (!nfc)
 		return -ENOMEM;
 
-	fmc2->dev = dev;
-	nand_controller_init(&fmc2->base);
-	fmc2->base.ops = &stm32_fmc2_nand_controller_ops;
+	nfc->dev = dev;
+	nand_controller_init(&nfc->base);
+	nfc->base.ops = &stm32_fmc2_nfc_controller_ops;
 
-	ret = stm32_fmc2_parse_dt(fmc2);
+	ret = stm32_fmc2_nfc_set_cdev(nfc);
 	if (ret)
 		return ret;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	fmc2->io_base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(fmc2->io_base))
-		return PTR_ERR(fmc2->io_base);
+	ret = stm32_fmc2_nfc_parse_dt(nfc);
+	if (ret)
+		return ret;
 
-	fmc2->io_phys_addr = res->start;
+	ret = of_address_to_resource(nfc->cdev->of_node, 0, &cres);
+	if (ret)
+		return ret;
 
-	for (chip_cs = 0, mem_region = 1; chip_cs < FMC2_MAX_CE;
+	nfc->io_phys_addr = cres.start;
+
+	nfc->regmap = device_node_to_regmap(nfc->cdev->of_node);
+	if (IS_ERR(nfc->regmap))
+		return PTR_ERR(nfc->regmap);
+
+	if (nfc->dev == nfc->cdev)
+		start_region = 1;
+
+	for (chip_cs = 0, mem_region = start_region; chip_cs < FMC2_MAX_CE;
 	     chip_cs++, mem_region += 3) {
-		if (!(fmc2->cs_assigned & BIT(chip_cs)))
+		if (!(nfc->cs_assigned & BIT(chip_cs)))
 			continue;
 
 		res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region);
-		fmc2->data_base[chip_cs] = devm_ioremap_resource(dev, res);
-		if (IS_ERR(fmc2->data_base[chip_cs]))
-			return PTR_ERR(fmc2->data_base[chip_cs]);
+		nfc->data_base[chip_cs] = devm_ioremap_resource(dev, res);
+		if (IS_ERR(nfc->data_base[chip_cs]))
+			return PTR_ERR(nfc->data_base[chip_cs]);
 
-		fmc2->data_phys_addr[chip_cs] = res->start;
+		nfc->data_phys_addr[chip_cs] = res->start;
 
 		res = platform_get_resource(pdev, IORESOURCE_MEM,
 					    mem_region + 1);
-		fmc2->cmd_base[chip_cs] = devm_ioremap_resource(dev, res);
-		if (IS_ERR(fmc2->cmd_base[chip_cs]))
-			return PTR_ERR(fmc2->cmd_base[chip_cs]);
+		nfc->cmd_base[chip_cs] = devm_ioremap_resource(dev, res);
+		if (IS_ERR(nfc->cmd_base[chip_cs]))
+			return PTR_ERR(nfc->cmd_base[chip_cs]);
 
 		res = platform_get_resource(pdev, IORESOURCE_MEM,
 					    mem_region + 2);
-		fmc2->addr_base[chip_cs] = devm_ioremap_resource(dev, res);
-		if (IS_ERR(fmc2->addr_base[chip_cs]))
-			return PTR_ERR(fmc2->addr_base[chip_cs]);
+		nfc->addr_base[chip_cs] = devm_ioremap_resource(dev, res);
+		if (IS_ERR(nfc->addr_base[chip_cs]))
+			return PTR_ERR(nfc->addr_base[chip_cs]);
 	}
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		if (irq != -EPROBE_DEFER)
-			dev_err(dev, "IRQ error missing or invalid\n");
+	if (irq < 0)
 		return irq;
-	}
 
-	ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0,
-			       dev_name(dev), fmc2);
+	ret = devm_request_irq(dev, irq, stm32_fmc2_nfc_irq, 0,
+			       dev_name(dev), nfc);
 	if (ret) {
 		dev_err(dev, "failed to request irq\n");
 		return ret;
 	}
 
-	init_completion(&fmc2->complete);
+	init_completion(&nfc->complete);
 
-	fmc2->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(fmc2->clk))
-		return PTR_ERR(fmc2->clk);
+	nfc->clk = devm_clk_get(nfc->cdev, NULL);
+	if (IS_ERR(nfc->clk))
+		return PTR_ERR(nfc->clk);
 
-	ret = clk_prepare_enable(fmc2->clk);
+	ret = clk_prepare_enable(nfc->clk);
 	if (ret) {
 		dev_err(dev, "can not enable the clock\n");
 		return ret;
 	}
 
 	rstc = devm_reset_control_get(dev, NULL);
-	if (!IS_ERR(rstc)) {
+	if (IS_ERR(rstc)) {
+		ret = PTR_ERR(rstc);
+		if (ret == -EPROBE_DEFER)
+			goto err_clk_disable;
+	} else {
 		reset_control_assert(rstc);
 		reset_control_deassert(rstc);
 	}
 
-	/* DMA setup */
-	ret = stm32_fmc2_dma_setup(fmc2);
+	ret = stm32_fmc2_nfc_dma_setup(nfc);
 	if (ret)
-		return ret;
+		goto err_release_dma;
 
-	/* FMC2 init routine */
-	stm32_fmc2_init(fmc2);
+	stm32_fmc2_nfc_init(nfc);
 
-	nand = &fmc2->nand;
+	nand = &nfc->nand;
 	chip = &nand->chip;
 	mtd = nand_to_mtd(chip);
 	mtd->dev.parent = dev;
 
-	chip->controller = &fmc2->base;
+	chip->controller = &nfc->base;
 	chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
-			 NAND_USE_BOUNCE_BUFFER;
-
-	/* Default ECC settings */
-	chip->ecc.mode = NAND_ECC_HW;
-	chip->ecc.size = FMC2_ECC_STEP_SIZE;
-	chip->ecc.strength = FMC2_ECC_BCH8;
+			 NAND_USES_DMA;
 
 	/* Scan to detect the presence of the device */
 	ret = nand_scan(chip, nand->ncs);
 	if (ret)
-		goto err_scan;
+		goto err_release_dma;
 
 	ret = mtd_device_register(mtd, NULL, 0);
 	if (ret)
-		goto err_device_register;
+		goto err_nand_cleanup;
 
-	platform_set_drvdata(pdev, fmc2);
+	platform_set_drvdata(pdev, nfc);
 
 	return 0;
 
-err_device_register:
+err_nand_cleanup:
 	nand_cleanup(chip);
 
-err_scan:
-	if (fmc2->dma_ecc_ch)
-		dma_release_channel(fmc2->dma_ecc_ch);
-	if (fmc2->dma_tx_ch)
-		dma_release_channel(fmc2->dma_tx_ch);
-	if (fmc2->dma_rx_ch)
-		dma_release_channel(fmc2->dma_rx_ch);
+err_release_dma:
+	if (nfc->dma_ecc_ch)
+		dma_release_channel(nfc->dma_ecc_ch);
+	if (nfc->dma_tx_ch)
+		dma_release_channel(nfc->dma_tx_ch);
+	if (nfc->dma_rx_ch)
+		dma_release_channel(nfc->dma_rx_ch);
 
-	sg_free_table(&fmc2->dma_data_sg);
-	sg_free_table(&fmc2->dma_ecc_sg);
+	sg_free_table(&nfc->dma_data_sg);
+	sg_free_table(&nfc->dma_ecc_sg);
 
-	clk_disable_unprepare(fmc2->clk);
+err_clk_disable:
+	clk_disable_unprepare(nfc->clk);
 
 	return ret;
 }
 
-static int stm32_fmc2_remove(struct platform_device *pdev)
+static int stm32_fmc2_nfc_remove(struct platform_device *pdev)
 {
-	struct stm32_fmc2_nfc *fmc2 = platform_get_drvdata(pdev);
-	struct stm32_fmc2_nand *nand = &fmc2->nand;
+	struct stm32_fmc2_nfc *nfc = platform_get_drvdata(pdev);
+	struct stm32_fmc2_nand *nand = &nfc->nand;
+	struct nand_chip *chip = &nand->chip;
+	int ret;
 
-	nand_release(&nand->chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 
-	if (fmc2->dma_ecc_ch)
-		dma_release_channel(fmc2->dma_ecc_ch);
-	if (fmc2->dma_tx_ch)
-		dma_release_channel(fmc2->dma_tx_ch);
-	if (fmc2->dma_rx_ch)
-		dma_release_channel(fmc2->dma_rx_ch);
+	if (nfc->dma_ecc_ch)
+		dma_release_channel(nfc->dma_ecc_ch);
+	if (nfc->dma_tx_ch)
+		dma_release_channel(nfc->dma_tx_ch);
+	if (nfc->dma_rx_ch)
+		dma_release_channel(nfc->dma_rx_ch);
 
-	sg_free_table(&fmc2->dma_data_sg);
-	sg_free_table(&fmc2->dma_ecc_sg);
+	sg_free_table(&nfc->dma_data_sg);
+	sg_free_table(&nfc->dma_ecc_sg);
 
-	clk_disable_unprepare(fmc2->clk);
+	clk_disable_unprepare(nfc->clk);
 
 	return 0;
 }
 
-static int __maybe_unused stm32_fmc2_suspend(struct device *dev)
+static int __maybe_unused stm32_fmc2_nfc_suspend(struct device *dev)
 {
-	struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev);
+	struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
 
-	clk_disable_unprepare(fmc2->clk);
+	clk_disable_unprepare(nfc->clk);
 
 	pinctrl_pm_select_sleep_state(dev);
 
 	return 0;
 }
 
-static int __maybe_unused stm32_fmc2_resume(struct device *dev)
+static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
 {
-	struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev);
-	struct stm32_fmc2_nand *nand = &fmc2->nand;
+	struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
+	struct stm32_fmc2_nand *nand = &nfc->nand;
 	int chip_cs, ret;
 
 	pinctrl_pm_select_default_state(dev);
 
-	ret = clk_prepare_enable(fmc2->clk);
+	ret = clk_prepare_enable(nfc->clk);
 	if (ret) {
 		dev_err(dev, "can not enable the clock\n");
 		return ret;
 	}
 
-	stm32_fmc2_init(fmc2);
+	stm32_fmc2_nfc_init(nfc);
 
 	for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
-		if (!(fmc2->cs_assigned & BIT(chip_cs)))
+		if (!(nfc->cs_assigned & BIT(chip_cs)))
 			continue;
 
 		nand_reset(&nand->chip, chip_cs);
@@ -2058,27 +2054,28 @@
 	return 0;
 }
 
-static SIMPLE_DEV_PM_OPS(stm32_fmc2_pm_ops, stm32_fmc2_suspend,
-			 stm32_fmc2_resume);
+static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend,
+			 stm32_fmc2_nfc_resume);
 
-static const struct of_device_id stm32_fmc2_match[] = {
+static const struct of_device_id stm32_fmc2_nfc_match[] = {
 	{.compatible = "st,stm32mp15-fmc2"},
+	{.compatible = "st,stm32mp1-fmc2-nfc"},
 	{}
 };
-MODULE_DEVICE_TABLE(of, stm32_fmc2_match);
+MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match);
 
-static struct platform_driver stm32_fmc2_driver = {
-	.probe	= stm32_fmc2_probe,
-	.remove	= stm32_fmc2_remove,
+static struct platform_driver stm32_fmc2_nfc_driver = {
+	.probe	= stm32_fmc2_nfc_probe,
+	.remove	= stm32_fmc2_nfc_remove,
 	.driver	= {
-		.name = "stm32_fmc2_nand",
-		.of_match_table = stm32_fmc2_match,
-		.pm = &stm32_fmc2_pm_ops,
+		.name = "stm32_fmc2_nfc",
+		.of_match_table = stm32_fmc2_nfc_match,
+		.pm = &stm32_fmc2_nfc_pm_ops,
 	},
 };
-module_platform_driver(stm32_fmc2_driver);
+module_platform_driver(stm32_fmc2_nfc_driver);
 
-MODULE_ALIAS("platform:stm32_fmc2_nand");
+MODULE_ALIAS("platform:stm32_fmc2_nfc");
 MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 nand driver");
+MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 NFC driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 45c376f..2a7ca30 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -195,7 +195,7 @@
 	u32 timing_cfg;
 	u32 timing_ctl;
 	int nsels;
-	struct sunxi_nand_chip_sel sels[0];
+	struct sunxi_nand_chip_sel sels[];
 };
 
 static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
@@ -1376,8 +1376,8 @@
 #define sunxi_nand_lookup_timing(l, p, c) \
 			_sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
 
-static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline,
-					const struct nand_data_interface *conf)
+static int sunxi_nfc_setup_interface(struct nand_chip *nand, int csline,
+				     const struct nand_interface_config *conf)
 {
 	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
 	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
@@ -1575,7 +1575,7 @@
 	 * only have 2 bytes available in the first user data
 	 * section.
 	 */
-	if (!section && ecc->mode == NAND_ECC_HW) {
+	if (!section && ecc->engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
 		oobregion->offset = 2;
 		oobregion->length = 2;
 
@@ -1609,12 +1609,13 @@
 	static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
 	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
 	struct mtd_info *mtd = nand_to_mtd(nand);
+	struct nand_device *nanddev = mtd_to_nanddev(mtd);
 	struct sunxi_nand_hw_ecc *data;
 	int nsectors;
 	int ret;
 	int i;
 
-	if (ecc->options & NAND_ECC_MAXIMIZE) {
+	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
 		int bytes;
 
 		ecc->size = 1024;
@@ -1698,7 +1699,7 @@
 		ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
 		ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
 		ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
-		nand->options |= NAND_USE_BOUNCE_BUFFER;
+		nand->options |= NAND_USES_DMA;
 	} else {
 		ecc->read_page = sunxi_nfc_hw_ecc_read_page;
 		ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
@@ -1720,11 +1721,11 @@
 
 static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
 {
-	switch (ecc->mode) {
-	case NAND_ECC_HW:
+	switch (ecc->engine_type) {
+	case NAND_ECC_ENGINE_TYPE_ON_HOST:
 		sunxi_nand_hw_ecc_ctrl_cleanup(ecc);
 		break;
-	case NAND_ECC_NONE:
+	case NAND_ECC_ENGINE_TYPE_NONE:
 	default:
 		break;
 	}
@@ -1732,6 +1733,8 @@
 
 static int sunxi_nand_attach_chip(struct nand_chip *nand)
 {
+	const struct nand_ecc_props *requirements =
+		nanddev_get_ecc_requirements(&nand->base);
 	struct nand_ecc_ctrl *ecc = &nand->ecc;
 	struct device_node *np = nand_get_flash_node(nand);
 	int ret;
@@ -1745,21 +1748,21 @@
 	nand->options |= NAND_SUBPAGE_READ;
 
 	if (!ecc->size) {
-		ecc->size = nand->base.eccreq.step_size;
-		ecc->strength = nand->base.eccreq.strength;
+		ecc->size = requirements->step_size;
+		ecc->strength = requirements->strength;
 	}
 
 	if (!ecc->size || !ecc->strength)
 		return -EINVAL;
 
-	switch (ecc->mode) {
-	case NAND_ECC_HW:
+	switch (ecc->engine_type) {
+	case NAND_ECC_ENGINE_TYPE_ON_HOST:
 		ret = sunxi_nand_hw_ecc_ctrl_init(nand, ecc, np);
 		if (ret)
 			return ret;
 		break;
-	case NAND_ECC_NONE:
-	case NAND_ECC_SOFT:
+	case NAND_ECC_ENGINE_TYPE_NONE:
+	case NAND_ECC_ENGINE_TYPE_SOFT:
 		break;
 	default:
 		return -EINVAL;
@@ -1907,7 +1910,8 @@
 	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
 	const struct nand_op_parser *parser;
 
-	sunxi_nfc_select_chip(nand, op->cs);
+	if (!check_only)
+		sunxi_nfc_select_chip(nand, op->cs);
 
 	if (sunxi_nand->sels[op->cs].rb >= 0)
 		parser = &sunxi_nfc_op_parser;
@@ -1919,7 +1923,7 @@
 
 static const struct nand_controller_ops sunxi_nand_controller_ops = {
 	.attach_chip = sunxi_nand_attach_chip,
-	.setup_data_interface = sunxi_nfc_setup_data_interface,
+	.setup_interface = sunxi_nfc_setup_interface,
 	.exec_op = sunxi_nfc_exec_op,
 };
 
@@ -1990,7 +1994,7 @@
 	 * Set the ECC mode to the default value in case nothing is specified
 	 * in the DT.
 	 */
-	nand->ecc.mode = NAND_ECC_HW;
+	nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 	nand_set_flash_node(nand, np);
 
 	mtd = nand_to_mtd(nand);
@@ -2038,13 +2042,18 @@
 static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
 {
 	struct sunxi_nand_chip *sunxi_nand;
+	struct nand_chip *chip;
+	int ret;
 
 	while (!list_empty(&nfc->chips)) {
 		sunxi_nand = list_first_entry(&nfc->chips,
 					      struct sunxi_nand_chip,
 					      node);
-		nand_release(&sunxi_nand->nand);
-		sunxi_nand_ecc_cleanup(&sunxi_nand->nand.ecc);
+		chip = &sunxi_nand->nand;
+		ret = mtd_device_unregister(nand_to_mtd(chip));
+		WARN_ON(ret);
+		nand_cleanup(chip);
+		sunxi_nand_ecc_cleanup(&chip->ecc);
 		list_del(&sunxi_nand->node);
 	}
 }
@@ -2071,10 +2080,8 @@
 		return PTR_ERR(nfc->regs);
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "failed to retrieve irq\n");
+	if (irq < 0)
 		return irq;
-	}
 
 	nfc->ahb_clk = devm_clk_get(dev, "ahb");
 	if (IS_ERR(nfc->ahb_clk)) {
@@ -2125,8 +2132,16 @@
 	if (ret)
 		goto out_ahb_reset_reassert;
 
-	nfc->dmac = dma_request_slave_channel(dev, "rxtx");
-	if (nfc->dmac) {
+	nfc->dmac = dma_request_chan(dev, "rxtx");
+	if (IS_ERR(nfc->dmac)) {
+		ret = PTR_ERR(nfc->dmac);
+		if (ret == -EPROBE_DEFER)
+			goto out_ahb_reset_reassert;
+
+		/* Ignore errors to fall back to PIO mode */
+		dev_warn(dev, "failed to request rxtx DMA channel: %d\n", ret);
+		nfc->dmac = NULL;
+	} else {
 		struct dma_slave_config dmac_cfg = { };
 
 		dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
@@ -2140,9 +2155,6 @@
 		if (nfc->caps->extra_mbus_conf)
 			writel(readl(nfc->regs + NFC_REG_CTL) |
 			       NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL);
-
-	} else {
-		dev_warn(dev, "failed to request rxtx DMA channel\n");
 	}
 
 	platform_set_drvdata(pdev, nfc);
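
The sunxi switch from dma_request_slave_channel() to dma_request_chan() is the same pattern applied in the stm32_fmc2 hunks: the newer API returns an ERR_PTR() instead of NULL, so probe deferral can be propagated while any other failure degrades gracefully to PIO. A hedged sketch of the idiom (example_request_dma() is illustrative, not driver code):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

static struct dma_chan *example_request_dma(struct device *dev)
{
	struct dma_chan *chan = dma_request_chan(dev, "rxtx");

	if (IS_ERR(chan)) {
		/* Defer the whole probe until the DMA provider is ready. */
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;

		/* Any other error: warn once and fall back to PIO mode. */
		dev_warn(dev, "no rxtx DMA channel (%ld), using PIO\n",
			 PTR_ERR(chan));
		return NULL;
	}

	return chan;
}

Returning NULL for the PIO fallback keeps the driver's existing "if (nfc->dmac)" checks unchanged, which is exactly how the hunk above handles it.
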
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index 9acf2de..359187b 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -113,53 +113,11 @@
 
 #define TIMING(t0, t1, t2, t3) ((t0) << 24 | (t1) << 16 | (t2) << 8 | (t3))
 
-static void tango_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
-{
-	struct tango_chip *tchip = to_tango_chip(chip);
-
-	if (ctrl & NAND_CLE)
-		writeb_relaxed(dat, tchip->base + PBUS_CMD);
-
-	if (ctrl & NAND_ALE)
-		writeb_relaxed(dat, tchip->base + PBUS_ADDR);
-}
-
-static int tango_dev_ready(struct nand_chip *chip)
-{
-	struct tango_nfc *nfc = to_tango_nfc(chip->controller);
-
-	return readl_relaxed(nfc->pbus_base + PBUS_CS_CTRL) & PBUS_IORDY;
-}
-
-static u8 tango_read_byte(struct nand_chip *chip)
-{
-	struct tango_chip *tchip = to_tango_chip(chip);
-
-	return readb_relaxed(tchip->base + PBUS_DATA);
-}
-
-static void tango_read_buf(struct nand_chip *chip, u8 *buf, int len)
-{
-	struct tango_chip *tchip = to_tango_chip(chip);
-
-	ioread8_rep(tchip->base + PBUS_DATA, buf, len);
-}
-
-static void tango_write_buf(struct nand_chip *chip, const u8 *buf, int len)
-{
-	struct tango_chip *tchip = to_tango_chip(chip);
-
-	iowrite8_rep(tchip->base + PBUS_DATA, buf, len);
-}
-
-static void tango_select_chip(struct nand_chip *chip, int idx)
+static void tango_select_target(struct nand_chip *chip, unsigned int cs)
 {
 	struct tango_nfc *nfc = to_tango_nfc(chip->controller);
 	struct tango_chip *tchip = to_tango_chip(chip);
 
-	if (idx < 0)
-		return; /* No "chip unselect" function */
-
 	writel_relaxed(tchip->timing1, nfc->reg_base + NFC_TIMING1);
 	writel_relaxed(tchip->timing2, nfc->reg_base + NFC_TIMING2);
 	writel_relaxed(tchip->xfer_cfg, nfc->reg_base + NFC_XFER_CFG);
@@ -168,6 +126,69 @@
 	writel_relaxed(tchip->bb_cfg, nfc->reg_base + NFC_BB_CFG);
 }
 
+static int tango_waitrdy(struct nand_chip *chip, unsigned int timeout_ms)
+{
+	struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+	u32 status;
+
+	return readl_relaxed_poll_timeout(nfc->pbus_base + PBUS_CS_CTRL,
+					  status, status & PBUS_IORDY, 20,
+					  timeout_ms);
+}
+
+static int tango_exec_instr(struct nand_chip *chip,
+			    const struct nand_op_instr *instr)
+{
+	struct tango_chip *tchip = to_tango_chip(chip);
+	unsigned int i;
+
+	switch (instr->type) {
+	case NAND_OP_CMD_INSTR:
+		writeb_relaxed(instr->ctx.cmd.opcode, tchip->base + PBUS_CMD);
+		return 0;
+	case NAND_OP_ADDR_INSTR:
+		for (i = 0; i < instr->ctx.addr.naddrs; i++)
+			writeb_relaxed(instr->ctx.addr.addrs[i],
+				       tchip->base + PBUS_ADDR);
+		return 0;
+	case NAND_OP_DATA_IN_INSTR:
+		ioread8_rep(tchip->base + PBUS_DATA, instr->ctx.data.buf.in,
+			    instr->ctx.data.len);
+		return 0;
+	case NAND_OP_DATA_OUT_INSTR:
+		iowrite8_rep(tchip->base + PBUS_DATA, instr->ctx.data.buf.out,
+			     instr->ctx.data.len);
+		return 0;
+	case NAND_OP_WAITRDY_INSTR:
+		return tango_waitrdy(chip,
+				     instr->ctx.waitrdy.timeout_ms);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int tango_exec_op(struct nand_chip *chip,
+			 const struct nand_operation *op,
+			 bool check_only)
+{
+	unsigned int i;
+	int ret = 0;
+
+	if (check_only)
+		return 0;
+
+	tango_select_target(chip, op->cs);
+	for (i = 0; i < op->ninstrs; i++) {
+		ret = tango_exec_instr(chip, &op->instrs[i]);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
 /*
  * The controller does not check for bitflips in erased pages,
  * therefore software must check instead.
@@ -279,6 +300,7 @@
 	struct tango_nfc *nfc = to_tango_nfc(chip->controller);
 	int err, res, len = mtd->writesize;
 
+	tango_select_target(chip, chip->cur_cs);
 	if (oob_required)
 		chip->ecc.read_oob(chip, page);
 
@@ -300,22 +322,30 @@
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct tango_nfc *nfc = to_tango_nfc(chip->controller);
-	int err, status, len = mtd->writesize;
+	const struct nand_sdr_timings *timings;
+	int err, len = mtd->writesize;
+	u8 status;
 
 	/* Calling tango_write_oob() would send PAGEPROG twice */
 	if (oob_required)
 		return -ENOTSUPP;
 
+	tango_select_target(chip, chip->cur_cs);
 	writel_relaxed(0xffffffff, nfc->mem_base + METADATA);
 	err = do_dma(nfc, DMA_TO_DEVICE, NFC_WRITE, buf, len, page);
 	if (err)
 		return err;
 
-	status = chip->legacy.waitfunc(chip);
-	if (status & NAND_STATUS_FAIL)
-		return -EIO;
+	timings = nand_get_sdr_timings(nand_get_interface_config(chip));
+	err = tango_waitrdy(chip, PSEC_TO_MSEC(timings->tR_max));
+	if (err)
+		return err;
 
-	return 0;
+	err = nand_status_op(chip, &status);
+	if (err)
+		return err;
+
+	return (status & NAND_STATUS_FAIL) ? -EIO : 0;
 }
 
 static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
@@ -326,7 +356,9 @@
 		/* skip over "len" bytes */
 		nand_change_read_column_op(chip, *pos, NULL, 0, false);
 	} else {
-		tango_read_buf(chip, *buf, len);
+		struct tango_chip *tchip = to_tango_chip(chip);
+
+		ioread8_rep(tchip->base + PBUS_DATA, *buf, len);
 		*buf += len;
 	}
 }
@@ -339,7 +371,9 @@
 		/* skip over "len" bytes */
 		nand_change_write_column_op(chip, *pos, NULL, 0, false);
 	} else {
-		tango_write_buf(chip, *buf, len);
+		struct tango_chip *tchip = to_tango_chip(chip);
+
+		iowrite8_rep(tchip->base + PBUS_DATA, *buf, len);
 		*buf += len;
 	}
 }
@@ -420,6 +454,7 @@
 static int tango_read_page_raw(struct nand_chip *chip, u8 *buf,
 			       int oob_required, int page)
 {
+	tango_select_target(chip, chip->cur_cs);
 	nand_read_page_op(chip, page, 0, NULL, 0);
 	raw_read(chip, buf, chip->oob_poi);
 	return 0;
@@ -428,6 +463,7 @@
 static int tango_write_page_raw(struct nand_chip *chip, const u8 *buf,
 				int oob_required, int page)
 {
+	tango_select_target(chip, chip->cur_cs);
 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
 	raw_write(chip, buf, chip->oob_poi);
 	return nand_prog_page_end_op(chip);
@@ -435,6 +471,7 @@
 
 static int tango_read_oob(struct nand_chip *chip, int page)
 {
+	tango_select_target(chip, chip->cur_cs);
 	nand_read_page_op(chip, page, 0, NULL, 0);
 	raw_read(chip, NULL, chip->oob_poi);
 	return 0;
@@ -442,6 +479,7 @@
 
 static int tango_write_oob(struct nand_chip *chip, int page)
 {
+	tango_select_target(chip, chip->cur_cs);
 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
 	raw_write(chip, NULL, chip->oob_poi);
 	return nand_prog_page_end_op(chip);
@@ -477,7 +515,7 @@
 }
 
 static int tango_set_timings(struct nand_chip *chip, int csline,
-			     const struct nand_data_interface *conf)
+			     const struct nand_interface_config *conf)
 {
 	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
 	struct tango_nfc *nfc = to_tango_nfc(chip->controller);
@@ -511,8 +549,8 @@
 {
 	struct nand_ecc_ctrl *ecc = &chip->ecc;
 
-	ecc->mode = NAND_ECC_HW;
-	ecc->algo = NAND_ECC_BCH;
+	ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+	ecc->algo = NAND_ECC_ALGO_BCH;
 	ecc->bytes = DIV_ROUND_UP(ecc->strength * FIELD_ORDER, BITS_PER_BYTE);
 
 	ecc->read_page_raw = tango_read_page_raw;
@@ -527,7 +565,8 @@
 
 static const struct nand_controller_ops tango_controller_ops = {
 	.attach_chip = tango_attach_chip,
-	.setup_data_interface = tango_set_timings,
+	.setup_interface = tango_set_timings,
+	.exec_op = tango_exec_op,
 };
 
 static int chip_init(struct device *dev, struct device_node *np)
@@ -562,13 +601,7 @@
 	ecc = &chip->ecc;
 	mtd = nand_to_mtd(chip);
 
-	chip->legacy.read_byte = tango_read_byte;
-	chip->legacy.write_buf = tango_write_buf;
-	chip->legacy.read_buf = tango_read_buf;
-	chip->legacy.select_chip = tango_select_chip;
-	chip->legacy.cmd_ctrl = tango_cmd_ctrl;
-	chip->legacy.dev_ready = tango_dev_ready;
-	chip->options = NAND_USE_BOUNCE_BUFFER |
+	chip->options = NAND_USES_DMA |
 			NAND_NO_SUBPAGE_WRITE |
 			NAND_WAIT_TCCS;
 	chip->controller = &nfc->hw;
@@ -600,14 +633,19 @@
 
 static int tango_nand_remove(struct platform_device *pdev)
 {
-	int cs;
 	struct tango_nfc *nfc = platform_get_drvdata(pdev);
+	struct nand_chip *chip;
+	int cs, ret;
 
 	dma_release_channel(nfc->chan);
 
 	for (cs = 0; cs < MAX_CS; ++cs) {
-		if (nfc->chips[cs])
-			nand_release(&nfc->chips[cs]->nand_chip);
+		if (nfc->chips[cs]) {
+			chip = &nfc->chips[cs]->nand_chip;
+			ret = mtd_device_unregister(nand_to_mtd(chip));
+			WARN_ON(ret);
+			nand_cleanup(chip);
+		}
 	}
 
 	return 0;
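
tango_nand drops its legacy cmd_ctrl/read_buf/write_buf/dev_ready callbacks in favour of a single ->exec_op() dispatcher over the generic NAND instruction stream, the conversion the raw NAND core has been pushing drivers toward. A skeleton of the pattern; the my_*() helpers stand in for controller-specific I/O and are not real functions:

#include <linux/mtd/rawnand.h>

/* Controller-specific I/O primitives; placeholders for the sketch. */
static void my_select_target(struct nand_chip *chip, unsigned int cs);
static void my_write_cmd(struct nand_chip *chip, u8 opcode);
static void my_write_addrs(struct nand_chip *chip, const u8 *addrs,
			   unsigned int naddrs);
static void my_read_buf(struct nand_chip *chip, void *buf, unsigned int len);
static void my_write_buf(struct nand_chip *chip, const void *buf,
			 unsigned int len);
static int my_waitrdy(struct nand_chip *chip, unsigned int timeout_ms);

static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	unsigned int i;
	int ret = 0;

	/* check_only means "validate this op", so leave the hardware alone. */
	if (check_only)
		return 0;

	my_select_target(chip, op->cs);

	for (i = 0; i < op->ninstrs && !ret; i++) {
		const struct nand_op_instr *instr = &op->instrs[i];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			my_write_cmd(chip, instr->ctx.cmd.opcode);
			break;
		case NAND_OP_ADDR_INSTR:
			my_write_addrs(chip, instr->ctx.addr.addrs,
				       instr->ctx.addr.naddrs);
			break;
		case NAND_OP_DATA_IN_INSTR:
			my_read_buf(chip, instr->ctx.data.buf.in,
				    instr->ctx.data.len);
			break;
		case NAND_OP_DATA_OUT_INSTR:
			my_write_buf(chip, instr->ctx.data.buf.out,
				     instr->ctx.data.len);
			break;
		case NAND_OP_WAITRDY_INSTR:
			ret = my_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
			break;
		}
	}

	return ret;
}

Because ->select_chip() is gone, the custom page accessors above each call tango_select_target(chip, chip->cur_cs) themselves before touching the flash, which is why that call is sprinkled through the read/write hunks.
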
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index 3cc9a4c..fbf6772 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -467,7 +467,9 @@
 			      const struct nand_operation *op,
 			      bool check_only)
 {
-	tegra_nand_select_target(chip, op->cs);
+	if (!check_only)
+		tegra_nand_select_target(chip, op->cs);
+
 	return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
 				      check_only);
 }
@@ -477,7 +479,7 @@
 {
 	struct tegra_nand_chip *nand = to_tegra_chip(chip);
 
-	if (chip->ecc.algo == NAND_ECC_BCH && enable)
+	if (chip->ecc.algo == NAND_ECC_ALGO_BCH && enable)
 		writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
 	else
 		writel_relaxed(0, ctrl->regs + BCH_CONFIG);
@@ -811,8 +813,8 @@
 	writel_relaxed(reg, ctrl->regs + TIMING_2);
 }
 
-static int tegra_nand_setup_data_interface(struct nand_chip *chip, int csline,
-					const struct nand_data_interface *conf)
+static int tegra_nand_setup_interface(struct nand_chip *chip, int csline,
+				      const struct nand_interface_config *conf)
 {
 	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
 	const struct nand_sdr_timings *timings;
@@ -838,7 +840,10 @@
 				   int strength_len, int bits_per_step,
 				   int oobsize)
 {
-	bool maximize = chip->ecc.options & NAND_ECC_MAXIMIZE;
+	struct nand_device *base = mtd_to_nanddev(nand_to_mtd(chip));
+	const struct nand_ecc_props *requirements =
+		nanddev_get_ecc_requirements(base);
+	bool maximize = base->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH;
 	int i;
 
 	/*
@@ -853,7 +858,7 @@
 		} else {
 			strength_sel = strength[i];
 
-			if (strength_sel < chip->base.eccreq.strength)
+			if (strength_sel < requirements->strength)
 				continue;
 		}
 
@@ -875,7 +880,7 @@
 	int strength_len, bits_per_step;
 
 	switch (chip->ecc.algo) {
-	case NAND_ECC_RS:
+	case NAND_ECC_ALGO_RS:
 		bits_per_step = BITS_PER_STEP_RS;
 		if (chip->options & NAND_IS_BOOT_MEDIUM) {
 			strength = rs_strength_bootable;
@@ -885,7 +890,7 @@
 			strength_len = ARRAY_SIZE(rs_strength);
 		}
 		break;
-	case NAND_ECC_BCH:
+	case NAND_ECC_ALGO_BCH:
 		bits_per_step = BITS_PER_STEP_BCH;
 		if (chip->options & NAND_IS_BOOT_MEDIUM) {
 			strength = bch_strength_bootable;
@@ -906,6 +911,8 @@
 static int tegra_nand_attach_chip(struct nand_chip *chip)
 {
 	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+	const struct nand_ecc_props *requirements =
+		nanddev_get_ecc_requirements(&chip->base);
 	struct tegra_nand_chip *nand = to_tegra_chip(chip);
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	int bits_per_step;
@@ -914,12 +921,12 @@
 	if (chip->bbt_options & NAND_BBT_USE_FLASH)
 		chip->bbt_options |= NAND_BBT_NO_OOB;
 
-	chip->ecc.mode = NAND_ECC_HW;
+	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 	chip->ecc.size = 512;
 	chip->ecc.steps = mtd->writesize / chip->ecc.size;
-	if (chip->base.eccreq.step_size != 512) {
+	if (requirements->step_size != 512) {
 		dev_err(ctrl->dev, "Unsupported step size %d\n",
-			chip->base.eccreq.step_size);
+			requirements->step_size);
 		return -EINVAL;
 	}
 
@@ -933,14 +940,14 @@
 	if (chip->options & NAND_BUSWIDTH_16)
 		nand->config |= CONFIG_BUS_WIDTH_16;
 
-	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
+	if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
 		if (mtd->writesize < 2048)
-			chip->ecc.algo = NAND_ECC_RS;
+			chip->ecc.algo = NAND_ECC_ALGO_RS;
 		else
-			chip->ecc.algo = NAND_ECC_BCH;
+			chip->ecc.algo = NAND_ECC_ALGO_BCH;
 	}
 
-	if (chip->ecc.algo == NAND_ECC_BCH && mtd->writesize < 2048) {
+	if (chip->ecc.algo == NAND_ECC_ALGO_BCH && mtd->writesize < 2048) {
 		dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
 		return -EINVAL;
 	}
@@ -950,7 +957,7 @@
 		if (ret < 0) {
 			dev_err(ctrl->dev,
 				"No valid strength found, minimum %d\n",
-				chip->base.eccreq.strength);
+				requirements->strength);
 			return ret;
 		}
 
@@ -961,7 +968,7 @@
 			   CONFIG_SKIP_SPARE_SIZE_4;
 
 	switch (chip->ecc.algo) {
-	case NAND_ECC_RS:
+	case NAND_ECC_ALGO_RS:
 		bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
 		mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
 		nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
@@ -982,7 +989,7 @@
 			return -EINVAL;
 		}
 		break;
-	case NAND_ECC_BCH:
+	case NAND_ECC_ALGO_BCH:
 		bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
 		mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
 		nand->bch_config = BCH_ENABLE;
@@ -1011,7 +1018,7 @@
 	}
 
 	dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
-		 chip->ecc.algo == NAND_ECC_BCH ? "BCH" : "RS",
+		 chip->ecc.algo == NAND_ECC_ALGO_BCH ? "BCH" : "RS",
 		 chip->ecc.strength);
 
 	chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);
@@ -1051,7 +1058,7 @@
 static const struct nand_controller_ops tegra_nand_controller_ops = {
 	.attach_chip = &tegra_nand_attach_chip,
 	.exec_op = tegra_nand_exec_op,
-	.setup_data_interface = tegra_nand_setup_data_interface,
+	.setup_interface = tegra_nand_setup_interface,
 };
 
 static int tegra_nand_chips_init(struct device *dev,
@@ -1113,7 +1120,7 @@
 	if (!mtd->name)
 		mtd->name = "tegra_nand";
 
-	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER;
+	chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;
 
 	ret = nand_scan(chip, 1);
 	if (ret)
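
tegra_nand shows the accessor side of the rework: per-chip ECC requirements move from chip->base.eccreq to the nanddev_get_ecc_requirements() helper, and the NAND_ECC_MAXIMIZE option becomes the NAND_ECC_MAXIMIZE_STRENGTH flag in the generic user configuration. A small sketch under those names (the example_* wrappers are illustrative):

#include <linux/mtd/rawnand.h>

static bool example_wants_max_strength(struct nand_chip *chip)
{
	/* Was: chip->ecc.options & NAND_ECC_MAXIMIZE */
	return chip->base.ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH;
}

static unsigned int example_min_strength(struct nand_chip *chip)
{
	/* Was: chip->base.eccreq.strength */
	const struct nand_ecc_props *req =
		nanddev_get_ecc_requirements(&chip->base);

	return req->strength;
}

The split separates what the flash datasheet requires (requirements) from what the DT or user asked for (user_conf), which the old flat eccreq field could not express.
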
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
index 4e9a6d9..aa6c7e7 100644
--- a/drivers/mtd/nand/raw/tmio_nand.c
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -103,6 +103,7 @@
 /*--------------------------------------------------------------------------*/
 
 struct tmio_nand {
+	struct nand_controller controller;
 	struct nand_chip chip;
 	struct completion comp;
 
@@ -355,6 +356,25 @@
 		cell->disable(dev);
 }
 
+static int tmio_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+		return 0;
+
+	chip->ecc.size = 512;
+	chip->ecc.bytes = 6;
+	chip->ecc.strength = 2;
+	chip->ecc.hwctl = tmio_nand_enable_hwecc;
+	chip->ecc.calculate = tmio_nand_calculate_ecc;
+	chip->ecc.correct = tmio_nand_correct_data;
+
+	return 0;
+}
+
+static const struct nand_controller_ops tmio_ops = {
+	.attach_chip = tmio_attach_chip,
+};
+
 static int tmio_probe(struct platform_device *dev)
 {
 	struct tmio_nand_data *data = dev_get_platdata(&dev->dev);
@@ -385,6 +405,10 @@
 	mtd->name = "tmio-nand";
 	mtd->dev.parent = &dev->dev;
 
+	nand_controller_init(&tmio->controller);
+	tmio->controller.ops = &tmio_ops;
+	nand_chip->controller = &tmio->controller;
+
 	tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr));
 	if (!tmio->ccr)
 		return -EIO;
@@ -409,15 +433,6 @@
 	nand_chip->legacy.write_buf = tmio_nand_write_buf;
 	nand_chip->legacy.read_buf = tmio_nand_read_buf;
 
-	/* set eccmode using hardware ECC */
-	nand_chip->ecc.mode = NAND_ECC_HW;
-	nand_chip->ecc.size = 512;
-	nand_chip->ecc.bytes = 6;
-	nand_chip->ecc.strength = 2;
-	nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
-	nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
-	nand_chip->ecc.correct = tmio_nand_correct_data;
-
 	if (data)
 		nand_chip->badblock_pattern = data->badblock_pattern;
 
@@ -458,8 +473,12 @@
 static int tmio_remove(struct platform_device *dev)
 {
 	struct tmio_nand *tmio = platform_get_drvdata(dev);
+	struct nand_chip *chip = &tmio->chip;
+	int ret;
 
-	nand_release(&tmio->chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 	tmio_hw_stop(dev, tmio);
 	return 0;
 }
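
Two transformations recur here and in the txx9ndfmc, vf610 and xway hunks below. First, ECC configuration moves out of probe() into an ->attach_chip() hook on a driver-owned nand_controller, so the core applies it at nand_scan() time. Second, nand_release() is split into its two constituents, making an unregistration failure visible instead of silently discarded. A minimal sketch of the resulting remove() pattern (driver names hypothetical):

	static int example_nand_remove(struct platform_device *pdev)
	{
		struct example_nand *priv = platform_get_drvdata(pdev);
		struct nand_chip *chip = &priv->chip;
		int ret;

		/* nand_release() used to do both, ignoring the return value. */
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);

		return 0;
	}
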
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index 2642d5b..fe8ed24 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -253,6 +253,11 @@
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
 
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
+		return 0;
+
+	chip->ecc.strength = 1;
+
 	if (mtd->writesize >= 512) {
 		chip->ecc.size = 512;
 		chip->ecc.bytes = 6;
@@ -261,6 +266,10 @@
 		chip->ecc.bytes = 3;
 	}
 
+	chip->ecc.calculate = txx9ndfmc_calculate_ecc;
+	chip->ecc.correct = txx9ndfmc_correct_data;
+	chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
+
 	return 0;
 }
 
@@ -326,11 +335,6 @@
 		chip->legacy.write_buf = txx9ndfmc_write_buf;
 		chip->legacy.cmd_ctrl = txx9ndfmc_cmd_ctrl;
 		chip->legacy.dev_ready = txx9ndfmc_dev_ready;
-		chip->ecc.calculate = txx9ndfmc_calculate_ecc;
-		chip->ecc.correct = txx9ndfmc_correct_data;
-		chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
-		chip->ecc.mode = NAND_ECC_HW;
-		chip->ecc.strength = 1;
 		chip->legacy.chip_delay = 100;
 		chip->controller = &drvdata->controller;
 
@@ -371,7 +375,7 @@
 static int __exit txx9ndfmc_remove(struct platform_device *dev)
 {
 	struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
-	int i;
+	int ret, i;
 
 	if (!drvdata)
 		return 0;
@@ -385,7 +389,9 @@
 		chip = mtd_to_nand(mtd);
 		txx9_priv = nand_get_controller_data(chip);
 
-		nand_release(chip);
+		ret = mtd_device_unregister(nand_to_mtd(chip));
+		WARN_ON(ret);
+		nand_cleanup(chip);
 		kfree(txx9_priv->mtdname);
 		kfree(txx9_priv);
 	}
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index b6f114d..40d70f9 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -323,11 +323,6 @@
 			    CONFIG_ECC_MODE_SHIFT, ecc_mode);
 }
 
-static inline void vf610_nfc_transfer_size(struct vf610_nfc *nfc, int size)
-{
-	vf610_nfc_write(nfc, NFC_SECTOR_SIZE, size);
-}
-
 static inline void vf610_nfc_run(struct vf610_nfc *nfc, u32 col, u32 row,
 				 u32 cmd1, u32 cmd2, u32 trfr_sz)
 {
@@ -502,7 +497,9 @@
 			     const struct nand_operation *op,
 			     bool check_only)
 {
-	vf610_nfc_select_target(chip, op->cs);
+	if (!check_only)
+		vf610_nfc_select_target(chip, op->cs);
+
 	return nand_op_parser_exec_op(chip, &vf610_nfc_op_parser, op,
 				      check_only);
 }
@@ -730,7 +727,7 @@
 	else
 		vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
 
-	if (nfc->chip.ecc.mode == NAND_ECC_HW) {
+	if (nfc->chip.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
 		/* Set ECC status offset in SRAM */
 		vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
 				    CONFIG_ECC_SRAM_ADDR_MASK,
@@ -759,7 +756,7 @@
 		return -ENXIO;
 	}
 
-	if (chip->ecc.mode != NAND_ECC_HW)
+	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
 		return 0;
 
 	if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
@@ -777,7 +774,7 @@
 		mtd->oobsize = 64;
 
 	/* Use default large page ECC layout defined in NAND core */
-	mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+	mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
 	if (chip->ecc.strength == 32) {
 		nfc->ecc_mode = ECC_60_BYTE;
 		chip->ecc.bytes = 60;
@@ -917,8 +914,12 @@
 static int vf610_nfc_remove(struct platform_device *pdev)
 {
 	struct vf610_nfc *nfc = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &nfc->chip;
+	int ret;
 
-	nand_release(&nfc->chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 	clk_disable_unprepare(nfc->clk);
 	return 0;
 }
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index 018311d..236fd8c 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -62,6 +62,7 @@
 #define NAND_CON_NANDM		1
 
 struct xway_nand_data {
+	struct nand_controller	controller;
 	struct nand_chip	chip;
 	unsigned long		csflags;
 	void __iomem		*nandaddr;
@@ -145,6 +146,19 @@
 		xway_writeb(nand_to_mtd(chip), NAND_WRITE_DATA, buf[i]);
 }
 
+static int xway_attach_chip(struct nand_chip *chip)
+{
+	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+
+	return 0;
+}
+
+static const struct nand_controller_ops xway_nand_ops = {
+	.attach_chip = xway_attach_chip,
+};
+
 /*
  * Probe for the NAND device.
  */
@@ -180,8 +194,9 @@
 	data->chip.legacy.read_byte = xway_read_byte;
 	data->chip.legacy.chip_delay = 30;
 
-	data->chip.ecc.mode = NAND_ECC_SOFT;
-	data->chip.ecc.algo = NAND_ECC_HAMMING;
+	nand_controller_init(&data->controller);
+	data->controller.ops = &xway_nand_ops;
+	data->chip.controller = &data->controller;
 
 	platform_set_drvdata(pdev, data);
 	nand_set_controller_data(&data->chip, data);
@@ -203,6 +218,13 @@
 		    | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
 		    | cs_flag, EBU_NAND_CON);
 
+	/*
+	 * This driver assumes that the default ECC engine should be TYPE_SOFT.
+	 * Set ->engine_type before registering the NAND devices in order to
+	 * provide a driver-specific default value.
+	 */
+	data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
 	/* Scan to find existence of the device */
 	err = nand_scan(&data->chip, 1);
 	if (err)
@@ -221,8 +243,12 @@
 static int xway_nand_remove(struct platform_device *pdev)
 {
 	struct xway_nand_data *data = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &data->chip;
+	int ret;
 
-	nand_release(&data->chip);
+	ret = mtd_device_unregister(nand_to_mtd(chip));
+	WARN_ON(ret);
+	nand_cleanup(chip);
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 55e636e..8794a1f 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -16,6 +16,7 @@
 #include <linux/mtd/spinand.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/spi-mem.h>
 
@@ -370,10 +371,11 @@
 	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
 }
 
-static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
+static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
+			      u8 ndummy, u8 *buf)
 {
-	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
-						 SPINAND_MAX_ID_LEN);
+	struct spi_mem_op op = SPINAND_READID_OP(
+		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
 	int ret;
 
 	ret = spi_mem_exec_op(spinand->spimem, &op);
@@ -417,7 +419,7 @@
 		 * fixed, so let's return the maximum possible value so that
 		 * wear-leveling layers move the data immediately.
 		 */
-		return nand->eccreq.strength;
+		return nanddev_get_ecc_requirements(nand)->strength;
 
 	case STATUS_ECC_UNCOR_ERROR:
 		return -EBADMSG;
@@ -495,7 +497,7 @@
 
 	mutex_lock(&spinand->lock);
 
-	nanddev_io_for_each_page(nand, from, ops, &iter) {
+	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
 		ret = spinand_select_target(spinand, iter.req.pos.target);
 		if (ret)
 			break;
@@ -543,7 +545,7 @@
 
 	mutex_lock(&spinand->lock);
 
-	nanddev_io_for_each_page(nand, to, ops, &iter) {
+	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
 		ret = spinand_select_target(spinand, iter.req.pos.target);
 		if (ret)
 			break;
@@ -760,24 +762,62 @@
 	&winbond_spinand_manufacturer,
 };
 
-static int spinand_manufacturer_detect(struct spinand_device *spinand)
+static int spinand_manufacturer_match(struct spinand_device *spinand,
+				      enum spinand_readid_method rdid_method)
 {
+	u8 *id = spinand->id.data;
 	unsigned int i;
 	int ret;
 
 	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
-		ret = spinand_manufacturers[i]->ops->detect(spinand);
-		if (ret > 0) {
-			spinand->manufacturer = spinand_manufacturers[i];
-			return 0;
-		} else if (ret < 0) {
-			return ret;
-		}
-	}
+		const struct spinand_manufacturer *manufacturer =
+			spinand_manufacturers[i];
 
+		if (id[0] != manufacturer->id)
+			continue;
+
+		ret = spinand_match_and_init(spinand,
+					     manufacturer->chips,
+					     manufacturer->nchips,
+					     rdid_method);
+		if (ret < 0)
+			continue;
+
+		spinand->manufacturer = manufacturer;
+		return 0;
+	}
 	return -ENOTSUPP;
 }
 
+static int spinand_id_detect(struct spinand_device *spinand)
+{
+	u8 *id = spinand->id.data;
+	int ret;
+
+	ret = spinand_read_id_op(spinand, 0, 0, id);
+	if (ret)
+		return ret;
+	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
+	if (!ret)
+		return 0;
+
+	ret = spinand_read_id_op(spinand, 1, 0, id);
+	if (ret)
+		return ret;
+	ret = spinand_manufacturer_match(spinand,
+					 SPINAND_READID_METHOD_OPCODE_ADDR);
+	if (!ret)
+		return 0;
+
+	ret = spinand_read_id_op(spinand, 0, 1, id);
+	if (ret)
+		return ret;
+	ret = spinand_manufacturer_match(spinand,
+					 SPINAND_READID_METHOD_OPCODE_DUMMY);
+
+	return ret;
+}
+
 static int spinand_manufacturer_init(struct spinand_device *spinand)
 {
 	if (spinand->manufacturer->ops->init)
@@ -833,9 +873,9 @@
  * @spinand: SPI NAND object
  * @table: SPI NAND device description table
  * @table_size: size of the device description table
+ * @rdid_method: READ_ID method to match
  *
- * Should be used by SPI NAND manufacturer drivers when they want to find a
- * match between a device ID retrieved through the READ_ID command and an
+ * Match between a device ID retrieved through the READ_ID command and an
  * entry in the SPI NAND description table. If a match is found, the spinand
  * object will be initialized with information provided by the matching
  * spinand_info entry.
@@ -844,8 +884,10 @@
  */
 int spinand_match_and_init(struct spinand_device *spinand,
 			   const struct spinand_info *table,
-			   unsigned int table_size, u16 devid)
+			   unsigned int table_size,
+			   enum spinand_readid_method rdid_method)
 {
+	u8 *id = spinand->id.data;
 	struct nand_device *nand = spinand_to_nand(spinand);
 	unsigned int i;
 
@@ -853,13 +895,17 @@
 		const struct spinand_info *info = &table[i];
 		const struct spi_mem_op *op;
 
-		if (devid != info->devid)
+		if (rdid_method != info->devid.method)
+			continue;
+
+		if (memcmp(id + 1, info->devid.id, info->devid.len))
 			continue;
 
 		nand->memorg = table[i].memorg;
-		nand->eccreq = table[i].eccreq;
+		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
 		spinand->eccinfo = table[i].eccinfo;
 		spinand->flags = table[i].flags;
+		spinand->id.len = 1 + table[i].devid.len;
 		spinand->select_target = table[i].select_target;
 
 		op = spinand_select_op_variant(spinand,
@@ -896,13 +942,7 @@
 	if (ret)
 		return ret;
 
-	ret = spinand_read_id_op(spinand, spinand->id.data);
-	if (ret)
-		return ret;
-
-	spinand->id.len = SPINAND_MAX_ID_LEN;
-
-	ret = spinand_manufacturer_detect(spinand);
+	ret = spinand_id_detect(spinand);
 	if (ret) {
 		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
 			spinand->id.data);
@@ -1050,8 +1090,8 @@
 	mtd->oobavail = ret;
 
 	/* Propagate ECC information to mtd_info */
-	mtd->ecc_strength = nand->eccreq.strength;
-	mtd->ecc_step_size = nand->eccreq.step_size;
+	mtd->ecc_strength = nanddev_get_ecc_requirements(nand)->strength;
+	mtd->ecc_step_size = nanddev_get_ecc_requirements(nand)->step_size;
 
 	return 0;
 
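
With these core changes the per-manufacturer ->detect() hooks disappear: spinand_id_detect() issues READ_ID up to three times, once per known wire format, and spinand_manufacturer_match() compares id[0] against each manufacturer's ID byte and the remaining bytes against the devid of every table entry registered with the matching method. A compact summary of the three attempts (enum names as introduced by this patch):

	/*
	 * READ_ID layouts tried in order by spinand_id_detect():
	 *
	 *   naddr=0, ndummy=0 -> SPINAND_READID_METHOD_OPCODE
	 *                        id[0] = MID, id[1..] = DID bytes
	 *   naddr=1, ndummy=0 -> SPINAND_READID_METHOD_OPCODE_ADDR
	 *                        one address byte sent before the ID
	 *   naddr=0, ndummy=1 -> SPINAND_READID_METHOD_OPCODE_DUMMY
	 *                        one dummy byte clocked before the ID
	 */

For example, the Macronix MX35LF1GE4AB below registers (SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12), so it matches on the third attempt, when the buffer holds 0xC2 (the Macronix manufacturer ID) followed by 0x12.
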
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index b13b397..33c6740 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -132,6 +132,35 @@
 	.free = gd5fxgq4_variant2_ooblayout_free,
 };
 
+static int gd5fxgq4xc_ooblayout_256_ecc(struct mtd_info *mtd, int section,
+					struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 128;
+	oobregion->length = 128;
+
+	return 0;
+}
+
+static int gd5fxgq4xc_ooblayout_256_free(struct mtd_info *mtd, int section,
+					 struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 1;
+	oobregion->length = 127;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops gd5fxgq4xc_oob_256_ops = {
+	.ecc = gd5fxgq4xc_ooblayout_256_ecc,
+	.free = gd5fxgq4xc_ooblayout_256_free,
+};
+
 static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
 					u8 status)
 {
@@ -195,7 +224,8 @@
 }
 
 static const struct spinand_info gigadevice_spinand_table[] = {
-	SPINAND_INFO("GD5F1GQ4xA", 0xF1,
+	SPINAND_INFO("GD5F1GQ4xA",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf1),
 		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -204,7 +234,8 @@
 		     SPINAND_HAS_QE_BIT,
 		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
 				     gd5fxgq4xa_ecc_get_status)),
-	SPINAND_INFO("GD5F2GQ4xA", 0xF2,
+	SPINAND_INFO("GD5F2GQ4xA",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf2),
 		     NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -213,7 +244,8 @@
 		     SPINAND_HAS_QE_BIT,
 		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
 				     gd5fxgq4xa_ecc_get_status)),
-	SPINAND_INFO("GD5F4GQ4xA", 0xF4,
+	SPINAND_INFO("GD5F4GQ4xA",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf4),
 		     NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -222,7 +254,28 @@
 		     SPINAND_HAS_QE_BIT,
 		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
 				     gd5fxgq4xa_ecc_get_status)),
-	SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
+	SPINAND_INFO("GD5F4GQ4RC",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xa4, 0x68),
+		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     SPINAND_HAS_QE_BIT,
+		     SPINAND_ECCINFO(&gd5fxgq4xc_oob_256_ops,
+				     gd5fxgq4ufxxg_ecc_get_status)),
+	SPINAND_INFO("GD5F4GQ4UC",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb4, 0x68),
+		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     SPINAND_HAS_QE_BIT,
+		     SPINAND_ECCINFO(&gd5fxgq4xc_oob_256_ops,
+				     gd5fxgq4ufxxg_ecc_get_status)),
+	SPINAND_INFO("GD5F1GQ4UExxG",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1),
 		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -231,7 +284,8 @@
 		     SPINAND_HAS_QE_BIT,
 		     SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
 				     gd5fxgq4uexxg_ecc_get_status)),
-	SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148,
+	SPINAND_INFO("GD5F1GQ4UFxxG",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48),
 		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
@@ -242,39 +296,13 @@
 				     gd5fxgq4ufxxg_ecc_get_status)),
 };
 
-static int gigadevice_spinand_detect(struct spinand_device *spinand)
-{
-	u8 *id = spinand->id.data;
-	u16 did;
-	int ret;
-
-	/*
-	 * Earlier GDF5-series devices (A,E) return [0][MID][DID]
-	 * Later (F) devices return [MID][DID1][DID2]
-	 */
-
-	if (id[0] == SPINAND_MFR_GIGADEVICE)
-		did = (id[1] << 8) + id[2];
-	else if (id[0] == 0 && id[1] == SPINAND_MFR_GIGADEVICE)
-		did = id[2];
-	else
-		return 0;
-
-	ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
-				     ARRAY_SIZE(gigadevice_spinand_table),
-				     did);
-	if (ret)
-		return ret;
-
-	return 1;
-}
-
 static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
-	.detect = gigadevice_spinand_detect,
 };
 
 const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
 	.id = SPINAND_MFR_GIGADEVICE,
 	.name = "GigaDevice",
+	.chips = gigadevice_spinand_table,
+	.nchips = ARRAY_SIZE(gigadevice_spinand_table),
 	.ops = &gigadevice_spinand_manuf_ops,
 };
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 21def3f..cd7a9ca 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -84,10 +84,11 @@
 		 * data around if it's not necessary.
 		 */
 		if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr))
-			return nand->eccreq.strength;
+			return nanddev_get_ecc_requirements(nand)->strength;
 
-		if (WARN_ON(eccsr > nand->eccreq.strength || !eccsr))
-			return nand->eccreq.strength;
+		if (WARN_ON(eccsr > nanddev_get_ecc_requirements(nand)->strength ||
+			    !eccsr))
+			return nanddev_get_ecc_requirements(nand)->strength;
 
 		return eccsr;
 
@@ -99,7 +100,8 @@
 }
 
 static const struct spinand_info macronix_spinand_table[] = {
-	SPINAND_INFO("MX35LF1GE4AB", 0x12,
+	SPINAND_INFO("MX35LF1GE4AB",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
 		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
 		     NAND_ECCREQ(4, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -108,7 +110,8 @@
 		     SPINAND_HAS_QE_BIT,
 		     SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
 				     mx35lf1ge4ab_ecc_get_status)),
-	SPINAND_INFO("MX35LF2GE4AB", 0x22,
+	SPINAND_INFO("MX35LF2GE4AB",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
 		     NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
 		     NAND_ECCREQ(4, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -116,35 +119,35 @@
 					      &update_cache_variants),
 		     SPINAND_HAS_QE_BIT,
 		     SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+	SPINAND_INFO("MX31LF1GE4BC",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
+		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     0 /*SPINAND_HAS_QE_BIT*/,
+		     SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+				     mx35lf1ge4ab_ecc_get_status)),
+	SPINAND_INFO("MX31UF1GE4BC",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9e),
+		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     0 /*SPINAND_HAS_QE_BIT*/,
+		     SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+				     mx35lf1ge4ab_ecc_get_status)),
 };
 
-static int macronix_spinand_detect(struct spinand_device *spinand)
-{
-	u8 *id = spinand->id.data;
-	int ret;
-
-	/*
-	 * Macronix SPI NAND read ID needs a dummy byte, so the first byte in
-	 * raw_id is garbage.
-	 */
-	if (id[1] != SPINAND_MFR_MACRONIX)
-		return 0;
-
-	ret = spinand_match_and_init(spinand, macronix_spinand_table,
-				     ARRAY_SIZE(macronix_spinand_table),
-				     id[2]);
-	if (ret)
-		return ret;
-
-	return 1;
-}
-
 static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
-	.detect = macronix_spinand_detect,
 };
 
 const struct spinand_manufacturer macronix_spinand_manufacturer = {
 	.id = SPINAND_MFR_MACRONIX,
 	.name = "Macronix",
+	.chips = macronix_spinand_table,
+	.nchips = ARRAY_SIZE(macronix_spinand_table),
 	.ops = &macronix_spinand_manuf_ops,
 };
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 7d7b1f7..5d370cf 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -18,6 +18,16 @@
 #define MICRON_STATUS_ECC_4TO6_BITFLIPS	(3 << 4)
 #define MICRON_STATUS_ECC_7TO8_BITFLIPS	(5 << 4)
 
+#define MICRON_CFG_CR			BIT(0)
+
+/*
+ * As per the datasheet, die selection is done through bit 6 of the
+ * Die Select Register (address 0xD0).
+ */
+#define MICRON_DIE_SELECT_REG	0xD0
+
+#define MICRON_SELECT_DIE(x)	((x) << 6)
+
 static SPINAND_OP_VARIANTS(read_cache_variants,
 		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
 		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -34,38 +44,52 @@
 		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
 		SPINAND_PROG_LOAD(false, 0, NULL, 0));
 
-static int mt29f2g01abagd_ooblayout_ecc(struct mtd_info *mtd, int section,
-					struct mtd_oob_region *region)
+static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *region)
 {
 	if (section)
 		return -ERANGE;
 
-	region->offset = 64;
-	region->length = 64;
+	region->offset = mtd->oobsize / 2;
+	region->length = mtd->oobsize / 2;
 
 	return 0;
 }
 
-static int mt29f2g01abagd_ooblayout_free(struct mtd_info *mtd, int section,
-					 struct mtd_oob_region *region)
+static int micron_8_ooblayout_free(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *region)
 {
 	if (section)
 		return -ERANGE;
 
 	/* Reserve 2 bytes for the BBM. */
 	region->offset = 2;
-	region->length = 62;
+	region->length = (mtd->oobsize / 2) - 2;
 
 	return 0;
 }
 
-static const struct mtd_ooblayout_ops mt29f2g01abagd_ooblayout = {
-	.ecc = mt29f2g01abagd_ooblayout_ecc,
-	.free = mt29f2g01abagd_ooblayout_free,
+static const struct mtd_ooblayout_ops micron_8_ooblayout = {
+	.ecc = micron_8_ooblayout_ecc,
+	.free = micron_8_ooblayout_free,
 };
 
-static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
-					 u8 status)
+static int micron_select_target(struct spinand_device *spinand,
+				unsigned int target)
+{
+	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+						      spinand->scratchbuf);
+
+	if (target > 1)
+		return -EINVAL;
+
+	*spinand->scratchbuf = MICRON_SELECT_DIE(target);
+
+	return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int micron_8_ecc_get_status(struct spinand_device *spinand,
+				   u8 status)
 {
 	switch (status & MICRON_STATUS_ECC_MASK) {
 	case STATUS_ECC_NO_BITFLIPS:
@@ -91,43 +115,131 @@
 }
 
 static const struct spinand_info micron_spinand_table[] = {
-	SPINAND_INFO("MT29F2G01ABAGD", 0x24,
+	/* M79A 2Gb 3.3V */
+	SPINAND_INFO("MT29F2G01ABAGD",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
 		     NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 					      &write_cache_variants,
 					      &update_cache_variants),
 		     0,
-		     SPINAND_ECCINFO(&mt29f2g01abagd_ooblayout,
-				     mt29f2g01abagd_ecc_get_status)),
+		     SPINAND_ECCINFO(&micron_8_ooblayout,
+				     micron_8_ecc_get_status)),
+	/* M79A 2Gb 1.8V */
+	SPINAND_INFO("MT29F2G01ABBGD",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
+		     NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     0,
+		     SPINAND_ECCINFO(&micron_8_ooblayout,
+				     micron_8_ecc_get_status)),
+	/* M78A 1Gb 3.3V */
+	SPINAND_INFO("MT29F1G01ABAFD",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     0,
+		     SPINAND_ECCINFO(&micron_8_ooblayout,
+				     micron_8_ecc_get_status)),
+	/* M78A 1Gb 1.8V */
+	SPINAND_INFO("MT29F1G01ABAFD",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
+		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     0,
+		     SPINAND_ECCINFO(&micron_8_ooblayout,
+				     micron_8_ecc_get_status)),
+	/* M79A 4Gb 3.3V */
+	SPINAND_INFO("MT29F4G01ADAGD",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x36),
+		     NAND_MEMORG(1, 2048, 128, 64, 2048, 80, 2, 1, 2),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     0,
+		     SPINAND_ECCINFO(&micron_8_ooblayout,
+				     micron_8_ecc_get_status),
+		     SPINAND_SELECT_TARGET(micron_select_target)),
+	/* M70A 4Gb 3.3V */
+	SPINAND_INFO("MT29F4G01ABAFD",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x34),
+		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     SPINAND_HAS_CR_FEAT_BIT,
+		     SPINAND_ECCINFO(&micron_8_ooblayout,
+				     micron_8_ecc_get_status)),
+	/* M70A 4Gb 1.8V */
+	SPINAND_INFO("MT29F4G01ABBFD",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     SPINAND_HAS_CR_FEAT_BIT,
+		     SPINAND_ECCINFO(&micron_8_ooblayout,
+				     micron_8_ecc_get_status)),
+	/* M70A 8Gb 3.3V */
+	SPINAND_INFO("MT29F8G01ADAFD",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x46),
+		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     SPINAND_HAS_CR_FEAT_BIT,
+		     SPINAND_ECCINFO(&micron_8_ooblayout,
+				     micron_8_ecc_get_status),
+		     SPINAND_SELECT_TARGET(micron_select_target)),
+	/* M70A 8Gb 1.8V */
+	SPINAND_INFO("MT29F8G01ADBFD",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x47),
+		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_variants,
+					      &update_cache_variants),
+		     SPINAND_HAS_CR_FEAT_BIT,
+		     SPINAND_ECCINFO(&micron_8_ooblayout,
+				     micron_8_ecc_get_status),
+		     SPINAND_SELECT_TARGET(micron_select_target)),
 };
 
-static int micron_spinand_detect(struct spinand_device *spinand)
+static int micron_spinand_init(struct spinand_device *spinand)
 {
-	u8 *id = spinand->id.data;
-	int ret;
-
 	/*
-	 * Micron SPI NAND read ID need a dummy byte,
-	 * so the first byte in raw_id is dummy.
+	 * The M70A device series enables the Continuous Read feature at
+	 * power-up, which is not supported. Disable this bit to avoid any
+	 * possible failure.
 	 */
-	if (id[1] != SPINAND_MFR_MICRON)
-		return 0;
+	if (spinand->flags & SPINAND_HAS_CR_FEAT_BIT)
+		return spinand_upd_cfg(spinand, MICRON_CFG_CR, 0);
 
-	ret = spinand_match_and_init(spinand, micron_spinand_table,
-				     ARRAY_SIZE(micron_spinand_table), id[2]);
-	if (ret)
-		return ret;
-
-	return 1;
+	return 0;
 }
 
 static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
-	.detect = micron_spinand_detect,
+	.init = micron_spinand_init,
 };
 
 const struct spinand_manufacturer micron_spinand_manufacturer = {
 	.id = SPINAND_MFR_MICRON,
 	.name = "Micron",
+	.chips = micron_spinand_table,
+	.nchips = ARRAY_SIZE(micron_spinand_table),
 	.ops = &micron_spinand_manuf_ops,
 };
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 5230768..519ade5 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -97,7 +97,8 @@
 
 
 static const struct spinand_info paragon_spinand_table[] = {
-	SPINAND_INFO("PN26G01A", 0xe1,
+	SPINAND_INFO("PN26G01A",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe1),
 		     NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -106,7 +107,8 @@
 		     0,
 		     SPINAND_ECCINFO(&pn26g0xa_ooblayout,
 				     pn26g0xa_ecc_get_status)),
-	SPINAND_INFO("PN26G02A", 0xe2,
+	SPINAND_INFO("PN26G02A",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe2),
 		     NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -117,31 +119,13 @@
 				     pn26g0xa_ecc_get_status)),
 };
 
-static int paragon_spinand_detect(struct spinand_device *spinand)
-{
-	u8 *id = spinand->id.data;
-	int ret;
-
-	/* Read ID returns [0][MID][DID] */
-
-	if (id[1] != SPINAND_MFR_PARAGON)
-		return 0;
-
-	ret = spinand_match_and_init(spinand, paragon_spinand_table,
-				     ARRAY_SIZE(paragon_spinand_table),
-				     id[2]);
-	if (ret)
-		return ret;
-
-	return 1;
-}
-
 static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = {
-	.detect = paragon_spinand_detect,
 };
 
 const struct spinand_manufacturer paragon_spinand_manufacturer = {
 	.id = SPINAND_MFR_PARAGON,
 	.name = "Paragon",
+	.chips = paragon_spinand_table,
+	.nchips = ARRAY_SIZE(paragon_spinand_table),
 	.ops = &paragon_spinand_manuf_ops,
 };
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 1cb3760..6fe7bd2 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/mtd/spinand.h>
 
+/* Kioxia is the new name of Toshiba Memory. */
 #define SPINAND_MFR_TOSHIBA		0x98
 #define TOSH_STATUS_ECC_HAS_BITFLIPS_T	(3 << 4)
 
@@ -19,14 +20,26 @@
 		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
 		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
 
+static SPINAND_OP_VARIANTS(write_cache_x4_variants,
+		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+		SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_x4_variants,
+		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+		SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+/*
+ * Backward compatibility for 1st generation serial NAND devices
+ * which don't support the Quad Program Load operation.
+ */
 static SPINAND_OP_VARIANTS(write_cache_variants,
 		SPINAND_PROG_LOAD(true, 0, NULL, 0));
 
 static SPINAND_OP_VARIANTS(update_cache_variants,
 		SPINAND_PROG_LOAD(false, 0, NULL, 0));
 
-static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section,
-				     struct mtd_oob_region *region)
+static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
+					struct mtd_oob_region *region)
 {
 	if (section > 0)
 		return -ERANGE;
@@ -37,8 +50,8 @@
 	return 0;
 }
 
-static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section,
-				      struct mtd_oob_region *region)
+static int tx58cxgxsxraix_ooblayout_free(struct mtd_info *mtd, int section,
+					 struct mtd_oob_region *region)
 {
 	if (section > 0)
 		return -ERANGE;
@@ -50,13 +63,13 @@
 	return 0;
 }
 
-static const struct mtd_ooblayout_ops tc58cxgxsx_ooblayout = {
-	.ecc = tc58cxgxsx_ooblayout_ecc,
-	.free = tc58cxgxsx_ooblayout_free,
+static const struct mtd_ooblayout_ops tx58cxgxsxraix_ooblayout = {
+	.ecc = tx58cxgxsxraix_ooblayout_ecc,
+	.free = tx58cxgxsxraix_ooblayout_free,
 };
 
-static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
-				      u8 status)
+static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
+					 u8 status)
 {
 	struct nand_device *nand = spinand_to_nand(spinand);
 	u8 mbf = 0;
@@ -77,12 +90,12 @@
 		 * data around if it's not necessary.
 		 */
 		if (spi_mem_exec_op(spinand->spimem, &op))
-			return nand->eccreq.strength;
+			return nanddev_get_ecc_requirements(nand)->strength;
 
 		mbf >>= 4;
 
-		if (WARN_ON(mbf > nand->eccreq.strength || !mbf))
-			return nand->eccreq.strength;
+		if (WARN_ON(mbf > nanddev_get_ecc_requirements(nand)->strength || !mbf))
+			return nanddev_get_ecc_requirements(nand)->strength;
 
 		return mbf;
 
@@ -94,95 +107,174 @@
 }
 
 static const struct spinand_info toshiba_spinand_table[] = {
-	/* 3.3V 1Gb */
-	SPINAND_INFO("TC58CVG0S3", 0xC2,
+	/* 3.3V 1Gb (1st generation) */
+	SPINAND_INFO("TC58CVG0S3HRAIG",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xC2),
 		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 					      &write_cache_variants,
 					      &update_cache_variants),
 		     0,
-		     SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
-				     tc58cxgxsx_ecc_get_status)),
-	/* 3.3V 2Gb */
-	SPINAND_INFO("TC58CVG1S3", 0xCB,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+	/* 3.3V 2Gb (1st generation) */
+	SPINAND_INFO("TC58CVG1S3HRAIG",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCB),
 		     NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 					      &write_cache_variants,
 					      &update_cache_variants),
 		     0,
-		     SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
-				     tc58cxgxsx_ecc_get_status)),
-	/* 3.3V 4Gb */
-	SPINAND_INFO("TC58CVG2S0", 0xCD,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+	/* 3.3V 4Gb (1st generation) */
+	SPINAND_INFO("TC58CVG2S0HRAIG",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCD),
 		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 					      &write_cache_variants,
 					      &update_cache_variants),
 		     0,
-		     SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
-				     tc58cxgxsx_ecc_get_status)),
-	/* 1.8V 1Gb */
-	SPINAND_INFO("TC58CYG0S3", 0xB2,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+	/* 1.8V 1Gb (1st generation) */
+	SPINAND_INFO("TC58CYG0S3HRAIG",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xB2),
 		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 					      &write_cache_variants,
 					      &update_cache_variants),
 		     0,
-		     SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
-				     tc58cxgxsx_ecc_get_status)),
-	/* 1.8V 2Gb */
-	SPINAND_INFO("TC58CYG1S3", 0xBB,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+	/* 1.8V 2Gb (1st generation) */
+	SPINAND_INFO("TC58CYG1S3HRAIG",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBB),
 		     NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 					      &write_cache_variants,
 					      &update_cache_variants),
 		     0,
-		     SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
-				     tc58cxgxsx_ecc_get_status)),
-	/* 1.8V 4Gb */
-	SPINAND_INFO("TC58CYG2S0", 0xBD,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+	/* 1.8V 4Gb (1st generation) */
+	SPINAND_INFO("TC58CYG2S0HRAIG",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBD),
 		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
 		     NAND_ECCREQ(8, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 					      &write_cache_variants,
 					      &update_cache_variants),
 		     0,
-		     SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
-				     tc58cxgxsx_ecc_get_status)),
-};
-
-static int toshiba_spinand_detect(struct spinand_device *spinand)
-{
-	u8 *id = spinand->id.data;
-	int ret;
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
 
 	/*
-	 * Toshiba SPI NAND read ID needs a dummy byte,
-	 * so the first byte in id is garbage.
+	 * 2nd generation serial NAND devices have HOLD_D, which is
+	 * equivalent to QE_BIT.
 	 */
-	if (id[1] != SPINAND_MFR_TOSHIBA)
-		return 0;
-
-	ret = spinand_match_and_init(spinand, toshiba_spinand_table,
-				     ARRAY_SIZE(toshiba_spinand_table),
-				     id[2]);
-	if (ret)
-		return ret;
-
-	return 1;
-}
+	/* 3.3V 1Gb (2nd generation) */
+	SPINAND_INFO("TC58CVG0S3HRAIJ",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE2),
+		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_x4_variants,
+					      &update_cache_x4_variants),
+		     SPINAND_HAS_QE_BIT,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+	/* 3.3V 2Gb (2nd generation) */
+	SPINAND_INFO("TC58CVG1S3HRAIJ",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xEB),
+		     NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_x4_variants,
+					      &update_cache_x4_variants),
+		     SPINAND_HAS_QE_BIT,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+	/* 3.3V 4Gb (2nd generation) */
+	SPINAND_INFO("TC58CVG2S0HRAIJ",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xED),
+		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_x4_variants,
+					      &update_cache_x4_variants),
+		     SPINAND_HAS_QE_BIT,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+	/* 3.3V 8Gb (2nd generation) */
+	SPINAND_INFO("TH58CVG3S0HRAIJ",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4),
+		     NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_x4_variants,
+					      &update_cache_x4_variants),
+		     SPINAND_HAS_QE_BIT,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+	/* 1.8V 1Gb (2nd generation) */
+	SPINAND_INFO("TC58CYG0S3HRAIJ",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD2),
+		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_x4_variants,
+					      &update_cache_x4_variants),
+		     SPINAND_HAS_QE_BIT,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+	/* 1.8V 2Gb (2nd generation) */
+	SPINAND_INFO("TC58CYG1S3HRAIJ",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDB),
+		     NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_x4_variants,
+					      &update_cache_x4_variants),
+		     SPINAND_HAS_QE_BIT,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+	/* 1.8V 4Gb (2nd generation) */
+	SPINAND_INFO("TC58CYG2S0HRAIJ",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDD),
+		     NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_x4_variants,
+					      &update_cache_x4_variants),
+		     SPINAND_HAS_QE_BIT,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+	/* 1.8V 8Gb (2nd generation) */
+	SPINAND_INFO("TH58CYG3S0HRAIJ",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD4),
+		     NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+		     NAND_ECCREQ(8, 512),
+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+					      &write_cache_x4_variants,
+					      &update_cache_x4_variants),
+		     SPINAND_HAS_QE_BIT,
+		     SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+				     tx58cxgxsxraix_ecc_get_status)),
+};
 
 static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
-	.detect = toshiba_spinand_detect,
 };
 
 const struct spinand_manufacturer toshiba_spinand_manufacturer = {
 	.id = SPINAND_MFR_TOSHIBA,
 	.name = "Toshiba",
+	.chips = toshiba_spinand_table,
+	.nchips = ARRAY_SIZE(toshiba_spinand_table),
 	.ops = &toshiba_spinand_manuf_ops,
 };
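
The second-generation Kioxia parts advertise SPINAND_HAS_QE_BIT and therefore get the new x4 program-load variant lists, while the first-generation entries keep the single-bit lists. The core picks one variant per list at match time; a simplified sketch of that selection (the real spinand_select_op_variant() also clamps the data length to the page size):

	static const struct spi_mem_op *
	pick_variant(struct spinand_device *spinand,
		     const struct spinand_op_variants *variants)
	{
		unsigned int i;

		for (i = 0; i < variants->nops; i++) {
			struct spi_mem_op op = variants->ops[i];

			/* Lists are ordered fastest first; first supported wins. */
			if (spi_mem_supports_op(spinand->spimem, &op))
				return &variants->ops[i];
		}

		return NULL;
	}
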
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index a6c17e0..7668442 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -75,7 +75,8 @@
 }
 
 static const struct spinand_info winbond_spinand_table[] = {
-	SPINAND_INFO("W25M02GV", 0xAB,
+	SPINAND_INFO("W25M02GV",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab),
 		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
 		     NAND_ECCREQ(1, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -84,7 +85,8 @@
 		     0,
 		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
 		     SPINAND_SELECT_TARGET(w25m02gv_select_target)),
-	SPINAND_INFO("W25N01GV", 0xAA,
+	SPINAND_INFO("W25N01GV",
+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa),
 		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
 		     NAND_ECCREQ(1, 512),
 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -94,31 +96,6 @@
 		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
 };
 
-/**
- * winbond_spinand_detect - initialize device related part in spinand_device
- * struct if it is a Winbond device.
- * @spinand: SPI NAND device structure
- */
-static int winbond_spinand_detect(struct spinand_device *spinand)
-{
-	u8 *id = spinand->id.data;
-	int ret;
-
-	/*
-	 * Winbond SPI NAND read ID need a dummy byte,
-	 * so the first byte in raw_id is dummy.
-	 */
-	if (id[1] != SPINAND_MFR_WINBOND)
-		return 0;
-
-	ret = spinand_match_and_init(spinand, winbond_spinand_table,
-				     ARRAY_SIZE(winbond_spinand_table), id[2]);
-	if (ret)
-		return ret;
-
-	return 1;
-}
-
 static int winbond_spinand_init(struct spinand_device *spinand)
 {
 	struct nand_device *nand = spinand_to_nand(spinand);
@@ -138,12 +115,13 @@
 }
 
 static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
-	.detect = winbond_spinand_detect,
 	.init = winbond_spinand_init,
 };
 
 const struct spinand_manufacturer winbond_spinand_manufacturer = {
 	.id = SPINAND_MFR_WINBOND,
 	.name = "Winbond",
+	.chips = winbond_spinand_table,
+	.nchips = ARRAY_SIZE(winbond_spinand_table),
 	.ops = &winbond_spinand_manuf_ops,
 };
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index f98363c..e723543 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -12,7 +12,7 @@
 	  boards.
 
 config MTD_BCM63XX_PARTS
-	tristate "BCM63XX CFE partitioning parser"
+	bool "BCM63XX CFE partitioning parser"
 	depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
 	select CRC32
 	select MTD_PARSER_IMAGETAG
diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c
index 8fd6176..2611669 100644
--- a/drivers/mtd/parsers/afs.c
+++ b/drivers/mtd/parsers/afs.c
@@ -126,8 +126,8 @@
 	 * Static checks cannot see that we bail out if we have an error
 	 * reading the footer.
 	 */
-	u_int uninitialized_var(iis_ptr);
-	u_int uninitialized_var(img_ptr);
+	u_int iis_ptr;
+	u_int img_ptr;
 	u_int ptr;
 	size_t sz;
 	int ret;
diff --git a/drivers/mtd/parsers/bcm63xxpart.c b/drivers/mtd/parsers/bcm63xxpart.c
index 78f90c6..b15bdad 100644
--- a/drivers/mtd/parsers/bcm63xxpart.c
+++ b/drivers/mtd/parsers/bcm63xxpart.c
@@ -22,6 +22,11 @@
 #include <linux/mtd/partitions.h>
 #include <linux/of.h>
 
+#ifdef CONFIG_MIPS
+#include <asm/bootinfo.h>
+#include <asm/fw/cfe/cfe_api.h>
+#endif /* CONFIG_MIPS */
+
 #define BCM963XX_CFE_BLOCK_SIZE		SZ_64K	/* always at least 64KiB */
 
 #define BCM963XX_CFE_MAGIC_OFFSET	0x4e0
@@ -32,28 +37,15 @@
 #define STR_NULL_TERMINATE(x) \
 	do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0)
 
-static int bcm63xx_detect_cfe(struct mtd_info *master)
+static inline int bcm63xx_detect_cfe(void)
 {
-	char buf[9];
-	int ret;
-	size_t retlen;
+	int ret = 0;
 
-	ret = mtd_read(master, BCM963XX_CFE_VERSION_OFFSET, 5, &retlen,
-		       (void *)buf);
-	buf[retlen] = 0;
+#ifdef CONFIG_MIPS
+	ret = (fw_arg3 == CFE_EPTSEAL);
+#endif /* CONFIG_MIPS */
 
-	if (ret)
-		return ret;
-
-	if (strncmp("cfe-v", buf, 5) == 0)
-		return 0;
-
-	/* very old CFE's do not have the cfe-v string, so check for magic */
-	ret = mtd_read(master, BCM963XX_CFE_MAGIC_OFFSET, 8, &retlen,
-		       (void *)buf);
-	buf[retlen] = 0;
-
-	return strncmp("CFE1CFE1", buf, 8);
+	return ret;
 }
 
 static int bcm63xx_read_nvram(struct mtd_info *master,
@@ -138,7 +130,7 @@
 	struct bcm963xx_nvram *nvram = NULL;
 	int ret;
 
-	if (bcm63xx_detect_cfe(master))
+	if (!bcm63xx_detect_cfe())
 		return -EINVAL;
 
 	nvram = vzalloc(sizeof(*nvram));
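
CFE detection no longer reads signatures out of the flash: on MIPS, CFE hands its entry-point seal to the kernel in the fourth firmware argument, so a register compare replaces two mtd_read() calls. Assuming the definition in asm/fw/cfe/cfe_api.h, the seal is the same magic the removed probe looked for, doubled, at BCM963XX_CFE_MAGIC_OFFSET:

	/* CFE_EPTSEAL == 0x43464531, i.e. ASCII "CFE1". */
	ret = (fw_arg3 == CFE_EPTSEAL);

Since fw_arg3 is a raw MIPS boot variable rather than an exported symbol, this is presumably also why the Kconfig hunk above turns MTD_BCM63XX_PARTS from tristate into bool.
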
diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c
index 0dca515..0ddff1a 100644
--- a/drivers/mtd/parsers/cmdlinepart.c
+++ b/drivers/mtd/parsers/cmdlinepart.c
@@ -9,7 +9,7 @@
  *
  * mtdparts=<mtddef>[;<mtddef]
  * <mtddef>  := <mtd-id>:<partdef>[,<partdef>]
- * <partdef> := <size>[@<offset>][<name>][ro][lk]
+ * <partdef> := <size>[@<offset>][<name>][ro][lk][slc]
  * <mtd-id>  := unique name used in mapping driver/device (mtd->name)
  * <size>    := standard linux memsize OR "-" to denote all remaining space
  *              size is automatically truncated at end of device
@@ -92,7 +92,7 @@
 	int name_len;
 	unsigned char *extra_mem;
 	char delim;
-	unsigned int mask_flags;
+	unsigned int mask_flags, add_flags;
 
 	/* fetch the partition size */
 	if (*s == '-') {
@@ -109,6 +109,7 @@
 
 	/* fetch partition name and flags */
 	mask_flags = 0; /* this is going to be a regular partition */
+	add_flags = 0;
 	delim = 0;
 
 	/* check for offset */
@@ -152,6 +153,12 @@
 		s += 2;
 	}
 
+	/* If "slc" is found, use emulated SLC mode on this partition. */
+	if (!strncmp(s, "slc", 3)) {
+		add_flags |= MTD_SLC_ON_MLC_EMULATION;
+		s += 3;
+	}
+
 	/* test if more partitions are following */
 	if (*s == ',') {
 		if (size == SIZE_REMAINING) {
@@ -184,6 +191,7 @@
 	parts[this_part].size = size;
 	parts[this_part].offset = offset;
 	parts[this_part].mask_flags = mask_flags;
+	parts[this_part].add_flags = add_flags;
 	if (name)
 		strlcpy(extra_mem, name, name_len + 1);
 	else
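
With this addition the <partdef> grammar gains an "slc" suffix, parsed after "ro" and "lk", which sets MTD_SLC_ON_MLC_EMULATION in the partition's add_flags; unlike mask_flags, which clears bits inherited from the parent device, add_flags bits are OR-ed into the partition's flags. A hypothetical command line using it (device and partition names invented):

	mtdparts=nand0:4m(kernel)ro,1g(data)slc,-(rootfs)
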
diff --git a/drivers/mtd/parsers/ofpart.c b/drivers/mtd/parsers/ofpart.c
index 3caeabf..daf507c 100644
--- a/drivers/mtd/parsers/ofpart.c
+++ b/drivers/mtd/parsers/ofpart.c
@@ -117,6 +117,9 @@
 		if (of_get_property(pp, "lock", &len))
 			parts[i].mask_flags |= MTD_POWERUP_LOCK;
 
+		if (of_property_read_bool(pp, "slc-mode"))
+			parts[i].add_flags |= MTD_SLC_ON_MLC_EMULATION;
+
 		i++;
 	}
 
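
The device-tree parser grows the equivalent knob: a boolean "slc-mode" property on a partition node sets the same MTD_SLC_ON_MLC_EMULATION flag. A hypothetical fragment (node name and sizes invented):

	partition@0 {
		label = "slc-data";
		reg = <0x0 0x4000000>;
		slc-mode;
	};
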
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 4744bf9..b9f2724 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -247,7 +247,8 @@
 
 	/* FTL can contain -1 entries that are by default filled with bits */
 	if (block == -1) {
-		memset(buffer, 0xFF, SM_SECTOR_SIZE);
+		if (buffer)
+			memset(buffer, 0xFF, SM_SECTOR_SIZE);
 		return 0;
 	}
 
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index f237fcd..ffc4b38 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -1,12 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0-only
 menuconfig MTD_SPI_NOR
-	tristate "SPI-NOR device support"
+	tristate "SPI NOR device support"
 	depends on MTD
 	depends on MTD && SPI_MASTER
 	select SPI_MEM
 	help
 	  This is the framework for the SPI NOR which can be used by the SPI
-	  device drivers and the SPI-NOR device driver.
+	  device drivers and the SPI NOR device driver.
 
 if MTD_SPI_NOR
 
@@ -24,87 +24,6 @@
 	  Please note that some tools/drivers/filesystems may not work with
 	  4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
 
-config SPI_ASPEED_SMC
-	tristate "Aspeed flash controllers in SPI mode"
-	depends on ARCH_ASPEED || COMPILE_TEST
-	depends on HAS_IOMEM && OF
-	help
-	  This enables support for the Firmware Memory controller (FMC)
-	  in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips,
-	  and support for the SPI flash memory controller (SPI) for
-	  the host firmware. The implementation only supports SPI NOR.
-
-config SPI_CADENCE_QUADSPI
-	tristate "Cadence Quad SPI controller"
-	depends on OF && (ARM || ARM64 || COMPILE_TEST)
-	help
-	  Enable support for the Cadence Quad SPI Flash controller.
-
-	  Cadence QSPI is a specialized controller for connecting an SPI
-	  Flash over 1/2/4-bit wide bus. Enable this option if you have a
-	  device with a Cadence QSPI controller and want to access the
-	  Flash as an MTD device.
-
-config SPI_HISI_SFC
-	tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
-	depends on ARCH_HISI || COMPILE_TEST
-	depends on HAS_IOMEM
-	help
-	  This enables support for hisilicon SPI-NOR flash controller.
-
-config SPI_MTK_QUADSPI
-	tristate "MediaTek Quad SPI controller"
-	depends on HAS_IOMEM
-	help
-	  This enables support for the Quad SPI controller in master mode.
-	  This controller does not support generic SPI. It only supports
-	  SPI NOR.
-
-config SPI_NXP_SPIFI
-	tristate "NXP SPI Flash Interface (SPIFI)"
-	depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
-	depends on HAS_IOMEM
-	help
-	  Enable support for the NXP LPC SPI Flash Interface controller.
-
-	  SPIFI is a specialized controller for connecting serial SPI
-	  Flash. Enable this option if you have a device with a SPIFI
-	  controller and want to access the Flash as a mtd device.
-
-config SPI_INTEL_SPI
-	tristate
-
-config SPI_INTEL_SPI_PCI
-	tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
-	depends on X86 && PCI
-	select SPI_INTEL_SPI
-	help
-	  This enables PCI support for the Intel PCH/PCU SPI controller in
-	  master mode. This controller is present in modern Intel hardware
-	  and is used to hold BIOS and other persistent settings. Using
-	  this driver it is possible to upgrade BIOS directly from Linux.
-
-	  Say N here unless you know what you are doing. Overwriting the
-	  SPI flash may render the system unbootable.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called intel-spi-pci.
-
-config SPI_INTEL_SPI_PLATFORM
-	tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
-	depends on X86
-	select SPI_INTEL_SPI
-	help
-	  This enables platform support for the Intel PCH/PCU SPI
-	  controller in master mode. This controller is present in modern
-	  Intel hardware and is used to hold BIOS and other persistent
-	  settings. Using this driver it is possible to upgrade BIOS
-	  directly from Linux.
-
-	  Say N here unless you know what you are doing. Overwriting the
-	  SPI flash may render the system unbootable.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called intel-spi-platform.
+source "drivers/mtd/spi-nor/controllers/Kconfig"
 
 endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 9c5ed03..6539238 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,10 +1,22 @@
 # SPDX-License-Identifier: GPL-2.0
+
+spi-nor-objs			:= core.o sfdp.o
+spi-nor-objs			+= atmel.o
+spi-nor-objs			+= catalyst.o
+spi-nor-objs			+= eon.o
+spi-nor-objs			+= esmt.o
+spi-nor-objs			+= everspin.o
+spi-nor-objs			+= fujitsu.o
+spi-nor-objs			+= gigadevice.o
+spi-nor-objs			+= intel.o
+spi-nor-objs			+= issi.o
+spi-nor-objs			+= macronix.o
+spi-nor-objs			+= micron-st.o
+spi-nor-objs			+= spansion.o
+spi-nor-objs			+= sst.o
+spi-nor-objs			+= winbond.o
+spi-nor-objs			+= xilinx.o
+spi-nor-objs			+= xmc.o
 obj-$(CONFIG_MTD_SPI_NOR)	+= spi-nor.o
-obj-$(CONFIG_SPI_ASPEED_SMC)	+= aspeed-smc.o
-obj-$(CONFIG_SPI_CADENCE_QUADSPI)	+= cadence-quadspi.o
-obj-$(CONFIG_SPI_HISI_SFC)	+= hisi-sfc.o
-obj-$(CONFIG_SPI_MTK_QUADSPI)    += mtk-quadspi.o
-obj-$(CONFIG_SPI_NXP_SPIFI)	+= nxp-spifi.o
-obj-$(CONFIG_SPI_INTEL_SPI)	+= intel-spi.o
-obj-$(CONFIG_SPI_INTEL_SPI_PCI)	+= intel-spi-pci.o
-obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM)	+= intel-spi-platform.o
+
+obj-$(CONFIG_MTD_SPI_NOR)	+= controllers/
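
The Makefile now builds spi-nor.o as a composite object: the core, the SFDP parser and one file per flash vendor are linked into a single module, while the controller drivers move under drivers/mtd/spi-nor/controllers/. A minimal kbuild sketch of the composite-object idiom (module and file names hypothetical):

	foo-objs := core.o vendor_a.o vendor_b.o
	obj-$(CONFIG_FOO) += foo.o
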
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
new file mode 100644
index 0000000..deacf87
--- /dev/null
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+/*
+ * The Atmel AT25FS010/AT25FS040 parts have some weird configuration for the
+ * block protection bits. We don't support them. But legacy behavior in Linux
+ * is to unlock the whole flash array on startup. Therefore, we have to support
+ * exactly this operation.
+ */
+static int atmel_at25fs_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+	return -EOPNOTSUPP;
+}
+
+static int atmel_at25fs_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+	int ret;
+
+	/* We only support unlocking the whole flash array */
+	if (ofs || len != nor->params->size)
+		return -EINVAL;
+
+	/* Write 0x00 to the status register to disable write protection */
+	ret = spi_nor_write_sr_and_check(nor, 0);
+	if (ret)
+		dev_dbg(nor->dev, "unable to clear BP bits, WP# asserted?\n");
+
+	return ret;
+}
+
+static int atmel_at25fs_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+	return -EOPNOTSUPP;
+}
+
+static const struct spi_nor_locking_ops atmel_at25fs_locking_ops = {
+	.lock = atmel_at25fs_lock,
+	.unlock = atmel_at25fs_unlock,
+	.is_locked = atmel_at25fs_is_locked,
+};
+
+static void atmel_at25fs_default_init(struct spi_nor *nor)
+{
+	nor->params->locking_ops = &atmel_at25fs_locking_ops;
+}
+
+static const struct spi_nor_fixups atmel_at25fs_fixups = {
+	.default_init = atmel_at25fs_default_init,
+};
+
+static const struct flash_info atmel_parts[] = {
+	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
+	{ "at25fs010",  INFO(0x1f6601, 0, 32 * 1024,   4, SECT_4K | SPI_NOR_HAS_LOCK)
+		.fixups = &atmel_at25fs_fixups },
+	{ "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8, SECT_4K | SPI_NOR_HAS_LOCK)
+		.fixups = &atmel_at25fs_fixups },
+
+	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024,   8, SECT_4K | SPI_NOR_HAS_LOCK) },
+	{ "at25df321",  INFO(0x1f4700, 0, 64 * 1024,  64, SECT_4K | SPI_NOR_HAS_LOCK) },
+	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64, SECT_4K | SPI_NOR_HAS_LOCK) },
+	{ "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
+
+	{ "at25sl321",	INFO(0x1f4216, 0, 64 * 1024, 64,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+
+	{ "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) },
+	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_HAS_LOCK) },
+	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_HAS_LOCK) },
+	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
+
+	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+};
+
+const struct spi_nor_manufacturer spi_nor_atmel = {
+	.name = "atmel",
+	.parts = atmel_parts,
+	.nparts = ARRAY_SIZE(atmel_parts),
+};
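
atmel.c is the first of the new per-vendor files: each exports a spi_nor_manufacturer with a flash_info table, and individual entries can attach fixups whose default_init hook runs early enough to override the part's parameters, here installing locking ops that only permit a whole-array unlock. A sketch of the one call shape those ops accept (any other offset or length returns -EINVAL, and lock/is_locked return -EOPNOTSUPP):

	/* Clears all BP bits by writing 0x00 to the status register. */
	err = atmel_at25fs_unlock(nor, 0, nor->params->size);
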
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
deleted file mode 100644
index 97a5e1e..0000000
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ /dev/null
@@ -1,1538 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for Cadence QSPI Controller
- *
- * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
- */
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/spi-nor.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/reset.h>
-#include <linux/sched.h>
-#include <linux/spi/spi.h>
-#include <linux/timer.h>
-
-#define CQSPI_NAME			"cadence-qspi"
-#define CQSPI_MAX_CHIPSELECT		16
-
-/* Quirks */
-#define CQSPI_NEEDS_WR_DELAY		BIT(0)
-
-/* Capabilities mask */
-#define CQSPI_BASE_HWCAPS_MASK					\
-	(SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST |		\
-	SNOR_HWCAPS_READ_1_1_2 | SNOR_HWCAPS_READ_1_1_4 |	\
-	SNOR_HWCAPS_PP)
-
-struct cqspi_st;
-
-struct cqspi_flash_pdata {
-	struct spi_nor	nor;
-	struct cqspi_st	*cqspi;
-	u32		clk_rate;
-	u32		read_delay;
-	u32		tshsl_ns;
-	u32		tsd2d_ns;
-	u32		tchsh_ns;
-	u32		tslch_ns;
-	u8		inst_width;
-	u8		addr_width;
-	u8		data_width;
-	u8		cs;
-	bool		registered;
-	bool		use_direct_mode;
-};
-
-struct cqspi_st {
-	struct platform_device	*pdev;
-
-	struct clk		*clk;
-	unsigned int		sclk;
-
-	void __iomem		*iobase;
-	void __iomem		*ahb_base;
-	resource_size_t		ahb_size;
-	struct completion	transfer_complete;
-	struct mutex		bus_mutex;
-
-	struct dma_chan		*rx_chan;
-	struct completion	rx_dma_complete;
-	dma_addr_t		mmap_phys_base;
-
-	int			current_cs;
-	int			current_page_size;
-	int			current_erase_size;
-	int			current_addr_width;
-	unsigned long		master_ref_clk_hz;
-	bool			is_decoded_cs;
-	u32			fifo_depth;
-	u32			fifo_width;
-	bool			rclk_en;
-	u32			trigger_address;
-	u32			wr_delay;
-	struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
-};
-
-struct cqspi_driver_platdata {
-	u32 hwcaps_mask;
-	u8 quirks;
-};
-
-/* Operation timeout value */
-#define CQSPI_TIMEOUT_MS			500
-#define CQSPI_READ_TIMEOUT_MS			10
-
-/* Instruction type */
-#define CQSPI_INST_TYPE_SINGLE			0
-#define CQSPI_INST_TYPE_DUAL			1
-#define CQSPI_INST_TYPE_QUAD			2
-#define CQSPI_INST_TYPE_OCTAL			3
-
-#define CQSPI_DUMMY_CLKS_PER_BYTE		8
-#define CQSPI_DUMMY_BYTES_MAX			4
-#define CQSPI_DUMMY_CLKS_MAX			31
-
-#define CQSPI_STIG_DATA_LEN_MAX			8
-
-/* Register map */
-#define CQSPI_REG_CONFIG			0x00
-#define CQSPI_REG_CONFIG_ENABLE_MASK		BIT(0)
-#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL	BIT(7)
-#define CQSPI_REG_CONFIG_DECODE_MASK		BIT(9)
-#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
-#define CQSPI_REG_CONFIG_DMA_MASK		BIT(15)
-#define CQSPI_REG_CONFIG_BAUD_LSB		19
-#define CQSPI_REG_CONFIG_IDLE_LSB		31
-#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
-#define CQSPI_REG_CONFIG_BAUD_MASK		0xF
-
-#define CQSPI_REG_RD_INSTR			0x04
-#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
-#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
-#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
-#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
-#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
-#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
-#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
-#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
-#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
-#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F
-
-#define CQSPI_REG_WR_INSTR			0x08
-#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0
-#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB	12
-#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16
-
-#define CQSPI_REG_DELAY				0x0C
-#define CQSPI_REG_DELAY_TSLCH_LSB		0
-#define CQSPI_REG_DELAY_TCHSH_LSB		8
-#define CQSPI_REG_DELAY_TSD2D_LSB		16
-#define CQSPI_REG_DELAY_TSHSL_LSB		24
-#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
-#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
-#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
-#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF
-
-#define CQSPI_REG_READCAPTURE			0x10
-#define CQSPI_REG_READCAPTURE_BYPASS_LSB	0
-#define CQSPI_REG_READCAPTURE_DELAY_LSB		1
-#define CQSPI_REG_READCAPTURE_DELAY_MASK	0xF
-
-#define CQSPI_REG_SIZE				0x14
-#define CQSPI_REG_SIZE_ADDRESS_LSB		0
-#define CQSPI_REG_SIZE_PAGE_LSB			4
-#define CQSPI_REG_SIZE_BLOCK_LSB		16
-#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
-#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
-#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F
-
-#define CQSPI_REG_SRAMPARTITION			0x18
-#define CQSPI_REG_INDIRECTTRIGGER		0x1C
-
-#define CQSPI_REG_DMA				0x20
-#define CQSPI_REG_DMA_SINGLE_LSB		0
-#define CQSPI_REG_DMA_BURST_LSB			8
-#define CQSPI_REG_DMA_SINGLE_MASK		0xFF
-#define CQSPI_REG_DMA_BURST_MASK		0xFF
-
-#define CQSPI_REG_REMAP				0x24
-#define CQSPI_REG_MODE_BIT			0x28
-
-#define CQSPI_REG_SDRAMLEVEL			0x2C
-#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
-#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
-#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
-#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF
-
-#define CQSPI_REG_IRQSTATUS			0x40
-#define CQSPI_REG_IRQMASK			0x44
-
-#define CQSPI_REG_INDIRECTRD			0x60
-#define CQSPI_REG_INDIRECTRD_START_MASK		BIT(0)
-#define CQSPI_REG_INDIRECTRD_CANCEL_MASK	BIT(1)
-#define CQSPI_REG_INDIRECTRD_DONE_MASK		BIT(5)
-
-#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
-#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
-#define CQSPI_REG_INDIRECTRDBYTES		0x6C
-
-#define CQSPI_REG_CMDCTRL			0x90
-#define CQSPI_REG_CMDCTRL_EXECUTE_MASK		BIT(0)
-#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK	BIT(1)
-#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
-#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
-#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
-#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
-#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
-#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
-#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
-#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
-#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
-#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7
-
-#define CQSPI_REG_INDIRECTWR			0x70
-#define CQSPI_REG_INDIRECTWR_START_MASK		BIT(0)
-#define CQSPI_REG_INDIRECTWR_CANCEL_MASK	BIT(1)
-#define CQSPI_REG_INDIRECTWR_DONE_MASK		BIT(5)
-
-#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
-#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
-#define CQSPI_REG_INDIRECTWRBYTES		0x7C
-
-#define CQSPI_REG_CMDADDRESS			0x94
-#define CQSPI_REG_CMDREADDATALOWER		0xA0
-#define CQSPI_REG_CMDREADDATAUPPER		0xA4
-#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
-#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC
-
-/* Interrupt status bits */
-#define CQSPI_REG_IRQ_MODE_ERR			BIT(0)
-#define CQSPI_REG_IRQ_UNDERFLOW			BIT(1)
-#define CQSPI_REG_IRQ_IND_COMP			BIT(2)
-#define CQSPI_REG_IRQ_IND_RD_REJECT		BIT(3)
-#define CQSPI_REG_IRQ_WR_PROTECTED_ERR		BIT(4)
-#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR		BIT(5)
-#define CQSPI_REG_IRQ_WATERMARK			BIT(6)
-#define CQSPI_REG_IRQ_IND_SRAM_FULL		BIT(12)
-
-#define CQSPI_IRQ_MASK_RD		(CQSPI_REG_IRQ_WATERMARK	| \
-					 CQSPI_REG_IRQ_IND_SRAM_FULL	| \
-					 CQSPI_REG_IRQ_IND_COMP)
-
-#define CQSPI_IRQ_MASK_WR		(CQSPI_REG_IRQ_IND_COMP		| \
-					 CQSPI_REG_IRQ_WATERMARK	| \
-					 CQSPI_REG_IRQ_UNDERFLOW)
-
-#define CQSPI_IRQ_STATUS_MASK		0x1FFFF
-
-static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
-{
-	u32 val;
-
-	return readl_relaxed_poll_timeout(reg, val,
-					  (((clr ? ~val : val) & mask) == mask),
-					  10, CQSPI_TIMEOUT_MS * 1000);
-}
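-
-/*
- * When @clr is true this helper waits for every bit in @mask to clear
- * ((~val & mask) == mask); otherwise it waits for them all to be set.
- * Polling runs at a 10 us interval against an overall deadline of
- * CQSPI_TIMEOUT_MS.
- */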
-
-static bool cqspi_is_idle(struct cqspi_st *cqspi)
-{
-	u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
-
-	return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB);
-}
-
-static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
-{
-	u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);
-
-	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
-	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
-}
-
-static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
-{
-	struct cqspi_st *cqspi = dev;
-	unsigned int irq_status;
-
-	/* Read interrupt status */
-	irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
-
-	/* Clear interrupt */
-	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
-
-	irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
-
-	if (irq_status)
-		complete(&cqspi->transfer_complete);
-
-	return IRQ_HANDLED;
-}
-
-static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	u32 rdreg = 0;
-
-	rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
-	rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
-	rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
-
-	return rdreg;
-}
-
-static int cqspi_wait_idle(struct cqspi_st *cqspi)
-{
-	const unsigned int poll_idle_retry = 3;
-	unsigned int count = 0;
-	unsigned long timeout;
-
-	timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
-	while (1) {
-		/*
-		 * Read a few times in succession to ensure the controller
-		 * is indeed idle, that is, the bit does not transition
-		 * low again.
-		 */
-		if (cqspi_is_idle(cqspi))
-			count++;
-		else
-			count = 0;
-
-		if (count >= poll_idle_retry)
-			return 0;
-
-		if (time_after(jiffies, timeout)) {
-			/* Timed out while the controller stayed busy. */
-			dev_err(&cqspi->pdev->dev,
-				"QSPI is still busy after %dms timeout.\n",
-				CQSPI_TIMEOUT_MS);
-			return -ETIMEDOUT;
-		}
-
-		cpu_relax();
-	}
-}
-
-static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
-{
-	void __iomem *reg_base = cqspi->iobase;
-	int ret;
-
-	/* Write CMDCTRL without starting execution. */
-	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
-	/* Start execution. */
-	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
-	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
-
-	/* Polling for completion. */
-	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
-				 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
-	if (ret) {
-		dev_err(&cqspi->pdev->dev,
-			"Flash command execution timed out.\n");
-		return ret;
-	}
-
-	/* Polling QSPI idle status. */
-	return cqspi_wait_idle(cqspi);
-}
-
-static int cqspi_command_read(struct spi_nor *nor,
-			      const u8 *txbuf, const unsigned n_tx,
-			      u8 *rxbuf, const unsigned n_rx)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	void __iomem *reg_base = cqspi->iobase;
-	unsigned int rdreg;
-	unsigned int reg;
-	unsigned int read_len;
-	int status;
-
-	if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
-		dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
-			n_rx, rxbuf);
-		return -EINVAL;
-	}
-
-	reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
-
-	rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
-	writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
-
-	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
-
-	/* 0 means 1 byte. */
-	reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
-		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
-	status = cqspi_exec_flash_cmd(cqspi, reg);
-	if (status)
-		return status;
-
-	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
-
-	/* Put the read value into rxbuf */
-	read_len = (n_rx > 4) ? 4 : n_rx;
-	memcpy(rxbuf, &reg, read_len);
-	rxbuf += read_len;
-
-	if (n_rx > 4) {
-		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
-
-		read_len = n_rx - read_len;
-		memcpy(rxbuf, &reg, read_len);
-	}
-
-	return 0;
-}
-
-static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
-			       const u8 *txbuf, const unsigned n_tx)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	void __iomem *reg_base = cqspi->iobase;
-	unsigned int reg;
-	unsigned int data;
-	u32 write_len;
-	int ret;
-
-	if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
-		dev_err(nor->dev,
-			"Invalid input argument, cmdlen %d txbuf 0x%p\n",
-			n_tx, txbuf);
-		return -EINVAL;
-	}
-
-	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
-	if (n_tx) {
-		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
-		reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
-			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
-		data = 0;
-		write_len = (n_tx > 4) ? 4 : n_tx;
-		memcpy(&data, txbuf, write_len);
-		txbuf += write_len;
-		writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
-
-		if (n_tx > 4) {
-			data = 0;
-			write_len = n_tx - 4;
-			memcpy(&data, txbuf, write_len);
-			writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
-		}
-	}
-	ret = cqspi_exec_flash_cmd(cqspi, reg);
-	return ret;
-}
-
-static int cqspi_command_write_addr(struct spi_nor *nor,
-				    const u8 opcode, const unsigned int addr)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	void __iomem *reg_base = cqspi->iobase;
-	unsigned int reg;
-
-	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
-	reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
-	reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
-		<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
-
-	writel(addr, reg_base + CQSPI_REG_CMDADDRESS);
-
-	return cqspi_exec_flash_cmd(cqspi, reg);
-}
-
-static int cqspi_read_setup(struct spi_nor *nor)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	void __iomem *reg_base = cqspi->iobase;
-	unsigned int dummy_clk = 0;
-	unsigned int reg;
-
-	reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
-	reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
-
-	/* Setup dummy clock cycles */
-	dummy_clk = nor->read_dummy;
-	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
-		return -EOPNOTSUPP;
-
-	if (dummy_clk / 8) {
-		reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
-		/* Set mode bits high to ensure chip doesn't enter XIP */
-		writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
-
-		/* Need to subtract the mode byte (8 clocks). */
-		if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
-			dummy_clk -= 8;
-
-		if (dummy_clk)
-			reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
-			       << CQSPI_REG_RD_INSTR_DUMMY_LSB;
-	}
-
-	writel(reg, reg_base + CQSPI_REG_RD_INSTR);
-
-	/* Set address width */
-	reg = readl(reg_base + CQSPI_REG_SIZE);
-	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
-	reg |= (nor->addr_width - 1);
-	writel(reg, reg_base + CQSPI_REG_SIZE);
-	return 0;
-}
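-
-/*
- * Worked example for the dummy-cycle setup above: a 1-1-1 fast read
- * with read_dummy = 8 enables the mode byte (driven as 0xFF so the
- * chip does not enter XIP), and because the instruction width is not
- * quad the mode byte accounts for all 8 clocks, leaving the DUMMY
- * field at zero.
- */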
-
-static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
-				       loff_t from_addr, const size_t n_rx)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	void __iomem *reg_base = cqspi->iobase;
-	void __iomem *ahb_base = cqspi->ahb_base;
-	unsigned int remaining = n_rx;
-	unsigned int mod_bytes = n_rx % 4;
-	unsigned int bytes_to_read = 0;
-	u8 *rxbuf_end = rxbuf + n_rx;
-	int ret = 0;
-
-	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
-	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
-
-	/* Clear all interrupts. */
-	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
-
-	writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
-
-	reinit_completion(&cqspi->transfer_complete);
-	writel(CQSPI_REG_INDIRECTRD_START_MASK,
-	       reg_base + CQSPI_REG_INDIRECTRD);
-
-	while (remaining > 0) {
-		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
-				msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
-			ret = -ETIMEDOUT;
-
-		bytes_to_read = cqspi_get_rd_sram_level(cqspi);
-
-		if (ret && bytes_to_read == 0) {
-			dev_err(nor->dev, "Indirect read timeout, no bytes\n");
-			goto failrd;
-		}
-
-		while (bytes_to_read != 0) {
-			unsigned int word_remain = round_down(remaining, 4);
-
-			bytes_to_read *= cqspi->fifo_width;
-			bytes_to_read = bytes_to_read > remaining ?
-					remaining : bytes_to_read;
-			bytes_to_read = round_down(bytes_to_read, 4);
-			/* Read 4-byte word chunks, then single bytes */
-			if (bytes_to_read) {
-				ioread32_rep(ahb_base, rxbuf,
-					     (bytes_to_read / 4));
-			} else if (!word_remain && mod_bytes) {
-				unsigned int temp = ioread32(ahb_base);
-
-				bytes_to_read = mod_bytes;
-				memcpy(rxbuf, &temp, min((unsigned int)
-							 (rxbuf_end - rxbuf),
-							 bytes_to_read));
-			}
-			rxbuf += bytes_to_read;
-			remaining -= bytes_to_read;
-			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
-		}
-
-		if (remaining > 0)
-			reinit_completion(&cqspi->transfer_complete);
-	}
-
-	/* Check indirect done status */
-	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
-				 CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
-	if (ret) {
-		dev_err(nor->dev,
-			"Indirect read completion error (%i)\n", ret);
-		goto failrd;
-	}
-
-	/* Disable interrupt */
-	writel(0, reg_base + CQSPI_REG_IRQMASK);
-
-	/* Clear indirect completion status */
-	writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);
-
-	return 0;
-
-failrd:
-	/* Disable interrupt */
-	writel(0, reg_base + CQSPI_REG_IRQMASK);
-
-	/* Cancel the indirect read */
-	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
-	       reg_base + CQSPI_REG_INDIRECTRD);
-	return ret;
-}
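-
-/*
- * The loop above drains the read SRAM in whole 32-bit words for as
- * long as full words remain, and only pulls the final 1-3 trailing
- * bytes out of a single ioread32() once no full word is left, since
- * the driver always accesses the AHB data port 32 bits at a time.
- */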
-
-static int cqspi_write_setup(struct spi_nor *nor)
-{
-	unsigned int reg;
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	void __iomem *reg_base = cqspi->iobase;
-
-	/* Set opcode. */
-	reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
-	writel(reg, reg_base + CQSPI_REG_WR_INSTR);
-	reg = cqspi_calc_rdreg(nor, nor->program_opcode);
-	writel(reg, reg_base + CQSPI_REG_RD_INSTR);
-
-	reg = readl(reg_base + CQSPI_REG_SIZE);
-	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
-	reg |= (nor->addr_width - 1);
-	writel(reg, reg_base + CQSPI_REG_SIZE);
-	return 0;
-}
-
-static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
-					const u8 *txbuf, const size_t n_tx)
-{
-	const unsigned int page_size = nor->page_size;
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	void __iomem *reg_base = cqspi->iobase;
-	unsigned int remaining = n_tx;
-	unsigned int write_bytes;
-	int ret;
-
-	writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
-	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
-
-	/* Clear all interrupts. */
-	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
-
-	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);
-
-	reinit_completion(&cqspi->transfer_complete);
-	writel(CQSPI_REG_INDIRECTWR_START_MASK,
-	       reg_base + CQSPI_REG_INDIRECTWR);
-	/*
-	 * As per the 66AK2G02 TRM, SPRUHY8F section 11.15.5.3 (Indirect
-	 * Access Controller programming sequence), a couple of QSPI_REF_CLK
-	 * cycles of delay are required for the above bit to be internally
-	 * synchronized by the QSPI module. Provide 5 cycles of delay.
-	 */
-	if (cqspi->wr_delay)
-		ndelay(cqspi->wr_delay);
-
-	while (remaining > 0) {
-		size_t write_words, mod_bytes;
-
-		write_bytes = remaining > page_size ? page_size : remaining;
-		write_words = write_bytes / 4;
-		mod_bytes = write_bytes % 4;
-		/* Write 4 bytes at a time, then single bytes. */
-		if (write_words) {
-			iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
-			txbuf += (write_words * 4);
-		}
-		if (mod_bytes) {
-			unsigned int temp = 0xFFFFFFFF;
-
-			memcpy(&temp, txbuf, mod_bytes);
-			iowrite32(temp, cqspi->ahb_base);
-			txbuf += mod_bytes;
-		}
-
-		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
-					msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
-			dev_err(nor->dev, "Indirect write timeout\n");
-			ret = -ETIMEDOUT;
-			goto failwr;
-		}
-
-		remaining -= write_bytes;
-
-		if (remaining > 0)
-			reinit_completion(&cqspi->transfer_complete);
-	}
-
-	/* Check indirect done status */
-	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
-				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
-	if (ret) {
-		dev_err(nor->dev,
-			"Indirect write completion error (%i)\n", ret);
-		goto failwr;
-	}
-
-	/* Disable interrupt. */
-	writel(0, reg_base + CQSPI_REG_IRQMASK);
-
-	/* Clear indirect completion status */
-	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);
-
-	cqspi_wait_idle(cqspi);
-
-	return 0;
-
-failwr:
-	/* Disable interrupt. */
-	writel(0, reg_base + CQSPI_REG_IRQMASK);
-
-	/* Cancel the indirect write */
-	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
-	       reg_base + CQSPI_REG_INDIRECTWR);
-	return ret;
-}
-
-static void cqspi_chipselect(struct spi_nor *nor)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	void __iomem *reg_base = cqspi->iobase;
-	unsigned int chip_select = f_pdata->cs;
-	unsigned int reg;
-
-	reg = readl(reg_base + CQSPI_REG_CONFIG);
-	if (cqspi->is_decoded_cs) {
-		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
-	} else {
-		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;
-
-		/* Convert CS when no decoder is used:
-		 * CS0 to 4b'1110
-		 * CS1 to 4b'1101
-		 * CS2 to 4b'1011
-		 * CS3 to 4b'0111
-		 */
-		chip_select = 0xF & ~(1 << chip_select);
-	}
-
-	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
-		 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
-	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
-	    << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
-	writel(reg, reg_base + CQSPI_REG_CONFIG);
-}
-
-static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	void __iomem *iobase = cqspi->iobase;
-	unsigned int reg;
-
-	/* configure page size and block size. */
-	reg = readl(iobase + CQSPI_REG_SIZE);
-	reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
-	reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
-	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
-	reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
-	reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
-	reg |= (nor->addr_width - 1);
-	writel(reg, iobase + CQSPI_REG_SIZE);
-
-	/* configure the chip select */
-	cqspi_chipselect(nor);
-
-	/* Store the new configuration of the controller */
-	cqspi->current_page_size = nor->page_size;
-	cqspi->current_erase_size = nor->mtd.erasesize;
-	cqspi->current_addr_width = nor->addr_width;
-}
-
-static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
-					   const unsigned int ns_val)
-{
-	unsigned int ticks;
-
-	ticks = ref_clk_hz / 1000;	/* kHz */
-	ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);
-
-	return ticks;
-}
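-
-/*
- * Example: with a 400 MHz reference clock and ns_val = 50,
- * ticks = DIV_ROUND_UP(400000 * 50, 1000000) = 20, i.e. 50 ns rounded
- * up to whole reference-clock ticks. Scaling to kHz first keeps the
- * 32-bit multiplication from overflowing.
- */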
-
-static void cqspi_delay(struct spi_nor *nor)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	void __iomem *iobase = cqspi->iobase;
-	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
-	unsigned int tshsl, tchsh, tslch, tsd2d;
-	unsigned int reg;
-	unsigned int tsclk;
-
-	/* calculate the number of ref ticks for one sclk tick */
-	tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);
-
-	tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
-	/* this particular value must be at least one sclk */
-	if (tshsl < tsclk)
-		tshsl = tsclk;
-
-	tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
-	tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
-	tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);
-
-	reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
-	       << CQSPI_REG_DELAY_TSHSL_LSB;
-	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
-		<< CQSPI_REG_DELAY_TCHSH_LSB;
-	reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
-		<< CQSPI_REG_DELAY_TSLCH_LSB;
-	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
-		<< CQSPI_REG_DELAY_TSD2D_LSB;
-	writel(reg, iobase + CQSPI_REG_DELAY);
-}
-
-static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
-{
-	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
-	void __iomem *reg_base = cqspi->iobase;
-	u32 reg, div;
-
-	/* Recalculate the baudrate divisor based on the QSPI specification. */
-	div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
-
-	reg = readl(reg_base + CQSPI_REG_CONFIG);
-	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
-	reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
-	writel(reg, reg_base + CQSPI_REG_CONFIG);
-}
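-
-/*
- * Example: ref_clk = 400 MHz and sclk = 50 MHz give
- * div = DIV_ROUND_UP(400, 2 * 50) - 1 = 3; the controller then divides
- * the reference clock by 2 * (div + 1) = 8, i.e. exactly 50 MHz. The
- * round-up guarantees that the resulting SCLK never exceeds the
- * requested rate.
- */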
-
-static void cqspi_readdata_capture(struct cqspi_st *cqspi,
-				   const bool bypass,
-				   const unsigned int delay)
-{
-	void __iomem *reg_base = cqspi->iobase;
-	unsigned int reg;
-
-	reg = readl(reg_base + CQSPI_REG_READCAPTURE);
-
-	if (bypass)
-		reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
-	else
-		reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
-
-	reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
-		 << CQSPI_REG_READCAPTURE_DELAY_LSB);
-
-	reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
-		<< CQSPI_REG_READCAPTURE_DELAY_LSB;
-
-	writel(reg, reg_base + CQSPI_REG_READCAPTURE);
-}
-
-static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
-{
-	void __iomem *reg_base = cqspi->iobase;
-	unsigned int reg;
-
-	reg = readl(reg_base + CQSPI_REG_CONFIG);
-
-	if (enable)
-		reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
-	else
-		reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
-
-	writel(reg, reg_base + CQSPI_REG_CONFIG);
-}
-
-static void cqspi_configure(struct spi_nor *nor)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	const unsigned int sclk = f_pdata->clk_rate;
-	int switch_cs = (cqspi->current_cs != f_pdata->cs);
-	int switch_ck = (cqspi->sclk != sclk);
-
-	if ((cqspi->current_page_size != nor->page_size) ||
-	    (cqspi->current_erase_size != nor->mtd.erasesize) ||
-	    (cqspi->current_addr_width != nor->addr_width))
-		switch_cs = 1;
-
-	if (switch_cs || switch_ck)
-		cqspi_controller_enable(cqspi, 0);
-
-	/* Switch chip select. */
-	if (switch_cs) {
-		cqspi->current_cs = f_pdata->cs;
-		cqspi_configure_cs_and_sizes(nor);
-	}
-
-	/* Set up the baudrate divisor and delays */
-	if (switch_ck) {
-		cqspi->sclk = sclk;
-		cqspi_config_baudrate_div(cqspi);
-		cqspi_delay(nor);
-		cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
-				       f_pdata->read_delay);
-	}
-
-	if (switch_cs || switch_ck)
-		cqspi_controller_enable(cqspi, 1);
-}
-
-static int cqspi_set_protocol(struct spi_nor *nor, const int read)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-
-	f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
-	f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
-	f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-
-	if (read) {
-		switch (nor->read_proto) {
-		case SNOR_PROTO_1_1_1:
-			f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-			break;
-		case SNOR_PROTO_1_1_2:
-			f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
-			break;
-		case SNOR_PROTO_1_1_4:
-			f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
-			break;
-		case SNOR_PROTO_1_1_8:
-			f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-
-	cqspi_configure(nor);
-
-	return 0;
-}
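-
-/*
- * The x-y-z protocol naming maps directly onto the three width fields
- * above: for SNOR_PROTO_1_1_4 the opcode and address phases stay
- * single-bit and only the data phase widens to quad, which is why the
- * switch changes data_width alone.
- */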
-
-static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
-			   size_t len, const u_char *buf)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	int ret;
-
-	ret = cqspi_set_protocol(nor, 0);
-	if (ret)
-		return ret;
-
-	ret = cqspi_write_setup(nor);
-	if (ret)
-		return ret;
-
-	if (f_pdata->use_direct_mode) {
-		memcpy_toio(cqspi->ahb_base + to, buf, len);
-		ret = cqspi_wait_idle(cqspi);
-	} else {
-		ret = cqspi_indirect_write_execute(nor, to, buf, len);
-	}
-	if (ret)
-		return ret;
-
-	return len;
-}
-
-static void cqspi_rx_dma_callback(void *param)
-{
-	struct cqspi_st *cqspi = param;
-
-	complete(&cqspi->rx_dma_complete);
-}
-
-static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
-				     loff_t from, size_t len)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
-	dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
-	int ret = 0;
-	struct dma_async_tx_descriptor *tx;
-	dma_cookie_t cookie;
-	dma_addr_t dma_dst;
-
-	if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
-		memcpy_fromio(buf, cqspi->ahb_base + from, len);
-		return 0;
-	}
-
-	dma_dst = dma_map_single(nor->dev, buf, len, DMA_FROM_DEVICE);
-	if (dma_mapping_error(nor->dev, dma_dst)) {
-		dev_err(nor->dev, "dma mapping failed\n");
-		return -ENOMEM;
-	}
-	tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
-				       len, flags);
-	if (!tx) {
-		dev_err(nor->dev, "device_prep_dma_memcpy error\n");
-		ret = -EIO;
-		goto err_unmap;
-	}
-
-	tx->callback = cqspi_rx_dma_callback;
-	tx->callback_param = cqspi;
-	cookie = tx->tx_submit(tx);
-	reinit_completion(&cqspi->rx_dma_complete);
-
-	ret = dma_submit_error(cookie);
-	if (ret) {
-		dev_err(nor->dev, "dma_submit_error %d\n", cookie);
-		ret = -EIO;
-		goto err_unmap;
-	}
-
-	dma_async_issue_pending(cqspi->rx_chan);
-	if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
-					 msecs_to_jiffies(len))) {
-		dmaengine_terminate_sync(cqspi->rx_chan);
-		dev_err(nor->dev, "DMA wait_for_completion_timeout\n");
-		ret = -ETIMEDOUT;
-		goto err_unmap;
-	}
-
-err_unmap:
-	dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
-
-	return ret;
-}
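-
-/*
- * Note that the DMA completion timeout above scales with the transfer
- * length: msecs_to_jiffies(len) allows one millisecond per byte, a
- * generous bound even at slow SPI clock rates.
- */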
-
-static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
-			  size_t len, u_char *buf)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	int ret;
-
-	ret = cqspi_set_protocol(nor, 1);
-	if (ret)
-		return ret;
-
-	ret = cqspi_read_setup(nor);
-	if (ret)
-		return ret;
-
-	if (f_pdata->use_direct_mode)
-		ret = cqspi_direct_read_execute(nor, buf, from, len);
-	else
-		ret = cqspi_indirect_read_execute(nor, buf, from, len);
-	if (ret)
-		return ret;
-
-	return len;
-}
-
-static int cqspi_erase(struct spi_nor *nor, loff_t offs)
-{
-	int ret;
-
-	ret = cqspi_set_protocol(nor, 0);
-	if (ret)
-		return ret;
-
-	/* Send write enable, then erase commands. */
-	ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
-	if (ret)
-		return ret;
-
-	/* Set up command buffer. */
-	ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-
-	mutex_lock(&cqspi->bus_mutex);
-
-	return 0;
-}
-
-static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
-{
-	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
-
-	mutex_unlock(&cqspi->bus_mutex);
-}
-
-static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
-{
-	int ret;
-
-	ret = cqspi_set_protocol(nor, 0);
-	if (!ret)
-		ret = cqspi_command_read(nor, &opcode, 1, buf, len);
-
-	return ret;
-}
-
-static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
-{
-	int ret;
-
-	ret = cqspi_set_protocol(nor, 0);
-	if (!ret)
-		ret = cqspi_command_write(nor, opcode, buf, len);
-
-	return ret;
-}
-
-static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
-				    struct cqspi_flash_pdata *f_pdata,
-				    struct device_node *np)
-{
-	if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
-		dev_err(&pdev->dev, "couldn't determine read-delay\n");
-		return -ENXIO;
-	}
-
-	if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
-		dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
-		return -ENXIO;
-	}
-
-	if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
-		dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
-		return -ENXIO;
-	}
-
-	if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
-		dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
-		return -ENXIO;
-	}
-
-	if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
-		dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
-		return -ENXIO;
-	}
-
-	if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
-		dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
-		return -ENXIO;
-	}
-
-	return 0;
-}
-
-static int cqspi_of_get_pdata(struct platform_device *pdev)
-{
-	struct device_node *np = pdev->dev.of_node;
-	struct cqspi_st *cqspi = platform_get_drvdata(pdev);
-
-	cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
-
-	if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
-		dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
-		return -ENXIO;
-	}
-
-	if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
-		dev_err(&pdev->dev, "couldn't determine fifo-width\n");
-		return -ENXIO;
-	}
-
-	if (of_property_read_u32(np, "cdns,trigger-address",
-				 &cqspi->trigger_address)) {
-		dev_err(&pdev->dev, "couldn't determine trigger-address\n");
-		return -ENXIO;
-	}
-
-	cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
-
-	return 0;
-}
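-
-/*
- * Putting the two lookups above together, a minimal and purely
- * illustrative device tree snippet for this controller could read:
- *
- *	qspi: spi@ff705000 {
- *		compatible = "cdns,qspi-nor";
- *		cdns,fifo-depth = <128>;
- *		cdns,fifo-width = <4>;
- *		cdns,trigger-address = <0x00000000>;
- *
- *		flash@0 {
- *			compatible = "jedec,spi-nor";
- *			reg = <0>;
- *			spi-max-frequency = <50000000>;
- *			cdns,read-delay = <4>;
- *			cdns,tshsl-ns = <50>;
- *			cdns,tsd2d-ns = <50>;
- *			cdns,tchsh-ns = <4>;
- *			cdns,tslch-ns = <4>;
- *		};
- *	};
- *
- * (All numeric values here are placeholders, not recommendations.)
- */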
-
-static void cqspi_controller_init(struct cqspi_st *cqspi)
-{
-	u32 reg;
-
-	cqspi_controller_enable(cqspi, 0);
-
-	/* Configure the remap address register, no remap */
-	writel(0, cqspi->iobase + CQSPI_REG_REMAP);
-
-	/* Disable all interrupts. */
-	writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);
-
-	/* Configure the SRAM split to 1:1. */
-	writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
-
-	/* Load indirect trigger address. */
-	writel(cqspi->trigger_address,
-	       cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);
-
-	/* Program read watermark -- 1/2 of the FIFO. */
-	writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
-	       cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
-	/* Program write watermark -- 1/8 of the FIFO. */
-	writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
-	       cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
-
-	/* Enable Direct Access Controller */
-	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
-	reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
-	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
-
-	cqspi_controller_enable(cqspi, 1);
-}
-
-static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
-{
-	dma_cap_mask_t mask;
-
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_MEMCPY, mask);
-
-	cqspi->rx_chan = dma_request_chan_by_mask(&mask);
-	if (IS_ERR(cqspi->rx_chan)) {
-		dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
-		cqspi->rx_chan = NULL;
-	}
-	init_completion(&cqspi->rx_dma_complete);
-}
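-
-/*
- * The DMA channel is strictly optional: if no memcpy-capable channel
- * is available, rx_chan stays NULL and cqspi_direct_read_execute()
- * silently falls back to memcpy_fromio() from the AHB window.
- */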
-
-static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
-{
-	struct platform_device *pdev = cqspi->pdev;
-	struct device *dev = &pdev->dev;
-	const struct cqspi_driver_platdata *ddata;
-	struct spi_nor_hwcaps hwcaps;
-	struct cqspi_flash_pdata *f_pdata;
-	struct spi_nor *nor;
-	struct mtd_info *mtd;
-	unsigned int cs;
-	int i, ret;
-
-	ddata = of_device_get_match_data(dev);
-	if (!ddata) {
-		dev_err(dev, "Couldn't find driver data\n");
-		return -EINVAL;
-	}
-	hwcaps.mask = ddata->hwcaps_mask;
-
-	/* Get flash device data */
-	for_each_available_child_of_node(dev->of_node, np) {
-		ret = of_property_read_u32(np, "reg", &cs);
-		if (ret) {
-			dev_err(dev, "Couldn't determine chip select.\n");
-			goto err;
-		}
-
-		if (cs >= CQSPI_MAX_CHIPSELECT) {
-			ret = -EINVAL;
-			dev_err(dev, "Chip select %d out of range.\n", cs);
-			goto err;
-		}
-
-		f_pdata = &cqspi->f_pdata[cs];
-		f_pdata->cqspi = cqspi;
-		f_pdata->cs = cs;
-
-		ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
-		if (ret)
-			goto err;
-
-		nor = &f_pdata->nor;
-		mtd = &nor->mtd;
-
-		mtd->priv = nor;
-
-		nor->dev = dev;
-		spi_nor_set_flash_node(nor, np);
-		nor->priv = f_pdata;
-
-		nor->read_reg = cqspi_read_reg;
-		nor->write_reg = cqspi_write_reg;
-		nor->read = cqspi_read;
-		nor->write = cqspi_write;
-		nor->erase = cqspi_erase;
-		nor->prepare = cqspi_prep;
-		nor->unprepare = cqspi_unprep;
-
-		mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
-					   dev_name(dev), cs);
-		if (!mtd->name) {
-			ret = -ENOMEM;
-			goto err;
-		}
-
-		ret = spi_nor_scan(nor, NULL, &hwcaps);
-		if (ret)
-			goto err;
-
-		ret = mtd_device_register(mtd, NULL, 0);
-		if (ret)
-			goto err;
-
-		f_pdata->registered = true;
-
-		if (mtd->size <= cqspi->ahb_size) {
-			f_pdata->use_direct_mode = true;
-			dev_dbg(nor->dev, "using direct mode for %s\n",
-				mtd->name);
-
-			if (!cqspi->rx_chan)
-				cqspi_request_mmap_dma(cqspi);
-		}
-	}
-
-	return 0;
-
-err:
-	for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
-		if (cqspi->f_pdata[i].registered)
-			mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
-	return ret;
-}
-
-static int cqspi_probe(struct platform_device *pdev)
-{
-	struct device_node *np = pdev->dev.of_node;
-	struct device *dev = &pdev->dev;
-	struct cqspi_st *cqspi;
-	struct resource *res;
-	struct resource *res_ahb;
-	struct reset_control *rstc, *rstc_ocp;
-	const struct cqspi_driver_platdata *ddata;
-	int ret;
-	int irq;
-
-	cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
-	if (!cqspi)
-		return -ENOMEM;
-
-	mutex_init(&cqspi->bus_mutex);
-	cqspi->pdev = pdev;
-	platform_set_drvdata(pdev, cqspi);
-
-	/* Obtain configuration from OF. */
-	ret = cqspi_of_get_pdata(pdev);
-	if (ret) {
-		dev_err(dev, "Cannot get mandatory OF data.\n");
-		return -ENODEV;
-	}
-
-	/* Obtain QSPI clock. */
-	cqspi->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(cqspi->clk)) {
-		dev_err(dev, "Cannot claim QSPI clock.\n");
-		return PTR_ERR(cqspi->clk);
-	}
-
-	/* Obtain and remap controller address. */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	cqspi->iobase = devm_ioremap_resource(dev, res);
-	if (IS_ERR(cqspi->iobase)) {
-		dev_err(dev, "Cannot remap controller address.\n");
-		return PTR_ERR(cqspi->iobase);
-	}
-
-	/* Obtain and remap AHB address. */
-	res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
-	if (IS_ERR(cqspi->ahb_base)) {
-		dev_err(dev, "Cannot remap AHB address.\n");
-		return PTR_ERR(cqspi->ahb_base);
-	}
-	cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
-	cqspi->ahb_size = resource_size(res_ahb);
-
-	init_completion(&cqspi->transfer_complete);
-
-	/* Obtain IRQ line. */
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "Cannot obtain IRQ.\n");
-		return -ENXIO;
-	}
-
-	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0) {
-		pm_runtime_put_noidle(dev);
-		return ret;
-	}
-
-	ret = clk_prepare_enable(cqspi->clk);
-	if (ret) {
-		dev_err(dev, "Cannot enable QSPI clock.\n");
-		goto probe_clk_failed;
-	}
-
-	/* Obtain QSPI reset control */
-	rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
-	if (IS_ERR(rstc)) {
-		dev_err(dev, "Cannot get QSPI reset.\n");
-		return PTR_ERR(rstc);
-	}
-
-	rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
-	if (IS_ERR(rstc_ocp)) {
-		dev_err(dev, "Cannot get QSPI OCP reset.\n");
-		return PTR_ERR(rstc_ocp);
-	}
-
-	reset_control_assert(rstc);
-	reset_control_deassert(rstc);
-
-	reset_control_assert(rstc_ocp);
-	reset_control_deassert(rstc_ocp);
-
-	cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
-	ddata  = of_device_get_match_data(dev);
-	if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY))
-		cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
-						   cqspi->master_ref_clk_hz);
-
-	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
-			       pdev->name, cqspi);
-	if (ret) {
-		dev_err(dev, "Cannot request IRQ.\n");
-		goto probe_irq_failed;
-	}
-
-	cqspi_wait_idle(cqspi);
-	cqspi_controller_init(cqspi);
-	cqspi->current_cs = -1;
-	cqspi->sclk = 0;
-
-	ret = cqspi_setup_flash(cqspi, np);
-	if (ret) {
-		dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
-		goto probe_setup_failed;
-	}
-
-	return ret;
-probe_setup_failed:
-	cqspi_controller_enable(cqspi, 0);
-probe_irq_failed:
-	clk_disable_unprepare(cqspi->clk);
-probe_clk_failed:
-	pm_runtime_put_sync(dev);
-	pm_runtime_disable(dev);
-	return ret;
-}
-
-static int cqspi_remove(struct platform_device *pdev)
-{
-	struct cqspi_st *cqspi = platform_get_drvdata(pdev);
-	int i;
-
-	for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
-		if (cqspi->f_pdata[i].registered)
-			mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
-
-	cqspi_controller_enable(cqspi, 0);
-
-	if (cqspi->rx_chan)
-		dma_release_channel(cqspi->rx_chan);
-
-	clk_disable_unprepare(cqspi->clk);
-
-	pm_runtime_put_sync(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int cqspi_suspend(struct device *dev)
-{
-	struct cqspi_st *cqspi = dev_get_drvdata(dev);
-
-	cqspi_controller_enable(cqspi, 0);
-	return 0;
-}
-
-static int cqspi_resume(struct device *dev)
-{
-	struct cqspi_st *cqspi = dev_get_drvdata(dev);
-
-	cqspi_controller_enable(cqspi, 1);
-	return 0;
-}
-
-static const struct dev_pm_ops cqspi_dev_pm_ops = {
-	.suspend = cqspi_suspend,
-	.resume = cqspi_resume,
-};
-
-#define CQSPI_DEV_PM_OPS	(&cqspi_dev_pm_ops)
-#else
-#define CQSPI_DEV_PM_OPS	NULL
-#endif
-
-static const struct cqspi_driver_platdata cdns_qspi = {
-	.hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
-};
-
-static const struct cqspi_driver_platdata k2g_qspi = {
-	.hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
-	.quirks = CQSPI_NEEDS_WR_DELAY,
-};
-
-static const struct cqspi_driver_platdata am654_ospi = {
-	.hwcaps_mask = CQSPI_BASE_HWCAPS_MASK | SNOR_HWCAPS_READ_1_1_8,
-	.quirks = CQSPI_NEEDS_WR_DELAY,
-};
-
-static const struct of_device_id cqspi_dt_ids[] = {
-	{
-		.compatible = "cdns,qspi-nor",
-		.data = &cdns_qspi,
-	},
-	{
-		.compatible = "ti,k2g-qspi",
-		.data = &k2g_qspi,
-	},
-	{
-		.compatible = "ti,am654-ospi",
-		.data = &am654_ospi,
-	},
-	{ /* end of table */ }
-};
-
-MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
-
-static struct platform_driver cqspi_platform_driver = {
-	.probe = cqspi_probe,
-	.remove = cqspi_remove,
-	.driver = {
-		.name = CQSPI_NAME,
-		.pm = CQSPI_DEV_PM_OPS,
-		.of_match_table = cqspi_dt_ids,
-	},
-};
-
-module_platform_driver(cqspi_platform_driver);
-
-MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" CQSPI_NAME);
-MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
-MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
diff --git a/drivers/mtd/spi-nor/catalyst.c b/drivers/mtd/spi-nor/catalyst.c
new file mode 100644
index 0000000..011b83e
--- /dev/null
+++ b/drivers/mtd/spi-nor/catalyst.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info catalyst_parts[] = {
+	/* Catalyst / On Semiconductor -- non-JEDEC */
+	{ "cat25c11", CAT25_INFO(16, 8, 16, 1,
+				 SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+	{ "cat25c03", CAT25_INFO(32, 8, 16, 2,
+				 SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+	{ "cat25c09", CAT25_INFO(128, 8, 32, 2,
+				 SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+	{ "cat25c17", CAT25_INFO(256, 8, 32, 2,
+				 SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+	{ "cat25128", CAT25_INFO(2048, 8, 64, 2,
+				 SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+};
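+
+/*
+ * Reading one entry: CAT25_INFO(2048, 8, 64, 2, ...) gives the cat25128
+ * a 2048-byte sector size, 8 sectors, a 64-byte page and 2-byte
+ * addressing -- 16 KiB in total, i.e. the 128 kbit the part number
+ * advertises. SPI_NOR_NO_ERASE marks these EEPROM-like parts as
+ * writable in place with no erase step, and SPI_NOR_NO_FR disables
+ * fast read.
+ */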
+
+const struct spi_nor_manufacturer spi_nor_catalyst = {
+	.name = "catalyst",
+	.parts = catalyst_parts,
+	.nparts = ARRAY_SIZE(catalyst_parts),
+};
diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig
new file mode 100644
index 0000000..5c0e0ec
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/Kconfig
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SPI_ASPEED_SMC
+	tristate "Aspeed flash controllers in SPI mode"
+	depends on ARCH_ASPEED || COMPILE_TEST
+	depends on HAS_IOMEM && OF
+	help
+	  This enables support for the Firmware Memory controller (FMC)
+	  in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips,
+	  and support for the SPI flash memory controller (SPI) for
+	  the host firmware. The implementation only supports SPI NOR.
+
+config SPI_HISI_SFC
+	tristate "Hisilicon FMC SPI NOR Flash Controller(SFC)"
+	depends on ARCH_HISI || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This enables support for HiSilicon FMC SPI NOR flash controller.
+
+config SPI_NXP_SPIFI
+	tristate "NXP SPI Flash Interface (SPIFI)"
+	depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+	depends on HAS_IOMEM
+	help
+	  Enable support for the NXP LPC SPI Flash Interface controller.
+
+	  SPIFI is a specialized controller for connecting serial SPI
+	  Flash. Enable this option if you have a device with a SPIFI
+	  controller and want to access the Flash as an mtd device.
+
+config SPI_INTEL_SPI
+	tristate
+
+config SPI_INTEL_SPI_PCI
+	tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
+	depends on X86 && PCI
+	select SPI_INTEL_SPI
+	help
+	  This enables PCI support for the Intel PCH/PCU SPI controller in
+	  master mode. This controller is present in modern Intel hardware
+	  and is used to hold BIOS and other persistent settings. Using
+	  this driver it is possible to upgrade BIOS directly from Linux.
+
+	  Say N here unless you know what you are doing. Overwriting the
+	  SPI flash may render the system unbootable.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called intel-spi-pci.
+
+config SPI_INTEL_SPI_PLATFORM
+	tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
+	depends on X86
+	select SPI_INTEL_SPI
+	help
+	  This enables platform support for the Intel PCH/PCU SPI
+	  controller in master mode. This controller is present in modern
+	  Intel hardware and is used to hold BIOS and other persistent
+	  settings. Using this driver it is possible to upgrade BIOS
+	  directly from Linux.
+
+	  Say N here unless you know what you are doing. Overwriting the
+	  SPI flash may render the system unbootable.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called intel-spi-platform.
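+
+# Illustrative only: a board wanting, say, the HiSilicon controller
+# above built in would typically end up with a .config containing
+#
+#   CONFIG_MTD_SPI_NOR=y
+#   CONFIG_SPI_HISI_SFC=y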
diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile
new file mode 100644
index 0000000..e7abba4
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_SPI_ASPEED_SMC)	+= aspeed-smc.o
+obj-$(CONFIG_SPI_HISI_SFC)	+= hisi-sfc.o
+obj-$(CONFIG_SPI_NXP_SPIFI)	+= nxp-spifi.o
+obj-$(CONFIG_SPI_INTEL_SPI)	+= intel-spi.o
+obj-$(CONFIG_SPI_INTEL_SPI_PCI)	+= intel-spi-pci.o
+obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM)	+= intel-spi-platform.o
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
similarity index 97%
rename from drivers/mtd/spi-nor/aspeed-smc.c
rename to drivers/mtd/spi-nor/controllers/aspeed-smc.c
index 009c1da..7225870 100644
--- a/drivers/mtd/spi-nor/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
@@ -109,7 +109,7 @@
 	void __iomem *ahb_base;			/* per-chip windows resource */
 	u32 ahb_window_size;			/* full mapping window size */
 
-	struct aspeed_smc_chip *chips[0];	/* pointers to attached chips */
+	struct aspeed_smc_chip *chips[];	/* pointers to attached chips */
 };
 
 /*
@@ -305,7 +305,7 @@
 	writel(ctl, chip->ctl);		/* default to fread or read mode */
 }
 
-static int aspeed_smc_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int aspeed_smc_prep(struct spi_nor *nor)
 {
 	struct aspeed_smc_chip *chip = nor->priv;
 
@@ -313,14 +313,15 @@
 	return 0;
 }
 
-static void aspeed_smc_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void aspeed_smc_unprep(struct spi_nor *nor)
 {
 	struct aspeed_smc_chip *chip = nor->priv;
 
 	mutex_unlock(&chip->controller->mutex);
 }
 
-static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+			       size_t len)
 {
 	struct aspeed_smc_chip *chip = nor->priv;
 
@@ -331,8 +332,8 @@
 	return 0;
 }
 
-static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
-				int len)
+static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+				size_t len)
 {
 	struct aspeed_smc_chip *chip = nor->priv;
 
@@ -353,7 +354,7 @@
 	default:
 		WARN_ONCE(1, "Unexpected address width %u, defaulting to 3\n",
 			  nor->addr_width);
-		/* FALLTHROUGH */
+		fallthrough;
 	case 3:
 		cmdaddr = addr & 0xFFFFFF;
 		cmdaddr |= cmd << 24;
@@ -726,7 +727,7 @@
 
 	/*
 	 * TODO: Adjust clocks if fast read is supported and interpret
-	 * SPI-NOR flags to adjust controller settings.
+	 * SPI NOR flags to adjust controller settings.
 	 */
 	if (chip->nor.read_proto == SNOR_PROTO_1_1_1) {
 		if (chip->nor.read_dummy == 0)
@@ -746,6 +747,15 @@
 	return 0;
 }
 
+static const struct spi_nor_controller_ops aspeed_smc_controller_ops = {
+	.prepare = aspeed_smc_prep,
+	.unprepare = aspeed_smc_unprep,
+	.read_reg = aspeed_smc_read_reg,
+	.write_reg = aspeed_smc_write_reg,
+	.read = aspeed_smc_read_user,
+	.write = aspeed_smc_write_user,
+};
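+
+/*
+ * Collecting the callbacks in one const structure replaces the
+ * individual nor->read/nor->write/nor->prepare/... assignments removed
+ * further down: the core now reaches the controller exclusively through
+ * nor->controller_ops, and the table itself can live in read-only
+ * memory.
+ */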
+
 static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
 				  struct device_node *np, struct resource *r)
 {
@@ -805,12 +815,7 @@
 		nor->dev = dev;
 		nor->priv = chip;
 		spi_nor_set_flash_node(nor, child);
-		nor->read = aspeed_smc_read_user;
-		nor->write = aspeed_smc_write_user;
-		nor->read_reg = aspeed_smc_read_reg;
-		nor->write_reg = aspeed_smc_write_reg;
-		nor->prepare = aspeed_smc_prep;
-		nor->unprepare = aspeed_smc_unprep;
+		nor->controller_ops = &aspeed_smc_controller_ops;
 
 		ret = aspeed_smc_chip_setup_init(chip, r);
 		if (ret)
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
similarity index 95%
rename from drivers/mtd/spi-nor/hisi-sfc.c
rename to drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 8fcc480..fd2c19a 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * HiSilicon SPI Nor Flash Controller Driver
+ * HiSilicon FMC SPI NOR flash controller driver
  *
  * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
  */
@@ -144,7 +144,7 @@
 	writel(reg, host->regbase + FMC_SPI_TIMING_CFG);
 }
 
-static int hisi_spi_nor_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+static int hisi_spi_nor_prep(struct spi_nor *nor)
 {
 	struct hifmc_priv *priv = nor->priv;
 	struct hifmc_host *host = priv->host;
@@ -167,7 +167,7 @@
 	return ret;
 }
 
-static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+static void hisi_spi_nor_unprep(struct spi_nor *nor)
 {
 	struct hifmc_priv *priv = nor->priv;
 	struct hifmc_host *host = priv->host;
@@ -177,7 +177,7 @@
 }
 
 static int hisi_spi_nor_op_reg(struct spi_nor *nor,
-				u8 opcode, int len, u8 optype)
+				u8 opcode, size_t len, u8 optype)
 {
 	struct hifmc_priv *priv = nor->priv;
 	struct hifmc_host *host = priv->host;
@@ -200,7 +200,7 @@
 }
 
 static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
-		int len)
+				 size_t len)
 {
 	struct hifmc_priv *priv = nor->priv;
 	struct hifmc_host *host = priv->host;
@@ -215,7 +215,7 @@
 }
 
 static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode,
-				u8 *buf, int len)
+				  const u8 *buf, size_t len)
 {
 	struct hifmc_priv *priv = nor->priv;
 	struct hifmc_host *host = priv->host;
@@ -311,6 +311,15 @@
 	return len;
 }
 
+static const struct spi_nor_controller_ops hisi_controller_ops = {
+	.prepare = hisi_spi_nor_prep,
+	.unprepare = hisi_spi_nor_unprep,
+	.read_reg = hisi_spi_nor_read_reg,
+	.write_reg = hisi_spi_nor_write_reg,
+	.read = hisi_spi_nor_read,
+	.write = hisi_spi_nor_write,
+};
+
 /**
 * Get SPI flash device information and register it as an mtd device.
  */
@@ -357,14 +366,8 @@
 	}
 	priv->host = host;
 	nor->priv = priv;
+	nor->controller_ops = &hisi_controller_ops;
 
-	nor->prepare = hisi_spi_nor_prep;
-	nor->unprepare = hisi_spi_nor_unprep;
-	nor->read_reg = hisi_spi_nor_read_reg;
-	nor->write_reg = hisi_spi_nor_write_reg;
-	nor->read = hisi_spi_nor_read;
-	nor->write = hisi_spi_nor_write;
-	nor->erase = NULL;
 	ret = spi_nor_scan(nor, NULL, &hwcaps);
 	if (ret)
 		return ret;
@@ -474,7 +477,6 @@
 
 	hisi_spi_nor_unregister_all(host);
 	mutex_destroy(&host->lock);
-	clk_disable_unprepare(host->clk);
 	return 0;
 }
 
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
similarity index 81%
rename from drivers/mtd/spi-nor/intel-spi-pci.c
rename to drivers/mtd/spi-nor/controllers/intel-spi-pci.c
index 3cda8e7..555fe55 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
@@ -20,6 +20,10 @@
 	.type = INTEL_SPI_BXT,
 };
 
+static const struct intel_spi_boardinfo cnl_info = {
+	.type = INTEL_SPI_CNL,
+};
+
 static int intel_spi_pci_probe(struct pci_dev *pdev,
 			       const struct pci_device_id *id)
 {
@@ -61,13 +65,20 @@
 
 static const struct pci_device_id intel_spi_pci_ids[] = {
 	{ PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info },
+	{ PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info },
 	{ PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
 	{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
+	{ PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info },
 	{ PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
+	{ PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
 	{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
+	{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
+	{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
 	{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
 	{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
 	{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
+	{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
+	{ PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&bxt_info },
 	{ },
 };
 MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/mtd/spi-nor/intel-spi-platform.c b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c
similarity index 100%
rename from drivers/mtd/spi-nor/intel-spi-platform.c
rename to drivers/mtd/spi-nor/controllers/intel-spi-platform.c
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c
similarity index 93%
rename from drivers/mtd/spi-nor/intel-spi.c
rename to drivers/mtd/spi-nor/controllers/intel-spi.c
index 43e55a2..b54a56a 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi.c
@@ -108,6 +108,10 @@
 #define BXT_FREG_NUM			12
 #define BXT_PR_NUM			6
 
+#define CNL_PR				0x84
+#define CNL_FREG_NUM			6
+#define CNL_PR_NUM			5
+
 #define LVSCC				0xc4
 #define UVSCC				0xc8
 #define ERASE_OPCODE_SHIFT		8
@@ -187,12 +191,16 @@
 		dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
 			readl(ispi->pregs + PR(i)));
 
-	value = readl(ispi->sregs + SSFSTS_CTL);
-	dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
-	dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
-		readl(ispi->sregs + PREOP_OPTYPE));
-	dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", readl(ispi->sregs + OPMENU0));
-	dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", readl(ispi->sregs + OPMENU1));
+	if (ispi->sregs) {
+		value = readl(ispi->sregs + SSFSTS_CTL);
+		dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
+		dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
+			readl(ispi->sregs + PREOP_OPTYPE));
+		dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
+			readl(ispi->sregs + OPMENU0));
+		dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
+			readl(ispi->sregs + OPMENU1));
+	}
 
 	if (ispi->info->type == INTEL_SPI_BYT)
 		dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
@@ -284,7 +292,7 @@
 	u32 val;
 
 	return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
-				  !(val & HSFSTS_CTL_SCIP), 40,
+				  !(val & HSFSTS_CTL_SCIP), 0,
 				  INTEL_SPI_TIMEOUT * 1000);
 }
 
@@ -293,7 +301,7 @@
 	u32 val;
 
 	return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
-				  !(val & SSFSTS_CTL_SCIP), 40,
+				  !(val & SSFSTS_CTL_SCIP), 0,
 				  INTEL_SPI_TIMEOUT * 1000);
 }
 
@@ -340,6 +348,13 @@
 		ispi->erase_64k = true;
 		break;
 
+	case INTEL_SPI_CNL:
+		ispi->sregs = NULL;
+		ispi->pregs = ispi->base + CNL_PR;
+		ispi->nregions = CNL_FREG_NUM;
+		ispi->pr_num = CNL_PR_NUM;
+		break;
+
 	default:
 		return -EINVAL;
 	}
@@ -367,6 +382,11 @@
 		    !(uvscc & ERASE_64K_OPCODE_MASK))
 			ispi->erase_64k = false;
 
+	if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) {
+		dev_err(ispi->dev, "software sequencer not supported, but required\n");
+		return -EINVAL;
+	}
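+
+	/*
+	 * The check above matters for Cannon Lake-class controllers, which
+	 * have no software sequencer MMIO region (sregs stays NULL), so a
+	 * configuration that would need SW-sequenced register or erase
+	 * cycles is refused up front rather than faulting on a NULL base
+	 * later.
+	 */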
+
 	/*
 	 * Some controllers can only do basic operations using hardware
 	 * sequencer. All other operations are supposed to be carried out
@@ -383,7 +403,7 @@
 	val = readl(ispi->base + HSFSTS_CTL);
 	ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
 
-	if (ispi->locked) {
+	if (ispi->locked && ispi->sregs) {
 		/*
 		 * BIOS programs allowed opcodes and then locks down the
 		 * register. So read back what opcodes it decided to support.
@@ -426,7 +446,7 @@
 	return 0;
 }
 
-static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len)
 {
 	u32 val, status;
 	int ret;
@@ -469,7 +489,7 @@
 	return 0;
 }
 
-static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
 			      int optype)
 {
 	u32 val = 0, status;
@@ -535,7 +555,8 @@
 	return 0;
 }
 
-static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+			      size_t len)
 {
 	struct intel_spi *ispi = nor->priv;
 	int ret;
@@ -555,7 +576,8 @@
 	return intel_spi_read_block(ispi, buf, len);
 }
 
-static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+			       size_t len)
 {
 	struct intel_spi *ispi = nor->priv;
 	int ret;
@@ -590,6 +612,15 @@
 		return 0;
 	}
 
+	/*
+	 * We hope the HW sequencer will do the right thing automatically;
+	 * with the SW sequencer we cannot use a preopcode anyway. So just
+	 * ignore the Write Disable operation and pretend it completed
+	 * successfully.
+	 */
+	if (opcode == SPINOR_OP_WRDI)
+		return 0;
+
 	writel(0, ispi->base + FADDR);
 
 	/* Write the value beforehand */
@@ -864,6 +895,14 @@
 	}
 }
 
+static const struct spi_nor_controller_ops intel_spi_controller_ops = {
+	.read_reg = intel_spi_read_reg,
+	.write_reg = intel_spi_write_reg,
+	.read = intel_spi_read,
+	.write = intel_spi_write,
+	.erase = intel_spi_erase,
+};
+
 struct intel_spi *intel_spi_probe(struct device *dev,
 	struct resource *mem, const struct intel_spi_boardinfo *info)
 {
@@ -897,11 +936,7 @@
 
 	ispi->nor.dev = ispi->dev;
 	ispi->nor.priv = ispi;
-	ispi->nor.read_reg = intel_spi_read_reg;
-	ispi->nor.write_reg = intel_spi_write_reg;
-	ispi->nor.read = intel_spi_read;
-	ispi->nor.write = intel_spi_write;
-	ispi->nor.erase = intel_spi_erase;
+	ispi->nor.controller_ops = &intel_spi_controller_ops;
 
 	ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
 	if (ret) {
diff --git a/drivers/mtd/spi-nor/intel-spi.h b/drivers/mtd/spi-nor/controllers/intel-spi.h
similarity index 100%
rename from drivers/mtd/spi-nor/intel-spi.h
rename to drivers/mtd/spi-nor/controllers/intel-spi.h
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
similarity index 94%
rename from drivers/mtd/spi-nor/nxp-spifi.c
rename to drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 4a87158..5703e83 100644
--- a/drivers/mtd/spi-nor/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * SPI-NOR driver for NXP SPI Flash Interface (SPIFI)
+ * SPI NOR driver for NXP SPI Flash Interface (SPIFI)
  *
  * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
  *
@@ -123,7 +123,8 @@
 	return ret;
 }
 
-static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+			      size_t len)
 {
 	struct nxp_spifi *spifi = nor->priv;
 	u32 cmd;
@@ -145,7 +146,8 @@
 	return nxp_spifi_wait_for_cmd(spifi);
 }
 
-static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+			       size_t len)
 {
 	struct nxp_spifi *spifi = nor->priv;
 	u32 cmd;
@@ -263,9 +265,18 @@
 static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
 {
 	u8 id[SPI_NOR_MAX_ID_LEN];
-	nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+	nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+				      SPI_NOR_MAX_ID_LEN);
 }
 
+static const struct spi_nor_controller_ops nxp_spifi_controller_ops = {
+	.read_reg  = nxp_spifi_read_reg,
+	.write_reg = nxp_spifi_write_reg,
+	.read  = nxp_spifi_read,
+	.write = nxp_spifi_write,
+	.erase = nxp_spifi_erase,
+};
+
 static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
 				 struct device_node *np)
 {
@@ -332,11 +343,7 @@
 	spifi->nor.dev   = spifi->dev;
 	spi_nor_set_flash_node(&spifi->nor, np);
 	spifi->nor.priv  = spifi;
-	spifi->nor.read  = nxp_spifi_read;
-	spifi->nor.write = nxp_spifi_write;
-	spifi->nor.erase = nxp_spifi_erase;
-	spifi->nor.read_reg  = nxp_spifi_read_reg;
-	spifi->nor.write_reg = nxp_spifi_write_reg;
+	spifi->nor.controller_ops = &nxp_spifi_controller_ops;
 
 	/*
 	 * The first read on a hard reset isn't reliable so do a
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
new file mode 100644
index 0000000..2b26a87
--- /dev/null
+++ b/drivers/mtd/spi-nor/core.c
@@ -0,0 +1,3505 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/math64.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/of_platform.h>
+#include <linux/sched/task_stack.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+/* Define max times to check status register before we give up. */
+
+/*
+ * For everything but full-chip erase; probably could be much smaller, but kept
+ * around for safety for now
+ */
+#define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)
+
+/*
+ * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
+ * for larger flash
+ */
+#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES	(40UL * HZ)
+
+#define SPI_NOR_MAX_ADDR_WIDTH	4
+
+/**
+ * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
+ *                           transfer
+ * @nor:        pointer to 'struct spi_nor'
+ * @op:         pointer to 'struct spi_mem_op' template for transfer
+ *
+ * If we have to use the bounce buffer, the data field in @op will be updated.
+ *
+ * Return: true if the bounce buffer is needed, false if not
+ */
+static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
+{
+	/* op->data.buf.in occupies the same memory as op->data.buf.out */
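+	/*
+	 * Illustrative note: stack buffers and buffers without a valid
+	 * linear mapping cannot be safely DMA-mapped, so such data is
+	 * bounced through nor->bouncebuf instead.
+	 */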
+	if (object_is_on_stack(op->data.buf.in) ||
+	    !virt_addr_valid(op->data.buf.in)) {
+		if (op->data.nbytes > nor->bouncebuf_size)
+			op->data.nbytes = nor->bouncebuf_size;
+		op->data.buf.in = nor->bouncebuf;
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * spi_nor_spimem_exec_op() - execute a memory operation
+ * @nor:        pointer to 'struct spi_nor'
+ * @op:         pointer to 'struct spi_mem_op' template for transfer
+ *
+ * Return: 0 on success, -error otherwise.
+ */
+static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
+{
+	int error;
+
+	error = spi_mem_adjust_op_size(nor->spimem, op);
+	if (error)
+		return error;
+
+	return spi_mem_exec_op(nor->spimem, op);
+}
+
+/**
+ * spi_nor_spimem_read_data() - read data from flash's memory region via
+ *                              spi-mem
+ * @nor:        pointer to 'struct spi_nor'
+ * @from:       offset to read from
+ * @len:        number of bytes to read
+ * @buf:        pointer to dst buffer
+ *
+ * Return: number of bytes read successfully, -errno otherwise
+ */
+static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
+					size_t len, u8 *buf)
+{
+	struct spi_mem_op op =
+		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
+			   SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
+			   SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
+			   SPI_MEM_OP_DATA_IN(len, buf, 1));
+	bool usebouncebuf;
+	ssize_t nbytes;
+	int error;
+
+	/* get transfer protocols. */
+	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
+	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
+	op.dummy.buswidth = op.addr.buswidth;
+	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+
+	/* convert the dummy cycles to the number of bytes */
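+	/*
+	 * Illustrative example: 8 dummy clock cycles on a 4-bit wide bus
+	 * carry 8 * 4 = 32 bits, so op.dummy.nbytes = 32 / 8 = 4.
+	 */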
+	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
+
+	usebouncebuf = spi_nor_spimem_bounce(nor, &op);
+
+	if (nor->dirmap.rdesc) {
+		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
+					     op.data.nbytes, op.data.buf.in);
+	} else {
+		error = spi_nor_spimem_exec_op(nor, &op);
+		if (error)
+			return error;
+		nbytes = op.data.nbytes;
+	}
+
+	if (usebouncebuf && nbytes > 0)
+		memcpy(buf, op.data.buf.in, nbytes);
+
+	return nbytes;
+}
+
+/**
+ * spi_nor_read_data() - read data from flash memory
+ * @nor:        pointer to 'struct spi_nor'
+ * @from:       offset to read from
+ * @len:        number of bytes to read
+ * @buf:        pointer to dst buffer
+ *
+ * Return: number of bytes read successfully, -errno otherwise
+ */
+ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
+{
+	if (nor->spimem)
+		return spi_nor_spimem_read_data(nor, from, len, buf);
+
+	return nor->controller_ops->read(nor, from, len, buf);
+}
+
+/**
+ * spi_nor_spimem_write_data() - write data to flash memory via
+ *                               spi-mem
+ * @nor:        pointer to 'struct spi_nor'
+ * @to:         offset to write to
+ * @len:        number of bytes to write
+ * @buf:        pointer to src buffer
+ *
+ * Return: number of bytes written successfully, -errno otherwise
+ */
+static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
+					 size_t len, const u8 *buf)
+{
+	struct spi_mem_op op =
+		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
+			   SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
+			   SPI_MEM_OP_NO_DUMMY,
+			   SPI_MEM_OP_DATA_OUT(len, buf, 1));
+	ssize_t nbytes;
+	int error;
+
+	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
+	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
+	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
+	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+		op.addr.nbytes = 0;
+
+	if (spi_nor_spimem_bounce(nor, &op))
+		memcpy(nor->bouncebuf, buf, op.data.nbytes);
+
+	if (nor->dirmap.wdesc) {
+		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
+					      op.data.nbytes, op.data.buf.out);
+	} else {
+		error = spi_nor_spimem_exec_op(nor, &op);
+		if (error)
+			return error;
+		nbytes = op.data.nbytes;
+	}
+
+	return nbytes;
+}
+
+/**
+ * spi_nor_write_data() - write data to flash memory
+ * @nor:        pointer to 'struct spi_nor'
+ * @to:         offset to write to
+ * @len:        number of bytes to write
+ * @buf:        pointer to src buffer
+ *
+ * Return: number of bytes written successfully, -errno otherwise
+ */
+ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+			   const u8 *buf)
+{
+	if (nor->spimem)
+		return spi_nor_spimem_write_data(nor, to, len, buf);
+
+	return nor->controller_ops->write(nor, to, len, buf);
+}
+
+/**
+ * spi_nor_write_enable() - Set write enable latch with Write Enable command.
+ * @nor:	pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_enable(struct spi_nor *nor)
+{
+	int ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_NO_DATA);
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
+						     NULL, 0);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
+
+	return ret;
+}
+
+/**
+ * spi_nor_write_disable() - Send Write Disable instruction to the chip.
+ * @nor:	pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_disable(struct spi_nor *nor)
+{
+	int ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_NO_DATA);
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
+						     NULL, 0);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
+
+	return ret;
+}
+
+/**
+ * spi_nor_read_sr() - Read the Status Register.
+ * @nor:	pointer to 'struct spi_nor'.
+ * @sr:		pointer to a DMA-able buffer where the value of the
+ *              Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
+{
+	int ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_DATA_IN(1, sr, 1));
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
+						    sr, 1);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d reading SR\n", ret);
+
+	return ret;
+}
+
+/**
+ * spi_nor_read_fsr() - Read the Flag Status Register.
+ * @nor:	pointer to 'struct spi_nor'
+ * @fsr:	pointer to a DMA-able buffer where the value of the
+ *              Flag Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
+{
+	int ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_DATA_IN(1, fsr, 1));
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
+						    fsr, 1);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d reading FSR\n", ret);
+
+	return ret;
+}
+
+/**
+ * spi_nor_read_cr() - Read the Configuration Register using the
+ * SPINOR_OP_RDCR (35h) command.
+ * @nor:	pointer to 'struct spi_nor'
+ * @cr:		pointer to a DMA-able buffer where the value of the
+ *              Configuration Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
+{
+	int ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_DATA_IN(1, cr, 1));
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d reading CR\n", ret);
+
+	return ret;
+}
+
+/**
+ * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
+ * @nor:	pointer to 'struct spi_nor'.
+ * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
+ *		address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+	int ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
+						  SPINOR_OP_EN4B :
+						  SPINOR_OP_EX4B,
+						  1),
+				  SPI_MEM_OP_NO_ADDR,
+				  SPI_MEM_OP_NO_DUMMY,
+				  SPI_MEM_OP_NO_DATA);
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->write_reg(nor,
+						     enable ? SPINOR_OP_EN4B :
+							      SPINOR_OP_EX4B,
+						     NULL, 0);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+	return ret;
+}
+
+/**
+ * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
+ * flashes.
+ * @nor:	pointer to 'struct spi_nor'.
+ * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
+ *		address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+	int ret;
+
+	nor->bouncebuf[0] = enable << 7;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
+						     nor->bouncebuf, 1);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+	return ret;
+}
+
+/**
+ * spi_nor_write_ear() - Write Extended Address Register.
+ * @nor:	pointer to 'struct spi_nor'.
+ * @ear:	value to write to the Extended Address Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
+{
+	int ret;
+
+	nor->bouncebuf[0] = ear;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
+						     nor->bouncebuf, 1);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d writing EAR\n", ret);
+
+	return ret;
+}
+
+/**
+ * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
+ * @nor:	pointer to 'struct spi_nor'.
+ * @sr:		pointer to a DMA-able buffer where the value of the
+ *              Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
+{
+	int ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_DATA_IN(1, sr, 1));
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
+						    sr, 1);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
+
+	return ret;
+}
+
+/**
+ * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if
+ * the flash is ready for new commands.
+ * @nor:	pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_xsr_ready(struct spi_nor *nor)
+{
+	int ret;
+
+	ret = spi_nor_xread_sr(nor, nor->bouncebuf);
+	if (ret)
+		return ret;
+
+	return !!(nor->bouncebuf[0] & XSR_RDY);
+}
+
+/**
+ * spi_nor_clear_sr() - Clear the Status Register.
+ * @nor:	pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_sr(struct spi_nor *nor)
+{
+	int ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_NO_DATA);
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
+						     NULL, 0);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d clearing SR\n", ret);
+}
+
+/**
+ * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
+ * for new commands.
+ * @nor:	pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_sr_ready(struct spi_nor *nor)
+{
+	int ret = spi_nor_read_sr(nor, nor->bouncebuf);
+
+	if (ret)
+		return ret;
+
+	if (nor->flags & SNOR_F_USE_CLSR &&
+	    nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
+		if (nor->bouncebuf[0] & SR_E_ERR)
+			dev_err(nor->dev, "Erase Error occurred\n");
+		else
+			dev_err(nor->dev, "Programming Error occurred\n");
+
+		spi_nor_clear_sr(nor);
+
+		/*
+		 * WEL bit remains set to one when an erase or page program
+		 * error occurs. Issue a Write Disable command to protect
+		 * against inadvertent writes that can possibly corrupt the
+		 * contents of the memory.
+		 */
+		ret = spi_nor_write_disable(nor);
+		if (ret)
+			return ret;
+
+		return -EIO;
+	}
+
+	return !(nor->bouncebuf[0] & SR_WIP);
+}
+
+/**
+ * spi_nor_clear_fsr() - Clear the Flag Status Register.
+ * @nor:	pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_fsr(struct spi_nor *nor)
+{
+	int ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_NO_DATA);
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
+						     NULL, 0);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
+}
+
+/**
+ * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
+ * ready for new commands.
+ * @nor:	pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_fsr_ready(struct spi_nor *nor)
+{
+	int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
+
+	if (ret)
+		return ret;
+
+	if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
+		if (nor->bouncebuf[0] & FSR_E_ERR)
+			dev_err(nor->dev, "Erase operation failed.\n");
+		else
+			dev_err(nor->dev, "Program operation failed.\n");
+
+		if (nor->bouncebuf[0] & FSR_PT_ERR)
+			dev_err(nor->dev,
+				"Attempted to modify a protected sector.\n");
+
+		spi_nor_clear_fsr(nor);
+
+		/*
+		 * WEL bit remains set to one when an erase or page program
+		 * error occurs. Issue a Write Disable command to protect
+		 * against inadvertent writes that can possibly corrupt the
+		 * contents of the memory.
+		 */
+		ret = spi_nor_write_disable(nor);
+		if (ret)
+			return ret;
+
+		return -EIO;
+	}
+
+	return !!(nor->bouncebuf[0] & FSR_READY);
+}
+
+/**
+ * spi_nor_ready() - Query the flash to see if it is ready for new commands.
+ * @nor:	pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_ready(struct spi_nor *nor)
+{
+	int sr, fsr;
+
+	if (nor->flags & SNOR_F_READY_XSR_RDY)
+		sr = spi_nor_xsr_ready(nor);
+	else
+		sr = spi_nor_sr_ready(nor);
+	if (sr < 0)
+		return sr;
+	fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
+	if (fsr < 0)
+		return fsr;
+	return sr && fsr;
+}
+
+/**
+ * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
+ * Status Register until ready, or timeout occurs.
+ * @nor:		pointer to "struct spi_nor".
+ * @timeout_jiffies:	jiffies to wait until timeout.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
+						unsigned long timeout_jiffies)
+{
+	unsigned long deadline;
+	int timeout = 0, ret;
+
+	deadline = jiffies + timeout_jiffies;
+
+	while (!timeout) {
+		if (time_after_eq(jiffies, deadline))
+			timeout = 1;
+
+		ret = spi_nor_ready(nor);
+		if (ret < 0)
+			return ret;
+		if (ret)
+			return 0;
+
+		cond_resched();
+	}
+
+	dev_dbg(nor->dev, "flash operation timed out\n");
+
+	return -ETIMEDOUT;
+}
+
+/**
+ * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
+ * flash to be ready, or timeout occurs.
+ * @nor:	pointer to "struct spi_nor".
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+	return spi_nor_wait_till_ready_with_timeout(nor,
+						    DEFAULT_READY_WAIT_JIFFIES);
+}
+
+/**
+ * spi_nor_write_sr() - Write the Status Register.
+ * @nor:	pointer to 'struct spi_nor'.
+ * @sr:		pointer to DMA-able buffer to write to the Status Register.
+ * @len:	number of bytes to write to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
+{
+	int ret;
+
+	ret = spi_nor_write_enable(nor);
+	if (ret)
+		return ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_DATA_OUT(len, sr, 1));
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
+						     sr, len);
+	}
+
+	if (ret) {
+		dev_dbg(nor->dev, "error %d writing SR\n", ret);
+		return ret;
+	}
+
+	return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
+ * ensure that the byte written matches the received value.
+ * @nor:	pointer to a 'struct spi_nor'.
+ * @sr1:	byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
+{
+	int ret;
+
+	nor->bouncebuf[0] = sr1;
+
+	ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
+	if (ret)
+		return ret;
+
+	ret = spi_nor_read_sr(nor, nor->bouncebuf);
+	if (ret)
+		return ret;
+
+	if (nor->bouncebuf[0] != sr1) {
+		dev_dbg(nor->dev, "SR1: read back test failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
+ * Status Register 2 in one shot. Ensure that the byte written in the Status
+ * Register 1 matches the received value, and that the 16-bit Write did not
+ * affect what was already in the Status Register 2.
+ * @nor:	pointer to a 'struct spi_nor'.
+ * @sr1:	byte value to be written to the Status Register 1.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+	int ret;
+	u8 *sr_cr = nor->bouncebuf;
+	u8 cr_written;
+
+	/* Make sure we don't overwrite the contents of Status Register 2. */
+	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
+		ret = spi_nor_read_cr(nor, &sr_cr[1]);
+		if (ret)
+			return ret;
+	} else if (nor->params->quad_enable) {
+		/*
+		 * If the Status Register 2 Read command (35h) is not
+		 * supported, we should at least be sure we don't
+		 * change the value of the SR2 Quad Enable bit.
+		 *
+		 * We can safely assume that when the Quad Enable method is
+		 * set, the value of the QE bit is one, as a consequence of the
+		 * nor->params->quad_enable() call.
+		 *
+		 * We can safely assume that the Quad Enable bit is present in
+		 * the Status Register 2 at BIT(1). According to the JESD216
+		 * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
+		 * Write Status (01h) command is available just for the cases
+		 * in which the QE bit is described in SR2 at BIT(1).
+		 */
+		sr_cr[1] = SR2_QUAD_EN_BIT1;
+	} else {
+		sr_cr[1] = 0;
+	}
+
+	sr_cr[0] = sr1;
+
+	ret = spi_nor_write_sr(nor, sr_cr, 2);
+	if (ret)
+		return ret;
+
+	if (nor->flags & SNOR_F_NO_READ_CR)
+		return 0;
+
+	cr_written = sr_cr[1];
+
+	ret = spi_nor_read_cr(nor, &sr_cr[1]);
+	if (ret)
+		return ret;
+
+	if (cr_written != sr_cr[1]) {
+		dev_dbg(nor->dev, "CR: read back test failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
+ * Configuration Register in one shot. Ensure that the byte written in the
+ * Configuration Register matches the received value, and that the 16-bit Write
+ * did not affect what was already in the Status Register 1.
+ * @nor:	pointer to a 'struct spi_nor'.
+ * @cr:		byte value to be written to the Configuration Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
+{
+	int ret;
+	u8 *sr_cr = nor->bouncebuf;
+	u8 sr_written;
+
+	/* Keep the current value of the Status Register 1. */
+	ret = spi_nor_read_sr(nor, sr_cr);
+	if (ret)
+		return ret;
+
+	sr_cr[1] = cr;
+
+	ret = spi_nor_write_sr(nor, sr_cr, 2);
+	if (ret)
+		return ret;
+
+	sr_written = sr_cr[0];
+
+	ret = spi_nor_read_sr(nor, sr_cr);
+	if (ret)
+		return ret;
+
+	if (sr_written != sr_cr[0]) {
+		dev_dbg(nor->dev, "SR: Read back test failed\n");
+		return -EIO;
+	}
+
+	if (nor->flags & SNOR_F_NO_READ_CR)
+		return 0;
+
+	ret = spi_nor_read_cr(nor, &sr_cr[1]);
+	if (ret)
+		return ret;
+
+	if (cr != sr_cr[1]) {
+		dev_dbg(nor->dev, "CR: read back test failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
+ * the byte written matches the received value without affecting other bits
+ * in the Status Registers 1 and 2.
+ * @nor:	pointer to a 'struct spi_nor'.
+ * @sr1:	byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+	if (nor->flags & SNOR_F_HAS_16BIT_SR)
+		return spi_nor_write_16bit_sr_and_check(nor, sr1);
+
+	return spi_nor_write_sr1_and_check(nor, sr1);
+}
+
+/**
+ * spi_nor_write_sr2() - Write the Status Register 2 using the
+ * SPINOR_OP_WRSR2 (3eh) command.
+ * @nor:	pointer to 'struct spi_nor'.
+ * @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
+{
+	int ret;
+
+	ret = spi_nor_write_enable(nor);
+	if (ret)
+		return ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_DATA_OUT(1, sr2, 1));
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
+						     sr2, 1);
+	}
+
+	if (ret) {
+		dev_dbg(nor->dev, "error %d writing SR2\n", ret);
+		return ret;
+	}
+
+	return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_read_sr2() - Read the Status Register 2 using the
+ * SPINOR_OP_RDSR2 (3fh) command.
+ * @nor:	pointer to 'struct spi_nor'.
+ * @sr2:	pointer to DMA-able buffer where the value of the
+ *		Status Register 2 will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
+{
+	int ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_DATA_IN(1, sr2, 1));
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
+						    sr2, 1);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d reading SR2\n", ret);
+
+	return ret;
+}
+
+/**
+ * spi_nor_erase_chip() - Erase the entire flash memory.
+ * @nor:	pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_erase_chip(struct spi_nor *nor)
+{
+	int ret;
+
+	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_NO_DATA);
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
+						     NULL, 0);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d erasing chip\n", ret);
+
+	return ret;
+}
+
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size; i++)
+		if (table[i][0] == opcode)
+			return table[i][1];
+
+	/* No conversion found, keep input op code. */
+	return opcode;
+}
+
+u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+	static const u8 spi_nor_3to4_read[][2] = {
+		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
+		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
+		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
+		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
+		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
+		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
+		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
+		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },
+
+		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
+		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
+		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
+	};
+
+	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+				      ARRAY_SIZE(spi_nor_3to4_read));
+}
+
+static u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+	static const u8 spi_nor_3to4_program[][2] = {
+		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
+		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
+		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
+		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
+		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
+	};
+
+	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+				      ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+static u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+	static const u8 spi_nor_3to4_erase[][2] = {
+		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
+		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
+		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
+	};
+
+	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+				      ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
+{
+	return !!nor->params->erase_map.uniform_erase_type;
+}
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
+{
+	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+
+	if (!spi_nor_has_uniform_erase(nor)) {
+		struct spi_nor_erase_map *map = &nor->params->erase_map;
+		struct spi_nor_erase_type *erase;
+		int i;
+
+		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+			erase = &map->erase_type[i];
+			erase->opcode =
+				spi_nor_convert_3to4_erase(erase->opcode);
+		}
+	}
+}
+
+int spi_nor_lock_and_prep(struct spi_nor *nor)
+{
+	int ret = 0;
+
+	mutex_lock(&nor->lock);
+
+	if (nor->controller_ops && nor->controller_ops->prepare) {
+		ret = nor->controller_ops->prepare(nor);
+		if (ret) {
+			mutex_unlock(&nor->lock);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+void spi_nor_unlock_and_unprep(struct spi_nor *nor)
+{
+	if (nor->controller_ops && nor->controller_ops->unprepare)
+		nor->controller_ops->unprepare(nor);
+	mutex_unlock(&nor->lock);
+}
+
+static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
+{
+	if (!nor->params->convert_addr)
+		return addr;
+
+	return nor->params->convert_addr(nor, addr);
+}
+
+/*
+ * Initiate the erasure of a single sector
+ */
+static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
+{
+	int i;
+
+	addr = spi_nor_convert_addr(nor, addr);
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
+				   SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_NO_DATA);
+
+		return spi_mem_exec_op(nor->spimem, &op);
+	} else if (nor->controller_ops->erase) {
+		return nor->controller_ops->erase(nor, addr);
+	}
+
+	/*
+	 * Default implementation, if driver doesn't have a specialized HW
+	 * control
+	 */
+	for (i = nor->addr_width - 1; i >= 0; i--) {
+		nor->bouncebuf[i] = addr & 0xff;
+		addr >>= 8;
+	}
+
+	return nor->controller_ops->write_reg(nor, nor->erase_opcode,
+					      nor->bouncebuf, nor->addr_width);
+}
+
+/**
+ * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
+ * @erase:	pointer to a structure that describes a SPI NOR erase type
+ * @dividend:	dividend value
+ * @remainder:	pointer to u32 remainder (will be updated)
+ *
+ * Return: the result of the division
+ */
+static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
+				     u64 dividend, u32 *remainder)
+{
+	/* The JEDEC JESD216B standard requires erase sizes to be powers of 2. */
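+	/*
+	 * Illustrative example: a 64 KiB erase type has size_shift = 16 and
+	 * size_mask = 0xffff, so a dividend of 0x30000 yields *remainder = 0
+	 * and a quotient of 3.
+	 */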
+	*remainder = (u32)dividend & erase->size_mask;
+	return dividend >> erase->size_shift;
+}
+
+/**
+ * spi_nor_find_best_erase_type() - find the best erase type for the given
+ *				    offset in the serial flash memory and the
+ *				    number of bytes to erase. The region in
+ *				    which the address fits is expected to be
+ *				    provided.
+ * @map:	the erase map of the SPI NOR
+ * @region:	pointer to a structure that describes a SPI NOR erase region
+ * @addr:	offset in the serial flash memory
+ * @len:	number of bytes to erase
+ *
+ * Return: a pointer to the best fitted erase type, NULL otherwise.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
+			     const struct spi_nor_erase_region *region,
+			     u64 addr, u32 len)
+{
+	const struct spi_nor_erase_type *erase;
+	u32 rem;
+	int i;
+	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
+
+	/*
+	 * Erase types are ordered by size, with the smallest erase type at
+	 * index 0.
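+	 * The loop below walks from the largest type down, so the biggest
+	 * erase that still fits is preferred.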
+	 */
+	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+		/* Does the erase region support the tested erase type? */
+		if (!(erase_mask & BIT(i)))
+			continue;
+
+		erase = &map->erase_type[i];
+
+		/* Alignment is not mandatory for overlaid regions */
+		if (region->offset & SNOR_OVERLAID_REGION &&
+		    region->size <= len)
+			return erase;
+
+		/* Don't erase more than what the user has asked for. */
+		if (erase->size > len)
+			continue;
+
+		spi_nor_div_by_erase_size(erase, addr, &rem);
+		if (rem)
+			continue;
+		else
+			return erase;
+	}
+
+	return NULL;
+}
+
+static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
+{
+	return region->offset & SNOR_LAST_REGION;
+}
+
+static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
+{
+	return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
+}
+
+/**
+ * spi_nor_region_next() - get the next spi nor region
+ * @region:	pointer to a structure that describes a SPI NOR erase region
+ *
+ * Return: the next spi nor region or NULL if last region.
+ */
+struct spi_nor_erase_region *
+spi_nor_region_next(struct spi_nor_erase_region *region)
+{
+	if (spi_nor_region_is_last(region))
+		return NULL;
+	region++;
+	return region;
+}
+
+/**
+ * spi_nor_find_erase_region() - find the region of the serial flash memory in
+ *				 which the offset fits
+ * @map:	the erase map of the SPI NOR
+ * @addr:	offset in the serial flash memory
+ *
+ * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
+ *	   otherwise.
+ */
+static struct spi_nor_erase_region *
+spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
+{
+	struct spi_nor_erase_region *region = map->regions;
+	u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+	u64 region_end = region_start + region->size;
+
+	while (addr < region_start || addr >= region_end) {
+		region = spi_nor_region_next(region);
+		if (!region)
+			return ERR_PTR(-EINVAL);
+
+		region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+		region_end = region_start + region->size;
+	}
+
+	return region;
+}
+
+/**
+ * spi_nor_init_erase_cmd() - initialize an erase command
+ * @region:	pointer to a structure that describes a SPI NOR erase region
+ * @erase:	pointer to a structure that describes a SPI NOR erase type
+ *
+ * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
+ *	   otherwise.
+ */
+static struct spi_nor_erase_command *
+spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
+		       const struct spi_nor_erase_type *erase)
+{
+	struct spi_nor_erase_command *cmd;
+
+	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&cmd->list);
+	cmd->opcode = erase->opcode;
+	cmd->count = 1;
+
+	if (region->offset & SNOR_OVERLAID_REGION)
+		cmd->size = region->size;
+	else
+		cmd->size = erase->size;
+
+	return cmd;
+}
+
+/**
+ * spi_nor_destroy_erase_cmd_list() - destroy erase command list
+ * @erase_list:	list of erase commands
+ */
+static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
+{
+	struct spi_nor_erase_command *cmd, *next;
+
+	list_for_each_entry_safe(cmd, next, erase_list, list) {
+		list_del(&cmd->list);
+		kfree(cmd);
+	}
+}
+
+/**
+ * spi_nor_init_erase_cmd_list() - initialize erase command list
+ * @nor:	pointer to a 'struct spi_nor'
+ * @erase_list:	list of erase commands to be executed once we validate that the
+ *		erase can be performed
+ * @addr:	offset in the serial flash memory
+ * @len:	number of bytes to erase
+ *
+ * Builds the list of best-fitted erase commands and verifies that the erase can
+ * be performed.
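+ *
+ * For instance (illustrative): erasing 96 KiB starting at a 64 KiB aligned
+ * offset in a region that supports 4 KiB, 32 KiB and 64 KiB erase types
+ * yields one 64 KiB command followed by one 32 KiB command.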
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
+				       struct list_head *erase_list,
+				       u64 addr, u32 len)
+{
+	const struct spi_nor_erase_map *map = &nor->params->erase_map;
+	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
+	struct spi_nor_erase_region *region;
+	struct spi_nor_erase_command *cmd = NULL;
+	u64 region_end;
+	int ret = -EINVAL;
+
+	region = spi_nor_find_erase_region(map, addr);
+	if (IS_ERR(region))
+		return PTR_ERR(region);
+
+	region_end = spi_nor_region_end(region);
+
+	while (len) {
+		erase = spi_nor_find_best_erase_type(map, region, addr, len);
+		if (!erase)
+			goto destroy_erase_cmd_list;
+
+		if (prev_erase != erase ||
+		    erase->size != cmd->size ||
+		    region->offset & SNOR_OVERLAID_REGION) {
+			cmd = spi_nor_init_erase_cmd(region, erase);
+			if (IS_ERR(cmd)) {
+				ret = PTR_ERR(cmd);
+				goto destroy_erase_cmd_list;
+			}
+
+			list_add_tail(&cmd->list, erase_list);
+		} else {
+			cmd->count++;
+		}
+
+		addr += cmd->size;
+		len -= cmd->size;
+
+		if (len && addr >= region_end) {
+			region = spi_nor_region_next(region);
+			if (!region)
+				goto destroy_erase_cmd_list;
+			region_end = spi_nor_region_end(region);
+		}
+
+		prev_erase = erase;
+	}
+
+	return 0;
+
+destroy_erase_cmd_list:
+	spi_nor_destroy_erase_cmd_list(erase_list);
+	return ret;
+}
+
+/**
+ * spi_nor_erase_multi_sectors() - perform a non-uniform erase
+ * @nor:	pointer to a 'struct spi_nor'
+ * @addr:	offset in the serial flash memory
+ * @len:	number of bytes to erase
+ *
+ * Build a list of best-fitted erase commands and execute it once we validate
+ * that the erase can be performed.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
+{
+	LIST_HEAD(erase_list);
+	struct spi_nor_erase_command *cmd, *next;
+	int ret;
+
+	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
+	if (ret)
+		return ret;
+
+	list_for_each_entry_safe(cmd, next, &erase_list, list) {
+		nor->erase_opcode = cmd->opcode;
+		while (cmd->count) {
+			ret = spi_nor_write_enable(nor);
+			if (ret)
+				goto destroy_erase_cmd_list;
+
+			ret = spi_nor_erase_sector(nor, addr);
+			if (ret)
+				goto destroy_erase_cmd_list;
+
+			addr += cmd->size;
+			cmd->count--;
+
+			ret = spi_nor_wait_till_ready(nor);
+			if (ret)
+				goto destroy_erase_cmd_list;
+		}
+		list_del(&cmd->list);
+		kfree(cmd);
+	}
+
+	return 0;
+
+destroy_erase_cmd_list:
+	spi_nor_destroy_erase_cmd_list(&erase_list);
+	return ret;
+}
+
+/*
+ * Erase an address range on the NOR chip. The address range may extend
+ * across one or more erase sectors. Return an error if there is a problem
+ * erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	u32 addr, len;
+	uint32_t rem;
+	int ret;
+
+	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
+			(long long)instr->len);
+
+	if (spi_nor_has_uniform_erase(nor)) {
+		div_u64_rem(instr->len, mtd->erasesize, &rem);
+		if (rem)
+			return -EINVAL;
+	}
+
+	addr = instr->addr;
+	len = instr->len;
+
+	ret = spi_nor_lock_and_prep(nor);
+	if (ret)
+		return ret;
+
+	/* whole-chip erase? */
+	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
+		unsigned long timeout;
+
+		ret = spi_nor_write_enable(nor);
+		if (ret)
+			goto erase_err;
+
+		ret = spi_nor_erase_chip(nor);
+		if (ret)
+			goto erase_err;
+
+		/*
+		 * Scale the timeout linearly with the size of the flash, with
+		 * a minimum calibrated to an old 2MB flash. We could try to
+		 * pull these from CFI/SFDP, but these values should be good
+		 * enough for now.
+		 */
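+		/*
+		 * For instance, a 16 MiB flash waits up to eight times the
+		 * 2 MiB calibration timeout.
+		 */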
+		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
+			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
+			      (unsigned long)(mtd->size / SZ_2M));
+		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+		if (ret)
+			goto erase_err;
+
+	/* REVISIT in some cases we could speed up erasing large regions
+	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.  We may have set up
+	 * to use "small sector erase", but that's not always optimal.
+	 */
+
+	/* "sector"-at-a-time erase */
+	} else if (spi_nor_has_uniform_erase(nor)) {
+		while (len) {
+			ret = spi_nor_write_enable(nor);
+			if (ret)
+				goto erase_err;
+
+			ret = spi_nor_erase_sector(nor, addr);
+			if (ret)
+				goto erase_err;
+
+			addr += mtd->erasesize;
+			len -= mtd->erasesize;
+
+			ret = spi_nor_wait_till_ready(nor);
+			if (ret)
+				goto erase_err;
+		}
+
+	/* erase multiple sectors */
+	} else {
+		ret = spi_nor_erase_multi_sectors(nor, addr, len);
+		if (ret)
+			goto erase_err;
+	}
+
+	ret = spi_nor_write_disable(nor);
+
+erase_err:
+	spi_nor_unlock_and_unprep(nor);
+
+	return ret;
+}
+
+static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor)
+{
+	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+
+	if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6)
+		return mask | SR_BP3_BIT6;
+
+	if (nor->flags & SNOR_F_HAS_4BIT_BP)
+		return mask | SR_BP3;
+
+	return mask;
+}
+
+static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
+{
+	if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+		return SR_TB_BIT6;
+	else
+		return SR_TB_BIT5;
+}
+
+static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
+{
+	unsigned int bp_slots, bp_slots_needed;
+	u8 mask = spi_nor_get_sr_bp_mask(nor);
+
+	/* Reserved one for "protect none" and one for "protect all". */
+	bp_slots = (1 << hweight8(mask)) - 2;
+	bp_slots_needed = ilog2(nor->info->n_sectors);
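+	/*
+	 * Illustrative example: three BP bits give 2^3 - 2 = 6 usable slots.
+	 * A flash with 128 sectors needs ilog2(128) = 7 slots, so the
+	 * minimum protected length becomes sector_size << (7 - 6), i.e. two
+	 * sectors.
+	 */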
+
+	if (bp_slots_needed > bp_slots)
+		return nor->info->sector_size <<
+			(bp_slots_needed - bp_slots);
+	else
+		return nor->info->sector_size;
+}
+
+static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
+					uint64_t *len)
+{
+	struct mtd_info *mtd = &nor->mtd;
+	u64 min_prot_len;
+	u8 mask = spi_nor_get_sr_bp_mask(nor);
+	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+	u8 bp, val = sr & mask;
+
+	if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6)
+		val = (val & ~SR_BP3_BIT6) | SR_BP3;
+
+	bp = val >> SR_BP_SHIFT;
+
+	if (!bp) {
+		/* No protection */
+		*ofs = 0;
+		*len = 0;
+		return;
+	}
+
+	min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+	*len = min_prot_len << (bp - 1);
+
+	if (*len > mtd->size)
+		*len = mtd->size;
+
+	if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
+		*ofs = 0;
+	else
+		*ofs = mtd->size - *len;
+}
+
+/*
+ * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
+ * @locked is false); 0 otherwise
+ */
+static int spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
+					uint64_t len, u8 sr, bool locked)
+{
+	loff_t lock_offs;
+	uint64_t lock_len;
+
+	if (!len)
+		return 1;
+
+	spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len);
+
+	if (locked)
+		/* Requested range is a sub-range of locked range */
+		return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+	else
+		/* Requested range does not overlap with locked range */
+		return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
+}
+
+static int spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+				u8 sr)
+{
+	return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
+}
+
+static int spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+				  u8 sr)
+{
+	return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
+}
+
+/*
+ * Lock a region of the flash. Compatible with ST Micro and similar flash.
+ * Supports the block protection bits BP{0,1,2}/BP{0,1,2,3} in the status
+ * register (SR). Does not support these features found in newer SR bitfields:
+ *   - SEC: sector/block protect - only handle SEC=0 (block protect)
+ *   - CMP: complement protect - only support CMP=0 (range is not complemented)
+ *
+ * Support for the following is provided conditionally for some flash:
+ *   - TB: top/bottom protect
+ *
+ * Sample table portion for 8MB flash (Winbond w25q64fw):
+ *
+ *   SEC  |  TB   |  BP2  |  BP1  |  BP0  |  Prot Length  | Protected Portion
+ *  --------------------------------------------------------------------------
+ *    X   |   X   |   0   |   0   |   0   |  NONE         | NONE
+ *    0   |   0   |   0   |   0   |   1   |  128 KB       | Upper 1/64
+ *    0   |   0   |   0   |   1   |   0   |  256 KB       | Upper 1/32
+ *    0   |   0   |   0   |   1   |   1   |  512 KB       | Upper 1/16
+ *    0   |   0   |   1   |   0   |   0   |  1 MB         | Upper 1/8
+ *    0   |   0   |   1   |   0   |   1   |  2 MB         | Upper 1/4
+ *    0   |   0   |   1   |   1   |   0   |  4 MB         | Upper 1/2
+ *    X   |   X   |   1   |   1   |   1   |  8 MB         | ALL
+ *  ------|-------|-------|-------|-------|---------------|-------------------
+ *    0   |   1   |   0   |   0   |   1   |  128 KB       | Lower 1/64
+ *    0   |   1   |   0   |   1   |   0   |  256 KB       | Lower 1/32
+ *    0   |   1   |   0   |   1   |   1   |  512 KB       | Lower 1/16
+ *    0   |   1   |   1   |   0   |   0   |  1 MB         | Lower 1/8
+ *    0   |   1   |   1   |   0   |   1   |  2 MB         | Lower 1/4
+ *    0   |   1   |   1   |   1   |   0   |  4 MB         | Lower 1/2
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+	struct mtd_info *mtd = &nor->mtd;
+	u64 min_prot_len;
+	int ret, status_old, status_new;
+	u8 mask = spi_nor_get_sr_bp_mask(nor);
+	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+	u8 pow, val;
+	loff_t lock_len;
+	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+	bool use_top;
+
+	ret = spi_nor_read_sr(nor, nor->bouncebuf);
+	if (ret)
+		return ret;
+
+	status_old = nor->bouncebuf[0];
+
+	/* If nothing in our range is unlocked, we don't need to do anything */
+	if (spi_nor_is_locked_sr(nor, ofs, len, status_old))
+		return 0;
+
+	/* If anything below us is unlocked, we can't use 'bottom' protection */
+	if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old))
+		can_be_bottom = false;
+
+	/* If anything above us is unlocked, we can't use 'top' protection */
+	if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+				  status_old))
+		can_be_top = false;
+
+	if (!can_be_bottom && !can_be_top)
+		return -EINVAL;
+
+	/* Prefer top, if both are valid */
+	use_top = can_be_top;
+
+	/* lock_len: length of region that should end up locked */
+	if (use_top)
+		lock_len = mtd->size - ofs;
+	else
+		lock_len = ofs + len;
+
+	if (lock_len == mtd->size) {
+		val = mask;
+	} else {
+		min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+		pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
+		val = pow << SR_BP_SHIFT;
+
+		if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
+			val = (val & ~SR_BP3) | SR_BP3_BIT6;
+
+		if (val & ~mask)
+			return -EINVAL;
+
+		/* Don't "lock" with no region! */
+		if (!(val & mask))
+			return -EINVAL;
+	}
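+	/*
+	 * Illustrative example, using the w25q64fw table above: locking the
+	 * upper 2 MiB of the 8 MiB flash gives lock_len = 2 MiB and
+	 * min_prot_len = 128 KiB, so pow = 21 - 17 + 1 = 5 and the BP field
+	 * becomes 101b, the "Upper 1/4" row.
+	 */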
+
+	status_new = (status_old & ~mask & ~tb_mask) | val;
+
+	/* Disallow further writes if WP pin is asserted */
+	status_new |= SR_SRWD;
+
+	if (!use_top)
+		status_new |= tb_mask;
+
+	/* Don't bother if they're the same */
+	if (status_new == status_old)
+		return 0;
+
+	/* Only modify protection if it will not unlock other areas */
+	if ((status_new & mask) < (status_old & mask))
+		return -EINVAL;
+
+	return spi_nor_write_sr_and_check(nor, status_new);
+}
+
+/*
+ * Unlock a region of the flash. See spi_nor_sr_lock() for more info
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+	struct mtd_info *mtd = &nor->mtd;
+	u64 min_prot_len;
+	int ret, status_old, status_new;
+	u8 mask = spi_nor_get_sr_bp_mask(nor);
+	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+	u8 pow, val;
+	loff_t lock_len;
+	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+	bool use_top;
+
+	ret = spi_nor_read_sr(nor, nor->bouncebuf);
+	if (ret)
+		return ret;
+
+	status_old = nor->bouncebuf[0];
+
+	/* If nothing in our range is locked, we don't need to do anything */
+	if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old))
+		return 0;
+
+	/* If anything below us is locked, we can't use 'top' protection */
+	if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old))
+		can_be_top = false;
+
+	/* If anything above us is locked, we can't use 'bottom' protection */
+	if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
+				    status_old))
+		can_be_bottom = false;
+
+	if (!can_be_bottom && !can_be_top)
+		return -EINVAL;
+
+	/* Prefer top, if both are valid */
+	use_top = can_be_top;
+
+	/* lock_len: length of region that should remain locked */
+	if (use_top)
+		lock_len = mtd->size - (ofs + len);
+	else
+		lock_len = ofs;
+
+	if (lock_len == 0) {
+		val = 0; /* fully unlocked */
+	} else {
+		min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+		pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
+		val = pow << SR_BP_SHIFT;
+
+		if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
+			val = (val & ~SR_BP3) | SR_BP3_BIT6;
+
+		/* Some power-of-two sizes are not supported */
+		if (val & ~mask)
+			return -EINVAL;
+	}
+
+	status_new = (status_old & ~mask & ~tb_mask) | val;
+
+	/* Don't protect status register if we're fully unlocked */
+	if (lock_len == 0)
+		status_new &= ~SR_SRWD;
+
+	if (!use_top)
+		status_new |= tb_mask;
+
+	/* Don't bother if they're the same */
+	if (status_new == status_old)
+		return 0;
+
+	/* Only modify protection if it will not lock other areas */
+	if ((status_new & mask) > (status_old & mask))
+		return -EINVAL;
+
+	return spi_nor_write_sr_and_check(nor, status_new);
+}
+
+/*
+ * Check if a region of the flash is (completely) locked. See spi_nor_sr_lock()
+ * for more info.
+ *
+ * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
+ * negative on errors.
+ */
+static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+	int ret;
+
+	ret = spi_nor_read_sr(nor, nor->bouncebuf);
+	if (ret)
+		return ret;
+
+	return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
+}
+
+static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
+	.lock = spi_nor_sr_lock,
+	.unlock = spi_nor_sr_unlock,
+	.is_locked = spi_nor_sr_is_locked,
+};
+
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	int ret;
+
+	ret = spi_nor_lock_and_prep(nor);
+	if (ret)
+		return ret;
+
+	ret = nor->params->locking_ops->lock(nor, ofs, len);
+
+	spi_nor_unlock_and_unprep(nor);
+	return ret;
+}
+
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	int ret;
+
+	ret = spi_nor_lock_and_prep(nor);
+	if (ret)
+		return ret;
+
+	ret = nor->params->locking_ops->unlock(nor, ofs, len);
+
+	spi_nor_unlock_and_unprep(nor);
+	return ret;
+}
+
+static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	int ret;
+
+	ret = spi_nor_lock_and_prep(nor);
+	if (ret)
+		return ret;
+
+	ret = nor->params->locking_ops->is_locked(nor, ofs, len);
+
+	spi_nor_unlock_and_unprep(nor);
+	return ret;
+}
+
+/**
+ * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
+ * Register 1.
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * Bit 6 of the Status Register 1 is the QE bit for Macronix-like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
+{
+	int ret;
+
+	ret = spi_nor_read_sr(nor, nor->bouncebuf);
+	if (ret)
+		return ret;
+
+	if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
+		return 0;
+
+	nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
+
+	return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
+}
+
+/**
+ * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
+ * Register 2.
+ * @nor:       pointer to a 'struct spi_nor'.
+ *
+ * Bit 1 of the Status Register 2 is the QE bit for Spansion-like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
+{
+	int ret;
+
+	if (nor->flags & SNOR_F_NO_READ_CR)
+		return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
+
+	ret = spi_nor_read_cr(nor, nor->bouncebuf);
+	if (ret)
+		return ret;
+
+	if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
+		return 0;
+
+	nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+
+	return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
+}
+
+/**
+ * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Status Register 2.
+ *
+ * This is one of the procedures to set the QE bit described in the SFDP
+ * (JESD216 rev B) specification but no manufacturer using this procedure has
+ * been identified yet, hence the name of the function.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
+{
+	u8 *sr2 = nor->bouncebuf;
+	int ret;
+	u8 sr2_written;
+
+	/* Check current Quad Enable bit value. */
+	ret = spi_nor_read_sr2(nor, sr2);
+	if (ret)
+		return ret;
+	if (*sr2 & SR2_QUAD_EN_BIT7)
+		return 0;
+
+	/* Update the Quad Enable bit. */
+	*sr2 |= SR2_QUAD_EN_BIT7;
+
+	ret = spi_nor_write_sr2(nor, sr2);
+	if (ret)
+		return ret;
+
+	sr2_written = *sr2;
+
+	/* Read back and check it. */
+	ret = spi_nor_read_sr2(nor, sr2);
+	if (ret)
+		return ret;
+
+	if (*sr2 != sr2_written) {
+		dev_dbg(nor->dev, "SR2: Read back test failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static const struct spi_nor_manufacturer *manufacturers[] = {
+	&spi_nor_atmel,
+	&spi_nor_catalyst,
+	&spi_nor_eon,
+	&spi_nor_esmt,
+	&spi_nor_everspin,
+	&spi_nor_fujitsu,
+	&spi_nor_gigadevice,
+	&spi_nor_intel,
+	&spi_nor_issi,
+	&spi_nor_macronix,
+	&spi_nor_micron,
+	&spi_nor_st,
+	&spi_nor_spansion,
+	&spi_nor_sst,
+	&spi_nor_winbond,
+	&spi_nor_xilinx,
+	&spi_nor_xmc,
+};
+
+static const struct flash_info *
+spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
+			  const u8 *id)
+{
+	unsigned int i;
+
+	for (i = 0; i < nparts; i++) {
+		if (parts[i].id_len &&
+		    !memcmp(parts[i].id, id, parts[i].id_len))
+			return &parts[i];
+	}
+
+	return NULL;
+}
+
+static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
+{
+	const struct flash_info *info;
+	u8 *id = nor->bouncebuf;
+	unsigned int i;
+	int ret;
+
+	if (nor->spimem) {
+		struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
+				   SPI_MEM_OP_NO_ADDR,
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+						    SPI_NOR_MAX_ID_LEN);
+	}
+	if (ret) {
+		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
+		info = spi_nor_search_part_by_id(manufacturers[i]->parts,
+						 manufacturers[i]->nparts,
+						 id);
+		if (info) {
+			nor->manufacturer = manufacturers[i];
+			return info;
+		}
+	}
+
+	dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
+		SPI_NOR_MAX_ID_LEN, id);
+	return ERR_PTR(-ENODEV);
+}
+
+static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+			size_t *retlen, u_char *buf)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	ssize_t ret;
+
+	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+	ret = spi_nor_lock_and_prep(nor);
+	if (ret)
+		return ret;
+
+	while (len) {
+		loff_t addr = from;
+
+		addr = spi_nor_convert_addr(nor, addr);
+
+		ret = spi_nor_read_data(nor, addr, len, buf);
+		if (ret == 0) {
+			/* We shouldn't see 0-length reads */
+			ret = -EIO;
+			goto read_err;
+		}
+		if (ret < 0)
+			goto read_err;
+
+		WARN_ON(ret > len);
+		*retlen += ret;
+		buf += ret;
+		from += ret;
+		len -= ret;
+	}
+	ret = 0;
+
+read_err:
+	spi_nor_unlock_and_unprep(nor);
+	return ret;
+}
+
+/*
+ * Write an address range to the NOR chip.  Data must be written in
+ * FLASH_PAGESIZE chunks.  The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+	size_t *retlen, const u_char *buf)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	size_t page_offset, page_remain, i;
+	ssize_t ret;
+
+	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+	ret = spi_nor_lock_and_prep(nor);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < len; ) {
+		ssize_t written;
+		loff_t addr = to + i;
+
+		/*
+		 * If page_size is a power of two, the offset can be quickly
+		 * calculated with an AND operation. On the other cases we
+		 * need to do a modulus operation (more expensive).
+		 * Power of two numbers have only one bit set and we can use
+		 * the instruction hweight32 to detect if we need to do a
+		 * modulus (do_div()) or not.
+		 */
+		if (hweight32(nor->page_size) == 1) {
+			page_offset = addr & (nor->page_size - 1);
+		} else {
+			uint64_t aux = addr;
+
+			page_offset = do_div(aux, nor->page_size);
+		}
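+		/*
+		 * Illustrative example: with a 256-byte page (a power of two,
+		 * so hweight32(256) == 1) and addr = 0x1234, the AND path
+		 * yields page_offset = 0x1234 & 0xff = 0x34. A 264-byte page,
+		 * as found on some Xilinx S3AN parts, has two bits set and
+		 * takes the do_div() path instead.
+		 */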
+		/* the size of data remaining on the first page */
+		page_remain = min_t(size_t,
+				    nor->page_size - page_offset, len - i);
+
+		addr = spi_nor_convert_addr(nor, addr);
+
+		ret = spi_nor_write_enable(nor);
+		if (ret)
+			goto write_err;
+
+		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+		if (ret < 0)
+			goto write_err;
+		written = ret;
+
+		ret = spi_nor_wait_till_ready(nor);
+		if (ret)
+			goto write_err;
+		*retlen += written;
+		i += written;
+	}
+
+write_err:
+	spi_nor_unlock_and_unprep(nor);
+	return ret;
+}
+
+static int spi_nor_check(struct spi_nor *nor)
+{
+	if (!nor->dev ||
+	    (!nor->spimem && !nor->controller_ops) ||
+	    (!nor->spimem && nor->controller_ops &&
+	    (!nor->controller_ops->read ||
+	     !nor->controller_ops->write ||
+	     !nor->controller_ops->read_reg ||
+	     !nor->controller_ops->write_reg))) {
+		pr_err("spi-nor: please fill all the necessary fields!\n");
+		return -EINVAL;
+	}
+
+	if (nor->spimem && nor->controller_ops) {
+		dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+spi_nor_set_read_settings(struct spi_nor_read_command *read,
+			  u8 num_mode_clocks,
+			  u8 num_wait_states,
+			  u8 opcode,
+			  enum spi_nor_protocol proto)
+{
+	read->num_mode_clocks = num_mode_clocks;
+	read->num_wait_states = num_wait_states;
+	read->opcode = opcode;
+	read->proto = proto;
+}
+
+void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
+			     enum spi_nor_protocol proto)
+{
+	pp->opcode = opcode;
+	pp->proto = proto;
+}
+
+static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size; i++)
+		if (table[i][0] == (int)hwcaps)
+			return table[i][1];
+
+	return -EINVAL;
+}
+
+int spi_nor_hwcaps_read2cmd(u32 hwcaps)
+{
+	static const int hwcaps_read2cmd[][2] = {
+		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
+		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
+		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
+		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
+		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
+		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
+		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
+		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
+		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
+		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
+		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
+		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
+		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
+		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
+		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
+	};
+
+	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
+				  ARRAY_SIZE(hwcaps_read2cmd));
+}
+
+static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
+{
+	static const int hwcaps_pp2cmd[][2] = {
+		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
+		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
+		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
+		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
+		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
+		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
+		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
+	};
+
+	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
+				  ARRAY_SIZE(hwcaps_pp2cmd));
+}
+
+/**
+ * spi_nor_spimem_check_op - check if the operation is supported
+ *                           by the controller
+ * @nor:	pointer to a 'struct spi_nor'
+ * @op:		pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_op(struct spi_nor *nor,
+				   struct spi_mem_op *op)
+{
+	/*
+	 * First test with 4 address bytes. The opcode itself might
+	 * be a 3B addressing opcode, but we don't care, because the
+	 * SPI controller implementation should not check the opcode,
+	 * only the sequence.
+	 */
+	op->addr.nbytes = 4;
+	if (!spi_mem_supports_op(nor->spimem, op)) {
+		if (nor->mtd.size > SZ_16M)
+			return -ENOTSUPP;
+
+		/* If flash size <= 16MB, 3 address bytes are sufficient */
+		op->addr.nbytes = 3;
+		if (!spi_mem_supports_op(nor->spimem, op))
+			return -ENOTSUPP;
+	}
+
+	return 0;
+}
+
+/**
+ * spi_nor_spimem_check_readop - check if the read op is supported
+ *                               by the controller
+ * @nor:	pointer to a 'struct spi_nor'
+ * @read:	pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_readop(struct spi_nor *nor,
+				       const struct spi_nor_read_command *read)
+{
+	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
+					  SPI_MEM_OP_ADDR(3, 0, 1),
+					  SPI_MEM_OP_DUMMY(0, 1),
+					  SPI_MEM_OP_DATA_IN(0, NULL, 1));
+
+	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
+	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
+	op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
+	op.dummy.buswidth = op.addr.buswidth;
+	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
+			  op.dummy.buswidth / 8;
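+	/*
+	 * Illustrative example: a 1-4-4 read with 2 mode clocks and 4 wait
+	 * states has a 4-bit wide dummy phase, so op.dummy.nbytes =
+	 * (2 + 4) * 4 / 8 = 3 bytes.
+	 */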
+
+	return spi_nor_spimem_check_op(nor, &op);
+}
+
+/**
+ * spi_nor_spimem_check_pp - check if the page program op is supported
+ *                           by the controller
+ * @nor:	pointer to a 'struct spi_nor'
+ * @pp:		pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_pp(struct spi_nor *nor,
+				   const struct spi_nor_pp_command *pp)
+{
+	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
+					  SPI_MEM_OP_ADDR(3, 0, 1),
+					  SPI_MEM_OP_NO_DUMMY,
+					  SPI_MEM_OP_DATA_OUT(0, NULL, 1));
+
+	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
+	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
+	op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
+
+	return spi_nor_spimem_check_op(nor, &op);
+}
+
+/**
+ * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
+ *                                based on SPI controller capabilities
+ * @nor:        pointer to a 'struct spi_nor'
+ * @hwcaps:     pointer to resulting capabilities after adjusting
+ *              according to controller and flash's capability
+ */
+static void
+spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
+{
+	struct spi_nor_flash_parameter *params = nor->params;
+	unsigned int cap;
+
+	/* DTR modes are not supported yet, mask them all. */
+	*hwcaps &= ~SNOR_HWCAPS_DTR;
+
+	/* X-X-X modes are not supported yet, mask them all. */
+	*hwcaps &= ~SNOR_HWCAPS_X_X_X;
+
+	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
+		int rdidx, ppidx;
+
+		if (!(*hwcaps & BIT(cap)))
+			continue;
+
+		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
+		if (rdidx >= 0 &&
+		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
+			*hwcaps &= ~BIT(cap);
+
+		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
+		if (ppidx < 0)
+			continue;
+
+		if (spi_nor_spimem_check_pp(nor,
+					    &params->page_programs[ppidx]))
+			*hwcaps &= ~BIT(cap);
+	}
+}
+
+/**
+ * spi_nor_set_erase_type() - set a SPI NOR erase type
+ * @erase:	pointer to a structure that describes a SPI NOR erase type
+ * @size:	the size of the sector/block erased by the erase type
+ * @opcode:	the SPI command op code to erase the sector/block
+ */
+void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+			    u8 opcode)
+{
+	erase->size = size;
+	erase->opcode = opcode;
+	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
+	erase->size_shift = ffs(erase->size) - 1;
+	erase->size_mask = (1 << erase->size_shift) - 1;
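+	/*
+	 * Illustrative example: a 64 KiB erase type gives size_shift =
+	 * ffs(0x10000) - 1 = 16 and size_mask = 0xffff, so an offset within
+	 * the sector can be tested with (addr & erase->size_mask).
+	 */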
+}
+
+/**
+ * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
+ * @map:		the erase map of the SPI NOR
+ * @erase_mask:		bitmask encoding erase types that can erase the entire
+ *			flash memory
+ * @flash_size:		the spi nor flash memory size
+ */
+void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+				    u8 erase_mask, u64 flash_size)
+{
+	/* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
+	map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
+				     SNOR_LAST_REGION;
+	map->uniform_region.size = flash_size;
+	map->regions = &map->uniform_region;
+	map->uniform_erase_type = erase_mask;
+}
+
+int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
+			     const struct sfdp_parameter_header *bfpt_header,
+			     const struct sfdp_bfpt *bfpt,
+			     struct spi_nor_flash_parameter *params)
+{
+	int ret;
+
+	if (nor->manufacturer && nor->manufacturer->fixups &&
+	    nor->manufacturer->fixups->post_bfpt) {
+		ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
+							   bfpt, params);
+		if (ret)
+			return ret;
+	}
+
+	if (nor->info->fixups && nor->info->fixups->post_bfpt)
+		return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
+						    params);
+
+	return 0;
+}
+
+static int spi_nor_select_read(struct spi_nor *nor,
+			       u32 shared_hwcaps)
+{
+	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
+	const struct spi_nor_read_command *read;
+
+	if (best_match < 0)
+		return -EINVAL;
+
+	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
+	if (cmd < 0)
+		return -EINVAL;
+
+	read = &nor->params->reads[cmd];
+	nor->read_opcode = read->opcode;
+	nor->read_proto = read->proto;
+
+	/*
+	 * In the SPI NOR framework, we don't need to distinguish between
+	 * mode clock cycles and wait state clock cycles.
+	 * Indeed, the value of the mode clock cycles is used by a QSPI
+	 * flash memory to know whether it should enter or leave its 0-4-4
+	 * (Continuous Read / XIP) mode.
+	 * eXecution In Place is out of the scope of the mtd sub-system.
+	 * Hence we choose to merge both mode and wait state clock cycles
+	 * into the so called dummy clock cycles.
+	 */
+	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
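+	/*
+	 * Illustrative example: the default 1-1-4 read is registered with
+	 * 0 mode clocks and 8 wait states, so it ends up with read_dummy = 8
+	 * dummy clock cycles.
+	 */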
+	return 0;
+}
+
+static int spi_nor_select_pp(struct spi_nor *nor,
+			     u32 shared_hwcaps)
+{
+	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
+	const struct spi_nor_pp_command *pp;
+
+	if (best_match < 0)
+		return -EINVAL;
+
+	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
+	if (cmd < 0)
+		return -EINVAL;
+
+	pp = &nor->params->page_programs[cmd];
+	nor->program_opcode = pp->opcode;
+	nor->write_proto = pp->proto;
+	return 0;
+}
+
+/**
+ * spi_nor_select_uniform_erase() - select optimum uniform erase type
+ * @map:		the erase map of the SPI NOR
+ * @wanted_size:	the erase type size to search for. Contains the value of
+ *			info->sector_size or of the "small sector" size in case
+ *			CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
+ *
+ * Once the optimum uniform sector erase command is found, disable all the
+ * others.
+ *
+ * Return: pointer to erase type on success, NULL otherwise.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
+			     const u32 wanted_size)
+{
+	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
+	int i;
+	u8 uniform_erase_type = map->uniform_erase_type;
+
+	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+		if (!(uniform_erase_type & BIT(i)))
+			continue;
+
+		tested_erase = &map->erase_type[i];
+
+		/*
+		 * If the current erase size is the one, stop here:
+		 * we have found the right uniform Sector Erase command.
+		 */
+		if (tested_erase->size == wanted_size) {
+			erase = tested_erase;
+			break;
+		}
+
+		/*
+		 * Otherwise, the current erase size is still a valid candidate.
+		 * Select the biggest valid candidate.
+		 */
+		if (!erase && tested_erase->size)
+			erase = tested_erase;
+		/* keep iterating to find the wanted_size */
+	}
+
+	if (!erase)
+		return NULL;
+
+	/* Disable all other Sector Erase commands. */
+	map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
+	map->uniform_erase_type |= BIT(erase - map->erase_type);
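+	/*
+	 * Illustrative example: if the selected 4 KiB erase type sits at
+	 * index 1 of map->erase_type, (erase - map->erase_type) is 1 and
+	 * only BIT(1) remains set in uniform_erase_type.
+	 */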
+	return erase;
+}
+
+static int spi_nor_select_erase(struct spi_nor *nor)
+{
+	struct spi_nor_erase_map *map = &nor->params->erase_map;
+	const struct spi_nor_erase_type *erase = NULL;
+	struct mtd_info *mtd = &nor->mtd;
+	u32 wanted_size = nor->info->sector_size;
+	int i;
+
+	/*
+	 * The previous implementation handling Sector Erase commands assumed
+	 * that the SPI flash memory has a uniform layout and then used only
+	 * one of the supported erase sizes for all Sector Erase commands.
+	 * So to be backward compatible, the new implementation also tries to
+	 * manage the SPI flash memory as uniform with a single erase sector
+	 * size, when possible.
+	 */
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+	/* prefer "small sector" erase if possible */
+	wanted_size = 4096u;
+#endif
+
+	if (spi_nor_has_uniform_erase(nor)) {
+		erase = spi_nor_select_uniform_erase(map, wanted_size);
+		if (!erase)
+			return -EINVAL;
+		nor->erase_opcode = erase->opcode;
+		mtd->erasesize = erase->size;
+		return 0;
+	}
+
+	/*
+	 * For non-uniform SPI flash memory, set mtd->erasesize to the
+	 * maximum erase sector size. No need to set nor->erase_opcode.
+	 */
+	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+		if (map->erase_type[i].size) {
+			erase = &map->erase_type[i];
+			break;
+		}
+	}
+
+	if (!erase)
+		return -EINVAL;
+
+	mtd->erasesize = erase->size;
+	return 0;
+}
+
+static int spi_nor_default_setup(struct spi_nor *nor,
+				 const struct spi_nor_hwcaps *hwcaps)
+{
+	struct spi_nor_flash_parameter *params = nor->params;
+	u32 ignored_mask, shared_mask;
+	int err;
+
+	/*
+	 * Keep only the hardware capabilities supported by both the SPI
+	 * controller and the SPI flash memory.
+	 */
+	shared_mask = hwcaps->mask & params->hwcaps.mask;
+
+	if (nor->spimem) {
+		/*
+		 * When called from spi_nor_probe(), all caps are set and we
+		 * need to discard some of them based on what the SPI
+		 * controller actually supports (using spi_mem_supports_op()).
+		 */
+		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
+	} else {
+		/*
+		 * SPI n-n-n protocols are not supported when the SPI
+		 * controller directly implements the spi_nor interface.
+		 * Yet another reason to switch to spi-mem.
+		 */
+		ignored_mask = SNOR_HWCAPS_X_X_X;
+		if (shared_mask & ignored_mask) {
+			dev_dbg(nor->dev,
+				"SPI n-n-n protocols are not supported.\n");
+			shared_mask &= ~ignored_mask;
+		}
+	}
+
+	/* Select the (Fast) Read command. */
+	err = spi_nor_select_read(nor, shared_mask);
+	if (err) {
+		dev_dbg(nor->dev,
+			"can't select read settings supported by both the SPI controller and memory.\n");
+		return err;
+	}
+
+	/* Select the Page Program command. */
+	err = spi_nor_select_pp(nor, shared_mask);
+	if (err) {
+		dev_dbg(nor->dev,
+			"can't select write settings supported by both the SPI controller and memory.\n");
+		return err;
+	}
+
+	/* Select the Sector Erase command. */
+	err = spi_nor_select_erase(nor);
+	if (err) {
+		dev_dbg(nor->dev,
+			"can't select erase settings supported by both the SPI controller and memory.\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int spi_nor_setup(struct spi_nor *nor,
+			 const struct spi_nor_hwcaps *hwcaps)
+{
+	if (!nor->params->setup)
+		return 0;
+
+	return nor->params->setup(nor, hwcaps);
+}
+
+/**
+ * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
+ * settings based on MFR register and ->default_init() hook.
+ * @nor:	pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
+{
+	if (nor->manufacturer && nor->manufacturer->fixups &&
+	    nor->manufacturer->fixups->default_init)
+		nor->manufacturer->fixups->default_init(nor);
+
+	if (nor->info->fixups && nor->info->fixups->default_init)
+		nor->info->fixups->default_init(nor);
+}
+
+/**
+ * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
+ * based on JESD216 SFDP standard.
+ * @nor:	pointer to a 'struct spi_nor'.
+ *
+ * The method has a roll-back mechanism: in case the SFDP parsing fails, the
+ * legacy flash parameters and settings will be restored.
+ */
+static void spi_nor_sfdp_init_params(struct spi_nor *nor)
+{
+	struct spi_nor_flash_parameter sfdp_params;
+
+	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
+
+	if (spi_nor_parse_sfdp(nor, nor->params)) {
+		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
+		nor->addr_width = 0;
+		nor->flags &= ~SNOR_F_4B_OPCODES;
+	}
+}
+
+/**
+ * spi_nor_info_init_params() - Initialize the flash's parameters and settings
+ * based on nor->info data.
+ * @nor:	pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_info_init_params(struct spi_nor *nor)
+{
+	struct spi_nor_flash_parameter *params = nor->params;
+	struct spi_nor_erase_map *map = &params->erase_map;
+	const struct flash_info *info = nor->info;
+	struct device_node *np = spi_nor_get_flash_node(nor);
+	u8 i, erase_mask;
+
+	/* Initialize legacy flash parameters and settings. */
+	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+	params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
+	params->setup = spi_nor_default_setup;
+	/* Default to 16-bit Write Status (01h) Command */
+	nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+	/* Set SPI NOR sizes. */
+	params->size = (u64)info->sector_size * info->n_sectors;
+	params->page_size = info->page_size;
+
+	if (!(info->flags & SPI_NOR_NO_FR)) {
+		/* Default to Fast Read for DT and non-DT platform devices. */
+		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+
+		/* Mask out Fast Read if not requested at DT instantiation. */
+		if (np && !of_property_read_bool(np, "m25p,fast-read"))
+			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+	}
+
+	/* (Fast) Read settings. */
+	params->hwcaps.mask |= SNOR_HWCAPS_READ;
+	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+				  0, 0, SPINOR_OP_READ,
+				  SNOR_PROTO_1_1_1);
+
+	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
+		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+					  0, 8, SPINOR_OP_READ_FAST,
+					  SNOR_PROTO_1_1_1);
+
+	if (info->flags & SPI_NOR_DUAL_READ) {
+		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
+					  0, 8, SPINOR_OP_READ_1_1_2,
+					  SNOR_PROTO_1_1_2);
+	}
+
+	if (info->flags & SPI_NOR_QUAD_READ) {
+		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
+					  0, 8, SPINOR_OP_READ_1_1_4,
+					  SNOR_PROTO_1_1_4);
+	}
+
+	if (info->flags & SPI_NOR_OCTAL_READ) {
+		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
+					  0, 8, SPINOR_OP_READ_1_1_8,
+					  SNOR_PROTO_1_1_8);
+	}
+
+	/* Page Program settings. */
+	params->hwcaps.mask |= SNOR_HWCAPS_PP;
+	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+				SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+
+	/*
+	 * Sector Erase settings. Sort Erase Types in ascending order, with the
+	 * smallest erase size starting at BIT(0).
+	 */
+	erase_mask = 0;
+	i = 0;
+	if (info->flags & SECT_4K_PMC) {
+		erase_mask |= BIT(i);
+		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+				       SPINOR_OP_BE_4K_PMC);
+		i++;
+	} else if (info->flags & SECT_4K) {
+		erase_mask |= BIT(i);
+		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+				       SPINOR_OP_BE_4K);
+		i++;
+	}
+	erase_mask |= BIT(i);
+	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
+			       SPINOR_OP_SE);
+	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+}
+
+/**
+ * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
+ * after SFDP has been parsed (is also called for SPI NORs that do not
+ * support RDSFDP).
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * Typically used to tweak various parameters that could not be extracted by
+ * other means (i.e. when the information provided by the SFDP/flash_info
+ * tables is incomplete or wrong).
+ */
+static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
+{
+	if (nor->manufacturer && nor->manufacturer->fixups &&
+	    nor->manufacturer->fixups->post_sfdp)
+		nor->manufacturer->fixups->post_sfdp(nor);
+
+	if (nor->info->fixups && nor->info->fixups->post_sfdp)
+		nor->info->fixups->post_sfdp(nor);
+}
+
+/**
+ * spi_nor_late_init_params() - Late initialization of default flash parameters.
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * Used to set default flash parameters and settings when the ->default_init()
+ * hook or the SFDP parser leaves gaps.
+ */
+static void spi_nor_late_init_params(struct spi_nor *nor)
+{
+	/*
+	 * NOR protection support. When locking_ops are not provided, we pick
+	 * the default ones.
+	 */
+	if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
+		nor->params->locking_ops = &spi_nor_sr_locking_ops;
+}
+
+/**
+ * spi_nor_init_params() - Initialize the flash's parameters and settings.
+ * @nor:	pointer to a 'struct spi_nor'.
+ *
+ * The flash parameters and settings are initialized based on a sequence of
+ * calls that are ordered by priority:
+ *
+ * 1/ Default flash parameters initialization. The initializations are done
+ *    based on nor->info data:
+ *		spi_nor_info_init_params()
+ *
+ * which can be overwritten by:
+ * 2/ Manufacturer flash parameters initialization. The initializations are
+ *    done based on the MFR register, or when the decisions cannot be made
+ *    solely based on MFR, by using specific flash_info tweaks, ->default_init():
+ *		spi_nor_manufacturer_init_params()
+ *
+ * which can be overwritten by:
+ * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
+ *    should be more accurate than the above.
+ *		spi_nor_sfdp_init_params()
+ *
+ *    Please note that there is a ->post_bfpt() fixup hook that can overwrite
+ *    the flash parameters and settings immediately after parsing the Basic
+ *    Flash Parameter Table.
+ *
+ * which can be overwritten by:
+ * 4/ Post SFDP flash parameters initialization. Used to tweak various
+ *    parameters that could not be extracted by other means (i.e. when
+ *    information provided by the SFDP/flash_info tables are incomplete or
+ *    wrong).
+ *		spi_nor_post_sfdp_fixups()
+ *
+ * 5/ Late default flash parameters initialization, used when the
+ * ->default_init() hook or the SFDP parser does not set specific params.
+ *		spi_nor_late_init_params()
+ */
+static int spi_nor_init_params(struct spi_nor *nor)
+{
+	nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
+	if (!nor->params)
+		return -ENOMEM;
+
+	spi_nor_info_init_params(nor);
+
+	spi_nor_manufacturer_init_params(nor);
+
+	if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
+	    !(nor->info->flags & SPI_NOR_SKIP_SFDP))
+		spi_nor_sfdp_init_params(nor);
+
+	spi_nor_post_sfdp_fixups(nor);
+
+	spi_nor_late_init_params(nor);
+
+	return 0;
+}
+
+/**
+ * spi_nor_quad_enable() - enable Quad I/O if needed.
+ * @nor:                pointer to a 'struct spi_nor'
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_quad_enable(struct spi_nor *nor)
+{
+	if (!nor->params->quad_enable)
+		return 0;
+
+	if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
+	      spi_nor_get_protocol_width(nor->write_proto) == 4))
+		return 0;
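+	/*
+	 * Illustrative example: with read_proto = SNOR_PROTO_1_1_4 the data
+	 * phase is 4 bits wide, so the Quad Enable procedure runs; a plain
+	 * 1-1-1 configuration skips it entirely.
+	 */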
+
+	return nor->params->quad_enable(nor);
+}
+
+/**
+ * spi_nor_try_unlock_all() - Tries to unlock the entire flash memory array.
+ * @nor:	pointer to a 'struct spi_nor'.
+ *
+ * Some SPI NOR flashes are write protected by default after a power-on reset
+ * cycle, in order to avoid inadvertent writes during power-up. Backward
+ * compatibility requires unlocking the entire flash memory array at power-up
+ * by default.
+ *
+ * Unprotecting the entire flash array will fail for boards which are hardware
+ * write-protected. Thus any errors are ignored.
+ */
+static void spi_nor_try_unlock_all(struct spi_nor *nor)
+{
+	int ret;
+
+	if (!(nor->flags & SNOR_F_HAS_LOCK))
+		return;
+
+	ret = spi_nor_unlock(&nor->mtd, 0, nor->params->size);
+	if (ret)
+		dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+	int err;
+
+	err = spi_nor_quad_enable(nor);
+	if (err) {
+		dev_dbg(nor->dev, "quad mode not supported\n");
+		return err;
+	}
+
+	spi_nor_try_unlock_all(nor);
+
+	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
+		/*
+		 * If the RESET# pin isn't hooked up properly, or the system
+		 * otherwise doesn't perform a reset command in the boot
+		 * sequence, it's impossible to 100% protect against unexpected
+		 * reboots (e.g., crashes). Warn the user (or hopefully, system
+		 * designer) that this is bad.
+		 */
+		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+			  "enabling reset hack; may not recover from unexpected reboots\n");
+		nor->params->set_4byte_addr_mode(nor, true);
+	}
+
+	return 0;
+}
+
+/* mtd resume handler */
+static void spi_nor_resume(struct mtd_info *mtd)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	struct device *dev = nor->dev;
+	int ret;
+
+	/* re-initialize the nor chip */
+	ret = spi_nor_init(nor);
+	if (ret)
+		dev_err(dev, "resume() failed\n");
+}
+
+static int spi_nor_get_device(struct mtd_info *mtd)
+{
+	struct mtd_info *master = mtd_get_master(mtd);
+	struct spi_nor *nor = mtd_to_spi_nor(master);
+	struct device *dev;
+
+	if (nor->spimem)
+		dev = nor->spimem->spi->controller->dev.parent;
+	else
+		dev = nor->dev;
+
+	if (!try_module_get(dev->driver->owner))
+		return -ENODEV;
+
+	return 0;
+}
+
+static void spi_nor_put_device(struct mtd_info *mtd)
+{
+	struct mtd_info *master = mtd_get_master(mtd);
+	struct spi_nor *nor = mtd_to_spi_nor(master);
+	struct device *dev;
+
+	if (nor->spimem)
+		dev = nor->spimem->spi->controller->dev.parent;
+	else
+		dev = nor->dev;
+
+	module_put(dev->driver->owner);
+}
+
+void spi_nor_restore(struct spi_nor *nor)
+{
+	/* restore the addressing mode */
+	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
+	    nor->flags & SNOR_F_BROKEN_RESET)
+		nor->params->set_4byte_addr_mode(nor, false);
+}
+EXPORT_SYMBOL_GPL(spi_nor_restore);
+
+static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
+						 const char *name)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
+		for (j = 0; j < manufacturers[i]->nparts; j++) {
+			if (!strcmp(name, manufacturers[i]->parts[j].name)) {
+				nor->manufacturer = manufacturers[i];
+				return &manufacturers[i]->parts[j];
+			}
+		}
+	}
+
+	return NULL;
+}
+
+static int spi_nor_set_addr_width(struct spi_nor *nor)
+{
+	if (nor->addr_width) {
+		/* already configured from SFDP */
+	} else if (nor->info->addr_width) {
+		nor->addr_width = nor->info->addr_width;
+	} else {
+		nor->addr_width = 3;
+	}
+
+	if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
+		/* enable 4-byte addressing if the device exceeds 16MiB */
+		nor->addr_width = 4;
+	}
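+	/*
+	 * Illustrative example: a 32 MiB part therefore ends up with
+	 * addr_width = 4, while a 16 MiB part keeps addr_width = 3, since
+	 * three address bytes cover exactly 2^24 bytes.
+	 */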
+
+	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
+		dev_dbg(nor->dev, "address width is too large: %u\n",
+			nor->addr_width);
+		return -EINVAL;
+	}
+
+	/* Set 4byte opcodes when possible. */
+	if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+	    !(nor->flags & SNOR_F_HAS_4BAIT))
+		spi_nor_set_4byte_opcodes(nor);
+
+	return 0;
+}
+
+static void spi_nor_debugfs_init(struct spi_nor *nor,
+				 const struct flash_info *info)
+{
+	struct mtd_info *mtd = &nor->mtd;
+
+	mtd->dbg.partname = info->name;
+	mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
+					 info->id_len, info->id);
+}
+
+static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
+						       const char *name)
+{
+	const struct flash_info *info = NULL;
+
+	if (name)
+		info = spi_nor_match_id(nor, name);
+	/* Try to auto-detect if chip name wasn't specified or not found */
+	if (!info)
+		info = spi_nor_read_id(nor);
+	if (IS_ERR_OR_NULL(info))
+		return ERR_PTR(-ENOENT);
+
+	/*
+	 * If caller has specified name of flash model that can normally be
+	 * detected using JEDEC, let's verify it.
+	 */
+	if (name && info->id_len) {
+		const struct flash_info *jinfo;
+
+		jinfo = spi_nor_read_id(nor);
+		if (IS_ERR(jinfo)) {
+			return jinfo;
+		} else if (jinfo != info) {
+			/*
+			 * JEDEC knows better, so overwrite platform ID. We
+			 * can't trust partitions any longer, but we'll let
+			 * mtd apply them anyway, since some partitions may be
+			 * marked read-only, and we don't want to lose that
+			 * information, even if it's not 100% accurate.
+			 */
+			dev_warn(nor->dev, "found %s, expected %s\n",
+				 jinfo->name, info->name);
+			info = jinfo;
+		}
+	}
+
+	return info;
+}
+
+int spi_nor_scan(struct spi_nor *nor, const char *name,
+		 const struct spi_nor_hwcaps *hwcaps)
+{
+	const struct flash_info *info;
+	struct device *dev = nor->dev;
+	struct mtd_info *mtd = &nor->mtd;
+	struct device_node *np = spi_nor_get_flash_node(nor);
+	int ret;
+	int i;
+
+	ret = spi_nor_check(nor);
+	if (ret)
+		return ret;
+
+	/* Reset SPI protocol for all commands. */
+	nor->reg_proto = SNOR_PROTO_1_1_1;
+	nor->read_proto = SNOR_PROTO_1_1_1;
+	nor->write_proto = SNOR_PROTO_1_1_1;
+
+	/*
+	 * We need the bounce buffer early to read/write registers when going
+	 * through the spi-mem layer (buffers have to be DMA-able).
+	 * For spi-mem drivers, we'll reallocate a new buffer if
+	 * nor->page_size turns out to be greater than PAGE_SIZE (which
+	 * shouldn't happen anytime soon since NOR pages are usually less
+	 * than 1KB) after spi_nor_scan() returns.
+	 */
+	nor->bouncebuf_size = PAGE_SIZE;
+	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
+				      GFP_KERNEL);
+	if (!nor->bouncebuf)
+		return -ENOMEM;
+
+	info = spi_nor_get_flash_info(nor, name);
+	if (IS_ERR(info))
+		return PTR_ERR(info);
+
+	nor->info = info;
+
+	spi_nor_debugfs_init(nor, info);
+
+	mutex_init(&nor->lock);
+
+	/*
+	 * Make sure the XSR_RDY flag is set before calling
+	 * spi_nor_wait_till_ready(). Xilinx S3AN shares its MFR
+	 * with Atmel SPI NOR.
+	 */
+	if (info->flags & SPI_NOR_XSR_RDY)
+		nor->flags |= SNOR_F_READY_XSR_RDY;
+
+	if (info->flags & SPI_NOR_HAS_LOCK)
+		nor->flags |= SNOR_F_HAS_LOCK;
+
+	mtd->_write = spi_nor_write;
+
+	/* Init flash parameters based on flash_info struct and SFDP */
+	ret = spi_nor_init_params(nor);
+	if (ret)
+		return ret;
+
+	if (!mtd->name)
+		mtd->name = dev_name(dev);
+	mtd->priv = nor;
+	mtd->type = MTD_NORFLASH;
+	mtd->writesize = 1;
+	mtd->flags = MTD_CAP_NORFLASH;
+	mtd->size = nor->params->size;
+	mtd->_erase = spi_nor_erase;
+	mtd->_read = spi_nor_read;
+	mtd->_resume = spi_nor_resume;
+	mtd->_get_device = spi_nor_get_device;
+	mtd->_put_device = spi_nor_put_device;
+
+	if (nor->params->locking_ops) {
+		mtd->_lock = spi_nor_lock;
+		mtd->_unlock = spi_nor_unlock;
+		mtd->_is_locked = spi_nor_is_locked;
+	}
+
+	if (info->flags & USE_FSR)
+		nor->flags |= SNOR_F_USE_FSR;
+	if (info->flags & SPI_NOR_HAS_TB) {
+		nor->flags |= SNOR_F_HAS_SR_TB;
+		if (info->flags & SPI_NOR_TB_SR_BIT6)
+			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
+	}
+
+	if (info->flags & NO_CHIP_ERASE)
+		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+	if (info->flags & USE_CLSR)
+		nor->flags |= SNOR_F_USE_CLSR;
+
+	if (info->flags & SPI_NOR_4BIT_BP) {
+		nor->flags |= SNOR_F_HAS_4BIT_BP;
+		if (info->flags & SPI_NOR_BP3_SR_BIT6)
+			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
+	}
+
+	if (info->flags & SPI_NOR_NO_ERASE)
+		mtd->flags |= MTD_NO_ERASE;
+
+	mtd->dev.parent = dev;
+	nor->page_size = nor->params->page_size;
+	mtd->writebufsize = nor->page_size;
+
+	if (of_property_read_bool(np, "broken-flash-reset"))
+		nor->flags |= SNOR_F_BROKEN_RESET;
+
+	/*
+	 * Configure the SPI memory:
+	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
+	 * - set the number of dummy cycles (mode cycles + wait states).
+	 * - set the SPI protocols for register and memory accesses.
+	 */
+	ret = spi_nor_setup(nor, hwcaps);
+	if (ret)
+		return ret;
+
+	if (info->flags & SPI_NOR_4B_OPCODES)
+		nor->flags |= SNOR_F_4B_OPCODES;
+
+	ret = spi_nor_set_addr_width(nor);
+	if (ret)
+		return ret;
+
+	/* Send all the required SPI flash commands to initialize device */
+	ret = spi_nor_init(nor);
+	if (ret)
+		return ret;
+
+	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
+			(long long)mtd->size >> 10);
+
+	dev_dbg(dev,
+		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
+		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
+		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
+
+	if (mtd->numeraseregions)
+		for (i = 0; i < mtd->numeraseregions; i++)
+			dev_dbg(dev,
+				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
+				".erasesize = 0x%.8x (%uKiB), "
+				".numblocks = %d }\n",
+				i, (long long)mtd->eraseregions[i].offset,
+				mtd->eraseregions[i].erasesize,
+				mtd->eraseregions[i].erasesize / 1024,
+				mtd->eraseregions[i].numblocks);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nor_scan);
+
+static int spi_nor_create_read_dirmap(struct spi_nor *nor)
+{
+	struct spi_mem_dirmap_info info = {
+		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
+				      SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
+				      SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
+				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
+		.offset = 0,
+		.length = nor->mtd.size,
+	};
+	struct spi_mem_op *op = &info.op_tmpl;
+
+	/* get transfer protocols. */
+	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
+	op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
+	op->dummy.buswidth = op->addr.buswidth;
+	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+
+	/* convert the dummy cycles to the number of bytes */
+	op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
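+	/*
+	 * Illustrative example: 8 dummy clock cycles on a quad (4-bit wide)
+	 * bus become 8 * 4 / 8 = 4 dummy bytes.
+	 */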
+
+	nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
+						       &info);
+	return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
+}
+
+static int spi_nor_create_write_dirmap(struct spi_nor *nor)
+{
+	struct spi_mem_dirmap_info info = {
+		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
+				      SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
+				      SPI_MEM_OP_NO_DUMMY,
+				      SPI_MEM_OP_DATA_OUT(0, NULL, 1)),
+		.offset = 0,
+		.length = nor->mtd.size,
+	};
+	struct spi_mem_op *op = &info.op_tmpl;
+
+	/* get transfer protocols. */
+	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
+	op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
+	op->dummy.buswidth = op->addr.buswidth;
+	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
+	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+		op->addr.nbytes = 0;
+
+	nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
+						       &info);
+	return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
+}
+
+static int spi_nor_probe(struct spi_mem *spimem)
+{
+	struct spi_device *spi = spimem->spi;
+	struct flash_platform_data *data = dev_get_platdata(&spi->dev);
+	struct spi_nor *nor;
+	/*
+	 * Enable all caps by default. The core will mask them after
+	 * checking what's really supported using spi_mem_supports_op().
+	 */
+	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
+	char *flash_name;
+	int ret;
+
+	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
+	if (!nor)
+		return -ENOMEM;
+
+	nor->spimem = spimem;
+	nor->dev = &spi->dev;
+	spi_nor_set_flash_node(nor, spi->dev.of_node);
+
+	spi_mem_set_drvdata(spimem, nor);
+
+	if (data && data->name)
+		nor->mtd.name = data->name;
+
+	if (!nor->mtd.name)
+		nor->mtd.name = spi_mem_get_name(spimem);
+
+	/*
+	 * For some (historical?) reason many platforms provide two different
+	 * names in flash_platform_data: "name" and "type". Quite often name is
+	 * set to "m25p80" and then "type" provides a real chip name.
+	 * If that's the case, respect "type" and ignore the "name".
+	 */
+	if (data && data->type)
+		flash_name = data->type;
+	else if (!strcmp(spi->modalias, "spi-nor"))
+		flash_name = NULL; /* auto-detect */
+	else
+		flash_name = spi->modalias;
+
+	ret = spi_nor_scan(nor, flash_name, &hwcaps);
+	if (ret)
+		return ret;
+
+	/*
+	 * None of the existing parts have > 512B pages, but let's play safe
+	 * and add this logic so that if anyone ever adds support for such
+	 * a NOR we don't end up with buffer overflows.
+	 */
+	if (nor->page_size > PAGE_SIZE) {
+		nor->bouncebuf_size = nor->page_size;
+		devm_kfree(nor->dev, nor->bouncebuf);
+		nor->bouncebuf = devm_kmalloc(nor->dev,
+					      nor->bouncebuf_size,
+					      GFP_KERNEL);
+		if (!nor->bouncebuf)
+			return -ENOMEM;
+	}
+
+	ret = spi_nor_create_read_dirmap(nor);
+	if (ret)
+		return ret;
+
+	ret = spi_nor_create_write_dirmap(nor);
+	if (ret)
+		return ret;
+
+	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
+				   data ? data->nr_parts : 0);
+}
+
+static int spi_nor_remove(struct spi_mem *spimem)
+{
+	struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+	spi_nor_restore(nor);
+
+	/* Clean up MTD stuff. */
+	return mtd_device_unregister(&nor->mtd);
+}
+
+static void spi_nor_shutdown(struct spi_mem *spimem)
+{
+	struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+	spi_nor_restore(nor);
+}
+
+/*
+ * Do NOT add to this array without reading the following:
+ *
+ * Historically, many flash devices are bound to this driver by their name. But
+ * since most of these flashes are compatible to some extent, and their
+ * differences can often be distinguished by the JEDEC read-ID command, we
+ * encourage new users to add support to the spi-nor library, and simply bind
+ * against a generic string here (e.g., "jedec,spi-nor").
+ *
+ * Many flash names are kept here in this list (as well as in spi-nor.c) to
+ * keep them available as module aliases for existing platforms.
+ */
+static const struct spi_device_id spi_nor_dev_ids[] = {
+	/*
+	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
+	 * hack around the fact that the SPI core does not provide uevent
+	 * matching for .of_match_table
+	 */
+	{"spi-nor"},
+
+	/*
+	 * Entries not used in DTs that should be safe to drop after replacing
+	 * them with "spi-nor" in platform data.
+	 */
+	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},
+
+	/*
+	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
+	 * should be kept for backward compatibility.
+	 */
+	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
+	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
+	{"mx25l25635e"},{"mx66l51235l"},
+	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
+	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
+	{"s25fl064k"},
+	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
+	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
+	{"m25p64"},	{"m25p128"},
+	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
+	{"w25q80bl"},	{"w25q128"},	{"w25q256"},
+
+	/* Flashes that can't be detected using JEDEC */
+	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
+	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
+	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},
+
+	/* Everspin MRAMs (non-JEDEC) */
+	{ "mr25h128" }, /* 128 Kib, 40 MHz */
+	{ "mr25h256" }, /* 256 Kib, 40 MHz */
+	{ "mr25h10" },  /*   1 Mib, 40 MHz */
+	{ "mr25h40" },  /*   4 Mib, 40 MHz */
+
+	{ },
+};
+MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
+
+static const struct of_device_id spi_nor_of_table[] = {
+	/*
+	 * Generic compatibility for SPI NOR that can be identified by the
+	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
+	 */
+	{ .compatible = "jedec,spi-nor" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, spi_nor_of_table);
+
+/*
+ * REVISIT: many of these chips have deep power-down modes, which
+ * should clearly be entered on suspend() to minimize power use.
+ * And also when they're otherwise idle...
+ */
+static struct spi_mem_driver spi_nor_driver = {
+	.spidrv = {
+		.driver = {
+			.name = "spi-nor",
+			.of_match_table = spi_nor_of_table,
+		},
+		.id_table = spi_nor_dev_ids,
+	},
+	.probe = spi_nor_probe,
+	.remove = spi_nor_remove,
+	.shutdown = spi_nor_shutdown,
+};
+module_spi_mem_driver(spi_nor_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+MODULE_AUTHOR("Mike Lavender");
+MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
new file mode 100644
index 0000000..6f62ee8
--- /dev/null
+++ b/drivers/mtd/spi-nor/core.h
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#ifndef __LINUX_MTD_SPI_NOR_INTERNAL_H
+#define __LINUX_MTD_SPI_NOR_INTERNAL_H
+
+#include "sfdp.h"
+
+#define SPI_NOR_MAX_ID_LEN	6
+
+enum spi_nor_option_flags {
+	SNOR_F_USE_FSR		= BIT(0),
+	SNOR_F_HAS_SR_TB	= BIT(1),
+	SNOR_F_NO_OP_CHIP_ERASE	= BIT(2),
+	SNOR_F_READY_XSR_RDY	= BIT(3),
+	SNOR_F_USE_CLSR		= BIT(4),
+	SNOR_F_BROKEN_RESET	= BIT(5),
+	SNOR_F_4B_OPCODES	= BIT(6),
+	SNOR_F_HAS_4BAIT	= BIT(7),
+	SNOR_F_HAS_LOCK		= BIT(8),
+	SNOR_F_HAS_16BIT_SR	= BIT(9),
+	SNOR_F_NO_READ_CR	= BIT(10),
+	SNOR_F_HAS_SR_TB_BIT6	= BIT(11),
+	SNOR_F_HAS_4BIT_BP      = BIT(12),
+	SNOR_F_HAS_SR_BP3_BIT6  = BIT(13),
+};
+
+struct spi_nor_read_command {
+	u8			num_mode_clocks;
+	u8			num_wait_states;
+	u8			opcode;
+	enum spi_nor_protocol	proto;
+};
+
+struct spi_nor_pp_command {
+	u8			opcode;
+	enum spi_nor_protocol	proto;
+};
+
+enum spi_nor_read_command_index {
+	SNOR_CMD_READ,
+	SNOR_CMD_READ_FAST,
+	SNOR_CMD_READ_1_1_1_DTR,
+
+	/* Dual SPI */
+	SNOR_CMD_READ_1_1_2,
+	SNOR_CMD_READ_1_2_2,
+	SNOR_CMD_READ_2_2_2,
+	SNOR_CMD_READ_1_2_2_DTR,
+
+	/* Quad SPI */
+	SNOR_CMD_READ_1_1_4,
+	SNOR_CMD_READ_1_4_4,
+	SNOR_CMD_READ_4_4_4,
+	SNOR_CMD_READ_1_4_4_DTR,
+
+	/* Octal SPI */
+	SNOR_CMD_READ_1_1_8,
+	SNOR_CMD_READ_1_8_8,
+	SNOR_CMD_READ_8_8_8,
+	SNOR_CMD_READ_1_8_8_DTR,
+
+	SNOR_CMD_READ_MAX
+};
+
+enum spi_nor_pp_command_index {
+	SNOR_CMD_PP,
+
+	/* Quad SPI */
+	SNOR_CMD_PP_1_1_4,
+	SNOR_CMD_PP_1_4_4,
+	SNOR_CMD_PP_4_4_4,
+
+	/* Octal SPI */
+	SNOR_CMD_PP_1_1_8,
+	SNOR_CMD_PP_1_8_8,
+	SNOR_CMD_PP_8_8_8,
+
+	SNOR_CMD_PP_MAX
+};
+
+/**
+ * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type
+ * @size:		the size of the sector/block erased by the erase type.
+ *			JEDEC JESD216B imposes erase sizes to be a power of 2.
+ * @size_shift:		@size is a power of 2, the shift is stored in
+ *			@size_shift.
+ * @size_mask:		the size mask based on @size_shift.
+ * @opcode:		the SPI command op code to erase the sector/block.
+ * @idx:		Erase Type index as sorted in the Basic Flash Parameter
+ *			Table. It will be used to synchronize the supported
+ *			Erase Types with the ones identified in the SFDP
+ *			optional tables.
+ */
+struct spi_nor_erase_type {
+	u32	size;
+	u32	size_shift;
+	u32	size_mask;
+	u8	opcode;
+	u8	idx;
+};
+
+/**
+ * struct spi_nor_erase_command - Used for non-uniform erases
+ * The structure is used to describe a list of erase commands to be executed
+ * once we validate that the erase can be performed. The elements in the list
+ * are run-length encoded.
+ * @list:		for inclusion into the list of erase commands.
+ * @count:		how many times the same erase command should be
+ *			consecutively used.
+ * @size:		the size of the sector/block erased by the command.
+ * @opcode:		the SPI command op code to erase the sector/block.
+ */
+struct spi_nor_erase_command {
+	struct list_head	list;
+	u32			count;
+	u32			size;
+	u8			opcode;
+};
+
+/**
+ * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
+ * @offset:		the offset in the data array of the erase region start.
+ *			The least significant bits are used as a bitmask that
+ *			encodes whether this region is overlaid, whether it is
+ *			the last one in the SPI NOR flash memory, and which
+ *			erase commands are supported inside this region.
+ *			The erase types are sorted in ascending order with the
+ *			smallest Erase Type size being at BIT(0).
+ * @size:		the size of the region in bytes.
+ */
+struct spi_nor_erase_region {
+	u64		offset;
+	u64		size;
+};
+
+#define SNOR_ERASE_TYPE_MAX	4
+#define SNOR_ERASE_TYPE_MASK	GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
+
+#define SNOR_LAST_REGION	BIT(4)
+#define SNOR_OVERLAID_REGION	BIT(5)
+
+#define SNOR_ERASE_FLAGS_MAX	6
+#define SNOR_ERASE_FLAGS_MASK	GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
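+
+/*
+ * Illustrative example: a last region starting at byte address 0 that
+ * supports erase types 0 and 1 stores
+ * offset = (BIT(0) | BIT(1)) | SNOR_LAST_REGION; the byte address is
+ * recovered with (offset & ~SNOR_ERASE_FLAGS_MASK).
+ */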
+
+/**
+ * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
+ * @regions:		array of erase regions. The regions are consecutive in
+ *			address space. Walking through the regions is done
+ *			incrementally.
+ * @uniform_region:	a pre-allocated erase region for SPI NOR with a uniform
+ *			sector size (legacy implementation).
+ * @erase_type:		an array of erase types shared by all the regions.
+ *			The erase types are sorted in ascending order, with the
+ *			smallest Erase Type size being the first member in the
+ *			erase_type array.
+ * @uniform_erase_type:	bitmask encoding erase types that can erase the
+ *			entire memory. This member is completed at init by
+ *			uniform and non-uniform SPI NOR flash memories if they
+ *			support at least one erase type that can erase the
+ *			entire memory.
+ */
+struct spi_nor_erase_map {
+	struct spi_nor_erase_region	*regions;
+	struct spi_nor_erase_region	uniform_region;
+	struct spi_nor_erase_type	erase_type[SNOR_ERASE_TYPE_MAX];
+	u8				uniform_erase_type;
+};
+
+/**
+ * struct spi_nor_locking_ops - SPI NOR locking methods
+ * @lock:	lock a region of the SPI NOR.
+ * @unlock:	unlock a region of the SPI NOR.
+ * @is_locked:	check if a region of the SPI NOR is completely locked
+ */
+struct spi_nor_locking_ops {
+	int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+	int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+	int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+};
+
+/**
+ * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings.
+ * Includes legacy flash parameters and settings that can be overwritten
+ * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216
+ * Serial Flash Discoverable Parameters (SFDP) tables.
+ *
+ * @size:		the flash memory density in bytes.
+ * @page_size:		the page size of the SPI NOR flash memory.
+ * @hwcaps:		describes the read and page program hardware
+ *			capabilities.
+ * @reads:		read capabilities ordered by priority: the higher index
+ *                      in the array, the higher priority.
+ * @page_programs:	page program capabilities ordered by priority: the
+ *                      higher index in the array, the higher priority.
+ * @erase_map:		the erase map parsed from the SFDP Sector Map Parameter
+ *                      Table.
+ * @quad_enable:	enables SPI NOR quad mode.
+ * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
+ * @convert_addr:	converts an absolute address into something the flash
+ *                      will understand. Particularly useful when pagesize is
+ *                      not a power-of-2.
+ * @setup:              configures the SPI NOR memory. Useful for SPI NOR
+ *                      flashes that have peculiarities to the SPI NOR standard
+ *                      e.g. different opcodes, specific address calculation,
+ *                      page size, etc.
+ * @locking_ops:	SPI NOR locking methods.
+ */
+struct spi_nor_flash_parameter {
+	u64				size;
+	u32				page_size;
+
+	struct spi_nor_hwcaps		hwcaps;
+	struct spi_nor_read_command	reads[SNOR_CMD_READ_MAX];
+	struct spi_nor_pp_command	page_programs[SNOR_CMD_PP_MAX];
+
+	struct spi_nor_erase_map        erase_map;
+
+	int (*quad_enable)(struct spi_nor *nor);
+	int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
+	u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
+	int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
+
+	const struct spi_nor_locking_ops *locking_ops;
+};
+
+/**
+ * struct spi_nor_fixups - SPI NOR fixup hooks
+ * @default_init: called after default flash parameters init. Used to tweak
+ *                flash parameters when information provided by the flash_info
+ *                table is incomplete or wrong.
+ * @post_bfpt: called after the BFPT table has been parsed
+ * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
+ *             that do not support RDSFDP). Typically used to tweak various
+ *             parameters that could not be extracted by other means (i.e.
+ *             when the information provided by the SFDP/flash_info tables
+ *             is incomplete or wrong).
+ *
+ * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
+ * table is broken or not available.
+ */
+struct spi_nor_fixups {
+	void (*default_init)(struct spi_nor *nor);
+	int (*post_bfpt)(struct spi_nor *nor,
+			 const struct sfdp_parameter_header *bfpt_header,
+			 const struct sfdp_bfpt *bfpt,
+			 struct spi_nor_flash_parameter *params);
+	void (*post_sfdp)(struct spi_nor *nor);
+};
+
+struct flash_info {
+	char		*name;
+
+	/*
+	 * This array stores the ID bytes.
+	 * The first three bytes are the JEDEC ID.
+	 * JEDEC ID zero means "no ID" (mostly older chips).
+	 */
+	u8		id[SPI_NOR_MAX_ID_LEN];
+	u8		id_len;
+
+	/* The size listed here is what works with SPINOR_OP_SE, which isn't
+	 * necessarily called a "sector" by the vendor.
+	 */
+	unsigned	sector_size;
+	u16		n_sectors;
+
+	u16		page_size;
+	u16		addr_width;
+
+	u32		flags;
+#define SECT_4K			BIT(0)	/* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE	BIT(1)	/* No erase command needed */
+#define SST_WRITE		BIT(2)	/* use SST byte programming */
+#define SPI_NOR_NO_FR		BIT(3)	/* Can't do fastread */
+#define SECT_4K_PMC		BIT(4)	/* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ	BIT(5)	/* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ	BIT(6)	/* Flash supports Quad Read */
+#define USE_FSR			BIT(7)	/* use flag status register */
+#define SPI_NOR_HAS_LOCK	BIT(8)	/* Flash supports lock/unlock via SR */
+#define SPI_NOR_HAS_TB		BIT(9)	/*
+					 * Flash SR has Top/Bottom (TB) protect
+					 * bit. Must be used with
+					 * SPI_NOR_HAS_LOCK.
+					 */
+#define SPI_NOR_XSR_RDY		BIT(10)	/*
+					 * S3AN flashes have specific opcode to
+					 * read the status register.
+					 */
+#define SPI_NOR_4B_OPCODES	BIT(11)	/*
+					 * Use dedicated 4byte address op codes
+					 * to support memory size above 128Mib.
+					 */
+#define NO_CHIP_ERASE		BIT(12) /* Chip does not support chip erase */
+#define SPI_NOR_SKIP_SFDP	BIT(13)	/* Skip parsing of SFDP tables */
+#define USE_CLSR		BIT(14)	/* use CLSR command */
+#define SPI_NOR_OCTAL_READ	BIT(15)	/* Flash supports Octal Read */
+#define SPI_NOR_TB_SR_BIT6	BIT(16)	/*
+					 * Top/Bottom (TB) is bit 6 of
+					 * status register. Must be used with
+					 * SPI_NOR_HAS_TB.
+					 */
+#define SPI_NOR_4BIT_BP		BIT(17) /*
+					 * Flash SR has 4 bit fields (BP0-3)
+					 * for block protection.
+					 */
+#define SPI_NOR_BP3_SR_BIT6	BIT(18) /*
+					 * BP3 is bit 6 of status register.
+					 * Must be used with SPI_NOR_4BIT_BP.
+					 */
+
+	/* Part specific fixup hooks. */
+	const struct spi_nor_fixups *fixups;
+};
+
+/* Used when the "_ext_id" is two bytes at most */
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
+		.id = {							\
+			((_jedec_id) >> 16) & 0xff,			\
+			((_jedec_id) >> 8) & 0xff,			\
+			(_jedec_id) & 0xff,				\
+			((_ext_id) >> 8) & 0xff,			\
+			(_ext_id) & 0xff,				\
+			},						\
+		.id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),	\
+		.sector_size = (_sector_size),				\
+		.n_sectors = (_n_sectors),				\
+		.page_size = 256,					\
+		.flags = (_flags),
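+
+/*
+ * Illustrative example: INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) expands
+ * to id = { 0x1c, 0x31, 0x16 }, id_len = 3 (no extended ID bytes) and a
+ * 64 * 64 KiB = 4 MiB part.
+ */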
+
+#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
+		.id = {							\
+			((_jedec_id) >> 16) & 0xff,			\
+			((_jedec_id) >> 8) & 0xff,			\
+			(_jedec_id) & 0xff,				\
+			((_ext_id) >> 16) & 0xff,			\
+			((_ext_id) >> 8) & 0xff,			\
+			(_ext_id) & 0xff,				\
+			},						\
+		.id_len = 6,						\
+		.sector_size = (_sector_size),				\
+		.n_sectors = (_n_sectors),				\
+		.page_size = 256,					\
+		.flags = (_flags),
+
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)	\
+		.sector_size = (_sector_size),				\
+		.n_sectors = (_n_sectors),				\
+		.page_size = (_page_size),				\
+		.addr_width = (_addr_width),				\
+		.flags = (_flags),
+
+#define S3AN_INFO(_jedec_id, _n_sectors, _page_size)			\
+		.id = {							\
+			((_jedec_id) >> 16) & 0xff,			\
+			((_jedec_id) >> 8) & 0xff,			\
+			(_jedec_id) & 0xff				\
+			},						\
+		.id_len = 3,						\
+		.sector_size = (8*_page_size),				\
+		.n_sectors = (_n_sectors),				\
+		.page_size = _page_size,				\
+		.addr_width = 3,					\
+		.flags = SPI_NOR_NO_FR | SPI_NOR_XSR_RDY,
+
+/**
+ * struct spi_nor_manufacturer - SPI NOR manufacturer object
+ * @name: manufacturer name
+ * @parts: array of parts supported by this manufacturer
+ * @nparts: number of entries in the parts array
+ * @fixups: hooks called at various points in time during spi_nor_scan()
+ */
+struct spi_nor_manufacturer {
+	const char *name;
+	const struct flash_info *parts;
+	unsigned int nparts;
+	const struct spi_nor_fixups *fixups;
+};
+
+/* Manufacturer drivers. */
+extern const struct spi_nor_manufacturer spi_nor_atmel;
+extern const struct spi_nor_manufacturer spi_nor_catalyst;
+extern const struct spi_nor_manufacturer spi_nor_eon;
+extern const struct spi_nor_manufacturer spi_nor_esmt;
+extern const struct spi_nor_manufacturer spi_nor_everspin;
+extern const struct spi_nor_manufacturer spi_nor_fujitsu;
+extern const struct spi_nor_manufacturer spi_nor_gigadevice;
+extern const struct spi_nor_manufacturer spi_nor_intel;
+extern const struct spi_nor_manufacturer spi_nor_issi;
+extern const struct spi_nor_manufacturer spi_nor_macronix;
+extern const struct spi_nor_manufacturer spi_nor_micron;
+extern const struct spi_nor_manufacturer spi_nor_st;
+extern const struct spi_nor_manufacturer spi_nor_spansion;
+extern const struct spi_nor_manufacturer spi_nor_sst;
+extern const struct spi_nor_manufacturer spi_nor_winbond;
+extern const struct spi_nor_manufacturer spi_nor_xilinx;
+extern const struct spi_nor_manufacturer spi_nor_xmc;
+
+int spi_nor_write_enable(struct spi_nor *nor);
+int spi_nor_write_disable(struct spi_nor *nor);
+int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable);
+int spi_nor_write_ear(struct spi_nor *nor, u8 ear);
+int spi_nor_wait_till_ready(struct spi_nor *nor);
+int spi_nor_lock_and_prep(struct spi_nor *nor);
+void spi_nor_unlock_and_unprep(struct spi_nor *nor);
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor);
+int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1);
+
+int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr);
+ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
+			  u8 *buf);
+ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+			   const u8 *buf);
+
+int spi_nor_hwcaps_read2cmd(u32 hwcaps);
+u8 spi_nor_convert_3to4_read(u8 opcode);
+void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
+			     enum spi_nor_protocol proto);
+
+void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+			    u8 opcode);
+struct spi_nor_erase_region *
+spi_nor_region_next(struct spi_nor_erase_region *region);
+void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+				    u8 erase_mask, u64 flash_size);
+
+int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
+			     const struct sfdp_parameter_header *bfpt_header,
+			     const struct sfdp_bfpt *bfpt,
+			     struct spi_nor_flash_parameter *params);
+
+static struct spi_nor __maybe_unused *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+	return mtd->priv;
+}
+
+#endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
diff --git a/drivers/mtd/spi-nor/eon.c b/drivers/mtd/spi-nor/eon.c
new file mode 100644
index 0000000..ddb8e36
--- /dev/null
+++ b/drivers/mtd/spi-nor/eon.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info eon_parts[] = {
+	/* EON -- en25xxx */
+	{ "en25f32",    INFO(0x1c3116, 0, 64 * 1024,   64, SECT_4K) },
+	{ "en25p32",    INFO(0x1c2016, 0, 64 * 1024,   64, 0) },
+	{ "en25q32b",   INFO(0x1c3016, 0, 64 * 1024,   64, 0) },
+	{ "en25p64",    INFO(0x1c2017, 0, 64 * 1024,  128, 0) },
+	{ "en25q64",    INFO(0x1c3017, 0, 64 * 1024,  128, SECT_4K) },
+	{ "en25q80a",   INFO(0x1c3014, 0, 64 * 1024,   16,
+			     SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "en25qh16",   INFO(0x1c7015, 0, 64 * 1024,   32,
+			     SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "en25qh32",   INFO(0x1c7016, 0, 64 * 1024,   64, 0) },
+	{ "en25qh64",   INFO(0x1c7017, 0, 64 * 1024,  128,
+			     SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "en25qh128",  INFO(0x1c7018, 0, 64 * 1024,  256, 0) },
+	{ "en25qh256",  INFO(0x1c7019, 0, 64 * 1024,  512, 0) },
+	{ "en25s64",	INFO(0x1c3817, 0, 64 * 1024,  128, SECT_4K) },
+};
+
+const struct spi_nor_manufacturer spi_nor_eon = {
+	.name = "eon",
+	.parts = eon_parts,
+	.nparts = ARRAY_SIZE(eon_parts),
+};
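A quick sanity check on these geometries: capacity is sector_size * n_sectors, so the "32" in en25f32 reads as 32 Mibit. A standalone sketch:

	#include <assert.h>

	int main(void)
	{
		/* en25f32: 64 sectors of 64 KiB -> 4 MiB -> 32 Mibit. */
		unsigned long long bytes = 64ULL * 1024 * 64;

		assert(bytes == 4ULL * 1024 * 1024);
		assert(bytes * 8 == 32ULL * 1024 * 1024);
		return 0;
	}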
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
new file mode 100644
index 0000000..c931700
--- /dev/null
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info esmt_parts[] = {
+	/* ESMT */
+	{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64,
+			   SECT_4K | SPI_NOR_HAS_LOCK) },
+	{ "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64,
+			   SECT_4K | SPI_NOR_HAS_LOCK) },
+	{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128,
+			   SECT_4K | SPI_NOR_HAS_LOCK) },
+};
+
+const struct spi_nor_manufacturer spi_nor_esmt = {
+	.name = "esmt",
+	.parts = esmt_parts,
+	.nparts = ARRAY_SIZE(esmt_parts),
+};
diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c
new file mode 100644
index 0000000..04a177a
--- /dev/null
+++ b/drivers/mtd/spi-nor/everspin.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info everspin_parts[] = {
+	/* Everspin */
+	{ "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2,
+				 SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+	{ "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2,
+				 SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+	{ "mr25h10",  CAT25_INFO(128 * 1024, 1, 256, 3,
+				 SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+	{ "mr25h40",  CAT25_INFO(512 * 1024, 1, 256, 3,
+				 SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+};
+
+const struct spi_nor_manufacturer spi_nor_everspin = {
+	.name = "everspin",
+	.parts = everspin_parts,
+	.nparts = ARRAY_SIZE(everspin_parts),
+};
diff --git a/drivers/mtd/spi-nor/fujitsu.c b/drivers/mtd/spi-nor/fujitsu.c
new file mode 100644
index 0000000..e385d93
--- /dev/null
+++ b/drivers/mtd/spi-nor/fujitsu.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info fujitsu_parts[] = {
+	/* Fujitsu */
+	{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
+};
+
+const struct spi_nor_manufacturer spi_nor_fujitsu = {
+	.name = "fujitsu",
+	.parts = fujitsu_parts,
+	.nparts = ARRAY_SIZE(fujitsu_parts),
+};
diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c
new file mode 100644
index 0000000..447d84b
--- /dev/null
+++ b/drivers/mtd/spi-nor/gigadevice.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static void gd25q256_default_init(struct spi_nor *nor)
+{
+	/*
+	 * Some manufacturers, like GigaDevice, may use different
+	 * bits to set QE on different memories, so the MFR alone
+	 * can't indicate the quad_enable method for this case; we
+	 * need to set it in the default_init fixup hook.
+	 */
+	nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
+
+static struct spi_nor_fixups gd25q256_fixups = {
+	.default_init = gd25q256_default_init,
+};
+
+static const struct flash_info gigadevice_parts[] = {
+	{ "gd25q16", INFO(0xc84015, 0, 64 * 1024,  32,
+			  SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			  SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64,
+			  SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			  SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
+			   SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			   SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
+			  SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			  SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
+			    SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			    SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			     SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
+			   SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			   SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
+			   SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			   SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK |
+			   SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
+		.fixups = &gd25q256_fixups },
+};
+
+const struct spi_nor_manufacturer spi_nor_gigadevice = {
+	.name = "gigadevice",
+	.parts = gigadevice_parts,
+	.nparts = ARRAY_SIZE(gigadevice_parts),
+};
diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c
new file mode 100644
index 0000000..d8196f1
--- /dev/null
+++ b/drivers/mtd/spi-nor/intel.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info intel_parts[] = {
+	/* Intel/Numonyx -- xxxs33b */
+	{ "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
+	{ "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
+	{ "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) },
+};
+
+static void intel_default_init(struct spi_nor *nor)
+{
+	nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static const struct spi_nor_fixups intel_fixups = {
+	.default_init = intel_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_intel = {
+	.name = "intel",
+	.parts = intel_parts,
+	.nparts = ARRAY_SIZE(intel_parts),
+	.fixups = &intel_fixups,
+};
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
new file mode 100644
index 0000000..ffcb60e
--- /dev/null
+++ b/drivers/mtd/spi-nor/issi.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+is25lp256_post_bfpt_fixups(struct spi_nor *nor,
+			   const struct sfdp_parameter_header *bfpt_header,
+			   const struct sfdp_bfpt *bfpt,
+			   struct spi_nor_flash_parameter *params)
+{
+	/*
+	 * IS25LP256 supports 4B opcodes, but the BFPT advertises a
+	 * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
+	 * Overwrite the address width advertised by the BFPT.
+	 */
+	if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
+		BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
+		nor->addr_width = 4;
+
+	return 0;
+}
+
+static struct spi_nor_fixups is25lp256_fixups = {
+	.post_bfpt = is25lp256_post_bfpt_fixups,
+};
+
+static const struct flash_info issi_parts[] = {
+	/* ISSI */
+	{ "is25cd512",  INFO(0x7f9d20, 0, 32 * 1024,   2, SECT_4K) },
+	{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024,   8,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "is25lp016d", INFO(0x9d6015, 0, 64 * 1024,  32,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "is25lp080d", INFO(0x9d6014, 0, 64 * 1024,  16,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "is25lp032",  INFO(0x9d6016, 0, 64 * 1024,  64,
+			     SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "is25lp064",  INFO(0x9d6017, 0, 64 * 1024, 128,
+			     SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "is25lp128",  INFO(0x9d6018, 0, 64 * 1024, 256,
+			     SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "is25lp256",  INFO(0x9d6019, 0, 64 * 1024, 512,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			     SPI_NOR_4B_OPCODES)
+		.fixups = &is25lp256_fixups },
+	{ "is25wp032",  INFO(0x9d7016, 0, 64 * 1024,  64,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "is25wp064",  INFO(0x9d7017, 0, 64 * 1024, 128,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "is25wp128",  INFO(0x9d7018, 0, 64 * 1024, 256,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
+			    SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			    SPI_NOR_4B_OPCODES)
+		.fixups = &is25lp256_fixups },
+
+	/* PMC */
+	{ "pm25lv512",   INFO(0,        0, 32 * 1024,    2, SECT_4K_PMC) },
+	{ "pm25lv010",   INFO(0,        0, 32 * 1024,    4, SECT_4K_PMC) },
+	{ "pm25lq032",   INFO(0x7f9d46, 0, 64 * 1024,   64, SECT_4K) },
+};
+
+static void issi_default_init(struct spi_nor *nor)
+{
+	nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
+
+static const struct spi_nor_fixups issi_fixups = {
+	.default_init = issi_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_issi = {
+	.name = "issi",
+	.parts = issi_parts,
+	.nparts = ARRAY_SIZE(issi_parts),
+	.fixups = &issi_fixups,
+};
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
new file mode 100644
index 0000000..662b212
--- /dev/null
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
+			    const struct sfdp_parameter_header *bfpt_header,
+			    const struct sfdp_bfpt *bfpt,
+			    struct spi_nor_flash_parameter *params)
+{
+	/*
+	 * MX25L25635F supports 4B opcodes but MX25L25635E does not.
+	 * Unfortunately, Macronix has re-used the same JEDEC ID for both
+	 * variants which prevents us from defining a new entry in the parts
+	 * table.
+	 * We need a way to differentiate MX25L25635E and MX25L25635F, and it
+	 * seems that the F version advertises support for Fast Read 4-4-4 in
+	 * its BFPT table.
+	 */
+	if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
+		nor->flags |= SNOR_F_4B_OPCODES;
+
+	return 0;
+}
+
+static struct spi_nor_fixups mx25l25635_fixups = {
+	.post_bfpt = mx25l25635_post_bfpt_fixups,
+};
+
+static const struct flash_info macronix_parts[] = {
+	/* Macronix */
+	{ "mx25l512e",   INFO(0xc22010, 0, 64 * 1024,   1, SECT_4K) },
+	{ "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
+	{ "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) },
+	{ "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) },
+	{ "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) },
+	{ "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64, SECT_4K) },
+	{ "mx25l3255e",  INFO(0xc29e16, 0, 64 * 1024,  64, SECT_4K) },
+	{ "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
+	{ "mx25u2033e",  INFO(0xc22532, 0, 64 * 1024,   4, SECT_4K) },
+	{ "mx25u3235f",	 INFO(0xc22536, 0, 64 * 1024,  64,
+			      SECT_4K | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ) },
+	{ "mx25u4035",   INFO(0xc22533, 0, 64 * 1024,   8, SECT_4K) },
+	{ "mx25u8035",   INFO(0xc22534, 0, 64 * 1024,  16, SECT_4K) },
+	{ "mx25u6435f",  INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
+	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, SECT_4K) },
+	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+	{ "mx25r1635f",  INFO(0xc22815, 0, 64 * 1024,  32,
+			      SECT_4K | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ) },
+	{ "mx25r3235f",  INFO(0xc22816, 0, 64 * 1024,  64,
+			      SECT_4K | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ) },
+	{ "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
+			      SECT_4K | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ) },
+	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
+			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+		.fixups = &mx25l25635_fixups },
+	{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512,
+			      SECT_4K | SPI_NOR_4B_OPCODES) },
+	{ "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024,
+			      SECT_4K | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+	{ "mx25v8035f",  INFO(0xc22314, 0, 64 * 1024,  16,
+			      SECT_4K | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ) },
+	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
+			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			      SPI_NOR_4B_OPCODES) },
+	{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024,
+			      SECT_4K | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+	{ "mx66l1g45g",  INFO(0xc2201b, 0, 64 * 1024, 2048,
+			      SECT_4K | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ) },
+	{ "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048,
+			      SPI_NOR_QUAD_READ) },
+	{ "mx66u2g45g",	 INFO(0xc2253c, 0, 64 * 1024, 4096,
+			      SECT_4K | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+};
+
+static void macronix_default_init(struct spi_nor *nor)
+{
+	nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+	nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups macronix_fixups = {
+	.default_init = macronix_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_macronix = {
+	.name = "macronix",
+	.parts = macronix_parts,
+	.nparts = ARRAY_SIZE(macronix_parts),
+	.fixups = &macronix_fixups,
+};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
new file mode 100644
index 0000000..ef36950
--- /dev/null
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info micron_parts[] = {
+	{ "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
+			       SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+			       SPI_NOR_4B_OPCODES) },
+	{ "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
+			    SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+			    SPI_NOR_4B_OPCODES) },
+};
+
+static const struct flash_info st_parts[] = {
+	{ "n25q016a",	 INFO(0x20bb15, 0, 64 * 1024,   32,
+			      SECT_4K | SPI_NOR_QUAD_READ) },
+	{ "n25q032",	 INFO(0x20ba16, 0, 64 * 1024,   64,
+			      SPI_NOR_QUAD_READ) },
+	{ "n25q032a",	 INFO(0x20bb16, 0, 64 * 1024,   64,
+			      SPI_NOR_QUAD_READ) },
+	{ "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128,
+			      SECT_4K | SPI_NOR_QUAD_READ) },
+	{ "n25q064a",    INFO(0x20bb17, 0, 64 * 1024,  128,
+			      SECT_4K | SPI_NOR_QUAD_READ) },
+	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256,
+			      SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+			      SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+			      SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256,
+			      SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+	{ "mt25ql256a",  INFO6(0x20ba19, 0x104400, 64 * 1024,  512,
+			       SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+			       SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+	{ "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K |
+			      USE_FSR | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ) },
+	{ "mt25qu256a",  INFO6(0x20bb19, 0x104400, 64 * 1024,  512,
+			       SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+			       SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+	{ "n25q256ax1",  INFO(0x20bb19, 0, 64 * 1024,  512,
+			      SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+	{ "mt25ql512a",  INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
+			       SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+			       SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+	{ "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024,
+			      SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+			      SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+			      SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+	{ "mt25qu512a",  INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
+			       SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+			       SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+	{ "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024,
+			      SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+			      SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+			      SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+	{ "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048,
+			      SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+			      SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+			      SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6 |
+			      NO_CHIP_ERASE) },
+	{ "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048,
+			      SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+			      NO_CHIP_ERASE) },
+	{ "mt25ql02g",   INFO(0x20ba22, 0, 64 * 1024, 4096,
+			      SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+			      NO_CHIP_ERASE) },
+	{ "mt25qu02g",   INFO(0x20bb22, 0, 64 * 1024, 4096,
+			      SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+
+	{ "m25p05",  INFO(0x202010,  0,  32 * 1024,   2, 0) },
+	{ "m25p10",  INFO(0x202011,  0,  32 * 1024,   4, 0) },
+	{ "m25p20",  INFO(0x202012,  0,  64 * 1024,   4, 0) },
+	{ "m25p40",  INFO(0x202013,  0,  64 * 1024,   8, 0) },
+	{ "m25p80",  INFO(0x202014,  0,  64 * 1024,  16, 0) },
+	{ "m25p16",  INFO(0x202015,  0,  64 * 1024,  32, 0) },
+	{ "m25p32",  INFO(0x202016,  0,  64 * 1024,  64, 0) },
+	{ "m25p64",  INFO(0x202017,  0,  64 * 1024, 128, 0) },
+	{ "m25p128", INFO(0x202018,  0, 256 * 1024,  64, 0) },
+
+	{ "m25p05-nonjedec",  INFO(0, 0,  32 * 1024,   2, 0) },
+	{ "m25p10-nonjedec",  INFO(0, 0,  32 * 1024,   4, 0) },
+	{ "m25p20-nonjedec",  INFO(0, 0,  64 * 1024,   4, 0) },
+	{ "m25p40-nonjedec",  INFO(0, 0,  64 * 1024,   8, 0) },
+	{ "m25p80-nonjedec",  INFO(0, 0,  64 * 1024,  16, 0) },
+	{ "m25p16-nonjedec",  INFO(0, 0,  64 * 1024,  32, 0) },
+	{ "m25p32-nonjedec",  INFO(0, 0,  64 * 1024,  64, 0) },
+	{ "m25p64-nonjedec",  INFO(0, 0,  64 * 1024, 128, 0) },
+	{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024,  64, 0) },
+
+	{ "m45pe10", INFO(0x204011,  0, 64 * 1024,    2, 0) },
+	{ "m45pe80", INFO(0x204014,  0, 64 * 1024,   16, 0) },
+	{ "m45pe16", INFO(0x204015,  0, 64 * 1024,   32, 0) },
+
+	{ "m25pe20", INFO(0x208012,  0, 64 * 1024,  4,       0) },
+	{ "m25pe80", INFO(0x208014,  0, 64 * 1024, 16,       0) },
+	{ "m25pe16", INFO(0x208015,  0, 64 * 1024, 32, SECT_4K) },
+
+	{ "m25px16",    INFO(0x207115,  0, 64 * 1024, 32, SECT_4K) },
+	{ "m25px32",    INFO(0x207116,  0, 64 * 1024, 64, SECT_4K) },
+	{ "m25px32-s0", INFO(0x207316,  0, 64 * 1024, 64, SECT_4K) },
+	{ "m25px32-s1", INFO(0x206316,  0, 64 * 1024, 64, SECT_4K) },
+	{ "m25px64",    INFO(0x207117,  0, 64 * 1024, 128, 0) },
+	{ "m25px80",    INFO(0x207114,  0, 64 * 1024, 16, 0) },
+};
+
+/**
+ * st_micron_set_4byte_addr_mode() - Set 4-byte address mode for ST and Micron
+ * flashes.
+ * @nor:	pointer to 'struct spi_nor'.
+ * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
+ *		address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int st_micron_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+	int ret;
+
+	ret = spi_nor_write_enable(nor);
+	if (ret)
+		return ret;
+
+	ret = spi_nor_set_4byte_addr_mode(nor, enable);
+	if (ret)
+		return ret;
+
+	return spi_nor_write_disable(nor);
+}
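A sketch of the resulting opcode sequence on the bus, assuming the usual SPI NOR opcodes (WREN 06h, EN4B B7h / EX4B E9h, WRDI 04h) are what the core helpers emit:

	/*
	 * 06h		spi_nor_write_enable()
	 * B7h or E9h	spi_nor_set_4byte_addr_mode(nor, enable)
	 * 04h		spi_nor_write_disable()
	 *
	 * ST/Micron parts only honor EN4B/EX4B while the write enable
	 * latch is set, hence the WREN/WRDI bracketing.
	 */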
+
+static void micron_st_default_init(struct spi_nor *nor)
+{
+	nor->flags |= SNOR_F_HAS_LOCK;
+	nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+	nor->params->quad_enable = NULL;
+	nor->params->set_4byte_addr_mode = st_micron_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups micron_st_fixups = {
+	.default_init = micron_st_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_micron = {
+	.name = "micron",
+	.parts = micron_parts,
+	.nparts = ARRAY_SIZE(micron_parts),
+	.fixups = &micron_st_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_st = {
+	.name = "st",
+	.parts = st_parts,
+	.nparts = ARRAY_SIZE(st_parts),
+	.fixups = &micron_st_fixups,
+};
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
deleted file mode 100644
index 34db01a..0000000
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ /dev/null
@@ -1,562 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015 MediaTek Inc.
- * Author: Bayi Cheng <bayi.cheng@mediatek.com>
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/ioport.h>
-#include <linux/math64.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/spi-nor.h>
-
-#define MTK_NOR_CMD_REG			0x00
-#define MTK_NOR_CNT_REG			0x04
-#define MTK_NOR_RDSR_REG		0x08
-#define MTK_NOR_RDATA_REG		0x0c
-#define MTK_NOR_RADR0_REG		0x10
-#define MTK_NOR_RADR1_REG		0x14
-#define MTK_NOR_RADR2_REG		0x18
-#define MTK_NOR_WDATA_REG		0x1c
-#define MTK_NOR_PRGDATA0_REG		0x20
-#define MTK_NOR_PRGDATA1_REG		0x24
-#define MTK_NOR_PRGDATA2_REG		0x28
-#define MTK_NOR_PRGDATA3_REG		0x2c
-#define MTK_NOR_PRGDATA4_REG		0x30
-#define MTK_NOR_PRGDATA5_REG		0x34
-#define MTK_NOR_SHREG0_REG		0x38
-#define MTK_NOR_SHREG1_REG		0x3c
-#define MTK_NOR_SHREG2_REG		0x40
-#define MTK_NOR_SHREG3_REG		0x44
-#define MTK_NOR_SHREG4_REG		0x48
-#define MTK_NOR_SHREG5_REG		0x4c
-#define MTK_NOR_SHREG6_REG		0x50
-#define MTK_NOR_SHREG7_REG		0x54
-#define MTK_NOR_SHREG8_REG		0x58
-#define MTK_NOR_SHREG9_REG		0x5c
-#define MTK_NOR_CFG1_REG		0x60
-#define MTK_NOR_CFG2_REG		0x64
-#define MTK_NOR_CFG3_REG		0x68
-#define MTK_NOR_STATUS0_REG		0x70
-#define MTK_NOR_STATUS1_REG		0x74
-#define MTK_NOR_STATUS2_REG		0x78
-#define MTK_NOR_STATUS3_REG		0x7c
-#define MTK_NOR_FLHCFG_REG		0x84
-#define MTK_NOR_TIME_REG		0x94
-#define MTK_NOR_PP_DATA_REG		0x98
-#define MTK_NOR_PREBUF_STUS_REG		0x9c
-#define MTK_NOR_DELSEL0_REG		0xa0
-#define MTK_NOR_DELSEL1_REG		0xa4
-#define MTK_NOR_INTRSTUS_REG		0xa8
-#define MTK_NOR_INTREN_REG		0xac
-#define MTK_NOR_CHKSUM_CTL_REG		0xb8
-#define MTK_NOR_CHKSUM_REG		0xbc
-#define MTK_NOR_CMD2_REG		0xc0
-#define MTK_NOR_WRPROT_REG		0xc4
-#define MTK_NOR_RADR3_REG		0xc8
-#define MTK_NOR_DUAL_REG		0xcc
-#define MTK_NOR_DELSEL2_REG		0xd0
-#define MTK_NOR_DELSEL3_REG		0xd4
-#define MTK_NOR_DELSEL4_REG		0xd8
-
-/* commands for mtk nor controller */
-#define MTK_NOR_READ_CMD		0x0
-#define MTK_NOR_RDSR_CMD		0x2
-#define MTK_NOR_PRG_CMD			0x4
-#define MTK_NOR_WR_CMD			0x10
-#define MTK_NOR_PIO_WR_CMD		0x90
-#define MTK_NOR_WRSR_CMD		0x20
-#define MTK_NOR_PIO_READ_CMD		0x81
-#define MTK_NOR_WR_BUF_ENABLE		0x1
-#define MTK_NOR_WR_BUF_DISABLE		0x0
-#define MTK_NOR_ENABLE_SF_CMD		0x30
-#define MTK_NOR_DUAD_ADDR_EN		0x8
-#define MTK_NOR_QUAD_READ_EN		0x4
-#define MTK_NOR_DUAL_ADDR_EN		0x2
-#define MTK_NOR_DUAL_READ_EN		0x1
-#define MTK_NOR_DUAL_DISABLE		0x0
-#define MTK_NOR_FAST_READ		0x1
-
-#define SFLASH_WRBUF_SIZE		128
-
-/* Can shift up to 48 bits (6 bytes) of TX/RX */
-#define MTK_NOR_MAX_RX_TX_SHIFT		6
-/* can shift up to 56 bits (7 bytes) transfer by MTK_NOR_PRG_CMD */
-#define MTK_NOR_MAX_SHIFT		7
-/* nor controller 4-byte address mode enable bit */
-#define MTK_NOR_4B_ADDR_EN		BIT(4)
-
-/* Helpers for accessing the program data / shift data registers */
-#define MTK_NOR_PRG_REG(n)		(MTK_NOR_PRGDATA0_REG + 4 * (n))
-#define MTK_NOR_SHREG(n)		(MTK_NOR_SHREG0_REG + 4 * (n))
-
-struct mtk_nor {
-	struct spi_nor nor;
-	struct device *dev;
-	void __iomem *base;	/* nor flash base address */
-	struct clk *spi_clk;
-	struct clk *nor_clk;
-};
-
-static void mtk_nor_set_read_mode(struct mtk_nor *mtk_nor)
-{
-	struct spi_nor *nor = &mtk_nor->nor;
-
-	switch (nor->read_proto) {
-	case SNOR_PROTO_1_1_1:
-		writeb(nor->read_opcode, mtk_nor->base +
-		       MTK_NOR_PRGDATA3_REG);
-		writeb(MTK_NOR_FAST_READ, mtk_nor->base +
-		       MTK_NOR_CFG1_REG);
-		break;
-	case SNOR_PROTO_1_1_2:
-		writeb(nor->read_opcode, mtk_nor->base +
-		       MTK_NOR_PRGDATA3_REG);
-		writeb(MTK_NOR_DUAL_READ_EN, mtk_nor->base +
-		       MTK_NOR_DUAL_REG);
-		break;
-	case SNOR_PROTO_1_1_4:
-		writeb(nor->read_opcode, mtk_nor->base +
-		       MTK_NOR_PRGDATA4_REG);
-		writeb(MTK_NOR_QUAD_READ_EN, mtk_nor->base +
-		       MTK_NOR_DUAL_REG);
-		break;
-	default:
-		writeb(MTK_NOR_DUAL_DISABLE, mtk_nor->base +
-		       MTK_NOR_DUAL_REG);
-		break;
-	}
-}
-
-static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval)
-{
-	int reg;
-	u8 val = cmdval & 0x1f;
-
-	writeb(cmdval, mtk_nor->base + MTK_NOR_CMD_REG);
-	return readl_poll_timeout(mtk_nor->base + MTK_NOR_CMD_REG, reg,
-				  !(reg & val), 100, 10000);
-}
-
-static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
-			    u8 *tx, int txlen, u8 *rx, int rxlen)
-{
-	int len = 1 + txlen + rxlen;
-	int i, ret, idx;
-
-	if (len > MTK_NOR_MAX_SHIFT)
-		return -EINVAL;
-
-	writeb(len * 8, mtk_nor->base + MTK_NOR_CNT_REG);
-
-	/* start at PRGDATA5, go down to PRGDATA0 */
-	idx = MTK_NOR_MAX_RX_TX_SHIFT - 1;
-
-	/* opcode */
-	writeb(op, mtk_nor->base + MTK_NOR_PRG_REG(idx));
-	idx--;
-
-	/* program TX data */
-	for (i = 0; i < txlen; i++, idx--)
-		writeb(tx[i], mtk_nor->base + MTK_NOR_PRG_REG(idx));
-
-	/* clear out rest of TX registers */
-	while (idx >= 0) {
-		writeb(0, mtk_nor->base + MTK_NOR_PRG_REG(idx));
-		idx--;
-	}
-
-	ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PRG_CMD);
-	if (ret)
-		return ret;
-
-	/* restart at first RX byte */
-	idx = rxlen - 1;
-
-	/* read out RX data */
-	for (i = 0; i < rxlen; i++, idx--)
-		rx[i] = readb(mtk_nor->base + MTK_NOR_SHREG(idx));
-
-	return 0;
-}
-
-/* Do a WRSR (Write Status Register) command */
-static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, u8 sr)
-{
-	writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG);
-	writeb(8, mtk_nor->base + MTK_NOR_CNT_REG);
-	return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WRSR_CMD);
-}
-
-static int mtk_nor_write_buffer_enable(struct mtk_nor *mtk_nor)
-{
-	u8 reg;
-
-	/* the bit0 of MTK_NOR_CFG2_REG is pre-fetch buffer
-	 * 0: pre-fetch buffer use for read
-	 * 1: pre-fetch buffer use for page program
-	 */
-	writel(MTK_NOR_WR_BUF_ENABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
-	return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
-				  0x01 == (reg & 0x01), 100, 10000);
-}
-
-static int mtk_nor_write_buffer_disable(struct mtk_nor *mtk_nor)
-{
-	u8 reg;
-
-	writel(MTK_NOR_WR_BUF_DISABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
-	return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
-				  MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100,
-				  10000);
-}
-
-static void mtk_nor_set_addr_width(struct mtk_nor *mtk_nor)
-{
-	u8 val;
-	struct spi_nor *nor = &mtk_nor->nor;
-
-	val = readb(mtk_nor->base + MTK_NOR_DUAL_REG);
-
-	switch (nor->addr_width) {
-	case 3:
-		val &= ~MTK_NOR_4B_ADDR_EN;
-		break;
-	case 4:
-		val |= MTK_NOR_4B_ADDR_EN;
-		break;
-	default:
-		dev_warn(mtk_nor->dev, "Unexpected address width %u.\n",
-			 nor->addr_width);
-		break;
-	}
-
-	writeb(val, mtk_nor->base + MTK_NOR_DUAL_REG);
-}
-
-static void mtk_nor_set_addr(struct mtk_nor *mtk_nor, u32 addr)
-{
-	int i;
-
-	mtk_nor_set_addr_width(mtk_nor);
-
-	for (i = 0; i < 3; i++) {
-		writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR0_REG + i * 4);
-		addr >>= 8;
-	}
-	/* Last register is non-contiguous */
-	writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR3_REG);
-}
-
-static ssize_t mtk_nor_read(struct spi_nor *nor, loff_t from, size_t length,
-			    u_char *buffer)
-{
-	int i, ret;
-	int addr = (int)from;
-	u8 *buf = (u8 *)buffer;
-	struct mtk_nor *mtk_nor = nor->priv;
-
-	/* set mode for fast read mode ,dual mode or quad mode */
-	mtk_nor_set_read_mode(mtk_nor);
-	mtk_nor_set_addr(mtk_nor, addr);
-
-	for (i = 0; i < length; i++) {
-		ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_READ_CMD);
-		if (ret < 0)
-			return ret;
-		buf[i] = readb(mtk_nor->base + MTK_NOR_RDATA_REG);
-	}
-	return length;
-}
-
-static int mtk_nor_write_single_byte(struct mtk_nor *mtk_nor,
-				     int addr, int length, u8 *data)
-{
-	int i, ret;
-
-	mtk_nor_set_addr(mtk_nor, addr);
-
-	for (i = 0; i < length; i++) {
-		writeb(*data++, mtk_nor->base + MTK_NOR_WDATA_REG);
-		ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_WR_CMD);
-		if (ret < 0)
-			return ret;
-	}
-	return 0;
-}
-
-static int mtk_nor_write_buffer(struct mtk_nor *mtk_nor, int addr,
-				const u8 *buf)
-{
-	int i, bufidx, data;
-
-	mtk_nor_set_addr(mtk_nor, addr);
-
-	bufidx = 0;
-	for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) {
-		data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 |
-		       buf[bufidx + 1]<<8 | buf[bufidx];
-		bufidx += 4;
-		writel(data, mtk_nor->base + MTK_NOR_PP_DATA_REG);
-	}
-	return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WR_CMD);
-}
-
-static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len,
-			     const u_char *buf)
-{
-	int ret;
-	struct mtk_nor *mtk_nor = nor->priv;
-	size_t i;
-
-	ret = mtk_nor_write_buffer_enable(mtk_nor);
-	if (ret < 0) {
-		dev_warn(mtk_nor->dev, "write buffer enable failed!\n");
-		return ret;
-	}
-
-	for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) {
-		ret = mtk_nor_write_buffer(mtk_nor, to, buf);
-		if (ret < 0) {
-			dev_err(mtk_nor->dev, "write buffer failed!\n");
-			return ret;
-		}
-		to += SFLASH_WRBUF_SIZE;
-		buf += SFLASH_WRBUF_SIZE;
-	}
-	ret = mtk_nor_write_buffer_disable(mtk_nor);
-	if (ret < 0) {
-		dev_warn(mtk_nor->dev, "write buffer disable failed!\n");
-		return ret;
-	}
-
-	if (i < len) {
-		ret = mtk_nor_write_single_byte(mtk_nor, to,
-						(int)(len - i), (u8 *)buf);
-		if (ret < 0) {
-			dev_err(mtk_nor->dev, "write single byte failed!\n");
-			return ret;
-		}
-	}
-
-	return len;
-}
-
-static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
-{
-	int ret;
-	struct mtk_nor *mtk_nor = nor->priv;
-
-	switch (opcode) {
-	case SPINOR_OP_RDSR:
-		ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_RDSR_CMD);
-		if (ret < 0)
-			return ret;
-		if (len == 1)
-			*buf = readb(mtk_nor->base + MTK_NOR_RDSR_REG);
-		else
-			dev_err(mtk_nor->dev, "len should be 1 for read status!\n");
-		break;
-	default:
-		ret = mtk_nor_do_tx_rx(mtk_nor, opcode, NULL, 0, buf, len);
-		break;
-	}
-	return ret;
-}
-
-static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
-			     int len)
-{
-	int ret;
-	struct mtk_nor *mtk_nor = nor->priv;
-
-	switch (opcode) {
-	case SPINOR_OP_WRSR:
-		/* We only handle 1 byte */
-		ret = mtk_nor_wr_sr(mtk_nor, *buf);
-		break;
-	default:
-		ret = mtk_nor_do_tx_rx(mtk_nor, opcode, buf, len, NULL, 0);
-		if (ret)
-			dev_warn(mtk_nor->dev, "write reg failure!\n");
-		break;
-	}
-	return ret;
-}
-
-static void mtk_nor_disable_clk(struct mtk_nor *mtk_nor)
-{
-	clk_disable_unprepare(mtk_nor->spi_clk);
-	clk_disable_unprepare(mtk_nor->nor_clk);
-}
-
-static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor)
-{
-	int ret;
-
-	ret = clk_prepare_enable(mtk_nor->spi_clk);
-	if (ret)
-		return ret;
-
-	ret = clk_prepare_enable(mtk_nor->nor_clk);
-	if (ret) {
-		clk_disable_unprepare(mtk_nor->spi_clk);
-		return ret;
-	}
-
-	return 0;
-}
-
-static int mtk_nor_init(struct mtk_nor *mtk_nor,
-			struct device_node *flash_node)
-{
-	const struct spi_nor_hwcaps hwcaps = {
-		.mask = SNOR_HWCAPS_READ |
-			SNOR_HWCAPS_READ_FAST |
-			SNOR_HWCAPS_READ_1_1_2 |
-			SNOR_HWCAPS_PP,
-	};
-	int ret;
-	struct spi_nor *nor;
-
-	/* initialize controller to accept commands */
-	writel(MTK_NOR_ENABLE_SF_CMD, mtk_nor->base + MTK_NOR_WRPROT_REG);
-
-	nor = &mtk_nor->nor;
-	nor->dev = mtk_nor->dev;
-	nor->priv = mtk_nor;
-	spi_nor_set_flash_node(nor, flash_node);
-
-	/* fill the hooks to spi nor */
-	nor->read = mtk_nor_read;
-	nor->read_reg = mtk_nor_read_reg;
-	nor->write = mtk_nor_write;
-	nor->write_reg = mtk_nor_write_reg;
-	nor->mtd.name = "mtk_nor";
-	/* initialized with NULL */
-	ret = spi_nor_scan(nor, NULL, &hwcaps);
-	if (ret)
-		return ret;
-
-	return mtd_device_register(&nor->mtd, NULL, 0);
-}
-
-static int mtk_nor_drv_probe(struct platform_device *pdev)
-{
-	struct device_node *flash_np;
-	struct resource *res;
-	int ret;
-	struct mtk_nor *mtk_nor;
-
-	if (!pdev->dev.of_node) {
-		dev_err(&pdev->dev, "No DT found\n");
-		return -EINVAL;
-	}
-
-	mtk_nor = devm_kzalloc(&pdev->dev, sizeof(*mtk_nor), GFP_KERNEL);
-	if (!mtk_nor)
-		return -ENOMEM;
-	platform_set_drvdata(pdev, mtk_nor);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	mtk_nor->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(mtk_nor->base))
-		return PTR_ERR(mtk_nor->base);
-
-	mtk_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
-	if (IS_ERR(mtk_nor->spi_clk))
-		return PTR_ERR(mtk_nor->spi_clk);
-
-	mtk_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
-	if (IS_ERR(mtk_nor->nor_clk))
-		return PTR_ERR(mtk_nor->nor_clk);
-
-	mtk_nor->dev = &pdev->dev;
-
-	ret = mtk_nor_enable_clk(mtk_nor);
-	if (ret)
-		return ret;
-
-	/* only support one attached flash */
-	flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
-	if (!flash_np) {
-		dev_err(&pdev->dev, "no SPI flash device to configure\n");
-		ret = -ENODEV;
-		goto nor_free;
-	}
-	ret = mtk_nor_init(mtk_nor, flash_np);
-
-nor_free:
-	if (ret)
-		mtk_nor_disable_clk(mtk_nor);
-
-	return ret;
-}
-
-static int mtk_nor_drv_remove(struct platform_device *pdev)
-{
-	struct mtk_nor *mtk_nor = platform_get_drvdata(pdev);
-
-	mtk_nor_disable_clk(mtk_nor);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int mtk_nor_suspend(struct device *dev)
-{
-	struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
-
-	mtk_nor_disable_clk(mtk_nor);
-
-	return 0;
-}
-
-static int mtk_nor_resume(struct device *dev)
-{
-	struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
-
-	return mtk_nor_enable_clk(mtk_nor);
-}
-
-static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
-	.suspend = mtk_nor_suspend,
-	.resume = mtk_nor_resume,
-};
-
-#define MTK_NOR_DEV_PM_OPS	(&mtk_nor_dev_pm_ops)
-#else
-#define MTK_NOR_DEV_PM_OPS	NULL
-#endif
-
-static const struct of_device_id mtk_nor_of_ids[] = {
-	{ .compatible = "mediatek,mt8173-nor"},
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mtk_nor_of_ids);
-
-static struct platform_driver mtk_nor_driver = {
-	.probe = mtk_nor_drv_probe,
-	.remove = mtk_nor_drv_remove,
-	.driver = {
-		.name = "mtk-nor",
-		.pm = MTK_NOR_DEV_PM_OPS,
-		.of_match_table = mtk_nor_of_ids,
-	},
-};
-
-module_platform_driver(mtk_nor_driver);
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MediaTek SPI NOR Flash Driver");
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
new file mode 100644
index 0000000..08de2a2
--- /dev/null
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -0,0 +1,1206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+#define SFDP_PARAM_HEADER_ID(p)	(((p)->id_msb << 8) | (p)->id_lsb)
+#define SFDP_PARAM_HEADER_PTP(p) \
+	(((p)->parameter_table_pointer[2] << 16) | \
+	 ((p)->parameter_table_pointer[1] <<  8) | \
+	 ((p)->parameter_table_pointer[0] <<  0))
+
+#define SFDP_BFPT_ID		0xff00	/* Basic Flash Parameter Table */
+#define SFDP_SECTOR_MAP_ID	0xff81	/* Sector Map Table */
+#define SFDP_4BAIT_ID		0xff84  /* 4-byte Address Instruction Table */
+
+#define SFDP_SIGNATURE		0x50444653U
+
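For orientation: the signature is the ASCII string "SFDP" read back as a little-endian 32-bit value, and SFDP_PARAM_HEADER_PTP() assembles a 24-bit table address from the three pointer bytes, LSB first. A minimal standalone sketch with a made-up pointer value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint8_t sig[4] = { 'S', 'F', 'D', 'P' };	/* as stored in flash */
		uint32_t signature = sig[0] | sig[1] << 8 | sig[2] << 16 |
				     (uint32_t)sig[3] << 24;

		printf("0x%08x\n", (unsigned int)signature);	/* 0x50444653 */

		const uint8_t ptp[3] = { 0x30, 0x01, 0x00 };	/* made-up pointer */
		uint32_t addr = ptp[2] << 16 | ptp[1] << 8 | ptp[0];

		printf("0x%06x\n", (unsigned int)addr);		/* 0x000130 */
		return 0;
	}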
+struct sfdp_header {
+	u32		signature; /* 0x50444653U <=> "SFDP" */
+	u8		minor;
+	u8		major;
+	u8		nph; /* 0-based number of parameter headers */
+	u8		unused;
+
+	/* Basic Flash Parameter Table. */
+	struct sfdp_parameter_header	bfpt_header;
+};
+
+/* Fast Read settings. */
+struct sfdp_bfpt_read {
+	/* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
+	u32			hwcaps;
+
+	/*
+	 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
+	 * whether the Fast Read x-y-z command is supported.
+	 */
+	u32			supported_dword;
+	u32			supported_bit;
+
+	/*
+	 * The half-word at offset <settings_shift> in <settings_dword> BFPT DWORD
+	 * encodes the op code, the number of mode clocks and the number of wait
+	 * states to be used by Fast Read x-y-z command.
+	 */
+	u32			settings_dword;
+	u32			settings_shift;
+
+	/* The SPI protocol for this Fast Read x-y-z command. */
+	enum spi_nor_protocol	proto;
+};
+
+struct sfdp_bfpt_erase {
+	/*
+	 * The half-word at offset <shift> in DWORD <dword> encodes the
+	 * op code and erase sector size to be used by Sector Erase commands.
+	 */
+	u32			dword;
+	u32			shift;
+};
+
+#define SMPT_CMD_ADDRESS_LEN_MASK		GENMASK(23, 22)
+#define SMPT_CMD_ADDRESS_LEN_0			(0x0UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_3			(0x1UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_4			(0x2UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT	(0x3UL << 22)
+
+#define SMPT_CMD_READ_DUMMY_MASK		GENMASK(19, 16)
+#define SMPT_CMD_READ_DUMMY_SHIFT		16
+#define SMPT_CMD_READ_DUMMY(_cmd) \
+	(((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
+#define SMPT_CMD_READ_DUMMY_IS_VARIABLE		0xfUL
+
+#define SMPT_CMD_READ_DATA_MASK			GENMASK(31, 24)
+#define SMPT_CMD_READ_DATA_SHIFT		24
+#define SMPT_CMD_READ_DATA(_cmd) \
+	(((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
+
+#define SMPT_CMD_OPCODE_MASK			GENMASK(15, 8)
+#define SMPT_CMD_OPCODE_SHIFT			8
+#define SMPT_CMD_OPCODE(_cmd) \
+	(((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
+
+#define SMPT_MAP_REGION_COUNT_MASK		GENMASK(23, 16)
+#define SMPT_MAP_REGION_COUNT_SHIFT		16
+#define SMPT_MAP_REGION_COUNT(_header) \
+	((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
+	  SMPT_MAP_REGION_COUNT_SHIFT) + 1)
+
+#define SMPT_MAP_ID_MASK			GENMASK(15, 8)
+#define SMPT_MAP_ID_SHIFT			8
+#define SMPT_MAP_ID(_header) \
+	(((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
+
+#define SMPT_MAP_REGION_SIZE_MASK		GENMASK(31, 8)
+#define SMPT_MAP_REGION_SIZE_SHIFT		8
+#define SMPT_MAP_REGION_SIZE(_region) \
+	(((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
+	   SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
+
+#define SMPT_MAP_REGION_ERASE_TYPE_MASK		GENMASK(3, 0)
+#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
+	((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
+
+#define SMPT_DESC_TYPE_MAP			BIT(1)
+#define SMPT_DESC_END				BIT(0)
+
+#define SFDP_4BAIT_DWORD_MAX	2
+
+struct sfdp_4bait {
+	/* The hardware capability. */
+	u32		hwcaps;
+
+	/*
+	 * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
+	 * the associated 4-byte address op code is supported.
+	 */
+	u32		supported_bit;
+};
+
+/**
+ * spi_nor_read_raw() - raw read of serial flash memory.
+ *			The read_opcode, addr_width and read_dummy members of
+ *			the struct spi_nor should be set up before calling
+ *			this function.
+ * @nor:	pointer to a 'struct spi_nor'
+ * @addr:	offset in the serial flash memory
+ * @len:	number of bytes to read
+ * @buf:	buffer where the data is copied into (dma-safe memory)
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
+{
+	ssize_t ret;
+
+	while (len) {
+		ret = spi_nor_read_data(nor, addr, len, buf);
+		if (ret < 0)
+			return ret;
+		if (!ret || ret > len)
+			return -EIO;
+
+		buf += ret;
+		addr += ret;
+		len -= ret;
+	}
+	return 0;
+}
+
+/**
+ * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
+ * @nor:	pointer to a 'struct spi_nor'
+ * @addr:	offset in the SFDP area to start reading data from
+ * @len:	number of bytes to read
+ * @buf:	buffer where the SFDP data are copied into (dma-safe memory)
+ *
+ * Whatever the actual numbers of bytes for address and dummy cycles are
+ * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
+ * followed by a 3-byte address and 8 dummy clock cycles.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
+			     size_t len, void *buf)
+{
+	u8 addr_width, read_opcode, read_dummy;
+	int ret;
+
+	read_opcode = nor->read_opcode;
+	addr_width = nor->addr_width;
+	read_dummy = nor->read_dummy;
+
+	nor->read_opcode = SPINOR_OP_RDSFDP;
+	nor->addr_width = 3;
+	nor->read_dummy = 8;
+
+	ret = spi_nor_read_raw(nor, addr, len, buf);
+
+	nor->read_opcode = read_opcode;
+	nor->addr_width = addr_width;
+	nor->read_dummy = read_dummy;
+
+	return ret;
+}
+
+/**
+ * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
+ * @nor:	pointer to a 'struct spi_nor'
+ * @addr:	offset in the SFDP area to start reading data from
+ * @len:	number of bytes to read
+ * @buf:	buffer where the SFDP data are copied into
+ *
+ * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer, as @buf is not
+ * guaranteed to be dma-safe.
+ *
+ * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
+ *          otherwise.
+ */
+static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
+					size_t len, void *buf)
+{
+	void *dma_safe_buf;
+	int ret;
+
+	dma_safe_buf = kmalloc(len, GFP_KERNEL);
+	if (!dma_safe_buf)
+		return -ENOMEM;
+
+	ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
+	memcpy(buf, dma_safe_buf, len);
+	kfree(dma_safe_buf);
+
+	return ret;
+}
+
+static void
+spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
+				    u16 half,
+				    enum spi_nor_protocol proto)
+{
+	read->num_mode_clocks = (half >> 5) & 0x07;
+	read->num_wait_states = (half >> 0) & 0x1f;
+	read->opcode = (half >> 8) & 0xff;
+	read->proto = proto;
+}
+
+static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
+	/* Fast Read 1-1-2 */
+	{
+		SNOR_HWCAPS_READ_1_1_2,
+		BFPT_DWORD(1), BIT(16),	/* Supported bit */
+		BFPT_DWORD(4), 0,	/* Settings */
+		SNOR_PROTO_1_1_2,
+	},
+
+	/* Fast Read 1-2-2 */
+	{
+		SNOR_HWCAPS_READ_1_2_2,
+		BFPT_DWORD(1), BIT(20),	/* Supported bit */
+		BFPT_DWORD(4), 16,	/* Settings */
+		SNOR_PROTO_1_2_2,
+	},
+
+	/* Fast Read 2-2-2 */
+	{
+		SNOR_HWCAPS_READ_2_2_2,
+		BFPT_DWORD(5),  BIT(0),	/* Supported bit */
+		BFPT_DWORD(6), 16,	/* Settings */
+		SNOR_PROTO_2_2_2,
+	},
+
+	/* Fast Read 1-1-4 */
+	{
+		SNOR_HWCAPS_READ_1_1_4,
+		BFPT_DWORD(1), BIT(22),	/* Supported bit */
+		BFPT_DWORD(3), 16,	/* Settings */
+		SNOR_PROTO_1_1_4,
+	},
+
+	/* Fast Read 1-4-4 */
+	{
+		SNOR_HWCAPS_READ_1_4_4,
+		BFPT_DWORD(1), BIT(21),	/* Supported bit */
+		BFPT_DWORD(3), 0,	/* Settings */
+		SNOR_PROTO_1_4_4,
+	},
+
+	/* Fast Read 4-4-4 */
+	{
+		SNOR_HWCAPS_READ_4_4_4,
+		BFPT_DWORD(5), BIT(4),	/* Supported bit */
+		BFPT_DWORD(7), 16,	/* Settings */
+		SNOR_PROTO_4_4_4,
+	},
+};
+
+static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
+	/* Erase Type 1 in DWORD8 bits[15:0] */
+	{BFPT_DWORD(8), 0},
+
+	/* Erase Type 2 in DWORD8 bits[31:16] */
+	{BFPT_DWORD(8), 16},
+
+	/* Erase Type 3 in DWORD9 bits[15:0] */
+	{BFPT_DWORD(9), 0},
+
+	/* Erase Type 4 in DWORD9 bits[31:16] */
+	{BFPT_DWORD(9), 16},
+};
+
+/**
+ * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
+ * @erase:	pointer to a structure that describes a SPI NOR erase type
+ * @size:	the size of the sector/block erased by the erase type
+ * @opcode:	the SPI command op code to erase the sector/block
+ * @i:		erase type index as sorted in the Basic Flash Parameter Table
+ *
+ * The supported Erase Types will be sorted at init in ascending order, with
+ * the smallest Erase Type size being the first member in the erase_type array
+ * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
+ * the Basic Flash Parameter Table since it will be used later on to
+ * synchronize with the supported Erase Types defined in SFDP optional tables.
+ */
+static void
+spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
+				     u32 size, u8 opcode, u8 i)
+{
+	erase->idx = i;
+	spi_nor_set_erase_type(erase, size, opcode);
+}
+
+/**
+ * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
+ * @l:	member in the left half of the map's erase_type array
+ * @r:	member in the right half of the map's erase_type array
+ *
+ * Comparison function used in the sort() call to sort in ascending order the
+ * map's erase types, the smallest erase type size being the first member in the
+ * sorted erase_type array.
+ *
+ * Return: the result of @l->size - @r->size
+ */
+static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
+{
+	const struct spi_nor_erase_type *left = l, *right = r;
+
+	return left->size - right->size;
+}
+
+/**
+ * spi_nor_sort_erase_mask() - sort erase mask
+ * @map:	the erase map of the SPI NOR
+ * @erase_mask:	the erase type mask to be sorted
+ *
+ * Replicate the sort done for the map's erase types in BFPT: sort the erase
+ * mask in ascending order with the smallest erase type size starting from
+ * BIT(0) in the sorted erase mask.
+ *
+ * Return: sorted erase mask.
+ */
+static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
+{
+	struct spi_nor_erase_type *erase_type = map->erase_type;
+	int i;
+	u8 sorted_erase_mask = 0;
+
+	if (!erase_mask)
+		return 0;
+
+	/* Replicate the sort done for the map's erase types. */
+	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+		if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
+			sorted_erase_mask |= BIT(i);
+
+	return sorted_erase_mask;
+}
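A worked pass through the loop above, with an assumed BFPT ordering (64 KiB listed as Erase Type 1, 4 KiB as Erase Type 2):

	/*
	 * After sort():	erase_type[0] = { .size = 4 KiB,  .idx = 1 }
	 *			erase_type[1] = { .size = 64 KiB, .idx = 0 }
	 *
	 * Input erase_mask = BIT(0), i.e. only BFPT Erase Type 1 (64 KiB):
	 *   i = 0: BIT(erase_type[0].idx) = BIT(1), not in mask -> skip
	 *   i = 1: BIT(erase_type[1].idx) = BIT(0), in mask     -> set BIT(1)
	 *
	 * sorted_erase_mask = BIT(1): the 64 KiB type, now at sorted index 1.
	 */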
+
+/**
+ * spi_nor_regions_sort_erase_types() - sort erase types in each region
+ * @map:	the erase map of the SPI NOR
+ *
+ * Function assumes that the erase types defined in the erase map are already
+ * sorted in ascending order, with the smallest erase type size being the first
+ * member in the erase_type array. It replicates the sort done for the map's
+ * erase types. Each region's erase bitmask will indicate which erase types are
+ * supported from the sorted erase types defined in the erase map.
+ * Sort all the regions' erase types at init in order to speed up the process of
+ * finding the best erase command at runtime.
+ */
+static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
+{
+	struct spi_nor_erase_region *region = map->regions;
+	u8 region_erase_mask, sorted_erase_mask;
+
+	while (region) {
+		region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
+
+		sorted_erase_mask = spi_nor_sort_erase_mask(map,
+							    region_erase_mask);
+
+		/* Overwrite erase mask. */
+		region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
+				 sorted_erase_mask;
+
+		region = spi_nor_region_next(region);
+	}
+}
+
+/**
+ * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
+ * @nor:		pointer to a 'struct spi_nor'
+ * @bfpt_header:	pointer to the 'struct sfdp_parameter_header' describing
+ *			the Basic Flash Parameter Table length and version
+ * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
+ *			filled
+ *
+ * The Basic Flash Parameter Table is the main and only mandatory table as
+ * defined by the SFDP (JESD216) specification.
+ * It provides us with the total size (memory density) of the data array and
+ * the number of address bytes for Fast Read, Page Program and Sector Erase
+ * commands.
+ * For Fast Read commands, it also gives the number of mode clock cycles and
+ * wait states (regrouped in the number of dummy clock cycles) for each
+ * supported instruction op code.
+ * For Page Program, the page size has been available since JESD216 rev A;
+ * however, the supported instruction op codes are still not provided.
+ * For Sector Erase commands, this table stores the supported instruction op
+ * codes and the associated sector sizes.
+ * Finally, the Quad Enable Requirements (QER) are also available since JESD216
+ * rev A. The QER bits encode the manufacturer dependent procedure to be
+ * executed to set the Quad Enable (QE) bit in some internal register of the
+ * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
+ * sending any Quad SPI command to the memory. Actually, setting the QE bit
+ * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
+ * and IO3 hence enabling 4 (Quad) I/O lines.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_bfpt(struct spi_nor *nor,
+			      const struct sfdp_parameter_header *bfpt_header,
+			      struct spi_nor_flash_parameter *params)
+{
+	struct spi_nor_erase_map *map = &params->erase_map;
+	struct spi_nor_erase_type *erase_type = map->erase_type;
+	struct sfdp_bfpt bfpt;
+	size_t len;
+	int i, cmd, err;
+	u32 addr, val;
+	u16 half;
+	u8 erase_mask;
+
+	/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
+	if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
+		return -EINVAL;
+
+	/* Read the Basic Flash Parameter Table. */
+	len = min_t(size_t, sizeof(bfpt),
+		    bfpt_header->length * sizeof(u32));
+	addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
+	memset(&bfpt, 0, sizeof(bfpt));
+	err = spi_nor_read_sfdp_dma_unsafe(nor,  addr, len, &bfpt);
+	if (err < 0)
+		return err;
+
+	/* Fix endianness of the BFPT DWORDs. */
+	le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX);
+
+	/* Number of address bytes. */
+	switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
+	case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
+	case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
+		nor->addr_width = 3;
+		break;
+
+	case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
+		nor->addr_width = 4;
+		break;
+
+	default:
+		break;
+	}
+
+	/* Flash Memory Density (in bits). */
+	val = bfpt.dwords[BFPT_DWORD(2)];
+	if (val & BIT(31)) {
+		val &= ~BIT(31);
+
+		/*
+		 * Prevent overflows on params->size. Anyway, a NOR of 2^64
+		 * bits is unlikely to exist so this error probably means
+		 * the BFPT we are reading is corrupted/wrong.
+		 */
+		if (val > 63)
+			return -EINVAL;
+
+		params->size = 1ULL << val;
+	} else {
+		params->size = val + 1;
+	}
+	params->size >>= 3; /* Convert to bytes. */
+
+	/* Fast Read settings. */
+	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
+		const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
+		struct spi_nor_read_command *read;
+
+		if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
+			params->hwcaps.mask &= ~rd->hwcaps;
+			continue;
+		}
+
+		params->hwcaps.mask |= rd->hwcaps;
+		cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
+		read = &params->reads[cmd];
+		half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
+		spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
+	}
+
+	/*
+	 * Sector Erase settings. Reinitialize the uniform erase map using the
+	 * Erase Types defined in the bfpt table.
+	 */
+	erase_mask = 0;
+	memset(&params->erase_map, 0, sizeof(params->erase_map));
+	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
+		const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
+		u32 erasesize;
+		u8 opcode;
+
+		half = bfpt.dwords[er->dword] >> er->shift;
+		erasesize = half & 0xff;
+
+		/* erasesize == 0 means this Erase Type is not supported. */
+		if (!erasesize)
+			continue;
+
+		erasesize = 1U << erasesize;
+		opcode = (half >> 8) & 0xff;
+		erase_mask |= BIT(i);
+		spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
+						     opcode, i);
+	}
+	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+	/*
+	 * Sort all the map's Erase Types in ascending order with the smallest
+	 * erase size being the first member in the erase_type array.
+	 */
+	sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
+	     spi_nor_map_cmp_erase_type, NULL);
+	/*
+	 * Sort the erase types in the uniform region in order to update the
+	 * uniform_erase_type bitmask. The bitmask will be used later on when
+	 * selecting the uniform erase.
+	 */
+	spi_nor_regions_sort_erase_types(map);
+	map->uniform_erase_type = map->uniform_region.offset &
+				  SNOR_ERASE_TYPE_MASK;
+
+	/* Stop here if not JESD216 rev A or later. */
+	if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
+		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
+						params);
+
+	/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
+	val = bfpt.dwords[BFPT_DWORD(11)];
+	val &= BFPT_DWORD11_PAGE_SIZE_MASK;
+	val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
+	params->page_size = 1U << val;
+
+	/* Quad Enable Requirements. */
+	switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
+	case BFPT_DWORD15_QER_NONE:
+		params->quad_enable = NULL;
+		break;
+
+	case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
+		/*
+		 * Writing only one byte to the Status Register has the
+		 * side-effect of clearing Status Register 2.
+		 */
+	case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
+		/*
+		 * Read Configuration Register (35h) instruction is not
+		 * supported.
+		 */
+		nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
+		params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+		break;
+
+	case BFPT_DWORD15_QER_SR1_BIT6:
+		nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+		params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+		break;
+
+	case BFPT_DWORD15_QER_SR2_BIT7:
+		nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+		params->quad_enable = spi_nor_sr2_bit7_quad_enable;
+		break;
+
+	case BFPT_DWORD15_QER_SR2_BIT1:
+		/*
+		 * JESD216 rev B or later does not specify whether writing only
+		 * one byte to the Status Register clears Status Register 2,
+		 * so let's be cautious and keep the default
+		 * assumption of a 16-bit Write Status (01h) command.
+		 */
+		nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+		params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+		break;
+
+	default:
+		dev_dbg(nor->dev, "BFPT QER reserved value used\n");
+		break;
+	}
+
+	/*
+	 * Stop here if not JESD216 rev C or later. Nothing rev C specific
+	 * is parsed below yet, so both paths currently converge on the
+	 * post-BFPT fixup hooks.
+	 */
+	if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
+		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
+						params);
+
+	return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
+}
+
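
For orientation, the DWORD(11) decoding above is a plain mask-and-shift; a minimal sketch with a made-up raw DWORD value (N = 8 gives the common 256-byte page):

	/* Hypothetical raw BFPT DWORD(11); only the page-size field is set. */
	u32 dword11 = 8 << BFPT_DWORD11_PAGE_SIZE_SHIFT;
	u32 n = (dword11 & BFPT_DWORD11_PAGE_SIZE_MASK) >>
		BFPT_DWORD11_PAGE_SIZE_SHIFT;
	u32 page_size = 1U << n;	/* 2^8 = 256 bytes */
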
+/**
+ * spi_nor_smpt_addr_width() - return the address width used in the
+ *			       configuration detection command.
+ * @nor:	pointer to a 'struct spi_nor'
+ * @settings:	configuration detection command descriptor, dword1
+ *
+ * Return: the address width, in bytes, used by the detection command.
+ */
+static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
+{
+	switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
+	case SMPT_CMD_ADDRESS_LEN_0:
+		return 0;
+	case SMPT_CMD_ADDRESS_LEN_3:
+		return 3;
+	case SMPT_CMD_ADDRESS_LEN_4:
+		return 4;
+	case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
+	default:
+		return nor->addr_width;
+	}
+}
+
+/**
+ * spi_nor_smpt_read_dummy() - return the configuration detection command read
+ *			       latency, in clock cycles.
+ * @nor:	pointer to a 'struct spi_nor'
+ * @settings:	configuration detection command descriptor, dword1
+ *
+ * Return: the number of dummy cycles for an SMPT read
+ */
+static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
+{
+	u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
+
+	if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
+		return nor->read_dummy;
+	return read_dummy;
+}
+
+/**
+ * spi_nor_get_map_in_use() - get the configuration map in use
+ * @nor:	pointer to a 'struct spi_nor'
+ * @smpt:	pointer to the sector map parameter table
+ * @smpt_len:	sector map parameter table length
+ *
+ * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
+ */
+static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
+					 u8 smpt_len)
+{
+	const u32 *ret;
+	u8 *buf;
+	u32 addr;
+	int err;
+	u8 i;
+	u8 addr_width, read_opcode, read_dummy;
+	u8 read_data_mask, map_id;
+
+	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	addr_width = nor->addr_width;
+	read_dummy = nor->read_dummy;
+	read_opcode = nor->read_opcode;
+
+	map_id = 0;
+	/* Determine if there are any optional Detection Command Descriptors */
+	for (i = 0; i < smpt_len; i += 2) {
+		if (smpt[i] & SMPT_DESC_TYPE_MAP)
+			break;
+
+		read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
+		nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
+		nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
+		nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
+		addr = smpt[i + 1];
+
+		err = spi_nor_read_raw(nor, addr, 1, buf);
+		if (err) {
+			ret = ERR_PTR(err);
+			goto out;
+		}
+
+		/*
+		 * Build an index value that is used to select the Sector Map
+		 * Configuration that is currently in use.
+		 */
+		map_id = map_id << 1 | !!(*buf & read_data_mask);
+	}
+
+	/*
+	 * If command descriptors are provided, they always precede map
+	 * descriptors in the table. There is no need to start the iteration
+	 * over smpt array all over again.
+	 *
+	 * Find the matching configuration map.
+	 */
+	ret = ERR_PTR(-EINVAL);
+	while (i < smpt_len) {
+		if (SMPT_MAP_ID(smpt[i]) == map_id) {
+			ret = smpt + i;
+			break;
+		}
+
+		/*
+		 * If there are no more configuration map descriptors and no
+		 * configuration ID matched the configuration identifier, the
+		 * sector address map is unknown.
+		 */
+		if (smpt[i] & SMPT_DESC_END)
+			break;
+
+		/* increment the table index to the next map */
+		i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
+	}
+
+	/* fall through */
+out:
+	kfree(buf);
+	nor->addr_width = addr_width;
+	nor->read_dummy = read_dummy;
+	nor->read_opcode = read_opcode;
+	return ret;
+}
+
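
The map_id accumulation in spi_nor_get_map_in_use() is just a left-shifting bit collector; a standalone sketch with made-up probe results:

	/* Three detection reads returning bits 1, 0, 1 -> map_id = 0b101 = 5. */
	u8 map_id = 0;
	const u8 probe_bit[] = { 1, 0, 1 };	/* hypothetical (*buf & mask) results */
	size_t k;

	for (k = 0; k < ARRAY_SIZE(probe_bit); k++)
		map_id = map_id << 1 | probe_bit[k];
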
+static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
+{
+	region->offset |= SNOR_LAST_REGION;
+}
+
+static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
+{
+	region->offset |= SNOR_OVERLAID_REGION;
+}
+
+/**
+ * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
+ * @region:	pointer to a structure that describes a SPI NOR erase region
+ * @erase:	pointer to a structure that describes a SPI NOR erase type
+ * @erase_type:	erase type bitmask
+ */
+static void
+spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
+			     const struct spi_nor_erase_type *erase,
+			     const u8 erase_type)
+{
+	int i;
+
+	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+		if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
+			continue;
+		if (region->size & erase[i].size_mask) {
+			spi_nor_region_mark_overlay(region);
+			return;
+		}
+	}
+}
+
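
The overlay test above reduces to a power-of-two alignment check against the erase size; for example, a 16 KiB region checked against a 64 KiB erase type (size_mask = 64 KiB - 1) is not erase-aligned and gets marked overlaid:

	u32 region_size = 16 * 1024;
	u32 size_mask = (64 * 1024) - 1;		/* 64 KiB erase type */
	bool overlaid = region_size & size_mask;	/* true: 16K % 64K != 0 */
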
+/**
+ * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
+ * @nor:	pointer to a 'struct spi_nor'
+ * @params:     pointer to a duplicate 'struct spi_nor_flash_parameter' that is
+ *              used for storing SFDP parsed data
+ * @smpt:	pointer to the sector map parameter table
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+				   struct spi_nor_flash_parameter *params,
+				   const u32 *smpt)
+{
+	struct spi_nor_erase_map *map = &params->erase_map;
+	struct spi_nor_erase_type *erase = map->erase_type;
+	struct spi_nor_erase_region *region;
+	u64 offset;
+	u32 region_count;
+	int i, j;
+	u8 uniform_erase_type, save_uniform_erase_type;
+	u8 erase_type, regions_erase_type;
+
+	region_count = SMPT_MAP_REGION_COUNT(*smpt);
+	/*
+	 * The regions will be freed when the driver detaches from the
+	 * device.
+	 */
+	region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
+			      GFP_KERNEL);
+	if (!region)
+		return -ENOMEM;
+	map->regions = region;
+
+	uniform_erase_type = 0xff;
+	regions_erase_type = 0;
+	offset = 0;
+	/* Populate regions. */
+	for (i = 0; i < region_count; i++) {
+		j = i + 1; /* index for the region dword */
+		region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
+		erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
+		region[i].offset = offset | erase_type;
+
+		spi_nor_region_check_overlay(&region[i], erase, erase_type);
+
+		/*
+		 * Save the erase types that are supported in all regions and
+		 * can erase the entire flash memory.
+		 */
+		uniform_erase_type &= erase_type;
+
+		/*
+		 * regions_erase_type mask will indicate all the erase types
+		 * supported in this configuration map.
+		 */
+		regions_erase_type |= erase_type;
+
+		offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
+			 region[i].size;
+	}
+	spi_nor_region_mark_end(&region[i - 1]);
+
+	save_uniform_erase_type = map->uniform_erase_type;
+	map->uniform_erase_type = spi_nor_sort_erase_mask(map,
+							  uniform_erase_type);
+
+	if (!regions_erase_type) {
+		/*
+		 * Roll back to the previous uniform_erase_type mask, SMPT is
+		 * broken.
+		 */
+		map->uniform_erase_type = save_uniform_erase_type;
+		return -EINVAL;
+	}
+
+	/*
+	 * BFPT advertises all the erase types supported by all the possible
+	 * map configurations. Mask out the erase types that are not supported
+	 * by the current map configuration.
+	 */
+	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+		if (!(regions_erase_type & BIT(erase[i].idx)))
+			spi_nor_set_erase_type(&erase[i], 0, 0xFF);
+
+	return 0;
+}
+
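
Each region descriptor packs the region's byte offset together with its supported erase-type bits in a single ->offset word, matching the loop above; a sketch of the packing and unpacking, assuming SNOR_ERASE_FLAGS_MASK covers the low flag bits as in the offset computation:

	u64 start = 0x10000;			/* region start address */
	u8 erase_types = BIT(0) | BIT(2);	/* Erase Types 1 and 3 */
	u64 packed = start | erase_types;
	u64 unpacked = packed & ~SNOR_ERASE_FLAGS_MASK;	/* back to 0x10000 */
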
+/**
+ * spi_nor_parse_smpt() - parse Sector Map Parameter Table
+ * @nor:		pointer to a 'struct spi_nor'
+ * @smpt_header:	sector map parameter table header
+ * @params:		pointer to a duplicate 'struct spi_nor_flash_parameter'
+ *                      that is used for storing SFDP parsed data
+ *
+ * This table is optional, but when available, we parse it to identify the
+ * location and size of sectors within the main data array of the flash memory
+ * device and to identify which Erase Types are supported by each sector.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_smpt(struct spi_nor *nor,
+			      const struct sfdp_parameter_header *smpt_header,
+			      struct spi_nor_flash_parameter *params)
+{
+	const u32 *sector_map;
+	u32 *smpt;
+	size_t len;
+	u32 addr;
+	int ret;
+
+	/* Read the Sector Map Parameter Table. */
+	len = smpt_header->length * sizeof(*smpt);
+	smpt = kmalloc(len, GFP_KERNEL);
+	if (!smpt)
+		return -ENOMEM;
+
+	addr = SFDP_PARAM_HEADER_PTP(smpt_header);
+	ret = spi_nor_read_sfdp(nor, addr, len, smpt);
+	if (ret)
+		goto out;
+
+	/* Fix endianness of the SMPT DWORDs. */
+	le32_to_cpu_array(smpt, smpt_header->length);
+
+	sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
+	if (IS_ERR(sector_map)) {
+		ret = PTR_ERR(sector_map);
+		goto out;
+	}
+
+	ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
+	if (ret)
+		goto out;
+
+	spi_nor_regions_sort_erase_types(&params->erase_map);
+	/* fall through */
+out:
+	kfree(smpt);
+	return ret;
+}
+
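
The parameter-table pointer used above is a 24-bit byte address assembled little-endian from the three pointer bytes of the header; a sketch equivalent to the SFDP_PARAM_HEADER_PTP() macro:

	const u8 ptp[3] = { 0x30, 0x02, 0x00 };	/* hypothetical pointer bytes */
	u32 addr = (ptp[2] << 16) | (ptp[1] << 8) | ptp[0];	/* 0x000230 */
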
+/**
+ * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
+ * @nor:		pointer to a 'struct spi_nor'.
+ * @param_header:	pointer to the 'struct sfdp_parameter_header' describing
+ *			the 4-Byte Address Instruction Table length and version.
+ * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
+ *			filled.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_4bait(struct spi_nor *nor,
+			       const struct sfdp_parameter_header *param_header,
+			       struct spi_nor_flash_parameter *params)
+{
+	static const struct sfdp_4bait reads[] = {
+		{ SNOR_HWCAPS_READ,		BIT(0) },
+		{ SNOR_HWCAPS_READ_FAST,	BIT(1) },
+		{ SNOR_HWCAPS_READ_1_1_2,	BIT(2) },
+		{ SNOR_HWCAPS_READ_1_2_2,	BIT(3) },
+		{ SNOR_HWCAPS_READ_1_1_4,	BIT(4) },
+		{ SNOR_HWCAPS_READ_1_4_4,	BIT(5) },
+		{ SNOR_HWCAPS_READ_1_1_1_DTR,	BIT(13) },
+		{ SNOR_HWCAPS_READ_1_2_2_DTR,	BIT(14) },
+		{ SNOR_HWCAPS_READ_1_4_4_DTR,	BIT(15) },
+	};
+	static const struct sfdp_4bait programs[] = {
+		{ SNOR_HWCAPS_PP,		BIT(6) },
+		{ SNOR_HWCAPS_PP_1_1_4,		BIT(7) },
+		{ SNOR_HWCAPS_PP_1_4_4,		BIT(8) },
+	};
+	static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
+		{ 0u /* not used */,		BIT(9) },
+		{ 0u /* not used */,		BIT(10) },
+		{ 0u /* not used */,		BIT(11) },
+		{ 0u /* not used */,		BIT(12) },
+	};
+	struct spi_nor_pp_command *params_pp = params->page_programs;
+	struct spi_nor_erase_map *map = &params->erase_map;
+	struct spi_nor_erase_type *erase_type = map->erase_type;
+	u32 *dwords;
+	size_t len;
+	u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
+	int i, ret;
+
+	if (param_header->major != SFDP_JESD216_MAJOR ||
+	    param_header->length < SFDP_4BAIT_DWORD_MAX)
+		return -EINVAL;
+
+	/* Read the 4-byte Address Instruction Table. */
+	len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
+
+	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+	dwords = kmalloc(len, GFP_KERNEL);
+	if (!dwords)
+		return -ENOMEM;
+
+	addr = SFDP_PARAM_HEADER_PTP(param_header);
+	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+	if (ret)
+		goto out;
+
+	/* Fix endianness of the 4BAIT DWORDs. */
+	le32_to_cpu_array(dwords, SFDP_4BAIT_DWORD_MAX);
+
+	/*
+	 * Compute the subset of (Fast) Read commands for which the 4-byte
+	 * version is supported.
+	 */
+	discard_hwcaps = 0;
+	read_hwcaps = 0;
+	for (i = 0; i < ARRAY_SIZE(reads); i++) {
+		const struct sfdp_4bait *read = &reads[i];
+
+		discard_hwcaps |= read->hwcaps;
+		if ((params->hwcaps.mask & read->hwcaps) &&
+		    (dwords[0] & read->supported_bit))
+			read_hwcaps |= read->hwcaps;
+	}
+
+	/*
+	 * Compute the subset of Page Program commands for which the 4-byte
+	 * version is supported.
+	 */
+	pp_hwcaps = 0;
+	for (i = 0; i < ARRAY_SIZE(programs); i++) {
+		const struct sfdp_4bait *program = &programs[i];
+
+		/*
+		 * The 4 Byte Address Instruction (Optional) Table is the only
+		 * SFDP table that indicates support for Page Program Commands.
+		 * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
+		 * authority for specifying Page Program support.
+		 */
+		discard_hwcaps |= program->hwcaps;
+		if (dwords[0] & program->supported_bit)
+			pp_hwcaps |= program->hwcaps;
+	}
+
+	/*
+	 * Compute the subset of Sector Erase commands for which the 4-byte
+	 * version is supported.
+	 */
+	erase_mask = 0;
+	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+		const struct sfdp_4bait *erase = &erases[i];
+
+		if (dwords[0] & erase->supported_bit)
+			erase_mask |= BIT(i);
+	}
+
+	/* Replicate the sort done for the map's erase types in BFPT. */
+	erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
+
+	/*
+	 * We need at least one 4-byte op code per read, program and erase
+	 * operation; the .read(), .write() and .erase() hooks share the
+	 * nor->addr_width value.
+	 */
+	if (!read_hwcaps || !pp_hwcaps || !erase_mask)
+		goto out;
+
+	/*
+	 * Discard all operations from the 4-byte instruction set which are
+	 * not supported by this memory.
+	 */
+	params->hwcaps.mask &= ~discard_hwcaps;
+	params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
+
+	/* Use the 4-byte address instruction set. */
+	for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
+		struct spi_nor_read_command *read_cmd = &params->reads[i];
+
+		read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
+	}
+
+	/* 4BAIT is the only SFDP table that indicates page program support. */
+	if (pp_hwcaps & SNOR_HWCAPS_PP)
+		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
+					SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+	if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
+		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
+					SPINOR_OP_PP_1_1_4_4B,
+					SNOR_PROTO_1_1_4);
+	if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
+		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
+					SPINOR_OP_PP_1_4_4_4B,
+					SNOR_PROTO_1_4_4);
+
+	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+		if (erase_mask & BIT(i))
+			erase_type[i].opcode = (dwords[1] >>
+						erase_type[i].idx * 8) & 0xFF;
+		else
+			spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
+	}
+
+	/*
+	 * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
+	 * later because we already did the conversion to 4byte opcodes. Also,
+	 * the latter function implements a legacy quirk for the erase size of
+	 * Spansion memories. However, this quirk is no longer needed with new
+	 * SFDP compliant memories.
+	 */
+	nor->addr_width = 4;
+	nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
+
+	/* fall through */
+out:
+	kfree(dwords);
+	return ret;
+}
+
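
DWORD 2 of the 4BAIT packs one 4-byte erase opcode per Erase Type, one byte each, which is what the shift by 'idx * 8' above extracts; a sketch with a made-up DWORD:

	u32 dword2 = 0xdc000021;	/* hypothetical: 0x21 for type 1, 0xdc for type 4 */
	u8 opcode1 = (dword2 >> 0 * 8) & 0xFF;	/* 0x21 */
	u8 opcode4 = (dword2 >> 3 * 8) & 0xFF;	/* 0xdc */
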
+/**
+ * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
+ * @nor:		pointer to a 'struct spi_nor'
+ * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
+ *			filled
+ *
+ * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
+ * specification. This standard tends to be supported by almost all (Q)SPI
+ * memory manufacturers. These standardized tables allow us to learn at
+ * runtime the main parameters needed to perform basic SPI flash operations
+ * such as Fast Read, Page Program or Sector Erase commands.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_parse_sfdp(struct spi_nor *nor,
+		       struct spi_nor_flash_parameter *params)
+{
+	const struct sfdp_parameter_header *param_header, *bfpt_header;
+	struct sfdp_parameter_header *param_headers = NULL;
+	struct sfdp_header header;
+	struct device *dev = nor->dev;
+	size_t psize;
+	int i, err;
+
+	/* Get the SFDP header. */
+	err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
+	if (err < 0)
+		return err;
+
+	/* Check the SFDP header version. */
+	if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
+	    header.major != SFDP_JESD216_MAJOR)
+		return -EINVAL;
+
+	/*
+	 * Verify that the first and only mandatory parameter header is a
+	 * Basic Flash Parameter Table header as specified in JESD216.
+	 */
+	bfpt_header = &header.bfpt_header;
+	if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
+	    bfpt_header->major != SFDP_JESD216_MAJOR)
+		return -EINVAL;
+
+	/*
+	 * Allocate memory then read all parameter headers with a single
+	 * Read SFDP command. These parameter headers will actually be parsed
+	 * twice: a first time to get the latest revision of the basic flash
+	 * parameter table, then a second time to handle the supported optional
+	 * tables.
+	 * Hence we read the parameter headers once and for all to reduce the
+	 * processing time. Also we use kmalloc() instead of devm_kmalloc()
+	 * because we don't need to keep these parameter headers: the allocated
+	 * memory is always released with kfree() before exiting this function.
+	 */
+	if (header.nph) {
+		psize = header.nph * sizeof(*param_headers);
+
+		param_headers = kmalloc(psize, GFP_KERNEL);
+		if (!param_headers)
+			return -ENOMEM;
+
+		err = spi_nor_read_sfdp(nor, sizeof(header),
+					psize, param_headers);
+		if (err < 0) {
+			dev_dbg(dev, "failed to read SFDP parameter headers\n");
+			goto exit;
+		}
+	}
+
+	/*
+	 * Check other parameter headers to get the latest revision of
+	 * the basic flash parameter table.
+	 */
+	for (i = 0; i < header.nph; i++) {
+		param_header = &param_headers[i];
+
+		if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
+		    param_header->major == SFDP_JESD216_MAJOR &&
+		    (param_header->minor > bfpt_header->minor ||
+		     (param_header->minor == bfpt_header->minor &&
+		      param_header->length > bfpt_header->length)))
+			bfpt_header = param_header;
+	}
+
+	err = spi_nor_parse_bfpt(nor, bfpt_header, params);
+	if (err)
+		goto exit;
+
+	/* Parse optional parameter tables. */
+	for (i = 0; i < header.nph; i++) {
+		param_header = &param_headers[i];
+
+		switch (SFDP_PARAM_HEADER_ID(param_header)) {
+		case SFDP_SECTOR_MAP_ID:
+			err = spi_nor_parse_smpt(nor, param_header, params);
+			break;
+
+		case SFDP_4BAIT_ID:
+			err = spi_nor_parse_4bait(nor, param_header, params);
+			break;
+
+		default:
+			break;
+		}
+
+		if (err) {
+			dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
+				 SFDP_PARAM_HEADER_ID(param_header));
+			/*
+			 * Let's not drop all information we extracted so far
+			 * if optional table parsers fail. In case of failing,
+			 * each optional parser is responsible to roll back to
+			 * the previously known spi_nor data.
+			 */
+			err = 0;
+		}
+	}
+
+exit:
+	kfree(param_headers);
+	return err;
+}
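
The "latest revision" scan above prefers a candidate BFPT header with a higher minor version, falling back to table length as a tie-breaker; condensed into a helper for clarity (a sketch, not part of the driver):

	/* True if header a describes a newer or longer BFPT than header b. */
	static bool bfpt_newer(const struct sfdp_parameter_header *a,
			       const struct sfdp_parameter_header *b)
	{
		return a->minor > b->minor ||
		       (a->minor == b->minor && a->length > b->length);
	}
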
diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h
new file mode 100644
index 0000000..7f9846b
--- /dev/null
+++ b/drivers/mtd/spi-nor/sfdp.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#ifndef __LINUX_MTD_SFDP_H
+#define __LINUX_MTD_SFDP_H
+
+/* SFDP revisions */
+#define SFDP_JESD216_MAJOR	1
+#define SFDP_JESD216_MINOR	0
+#define SFDP_JESD216A_MINOR	5
+#define SFDP_JESD216B_MINOR	6
+
+/* Basic Flash Parameter Table */
+
+/*
+ * JESD216 rev D defines a Basic Flash Parameter Table of 20 DWORDs.
+ * They are indexed from 1 but C arrays are indexed from 0.
+ */
+#define BFPT_DWORD(i)		((i) - 1)
+#define BFPT_DWORD_MAX		20
+
+struct sfdp_bfpt {
+	u32	dwords[BFPT_DWORD_MAX];
+};
+
+/* The first version of JESD216 defined only 9 DWORDs. */
+#define BFPT_DWORD_MAX_JESD216			9
+#define BFPT_DWORD_MAX_JESD216B			16
+
+/* 1st DWORD. */
+#define BFPT_DWORD1_FAST_READ_1_1_2		BIT(16)
+#define BFPT_DWORD1_ADDRESS_BYTES_MASK		GENMASK(18, 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY	(0x0UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4	(0x1UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY	(0x2UL << 17)
+#define BFPT_DWORD1_DTR				BIT(19)
+#define BFPT_DWORD1_FAST_READ_1_2_2		BIT(20)
+#define BFPT_DWORD1_FAST_READ_1_4_4		BIT(21)
+#define BFPT_DWORD1_FAST_READ_1_1_4		BIT(22)
+
+/* 5th DWORD. */
+#define BFPT_DWORD5_FAST_READ_2_2_2		BIT(0)
+#define BFPT_DWORD5_FAST_READ_4_4_4		BIT(4)
+
+/* 11th DWORD. */
+#define BFPT_DWORD11_PAGE_SIZE_SHIFT		4
+#define BFPT_DWORD11_PAGE_SIZE_MASK		GENMASK(7, 4)
+
+/* 15th DWORD. */
+
+/*
+ * (from JESD216 rev B)
+ * Quad Enable Requirements (QER):
+ * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
+ *         reads based on instruction. DQ3/HOLD# functions are held during
+ *         the instruction phase.
+ * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
+ *         two data bytes where bit 1 of the second byte is one.
+ *         [...]
+ *         Writing only one byte to the status register has the side-effect of
+ *         clearing status register 2, including the QE bit. The 100b code is
+ *         used if writing one byte to the status register does not modify
+ *         status register 2.
+ * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
+ *         one data byte where bit 6 is one.
+ *         [...]
+ * - 011b: QE is bit 7 of status register 2. It is set via Write status
+ *         register 2 instruction 3Eh with one data byte where bit 7 is one.
+ *         [...]
+ *         The status register 2 is read using instruction 3Fh.
+ * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
+ *         two data bytes where bit 1 of the second byte is one.
+ *         [...]
+ *         In contrast to the 001b code, writing one byte to the status
+ *         register does not modify status register 2.
+ * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
+ *         Read Status instruction 05h. Status register2 is read using
+ *         instruction 35h. QE is set via Write Status instruction 01h with
+ *         two data bytes where bit 1 of the second byte is one.
+ *         [...]
+ */
+#define BFPT_DWORD15_QER_MASK			GENMASK(22, 20)
+#define BFPT_DWORD15_QER_NONE			(0x0UL << 20) /* Micron */
+#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY		(0x1UL << 20)
+#define BFPT_DWORD15_QER_SR1_BIT6		(0x2UL << 20) /* Macronix */
+#define BFPT_DWORD15_QER_SR2_BIT7		(0x3UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD		(0x4UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1		(0x5UL << 20) /* Spansion */
+
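
Decoding the QER field out of a raw DWORD 15 is a single mask; a sketch (the raw value is made up):

	u32 dword15 = BFPT_DWORD15_QER_SR1_BIT6;	/* hypothetical raw DWORD */
	u32 qer = dword15 & BFPT_DWORD15_QER_MASK;
	bool macronix_style = (qer == BFPT_DWORD15_QER_SR1_BIT6);	/* true */
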
+struct sfdp_parameter_header {
+	u8		id_lsb;
+	u8		minor;
+	u8		major;
+	u8		length; /* in double words */
+	u8		parameter_table_pointer[3]; /* byte address */
+	u8		id_msb;
+};
+
+int spi_nor_parse_sfdp(struct spi_nor *nor,
+		       struct spi_nor_flash_parameter *params);
+
+#endif /* __LINUX_MTD_SFDP_H */
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
new file mode 100644
index 0000000..8429b4a
--- /dev/null
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+s25fs_s_post_bfpt_fixups(struct spi_nor *nor,
+			 const struct sfdp_parameter_header *bfpt_header,
+			 const struct sfdp_bfpt *bfpt,
+			 struct spi_nor_flash_parameter *params)
+{
+	/*
+	 * The S25FS-S chip family reports 512-byte pages in BFPT but
+	 * in reality the write buffer still wraps at the safe default
+	 * of 256 bytes.  Overwrite the page size advertised by BFPT
+	 * to get the writes working.
+	 */
+	params->page_size = 256;
+
+	return 0;
+}
+
+static struct spi_nor_fixups s25fs_s_fixups = {
+	.post_bfpt = s25fs_s_post_bfpt_fixups,
+};
+
+static const struct flash_info spansion_parts[] = {
+	/* Spansion/Cypress -- single (large) sector size only, at least
+	 * for the chips listed here (without boot sectors).
+	 */
+	{ "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64,
+			     SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128,
+			     SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
+			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			      USE_CLSR) },
+	{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
+			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			      USE_CLSR) },
+	{ "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128,
+			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			      USE_CLSR) },
+	{ "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512,
+			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			      USE_CLSR) },
+	{ "s25fl512s",  INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
+			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			      SPI_NOR_HAS_LOCK | USE_CLSR) },
+	{ "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256,
+			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
+	  .fixups = &s25fs_s_fixups, },
+	{ "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128,
+			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			      USE_CLSR) },
+	{ "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512,
+			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			      USE_CLSR) },
+	{ "s25fs512s",  INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
+			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
+	  .fixups = &s25fs_s_fixups, },
+	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
+	{ "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
+	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64,
+			     SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			     USE_CLSR) },
+	{ "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256,
+			     SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			     USE_CLSR) },
+	{ "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8, 0) },
+	{ "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16, 0) },
+	{ "s25sl016a",  INFO(0x010214,      0,  64 * 1024,  32, 0) },
+	{ "s25sl032a",  INFO(0x010215,      0,  64 * 1024,  64, 0) },
+	{ "s25sl064a",  INFO(0x010216,      0,  64 * 1024, 128, 0) },
+	{ "s25fl004k",  INFO(0xef4013,      0,  64 * 1024,   8,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "s25fl008k",  INFO(0xef4014,      0,  64 * 1024,  16,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "s25fl016k",  INFO(0xef4015,      0,  64 * 1024,  32,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "s25fl116k",  INFO(0x014015,      0,  64 * 1024,  32,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "s25fl132k",  INFO(0x014016,      0,  64 * 1024,  64, SECT_4K) },
+	{ "s25fl164k",  INFO(0x014017,      0,  64 * 1024, 128, SECT_4K) },
+	{ "s25fl204k",  INFO(0x014013,      0,  64 * 1024,   8,
+			     SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "s25fl208k",  INFO(0x014014,      0,  64 * 1024,  16,
+			     SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "s25fl064l",  INFO(0x016017,      0,  64 * 1024, 128,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			     SPI_NOR_4B_OPCODES) },
+	{ "s25fl128l",  INFO(0x016018,      0,  64 * 1024, 256,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			     SPI_NOR_4B_OPCODES) },
+	{ "s25fl256l",  INFO(0x016019,      0,  64 * 1024, 512,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			     SPI_NOR_4B_OPCODES) },
+	{ "cy15x104q",  INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1,
+			      SPI_NOR_NO_ERASE) },
+};
+
+static void spansion_post_sfdp_fixups(struct spi_nor *nor)
+{
+	if (nor->params->size <= SZ_16M)
+		return;
+
+	nor->flags |= SNOR_F_4B_OPCODES;
+	/* No small sector erase for 4-byte command set */
+	nor->erase_opcode = SPINOR_OP_SE;
+	nor->mtd.erasesize = nor->info->sector_size;
+}
+
+static const struct spi_nor_fixups spansion_fixups = {
+	.post_sfdp = spansion_post_sfdp_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_spansion = {
+	.name = "spansion",
+	.parts = spansion_parts,
+	.nparts = ARRAY_SIZE(spansion_parts),
+	.fixups = &spansion_fixups,
+};
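
The post-SFDP fixup above keys off the 16 MiB boundary because three address bytes can reach at most 2^24 bytes; a worked check with an assumed part size:

	u64 flash_size = 32 * 1024 * 1024;	/* assumed: a 32 MiB part */
	u64 max_3byte = 1ULL << 24;		/* 16 MiB, the 3-byte ceiling */
	bool needs_4b = flash_size > max_3byte;	/* true */
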
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
deleted file mode 100644
index dd6963e..0000000
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ /dev/null
@@ -1,5152 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
- * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
- *
- * Copyright (C) 2005, Intec Automation Inc.
- * Copyright (C) 2014, Freescale Semiconductor, Inc.
- */
-
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/math64.h>
-#include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/sort.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/of_platform.h>
-#include <linux/sched/task_stack.h>
-#include <linux/spi/flash.h>
-#include <linux/mtd/spi-nor.h>
-
-/* Define max times to check status register before we give up. */
-
-/*
- * For everything but full-chip erase; probably could be much smaller, but kept
- * around for safety for now
- */
-#define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)
-
-/*
- * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
- * for larger flash
- */
-#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES	(40UL * HZ)
-
-#define SPI_NOR_MAX_ID_LEN	6
-#define SPI_NOR_MAX_ADDR_WIDTH	4
-
-struct sfdp_parameter_header {
-	u8		id_lsb;
-	u8		minor;
-	u8		major;
-	u8		length; /* in double words */
-	u8		parameter_table_pointer[3]; /* byte address */
-	u8		id_msb;
-};
-
-#define SFDP_PARAM_HEADER_ID(p)	(((p)->id_msb << 8) | (p)->id_lsb)
-#define SFDP_PARAM_HEADER_PTP(p) \
-	(((p)->parameter_table_pointer[2] << 16) | \
-	 ((p)->parameter_table_pointer[1] <<  8) | \
-	 ((p)->parameter_table_pointer[0] <<  0))
-
-#define SFDP_BFPT_ID		0xff00	/* Basic Flash Parameter Table */
-#define SFDP_SECTOR_MAP_ID	0xff81	/* Sector Map Table */
-#define SFDP_4BAIT_ID		0xff84  /* 4-byte Address Instruction Table */
-
-#define SFDP_SIGNATURE		0x50444653U
-#define SFDP_JESD216_MAJOR	1
-#define SFDP_JESD216_MINOR	0
-#define SFDP_JESD216A_MINOR	5
-#define SFDP_JESD216B_MINOR	6
-
-struct sfdp_header {
-	u32		signature; /* 0x50444653U <=> "SFDP" */
-	u8		minor;
-	u8		major;
-	u8		nph; /* 0-base number of parameter headers */
-	u8		unused;
-
-	/* Basic Flash Parameter Table. */
-	struct sfdp_parameter_header	bfpt_header;
-};
-
-/* Basic Flash Parameter Table */
-
-/*
- * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
- * They are indexed from 1 but C arrays are indexed from 0.
- */
-#define BFPT_DWORD(i)		((i) - 1)
-#define BFPT_DWORD_MAX		16
-
-/* The first version of JESD216 defined only 9 DWORDs. */
-#define BFPT_DWORD_MAX_JESD216			9
-
-/* 1st DWORD. */
-#define BFPT_DWORD1_FAST_READ_1_1_2		BIT(16)
-#define BFPT_DWORD1_ADDRESS_BYTES_MASK		GENMASK(18, 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY	(0x0UL << 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4	(0x1UL << 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY	(0x2UL << 17)
-#define BFPT_DWORD1_DTR				BIT(19)
-#define BFPT_DWORD1_FAST_READ_1_2_2		BIT(20)
-#define BFPT_DWORD1_FAST_READ_1_4_4		BIT(21)
-#define BFPT_DWORD1_FAST_READ_1_1_4		BIT(22)
-
-/* 5th DWORD. */
-#define BFPT_DWORD5_FAST_READ_2_2_2		BIT(0)
-#define BFPT_DWORD5_FAST_READ_4_4_4		BIT(4)
-
-/* 11th DWORD. */
-#define BFPT_DWORD11_PAGE_SIZE_SHIFT		4
-#define BFPT_DWORD11_PAGE_SIZE_MASK		GENMASK(7, 4)
-
-/* 15th DWORD. */
-
-/*
- * (from JESD216 rev B)
- * Quad Enable Requirements (QER):
- * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
- *         reads based on instruction. DQ3/HOLD# functions are hold during
- *         instruction phase.
- * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
- *         two data bytes where bit 1 of the second byte is one.
- *         [...]
- *         Writing only one byte to the status register has the side-effect of
- *         clearing status register 2, including the QE bit. The 100b code is
- *         used if writing one byte to the status register does not modify
- *         status register 2.
- * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
- *         one data byte where bit 6 is one.
- *         [...]
- * - 011b: QE is bit 7 of status register 2. It is set via Write status
- *         register 2 instruction 3Eh with one data byte where bit 7 is one.
- *         [...]
- *         The status register 2 is read using instruction 3Fh.
- * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
- *         two data bytes where bit 1 of the second byte is one.
- *         [...]
- *         In contrast to the 001b code, writing one byte to the status
- *         register does not modify status register 2.
- * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
- *         Read Status instruction 05h. Status register2 is read using
- *         instruction 35h. QE is set via Write Status instruction 01h with
- *         two data bytes where bit 1 of the second byte is one.
- *         [...]
- */
-#define BFPT_DWORD15_QER_MASK			GENMASK(22, 20)
-#define BFPT_DWORD15_QER_NONE			(0x0UL << 20) /* Micron */
-#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY		(0x1UL << 20)
-#define BFPT_DWORD15_QER_SR1_BIT6		(0x2UL << 20) /* Macronix */
-#define BFPT_DWORD15_QER_SR2_BIT7		(0x3UL << 20)
-#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD		(0x4UL << 20)
-#define BFPT_DWORD15_QER_SR2_BIT1		(0x5UL << 20) /* Spansion */
-
-struct sfdp_bfpt {
-	u32	dwords[BFPT_DWORD_MAX];
-};
-
-/**
- * struct spi_nor_fixups - SPI NOR fixup hooks
- * @default_init: called after default flash parameters init. Used to tweak
- *                flash parameters when information provided by the flash_info
- *                table is incomplete or wrong.
- * @post_bfpt: called after the BFPT table has been parsed
- * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
- *             that do not support RDSFDP). Typically used to tweak various
- *             parameters that could not be extracted by other means (i.e.
- *             when information provided by the SFDP/flash_info tables are
- *             incomplete or wrong).
- *
- * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
- * table is broken or not available.
- */
-struct spi_nor_fixups {
-	void (*default_init)(struct spi_nor *nor);
-	int (*post_bfpt)(struct spi_nor *nor,
-			 const struct sfdp_parameter_header *bfpt_header,
-			 const struct sfdp_bfpt *bfpt,
-			 struct spi_nor_flash_parameter *params);
-	void (*post_sfdp)(struct spi_nor *nor);
-};
-
-struct flash_info {
-	char		*name;
-
-	/*
-	 * This array stores the ID bytes.
-	 * The first three bytes are the JEDEC ID.
-	 * JEDEC ID zero means "no ID" (mostly older chips).
-	 */
-	u8		id[SPI_NOR_MAX_ID_LEN];
-	u8		id_len;
-
-	/* The size listed here is what works with SPINOR_OP_SE, which isn't
-	 * necessarily called a "sector" by the vendor.
-	 */
-	unsigned	sector_size;
-	u16		n_sectors;
-
-	u16		page_size;
-	u16		addr_width;
-
-	u16		flags;
-#define SECT_4K			BIT(0)	/* SPINOR_OP_BE_4K works uniformly */
-#define SPI_NOR_NO_ERASE	BIT(1)	/* No erase command needed */
-#define SST_WRITE		BIT(2)	/* use SST byte programming */
-#define SPI_NOR_NO_FR		BIT(3)	/* Can't do fastread */
-#define SECT_4K_PMC		BIT(4)	/* SPINOR_OP_BE_4K_PMC works uniformly */
-#define SPI_NOR_DUAL_READ	BIT(5)	/* Flash supports Dual Read */
-#define SPI_NOR_QUAD_READ	BIT(6)	/* Flash supports Quad Read */
-#define USE_FSR			BIT(7)	/* use flag status register */
-#define SPI_NOR_HAS_LOCK	BIT(8)	/* Flash supports lock/unlock via SR */
-#define SPI_NOR_HAS_TB		BIT(9)	/*
-					 * Flash SR has Top/Bottom (TB) protect
-					 * bit. Must be used with
-					 * SPI_NOR_HAS_LOCK.
-					 */
-#define SPI_NOR_XSR_RDY		BIT(10)	/*
-					 * S3AN flashes have specific opcode to
-					 * read the status register.
-					 * Flags SPI_NOR_XSR_RDY and SPI_S3AN
-					 * use the same bit as one implies the
-					 * other, but we will get rid of
-					 * SPI_S3AN soon.
-					 */
-#define	SPI_S3AN		BIT(10)	/*
-					 * Xilinx Spartan 3AN In-System Flash
-					 * (MFR cannot be used for probing
-					 * because it has the same value as
-					 * ATMEL flashes)
-					 */
-#define SPI_NOR_4B_OPCODES	BIT(11)	/*
-					 * Use dedicated 4byte address op codes
-					 * to support memory size above 128Mib.
-					 */
-#define NO_CHIP_ERASE		BIT(12) /* Chip does not support chip erase */
-#define SPI_NOR_SKIP_SFDP	BIT(13)	/* Skip parsing of SFDP tables */
-#define USE_CLSR		BIT(14)	/* use CLSR command */
-#define SPI_NOR_OCTAL_READ	BIT(15)	/* Flash supports Octal Read */
-
-	/* Part specific fixup hooks. */
-	const struct spi_nor_fixups *fixups;
-};
-
-#define JEDEC_MFR(info)	((info)->id[0])
-
-/**
- * spi_nor_spimem_xfer_data() - helper function to read/write data to
- *                              flash's memory region
- * @nor:        pointer to 'struct spi_nor'
- * @op:         pointer to 'struct spi_mem_op' template for transfer
- *
- * Return: number of bytes transferred on success, -errno otherwise
- */
-static ssize_t spi_nor_spimem_xfer_data(struct spi_nor *nor,
-					struct spi_mem_op *op)
-{
-	bool usebouncebuf = false;
-	void *rdbuf = NULL;
-	const void *buf;
-	int ret;
-
-	if (op->data.dir == SPI_MEM_DATA_IN)
-		buf = op->data.buf.in;
-	else
-		buf = op->data.buf.out;
-
-	if (object_is_on_stack(buf) || !virt_addr_valid(buf))
-		usebouncebuf = true;
-
-	if (usebouncebuf) {
-		if (op->data.nbytes > nor->bouncebuf_size)
-			op->data.nbytes = nor->bouncebuf_size;
-
-		if (op->data.dir == SPI_MEM_DATA_IN) {
-			rdbuf = op->data.buf.in;
-			op->data.buf.in = nor->bouncebuf;
-		} else {
-			op->data.buf.out = nor->bouncebuf;
-			memcpy(nor->bouncebuf, buf,
-			       op->data.nbytes);
-		}
-	}
-
-	ret = spi_mem_adjust_op_size(nor->spimem, op);
-	if (ret)
-		return ret;
-
-	ret = spi_mem_exec_op(nor->spimem, op);
-	if (ret)
-		return ret;
-
-	if (usebouncebuf && op->data.dir == SPI_MEM_DATA_IN)
-		memcpy(rdbuf, nor->bouncebuf, op->data.nbytes);
-
-	return op->data.nbytes;
-}
-
-/**
- * spi_nor_spimem_read_data() - read data from flash's memory region via
- *                              spi-mem
- * @nor:        pointer to 'struct spi_nor'
- * @from:       offset to read from
- * @len:        number of bytes to read
- * @buf:        pointer to dst buffer
- *
- * Return: number of bytes read successfully, -errno otherwise
- */
-static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
-					size_t len, u8 *buf)
-{
-	struct spi_mem_op op =
-		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
-			   SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
-			   SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
-			   SPI_MEM_OP_DATA_IN(len, buf, 1));
-
-	/* get transfer protocols. */
-	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
-	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
-	op.dummy.buswidth = op.addr.buswidth;
-	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
-
-	/* convert the dummy cycles to the number of bytes */
-	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
-
-	return spi_nor_spimem_xfer_data(nor, &op);
-}
-
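
The dummy-cycle conversion in the removed helper above deserves a worked example: 8 dummy clock cycles on a 4-bit-wide bus carry 8 * 4 = 32 bits, i.e. 4 bytes:

	u8 read_dummy = 8;	/* dummy clock cycles */
	u8 buswidth = 4;	/* quad I/O */
	u8 dummy_nbytes = read_dummy * buswidth / 8;	/* 4 bytes */
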
-/**
- * spi_nor_read_data() - read data from flash memory
- * @nor:        pointer to 'struct spi_nor'
- * @from:       offset to read from
- * @len:        number of bytes to read
- * @buf:        pointer to dst buffer
- *
- * Return: number of bytes read successfully, -errno otherwise
- */
-static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
-				 u8 *buf)
-{
-	if (nor->spimem)
-		return spi_nor_spimem_read_data(nor, from, len, buf);
-
-	return nor->read(nor, from, len, buf);
-}
-
-/**
- * spi_nor_spimem_write_data() - write data to flash memory via
- *                               spi-mem
- * @nor:        pointer to 'struct spi_nor'
- * @to:         offset to write to
- * @len:        number of bytes to write
- * @buf:        pointer to src buffer
- *
- * Return: number of bytes written successfully, -errno otherwise
- */
-static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
-					 size_t len, const u8 *buf)
-{
-	struct spi_mem_op op =
-		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
-			   SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
-			   SPI_MEM_OP_NO_DUMMY,
-			   SPI_MEM_OP_DATA_OUT(len, buf, 1));
-
-	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
-	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
-	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
-
-	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
-		op.addr.nbytes = 0;
-
-	return spi_nor_spimem_xfer_data(nor, &op);
-}
-
-/**
- * spi_nor_write_data() - write data to flash memory
- * @nor:        pointer to 'struct spi_nor'
- * @to:         offset to write to
- * @len:        number of bytes to write
- * @buf:        pointer to src buffer
- *
- * Return: number of bytes written successfully, -errno otherwise
- */
-static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
-				  const u8 *buf)
-{
-	if (nor->spimem)
-		return spi_nor_spimem_write_data(nor, to, len, buf);
-
-	return nor->write(nor, to, len, buf);
-}
-
-/*
- * Read the status register, returning its value in the location
- * Return the status register value.
- * Returns negative if error occurred.
- */
-static int read_sr(struct spi_nor *nor)
-{
-	int ret;
-
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
-
-		ret = spi_mem_exec_op(nor->spimem, &op);
-	} else {
-		ret = nor->read_reg(nor, SPINOR_OP_RDSR, nor->bouncebuf, 1);
-	}
-
-	if (ret < 0) {
-		pr_err("error %d reading SR\n", (int) ret);
-		return ret;
-	}
-
-	return nor->bouncebuf[0];
-}
-
-/*
- * Read the flag status register, returning its value in the location
- * Return the status register value.
- * Returns negative if error occurred.
- */
-static int read_fsr(struct spi_nor *nor)
-{
-	int ret;
-
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
-
-		ret = spi_mem_exec_op(nor->spimem, &op);
-	} else {
-		ret = nor->read_reg(nor, SPINOR_OP_RDFSR, nor->bouncebuf, 1);
-	}
-
-	if (ret < 0) {
-		pr_err("error %d reading FSR\n", ret);
-		return ret;
-	}
-
-	return nor->bouncebuf[0];
-}
-
-/*
- * Read configuration register, returning its value in the
- * location. Return the configuration register value.
- * Returns negative if error occurred.
- */
-static int read_cr(struct spi_nor *nor)
-{
-	int ret;
-
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
-
-		ret = spi_mem_exec_op(nor->spimem, &op);
-	} else {
-		ret = nor->read_reg(nor, SPINOR_OP_RDCR, nor->bouncebuf, 1);
-	}
-
-	if (ret < 0) {
-		dev_err(nor->dev, "error %d reading CR\n", ret);
-		return ret;
-	}
-
-	return nor->bouncebuf[0];
-}
-
-/*
- * Write status register 1 byte
- * Returns negative if error occurred.
- */
-static int write_sr(struct spi_nor *nor, u8 val)
-{
-	nor->bouncebuf[0] = val;
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	return nor->write_reg(nor, SPINOR_OP_WRSR, nor->bouncebuf, 1);
-}
-
-/*
- * Set write enable latch with Write Enable command.
- * Returns negative if error occurred.
- */
-static int write_enable(struct spi_nor *nor)
-{
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_NO_DATA);
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
-}
-
-/*
- * Send write disable instruction to the chip.
- */
-static int write_disable(struct spi_nor *nor)
-{
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_NO_DATA);
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
-}
-
-static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
-{
-	return mtd->priv;
-}
-
-
-static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
-{
-	size_t i;
-
-	for (i = 0; i < size; i++)
-		if (table[i][0] == opcode)
-			return table[i][1];
-
-	/* No conversion found, keep input op code. */
-	return opcode;
-}
-
-static u8 spi_nor_convert_3to4_read(u8 opcode)
-{
-	static const u8 spi_nor_3to4_read[][2] = {
-		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
-		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
-		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
-		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
-		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
-		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
-		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
-		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },
-
-		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
-		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
-		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
-	};
-
-	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
-				      ARRAY_SIZE(spi_nor_3to4_read));
-}
-
-static u8 spi_nor_convert_3to4_program(u8 opcode)
-{
-	static const u8 spi_nor_3to4_program[][2] = {
-		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
-		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
-		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
-		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
-		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
-	};
-
-	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
-				      ARRAY_SIZE(spi_nor_3to4_program));
-}
-
-static u8 spi_nor_convert_3to4_erase(u8 opcode)
-{
-	static const u8 spi_nor_3to4_erase[][2] = {
-		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
-		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
-		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
-	};
-
-	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
-				      ARRAY_SIZE(spi_nor_3to4_erase));
-}
-
-static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
-{
-	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
-	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
-	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
-
-	if (!spi_nor_has_uniform_erase(nor)) {
-		struct spi_nor_erase_map *map = &nor->params.erase_map;
-		struct spi_nor_erase_type *erase;
-		int i;
-
-		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
-			erase = &map->erase_type[i];
-			erase->opcode =
-				spi_nor_convert_3to4_erase(erase->opcode);
-		}
-	}
-}
-
-static int macronix_set_4byte(struct spi_nor *nor, bool enable)
-{
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
-						  SPINOR_OP_EN4B :
-						  SPINOR_OP_EX4B,
-						  1),
-				  SPI_MEM_OP_NO_ADDR,
-				  SPI_MEM_OP_NO_DUMMY,
-				  SPI_MEM_OP_NO_DATA);
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	return nor->write_reg(nor, enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B,
-			      NULL, 0);
-}
-
-static int st_micron_set_4byte(struct spi_nor *nor, bool enable)
-{
-	int ret;
-
-	write_enable(nor);
-	ret = macronix_set_4byte(nor, enable);
-	write_disable(nor);
-
-	return ret;
-}
-
-static int spansion_set_4byte(struct spi_nor *nor, bool enable)
-{
-	nor->bouncebuf[0] = enable << 7;
-
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	return nor->write_reg(nor, SPINOR_OP_BRWR, nor->bouncebuf, 1);
-}
-
-static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
-{
-	nor->bouncebuf[0] = ear;
-
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	return nor->write_reg(nor, SPINOR_OP_WREAR, nor->bouncebuf, 1);
-}
-
-static int winbond_set_4byte(struct spi_nor *nor, bool enable)
-{
-	int ret;
-
-	ret = macronix_set_4byte(nor, enable);
-	if (ret || enable)
-		return ret;
-
-	/*
-	 * On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address
-	 * Register to be set to 1, so all 3-byte-address reads come from the
-	 * second 16M. We must clear the register to enable normal behavior.
-	 */
-	write_enable(nor);
-	ret = spi_nor_write_ear(nor, 0);
-	write_disable(nor);
-
-	return ret;
-}
-
-static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
-{
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_DATA_IN(1, sr, 1));
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	return nor->read_reg(nor, SPINOR_OP_XRDSR, sr, 1);
-}
-
-static int s3an_sr_ready(struct spi_nor *nor)
-{
-	int ret;
-
-	ret = spi_nor_xread_sr(nor, nor->bouncebuf);
-	if (ret < 0) {
-		dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
-		return ret;
-	}
-
-	return !!(nor->bouncebuf[0] & XSR_RDY);
-}
-
-static int spi_nor_clear_sr(struct spi_nor *nor)
-{
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_NO_DATA);
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	return nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
-}
-
-static int spi_nor_sr_ready(struct spi_nor *nor)
-{
-	int sr = read_sr(nor);
-	if (sr < 0)
-		return sr;
-
-	if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
-		if (sr & SR_E_ERR)
-			dev_err(nor->dev, "Erase Error occurred\n");
-		else
-			dev_err(nor->dev, "Programming Error occurred\n");
-
-		spi_nor_clear_sr(nor);
-		return -EIO;
-	}
-
-	return !(sr & SR_WIP);
-}
-
-static int spi_nor_clear_fsr(struct spi_nor *nor)
-{
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_NO_DATA);
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	return nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
-}
-
-static int spi_nor_fsr_ready(struct spi_nor *nor)
-{
-	int fsr = read_fsr(nor);
-	if (fsr < 0)
-		return fsr;
-
-	if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
-		if (fsr & FSR_E_ERR)
-			dev_err(nor->dev, "Erase operation failed.\n");
-		else
-			dev_err(nor->dev, "Program operation failed.\n");
-
-		if (fsr & FSR_PT_ERR)
-			dev_err(nor->dev,
-			"Attempted to modify a protected sector.\n");
-
-		spi_nor_clear_fsr(nor);
-		return -EIO;
-	}
-
-	return fsr & FSR_READY;
-}
-
-static int spi_nor_ready(struct spi_nor *nor)
-{
-	int sr, fsr;
-
-	if (nor->flags & SNOR_F_READY_XSR_RDY)
-		sr = s3an_sr_ready(nor);
-	else
-		sr = spi_nor_sr_ready(nor);
-	if (sr < 0)
-		return sr;
-	fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
-	if (fsr < 0)
-		return fsr;
-	return sr && fsr;
-}
-
-/*
- * Service routine to read status register until ready, or timeout occurs.
- * Returns non-zero if error.
- */
-static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
-						unsigned long timeout_jiffies)
-{
-	unsigned long deadline;
-	int timeout = 0, ret;
-
-	deadline = jiffies + timeout_jiffies;
-
-	while (!timeout) {
-		if (time_after_eq(jiffies, deadline))
-			timeout = 1;
-
-		ret = spi_nor_ready(nor);
-		if (ret < 0)
-			return ret;
-		if (ret)
-			return 0;
-
-		cond_resched();
-	}
-
-	dev_err(nor->dev, "flash operation timed out\n");
-
-	return -ETIMEDOUT;
-}
-
-static int spi_nor_wait_till_ready(struct spi_nor *nor)
-{
-	return spi_nor_wait_till_ready_with_timeout(nor,
-						    DEFAULT_READY_WAIT_JIFFIES);
-}
-
-/*
- * Erase the whole flash memory
- *
- * Returns 0 if successful, non-zero otherwise.
- */
-static int erase_chip(struct spi_nor *nor)
-{
-	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
-
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_NO_DATA);
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
-}
-
-static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
-{
-	int ret = 0;
-
-	mutex_lock(&nor->lock);
-
-	if (nor->prepare) {
-		ret = nor->prepare(nor, ops);
-		if (ret) {
-			dev_err(nor->dev, "failed in the preparation.\n");
-			mutex_unlock(&nor->lock);
-			return ret;
-		}
-	}
-	return ret;
-}
-
-static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
-{
-	if (nor->unprepare)
-		nor->unprepare(nor, ops);
-	mutex_unlock(&nor->lock);
-}
-
-/*
- * This code converts an address to the Default Address Mode, that has non
- * power of two page sizes. We must support this mode because it is the default
- * mode supported by Xilinx tools, it can access the whole flash area and
- * changing over to the Power-of-two mode is irreversible and corrupts the
- * original data.
- * Addr can safely be unsigned int, the biggest S3AN device is smaller than
- * 4 MiB.
- */
-static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
-{
-	u32 offset, page;
-
-	offset = addr % nor->page_size;
-	page = addr / nor->page_size;
-	page <<= (nor->page_size > 512) ? 10 : 9;
-
-	return page | offset;
-}
-
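
A worked example of the removed conversion: with a 264-byte page (shift of 9, since page_size <= 512), address 1000 splits into page 3, offset 208, giving (3 << 9) | 208 = 1744:

	u32 addr = 1000, page_size = 264;
	u32 offset = addr % page_size;		/* 208 */
	u32 page = (addr / page_size) << 9;	/* 3 << 9 = 1536 */
	u32 converted = page | offset;		/* 1744 */
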
-static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
-{
-	if (!nor->params.convert_addr)
-		return addr;
-
-	return nor->params.convert_addr(nor, addr);
-}
-
-/*
- * Initiate the erasure of a single sector
- */
-static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
-{
-	int i;
-
-	addr = spi_nor_convert_addr(nor, addr);
-
-	if (nor->erase)
-		return nor->erase(nor, addr);
-
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
-				   SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_NO_DATA);
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	/*
-	 * Default implementation, if driver doesn't have a specialized HW
-	 * control
-	 */
-	for (i = nor->addr_width - 1; i >= 0; i--) {
-		nor->bouncebuf[i] = addr & 0xff;
-		addr >>= 8;
-	}
-
-	return nor->write_reg(nor, nor->erase_opcode, nor->bouncebuf,
-			      nor->addr_width);
-}
-
-/**
- * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
- * @erase:	pointer to a structure that describes a SPI NOR erase type
- * @dividend:	dividend value
- * @remainder:	pointer to u32 remainder (will be updated)
- *
- * Return: the result of the division
- */
-static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
-				     u64 dividend, u32 *remainder)
-{
-	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
-	*remainder = (u32)dividend & erase->size_mask;
-	return dividend >> erase->size_shift;
-}
-
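
Because JESD216B mandates power-of-two erase sizes, the division above is pure mask-and-shift; e.g. for a 64 KiB erase type (size_shift = 16, size_mask = 0xffff):

	u64 dividend = 0x12345;
	u32 rem = dividend & 0xffff;	/* size_mask -> 0x2345 */
	u64 quot = dividend >> 16;	/* size_shift -> 0x1 */
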
-/**
- * spi_nor_find_best_erase_type() - find the best erase type for the given
- *				    offset in the serial flash memory and the
- *				    number of bytes to erase. The region in
- *				    which the address fits is expected to be
- *				    provided.
- * @map:	the erase map of the SPI NOR
- * @region:	pointer to a structure that describes a SPI NOR erase region
- * @addr:	offset in the serial flash memory
- * @len:	number of bytes to erase
- *
- * Return: a pointer to the best fitted erase type, NULL otherwise.
- */
-static const struct spi_nor_erase_type *
-spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
-			     const struct spi_nor_erase_region *region,
-			     u64 addr, u32 len)
-{
-	const struct spi_nor_erase_type *erase;
-	u32 rem;
-	int i;
-	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
-
-	/*
-	 * Erase types are ordered by size, with the smallest erase type at
-	 * index 0.
-	 */
-	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
-		/* Does the erase region support the tested erase type? */
-		if (!(erase_mask & BIT(i)))
-			continue;
-
-		erase = &map->erase_type[i];
-
-		/* Alignment is not mandatory for overlaid regions */
-		if (region->offset & SNOR_OVERLAID_REGION &&
-		    region->size <= len)
-			return erase;
-
-		/* Don't erase more than what the user has asked for. */
-		if (erase->size > len)
-			continue;
-
-		spi_nor_div_by_erase_size(erase, addr, &rem);
-		if (rem)
-			continue;
-		else
-			return erase;
-	}
-
-	return NULL;
-}
-
-/**
- * spi_nor_region_next() - get the next spi nor region
- * @region:	pointer to a structure that describes a SPI NOR erase region
- *
- * Return: the next spi nor region or NULL if last region.
- */
-static struct spi_nor_erase_region *
-spi_nor_region_next(struct spi_nor_erase_region *region)
-{
-	if (spi_nor_region_is_last(region))
-		return NULL;
-	region++;
-	return region;
-}
-
-/**
- * spi_nor_find_erase_region() - find the region of the serial flash memory in
- *				 which the offset fits
- * @map:	the erase map of the SPI NOR
- * @addr:	offset in the serial flash memory
- *
- * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
- *	   otherwise.
- */
-static struct spi_nor_erase_region *
-spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
-{
-	struct spi_nor_erase_region *region = map->regions;
-	u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
-	u64 region_end = region_start + region->size;
-
-	while (addr < region_start || addr >= region_end) {
-		region = spi_nor_region_next(region);
-		if (!region)
-			return ERR_PTR(-EINVAL);
-
-		region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
-		region_end = region_start + region->size;
-	}
-
-	return region;
-}
-
-/**
- * spi_nor_init_erase_cmd() - initialize an erase command
- * @region:	pointer to a structure that describes a SPI NOR erase region
- * @erase:	pointer to a structure that describes a SPI NOR erase type
- *
- * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
- *	   otherwise.
- */
-static struct spi_nor_erase_command *
-spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
-		       const struct spi_nor_erase_type *erase)
-{
-	struct spi_nor_erase_command *cmd;
-
-	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
-	if (!cmd)
-		return ERR_PTR(-ENOMEM);
-
-	INIT_LIST_HEAD(&cmd->list);
-	cmd->opcode = erase->opcode;
-	cmd->count = 1;
-
-	if (region->offset & SNOR_OVERLAID_REGION)
-		cmd->size = region->size;
-	else
-		cmd->size = erase->size;
-
-	return cmd;
-}
-
-/**
- * spi_nor_destroy_erase_cmd_list() - destroy erase command list
- * @erase_list:	list of erase commands
- */
-static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
-{
-	struct spi_nor_erase_command *cmd, *next;
-
-	list_for_each_entry_safe(cmd, next, erase_list, list) {
-		list_del(&cmd->list);
-		kfree(cmd);
-	}
-}
-
-/**
- * spi_nor_init_erase_cmd_list() - initialize erase command list
- * @nor:	pointer to a 'struct spi_nor'
- * @erase_list:	list of erase commands to be executed once we validate that the
- *		erase can be performed
- * @addr:	offset in the serial flash memory
- * @len:	number of bytes to erase
- *
- * Builds the list of best fitted erase commands and verifies if the erase can
- * be performed.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
-				       struct list_head *erase_list,
-				       u64 addr, u32 len)
-{
-	const struct spi_nor_erase_map *map = &nor->params.erase_map;
-	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
-	struct spi_nor_erase_region *region;
-	struct spi_nor_erase_command *cmd = NULL;
-	u64 region_end;
-	int ret = -EINVAL;
-
-	region = spi_nor_find_erase_region(map, addr);
-	if (IS_ERR(region))
-		return PTR_ERR(region);
-
-	region_end = spi_nor_region_end(region);
-
-	while (len) {
-		erase = spi_nor_find_best_erase_type(map, region, addr, len);
-		if (!erase)
-			goto destroy_erase_cmd_list;
-
-		if (prev_erase != erase ||
-		    erase->size != cmd->size ||
-		    region->offset & SNOR_OVERLAID_REGION) {
-			cmd = spi_nor_init_erase_cmd(region, erase);
-			if (IS_ERR(cmd)) {
-				ret = PTR_ERR(cmd);
-				goto destroy_erase_cmd_list;
-			}
-
-			list_add_tail(&cmd->list, erase_list);
-		} else {
-			cmd->count++;
-		}
-
-		addr += cmd->size;
-		len -= cmd->size;
-
-		if (len && addr >= region_end) {
-			region = spi_nor_region_next(region);
-			if (!region)
-				goto destroy_erase_cmd_list;
-			region_end = spi_nor_region_end(region);
-		}
-
-		prev_erase = erase;
-	}
-
-	return 0;
-
-destroy_erase_cmd_list:
-	spi_nor_destroy_erase_cmd_list(erase_list);
-	return ret;
-}
-
-/**
- * spi_nor_erase_multi_sectors() - perform a non-uniform erase
- * @nor:	pointer to a 'struct spi_nor'
- * @addr:	offset in the serial flash memory
- * @len:	number of bytes to erase
- *
- * Build a list of best fitted erase commands and execute it once we validate
- * that the erase can be performed.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
-{
-	LIST_HEAD(erase_list);
-	struct spi_nor_erase_command *cmd, *next;
-	int ret;
-
-	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
-	if (ret)
-		return ret;
-
-	list_for_each_entry_safe(cmd, next, &erase_list, list) {
-		nor->erase_opcode = cmd->opcode;
-		while (cmd->count) {
-			write_enable(nor);
-
-			ret = spi_nor_erase_sector(nor, addr);
-			if (ret)
-				goto destroy_erase_cmd_list;
-
-			addr += cmd->size;
-			cmd->count--;
-
-			ret = spi_nor_wait_till_ready(nor);
-			if (ret)
-				goto destroy_erase_cmd_list;
-		}
-		list_del(&cmd->list);
-		kfree(cmd);
-	}
-
-	return 0;
-
-destroy_erase_cmd_list:
-	spi_nor_destroy_erase_cmd_list(&erase_list);
-	return ret;
-}
-
-/*
- * Erase an address range on the nor chip.  The address range may span
- * one or more erase sectors.  Return an error if there is a problem erasing.
- */
-static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
-	struct spi_nor *nor = mtd_to_spi_nor(mtd);
-	u32 addr, len;
-	uint32_t rem;
-	int ret;
-
-	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
-			(long long)instr->len);
-
-	if (spi_nor_has_uniform_erase(nor)) {
-		div_u64_rem(instr->len, mtd->erasesize, &rem);
-		if (rem)
-			return -EINVAL;
-	}
-
-	addr = instr->addr;
-	len = instr->len;
-
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
-	if (ret)
-		return ret;
-
-	/* whole-chip erase? */
-	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
-		unsigned long timeout;
-
-		write_enable(nor);
-
-		if (erase_chip(nor)) {
-			ret = -EIO;
-			goto erase_err;
-		}
-
-		/*
-		 * Scale the timeout linearly with the size of the flash, with
-		 * a minimum calibrated to an old 2MB flash. We could try to
-		 * pull these from CFI/SFDP, but these values should be good
-		 * enough for now.
-		 */
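-		/*
-		 * E.g. (illustrative): a 16 MiB part gives mtd->size / SZ_2M
-		 * == 8, so the chip erase is allowed eight times the 2 MB
-		 * baseline before being declared timed out.
-		 */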
-		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
-			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
-			      (unsigned long)(mtd->size / SZ_2M));
-		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
-		if (ret)
-			goto erase_err;
-
-	/* REVISIT in some cases we could speed up erasing large regions
-	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.  We may have set up
-	 * to use "small sector erase", but that's not always optimal.
-	 */
-
-	/* "sector"-at-a-time erase */
-	} else if (spi_nor_has_uniform_erase(nor)) {
-		while (len) {
-			write_enable(nor);
-
-			ret = spi_nor_erase_sector(nor, addr);
-			if (ret)
-				goto erase_err;
-
-			addr += mtd->erasesize;
-			len -= mtd->erasesize;
-
-			ret = spi_nor_wait_till_ready(nor);
-			if (ret)
-				goto erase_err;
-		}
-
-	/* erase multiple sectors */
-	} else {
-		ret = spi_nor_erase_multi_sectors(nor, addr, len);
-		if (ret)
-			goto erase_err;
-	}
-
-	write_disable(nor);
-
-erase_err:
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
-
-	return ret;
-}
-
-/* Write status register and ensure bits in mask match written values */
-static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
-{
-	int ret;
-
-	write_enable(nor);
-	ret = write_sr(nor, status_new);
-	if (ret)
-		return ret;
-
-	ret = spi_nor_wait_till_ready(nor);
-	if (ret)
-		return ret;
-
-	ret = read_sr(nor);
-	if (ret < 0)
-		return ret;
-
-	return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
-}
-
-static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
-				 uint64_t *len)
-{
-	struct mtd_info *mtd = &nor->mtd;
-	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
-	int shift = ffs(mask) - 1;
-	int pow;
-
-	if (!(sr & mask)) {
-		/* No protection */
-		*ofs = 0;
-		*len = 0;
-	} else {
-		pow = ((sr & mask) ^ mask) >> shift;
-		*len = mtd->size >> pow;
-		if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
-			*ofs = 0;
-		else
-			*ofs = mtd->size - *len;
-	}
-}
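-/*
- * Worked example (illustrative, not in the original source): on an 8 MiB
- * part with BP2..BP0 == 101 and TB clear, pow = 0b101 ^ 0b111 = 2, so
- * *len = size >> 2 = 2 MiB and *ofs = 6 MiB -- the upper quarter, matching
- * the "Upper 1/4" row of the table in the stm_lock() comment below.
- */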
-
-/*
- * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
- * @locked is false); 0 otherwise
- */
-static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
-				    u8 sr, bool locked)
-{
-	loff_t lock_offs;
-	uint64_t lock_len;
-
-	if (!len)
-		return 1;
-
-	stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
-
-	if (locked)
-		/* Requested range is a sub-range of locked range */
-		return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
-	else
-		/* Requested range does not overlap with locked range */
-		return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
-}
-
-static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
-			    u8 sr)
-{
-	return stm_check_lock_status_sr(nor, ofs, len, sr, true);
-}
-
-static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
-			      u8 sr)
-{
-	return stm_check_lock_status_sr(nor, ofs, len, sr, false);
-}
-
-/*
- * Lock a region of the flash. Compatible with ST Micro and similar flash.
- * Supports the block protection bits BP{0,1,2} in the status register
- * (SR). Does not support these features found in newer SR bitfields:
- *   - SEC: sector/block protect - only handle SEC=0 (block protect)
- *   - CMP: complement protect - only support CMP=0 (range is not complemented)
- *
- * Support for the following is provided conditionally for some flash:
- *   - TB: top/bottom protect
- *
- * Sample table portion for 8MB flash (Winbond w25q64fw):
- *
- *   SEC  |  TB   |  BP2  |  BP1  |  BP0  |  Prot Length  | Protected Portion
- *  --------------------------------------------------------------------------
- *    X   |   X   |   0   |   0   |   0   |  NONE         | NONE
- *    0   |   0   |   0   |   0   |   1   |  128 KB       | Upper 1/64
- *    0   |   0   |   0   |   1   |   0   |  256 KB       | Upper 1/32
- *    0   |   0   |   0   |   1   |   1   |  512 KB       | Upper 1/16
- *    0   |   0   |   1   |   0   |   0   |  1 MB         | Upper 1/8
- *    0   |   0   |   1   |   0   |   1   |  2 MB         | Upper 1/4
- *    0   |   0   |   1   |   1   |   0   |  4 MB         | Upper 1/2
- *    X   |   X   |   1   |   1   |   1   |  8 MB         | ALL
- *  ------|-------|-------|-------|-------|---------------|-------------------
- *    0   |   1   |   0   |   0   |   1   |  128 KB       | Lower 1/64
- *    0   |   1   |   0   |   1   |   0   |  256 KB       | Lower 1/32
- *    0   |   1   |   0   |   1   |   1   |  512 KB       | Lower 1/16
- *    0   |   1   |   1   |   0   |   0   |  1 MB         | Lower 1/8
- *    0   |   1   |   1   |   0   |   1   |  2 MB         | Lower 1/4
- *    0   |   1   |   1   |   1   |   0   |  4 MB         | Lower 1/2
- *
- * Returns negative on errors, 0 on success.
- */
-static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
-{
-	struct mtd_info *mtd = &nor->mtd;
-	int status_old, status_new;
-	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
-	u8 shift = ffs(mask) - 1, pow, val;
-	loff_t lock_len;
-	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
-	bool use_top;
-
-	status_old = read_sr(nor);
-	if (status_old < 0)
-		return status_old;
-
-	/* If nothing in our range is unlocked, we don't need to do anything */
-	if (stm_is_locked_sr(nor, ofs, len, status_old))
-		return 0;
-
-	/* If anything below us is unlocked, we can't use 'bottom' protection */
-	if (!stm_is_locked_sr(nor, 0, ofs, status_old))
-		can_be_bottom = false;
-
-	/* If anything above us is unlocked, we can't use 'top' protection */
-	if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
-				status_old))
-		can_be_top = false;
-
-	if (!can_be_bottom && !can_be_top)
-		return -EINVAL;
-
-	/* Prefer top, if both are valid */
-	use_top = can_be_top;
-
-	/* lock_len: length of region that should end up locked */
-	if (use_top)
-		lock_len = mtd->size - ofs;
-	else
-		lock_len = ofs + len;
-
-	/*
-	 * Need smallest pow such that:
-	 *
-	 *   1 / (2^pow) <= (len / size)
-	 *
-	 * so (assuming power-of-2 size) we do:
-	 *
-	 *   pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
-	 */
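-	/*
-	 * Worked example (illustrative): locking the top 2 MiB of an 8 MiB
-	 * part gives pow = 23 - 21 = 2, so val = mask - (2 << shift), i.e.
-	 * BP2..BP0 = 101 -- the "Upper 1/4" row of the table above.
-	 */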
-	pow = ilog2(mtd->size) - ilog2(lock_len);
-	val = mask - (pow << shift);
-	if (val & ~mask)
-		return -EINVAL;
-	/* Don't "lock" with no region! */
-	if (!(val & mask))
-		return -EINVAL;
-
-	status_new = (status_old & ~mask & ~SR_TB) | val;
-
-	/* Disallow further writes if WP pin is asserted */
-	status_new |= SR_SRWD;
-
-	if (!use_top)
-		status_new |= SR_TB;
-
-	/* Don't bother if they're the same */
-	if (status_new == status_old)
-		return 0;
-
-	/* Only modify protection if it will not unlock other areas */
-	if ((status_new & mask) < (status_old & mask))
-		return -EINVAL;
-
-	return write_sr_and_check(nor, status_new, mask);
-}
-
-/*
- * Unlock a region of the flash. See stm_lock() for more info
- *
- * Returns negative on errors, 0 on success.
- */
-static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
-{
-	struct mtd_info *mtd = &nor->mtd;
-	int status_old, status_new;
-	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
-	u8 shift = ffs(mask) - 1, pow, val;
-	loff_t lock_len;
-	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
-	bool use_top;
-
-	status_old = read_sr(nor);
-	if (status_old < 0)
-		return status_old;
-
-	/* If nothing in our range is locked, we don't need to do anything */
-	if (stm_is_unlocked_sr(nor, ofs, len, status_old))
-		return 0;
-
-	/* If anything below us is locked, we can't use 'top' protection */
-	if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
-		can_be_top = false;
-
-	/* If anything above us is locked, we can't use 'bottom' protection */
-	if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
-				status_old))
-		can_be_bottom = false;
-
-	if (!can_be_bottom && !can_be_top)
-		return -EINVAL;
-
-	/* Prefer top, if both are valid */
-	use_top = can_be_top;
-
-	/* lock_len: length of region that should remain locked */
-	if (use_top)
-		lock_len = mtd->size - (ofs + len);
-	else
-		lock_len = ofs;
-
-	/*
-	 * Need largest pow such that:
-	 *
-	 *   1 / (2^pow) >= (len / size)
-	 *
-	 * so (assuming power-of-2 size) we do:
-	 *
-	 *   pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
-	 */
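-	/*
-	 * Worked example (illustrative): leaving only the top 512 KiB of an
-	 * 8 MiB part locked gives pow = 23 - 19 = 4, so val = mask -
-	 * (4 << shift), i.e. BP2..BP0 = 011 -- the 512 KB row of the table
-	 * in the stm_lock() comment.
-	 */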
-	pow = ilog2(mtd->size) - order_base_2(lock_len);
-	if (lock_len == 0) {
-		val = 0; /* fully unlocked */
-	} else {
-		val = mask - (pow << shift);
-		/* Some power-of-two sizes are not supported */
-		if (val & ~mask)
-			return -EINVAL;
-	}
-
-	status_new = (status_old & ~mask & ~SR_TB) | val;
-
-	/* Don't protect status register if we're fully unlocked */
-	if (lock_len == 0)
-		status_new &= ~SR_SRWD;
-
-	if (!use_top)
-		status_new |= SR_TB;
-
-	/* Don't bother if they're the same */
-	if (status_new == status_old)
-		return 0;
-
-	/* Only modify protection if it will not lock other areas */
-	if ((status_new & mask) > (status_old & mask))
-		return -EINVAL;
-
-	return write_sr_and_check(nor, status_new, mask);
-}
-
-/*
- * Check if a region of the flash is (completely) locked. See stm_lock() for
- * more info.
- *
- * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
- * negative on errors.
- */
-static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
-{
-	int status;
-
-	status = read_sr(nor);
-	if (status < 0)
-		return status;
-
-	return stm_is_locked_sr(nor, ofs, len, status);
-}
-
-static const struct spi_nor_locking_ops stm_locking_ops = {
-	.lock = stm_lock,
-	.unlock = stm_unlock,
-	.is_locked = stm_is_locked,
-};
-
-static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
-	struct spi_nor *nor = mtd_to_spi_nor(mtd);
-	int ret;
-
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
-	if (ret)
-		return ret;
-
-	ret = nor->params.locking_ops->lock(nor, ofs, len);
-
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
-	return ret;
-}
-
-static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
-	struct spi_nor *nor = mtd_to_spi_nor(mtd);
-	int ret;
-
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
-	if (ret)
-		return ret;
-
-	ret = nor->params.locking_ops->unlock(nor, ofs, len);
-
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
-	return ret;
-}
-
-static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
-	struct spi_nor *nor = mtd_to_spi_nor(mtd);
-	int ret;
-
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
-	if (ret)
-		return ret;
-
-	ret = nor->params.locking_ops->is_locked(nor, ofs, len);
-
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
-	return ret;
-}
-
-/*
- * Write the Status Register and the Configuration Register with 2 bytes.
- * The first byte will be written to the status register, while the
- * second byte will be written to the configuration register.
- * Return negative if an error occurred.
- */
-static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
-{
-	int ret;
-
-	write_enable(nor);
-
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_DATA_OUT(2, sr_cr, 1));
-
-		ret = spi_mem_exec_op(nor->spimem, &op);
-	} else {
-		ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
-	}
-
-	if (ret < 0) {
-		dev_err(nor->dev,
-			"error while writing configuration register\n");
-		return -EINVAL;
-	}
-
-	ret = spi_nor_wait_till_ready(nor);
-	if (ret) {
-		dev_err(nor->dev,
-			"timeout while writing configuration register\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-/**
- * macronix_quad_enable() - set QE bit in Status Register.
- * @nor:	pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Status Register.
- *
- * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int macronix_quad_enable(struct spi_nor *nor)
-{
-	int ret, val;
-
-	val = read_sr(nor);
-	if (val < 0)
-		return val;
-	if (val & SR_QUAD_EN_MX)
-		return 0;
-
-	write_enable(nor);
-
-	write_sr(nor, val | SR_QUAD_EN_MX);
-
-	ret = spi_nor_wait_till_ready(nor);
-	if (ret)
-		return ret;
-
-	ret = read_sr(nor);
-	if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
-		dev_err(nor->dev, "Macronix Quad bit not set\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/**
- * spansion_quad_enable() - set QE bit in Configuration Register.
- * @nor:	pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function is kept for legacy purposes: it has been used for a long
- * time without complaints, but it should be considered deprecated and
- * possibly buggy.
- * First, this function doesn't care about the previous values of the Status
- * and Configuration Registers when it sets the QE bit (bit 1) in the
- * Configuration Register: all other bits are cleared, which may have unwanted
- * side effects like removing some block protections.
- * Second, it uses the Read Configuration Register (35h) instruction even
- * though a few very old memories don't support it. If a pull-up resistor is
- * present on the MISO/IO1 line, we might still pass the "read back" test
- * because the QSPI memory doesn't recognize the command and thus leaves the
- * MISO/IO1 line state unchanged, hence read_cr() returns 0xFF.
- *
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spansion_quad_enable(struct spi_nor *nor)
-{
-	u8 *sr_cr = nor->bouncebuf;
-	int ret;
-
-	sr_cr[0] = 0;
-	sr_cr[1] = CR_QUAD_EN_SPAN;
-	ret = write_sr_cr(nor, sr_cr);
-	if (ret)
-		return ret;
-
-	/* read back and check it */
-	ret = read_cr(nor);
-	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
-		dev_err(nor->dev, "Spansion Quad bit not set\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/**
- * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
- * @nor:	pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function should be used with QSPI memories not supporting the Read
- * Configuration Register (35h) instruction.
- *
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
-{
-	u8 *sr_cr = nor->bouncebuf;
-	int ret;
-
-	/* Keep the current value of the Status Register. */
-	ret = read_sr(nor);
-	if (ret < 0) {
-		dev_err(nor->dev, "error while reading status register\n");
-		return -EINVAL;
-	}
-	sr_cr[0] = ret;
-	sr_cr[1] = CR_QUAD_EN_SPAN;
-
-	return write_sr_cr(nor, sr_cr);
-}
-
-/**
- * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
- * @nor:	pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function should be used with QSPI memories supporting the Read
- * Configuration Register (35h) instruction.
- *
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spansion_read_cr_quad_enable(struct spi_nor *nor)
-{
-	struct device *dev = nor->dev;
-	u8 *sr_cr = nor->bouncebuf;
-	int ret;
-
-	/* Check current Quad Enable bit value. */
-	ret = read_cr(nor);
-	if (ret < 0) {
-		dev_err(dev, "error while reading configuration register\n");
-		return -EINVAL;
-	}
-
-	if (ret & CR_QUAD_EN_SPAN)
-		return 0;
-
-	sr_cr[1] = ret | CR_QUAD_EN_SPAN;
-
-	/* Keep the current value of the Status Register. */
-	ret = read_sr(nor);
-	if (ret < 0) {
-		dev_err(dev, "error while reading status register\n");
-		return -EINVAL;
-	}
-	sr_cr[0] = ret;
-
-	ret = write_sr_cr(nor, sr_cr);
-	if (ret)
-		return ret;
-
-	/* Read back and check it. */
-	ret = read_cr(nor);
-	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
-		dev_err(nor->dev, "Spansion Quad bit not set\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int spi_nor_write_sr2(struct spi_nor *nor, u8 *sr2)
-{
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_DATA_OUT(1, sr2, 1));
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	return nor->write_reg(nor, SPINOR_OP_WRSR2, sr2, 1);
-}
-
-static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
-{
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_DATA_IN(1, sr2, 1));
-
-		return spi_mem_exec_op(nor->spimem, &op);
-	}
-
-	return nor->read_reg(nor, SPINOR_OP_RDSR2, sr2, 1);
-}
-
-/**
- * sr2_bit7_quad_enable() - set QE bit in Status Register 2.
- * @nor:	pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Status Register 2.
- *
- * This is one of the procedures to set the QE bit described in the SFDP
- * (JESD216 rev B) specification but no manufacturer using this procedure has
- * been identified yet, hence the name of the function.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int sr2_bit7_quad_enable(struct spi_nor *nor)
-{
-	u8 *sr2 = nor->bouncebuf;
-	int ret;
-
-	/* Check current Quad Enable bit value. */
-	ret = spi_nor_read_sr2(nor, sr2);
-	if (ret)
-		return ret;
-	if (*sr2 & SR2_QUAD_EN_BIT7)
-		return 0;
-
-	/* Update the Quad Enable bit. */
-	*sr2 |= SR2_QUAD_EN_BIT7;
-
-	write_enable(nor);
-
-	ret = spi_nor_write_sr2(nor, sr2);
-	if (ret < 0) {
-		dev_err(nor->dev, "error while writing status register 2\n");
-		return -EINVAL;
-	}
-
-	ret = spi_nor_wait_till_ready(nor);
-	if (ret < 0) {
-		dev_err(nor->dev, "timeout while writing status register 2\n");
-		return ret;
-	}
-
-	/* Read back and check it. */
-	ret = spi_nor_read_sr2(nor, sr2);
-	if (!(ret > 0 && (*sr2 & SR2_QUAD_EN_BIT7))) {
-		dev_err(nor->dev, "SR2 Quad bit not set\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/**
- * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits.
- * @nor:        pointer to a 'struct spi_nor'
- *
- * Read-modify-write function that clears the Block Protection bits from the
- * Status Register without affecting other bits.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_clear_sr_bp(struct spi_nor *nor)
-{
-	int ret;
-	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
-
-	ret = read_sr(nor);
-	if (ret < 0) {
-		dev_err(nor->dev, "error while reading status register\n");
-		return ret;
-	}
-
-	write_enable(nor);
-
-	ret = write_sr(nor, ret & ~mask);
-	if (ret) {
-		dev_err(nor->dev, "write to status register failed\n");
-		return ret;
-	}
-
-	ret = spi_nor_wait_till_ready(nor);
-	if (ret)
-		dev_err(nor->dev, "timeout while writing status register\n");
-	return ret;
-}
-
-/**
- * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block Protection
- * bits on spansion flashes.
- * @nor:        pointer to a 'struct spi_nor'
- *
- * Read-modify-write function that clears the Block Protection bits from the
- * Status Register without affecting other bits. The function is tightly
- * coupled with the spansion_quad_enable() function. Both assume that the
- * 16-bit Write Status Register (01h) and the Read Configuration Register
- * (35h) instructions are supported.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
-{
-	int ret;
-	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
-	u8 *sr_cr =  nor->bouncebuf;
-
-	/* Check current Quad Enable bit value. */
-	ret = read_cr(nor);
-	if (ret < 0) {
-		dev_err(nor->dev,
-			"error while reading configuration register\n");
-		return ret;
-	}
-
-	/*
-	 * When the configuration register Quad Enable bit is one, only the
-	 * Write Status (01h) command with two data bytes may be used.
-	 */
-	if (ret & CR_QUAD_EN_SPAN) {
-		sr_cr[1] = ret;
-
-		ret = read_sr(nor);
-		if (ret < 0) {
-			dev_err(nor->dev,
-				"error while reading status register\n");
-			return ret;
-		}
-		sr_cr[0] = ret & ~mask;
-
-		ret = write_sr_cr(nor, sr_cr);
-		if (ret)
-			dev_err(nor->dev, "16-bit write register failed\n");
-		return ret;
-	}
-
-	/*
-	 * If the Quad Enable bit is zero, use the Write Status (01h) command
-	 * with one data byte.
-	 */
-	return spi_nor_clear_sr_bp(nor);
-}
-
-/* Used when the "_ext_id" is two bytes at most */
-#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
-		.id = {							\
-			((_jedec_id) >> 16) & 0xff,			\
-			((_jedec_id) >> 8) & 0xff,			\
-			(_jedec_id) & 0xff,				\
-			((_ext_id) >> 8) & 0xff,			\
-			(_ext_id) & 0xff,				\
-			},						\
-		.id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),	\
-		.sector_size = (_sector_size),				\
-		.n_sectors = (_n_sectors),				\
-		.page_size = 256,					\
-		.flags = (_flags),
-
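-/*
- * Illustrative expansion (not in the original source): INFO(0xef4018, 0,
- * 64 * 1024, 256, SECT_4K) yields .id = { 0xef, 0x40, 0x18, 0x00, 0x00 }
- * and .id_len = 3; a two-byte _ext_id such as 0x4d01 would fill id[3..4]
- * and bump .id_len to 5.
- */
-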
-#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
-		.id = {							\
-			((_jedec_id) >> 16) & 0xff,			\
-			((_jedec_id) >> 8) & 0xff,			\
-			(_jedec_id) & 0xff,				\
-			((_ext_id) >> 16) & 0xff,			\
-			((_ext_id) >> 8) & 0xff,			\
-			(_ext_id) & 0xff,				\
-			},						\
-		.id_len = 6,						\
-		.sector_size = (_sector_size),				\
-		.n_sectors = (_n_sectors),				\
-		.page_size = 256,					\
-		.flags = (_flags),
-
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)	\
-		.sector_size = (_sector_size),				\
-		.n_sectors = (_n_sectors),				\
-		.page_size = (_page_size),				\
-		.addr_width = (_addr_width),				\
-		.flags = (_flags),
-
-#define S3AN_INFO(_jedec_id, _n_sectors, _page_size)			\
-		.id = {							\
-			((_jedec_id) >> 16) & 0xff,			\
-			((_jedec_id) >> 8) & 0xff,			\
-			(_jedec_id) & 0xff				\
-			},						\
-		.id_len = 3,						\
-		.sector_size = (8*_page_size),				\
-		.n_sectors = (_n_sectors),				\
-		.page_size = _page_size,				\
-		.addr_width = 3,					\
-		.flags = SPI_NOR_NO_FR | SPI_S3AN,
-
-static int
-is25lp256_post_bfpt_fixups(struct spi_nor *nor,
-			   const struct sfdp_parameter_header *bfpt_header,
-			   const struct sfdp_bfpt *bfpt,
-			   struct spi_nor_flash_parameter *params)
-{
-	/*
-	 * IS25LP256 supports 4B opcodes, but the BFPT advertises a
-	 * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
-	 * Overwrite the address width advertised by the BFPT.
-	 */
-	if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
-		BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
-		nor->addr_width = 4;
-
-	return 0;
-}
-
-static struct spi_nor_fixups is25lp256_fixups = {
-	.post_bfpt = is25lp256_post_bfpt_fixups,
-};
-
-static int
-mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
-			    const struct sfdp_parameter_header *bfpt_header,
-			    const struct sfdp_bfpt *bfpt,
-			    struct spi_nor_flash_parameter *params)
-{
-	/*
-	 * MX25L25635F supports 4B opcodes but MX25L25635E does not.
-	 * Unfortunately, Macronix has re-used the same JEDEC ID for both
-	 * variants which prevents us from defining a new entry in the parts
-	 * table.
-	 * We need a way to differentiate MX25L25635E and MX25L25635F, and it
-	 * seems that the F version advertises support for Fast Read 4-4-4 in
-	 * its BFPT table.
-	 */
-	if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
-		nor->flags |= SNOR_F_4B_OPCODES;
-
-	return 0;
-}
-
-static struct spi_nor_fixups mx25l25635_fixups = {
-	.post_bfpt = mx25l25635_post_bfpt_fixups,
-};
-
-static void gd25q256_default_init(struct spi_nor *nor)
-{
-	/*
-	 * Some manufacturers, like GigaDevice, may use a different
-	 * bit to set QE on different memories, so the MFR alone can't
-	 * indicate the quad_enable method for this case; we need
-	 * to set it in the default_init fixup hook.
-	 */
-	nor->params.quad_enable = macronix_quad_enable;
-}
-
-static struct spi_nor_fixups gd25q256_fixups = {
-	.default_init = gd25q256_default_init,
-};
-
-/* NOTE: double check command sets and memory organization when you add
- * more nor chips.  This current list focuses on newer chips, which
- * have been converging on command sets that include the JEDEC ID.
- *
- * All newly added entries should describe *hardware* and should use SECT_4K
- * (or SECT_4K_PMC) if the hardware supports erasing 4 KiB sectors. For usage
- * scenarios excluding small sectors there is a config option that can be
- * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
- * For historical (and compatibility) reasons (before we got the above config)
- * some old entries may be missing the 4K flag.
- */
-static const struct flash_info spi_nor_ids[] = {
-	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
-	{ "at25fs010",  INFO(0x1f6601, 0, 32 * 1024,   4, SECT_4K) },
-	{ "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8, SECT_4K) },
-
-	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024,   8, SECT_4K) },
-	{ "at25df321",  INFO(0x1f4700, 0, 64 * 1024,  64, SECT_4K) },
-	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64, SECT_4K) },
-	{ "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
-
-	{ "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) },
-	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
-	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
-	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
-
-	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
-
-	/* EON -- en25xxx */
-	{ "en25f32",    INFO(0x1c3116, 0, 64 * 1024,   64, SECT_4K) },
-	{ "en25p32",    INFO(0x1c2016, 0, 64 * 1024,   64, 0) },
-	{ "en25q32b",   INFO(0x1c3016, 0, 64 * 1024,   64, 0) },
-	{ "en25p64",    INFO(0x1c2017, 0, 64 * 1024,  128, 0) },
-	{ "en25q64",    INFO(0x1c3017, 0, 64 * 1024,  128, SECT_4K) },
-	{ "en25q80a",   INFO(0x1c3014, 0, 64 * 1024,   16,
-			SECT_4K | SPI_NOR_DUAL_READ) },
-	{ "en25qh32",   INFO(0x1c7016, 0, 64 * 1024,   64, 0) },
-	{ "en25qh64",   INFO(0x1c7017, 0, 64 * 1024,  128,
-			SECT_4K | SPI_NOR_DUAL_READ) },
-	{ "en25qh128",  INFO(0x1c7018, 0, 64 * 1024,  256, 0) },
-	{ "en25qh256",  INFO(0x1c7019, 0, 64 * 1024,  512, 0) },
-	{ "en25s64",	INFO(0x1c3817, 0, 64 * 1024,  128, SECT_4K) },
-
-	/* ESMT */
-	{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
-	{ "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
-	{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
-
-	/* Everspin */
-	{ "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-	{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-	{ "mr25h10",  CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-	{ "mr25h40",  CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-
-	/* Fujitsu */
-	{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
-
-	/* GigaDevice */
-	{
-		"gd25q16", INFO(0xc84015, 0, 64 * 1024,  32,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{
-		"gd25q32", INFO(0xc84016, 0, 64 * 1024,  64,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{
-		"gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{
-		"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{
-		"gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{
-		"gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{
-		"gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-			.fixups = &gd25q256_fixups,
-	},
-
-	/* Intel/Numonyx -- xxxs33b */
-	{ "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
-	{ "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
-	{ "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) },
-
-	/* ISSI */
-	{ "is25cd512",  INFO(0x7f9d20, 0, 32 * 1024,   2, SECT_4K) },
-	{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024,   8,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "is25lp016d", INFO(0x9d6015, 0, 64 * 1024,  32,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "is25lp080d", INFO(0x9d6014, 0, 64 * 1024,  16,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "is25lp032",  INFO(0x9d6016, 0, 64 * 1024,  64,
-			SECT_4K | SPI_NOR_DUAL_READ) },
-	{ "is25lp064",  INFO(0x9d6017, 0, 64 * 1024, 128,
-			SECT_4K | SPI_NOR_DUAL_READ) },
-	{ "is25lp128",  INFO(0x9d6018, 0, 64 * 1024, 256,
-			SECT_4K | SPI_NOR_DUAL_READ) },
-	{ "is25lp256",  INFO(0x9d6019, 0, 64 * 1024, 512,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_4B_OPCODES)
-			.fixups = &is25lp256_fixups },
-	{ "is25wp032",  INFO(0x9d7016, 0, 64 * 1024,  64,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "is25wp064",  INFO(0x9d7017, 0, 64 * 1024, 128,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "is25wp128",  INFO(0x9d7018, 0, 64 * 1024, 256,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-
-	/* Macronix */
-	{ "mx25l512e",   INFO(0xc22010, 0, 64 * 1024,   1, SECT_4K) },
-	{ "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
-	{ "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) },
-	{ "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) },
-	{ "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) },
-	{ "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64, SECT_4K) },
-	{ "mx25l3255e",  INFO(0xc29e16, 0, 64 * 1024,  64, SECT_4K) },
-	{ "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
-	{ "mx25u2033e",  INFO(0xc22532, 0, 64 * 1024,   4, SECT_4K) },
-	{ "mx25u3235f",	 INFO(0xc22536, 0, 64 * 1024,  64,
-			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "mx25u4035",   INFO(0xc22533, 0, 64 * 1024,   8, SECT_4K) },
-	{ "mx25u8035",   INFO(0xc22534, 0, 64 * 1024,  16, SECT_4K) },
-	{ "mx25u6435f",  INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
-	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
-	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
-	{ "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
-			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
-			 SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-			 .fixups = &mx25l25635_fixups },
-	{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
-	{ "mx25v8035f",  INFO(0xc22314, 0, 64 * 1024,  16,
-			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
-	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
-	{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
-	{ "mx66l1g45g",  INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
-
-	/* Micron <--> ST Micro */
-	{ "n25q016a",	 INFO(0x20bb15, 0, 64 * 1024,   32, SECT_4K | SPI_NOR_QUAD_READ) },
-	{ "n25q032",	 INFO(0x20ba16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
-	{ "n25q032a",	 INFO(0x20bb16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
-	{ "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
-	{ "n25q064a",    INFO(0x20bb17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
-	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
-	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
-	{ "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "n25q256ax1",  INFO(0x20bb19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_QUAD_READ) },
-	{ "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
-	{ "mt25qu512a",  INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
-			       SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
-			       SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
-	{ "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K |
-			      SPI_NOR_QUAD_READ) },
-	{ "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
-	{ "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
-	{ "mt25ql02g",   INFO(0x20ba22, 0, 64 * 1024, 4096,
-			      SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
-			      NO_CHIP_ERASE) },
-	{ "mt25qu02g",   INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
-
-	/* Micron */
-	{
-		"mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
-			SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
-			SPI_NOR_4B_OPCODES)
-	},
-	{ "mt35xu02g",  INFO(0x2c5b1c, 0, 128 * 1024, 2048,
-			     SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
-			     SPI_NOR_4B_OPCODES) },
-
-	/* PMC */
-	{ "pm25lv512",   INFO(0,        0, 32 * 1024,    2, SECT_4K_PMC) },
-	{ "pm25lv010",   INFO(0,        0, 32 * 1024,    4, SECT_4K_PMC) },
-	{ "pm25lq032",   INFO(0x7f9d46, 0, 64 * 1024,   64, SECT_4K) },
-
-	/* Spansion/Cypress -- single (large) sector size only, at least
-	 * for the chips listed here (without boot sectors).
-	 */
-	{ "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
-			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
-	{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
-			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
-	{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
-	{ "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
-	{ "s25fl512s",  INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
-			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | USE_CLSR) },
-	{ "s25fs512s",  INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
-	{ "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
-	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
-	{ "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
-	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
-	{ "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
-	{ "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8, 0) },
-	{ "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16, 0) },
-	{ "s25sl016a",  INFO(0x010214,      0,  64 * 1024,  32, 0) },
-	{ "s25sl032a",  INFO(0x010215,      0,  64 * 1024,  64, 0) },
-	{ "s25sl064a",  INFO(0x010216,      0,  64 * 1024, 128, 0) },
-	{ "s25fl004k",  INFO(0xef4013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "s25fl008k",  INFO(0xef4014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "s25fl016k",  INFO(0xef4015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128, SECT_4K) },
-	{ "s25fl116k",  INFO(0x014015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "s25fl132k",  INFO(0x014016,      0,  64 * 1024,  64, SECT_4K) },
-	{ "s25fl164k",  INFO(0x014017,      0,  64 * 1024, 128, SECT_4K) },
-	{ "s25fl204k",  INFO(0x014013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ) },
-	{ "s25fl208k",  INFO(0x014014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ) },
-	{ "s25fl064l",  INFO(0x016017,      0,  64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
-	{ "s25fl128l",  INFO(0x016018,      0,  64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
-	{ "s25fl256l",  INFO(0x016019,      0,  64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
-
-	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
-	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
-	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
-	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
-	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
-	{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
-	{ "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1, SECT_4K | SST_WRITE) },
-	{ "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2, SECT_4K | SST_WRITE) },
-	{ "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4, SECT_4K | SST_WRITE) },
-	{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024,  4, SECT_4K) },
-	{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024,  8, SECT_4K) },
-	{ "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
-	{ "sst25wf080",  INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
-	{ "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
-			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-
-	/* ST Microelectronics -- newer production may have feature updates */
-	{ "m25p05",  INFO(0x202010,  0,  32 * 1024,   2, 0) },
-	{ "m25p10",  INFO(0x202011,  0,  32 * 1024,   4, 0) },
-	{ "m25p20",  INFO(0x202012,  0,  64 * 1024,   4, 0) },
-	{ "m25p40",  INFO(0x202013,  0,  64 * 1024,   8, 0) },
-	{ "m25p80",  INFO(0x202014,  0,  64 * 1024,  16, 0) },
-	{ "m25p16",  INFO(0x202015,  0,  64 * 1024,  32, 0) },
-	{ "m25p32",  INFO(0x202016,  0,  64 * 1024,  64, 0) },
-	{ "m25p64",  INFO(0x202017,  0,  64 * 1024, 128, 0) },
-	{ "m25p128", INFO(0x202018,  0, 256 * 1024,  64, 0) },
-
-	{ "m25p05-nonjedec",  INFO(0, 0,  32 * 1024,   2, 0) },
-	{ "m25p10-nonjedec",  INFO(0, 0,  32 * 1024,   4, 0) },
-	{ "m25p20-nonjedec",  INFO(0, 0,  64 * 1024,   4, 0) },
-	{ "m25p40-nonjedec",  INFO(0, 0,  64 * 1024,   8, 0) },
-	{ "m25p80-nonjedec",  INFO(0, 0,  64 * 1024,  16, 0) },
-	{ "m25p16-nonjedec",  INFO(0, 0,  64 * 1024,  32, 0) },
-	{ "m25p32-nonjedec",  INFO(0, 0,  64 * 1024,  64, 0) },
-	{ "m25p64-nonjedec",  INFO(0, 0,  64 * 1024, 128, 0) },
-	{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024,  64, 0) },
-
-	{ "m45pe10", INFO(0x204011,  0, 64 * 1024,    2, 0) },
-	{ "m45pe80", INFO(0x204014,  0, 64 * 1024,   16, 0) },
-	{ "m45pe16", INFO(0x204015,  0, 64 * 1024,   32, 0) },
-
-	{ "m25pe20", INFO(0x208012,  0, 64 * 1024,  4,       0) },
-	{ "m25pe80", INFO(0x208014,  0, 64 * 1024, 16,       0) },
-	{ "m25pe16", INFO(0x208015,  0, 64 * 1024, 32, SECT_4K) },
-
-	{ "m25px16",    INFO(0x207115,  0, 64 * 1024, 32, SECT_4K) },
-	{ "m25px32",    INFO(0x207116,  0, 64 * 1024, 64, SECT_4K) },
-	{ "m25px32-s0", INFO(0x207316,  0, 64 * 1024, 64, SECT_4K) },
-	{ "m25px32-s1", INFO(0x206316,  0, 64 * 1024, 64, SECT_4K) },
-	{ "m25px64",    INFO(0x207117,  0, 64 * 1024, 128, 0) },
-	{ "m25px80",    INFO(0x207114,  0, 64 * 1024, 16, 0) },
-
-	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
-	{ "w25x05", INFO(0xef3010, 0, 64 * 1024,  1,  SECT_4K) },
-	{ "w25x10", INFO(0xef3011, 0, 64 * 1024,  2,  SECT_4K) },
-	{ "w25x20", INFO(0xef3012, 0, 64 * 1024,  4,  SECT_4K) },
-	{ "w25x40", INFO(0xef3013, 0, 64 * 1024,  8,  SECT_4K) },
-	{ "w25x80", INFO(0xef3014, 0, 64 * 1024,  16, SECT_4K) },
-	{ "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) },
-	{
-		"w25q16dw", INFO(0xef6015, 0, 64 * 1024,  32,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{ "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
-	{
-		"w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024,  32,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024,  4, SECT_4K) },
-	{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024,  4, SECT_4K) },
-	{ "w25q20ew", INFO(0xef6012, 0, 64 * 1024,  4, SECT_4K) },
-	{ "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
-	{
-		"w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{
-		"w25q32jv", INFO(0xef7016, 0, 64 * 1024,  64,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
-	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
-	{
-		"w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{
-		"w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{
-		"w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
-			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
-			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-	},
-	{ "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
-	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
-	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
-	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
-			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
-			SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
-
-	/* Catalyst / On Semiconductor -- non-JEDEC */
-	{ "cat25c11", CAT25_INFO(  16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-	{ "cat25c03", CAT25_INFO(  32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-	{ "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-	{ "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-	{ "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-
-	/* Xilinx S3AN Internal Flash */
-	{ "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
-	{ "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
-	{ "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
-	{ "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
-	{ "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
-
-	/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
-	{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-	{ },
-};
-
-static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
-{
-	int			tmp;
-	u8			*id = nor->bouncebuf;
-	const struct flash_info	*info;
-
-	if (nor->spimem) {
-		struct spi_mem_op op =
-			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
-				   SPI_MEM_OP_NO_ADDR,
-				   SPI_MEM_OP_NO_DUMMY,
-				   SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
-
-		tmp = spi_mem_exec_op(nor->spimem, &op);
-	} else {
-		tmp = nor->read_reg(nor, SPINOR_OP_RDID, id,
-				    SPI_NOR_MAX_ID_LEN);
-	}
-	if (tmp < 0) {
-		dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp);
-		return ERR_PTR(tmp);
-	}
-
-	for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
-		info = &spi_nor_ids[tmp];
-		if (info->id_len) {
-			if (!memcmp(info->id, id, info->id_len))
-				return &spi_nor_ids[tmp];
-		}
-	}
-	dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
-		SPI_NOR_MAX_ID_LEN, id);
-	return ERR_PTR(-ENODEV);
-}
-
-static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
-			size_t *retlen, u_char *buf)
-{
-	struct spi_nor *nor = mtd_to_spi_nor(mtd);
-	ssize_t ret;
-
-	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
-
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
-	if (ret)
-		return ret;
-
-	while (len) {
-		loff_t addr = from;
-
-		addr = spi_nor_convert_addr(nor, addr);
-
-		ret = spi_nor_read_data(nor, addr, len, buf);
-		if (ret == 0) {
-			/* We shouldn't see 0-length reads */
-			ret = -EIO;
-			goto read_err;
-		}
-		if (ret < 0)
-			goto read_err;
-
-		WARN_ON(ret > len);
-		*retlen += ret;
-		buf += ret;
-		from += ret;
-		len -= ret;
-	}
-	ret = 0;
-
-read_err:
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
-	return ret;
-}
-
-static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
-		size_t *retlen, const u_char *buf)
-{
-	struct spi_nor *nor = mtd_to_spi_nor(mtd);
-	size_t actual;
-	int ret;
-
-	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
-
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
-	if (ret)
-		return ret;
-
-	write_enable(nor);
-
-	nor->sst_write_second = false;
-
-	actual = to % 2;
-	/* Start write from odd address. */
-	if (actual) {
-		nor->program_opcode = SPINOR_OP_BP;
-
-		/* write one byte. */
-		ret = spi_nor_write_data(nor, to, 1, buf);
-		if (ret < 0)
-			goto sst_write_err;
-		WARN(ret != 1, "While writing 1 byte written %i bytes\n",
-		     (int)ret);
-		ret = spi_nor_wait_till_ready(nor);
-		if (ret)
-			goto sst_write_err;
-	}
-	to += actual;
-
-	/* Write out most of the data here. */
-	for (; actual < len - 1; actual += 2) {
-		nor->program_opcode = SPINOR_OP_AAI_WP;
-
-		/* write two bytes. */
-		ret = spi_nor_write_data(nor, to, 2, buf + actual);
-		if (ret < 0)
-			goto sst_write_err;
-		WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
-		     (int)ret);
-		ret = spi_nor_wait_till_ready(nor);
-		if (ret)
-			goto sst_write_err;
-		to += 2;
-		nor->sst_write_second = true;
-	}
-	nor->sst_write_second = false;
-
-	write_disable(nor);
-	ret = spi_nor_wait_till_ready(nor);
-	if (ret)
-		goto sst_write_err;
-
-	/* Write out trailing byte if it exists. */
-	if (actual != len) {
-		write_enable(nor);
-
-		nor->program_opcode = SPINOR_OP_BP;
-		ret = spi_nor_write_data(nor, to, 1, buf + actual);
-		if (ret < 0)
-			goto sst_write_err;
-		WARN(ret != 1, "While writing 1 byte written %i bytes\n",
-		     (int)ret);
-		ret = spi_nor_wait_till_ready(nor);
-		if (ret)
-			goto sst_write_err;
-		write_disable(nor);
-		actual += 1;
-	}
-sst_write_err:
-	*retlen += actual;
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
-	return ret;
-}
-
-/*
- * Write an address range to the nor chip.  Data must be written in
- * FLASH_PAGESIZE chunks.  The address range may be any size provided
- * it is within the physical boundaries.
- */
-static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
-	size_t *retlen, const u_char *buf)
-{
-	struct spi_nor *nor = mtd_to_spi_nor(mtd);
-	size_t page_offset, page_remain, i;
-	ssize_t ret;
-
-	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
-
-	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < len; ) {
-		ssize_t written;
-		loff_t addr = to + i;
-
-		/*
-		 * If page_size is a power of two, the offset can be quickly
-		 * calculated with an AND operation. In the other cases we
-		 * need to do a modulus operation (more expensive).
-		 * Power-of-two numbers have only one bit set, so we can use
-		 * hweight32() to detect whether we need to do a modulus
-		 * (do_div()) or not.
-		 */
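-		/*
-		 * E.g. (illustrative): page_size == 256 has a single bit set,
-		 * so page_offset = addr & 0xff; a 528-byte S3AN page does
-		 * not, so the do_div() path is taken instead.
-		 */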
-		if (hweight32(nor->page_size) == 1) {
-			page_offset = addr & (nor->page_size - 1);
-		} else {
-			uint64_t aux = addr;
-
-			page_offset = do_div(aux, nor->page_size);
-		}
-		/* the size of data remaining on the first page */
-		page_remain = min_t(size_t,
-				    nor->page_size - page_offset, len - i);
-
-		addr = spi_nor_convert_addr(nor, addr);
-
-		write_enable(nor);
-		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
-		if (ret < 0)
-			goto write_err;
-		written = ret;
-
-		ret = spi_nor_wait_till_ready(nor);
-		if (ret)
-			goto write_err;
-		*retlen += written;
-		i += written;
-	}
-
-write_err:
-	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
-	return ret;
-}
-
-static int spi_nor_check(struct spi_nor *nor)
-{
-	if (!nor->dev ||
-	    (!nor->spimem &&
-	    (!nor->read || !nor->write || !nor->read_reg ||
-	      !nor->write_reg))) {
-		pr_err("spi-nor: please fill all the necessary fields!\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int s3an_nor_setup(struct spi_nor *nor,
-			  const struct spi_nor_hwcaps *hwcaps)
-{
-	int ret;
-
-	ret = spi_nor_xread_sr(nor, nor->bouncebuf);
-	if (ret < 0) {
-		dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
-		return ret;
-	}
-
-	nor->erase_opcode = SPINOR_OP_XSE;
-	nor->program_opcode = SPINOR_OP_XPP;
-	nor->read_opcode = SPINOR_OP_READ;
-	nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
-
-	/*
-	 * These flashes have a page size of 264 or 528 bytes (known as
-	 * Default addressing mode). It can be changed to a more standard
-	 * Power of two mode where the page size is 256/512. This comes
-	 * at a price: there is 3% less space, the data is corrupted,
-	 * and the page size cannot be changed back to default addressing
-	 * mode.
-	 *
-	 * The current addressing mode can be read from the XRDSR register
-	 * and should not be changed, because that is a destructive operation.
-	 */
-	if (nor->bouncebuf[0] & XSR_PAGESIZE) {
-		/* Flash in Power of 2 mode */
-		nor->page_size = (nor->page_size == 264) ? 256 : 512;
-		nor->mtd.writebufsize = nor->page_size;
-		nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
-		nor->mtd.erasesize = 8 * nor->page_size;
-	} else {
-		/* Flash in Default addressing mode */
-		nor->params.convert_addr = s3an_convert_addr;
-		nor->mtd.erasesize = nor->info->sector_size;
-	}
-
-	return 0;
-}
-
-static void
-spi_nor_set_read_settings(struct spi_nor_read_command *read,
-			  u8 num_mode_clocks,
-			  u8 num_wait_states,
-			  u8 opcode,
-			  enum spi_nor_protocol proto)
-{
-	read->num_mode_clocks = num_mode_clocks;
-	read->num_wait_states = num_wait_states;
-	read->opcode = opcode;
-	read->proto = proto;
-}
-
-static void
-spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
-			u8 opcode,
-			enum spi_nor_protocol proto)
-{
-	pp->opcode = opcode;
-	pp->proto = proto;
-}
-
-static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
-{
-	size_t i;
-
-	for (i = 0; i < size; i++)
-		if (table[i][0] == (int)hwcaps)
-			return table[i][1];
-
-	return -EINVAL;
-}
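-/*
- * E.g. (illustrative): spi_nor_hwcaps_read2cmd(SNOR_HWCAPS_READ_1_1_4) below
- * maps the single READ_1_1_4 capability bit to SNOR_CMD_READ_1_1_4 via this
- * lookup.
- */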
-
-static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
-{
-	static const int hwcaps_read2cmd[][2] = {
-		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
-		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
-		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
-		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
-		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
-		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
-		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
-		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
-		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
-		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
-		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
-		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
-		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
-		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
-		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
-	};
-
-	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
-				  ARRAY_SIZE(hwcaps_read2cmd));
-}
-
-static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
-{
-	static const int hwcaps_pp2cmd[][2] = {
-		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
-		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
-		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
-		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
-		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
-		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
-		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
-	};
-
-	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
-				  ARRAY_SIZE(hwcaps_pp2cmd));
-}
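-
-/*
- * Example use of the lookup tables above: a single capability bit maps to
- * the matching command index, e.g.
- *
- *	cmd = spi_nor_hwcaps_read2cmd(SNOR_HWCAPS_READ_1_1_4);
- *
- * returns SNOR_CMD_READ_1_1_4, an index into params->reads[]. A negative
- * return value means the capability has no command slot.
- */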
-
-/*
- * Serial Flash Discoverable Parameters (SFDP) parsing.
- */
-
-/**
- * spi_nor_read_raw() - raw read of serial flash memory. The read_opcode,
- *			addr_width and read_dummy members of the struct
- *			spi_nor should be set beforehand.
- * @nor:	pointer to a 'struct spi_nor'
- * @addr:	offset in the serial flash memory
- * @len:	number of bytes to read
- * @buf:	buffer where the data is copied into (dma-safe memory)
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
-{
-	ssize_t ret;
-
-	while (len) {
-		ret = spi_nor_read_data(nor, addr, len, buf);
-		if (ret < 0)
-			return ret;
-		if (!ret || ret > len)
-			return -EIO;
-
-		buf += ret;
-		addr += ret;
-		len -= ret;
-	}
-	return 0;
-}
-
-/**
- * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
- * @nor:	pointer to a 'struct spi_nor'
- * @addr:	offset in the SFDP area to start reading data from
- * @len:	number of bytes to read
- * @buf:	buffer where the SFDP data are copied into (dma-safe memory)
- *
- * Whatever the actual number of address bytes and dummy cycles (Fast) Read
- * commands use, the Read SFDP (5Ah) instruction is always followed by a
- * 3-byte address and 8 dummy clock cycles.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
-			     size_t len, void *buf)
-{
-	u8 addr_width, read_opcode, read_dummy;
-	int ret;
-
-	read_opcode = nor->read_opcode;
-	addr_width = nor->addr_width;
-	read_dummy = nor->read_dummy;
-
-	nor->read_opcode = SPINOR_OP_RDSFDP;
-	nor->addr_width = 3;
-	nor->read_dummy = 8;
-
-	ret = spi_nor_read_raw(nor, addr, len, buf);
-
-	nor->read_opcode = read_opcode;
-	nor->addr_width = addr_width;
-	nor->read_dummy = read_dummy;
-
-	return ret;
-}
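-
-/*
- * For instance, SFDP probing starts by reading the SFDP header at offset
- * 0 (a sketch; stack buffers must go through the dma-unsafe wrapper
- * defined further below):
- *
- *	struct sfdp_header header;
- *
- *	err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
- *	if (!err && le32_to_cpu(header.signature) == SFDP_SIGNATURE)
- *		parse the parameter headers that follow;
- */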
-
-/**
- * spi_nor_spimem_check_op - check if the operation is supported
- *                           by controller
- * @nor:	pointer to a 'struct spi_nor'
- * @op:		pointer to op template to be checked
- *
- * Return: 0 if operation is supported, -ENOTSUPP otherwise.
- */
-static int spi_nor_spimem_check_op(struct spi_nor *nor,
-				   struct spi_mem_op *op)
-{
-	/*
-	 * First test with 4 address bytes. The opcode itself might
-	 * be a 3B addressing opcode, but we don't care: the SPI
-	 * controller implementation should not check the opcode,
-	 * only the sequence.
-	 */
-	op->addr.nbytes = 4;
-	if (!spi_mem_supports_op(nor->spimem, op)) {
-		if (nor->mtd.size > SZ_16M)
-			return -ENOTSUPP;
-
-		/* If flash size <= 16MB, 3 address bytes are sufficient */
-		op->addr.nbytes = 3;
-		if (!spi_mem_supports_op(nor->spimem, op))
-			return -ENOTSUPP;
-	}
-
-	return 0;
-}
-
-/**
- * spi_nor_spimem_check_readop - check if the read op is supported
- *                               by controller
- * @nor:	pointer to a 'struct spi_nor'
- * @read:	pointer to op template to be checked
- *
- * Return: 0 if operation is supported, -ENOTSUPP otherwise.
- */
-static int spi_nor_spimem_check_readop(struct spi_nor *nor,
-				       const struct spi_nor_read_command *read)
-{
-	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
-					  SPI_MEM_OP_ADDR(3, 0, 1),
-					  SPI_MEM_OP_DUMMY(0, 1),
-					  SPI_MEM_OP_DATA_IN(0, NULL, 1));
-
-	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
-	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
-	op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
-	op.dummy.buswidth = op.addr.buswidth;
-	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
-			  op.dummy.buswidth / 8;
-
-	return spi_nor_spimem_check_op(nor, &op);
-}
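-
-/*
- * Worked example for the dummy byte computation above: a 1-1-4 Fast Read
- * with 2 mode clocks and 6 wait states on a 4-bit wide bus needs
- * (2 + 6) * 4 / 8 = 4 dummy bytes in the spi-mem op template.
- */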
-
-/**
- * spi_nor_spimem_check_pp - check if the page program op is supported
- *                           by controller
- * @nor:	pointer to a 'struct spi_nor'
- * @pp:		pointer to op template to be checked
- *
- * Return: 0 if operation is supported, -ENOTSUPP otherwise.
- */
-static int spi_nor_spimem_check_pp(struct spi_nor *nor,
-				   const struct spi_nor_pp_command *pp)
-{
-	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
-					  SPI_MEM_OP_ADDR(3, 0, 1),
-					  SPI_MEM_OP_NO_DUMMY,
-					  SPI_MEM_OP_DATA_OUT(0, NULL, 1));
-
-	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
-	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
-	op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
-
-	return spi_nor_spimem_check_op(nor, &op);
-}
-
-/**
- * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
- *                                based on SPI controller capabilities
- * @nor:        pointer to a 'struct spi_nor'
- * @hwcaps:     pointer to resulting capabilities after adjusting
- *              according to controller and flash's capability
- */
-static void
-spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
-{
-	struct spi_nor_flash_parameter *params =  &nor->params;
-	unsigned int cap;
-
-	/* DTR modes are not supported yet, mask them all. */
-	*hwcaps &= ~SNOR_HWCAPS_DTR;
-
-	/* X-X-X modes are not supported yet, mask them all. */
-	*hwcaps &= ~SNOR_HWCAPS_X_X_X;
-
-	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
-		int rdidx, ppidx;
-
-		if (!(*hwcaps & BIT(cap)))
-			continue;
-
-		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
-		if (rdidx >= 0 &&
-		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
-			*hwcaps &= ~BIT(cap);
-
-		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
-		if (ppidx < 0)
-			continue;
-
-		if (spi_nor_spimem_check_pp(nor,
-					    &params->page_programs[ppidx]))
-			*hwcaps &= ~BIT(cap);
-	}
-}
-
-/**
- * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
- * @nor:	pointer to a 'struct spi_nor'
- * @addr:	offset in the SFDP area to start reading data from
- * @len:	number of bytes to read
- * @buf:	buffer where the SFDP data are copied into
- *
- * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer, as @buf is not
- * guaranteed to be dma-safe.
- *
- * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
- *          otherwise.
- */
-static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
-					size_t len, void *buf)
-{
-	void *dma_safe_buf;
-	int ret;
-
-	dma_safe_buf = kmalloc(len, GFP_KERNEL);
-	if (!dma_safe_buf)
-		return -ENOMEM;
-
-	ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
-	memcpy(buf, dma_safe_buf, len);
-	kfree(dma_safe_buf);
-
-	return ret;
-}
-
-/* Fast Read settings. */
-
-static void
-spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
-				    u16 half,
-				    enum spi_nor_protocol proto)
-{
-	read->num_mode_clocks = (half >> 5) & 0x07;
-	read->num_wait_states = (half >> 0) & 0x1f;
-	read->opcode = (half >> 8) & 0xff;
-	read->proto = proto;
-}
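-
-/*
- * Example: a BFPT settings half-word of 0x6b08 decodes to opcode 0x6b
- * (bits 15:8), 0 mode clocks (bits 7:5) and 8 wait states (bits 4:0),
- * i.e. the classic 1-1-4 Fast Read with 8 dummy clock cycles.
- */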
-
-struct sfdp_bfpt_read {
-	/* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
-	u32			hwcaps;
-
-	/*
-	 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
-	 * whether the Fast Read x-y-z command is supported.
-	 */
-	u32			supported_dword;
-	u32			supported_bit;
-
-	/*
-	 * The half-word at offset <settings_shift> in BFPT DWORD
-	 * <settings_dword> encodes the op code, the number of mode clocks and
-	 * the number of wait states to be used by the Fast Read x-y-z command.
-	 */
-	u32			settings_dword;
-	u32			settings_shift;
-
-	/* The SPI protocol for this Fast Read x-y-z command. */
-	enum spi_nor_protocol	proto;
-};
-
-static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
-	/* Fast Read 1-1-2 */
-	{
-		SNOR_HWCAPS_READ_1_1_2,
-		BFPT_DWORD(1), BIT(16),	/* Supported bit */
-		BFPT_DWORD(4), 0,	/* Settings */
-		SNOR_PROTO_1_1_2,
-	},
-
-	/* Fast Read 1-2-2 */
-	{
-		SNOR_HWCAPS_READ_1_2_2,
-		BFPT_DWORD(1), BIT(20),	/* Supported bit */
-		BFPT_DWORD(4), 16,	/* Settings */
-		SNOR_PROTO_1_2_2,
-	},
-
-	/* Fast Read 2-2-2 */
-	{
-		SNOR_HWCAPS_READ_2_2_2,
-		BFPT_DWORD(5),  BIT(0),	/* Supported bit */
-		BFPT_DWORD(6), 16,	/* Settings */
-		SNOR_PROTO_2_2_2,
-	},
-
-	/* Fast Read 1-1-4 */
-	{
-		SNOR_HWCAPS_READ_1_1_4,
-		BFPT_DWORD(1), BIT(22),	/* Supported bit */
-		BFPT_DWORD(3), 16,	/* Settings */
-		SNOR_PROTO_1_1_4,
-	},
-
-	/* Fast Read 1-4-4 */
-	{
-		SNOR_HWCAPS_READ_1_4_4,
-		BFPT_DWORD(1), BIT(21),	/* Supported bit */
-		BFPT_DWORD(3), 0,	/* Settings */
-		SNOR_PROTO_1_4_4,
-	},
-
-	/* Fast Read 4-4-4 */
-	{
-		SNOR_HWCAPS_READ_4_4_4,
-		BFPT_DWORD(5), BIT(4),	/* Supported bit */
-		BFPT_DWORD(7), 16,	/* Settings */
-		SNOR_PROTO_4_4_4,
-	},
-};
-
-struct sfdp_bfpt_erase {
-	/*
-	 * The half-word at offset <shift> in DWORD <dword> encodes the
-	 * op code and erase sector size to be used by Sector Erase commands.
-	 */
-	u32			dword;
-	u32			shift;
-};
-
-static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
-	/* Erase Type 1 in DWORD8 bits[15:0] */
-	{BFPT_DWORD(8), 0},
-
-	/* Erase Type 2 in DWORD8 bits[31:16] */
-	{BFPT_DWORD(8), 16},
-
-	/* Erase Type 3 in DWORD9 bits[15:0] */
-	{BFPT_DWORD(9), 0},
-
-	/* Erase Type 4 in DWORD9 bits[31:16] */
-	{BFPT_DWORD(9), 16},
-};
-
-/**
- * spi_nor_set_erase_type() - set a SPI NOR erase type
- * @erase:	pointer to a structure that describes a SPI NOR erase type
- * @size:	the size of the sector/block erased by the erase type
- * @opcode:	the SPI command op code to erase the sector/block
- */
-static void spi_nor_set_erase_type(struct spi_nor_erase_type *erase,
-				   u32 size, u8 opcode)
-{
-	erase->size = size;
-	erase->opcode = opcode;
-	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
-	erase->size_shift = ffs(erase->size) - 1;
-	erase->size_mask = (1 << erase->size_shift) - 1;
-}
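-
-/*
- * Example: a 64 KiB erase type gets size_shift = ffs(0x10000) - 1 = 16
- * and size_mask = 0xffff, so "addr & size_mask" is a cheap sector
- * alignment test.
- */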
-
-/**
- * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
- * @erase:	pointer to a structure that describes a SPI NOR erase type
- * @size:	the size of the sector/block erased by the erase type
- * @opcode:	the SPI command op code to erase the sector/block
- * @i:		erase type index as sorted in the Basic Flash Parameter Table
- *
- * The supported Erase Types will be sorted at init in ascending order, with
- * the smallest Erase Type size being the first member in the erase_type array
- * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
- * the Basic Flash Parameter Table since it will be used later on to
- * synchronize with the supported Erase Types defined in SFDP optional tables.
- */
-static void
-spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
-				     u32 size, u8 opcode, u8 i)
-{
-	erase->idx = i;
-	spi_nor_set_erase_type(erase, size, opcode);
-}
-
-/**
- * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
- * @l:	member in the left half of the map's erase_type array
- * @r:	member in the right half of the map's erase_type array
- *
- * Comparison function used in the sort() call to sort in ascending order the
- * map's erase types, the smallest erase type size being the first member in the
- * sorted erase_type array.
- *
- * Return: the result of @l->size - @r->size
- */
-static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
-{
-	const struct spi_nor_erase_type *left = l, *right = r;
-
-	return left->size - right->size;
-}
-
-/**
- * spi_nor_sort_erase_mask() - sort erase mask
- * @map:	the erase map of the SPI NOR
- * @erase_mask:	the erase type mask to be sorted
- *
- * Replicate the sort done for the map's erase types in BFPT: sort the erase
- * mask in ascending order with the smallest erase type size starting from
- * BIT(0) in the sorted erase mask.
- *
- * Return: sorted erase mask.
- */
-static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
-{
-	struct spi_nor_erase_type *erase_type = map->erase_type;
-	int i;
-	u8 sorted_erase_mask = 0;
-
-	if (!erase_mask)
-		return 0;
-
-	/* Replicate the sort done for the map's erase types. */
-	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
-		if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
-			sorted_erase_mask |= BIT(i);
-
-	return sorted_erase_mask;
-}
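-
-/*
- * Example: if the BFPT listed Erase Type 1 = 64 KiB and Erase Type 2 =
- * 4 KiB, the sorted erase_type array holds 4 KiB at index 0 (idx = 1) and
- * 64 KiB at index 1 (idx = 0). An unsorted mask of BIT(0), i.e. 64 KiB
- * supported, therefore becomes BIT(1) in the sorted mask.
- */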
-
-/**
- * spi_nor_regions_sort_erase_types() - sort erase types in each region
- * @map:	the erase map of the SPI NOR
- *
- * Function assumes that the erase types defined in the erase map are already
- * sorted in ascending order, with the smallest erase type size being the first
- * member in the erase_type array. It replicates the sort done for the map's
- * erase types. Each region's erase bitmask will indicate which erase types are
- * supported from the sorted erase types defined in the erase map.
- * Sort all the regions' erase types at init in order to speed up the process
- * of finding the best erase command at runtime.
- */
-static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
-{
-	struct spi_nor_erase_region *region = map->regions;
-	u8 region_erase_mask, sorted_erase_mask;
-
-	while (region) {
-		region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
-
-		sorted_erase_mask = spi_nor_sort_erase_mask(map,
-							    region_erase_mask);
-
-		/* Overwrite erase mask. */
-		region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
-				 sorted_erase_mask;
-
-		region = spi_nor_region_next(region);
-	}
-}
-
-/**
- * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
- * @map:		the erase map of the SPI NOR
- * @erase_mask:		bitmask encoding erase types that can erase the entire
- *			flash memory
- * @flash_size:		the spi nor flash memory size
- */
-static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
-					   u8 erase_mask, u64 flash_size)
-{
-	/* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
-	map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
-				     SNOR_LAST_REGION;
-	map->uniform_region.size = flash_size;
-	map->regions = &map->uniform_region;
-	map->uniform_erase_type = erase_mask;
-}
-
-static int
-spi_nor_post_bfpt_fixups(struct spi_nor *nor,
-			 const struct sfdp_parameter_header *bfpt_header,
-			 const struct sfdp_bfpt *bfpt,
-			 struct spi_nor_flash_parameter *params)
-{
-	if (nor->info->fixups && nor->info->fixups->post_bfpt)
-		return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
-						    params);
-
-	return 0;
-}
-
-/**
- * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
- * @nor:		pointer to a 'struct spi_nor'
- * @bfpt_header:	pointer to the 'struct sfdp_parameter_header' describing
- *			the Basic Flash Parameter Table length and version
- * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
- *			filled
- *
- * The Basic Flash Parameter Table is the main and only mandatory table as
- * defined by the SFDP (JESD216) specification.
- * It provides us with the total size (memory density) of the data array and
- * the number of address bytes for Fast Read, Page Program and Sector Erase
- * commands.
- * For Fast READ commands, it also gives the number of mode clock cycles and
- * wait states (regrouped in the number of dummy clock cycles) for each
- * supported instruction op code.
- * For Page Program, the page size has been available since JESD216 rev A;
- * however, the supported instruction op codes are still not provided.
- * For Sector Erase commands, this table stores the supported instruction op
- * codes and the associated sector sizes.
- * Finally, the Quad Enable Requirements (QER) are also available since JESD216
- * rev A. The QER bits encode the manufacturer dependent procedure to be
- * executed to set the Quad Enable (QE) bit in some internal register of the
- * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
- * sending any Quad SPI command to the memory. Actually, setting the QE bit
- * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
- * and IO3 hence enabling 4 (Quad) I/O lines.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_bfpt(struct spi_nor *nor,
-			      const struct sfdp_parameter_header *bfpt_header,
-			      struct spi_nor_flash_parameter *params)
-{
-	struct spi_nor_erase_map *map = &params->erase_map;
-	struct spi_nor_erase_type *erase_type = map->erase_type;
-	struct sfdp_bfpt bfpt;
-	size_t len;
-	int i, cmd, err;
-	u32 addr;
-	u16 half;
-	u8 erase_mask;
-
-	/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
-	if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
-		return -EINVAL;
-
-	/* Read the Basic Flash Parameter Table. */
-	len = min_t(size_t, sizeof(bfpt),
-		    bfpt_header->length * sizeof(u32));
-	addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
-	memset(&bfpt, 0, sizeof(bfpt));
-	err = spi_nor_read_sfdp_dma_unsafe(nor,  addr, len, &bfpt);
-	if (err < 0)
-		return err;
-
-	/* Fix endianness of the BFPT DWORDs. */
-	for (i = 0; i < BFPT_DWORD_MAX; i++)
-		bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
-
-	/* Number of address bytes. */
-	switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
-	case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
-		nor->addr_width = 3;
-		break;
-
-	case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
-		nor->addr_width = 4;
-		break;
-
-	default:
-		break;
-	}
-
-	/* Flash Memory Density (in bits). */
-	params->size = bfpt.dwords[BFPT_DWORD(2)];
-	if (params->size & BIT(31)) {
-		params->size &= ~BIT(31);
-
-		/*
-		 * Prevent overflows on params->size. Anyway, a NOR of 2^64
-		 * bits is unlikely to exist so this error probably means
-		 * the BFPT we are reading is corrupted/wrong.
-		 */
-		if (params->size > 63)
-			return -EINVAL;
-
-		params->size = 1ULL << params->size;
-	} else {
-		params->size++;
-	}
-	params->size >>= 3; /* Convert to bytes. */
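-	/*
-	 * Example: dwords[2] = 0x00ffffff encodes 2^24 bits = 2 MiB, while
-	 * 0x80000021 (BIT(31) set, n = 33) encodes 2^33 bits = 1 GiB.
-	 */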
-
-	/* Fast Read settings. */
-	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
-		const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
-		struct spi_nor_read_command *read;
-
-		if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
-			params->hwcaps.mask &= ~rd->hwcaps;
-			continue;
-		}
-
-		params->hwcaps.mask |= rd->hwcaps;
-		cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
-		read = &params->reads[cmd];
-		half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
-		spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
-	}
-
-	/*
-	 * Sector Erase settings. Reinitialize the uniform erase map using the
-	 * Erase Types defined in the bfpt table.
-	 */
-	erase_mask = 0;
-	memset(&params->erase_map, 0, sizeof(params->erase_map));
-	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
-		const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
-		u32 erasesize;
-		u8 opcode;
-
-		half = bfpt.dwords[er->dword] >> er->shift;
-		erasesize = half & 0xff;
-
-		/* erasesize == 0 means this Erase Type is not supported. */
-		if (!erasesize)
-			continue;
-
-		erasesize = 1U << erasesize;
-		opcode = (half >> 8) & 0xff;
-		erase_mask |= BIT(i);
-		spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
-						     opcode, i);
-	}
-	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
-	/*
-	 * Sort all the map's Erase Types in ascending order with the smallest
-	 * erase size being the first member in the erase_type array.
-	 */
-	sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
-	     spi_nor_map_cmp_erase_type, NULL);
-	/*
-	 * Sort the erase types in the uniform region in order to update the
-	 * uniform_erase_type bitmask. The bitmask will be used later on when
-	 * selecting the uniform erase.
-	 */
-	spi_nor_regions_sort_erase_types(map);
-	map->uniform_erase_type = map->uniform_region.offset &
-				  SNOR_ERASE_TYPE_MASK;
-
-	/* Stop here if not JESD216 rev A or later. */
-	if (bfpt_header->length < BFPT_DWORD_MAX)
-		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
-						params);
-
-	/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
-	params->page_size = bfpt.dwords[BFPT_DWORD(11)];
-	params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
-	params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
-	params->page_size = 1U << params->page_size;
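-	/* Example: an N field of 8 in DWORD11 gives a 2^8 = 256 byte page. */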
-
-	/* Quad Enable Requirements. */
-	switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
-	case BFPT_DWORD15_QER_NONE:
-		params->quad_enable = NULL;
-		break;
-
-	case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
-	case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
-		params->quad_enable = spansion_no_read_cr_quad_enable;
-		break;
-
-	case BFPT_DWORD15_QER_SR1_BIT6:
-		params->quad_enable = macronix_quad_enable;
-		break;
-
-	case BFPT_DWORD15_QER_SR2_BIT7:
-		params->quad_enable = sr2_bit7_quad_enable;
-		break;
-
-	case BFPT_DWORD15_QER_SR2_BIT1:
-		params->quad_enable = spansion_read_cr_quad_enable;
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-	return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
-}
-
-#define SMPT_CMD_ADDRESS_LEN_MASK		GENMASK(23, 22)
-#define SMPT_CMD_ADDRESS_LEN_0			(0x0UL << 22)
-#define SMPT_CMD_ADDRESS_LEN_3			(0x1UL << 22)
-#define SMPT_CMD_ADDRESS_LEN_4			(0x2UL << 22)
-#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT	(0x3UL << 22)
-
-#define SMPT_CMD_READ_DUMMY_MASK		GENMASK(19, 16)
-#define SMPT_CMD_READ_DUMMY_SHIFT		16
-#define SMPT_CMD_READ_DUMMY(_cmd) \
-	(((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
-#define SMPT_CMD_READ_DUMMY_IS_VARIABLE		0xfUL
-
-#define SMPT_CMD_READ_DATA_MASK			GENMASK(31, 24)
-#define SMPT_CMD_READ_DATA_SHIFT		24
-#define SMPT_CMD_READ_DATA(_cmd) \
-	(((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
-
-#define SMPT_CMD_OPCODE_MASK			GENMASK(15, 8)
-#define SMPT_CMD_OPCODE_SHIFT			8
-#define SMPT_CMD_OPCODE(_cmd) \
-	(((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
-
-#define SMPT_MAP_REGION_COUNT_MASK		GENMASK(23, 16)
-#define SMPT_MAP_REGION_COUNT_SHIFT		16
-#define SMPT_MAP_REGION_COUNT(_header) \
-	((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
-	  SMPT_MAP_REGION_COUNT_SHIFT) + 1)
-
-#define SMPT_MAP_ID_MASK			GENMASK(15, 8)
-#define SMPT_MAP_ID_SHIFT			8
-#define SMPT_MAP_ID(_header) \
-	(((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
-
-#define SMPT_MAP_REGION_SIZE_MASK		GENMASK(31, 8)
-#define SMPT_MAP_REGION_SIZE_SHIFT		8
-#define SMPT_MAP_REGION_SIZE(_region) \
-	(((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
-	   SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
-
-#define SMPT_MAP_REGION_ERASE_TYPE_MASK		GENMASK(3, 0)
-#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
-	((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
-
-#define SMPT_DESC_TYPE_MAP			BIT(1)
-#define SMPT_DESC_END				BIT(0)
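-
-/*
- * Example of decoding a map descriptor's region dword with the macros
- * above: 0x0003ff04 gives SMPT_MAP_REGION_SIZE = (0x3ff + 1) * 256 bytes
- * = 256 KiB and SMPT_MAP_REGION_ERASE_TYPE = 0x4, i.e. only Erase Type 3
- * may be used in that region.
- */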
-
-/**
- * spi_nor_smpt_addr_width() - return the address width used in the
- *			       configuration detection command.
- * @nor:	pointer to a 'struct spi_nor'
- * @settings:	configuration detection command descriptor, dword1
- *
- * Return: the number of address bytes used by the detection command
- */
-static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
-{
-	switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
-	case SMPT_CMD_ADDRESS_LEN_0:
-		return 0;
-	case SMPT_CMD_ADDRESS_LEN_3:
-		return 3;
-	case SMPT_CMD_ADDRESS_LEN_4:
-		return 4;
-	case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
-		fallthrough;
-	default:
-		return nor->addr_width;
-	}
-}
-
-/**
- * spi_nor_smpt_read_dummy() - return the configuration detection command read
- *			       latency, in clock cycles.
- * @nor:	pointer to a 'struct spi_nor'
- * @settings:	configuration detection command descriptor, dword1
- *
- * Return: the number of dummy cycles for an SMPT read
- */
-static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
-{
-	u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
-
-	if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
-		return nor->read_dummy;
-	return read_dummy;
-}
-
-/**
- * spi_nor_get_map_in_use() - get the configuration map in use
- * @nor:	pointer to a 'struct spi_nor'
- * @smpt:	pointer to the sector map parameter table
- * @smpt_len:	sector map parameter table length
- *
- * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
- */
-static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
-					 u8 smpt_len)
-{
-	const u32 *ret;
-	u8 *buf;
-	u32 addr;
-	int err;
-	u8 i;
-	u8 addr_width, read_opcode, read_dummy;
-	u8 read_data_mask, map_id;
-
-	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
-	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
-	if (!buf)
-		return ERR_PTR(-ENOMEM);
-
-	addr_width = nor->addr_width;
-	read_dummy = nor->read_dummy;
-	read_opcode = nor->read_opcode;
-
-	map_id = 0;
-	/* Determine if there are any optional Detection Command Descriptors */
-	for (i = 0; i < smpt_len; i += 2) {
-		if (smpt[i] & SMPT_DESC_TYPE_MAP)
-			break;
-
-		read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
-		nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
-		nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
-		nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
-		addr = smpt[i + 1];
-
-		err = spi_nor_read_raw(nor, addr, 1, buf);
-		if (err) {
-			ret = ERR_PTR(err);
-			goto out;
-		}
-
-		/*
-		 * Build an index value that is used to select the Sector Map
-		 * Configuration that is currently in use.
-		 */
-		map_id = map_id << 1 | !!(*buf & read_data_mask);
-	}
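-	/*
-	 * Example: with two detection commands returning bits r0 and r1 (in
-	 * table order), map_id ends up as (r0 << 1) | r1, selecting one of
-	 * up to four sector map configurations.
-	 */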
-
-	/*
-	 * If command descriptors are provided, they always precede map
-	 * descriptors in the table. There is no need to restart the
-	 * iteration over the smpt array from the beginning.
-	 *
-	 * Find the matching configuration map.
-	 */
-	ret = ERR_PTR(-EINVAL);
-	while (i < smpt_len) {
-		if (SMPT_MAP_ID(smpt[i]) == map_id) {
-			ret = smpt + i;
-			break;
-		}
-
-		/*
-		 * If there are no more configuration map descriptors and no
-		 * configuration ID matched the configuration identifier, the
-		 * sector address map is unknown.
-		 */
-		if (smpt[i] & SMPT_DESC_END)
-			break;
-
-		/* increment the table index to the next map */
-		i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
-	}
-
-out:
-	kfree(buf);
-	nor->addr_width = addr_width;
-	nor->read_dummy = read_dummy;
-	nor->read_opcode = read_opcode;
-	return ret;
-}
-
-/**
- * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
- * @region:	pointer to a structure that describes a SPI NOR erase region
- * @erase:	pointer to a structure that describes a SPI NOR erase type
- * @erase_type:	erase type bitmask
- */
-static void
-spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
-			     const struct spi_nor_erase_type *erase,
-			     const u8 erase_type)
-{
-	int i;
-
-	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
-		if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
-			continue;
-		if (region->size & erase[i].size_mask) {
-			spi_nor_region_mark_overlay(region);
-			return;
-		}
-	}
-}
-
-/**
- * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
- * @nor:	pointer to a 'struct spi_nor'
- * @params:     pointer to a duplicate 'struct spi_nor_flash_parameter' that is
- *              used for storing SFDP parsed data
- * @smpt:	pointer to the sector map parameter table
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int
-spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
-				   struct spi_nor_flash_parameter *params,
-				   const u32 *smpt)
-{
-	struct spi_nor_erase_map *map = &params->erase_map;
-	struct spi_nor_erase_type *erase = map->erase_type;
-	struct spi_nor_erase_region *region;
-	u64 offset;
-	u32 region_count;
-	int i, j;
-	u8 uniform_erase_type, save_uniform_erase_type;
-	u8 erase_type, regions_erase_type;
-
-	region_count = SMPT_MAP_REGION_COUNT(*smpt);
-	/*
-	 * The regions will be freed when the driver detaches from the
-	 * device.
-	 */
-	region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
-			      GFP_KERNEL);
-	if (!region)
-		return -ENOMEM;
-	map->regions = region;
-
-	uniform_erase_type = 0xff;
-	regions_erase_type = 0;
-	offset = 0;
-	/* Populate regions. */
-	for (i = 0; i < region_count; i++) {
-		j = i + 1; /* index for the region dword */
-		region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
-		erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
-		region[i].offset = offset | erase_type;
-
-		spi_nor_region_check_overlay(&region[i], erase, erase_type);
-
-		/*
-		 * Save the erase types that are supported in all regions and
-		 * can erase the entire flash memory.
-		 */
-		uniform_erase_type &= erase_type;
-
-		/*
-		 * regions_erase_type mask will indicate all the erase types
-		 * supported in this configuration map.
-		 */
-		regions_erase_type |= erase_type;
-
-		offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
-			 region[i].size;
-	}
-	spi_nor_region_mark_end(&region[i - 1]);
-
-	save_uniform_erase_type = map->uniform_erase_type;
-	map->uniform_erase_type = spi_nor_sort_erase_mask(map,
-							  uniform_erase_type);
-
-	if (!regions_erase_type) {
-		/*
-		 * Roll back to the previous uniform_erase_type mask, SMPT is
-		 * broken.
-		 */
-		map->uniform_erase_type = save_uniform_erase_type;
-		return -EINVAL;
-	}
-
-	/*
-	 * BFPT advertises all the erase types supported by all the possible
-	 * map configurations. Mask out the erase types that are not supported
-	 * by the current map configuration.
-	 */
-	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
-		if (!(regions_erase_type & BIT(erase[i].idx)))
-			spi_nor_set_erase_type(&erase[i], 0, 0xFF);
-
-	return 0;
-}
-
-/**
- * spi_nor_parse_smpt() - parse Sector Map Parameter Table
- * @nor:		pointer to a 'struct spi_nor'
- * @smpt_header:	sector map parameter table header
- * @params:		pointer to a duplicate 'struct spi_nor_flash_parameter'
- *                      that is used for storing SFDP parsed data
- *
- * This table is optional, but when available, we parse it to identify the
- * location and size of sectors within the main data array of the flash memory
- * device and to identify which Erase Types are supported by each sector.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_smpt(struct spi_nor *nor,
-			      const struct sfdp_parameter_header *smpt_header,
-			      struct spi_nor_flash_parameter *params)
-{
-	const u32 *sector_map;
-	u32 *smpt;
-	size_t len;
-	u32 addr;
-	int i, ret;
-
-	/* Read the Sector Map Parameter Table. */
-	len = smpt_header->length * sizeof(*smpt);
-	smpt = kmalloc(len, GFP_KERNEL);
-	if (!smpt)
-		return -ENOMEM;
-
-	addr = SFDP_PARAM_HEADER_PTP(smpt_header);
-	ret = spi_nor_read_sfdp(nor, addr, len, smpt);
-	if (ret)
-		goto out;
-
-	/* Fix endianness of the SMPT DWORDs. */
-	for (i = 0; i < smpt_header->length; i++)
-		smpt[i] = le32_to_cpu(smpt[i]);
-
-	sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
-	if (IS_ERR(sector_map)) {
-		ret = PTR_ERR(sector_map);
-		goto out;
-	}
-
-	ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
-	if (ret)
-		goto out;
-
-	spi_nor_regions_sort_erase_types(&params->erase_map);
-out:
-	kfree(smpt);
-	return ret;
-}
-
-#define SFDP_4BAIT_DWORD_MAX	2
-
-struct sfdp_4bait {
-	/* The hardware capability. */
-	u32		hwcaps;
-
-	/*
-	 * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
-	 * the associated 4-byte address op code is supported.
-	 */
-	u32		supported_bit;
-};
-
-/**
- * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
- * @nor:		pointer to a 'struct spi_nor'.
- * @param_header:	pointer to the 'struct sfdp_parameter_header' describing
- *			the 4-Byte Address Instruction Table length and version.
- * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
- *			filled.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_4bait(struct spi_nor *nor,
-			       const struct sfdp_parameter_header *param_header,
-			       struct spi_nor_flash_parameter *params)
-{
-	static const struct sfdp_4bait reads[] = {
-		{ SNOR_HWCAPS_READ,		BIT(0) },
-		{ SNOR_HWCAPS_READ_FAST,	BIT(1) },
-		{ SNOR_HWCAPS_READ_1_1_2,	BIT(2) },
-		{ SNOR_HWCAPS_READ_1_2_2,	BIT(3) },
-		{ SNOR_HWCAPS_READ_1_1_4,	BIT(4) },
-		{ SNOR_HWCAPS_READ_1_4_4,	BIT(5) },
-		{ SNOR_HWCAPS_READ_1_1_1_DTR,	BIT(13) },
-		{ SNOR_HWCAPS_READ_1_2_2_DTR,	BIT(14) },
-		{ SNOR_HWCAPS_READ_1_4_4_DTR,	BIT(15) },
-	};
-	static const struct sfdp_4bait programs[] = {
-		{ SNOR_HWCAPS_PP,		BIT(6) },
-		{ SNOR_HWCAPS_PP_1_1_4,		BIT(7) },
-		{ SNOR_HWCAPS_PP_1_4_4,		BIT(8) },
-	};
-	static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
-		{ 0u /* not used */,		BIT(9) },
-		{ 0u /* not used */,		BIT(10) },
-		{ 0u /* not used */,		BIT(11) },
-		{ 0u /* not used */,		BIT(12) },
-	};
-	struct spi_nor_pp_command *params_pp = params->page_programs;
-	struct spi_nor_erase_map *map = &params->erase_map;
-	struct spi_nor_erase_type *erase_type = map->erase_type;
-	u32 *dwords;
-	size_t len;
-	u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
-	int i, ret;
-
-	if (param_header->major != SFDP_JESD216_MAJOR ||
-	    param_header->length < SFDP_4BAIT_DWORD_MAX)
-		return -EINVAL;
-
-	/* Read the 4-byte Address Instruction Table. */
-	len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
-
-	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
-	dwords = kmalloc(len, GFP_KERNEL);
-	if (!dwords)
-		return -ENOMEM;
-
-	addr = SFDP_PARAM_HEADER_PTP(param_header);
-	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
-	if (ret)
-		goto out;
-
-	/* Fix endianness of the 4BAIT DWORDs. */
-	for (i = 0; i < SFDP_4BAIT_DWORD_MAX; i++)
-		dwords[i] = le32_to_cpu(dwords[i]);
-
-	/*
-	 * Compute the subset of (Fast) Read commands for which the 4-byte
-	 * version is supported.
-	 */
-	discard_hwcaps = 0;
-	read_hwcaps = 0;
-	for (i = 0; i < ARRAY_SIZE(reads); i++) {
-		const struct sfdp_4bait *read = &reads[i];
-
-		discard_hwcaps |= read->hwcaps;
-		if ((params->hwcaps.mask & read->hwcaps) &&
-		    (dwords[0] & read->supported_bit))
-			read_hwcaps |= read->hwcaps;
-	}
-
-	/*
-	 * Compute the subset of Page Program commands for which the 4-byte
-	 * version is supported.
-	 */
-	pp_hwcaps = 0;
-	for (i = 0; i < ARRAY_SIZE(programs); i++) {
-		const struct sfdp_4bait *program = &programs[i];
-
-		/*
-		 * The 4 Byte Address Instruction (Optional) Table is the only
-		 * SFDP table that indicates support for Page Program Commands.
-		 * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
-		 * authority for specifying Page Program support.
-		 */
-		discard_hwcaps |= program->hwcaps;
-		if (dwords[0] & program->supported_bit)
-			pp_hwcaps |= program->hwcaps;
-	}
-
-	/*
-	 * Compute the subset of Sector Erase commands for which the 4-byte
-	 * version is supported.
-	 */
-	erase_mask = 0;
-	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
-		const struct sfdp_4bait *erase = &erases[i];
-
-		if (dwords[0] & erase->supported_bit)
-			erase_mask |= BIT(i);
-	}
-
-	/* Replicate the sort done for the map's erase types in BFPT. */
-	erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
-
-	/*
-	 * We need at least one 4-byte op code per read, program and erase
-	 * operation; the .read(), .write() and .erase() hooks share the
-	 * nor->addr_width value.
-	 */
-	if (!read_hwcaps || !pp_hwcaps || !erase_mask)
-		goto out;
-
-	/*
-	 * Discard all operations from the 4-byte instruction set which are
-	 * not supported by this memory.
-	 */
-	params->hwcaps.mask &= ~discard_hwcaps;
-	params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
-
-	/* Use the 4-byte address instruction set. */
-	for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
-		struct spi_nor_read_command *read_cmd = &params->reads[i];
-
-		read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
-	}
-
-	/* 4BAIT is the only SFDP table that indicates page program support. */
-	if (pp_hwcaps & SNOR_HWCAPS_PP)
-		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
-					SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
-	if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
-		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
-					SPINOR_OP_PP_1_1_4_4B,
-					SNOR_PROTO_1_1_4);
-	if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
-		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
-					SPINOR_OP_PP_1_4_4_4B,
-					SNOR_PROTO_1_4_4);
-
-	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
-		if (erase_mask & BIT(i))
-			erase_type[i].opcode = (dwords[1] >>
-						erase_type[i].idx * 8) & 0xFF;
-		else
-			spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
-	}
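-	/*
-	 * Example: with dwords[1] = 0x0000dc21, the erase type with idx 0
-	 * gets opcode 0x21 (typically the 4 KiB, 4-byte address erase) and
-	 * the erase type with idx 1 gets opcode 0xdc (64 KiB, 4-byte
-	 * address).
-	 */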
-
-	/*
-	 * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
-	 * later because we already did the conversion to 4-byte opcodes. Also,
-	 * the latter function implements a legacy quirk for the erase size of
-	 * Spansion memory. However, this quirk is no longer needed with new
-	 * SFDP compliant memories.
-	 */
-	nor->addr_width = 4;
-	nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
-
-out:
-	kfree(dwords);
-	return ret;
-}
-
-/**
- * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
- * @nor:		pointer to a 'struct spi_nor'
- * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
- *			filled
- *
- * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
- * specification. This is a standard which tends to be supported by almost
- * all (Q)SPI memory manufacturers. The tables stored in the flash allow us
- * to learn at runtime the main parameters needed to perform basic SPI flash
- * operations such as Fast Read, Page Program or Sector Erase commands.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_sfdp(struct spi_nor *nor,
-			      struct spi_nor_flash_parameter *params)
-{
-	const struct sfdp_parameter_header *param_header, *bfpt_header;
-	struct sfdp_parameter_header *param_headers = NULL;
-	struct sfdp_header header;
-	struct device *dev = nor->dev;
-	size_t psize;
-	int i, err;
-
-	/* Get the SFDP header. */
-	err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
-	if (err < 0)
-		return err;
-
-	/* Check the SFDP header version. */
-	if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
-	    header.major != SFDP_JESD216_MAJOR)
-		return -EINVAL;
-
-	/*
-	 * Verify that the first and only mandatory parameter header is a
-	 * Basic Flash Parameter Table header as specified in JESD216.
-	 */
-	bfpt_header = &header.bfpt_header;
-	if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
-	    bfpt_header->major != SFDP_JESD216_MAJOR)
-		return -EINVAL;
-
-	/*
-	 * Allocate memory then read all parameter headers with a single
-	 * Read SFDP command. These parameter headers will actually be parsed
-	 * twice: a first time to get the latest revision of the basic flash
-	 * parameter table, then a second time to handle the supported optional
-	 * tables.
- * Hence we read the parameter headers once and for all to reduce the
-	 * processing time. Also we use kmalloc() instead of devm_kmalloc()
-	 * because we don't need to keep these parameter headers: the allocated
-	 * memory is always released with kfree() before exiting this function.
-	 */
-	if (header.nph) {
-		psize = header.nph * sizeof(*param_headers);
-
-		param_headers = kmalloc(psize, GFP_KERNEL);
-		if (!param_headers)
-			return -ENOMEM;
-
-		err = spi_nor_read_sfdp(nor, sizeof(header),
-					psize, param_headers);
-		if (err < 0) {
-			dev_err(dev, "failed to read SFDP parameter headers\n");
-			goto exit;
-		}
-	}
-
-	/*
-	 * Check other parameter headers to get the latest revision of
-	 * the basic flash parameter table.
-	 */
-	for (i = 0; i < header.nph; i++) {
-		param_header = &param_headers[i];
-
-		if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
-		    param_header->major == SFDP_JESD216_MAJOR &&
-		    (param_header->minor > bfpt_header->minor ||
-		     (param_header->minor == bfpt_header->minor &&
-		      param_header->length > bfpt_header->length)))
-			bfpt_header = param_header;
-	}
-
-	err = spi_nor_parse_bfpt(nor, bfpt_header, params);
-	if (err)
-		goto exit;
-
-	/* Parse optional parameter tables. */
-	for (i = 0; i < header.nph; i++) {
-		param_header = &param_headers[i];
-
-		switch (SFDP_PARAM_HEADER_ID(param_header)) {
-		case SFDP_SECTOR_MAP_ID:
-			err = spi_nor_parse_smpt(nor, param_header, params);
-			break;
-
-		case SFDP_4BAIT_ID:
-			err = spi_nor_parse_4bait(nor, param_header, params);
-			break;
-
-		default:
-			break;
-		}
-
-		if (err) {
-			dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
-				 SFDP_PARAM_HEADER_ID(param_header));
-			/*
-			 * Let's not drop all information we extracted so far
-			 * if optional table parsers fail. On failure, each
-			 * optional parser is responsible for rolling back to
-			 * the previously known spi_nor data.
-			 */
-			err = 0;
-		}
-	}
-
-exit:
-	kfree(param_headers);
-	return err;
-}
-
-static int spi_nor_select_read(struct spi_nor *nor,
-			       u32 shared_hwcaps)
-{
-	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
-	const struct spi_nor_read_command *read;
-
-	if (best_match < 0)
-		return -EINVAL;
-
-	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
-	if (cmd < 0)
-		return -EINVAL;
-
-	read = &nor->params.reads[cmd];
-	nor->read_opcode = read->opcode;
-	nor->read_proto = read->proto;
-
-	/*
-	 * In the spi-nor framework, we don't need to distinguish between
-	 * mode clock cycles and wait state clock cycles.
-	 * Indeed, the value of the mode clock cycles is used by a QSPI
-	 * flash memory to know whether it should enter or leave its 0-4-4
-	 * (Continuous Read / XIP) mode.
-	 * eXecution In Place is out of the scope of the mtd sub-system.
-	 * Hence we choose to merge both mode and wait state clock cycles
-	 * into the so-called dummy clock cycles.
-	 */
-	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
-	return 0;
-}
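-
-/*
- * Example: a Fast Read variant advertising 2 mode clocks and 4 wait
- * states is simply issued with read_dummy = 6 dummy clock cycles.
- */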
-
-static int spi_nor_select_pp(struct spi_nor *nor,
-			     u32 shared_hwcaps)
-{
-	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
-	const struct spi_nor_pp_command *pp;
-
-	if (best_match < 0)
-		return -EINVAL;
-
-	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
-	if (cmd < 0)
-		return -EINVAL;
-
-	pp = &nor->params.page_programs[cmd];
-	nor->program_opcode = pp->opcode;
-	nor->write_proto = pp->proto;
-	return 0;
-}
-
-/**
- * spi_nor_select_uniform_erase() - select optimum uniform erase type
- * @map:		the erase map of the SPI NOR
- * @wanted_size:	the erase type size to search for. Contains the value of
- *			info->sector_size or of the "small sector" size in case
- *			CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
- *
- * Once the optimum uniform sector erase command is found, disable all the
- * others.
- *
- * Return: pointer to erase type on success, NULL otherwise.
- */
-static const struct spi_nor_erase_type *
-spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
-			     const u32 wanted_size)
-{
-	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
-	int i;
-	u8 uniform_erase_type = map->uniform_erase_type;
-
-	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
-		if (!(uniform_erase_type & BIT(i)))
-			continue;
-
-		tested_erase = &map->erase_type[i];
-
-		/*
-		 * If the current erase size is the wanted one, stop here:
-		 * we have found the right uniform Sector Erase command.
-		 */
-		if (tested_erase->size == wanted_size) {
-			erase = tested_erase;
-			break;
-		}
-
-		/*
-		 * Otherwise, the current erase size is still a valid candidate.
-		 * Select the biggest valid candidate.
-		 */
-		if (!erase && tested_erase->size)
-			erase = tested_erase;
-			/* keep iterating to find the wanted_size */
-	}
-
-	if (!erase)
-		return NULL;
-
-	/* Disable all other Sector Erase commands. */
-	map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
-	map->uniform_erase_type |= BIT(erase - map->erase_type);
-	return erase;
-}
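-
-/*
- * Example: for a map advertising uniform 4 KiB and 64 KiB erases and
- * wanted_size = 4096, the loop above returns the 4 KiB erase type and the
- * 64 KiB bit is cleared from map->uniform_erase_type.
- */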
-
-static int spi_nor_select_erase(struct spi_nor *nor)
-{
-	struct spi_nor_erase_map *map = &nor->params.erase_map;
-	const struct spi_nor_erase_type *erase = NULL;
-	struct mtd_info *mtd = &nor->mtd;
-	u32 wanted_size = nor->info->sector_size;
-	int i;
-
-	/*
-	 * The previous implementation handling Sector Erase commands assumed
-	 * that the SPI flash memory has a uniform layout and then used only
-	 * one of the supported erase sizes for all Sector Erase commands.
-	 * So to be backward compatible, the new implementation also tries to
-	 * manage the SPI flash memory as uniform with a single erase sector
-	 * size, when possible.
-	 */
-#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
-	/* prefer "small sector" erase if possible */
-	wanted_size = 4096u;
-#endif
-
-	if (spi_nor_has_uniform_erase(nor)) {
-		erase = spi_nor_select_uniform_erase(map, wanted_size);
-		if (!erase)
-			return -EINVAL;
-		nor->erase_opcode = erase->opcode;
-		mtd->erasesize = erase->size;
-		return 0;
-	}
-
-	/*
-	 * For non-uniform SPI flash memory, set mtd->erasesize to the
-	 * maximum erase sector size. No need to set nor->erase_opcode.
-	 */
-	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
-		if (map->erase_type[i].size) {
-			erase = &map->erase_type[i];
-			break;
-		}
-	}
-
-	if (!erase)
-		return -EINVAL;
-
-	mtd->erasesize = erase->size;
-	return 0;
-}
-
-static int spi_nor_default_setup(struct spi_nor *nor,
-				 const struct spi_nor_hwcaps *hwcaps)
-{
-	struct spi_nor_flash_parameter *params = &nor->params;
-	u32 ignored_mask, shared_mask;
-	int err;
-
-	/*
-	 * Keep only the hardware capabilities supported by both the SPI
-	 * controller and the SPI flash memory.
-	 */
-	shared_mask = hwcaps->mask & params->hwcaps.mask;
-
-	if (nor->spimem) {
-		/*
-		 * When called from spi_nor_probe(), all caps are set and we
-		 * need to discard some of them based on what the SPI
-		 * controller actually supports (using spi_mem_supports_op()).
-		 */
-		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
-	} else {
-		/*
-		 * SPI n-n-n protocols are not supported when the SPI
-		 * controller directly implements the spi_nor interface.
-		 * Yet another reason to switch to spi-mem.
-		 */
-		ignored_mask = SNOR_HWCAPS_X_X_X;
-		if (shared_mask & ignored_mask) {
-			dev_dbg(nor->dev,
-				"SPI n-n-n protocols are not supported.\n");
-			shared_mask &= ~ignored_mask;
-		}
-	}
-
-	/* Select the (Fast) Read command. */
-	err = spi_nor_select_read(nor, shared_mask);
-	if (err) {
-		dev_err(nor->dev,
-			"can't select read settings supported by both the SPI controller and memory.\n");
-		return err;
-	}
-
-	/* Select the Page Program command. */
-	err = spi_nor_select_pp(nor, shared_mask);
-	if (err) {
-		dev_err(nor->dev,
-			"can't select write settings supported by both the SPI controller and memory.\n");
-		return err;
-	}
-
-	/* Select the Sector Erase command. */
-	err = spi_nor_select_erase(nor);
-	if (err) {
-		dev_err(nor->dev,
-			"can't select erase settings supported by both the SPI controller and memory.\n");
-		return err;
-	}
-
-	return 0;
-}
-
-static int spi_nor_setup(struct spi_nor *nor,
-			 const struct spi_nor_hwcaps *hwcaps)
-{
-	if (!nor->params.setup)
-		return 0;
-
-	return nor->params.setup(nor, hwcaps);
-}
-
-static void macronix_set_default_init(struct spi_nor *nor)
-{
-	nor->params.quad_enable = macronix_quad_enable;
-	nor->params.set_4byte = macronix_set_4byte;
-}
-
-static void st_micron_set_default_init(struct spi_nor *nor)
-{
-	nor->flags |= SNOR_F_HAS_LOCK;
-	nor->params.quad_enable = NULL;
-	nor->params.set_4byte = st_micron_set_4byte;
-}
-
-static void winbond_set_default_init(struct spi_nor *nor)
-{
-	nor->params.set_4byte = winbond_set_4byte;
-}
-
-/**
- * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
- * settings based on the MFR register and the ->default_init() hook.
- * @nor:	pointer to a 'struct spi_nor'.
- */
-static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
-{
-	/* Init flash parameters based on MFR */
-	switch (JEDEC_MFR(nor->info)) {
-	case SNOR_MFR_MACRONIX:
-		macronix_set_default_init(nor);
-		break;
-
-	case SNOR_MFR_ST:
-	case SNOR_MFR_MICRON:
-		st_micron_set_default_init(nor);
-		break;
-
-	case SNOR_MFR_WINBOND:
-		winbond_set_default_init(nor);
-		break;
-
-	default:
-		break;
-	}
-
-	if (nor->info->fixups && nor->info->fixups->default_init)
-		nor->info->fixups->default_init(nor);
-}
-
-/**
- * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
- * based on the JESD216 SFDP standard.
- * @nor:	pointer to a 'struct spi_nor'.
- *
- * The method has a roll-back mechanism: in case the SFDP parsing fails, the
- * legacy flash parameters and settings will be restored.
- */
-static void spi_nor_sfdp_init_params(struct spi_nor *nor)
-{
-	struct spi_nor_flash_parameter sfdp_params;
-
-	memcpy(&sfdp_params, &nor->params, sizeof(sfdp_params));
-
-	if (spi_nor_parse_sfdp(nor, &nor->params)) {
-		memcpy(&nor->params, &sfdp_params, sizeof(nor->params));
-		nor->addr_width = 0;
-		nor->flags &= ~SNOR_F_4B_OPCODES;
-	}
-}
-
-/**
- * spi_nor_info_init_params() - Initialize the flash's parameters and settings
- * based on nor->info data.
- * @nor:	pointer to a 'struct spi_nor'.
- */
-static void spi_nor_info_init_params(struct spi_nor *nor)
-{
-	struct spi_nor_flash_parameter *params = &nor->params;
-	struct spi_nor_erase_map *map = &params->erase_map;
-	const struct flash_info *info = nor->info;
-	struct device_node *np = spi_nor_get_flash_node(nor);
-	u8 i, erase_mask;
-
-	/* Initialize legacy flash parameters and settings. */
-	params->quad_enable = spansion_quad_enable;
-	params->set_4byte = spansion_set_4byte;
-	params->setup = spi_nor_default_setup;
-
-	/* Set SPI NOR sizes. */
-	params->size = (u64)info->sector_size * info->n_sectors;
-	params->page_size = info->page_size;
-
-	if (!(info->flags & SPI_NOR_NO_FR)) {
-		/* Default to Fast Read for DT and non-DT platform devices. */
-		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
-
-		/* Mask out Fast Read if not requested at DT instantiation. */
-		if (np && !of_property_read_bool(np, "m25p,fast-read"))
-			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
-	}
-
-	/* (Fast) Read settings. */
-	params->hwcaps.mask |= SNOR_HWCAPS_READ;
-	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
-				  0, 0, SPINOR_OP_READ,
-				  SNOR_PROTO_1_1_1);
-
-	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
-		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
-					  0, 8, SPINOR_OP_READ_FAST,
-					  SNOR_PROTO_1_1_1);
-
-	if (info->flags & SPI_NOR_DUAL_READ) {
-		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
-		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
-					  0, 8, SPINOR_OP_READ_1_1_2,
-					  SNOR_PROTO_1_1_2);
-	}
-
-	if (info->flags & SPI_NOR_QUAD_READ) {
-		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
-		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
-					  0, 8, SPINOR_OP_READ_1_1_4,
-					  SNOR_PROTO_1_1_4);
-	}
-
-	if (info->flags & SPI_NOR_OCTAL_READ) {
-		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
-		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
-					  0, 8, SPINOR_OP_READ_1_1_8,
-					  SNOR_PROTO_1_1_8);
-	}
-
-	/* Page Program settings. */
-	params->hwcaps.mask |= SNOR_HWCAPS_PP;
-	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
-				SPINOR_OP_PP, SNOR_PROTO_1_1_1);
-
-	/*
-	 * Sector Erase settings. Sort Erase Types in ascending order, with the
-	 * smallest erase size starting at BIT(0).
-	 */
-	erase_mask = 0;
-	i = 0;
-	if (info->flags & SECT_4K_PMC) {
-		erase_mask |= BIT(i);
-		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
-				       SPINOR_OP_BE_4K_PMC);
-		i++;
-	} else if (info->flags & SECT_4K) {
-		erase_mask |= BIT(i);
-		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
-				       SPINOR_OP_BE_4K);
-		i++;
-	}
-	erase_mask |= BIT(i);
-	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
-			       SPINOR_OP_SE);
-	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
-}
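-
-/*
- * Example: a flash_info entry with SECT_4K and a 64 KiB sector_size ends
- * up with erase_type[0] = 4 KiB (SPINOR_OP_BE_4K), erase_type[1] = 64 KiB
- * (SPINOR_OP_SE) and a uniform erase mask of BIT(0) | BIT(1).
- */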
-
-static void spansion_post_sfdp_fixups(struct spi_nor *nor)
-{
-	if (nor->params.size <= SZ_16M)
-		return;
-
-	nor->flags |= SNOR_F_4B_OPCODES;
-	/* No small sector erase for 4-byte command set */
-	nor->erase_opcode = SPINOR_OP_SE;
-	nor->mtd.erasesize = nor->info->sector_size;
-}
-
-static void s3an_post_sfdp_fixups(struct spi_nor *nor)
-{
-	nor->params.setup = s3an_nor_setup;
-}
-
-/**
- * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
- * after SFDP has been parsed (it is also called for SPI NORs that do not
- * support RDSFDP).
- * @nor:	pointer to a 'struct spi_nor'
- *
- * Typically used to tweak various parameters that could not be extracted by
- * other means (i.e. when information provided by the SFDP/flash_info tables
- * is incomplete or wrong).
- */
-static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
-{
-	switch (JEDEC_MFR(nor->info)) {
-	case SNOR_MFR_SPANSION:
-		spansion_post_sfdp_fixups(nor);
-		break;
-
-	default:
-		break;
-	}
-
-	if (nor->info->flags & SPI_S3AN)
-		s3an_post_sfdp_fixups(nor);
-
-	if (nor->info->fixups && nor->info->fixups->post_sfdp)
-		nor->info->fixups->post_sfdp(nor);
-}
-
-/**
- * spi_nor_late_init_params() - Late initialization of default flash parameters.
- * @nor:	pointer to a 'struct spi_nor'
- *
- * Used to set default flash parameters and settings when the ->default_init()
- * hook or the SFDP parser leave voids.
- */
-static void spi_nor_late_init_params(struct spi_nor *nor)
-{
-	/*
-	 * NOR protection support. When locking_ops are not provided, we pick
-	 * the default ones.
-	 */
-	if (nor->flags & SNOR_F_HAS_LOCK && !nor->params.locking_ops)
-		nor->params.locking_ops = &stm_locking_ops;
-}
-
-/**
- * spi_nor_init_params() - Initialize the flash's parameters and settings.
- * @nor:	pointer to a 'struct spi_nor'.
- *
- * The flash parameters and settings are initialized based on a sequence of
- * calls that are ordered by priority:
- *
- * 1/ Default flash parameters initialization. The initializations are done
- *    based on nor->info data:
- *		spi_nor_info_init_params()
- *
- * which can be overwritten by:
- * 2/ Manufacturer flash parameters initialization. The initializations are
- *    done based on the MFR register, or when the decisions cannot be made
- *    solely based on MFR, by using specific flash_info tweaks, ->default_init():
- *		spi_nor_manufacturer_init_params()
- *
- * which can be overwritten by:
- * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
- *    should be more accurate than the above.
- *		spi_nor_sfdp_init_params()
- *
- *    Please note that there is a ->post_bfpt() fixup hook that can overwrite
- *    the flash parameters and settings immediately after parsing the Basic
- *    Flash Parameter Table.
- *
- * which can be overwritten by:
- * 4/ Post SFDP flash parameters initialization. Used to tweak various
- *    parameters that could not be extracted by other means (i.e. when
- *    information provided by the SFDP/flash_info tables is incomplete or
- *    wrong).
- *		spi_nor_post_sfdp_fixups()
- *
- * 5/ Late default flash parameters initialization, used when the
- * ->default_init() hook or the SFDP parser do not set specific params.
- *		spi_nor_late_init_params()
- */
-static void spi_nor_init_params(struct spi_nor *nor)
-{
-	spi_nor_info_init_params(nor);
-
-	spi_nor_manufacturer_init_params(nor);
-
-	if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
-	    !(nor->info->flags & SPI_NOR_SKIP_SFDP))
-		spi_nor_sfdp_init_params(nor);
-
-	spi_nor_post_sfdp_fixups(nor);
-
-	spi_nor_late_init_params(nor);
-}
-
-/**
- * spi_nor_quad_enable() - enable Quad I/O if needed.
- * @nor:                pointer to a 'struct spi_nor'
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_quad_enable(struct spi_nor *nor)
-{
-	if (!nor->params.quad_enable)
-		return 0;
-
-	if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
-	      spi_nor_get_protocol_width(nor->write_proto) == 4))
-		return 0;
-
-	return nor->params.quad_enable(nor);
-}
-
-static int spi_nor_init(struct spi_nor *nor)
-{
-	int err;
-
-	if (nor->clear_sr_bp) {
-		if (nor->params.quad_enable == spansion_quad_enable)
-			nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
-
-		err = nor->clear_sr_bp(nor);
-		if (err) {
-			dev_err(nor->dev,
-				"fail to clear block protection bits\n");
-			return err;
-		}
-	}
-
-	err = spi_nor_quad_enable(nor);
-	if (err) {
-		dev_err(nor->dev, "quad mode not supported\n");
-		return err;
-	}
-
-	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
-		/*
-		 * If the RESET# pin isn't hooked up properly, or the system
-		 * otherwise doesn't perform a reset command in the boot
-		 * sequence, it's impossible to 100% protect against unexpected
-		 * reboots (e.g., crashes). Warn the user (or hopefully, system
-		 * designer) that this is bad.
-		 */
-		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
-			  "enabling reset hack; may not recover from unexpected reboots\n");
-		nor->params.set_4byte(nor, true);
-	}
-
-	return 0;
-}
-
-/* mtd resume handler */
-static void spi_nor_resume(struct mtd_info *mtd)
-{
-	struct spi_nor *nor = mtd_to_spi_nor(mtd);
-	struct device *dev = nor->dev;
-	int ret;
-
-	/* re-initialize the nor chip */
-	ret = spi_nor_init(nor);
-	if (ret)
-		dev_err(dev, "resume() failed\n");
-}
-
-void spi_nor_restore(struct spi_nor *nor)
-{
-	/* restore the addressing mode */
-	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
-	    nor->flags & SNOR_F_BROKEN_RESET)
-		nor->params.set_4byte(nor, false);
-}
-EXPORT_SYMBOL_GPL(spi_nor_restore);
-
-static const struct flash_info *spi_nor_match_id(const char *name)
-{
-	const struct flash_info *id = spi_nor_ids;
-
-	while (id->name) {
-		if (!strcmp(name, id->name))
-			return id;
-		id++;
-	}
-	return NULL;
-}
-
-static int spi_nor_set_addr_width(struct spi_nor *nor)
-{
-	if (nor->addr_width) {
-		/* already configured from SFDP */
-	} else if (nor->info->addr_width) {
-		nor->addr_width = nor->info->addr_width;
-	} else if (nor->mtd.size > 0x1000000) {
-		/* enable 4-byte addressing if the device exceeds 16MiB */
-		nor->addr_width = 4;
-	} else {
-		nor->addr_width = 3;
-	}
-
-	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
-		dev_err(nor->dev, "address width is too large: %u\n",
-			nor->addr_width);
-		return -EINVAL;
-	}
-
-	/* Set 4byte opcodes when possible. */
-	if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
-	    !(nor->flags & SNOR_F_HAS_4BAIT))
-		spi_nor_set_4byte_opcodes(nor);
-
-	return 0;
-}
-
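For context on the 0x1000000 cut-off used in spi_nor_set_addr_width() above: three address bytes span exactly 2^24 bytes, so any part larger than 16 MiB needs a fourth address byte. A standalone sketch (plain userspace C, not kernel code) of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long limit = 1ULL << 24;	/* what 3 address bytes can reach */
	unsigned long long part = 32ULL << 20;	/* e.g. a 32 MiB part */

	printf("3-byte address limit: 0x%llx (%llu MiB)\n", limit, limit >> 20);
	printf("addr_width for a 32 MiB part: %d\n", part > limit ? 4 : 3);
	return 0;
}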
-static void spi_nor_debugfs_init(struct spi_nor *nor,
-				 const struct flash_info *info)
-{
-	struct mtd_info *mtd = &nor->mtd;
-
-	mtd->dbg.partname = info->name;
-	mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
-					 info->id_len, info->id);
-}
-
-static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
-						       const char *name)
-{
-	const struct flash_info *info = NULL;
-
-	if (name)
-		info = spi_nor_match_id(name);
-	/* Try to auto-detect if chip name wasn't specified or not found */
-	if (!info)
-		info = spi_nor_read_id(nor);
-	if (IS_ERR_OR_NULL(info))
-		return ERR_PTR(-ENOENT);
-
-	/*
-	 * If the caller has specified the name of a flash model that can
-	 * normally be detected using JEDEC, let's verify it.
-	 */
-	if (name && info->id_len) {
-		const struct flash_info *jinfo;
-
-		jinfo = spi_nor_read_id(nor);
-		if (IS_ERR(jinfo)) {
-			return jinfo;
-		} else if (jinfo != info) {
-			/*
-			 * JEDEC knows better, so overwrite platform ID. We
-			 * can't trust partitions any longer, but we'll let
-			 * mtd apply them anyway, since some partitions may be
-			 * marked read-only, and we don't want to lose that
-			 * information, even if it's not 100% accurate.
-			 */
-			dev_warn(nor->dev, "found %s, expected %s\n",
-				 jinfo->name, info->name);
-			info = jinfo;
-		}
-	}
-
-	return info;
-}
-
-int spi_nor_scan(struct spi_nor *nor, const char *name,
-		 const struct spi_nor_hwcaps *hwcaps)
-{
-	const struct flash_info *info;
-	struct device *dev = nor->dev;
-	struct mtd_info *mtd = &nor->mtd;
-	struct device_node *np = spi_nor_get_flash_node(nor);
-	struct spi_nor_flash_parameter *params = &nor->params;
-	int ret;
-	int i;
-
-	ret = spi_nor_check(nor);
-	if (ret)
-		return ret;
-
-	/* Reset SPI protocol for all commands. */
-	nor->reg_proto = SNOR_PROTO_1_1_1;
-	nor->read_proto = SNOR_PROTO_1_1_1;
-	nor->write_proto = SNOR_PROTO_1_1_1;
-
-	/*
-	 * We need the bounce buffer early to read/write registers when going
-	 * through the spi-mem layer (buffers have to be DMA-able).
-	 * For spi-mem drivers, we'll reallocate a new buffer if
-	 * nor->page_size turns out to be greater than PAGE_SIZE (which
-	 * shouldn't happen anytime soon, since NOR pages are usually less
-	 * than 1 KB) after spi_nor_scan() returns.
-	 */
-	nor->bouncebuf_size = PAGE_SIZE;
-	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
-				      GFP_KERNEL);
-	if (!nor->bouncebuf)
-		return -ENOMEM;
-
-	info = spi_nor_get_flash_info(nor, name);
-	if (IS_ERR(info))
-		return PTR_ERR(info);
-
-	nor->info = info;
-
-	spi_nor_debugfs_init(nor, info);
-
-	mutex_init(&nor->lock);
-
-	/*
-	 * Make sure the XSR_RDY flag is set before calling
-	 * spi_nor_wait_till_ready(). Xilinx S3AN shares its MFR
-	 * with Atmel spi-nor.
-	 */
-	if (info->flags & SPI_NOR_XSR_RDY)
-		nor->flags |= SNOR_F_READY_XSR_RDY;
-
-	if (info->flags & SPI_NOR_HAS_LOCK)
-		nor->flags |= SNOR_F_HAS_LOCK;
-
-	/*
-	 * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to power
-	 * up with the software protection bits set.
-	 */
-	if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
-	    JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
-	    JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
-	    nor->info->flags & SPI_NOR_HAS_LOCK)
-		nor->clear_sr_bp = spi_nor_clear_sr_bp;
-
-	/* Init flash parameters based on flash_info struct and SFDP */
-	spi_nor_init_params(nor);
-
-	if (!mtd->name)
-		mtd->name = dev_name(dev);
-	mtd->priv = nor;
-	mtd->type = MTD_NORFLASH;
-	mtd->writesize = 1;
-	mtd->flags = MTD_CAP_NORFLASH;
-	mtd->size = params->size;
-	mtd->_erase = spi_nor_erase;
-	mtd->_read = spi_nor_read;
-	mtd->_resume = spi_nor_resume;
-
-	if (nor->params.locking_ops) {
-		mtd->_lock = spi_nor_lock;
-		mtd->_unlock = spi_nor_unlock;
-		mtd->_is_locked = spi_nor_is_locked;
-	}
-
-	/* SST NOR chips use AAI word program */
-	if (info->flags & SST_WRITE)
-		mtd->_write = sst_write;
-	else
-		mtd->_write = spi_nor_write;
-
-	if (info->flags & USE_FSR)
-		nor->flags |= SNOR_F_USE_FSR;
-	if (info->flags & SPI_NOR_HAS_TB)
-		nor->flags |= SNOR_F_HAS_SR_TB;
-	if (info->flags & NO_CHIP_ERASE)
-		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
-	if (info->flags & USE_CLSR)
-		nor->flags |= SNOR_F_USE_CLSR;
-
-	if (info->flags & SPI_NOR_NO_ERASE)
-		mtd->flags |= MTD_NO_ERASE;
-
-	mtd->dev.parent = dev;
-	nor->page_size = params->page_size;
-	mtd->writebufsize = nor->page_size;
-
-	if (of_property_read_bool(np, "broken-flash-reset"))
-		nor->flags |= SNOR_F_BROKEN_RESET;
-
-	/*
-	 * Configure the SPI memory:
-	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
-	 * - set the number of dummy cycles (mode cycles + wait states).
-	 * - set the SPI protocols for register and memory accesses.
-	 */
-	ret = spi_nor_setup(nor, hwcaps);
-	if (ret)
-		return ret;
-
-	if (info->flags & SPI_NOR_4B_OPCODES)
-		nor->flags |= SNOR_F_4B_OPCODES;
-
-	ret = spi_nor_set_addr_width(nor);
-	if (ret)
-		return ret;
-
-	/* Send all the required SPI flash commands to initialize device */
-	ret = spi_nor_init(nor);
-	if (ret)
-		return ret;
-
-	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
-			(long long)mtd->size >> 10);
-
-	dev_dbg(dev,
-		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
-		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
-		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
-		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
-
-	if (mtd->numeraseregions)
-		for (i = 0; i < mtd->numeraseregions; i++)
-			dev_dbg(dev,
-				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
-				".erasesize = 0x%.8x (%uKiB), "
-				".numblocks = %d }\n",
-				i, (long long)mtd->eraseregions[i].offset,
-				mtd->eraseregions[i].erasesize,
-				mtd->eraseregions[i].erasesize / 1024,
-				mtd->eraseregions[i].numblocks);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(spi_nor_scan);
-
-static int spi_nor_probe(struct spi_mem *spimem)
-{
-	struct spi_device *spi = spimem->spi;
-	struct flash_platform_data *data = dev_get_platdata(&spi->dev);
-	struct spi_nor *nor;
-	/*
-	 * Enable all caps by default. The core will mask them after
-	 * checking what's really supported using spi_mem_supports_op().
-	 */
-	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
-	char *flash_name;
-	int ret;
-
-	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
-	if (!nor)
-		return -ENOMEM;
-
-	nor->spimem = spimem;
-	nor->dev = &spi->dev;
-	spi_nor_set_flash_node(nor, spi->dev.of_node);
-
-	spi_mem_set_drvdata(spimem, nor);
-
-	if (data && data->name)
-		nor->mtd.name = data->name;
-
-	if (!nor->mtd.name)
-		nor->mtd.name = spi_mem_get_name(spimem);
-
-	/*
-	 * For some (historical?) reason many platforms provide two different
-	 * names in flash_platform_data: "name" and "type". Quite often name is
-	 * set to "m25p80" and then "type" provides a real chip name.
-	 * If that's the case, respect "type" and ignore a "name".
-	 */
-	if (data && data->type)
-		flash_name = data->type;
-	else if (!strcmp(spi->modalias, "spi-nor"))
-		flash_name = NULL; /* auto-detect */
-	else
-		flash_name = spi->modalias;
-
-	ret = spi_nor_scan(nor, flash_name, &hwcaps);
-	if (ret)
-		return ret;
-
-	/*
-	 * None of the existing parts have > 512B pages, but let's play safe
-	 * and add this logic so that if anyone ever adds support for such
-	 * a NOR we don't end up with buffer overflows.
-	 */
-	if (nor->page_size > PAGE_SIZE) {
-		nor->bouncebuf_size = nor->page_size;
-		devm_kfree(nor->dev, nor->bouncebuf);
-		nor->bouncebuf = devm_kmalloc(nor->dev,
-					      nor->bouncebuf_size,
-					      GFP_KERNEL);
-		if (!nor->bouncebuf)
-			return -ENOMEM;
-	}
-
-	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
-				   data ? data->nr_parts : 0);
-}
-
-static int spi_nor_remove(struct spi_mem *spimem)
-{
-	struct spi_nor *nor = spi_mem_get_drvdata(spimem);
-
-	spi_nor_restore(nor);
-
-	/* Clean up MTD stuff. */
-	return mtd_device_unregister(&nor->mtd);
-}
-
-static void spi_nor_shutdown(struct spi_mem *spimem)
-{
-	struct spi_nor *nor = spi_mem_get_drvdata(spimem);
-
-	spi_nor_restore(nor);
-}
-
-/*
- * Do NOT add to this array without reading the following:
- *
- * Historically, many flash devices are bound to this driver by their name. But
- * since most of these flashes are compatible to some extent, and their
- * differences can often be distinguished by the JEDEC read-ID command, we
- * encourage new users to add support to the spi-nor library, and simply bind
- * against a generic string here (e.g., "jedec,spi-nor").
- *
- * Many flash names are kept here in this list (as well as in spi-nor.c) to
- * keep them available as module aliases for existing platforms.
- */
-static const struct spi_device_id spi_nor_dev_ids[] = {
-	/*
-	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
-	 * hack around the fact that the SPI core does not provide uevent
-	 * matching for .of_match_table
-	 */
-	{"spi-nor"},
-
-	/*
-	 * Entries not used in DTs that should be safe to drop after replacing
-	 * them with "spi-nor" in platform data.
-	 */
-	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},
-
-	/*
-	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
-	 * should be kept for backward compatibility.
-	 */
-	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
-	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
-	{"mx25l25635e"},{"mx66l51235l"},
-	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
-	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
-	{"s25fl064k"},
-	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
-	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
-	{"m25p64"},	{"m25p128"},
-	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
-	{"w25q80bl"},	{"w25q128"},	{"w25q256"},
-
-	/* Flashes that can't be detected using JEDEC */
-	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
-	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
-	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},
-
-	/* Everspin MRAMs (non-JEDEC) */
-	{ "mr25h128" }, /* 128 Kib, 40 MHz */
-	{ "mr25h256" }, /* 256 Kib, 40 MHz */
-	{ "mr25h10" },  /*   1 Mib, 40 MHz */
-	{ "mr25h40" },  /*   4 Mib, 40 MHz */
-
-	{ },
-};
-MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
-
-static const struct of_device_id spi_nor_of_table[] = {
-	/*
-	 * Generic compatibility for SPI NOR that can be identified by the
-	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
-	 */
-	{ .compatible = "jedec,spi-nor" },
-	{ /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, spi_nor_of_table);
-
-/*
- * REVISIT: many of these chips have deep power-down modes, which
- * should clearly be entered on suspend() to minimize power use.
- * And also when they're otherwise idle...
- */
-static struct spi_mem_driver spi_nor_driver = {
-	.spidrv = {
-		.driver = {
-			.name = "spi-nor",
-			.of_match_table = spi_nor_of_table,
-		},
-		.id_table = spi_nor_dev_ids,
-	},
-	.probe = spi_nor_probe,
-	.remove = spi_nor_remove,
-	.shutdown = spi_nor_shutdown,
-};
-module_spi_mem_driver(spi_nor_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
-MODULE_AUTHOR("Mike Lavender");
-MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
new file mode 100644
index 0000000..0ab0762
--- /dev/null
+++ b/drivers/mtd/spi-nor/sst.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info sst_parts[] = {
+	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
+	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8,
+			      SECT_4K | SST_WRITE) },
+	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16,
+			      SECT_4K | SST_WRITE) },
+	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32,
+			      SECT_4K | SST_WRITE) },
+	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64,
+			      SECT_4K | SST_WRITE) },
+	{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128,
+			      SECT_4K | SPI_NOR_4BIT_BP) },
+	{ "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1,
+			      SECT_4K | SST_WRITE) },
+	{ "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2,
+			      SECT_4K | SST_WRITE) },
+	{ "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4,
+			      SECT_4K | SST_WRITE) },
+	{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024,  4, SECT_4K) },
+	{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024,  8, SECT_4K) },
+	{ "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  8,
+			      SECT_4K | SST_WRITE) },
+	{ "sst25wf080",  INFO(0xbf2505, 0, 64 * 1024, 16,
+			      SECT_4K | SST_WRITE) },
+	{ "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32,
+			      SECT_4K | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ) },
+	{ "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32,
+			      SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128,
+			      SECT_4K | SPI_NOR_DUAL_READ |
+			      SPI_NOR_QUAD_READ) },
+};
+
+static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
+		     size_t *retlen, const u_char *buf)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	size_t actual = 0;
+	int ret;
+
+	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+	ret = spi_nor_lock_and_prep(nor);
+	if (ret)
+		return ret;
+
+	ret = spi_nor_write_enable(nor);
+	if (ret)
+		goto out;
+
+	nor->sst_write_second = false;
+
+	/* Start write from odd address. */
+	if (to % 2) {
+		nor->program_opcode = SPINOR_OP_BP;
+
+		/* write one byte. */
+		ret = spi_nor_write_data(nor, to, 1, buf);
+		if (ret < 0)
+			goto out;
+		WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
+		ret = spi_nor_wait_till_ready(nor);
+		if (ret)
+			goto out;
+
+		to++;
+		actual++;
+	}
+
+	/* Write out most of the data here. */
+	for (; actual < len - 1; actual += 2) {
+		nor->program_opcode = SPINOR_OP_AAI_WP;
+
+		/* write two bytes. */
+		ret = spi_nor_write_data(nor, to, 2, buf + actual);
+		if (ret < 0)
+			goto out;
+		WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
+		ret = spi_nor_wait_till_ready(nor);
+		if (ret)
+			goto out;
+		to += 2;
+		nor->sst_write_second = true;
+	}
+	nor->sst_write_second = false;
+
+	ret = spi_nor_write_disable(nor);
+	if (ret)
+		goto out;
+
+	ret = spi_nor_wait_till_ready(nor);
+	if (ret)
+		goto out;
+
+	/* Write out trailing byte if it exists. */
+	if (actual != len) {
+		ret = spi_nor_write_enable(nor);
+		if (ret)
+			goto out;
+
+		nor->program_opcode = SPINOR_OP_BP;
+		ret = spi_nor_write_data(nor, to, 1, buf + actual);
+		if (ret < 0)
+			goto out;
+		WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
+		ret = spi_nor_wait_till_ready(nor);
+		if (ret)
+			goto out;
+
+		actual += 1;
+
+		ret = spi_nor_write_disable(nor);
+	}
+out:
+	*retlen += actual;
+	spi_nor_unlock_and_unprep(nor);
+	return ret;
+}
+
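The write path above splits a request into an optional leading byte-program (to reach even alignment), a run of 2-byte AAI word programs, and an optional trailing byte-program. A standalone sketch (plain userspace C, not kernel code) of just that split arithmetic:

#include <stdio.h>

/* Mirrors sst_write()'s alignment handling: lead BP byte, AAI pairs, trail BP byte. */
static void sst_split(unsigned int to, unsigned int len)
{
	unsigned int lead = to % 2;
	unsigned int pairs = (len - lead) / 2;
	unsigned int trail = len - lead - 2 * pairs;

	printf("to=0x%x len=%u -> %u lead, %u AAI pair(s), %u trail\n",
	       to, len, lead, pairs, trail);
}

int main(void)
{
	sst_split(0x1001, 5);	/* odd start: 1 + 2*2 + 0 */
	sst_split(0x1000, 5);	/* even start: 0 + 2*2 + 1 */
	return 0;
}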
+static void sst_default_init(struct spi_nor *nor)
+{
+	nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static void sst_post_sfdp_fixups(struct spi_nor *nor)
+{
+	if (nor->info->flags & SST_WRITE)
+		nor->mtd._write = sst_write;
+}
+
+static const struct spi_nor_fixups sst_fixups = {
+	.default_init = sst_default_init,
+	.post_sfdp = sst_post_sfdp_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_sst = {
+	.name = "sst",
+	.parts = sst_parts,
+	.nparts = ARRAY_SIZE(sst_parts),
+	.fixups = &sst_fixups,
+};
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
new file mode 100644
index 0000000..e5dfa78
--- /dev/null
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+w25q256_post_bfpt_fixups(struct spi_nor *nor,
+			 const struct sfdp_parameter_header *bfpt_header,
+			 const struct sfdp_bfpt *bfpt,
+			 struct spi_nor_flash_parameter *params)
+{
+	/*
+	 * W25Q256JV supports 4B opcodes but W25Q256FV does not.
+	 * Unfortunately, Winbond has re-used the same JEDEC ID for both
+	 * variants which prevents us from defining a new entry in the parts
+	 * table.
+	 * To differentiate between W25Q256JV and W25Q256FV, check the SFDP
+	 * header version: only the JV variant has a JESD216A-compliant
+	 * structure (version 5).
+	 */
+	if (bfpt_header->major == SFDP_JESD216_MAJOR &&
+	    bfpt_header->minor == SFDP_JESD216A_MINOR)
+		nor->flags |= SNOR_F_4B_OPCODES;
+
+	return 0;
+}
+
+static struct spi_nor_fixups w25q256_fixups = {
+	.post_bfpt = w25q256_post_bfpt_fixups,
+};
+
+static const struct flash_info winbond_parts[] = {
+	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+	{ "w25x05", INFO(0xef3010, 0, 64 * 1024,  1,  SECT_4K) },
+	{ "w25x10", INFO(0xef3011, 0, 64 * 1024,  2,  SECT_4K) },
+	{ "w25x20", INFO(0xef3012, 0, 64 * 1024,  4,  SECT_4K) },
+	{ "w25x40", INFO(0xef3013, 0, 64 * 1024,  8,  SECT_4K) },
+	{ "w25x80", INFO(0xef3014, 0, 64 * 1024,  16, SECT_4K) },
+	{ "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) },
+	{ "w25q16dw", INFO(0xef6015, 0, 64 * 1024,  32,
+			   SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			   SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
+	{ "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024,  32,
+				 SECT_4K | SPI_NOR_DUAL_READ |
+				 SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
+				 SPI_NOR_HAS_TB) },
+	{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024,  4, SECT_4K) },
+	{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024,  4, SECT_4K) },
+	{ "w25q20ew", INFO(0xef6012, 0, 64 * 1024,  4, SECT_4K) },
+	{ "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
+	{ "w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64,
+			   SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			   SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "w25q32jv", INFO(0xef7016, 0, 64 * 1024,  64,
+			   SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			   SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
+	{ "w25q32jwm", INFO(0xef8016, 0, 64 * 1024,  64,
+			    SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			    SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128,
+			    SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			    SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256,
+			    SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			    SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512,
+			    SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			    SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128,
+			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
+			   SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			   SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128, SECT_4K) },
+	{ "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
+			    SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			    SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
+			    SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			    SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+	{ "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
+	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
+	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
+			  SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+	  .fixups = &w25q256_fixups },
+	{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
+			    SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
+};
+
+/**
+ * winbond_set_4byte_addr_mode() - Set 4-byte address mode for Winbond flashes.
+ * @nor:	pointer to 'struct spi_nor'.
+ * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
+ *		address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+	int ret;
+
+	ret = spi_nor_set_4byte_addr_mode(nor, enable);
+	if (ret || enable)
+		return ret;
+
+	/*
+	 * On Winbond W25Q256FV, leaving 4-byte mode causes the Extended
+	 * Address Register to be set to 1, so all 3-byte-address reads come
+	 * from the second 16 MiB. We must clear the register to restore
+	 * normal behavior.
+	 */
+	ret = spi_nor_write_enable(nor);
+	if (ret)
+		return ret;
+
+	ret = spi_nor_write_ear(nor, 0);
+	if (ret)
+		return ret;
+
+	return spi_nor_write_disable(nor);
+}
+
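The comment in winbond_set_4byte_addr_mode() above describes a stale Extended Address Register redirecting 3-byte reads. A standalone sketch (plain userspace C, not kernel code) of the addressing effect, assuming the usual EAR semantics where the register supplies address bits 24 and up:

#include <stdint.h>
#include <stdio.h>

static uint32_t effective_addr(uint8_t ear, uint32_t addr24)
{
	/* A 3-byte command sends addr24; the EAR supplies the high bits. */
	return ((uint32_t)ear << 24) | (addr24 & 0xffffff);
}

int main(void)
{
	printf("EAR=1: 0x000000 reads 0x%07x\n", effective_addr(1, 0));	/* second 16 MiB */
	printf("EAR=0: 0x000000 reads 0x%07x\n", effective_addr(0, 0));	/* normal */
	return 0;
}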
+static void winbond_default_init(struct spi_nor *nor)
+{
+	nor->params->set_4byte_addr_mode = winbond_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups winbond_fixups = {
+	.default_init = winbond_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_winbond = {
+	.name = "winbond",
+	.parts = winbond_parts,
+	.nparts = ARRAY_SIZE(winbond_parts),
+	.fixups = &winbond_fixups,
+};
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
new file mode 100644
index 0000000..1138bdb
--- /dev/null
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info xilinx_parts[] = {
+	/* Xilinx S3AN Internal Flash */
+	{ "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
+	{ "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
+	{ "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
+	{ "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
+	{ "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
+};
+
+/*
+ * This code converts an address to the Default Address Mode, which has
+ * non-power-of-two page sizes. We must support this mode because it is the
+ * default mode supported by Xilinx tools: it can access the whole flash
+ * area, and changing over to the Power-of-two mode is irreversible and
+ * corrupts the original data.
+ * Addr can safely be an unsigned int; the biggest S3AN device is smaller
+ * than 4 MiB.
+ */
+static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
+{
+	u32 offset, page;
+
+	offset = addr % nor->page_size;
+	page = addr / nor->page_size;
+	page <<= (nor->page_size > 512) ? 10 : 9;
+
+	return page | offset;
+}
+
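To make the Default Address Mode layout concrete, here is a standalone sketch (plain userspace C, not kernel code) of the same conversion with one worked value:

#include <stdio.h>

/* Same math as s3an_convert_addr(): page index in bits 9+ (or 10+ for 528-byte pages). */
static unsigned int s3an_addr(unsigned int addr, unsigned int page_size)
{
	unsigned int offset = addr % page_size;
	unsigned int page = addr / page_size;

	page <<= (page_size > 512) ? 10 : 9;
	return page | offset;
}

int main(void)
{
	/* 1000 / 264 = 3, 1000 % 264 = 208 -> (3 << 9) | 208 = 0x6d0 */
	printf("byte address 1000 -> raw address 0x%x\n", s3an_addr(1000, 264));
	return 0;
}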
+static int xilinx_nor_setup(struct spi_nor *nor,
+			    const struct spi_nor_hwcaps *hwcaps)
+{
+	int ret;
+
+	ret = spi_nor_xread_sr(nor, nor->bouncebuf);
+	if (ret)
+		return ret;
+
+	nor->erase_opcode = SPINOR_OP_XSE;
+	nor->program_opcode = SPINOR_OP_XPP;
+	nor->read_opcode = SPINOR_OP_READ;
+	nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+
+	/*
+	 * These flashes have a page size of 264 or 528 bytes (known as
+	 * Default addressing mode). It can be changed to a more standard
+	 * Power-of-two mode where the page size is 256/512 bytes. This
+	 * comes at a price: there is 3% less space, the data is corrupted,
+	 * and the page size cannot be changed back to Default addressing
+	 * mode.
+	 *
+	 * The current addressing mode can be read from the XRDSR register
+	 * and should not be changed, because changing it is a destructive
+	 * operation.
+	 */
+	if (nor->bouncebuf[0] & XSR_PAGESIZE) {
+		/* Flash in Power of 2 mode */
+		nor->page_size = (nor->page_size == 264) ? 256 : 512;
+		nor->mtd.writebufsize = nor->page_size;
+		nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
+		nor->mtd.erasesize = 8 * nor->page_size;
+	} else {
+		/* Flash in Default addressing mode */
+		nor->params->convert_addr = s3an_convert_addr;
+		nor->mtd.erasesize = nor->info->sector_size;
+	}
+
+	return 0;
+}
+
+static void xilinx_post_sfdp_fixups(struct spi_nor *nor)
+{
+	nor->params->setup = xilinx_nor_setup;
+}
+
+static const struct spi_nor_fixups xilinx_fixups = {
+	.post_sfdp = xilinx_post_sfdp_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_xilinx = {
+	.name = "xilinx",
+	.parts = xilinx_parts,
+	.nparts = ARRAY_SIZE(xilinx_parts),
+	.fixups = &xilinx_fixups,
+};
diff --git a/drivers/mtd/spi-nor/xmc.c b/drivers/mtd/spi-nor/xmc.c
new file mode 100644
index 0000000..2c7773b
--- /dev/null
+++ b/drivers/mtd/spi-nor/xmc.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info xmc_parts[] = {
+	/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
+	{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128,
+			    SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256,
+			     SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+};
+
+const struct spi_nor_manufacturer spi_nor_xmc = {
+	.name = "xmc",
+	.parts = xmc_parts,
+	.nparts = ARRAY_SIZE(xmc_parts),
+};
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 10b2459..ae5abe4 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -1059,7 +1059,7 @@
 			 * be a result of power cut during erasure.
 			 */
 			ai->maybe_bad_peb_count += 1;
-		/* fall through */
+		fallthrough;
 	case UBI_IO_BAD_HDR:
 			/*
 			 * If we're facing a bad VID header we have to drop *all*
@@ -1640,7 +1640,7 @@
 out_wl:
 	ubi_wl_close(ubi);
 out_vtbl:
-	ubi_free_internal_volumes(ubi);
+	ubi_free_all_volumes(ubi);
 	vfree(ubi->vtbl);
 out_ai:
 	destroy_ai(ai);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index d636bbe..e85b04e 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -503,19 +503,40 @@
 }
 
 /**
+ * ubi_free_volumes_from - free volumes starting from a specific index.
+ * @ubi: UBI device description object
+ * @from: the first volume index to free
+ */
+static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
+{
+	int i;
+
+	for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+		if (!ubi->volumes[i])
+			continue;
+		ubi_eba_replace_table(ubi->volumes[i], NULL);
+		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
+		kfree(ubi->volumes[i]);
+		ubi->volumes[i] = NULL;
+	}
+}
+
+/**
+ * ubi_free_all_volumes - free all volumes.
+ * @ubi: UBI device description object
+ */
+void ubi_free_all_volumes(struct ubi_device *ubi)
+{
+	ubi_free_volumes_from(ubi, 0);
+}
+
+/**
  * ubi_free_internal_volumes - free internal volumes.
  * @ubi: UBI device description object
  */
 void ubi_free_internal_volumes(struct ubi_device *ubi)
 {
-	int i;
-
-	for (i = ubi->vtbl_slots;
-	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
-		ubi_eba_replace_table(ubi->volumes[i], NULL);
-		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
-		kfree(ubi->volumes[i]);
-	}
+	ubi_free_volumes_from(ubi, ubi->vtbl_slots);
 }
 
 static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
@@ -846,8 +867,11 @@
 	 * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
 	 * MLC NAND is different and needs special care, otherwise UBI or UBIFS
 	 * will die soon and you will lose all your data.
+	 * Relax this rule if the partition we're attaching to operates in SLC
+	 * mode.
 	 */
-	if (mtd->type == MTD_MLCNANDFLASH) {
+	if (mtd->type == MTD_MLCNANDFLASH &&
+	    !(mtd->flags & MTD_SLC_ON_MLC_EMULATION)) {
 		pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
 			mtd->index);
 		return -EINVAL;
@@ -1013,7 +1037,7 @@
 out_detach:
 	ubi_devices[ubi_num] = NULL;
 	ubi_wl_close(ubi);
-	ubi_free_internal_volumes(ubi);
+	ubi_free_all_volumes(ubi);
 	vfree(ubi->vtbl);
 out_free:
 	vfree(ubi->peb_buf);
@@ -1159,7 +1183,7 @@
 		 * MTD device name.
 		 */
 		mtd = get_mtd_device_nm(mtd_dev);
-		if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
+		if (PTR_ERR(mtd) == -ENODEV)
 			/* Probably this is an MTD character device node path */
 			mtd = open_mtd_by_chdev(mtd_dev);
 	} else
@@ -1321,10 +1345,10 @@
 	switch (*endp) {
 	case 'G':
 		result *= 1024;
-		/* fall through */
+		fallthrough;
 	case 'M':
 		result *= 1024;
-		/* fall through */
+		fallthrough;
 	case 'K':
 		result *= 1024;
 		if (endp[1] == 'i' && endp[2] == 'B')
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 1b77fff..cc9a28c 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -1078,36 +1078,6 @@
 	return err;
 }
 
-#ifdef CONFIG_COMPAT
-static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
-				  unsigned long arg)
-{
-	unsigned long translated_arg = (unsigned long)compat_ptr(arg);
-
-	return vol_cdev_ioctl(file, cmd, translated_arg);
-}
-
-static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
-				  unsigned long arg)
-{
-	unsigned long translated_arg = (unsigned long)compat_ptr(arg);
-
-	return ubi_cdev_ioctl(file, cmd, translated_arg);
-}
-
-static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
-				   unsigned long arg)
-{
-	unsigned long translated_arg = (unsigned long)compat_ptr(arg);
-
-	return ctrl_cdev_ioctl(file, cmd, translated_arg);
-}
-#else
-#define vol_cdev_compat_ioctl  NULL
-#define ubi_cdev_compat_ioctl  NULL
-#define ctrl_cdev_compat_ioctl NULL
-#endif
-
 /* UBI volume character device operations */
 const struct file_operations ubi_vol_cdev_operations = {
 	.owner          = THIS_MODULE,
@@ -1118,7 +1088,7 @@
 	.write          = vol_cdev_write,
 	.fsync		= vol_cdev_fsync,
 	.unlocked_ioctl = vol_cdev_ioctl,
-	.compat_ioctl   = vol_cdev_compat_ioctl,
+	.compat_ioctl   = compat_ptr_ioctl,
 };
 
 /* UBI character device operations */
@@ -1126,13 +1096,13 @@
 	.owner          = THIS_MODULE,
 	.llseek         = no_llseek,
 	.unlocked_ioctl = ubi_cdev_ioctl,
-	.compat_ioctl   = ubi_cdev_compat_ioctl,
+	.compat_ioctl   = compat_ptr_ioctl,
 };
 
 /* UBI control character device operations */
 const struct file_operations ubi_ctrl_cdev_operations = {
 	.owner          = THIS_MODULE,
 	.unlocked_ioctl = ctrl_cdev_ioctl,
-	.compat_ioctl   = ctrl_cdev_compat_ioctl,
+	.compat_ioctl   = compat_ptr_ioctl,
 	.llseek		= no_llseek,
 };
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 8a83072..ac2bdba 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -107,6 +107,7 @@
 	pr_err("\tlast_eb_bytes   %d\n", vol->last_eb_bytes);
 	pr_err("\tcorrupted       %d\n", vol->corrupted);
 	pr_err("\tupd_marker      %d\n", vol->upd_marker);
+	pr_err("\tskip_check      %d\n", vol->skip_check);
 
 	if (vol->name_len <= UBI_VOL_NAME_MAX &&
 	    strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
@@ -501,11 +502,9 @@
  */
 int ubi_debugfs_init_dev(struct ubi_device *ubi)
 {
-	int err, n;
 	unsigned long ubi_num = ubi->ubi_num;
-	const char *fname;
-	struct dentry *dent;
 	struct ubi_debug_info *d = &ubi->dbg;
+	int n;
 
 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
 		return 0;
@@ -514,95 +513,52 @@
 		     ubi->ubi_num);
 	if (n == UBI_DFS_DIR_LEN) {
 		/* The array size is too small */
-		fname = UBI_DFS_DIR_NAME;
-		dent = ERR_PTR(-EINVAL);
-		goto out;
+		return -EINVAL;
 	}
 
-	fname = d->dfs_dir_name;
-	dent = debugfs_create_dir(fname, dfs_rootdir);
-	if (IS_ERR_OR_NULL(dent))
-		goto out;
-	d->dfs_dir = dent;
+	d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
 
-	fname = "chk_gen";
-	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
-				   &dfs_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto out_remove;
-	d->dfs_chk_gen = dent;
+	d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir,
+					     (void *)ubi_num, &dfs_fops);
 
-	fname = "chk_io";
-	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
-				   &dfs_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto out_remove;
-	d->dfs_chk_io = dent;
+	d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir,
+					    (void *)ubi_num, &dfs_fops);
 
-	fname = "chk_fastmap";
-	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
-				   &dfs_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto out_remove;
-	d->dfs_chk_fastmap = dent;
+	d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR,
+						 d->dfs_dir, (void *)ubi_num,
+						 &dfs_fops);
 
-	fname = "tst_disable_bgt";
-	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
-				   &dfs_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto out_remove;
-	d->dfs_disable_bgt = dent;
+	d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR,
+						 d->dfs_dir, (void *)ubi_num,
+						 &dfs_fops);
 
-	fname = "tst_emulate_bitflips";
-	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
-				   &dfs_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto out_remove;
-	d->dfs_emulate_bitflips = dent;
+	d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips",
+						      S_IWUSR, d->dfs_dir,
+						      (void *)ubi_num,
+						      &dfs_fops);
 
-	fname = "tst_emulate_io_failures";
-	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
-				   &dfs_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto out_remove;
-	d->dfs_emulate_io_failures = dent;
+	d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures",
+							 S_IWUSR, d->dfs_dir,
+							 (void *)ubi_num,
+							 &dfs_fops);
 
-	fname = "tst_emulate_power_cut";
-	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
-				   &dfs_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto out_remove;
-	d->dfs_emulate_power_cut = dent;
+	d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut",
+						       S_IWUSR, d->dfs_dir,
+						       (void *)ubi_num,
+						       &dfs_fops);
 
-	fname = "tst_emulate_power_cut_min";
-	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
-				   &dfs_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto out_remove;
-	d->dfs_power_cut_min = dent;
+	d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min",
+						   S_IWUSR, d->dfs_dir,
+						   (void *)ubi_num, &dfs_fops);
 
-	fname = "tst_emulate_power_cut_max";
-	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
-				   &dfs_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto out_remove;
-	d->dfs_power_cut_max = dent;
+	d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max",
+						   S_IWUSR, d->dfs_dir,
+						   (void *)ubi_num, &dfs_fops);
 
-	fname = "detailed_erase_block_info";
-	dent = debugfs_create_file(fname, S_IRUSR, d->dfs_dir, (void *)ubi_num,
-				   &eraseblk_count_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto out_remove;
+	debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
+			    (void *)ubi_num, &eraseblk_count_fops);
 
 	return 0;
-
-out_remove:
-	debugfs_remove_recursive(d->dfs_dir);
-out:
-	err = dent ? PTR_ERR(dent) : -ENODEV;
-	ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n",
-		fname, err);
-	return err;
 }
 
 /**
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 5133e1b..0edecfd 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -599,7 +599,7 @@
 	int err, pnum, scrub = 0, vol_id = vol->vol_id;
 	struct ubi_vid_io_buf *vidb;
 	struct ubi_vid_hdr *vid_hdr;
-	uint32_t uninitialized_var(crc);
+	uint32_t crc;
 
 	err = leb_read_lock(ubi, vol_id, lnum);
 	if (err)
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index b486250..28f55f9 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -116,6 +116,21 @@
 	wl_pool->size = 0;
 	pool->size = 0;
 
+	if (ubi->fm_anchor) {
+		wl_tree_add(ubi->fm_anchor, &ubi->free);
+		ubi->free_count++;
+	}
+	if (ubi->fm_next_anchor) {
+		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
+		ubi->free_count++;
+	}
+
+	/* All available PEBs are in ubi->free; now is the time to get
+	 * the best anchor PEBs.
+	 */
+	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
+	ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+
 	for (;;) {
 		enough = 0;
 		if (pool->size < pool->max_size) {
@@ -271,26 +286,20 @@
 int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
 {
 	struct ubi_work *wrk;
-	struct ubi_wl_entry *anchor;
 
 	spin_lock(&ubi->wl_lock);
 
-	/* Do we already have an anchor? */
-	if (ubi->fm_anchor) {
-		spin_unlock(&ubi->wl_lock);
-		return 0;
+	/* Do we have a next anchor? */
+	if (!ubi->fm_next_anchor) {
+		ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+		if (!ubi->fm_next_anchor)
+			/* Tell wear leveling to produce a new anchor PEB */
+			ubi->fm_do_produce_anchor = 1;
 	}
 
-	/* See if we can find an anchor PEB on the list of free PEBs */
-	anchor = ubi_wl_get_fm_peb(ubi, 1);
-	if (anchor) {
-		ubi->fm_anchor = anchor;
-		spin_unlock(&ubi->wl_lock);
-		return 0;
-	}
-
-	/* No luck, trigger wear leveling to produce a new anchor PEB */
-	ubi->fm_do_produce_anchor = 1;
+	/* Do wear leveling to get a new anchor PEB or check the
+	 * existing next anchor candidate.
+	 */
 	if (ubi->wl_scheduled) {
 		spin_unlock(&ubi->wl_lock);
 		return 0;
@@ -372,6 +381,11 @@
 		ubi->fm_anchor = NULL;
 	}
 
+	if (ubi->fm_next_anchor) {
+		return_unused_peb(ubi, ubi->fm_next_anchor);
+		ubi->fm_next_anchor = NULL;
+	}
+
 	if (ubi->fm) {
 		for (i = 0; i < ubi->fm->used_blocks; i++)
 			kfree(ubi->fm->e[i]);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 53f448e..022af59 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1220,6 +1220,17 @@
 		fm_pos += sizeof(*fec);
 		ubi_assert(fm_pos <= ubi->fm_size);
 	}
+	if (ubi->fm_next_anchor) {
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+		fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
+		set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
+		fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
+
+		free_peb_count++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= ubi->fm_size);
+	}
 	fmh->free_peb_count = cpu_to_be32(free_peb_count);
 
 	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index b57b84f..14d890b 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1297,7 +1297,7 @@
 	if (!ubi_dbg_chk_io(ubi))
 		return 0;
 
-	buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+	buf1 = __vmalloc(len, GFP_NOFS);
 	if (!buf1) {
 		ubi_err(ubi, "cannot allocate memory to check writes");
 		return 0;
@@ -1361,7 +1361,7 @@
 	if (!ubi_dbg_chk_io(ubi))
 		return 0;
 
-	buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+	buf = __vmalloc(len, GFP_NOFS);
 	if (!buf) {
 		ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
 		return 0;
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index b5fe8f8..386db05 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -498,6 +498,6 @@
 struct ubi_fm_eba {
 	__be32 magic;
 	__be32 reserved_pebs;
-	__be32 pnum[0];
+	__be32 pnum[];
 } __packed;
 #endif /* !__UBI_MEDIA_H__ */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index a173eb7..c2da771 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -26,7 +26,7 @@
 #include <linux/notifier.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/ubi.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
 
 #include "ubi-media.h"
 
@@ -491,7 +491,8 @@
  * @fm_work: fastmap work queue
  * @fm_work_scheduled: non-zero if fastmap work was scheduled
  * @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The next anchor PEB to use for fastmap
+ * @fm_anchor: The new anchor PEB used during fastmap update
+ * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
  * @fm_do_produce_anchor: If true produce an anchor PEB in wl
  *
  * @used: RB-tree of used physical eraseblocks
@@ -602,6 +603,7 @@
 	int fm_work_scheduled;
 	int fast_attach;
 	struct ubi_wl_entry *fm_anchor;
+	struct ubi_wl_entry *fm_next_anchor;
 	int fm_do_produce_anchor;
 
 	/* Wear-leveling sub-system's stuff */
@@ -950,6 +952,7 @@
 int ubi_notify_all(struct ubi_device *ubi, int ntype,
 		   struct notifier_block *nb);
 int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_all_volumes(struct ubi_device *ubi);
 void ubi_free_internal_volumes(struct ubi_device *ubi);
 
 /* kapi.c */
@@ -970,7 +973,7 @@
 void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol);
 #else
 static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
-int static inline ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
+static inline int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
 static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {}
 #endif
 
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 53d8ab5..f700f0e 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -782,7 +782,7 @@
  */
 int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
 {
-	int i, err;
+	int err;
 	struct ubi_ainf_volume *av;
 
 	empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
@@ -851,11 +851,7 @@
 
 out_free:
 	vfree(ubi->vtbl);
-	for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
-		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
-		kfree(ubi->volumes[i]);
-		ubi->volumes[i] = NULL;
-	}
+	ubi_free_all_volumes(ubi);
 	return err;
 }
 
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 7def041..7847de7 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -319,7 +319,7 @@
 					  struct rb_root *root, int diff)
 {
 	struct rb_node *p;
-	struct ubi_wl_entry *e, *prev_e = NULL;
+	struct ubi_wl_entry *e;
 	int max;
 
 	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
@@ -334,7 +334,6 @@
 			p = p->rb_left;
 		else {
 			p = p->rb_right;
-			prev_e = e;
 			e = e1;
 		}
 	}
@@ -688,20 +687,27 @@
 	}
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
+	e1 = find_anchor_wl_entry(&ubi->used);
+	if (e1 && ubi->fm_next_anchor &&
+	    (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+		ubi->fm_do_produce_anchor = 1;
+		/* fm_next_anchor is no longer considered a good anchor
+		 * candidate.
+		 * Setting it to NULL also prevents multiple wear-leveling
+		 * checks of this PEB.
+		 */
+		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
+		ubi->fm_next_anchor = NULL;
+		ubi->free_count++;
+	}
+
 	if (ubi->fm_do_produce_anchor) {
-		e1 = find_anchor_wl_entry(&ubi->used);
 		if (!e1)
 			goto out_cancel;
 		e2 = get_peb_for_wl(ubi);
 		if (!e2)
 			goto out_cancel;
 
-		/*
-		 * Anchor move within the anchor area is useless.
-		 */
-		if (e2->pnum < UBI_FM_MAX_START)
-			goto out_cancel;
-
 		self_check_in_wl_tree(ubi, e1, &ubi->used);
 		rb_erase(&e1->u.rb, &ubi->used);
 		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
@@ -1080,8 +1086,12 @@
 	if (!err) {
 		spin_lock(&ubi->wl_lock);
 
-		if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
-			ubi->fm_anchor = e;
+		if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
+		    e->pnum < UBI_FM_MAX_START) {
+			/* Abort anchor production; if needed, it will be
+			 * enabled again by the wear leveling started below.
+			 */
+			ubi->fm_next_anchor = e;
 			ubi->fm_do_produce_anchor = 0;
 		} else {
 			wl_tree_add(e, &ubi->free);
@@ -1889,7 +1899,8 @@
 		goto out_free;
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
-	ubi_ensure_anchor_pebs(ubi);
+	if (!ubi->ro_mode && !ubi->fm_disabled)
+		ubi_ensure_anchor_pebs(ubi);
 #endif
 	return 0;