Linux v4.19.13 snapshot of the include/linux/mtd/ headers, presented as new-file diffs.
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
new file mode 100644
index 0000000..3102bd7
--- /dev/null
+++ b/include/linux/mtd/bbm.h
@@ -0,0 +1,169 @@
+/*
+ *  NAND family Bad Block Management (BBM) header file
+ *    - Bad Block Table (BBT) implementation
+ *
+ *  Copyright © 2005 Samsung Electronics
+ *  Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ *  Copyright © 2000-2005
+ *  Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+#ifndef __LINUX_MTD_BBM_H
+#define __LINUX_MTD_BBM_H
+
+/* The maximum number of NAND chips in an array */
+#define NAND_MAX_CHIPS		8
+
+/**
+ * struct nand_bbt_descr - bad block table descriptor
+ * @options:	options for this descriptor
+ * @pages:	the page(s) where we find the bbt, used with option BBT_ABSPAGE
+ *		when the bbt is searched, the pages of the found bbts are
+ *		stored here. It's an array and currently supports up to 8 chips
+ * @offs:	offset of the pattern in the oob area of the page
+ * @veroffs:	offset of the bbt version counter in the oob area of the page
+ * @version:	version read from the bbt page during scan
+ * @len:	length of the pattern, if 0 no pattern check is performed
+ * @maxblocks:	maximum number of blocks to search for a bbt. This number of
+ *		blocks is reserved at the end of the device where the tables are
+ *		written.
+ * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
+ *              bad) block in the stored bbt
+ * @pattern:	pattern to identify bad block table or factory marked good /
+ *		bad blocks, can be NULL, if len = 0
+ *
+ * Descriptor for the bad block table marker and the descriptor for the
+ * pattern which identifies good and bad blocks. The assumption is made
+ * that the pattern and the version count are always located in the oob area
+ * of the first block.
+ */
+struct nand_bbt_descr {
+	int options;
+	int pages[NAND_MAX_CHIPS];
+	int offs;
+	int veroffs;
+	uint8_t version[NAND_MAX_CHIPS];
+	int len;
+	int maxblocks;
+	int reserved_block_code;
+	uint8_t *pattern;
+};
+
+/* Options for the bad block table descriptors */
+
+/* The number of bits used per block in the bbt on the device */
+#define NAND_BBT_NRBITS_MSK	0x0000000F
+#define NAND_BBT_1BIT		0x00000001
+#define NAND_BBT_2BIT		0x00000002
+#define NAND_BBT_4BIT		0x00000004
+#define NAND_BBT_8BIT		0x00000008
+/* The bad block table is in the last good block of the device */
+#define NAND_BBT_LASTBLOCK	0x00000010
+/* The bbt is at the given page, else we must scan for the bbt */
+#define NAND_BBT_ABSPAGE	0x00000020
+/* bbt is stored per chip on multichip devices */
+#define NAND_BBT_PERCHIP	0x00000080
+/* bbt has a version counter at offset veroffs */
+#define NAND_BBT_VERSION	0x00000100
+/* Create a bbt if none exists */
+#define NAND_BBT_CREATE		0x00000200
+/*
+ * Create an empty BBT with no vendor information. Vendor's information may be
+ * unavailable, for example, if the NAND controller has a different data and OOB
+ * layout or if this information is already purged. Must be used in conjunction
+ * with NAND_BBT_CREATE.
+ */
+#define NAND_BBT_CREATE_EMPTY	0x00000400
+/* Write bbt if necessary */
+#define NAND_BBT_WRITE		0x00002000
+/* Read and write back block contents when writing bbt */
+#define NAND_BBT_SAVECONTENT	0x00004000
+/* Search good / bad pattern on the first and the second page */
+#define NAND_BBT_SCAN2NDPAGE	0x00008000
+/* Search good / bad pattern on the last page of the eraseblock */
+#define NAND_BBT_SCANLASTPAGE	0x00010000
+/*
+ * Use a flash based bad block table. By default, the OOB identifier is saved
+ * in the OOB area. This option is passed to the default bad block table function.
+ */
+#define NAND_BBT_USE_FLASH	0x00020000
+/*
+ * Do not store flash based bad block table marker in the OOB area; store it
+ * in-band.
+ */
+#define NAND_BBT_NO_OOB		0x00040000
+/*
+ * Do not write new bad block markers to OOB; useful, e.g., when ECC covers
+ * entire spare area. Must be used with NAND_BBT_USE_FLASH.
+ */
+#define NAND_BBT_NO_OOB_BBM	0x00080000
+
+/*
+ * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
+ * was allocated dynamically and must be freed in nand_release(). Has no meaning
+ * in nand_chip.bbt_options.
+ */
+#define NAND_BBT_DYNAMICSTRUCT	0x80000000
+
+/* The maximum number of blocks to scan for a bbt */
+#define NAND_BBT_SCAN_MAXBLOCKS	4
+
+/*
+ * Constants for oob configuration
+ */
+#define NAND_SMALL_BADBLOCK_POS		5
+#define NAND_LARGE_BADBLOCK_POS		0
+#define ONENAND_BADBLOCK_POS		0
+
+/*
+ * Bad block scanning errors
+ */
+#define ONENAND_BBT_READ_ERROR		1
+#define ONENAND_BBT_READ_ECC_ERROR	2
+#define ONENAND_BBT_READ_FATAL_ERROR	4
+
+/**
+ * struct bbm_info - [GENERIC] Bad Block Table data structure
+ * @bbt_erase_shift:	[INTERN] number of address bits in a bbt entry
+ * @badblockpos:	[INTERN] position of the bad block marker in the oob area
+ * @options:		options for this descriptor
+ * @bbt:		[INTERN] bad block table pointer
+ * @isbad_bbt:		function to determine if a block is bad
+ * @badblock_pattern:	[REPLACEABLE] bad block scan pattern used for
+ *			initial bad block scan
+ * @priv:		[OPTIONAL] pointer to private bbm data
+ */
+struct bbm_info {
+	int bbt_erase_shift;
+	int badblockpos;
+	int options;
+
+	uint8_t *bbt;
+
+	int (*isbad_bbt)(struct mtd_info *mtd, loff_t ofs, int allowbbt);
+
+	/* TODO Add more NAND specific fields */
+	struct nand_bbt_descr *badblock_pattern;
+
+	void *priv;
+};
+
+/* OneNAND BBT interface */
+extern int onenand_default_bbt(struct mtd_info *mtd);
+
+#endif	/* __LINUX_MTD_BBM_H */
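
As an illustrative sketch (not part of the header above), this is roughly how a NAND driver fills in a struct nand_bbt_descr to scan for factory bad-block markers; the pattern, offset and option values are assumptions modeled on the in-tree scan descriptors:

/* Hypothetical descriptor: a block is treated as good when the first OOB
 * byte of its first and second page reads 0xFF (NAND_BBT_SCAN2NDPAGE). */
static uint8_t scan_ff_pattern[] = { 0xff };

static struct nand_bbt_descr example_badblock_pattern = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs    = NAND_LARGE_BADBLOCK_POS,	/* OOB byte 0 on large-page NAND */
	.len     = 1,
	.pattern = scan_ff_pattern,
};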
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
new file mode 100644
index 0000000..e93837f
--- /dev/null
+++ b/include/linux/mtd/blktrans.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#ifndef __MTD_TRANS_H__
+#define __MTD_TRANS_H__
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
+struct hd_geometry;
+struct mtd_info;
+struct mtd_blktrans_ops;
+struct file;
+struct inode;
+
+struct mtd_blktrans_dev {
+	struct mtd_blktrans_ops *tr;
+	struct list_head list;
+	struct mtd_info *mtd;
+	struct mutex lock;
+	int devnum;
+	bool bg_stop;
+	unsigned long size;
+	int readonly;
+	int open;
+	struct kref ref;
+	struct gendisk *disk;
+	struct attribute_group *disk_attributes;
+	struct workqueue_struct *wq;
+	struct work_struct work;
+	struct request_queue *rq;
+	spinlock_t queue_lock;
+	void *priv;
+	fmode_t file_mode;
+};
+
+struct mtd_blktrans_ops {
+	char *name;
+	int major;
+	int part_bits;
+	int blksize;
+	int blkshift;
+
+	/* Access functions */
+	int (*readsect)(struct mtd_blktrans_dev *dev,
+		    unsigned long block, char *buffer);
+	int (*writesect)(struct mtd_blktrans_dev *dev,
+		     unsigned long block, char *buffer);
+	int (*discard)(struct mtd_blktrans_dev *dev,
+		       unsigned long block, unsigned nr_blocks);
+	void (*background)(struct mtd_blktrans_dev *dev);
+
+	/* Block layer ioctls */
+	int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo);
+	int (*flush)(struct mtd_blktrans_dev *dev);
+
+	/* Called with mtd_table_mutex held; no race with add/remove */
+	int (*open)(struct mtd_blktrans_dev *dev);
+	void (*release)(struct mtd_blktrans_dev *dev);
+
+	/* Called on {de,}registration and on subsequent addition/removal
+	   of devices, with mtd_table_mutex held. */
+	void (*add_mtd)(struct mtd_blktrans_ops *tr, struct mtd_info *mtd);
+	void (*remove_dev)(struct mtd_blktrans_dev *dev);
+
+	struct list_head devs;
+	struct list_head list;
+	struct module *owner;
+};
+
+extern int register_mtd_blktrans(struct mtd_blktrans_ops *tr);
+extern int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr);
+extern int add_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
+extern int del_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
+extern int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev);
+
+
+#endif /* __MTD_TRANS_H__ */
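
A minimal sketch of a translation layer built on this interface, loosely modeled on the read-only mtdblock driver; the names and sector size are illustrative, and a complete driver would also supply add_mtd()/remove_dev() handlers (and include linux/mtd/mtd.h for mtd_read()):

static int example_readsect(struct mtd_blktrans_dev *dev,
			    unsigned long block, char *buf)
{
	size_t retlen;

	/* 512-byte sectors, matching blksize/blkshift below */
	if (mtd_read(dev->mtd, (loff_t)block << 9, 512, &retlen,
		     (u_char *)buf))
		return 1;
	return 0;
}

static struct mtd_blktrans_ops example_tr = {
	.name      = "exblock",		/* device nodes appear as /dev/exblockN */
	.major     = 0,			/* 0 lets the block layer pick a major */
	.part_bits = 0,
	.blksize   = 512,
	.blkshift  = 9,
	.readsect  = example_readsect,
	.owner     = THIS_MODULE,
};

/* register_mtd_blktrans(&example_tr) at module init,
 * deregister_mtd_blktrans(&example_tr) at module exit. */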
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
new file mode 100644
index 0000000..9b57a9b
--- /dev/null
+++ b/include/linux/mtd/cfi.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#ifndef __MTD_CFI_H__
+#define __MTD_CFI_H__
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi_endian.h>
+#include <linux/mtd/xip.h>
+
+#ifdef CONFIG_MTD_CFI_I1
+#define cfi_interleave(cfi) 1
+#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
+#else
+#define cfi_interleave_is_1(cfi) (0)
+#endif
+
+#ifdef CONFIG_MTD_CFI_I2
+# ifdef cfi_interleave
+#  undef cfi_interleave
+#  define cfi_interleave(cfi) ((cfi)->interleave)
+# else
+#  define cfi_interleave(cfi) 2
+# endif
+#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
+#else
+#define cfi_interleave_is_2(cfi) (0)
+#endif
+
+#ifdef CONFIG_MTD_CFI_I4
+# ifdef cfi_interleave
+#  undef cfi_interleave
+#  define cfi_interleave(cfi) ((cfi)->interleave)
+# else
+#  define cfi_interleave(cfi) 4
+# endif
+#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
+#else
+#define cfi_interleave_is_4(cfi) (0)
+#endif
+
+#ifdef CONFIG_MTD_CFI_I8
+# ifdef cfi_interleave
+#  undef cfi_interleave
+#  define cfi_interleave(cfi) ((cfi)->interleave)
+# else
+#  define cfi_interleave(cfi) 8
+# endif
+#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
+#else
+#define cfi_interleave_is_8(cfi) (0)
+#endif
+
+#ifndef cfi_interleave
+#warning No CONFIG_MTD_CFI_Ix selected. No NOR chip support can work.
+static inline int cfi_interleave(void *cfi)
+{
+	BUG();
+	return 0;
+}
+#endif
+
+static inline int cfi_interleave_supported(int i)
+{
+	switch (i) {
+#ifdef CONFIG_MTD_CFI_I1
+	case 1:
+#endif
+#ifdef CONFIG_MTD_CFI_I2
+	case 2:
+#endif
+#ifdef CONFIG_MTD_CFI_I4
+	case 4:
+#endif
+#ifdef CONFIG_MTD_CFI_I8
+	case 8:
+#endif
+		return 1;
+
+	default:
+		return 0;
+	}
+}
+
+
+/* NB: these values must represent the number of bytes needed to match the
+ *     device type (x8, x16, x32).  E.g. a 32-bit device is 4 bytes (4 x 8 bits).
+ *     These numbers are used in calculations.
+ */
+#define CFI_DEVICETYPE_X8  (8 / 8)
+#define CFI_DEVICETYPE_X16 (16 / 8)
+#define CFI_DEVICETYPE_X32 (32 / 8)
+#define CFI_DEVICETYPE_X64 (64 / 8)
+
+
+/* Device Interface Code Assignments from the "Common Flash Memory Interface
+ * Publication 100" dated December 1, 2001.
+ */
+#define CFI_INTERFACE_X8_ASYNC		0x0000
+#define CFI_INTERFACE_X16_ASYNC		0x0001
+#define CFI_INTERFACE_X8_BY_X16_ASYNC	0x0002
+#define CFI_INTERFACE_X32_ASYNC		0x0003
+#define CFI_INTERFACE_X16_BY_X32_ASYNC	0x0005
+#define CFI_INTERFACE_NOT_ALLOWED	0xffff
+
+
+/* NB: We keep these structures in memory in HOST byteorder, except
+ * where individually noted.
+ */
+
+/* Basic Query Structure */
+struct cfi_ident {
+	uint8_t  qry[3];
+	uint16_t P_ID;
+	uint16_t P_ADR;
+	uint16_t A_ID;
+	uint16_t A_ADR;
+	uint8_t  VccMin;
+	uint8_t  VccMax;
+	uint8_t  VppMin;
+	uint8_t  VppMax;
+	uint8_t  WordWriteTimeoutTyp;
+	uint8_t  BufWriteTimeoutTyp;
+	uint8_t  BlockEraseTimeoutTyp;
+	uint8_t  ChipEraseTimeoutTyp;
+	uint8_t  WordWriteTimeoutMax;
+	uint8_t  BufWriteTimeoutMax;
+	uint8_t  BlockEraseTimeoutMax;
+	uint8_t  ChipEraseTimeoutMax;
+	uint8_t  DevSize;
+	uint16_t InterfaceDesc;
+	uint16_t MaxBufWriteSize;
+	uint8_t  NumEraseRegions;
+	uint32_t EraseRegionInfo[0]; /* Not host ordered */
+} __packed;
+
+/* Extended Query Structure for both PRI and ALT */
+
+struct cfi_extquery {
+	uint8_t  pri[3];
+	uint8_t  MajorVersion;
+	uint8_t  MinorVersion;
+} __packed;
+
+/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
+
+struct cfi_pri_intelext {
+	uint8_t  pri[3];
+	uint8_t  MajorVersion;
+	uint8_t  MinorVersion;
+	uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
+				    block follows - FIXME - not currently supported */
+	uint8_t  SuspendCmdSupport;
+	uint16_t BlkStatusRegMask;
+	uint8_t  VccOptimal;
+	uint8_t  VppOptimal;
+	uint8_t  NumProtectionFields;
+	uint16_t ProtRegAddr;
+	uint8_t  FactProtRegSize;
+	uint8_t  UserProtRegSize;
+	uint8_t  extra[0];
+} __packed;
+
+struct cfi_intelext_otpinfo {
+	uint32_t ProtRegAddr;
+	uint16_t FactGroups;
+	uint8_t  FactProtRegSize;
+	uint16_t UserGroups;
+	uint8_t  UserProtRegSize;
+} __packed;
+
+struct cfi_intelext_blockinfo {
+	uint16_t NumIdentBlocks;
+	uint16_t BlockSize;
+	uint16_t MinBlockEraseCycles;
+	uint8_t  BitsPerCell;
+	uint8_t  BlockCap;
+} __packed;
+
+struct cfi_intelext_regioninfo {
+	uint16_t NumIdentPartitions;
+	uint8_t  NumOpAllowed;
+	uint8_t  NumOpAllowedSimProgMode;
+	uint8_t  NumOpAllowedSimEraMode;
+	uint8_t  NumBlockTypes;
+	struct cfi_intelext_blockinfo BlockTypes[1];
+} __packed;
+
+struct cfi_intelext_programming_regioninfo {
+	uint8_t  ProgRegShift;
+	uint8_t  Reserved1;
+	uint8_t  ControlValid;
+	uint8_t  Reserved2;
+	uint8_t  ControlInvalid;
+	uint8_t  Reserved3;
+} __packed;
+
+/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */
+
+struct cfi_pri_amdstd {
+	uint8_t  pri[3];
+	uint8_t  MajorVersion;
+	uint8_t  MinorVersion;
+	uint8_t  SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
+	uint8_t  EraseSuspend;
+	uint8_t  BlkProt;
+	uint8_t  TmpBlkUnprotect;
+	uint8_t  BlkProtUnprot;
+	uint8_t  SimultaneousOps;
+	uint8_t  BurstMode;
+	uint8_t  PageMode;
+	uint8_t  VppMin;
+	uint8_t  VppMax;
+	uint8_t  TopBottom;
+} __packed;
+
+/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */
+
+struct cfi_pri_atmel {
+	uint8_t pri[3];
+	uint8_t MajorVersion;
+	uint8_t MinorVersion;
+	uint8_t Features;
+	uint8_t BottomBoot;
+	uint8_t BurstMode;
+	uint8_t PageMode;
+} __packed;
+
+struct cfi_pri_query {
+	uint8_t  NumFields;
+	uint32_t ProtField[1]; /* Not host ordered */
+} __packed;
+
+struct cfi_bri_query {
+	uint8_t  PageModeReadCap;
+	uint8_t  NumFields;
+	uint32_t ConfField[1]; /* Not host ordered */
+} __packed;
+
+#define P_ID_NONE               0x0000
+#define P_ID_INTEL_EXT          0x0001
+#define P_ID_AMD_STD            0x0002
+#define P_ID_INTEL_STD          0x0003
+#define P_ID_AMD_EXT            0x0004
+#define P_ID_WINBOND            0x0006
+#define P_ID_ST_ADV             0x0020
+#define P_ID_MITSUBISHI_STD     0x0100
+#define P_ID_MITSUBISHI_EXT     0x0101
+#define P_ID_SST_PAGE           0x0102
+#define P_ID_SST_OLD            0x0701
+#define P_ID_INTEL_PERFORMANCE  0x0200
+#define P_ID_INTEL_DATA         0x0210
+#define P_ID_RESERVED           0xffff
+
+
+#define CFI_MODE_CFI	1
+#define CFI_MODE_JEDEC	0
+
+struct cfi_private {
+	uint16_t cmdset;
+	void *cmdset_priv;
+	int interleave;
+	int device_type;
+	int cfi_mode;		/* Are we a JEDEC device pretending to be CFI? */
+	int addr_unlock1;
+	int addr_unlock2;
+	struct mtd_info *(*cmdset_setup)(struct map_info *);
+	struct cfi_ident *cfiq; /* For now only one. We insist that all devs
+				  must be of the same type. */
+	int mfr, id;
+	int numchips;
+	map_word sector_erase_cmd;
+	unsigned long chipshift; /* Because they're of the same type */
+	const char *im_name;	 /* inter_module name for cmdset_setup */
+	struct flchip chips[0];  /* per-chip data structure for each chip */
+};
+
+uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
+				struct map_info *map, struct cfi_private *cfi);
+
+map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi);
+#define CMD(x)  cfi_build_cmd((x), map, cfi)
+
+unsigned long cfi_merge_status(map_word val, struct map_info *map,
+					   struct cfi_private *cfi);
+#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
+
+uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
+				struct map_info *map, struct cfi_private *cfi,
+				int type, map_word *prev_val);
+
+static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
+{
+	map_word val = map_read(map, addr);
+
+	if (map_bankwidth_is_1(map)) {
+		return val.x[0];
+	} else if (map_bankwidth_is_2(map)) {
+		return cfi16_to_cpu(map, val.x[0]);
+	} else {
+		/* No point in a 64-bit byteswap since that would just be
+		   swapping the responses from different chips, and we are
+		   only interested in one chip (a representative sample) */
+		return cfi32_to_cpu(map, val.x[0]);
+	}
+}
+
+static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
+{
+	map_word val = map_read(map, addr);
+
+	if (map_bankwidth_is_1(map)) {
+		return val.x[0] & 0xff;
+	} else if (map_bankwidth_is_2(map)) {
+		return cfi16_to_cpu(map, val.x[0]);
+	} else {
+		/* No point in a 64-bit byteswap since that would just be
+		   swapping the responses from different chips, and we are
+		   only interested in one chip (a representative sample) */
+		return cfi32_to_cpu(map, val.x[0]);
+	}
+}
+
+void cfi_udelay(int us);
+
+int __xipram cfi_qry_present(struct map_info *map, __u32 base,
+			     struct cfi_private *cfi);
+int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
+			     struct cfi_private *cfi);
+void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
+			       struct cfi_private *cfi);
+
+struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
+			     const char* name);
+struct cfi_fixup {
+	uint16_t mfr;
+	uint16_t id;
+	void (*fixup)(struct mtd_info *mtd);
+};
+
+#define CFI_MFR_ANY		0xFFFF
+#define CFI_ID_ANY		0xFFFF
+#define CFI_MFR_CONTINUATION	0x007F
+
+#define CFI_MFR_AMD		0x0001
+#define CFI_MFR_AMIC		0x0037
+#define CFI_MFR_ATMEL		0x001F
+#define CFI_MFR_EON		0x001C
+#define CFI_MFR_FUJITSU		0x0004
+#define CFI_MFR_HYUNDAI		0x00AD
+#define CFI_MFR_INTEL		0x0089
+#define CFI_MFR_MACRONIX	0x00C2
+#define CFI_MFR_NEC		0x0010
+#define CFI_MFR_PMC		0x009D
+#define CFI_MFR_SAMSUNG		0x00EC
+#define CFI_MFR_SHARP		0x00B0
+#define CFI_MFR_SST		0x00BF
+#define CFI_MFR_ST		0x0020 /* STMicroelectronics */
+#define CFI_MFR_TOSHIBA		0x0098
+#define CFI_MFR_WINBOND		0x00DA
+
+void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
+
+typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
+			      unsigned long adr, int len, void *thunk);
+
+int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
+	loff_t ofs, size_t len, void *thunk);
+
+
+#endif /* __MTD_CFI_H__ */
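
An illustrative sketch (not defined by this header) of how the command helpers above are typically used: the two-cycle unlock plus command sequence that the AMD/Fujitsu cmdset driver issues, assuming map, cfi and chip come from an already-probed device:

/* Put an AMD/Fujitsu-style chip into autoselect (ID read) mode. */
static void example_enter_autoselect(struct map_info *map,
				     struct cfi_private *cfi,
				     struct flchip *chip)
{
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
}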
diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h
new file mode 100644
index 0000000..b97a625
--- /dev/null
+++ b/include/linux/mtd/cfi_endian.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#include <asm/byteorder.h>
+
+#define CFI_HOST_ENDIAN 1
+#define CFI_LITTLE_ENDIAN 2
+#define CFI_BIG_ENDIAN 3
+
+#if !defined(CONFIG_MTD_CFI_ADV_OPTIONS) || defined(CONFIG_MTD_CFI_NOSWAP)
+#define CFI_DEFAULT_ENDIAN CFI_HOST_ENDIAN
+#elif defined(CONFIG_MTD_CFI_LE_BYTE_SWAP)
+#define CFI_DEFAULT_ENDIAN CFI_LITTLE_ENDIAN
+#elif defined(CONFIG_MTD_CFI_BE_BYTE_SWAP)
+#define CFI_DEFAULT_ENDIAN CFI_BIG_ENDIAN
+#else
+#error No CFI endianness defined
+#endif
+
+#define cfi_default(s) ((s)?:CFI_DEFAULT_ENDIAN)
+#define cfi_be(s) (cfi_default(s) == CFI_BIG_ENDIAN)
+#define cfi_le(s) (cfi_default(s) == CFI_LITTLE_ENDIAN)
+#define cfi_host(s) (cfi_default(s) == CFI_HOST_ENDIAN)
+
+#define cpu_to_cfi8(map, x) (x)
+#define cfi8_to_cpu(map, x) (x)
+#define cpu_to_cfi16(map, x) _cpu_to_cfi(16, (map)->swap, (x))
+#define cpu_to_cfi32(map, x) _cpu_to_cfi(32, (map)->swap, (x))
+#define cpu_to_cfi64(map, x) _cpu_to_cfi(64, (map)->swap, (x))
+#define cfi16_to_cpu(map, x) _cfi_to_cpu(16, (map)->swap, (x))
+#define cfi32_to_cpu(map, x) _cfi_to_cpu(32, (map)->swap, (x))
+#define cfi64_to_cpu(map, x) _cfi_to_cpu(64, (map)->swap, (x))
+
+#define _cpu_to_cfi(w, s, x) (cfi_host(s)?(x):_swap_to_cfi(w, s, x))
+#define _cfi_to_cpu(w, s, x) (cfi_host(s)?(x):_swap_to_cpu(w, s, x))
+#define _swap_to_cfi(w, s, x) (cfi_be(s)?cpu_to_be##w(x):cpu_to_le##w(x))
+#define _swap_to_cpu(w, s, x) (cfi_be(s)?be##w##_to_cpu(x):le##w##_to_cpu(x))
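
A brief, assumed illustration of how these macros resolve: with map->swap left at 0, cfi_default() falls back to the Kconfig-selected CFI_DEFAULT_ENDIAN, while an explicit per-map value forces swapping for that map only (struct map_info comes from linux/mtd/map.h):

/* Hypothetical map that wants big-endian CFI data regardless of the
 * kernel-wide default; cfi16_to_cpu(&example_map, x) then behaves as
 * be16_to_cpu(x). */
static struct map_info example_map = {
	.name      = "example-nor",
	.bankwidth = 2,
	.swap      = CFI_BIG_ENDIAN,
};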
diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h
new file mode 100644
index 0000000..ccdbe93
--- /dev/null
+++ b/include/linux/mtd/concat.h
@@ -0,0 +1,34 @@
+/*
+ * MTD device concatenation layer definitions
+ *
+ * Copyright © 2002      Robert Kaiser <rkaiser@sysgo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#ifndef MTD_CONCAT_H
+#define MTD_CONCAT_H
+
+
+struct mtd_info *mtd_concat_create(
+    struct mtd_info *subdev[],  /* subdevices to concatenate */
+    int num_devs,               /* number of subdevices      */
+    const char *name);          /* name for the new device   */
+
+void mtd_concat_destroy(struct mtd_info *mtd);
+
+#endif
+
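A short sketch of the intended usage (names and error handling are illustrative): glue two already-probed MTDs into a single logical device and register it with the MTD core:

static int example_register_concat(struct mtd_info *mtd0,
				   struct mtd_info *mtd1)
{
	struct mtd_info *subdev[2] = { mtd0, mtd1 };
	struct mtd_info *combined;

	combined = mtd_concat_create(subdev, 2, "example-concat");
	if (!combined)
		return -ENXIO;

	/* mtd_device_register() comes from linux/mtd/mtd.h */
	return mtd_device_register(combined, NULL, 0);
}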
diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h
new file mode 100644
index 0000000..407d1e5
--- /dev/null
+++ b/include/linux/mtd/doc2000.h
@@ -0,0 +1,220 @@
+/*
+ * Linux driver for Disk-On-Chip devices
+ *
+ * Copyright © 1999 Machine Vision Holdings, Inc.
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 2002-2003 Greg Ungerer <gerg@snapgear.com>
+ * Copyright © 2002-2003 SnapGear Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#ifndef __MTD_DOC2000_H__
+#define __MTD_DOC2000_H__
+
+#include <linux/mtd/mtd.h>
+#include <linux/mutex.h>
+
+#define DoC_Sig1 0
+#define DoC_Sig2 1
+
+#define DoC_ChipID		0x1000
+#define DoC_DOCStatus		0x1001
+#define DoC_DOCControl		0x1002
+#define DoC_FloorSelect		0x1003
+#define DoC_CDSNControl		0x1004
+#define DoC_CDSNDeviceSelect 	0x1005
+#define DoC_ECCConf 		0x1006
+#define DoC_2k_ECCStatus	0x1007
+
+#define DoC_CDSNSlowIO		0x100d
+#define DoC_ECCSyndrome0	0x1010
+#define DoC_ECCSyndrome1	0x1011
+#define DoC_ECCSyndrome2	0x1012
+#define DoC_ECCSyndrome3	0x1013
+#define DoC_ECCSyndrome4	0x1014
+#define DoC_ECCSyndrome5	0x1015
+#define DoC_AliasResolution 	0x101b
+#define DoC_ConfigInput		0x101c
+#define DoC_ReadPipeInit 	0x101d
+#define DoC_WritePipeTerm 	0x101e
+#define DoC_LastDataRead 	0x101f
+#define DoC_NOP 		0x1020
+
+#define DoC_Mil_CDSN_IO 	0x0800
+#define DoC_2k_CDSN_IO 		0x1800
+
+#define DoC_Mplus_NOP			0x1002
+#define DoC_Mplus_AliasResolution	0x1004
+#define DoC_Mplus_DOCControl		0x1006
+#define DoC_Mplus_AccessStatus		0x1008
+#define DoC_Mplus_DeviceSelect		0x1008
+#define DoC_Mplus_Configuration		0x100a
+#define DoC_Mplus_OutputControl		0x100c
+#define DoC_Mplus_FlashControl		0x1020
+#define DoC_Mplus_FlashSelect 		0x1022
+#define DoC_Mplus_FlashCmd		0x1024
+#define DoC_Mplus_FlashAddress		0x1026
+#define DoC_Mplus_FlashData0		0x1028
+#define DoC_Mplus_FlashData1		0x1029
+#define DoC_Mplus_ReadPipeInit		0x102a
+#define DoC_Mplus_LastDataRead		0x102c
+#define DoC_Mplus_LastDataRead1		0x102d
+#define DoC_Mplus_WritePipeTerm 	0x102e
+#define DoC_Mplus_ECCSyndrome0		0x1040
+#define DoC_Mplus_ECCSyndrome1		0x1041
+#define DoC_Mplus_ECCSyndrome2		0x1042
+#define DoC_Mplus_ECCSyndrome3		0x1043
+#define DoC_Mplus_ECCSyndrome4		0x1044
+#define DoC_Mplus_ECCSyndrome5		0x1045
+#define DoC_Mplus_ECCConf 		0x1046
+#define DoC_Mplus_Toggle		0x1046
+#define DoC_Mplus_DownloadStatus	0x1074
+#define DoC_Mplus_CtrlConfirm		0x1076
+#define DoC_Mplus_Power			0x1fff
+
+/* How to access the device?
+ * On ARM, it'll be mmap'd directly with 32-bit wide accesses.
+ * On PPC, it's mmap'd and 16-bit wide.
+ * Others use readb/writeb
+ */
+#if defined(__arm__)
+static inline u8 ReadDOC_(u32 __iomem *addr, unsigned long reg)
+{
+	return __raw_readl(addr + reg);
+}
+static inline void WriteDOC_(u8 data, u32 __iomem *addr, unsigned long reg)
+{
+	__raw_writel(data, addr + reg);
+	wmb();
+}
+#define DOC_IOREMAP_LEN 0x8000
+#elif defined(__ppc__)
+static inline u8 ReadDOC_(u16 __iomem *addr, unsigned long reg)
+{
+	return __raw_readw(addr + reg);
+}
+static inline void WriteDOC_(u8 data, u16 __iomem *addr, unsigned long reg)
+{
+	__raw_writew(data, addr + reg);
+	wmb();
+}
+#define DOC_IOREMAP_LEN 0x4000
+#else
+#define ReadDOC_(adr, reg)      readb((void __iomem *)(adr) + (reg))
+#define WriteDOC_(d, adr, reg)  writeb(d, (void __iomem *)(adr) + (reg))
+#define DOC_IOREMAP_LEN 0x2000
+
+#endif
+
+#if defined(__i386__) || defined(__x86_64__)
+#define USE_MEMCPY
+#endif
+
+/* These are provided to directly use the DoC_xxx defines */
+#define ReadDOC(adr, reg)      ReadDOC_(adr,DoC_##reg)
+#define WriteDOC(d, adr, reg)  WriteDOC_(d,adr,DoC_##reg)
+
+#define DOC_MODE_RESET 		0
+#define DOC_MODE_NORMAL 	1
+#define DOC_MODE_RESERVED1 	2
+#define DOC_MODE_RESERVED2 	3
+
+#define DOC_MODE_CLR_ERR 	0x80
+#define	DOC_MODE_RST_LAT	0x10
+#define	DOC_MODE_BDECT		0x08
+#define DOC_MODE_MDWREN 	0x04
+
+#define DOC_ChipID_Doc2k 	0x20
+#define DOC_ChipID_Doc2kTSOP 	0x21	/* internal number for MTD */
+#define DOC_ChipID_DocMil 	0x30
+#define DOC_ChipID_DocMilPlus32	0x40
+#define DOC_ChipID_DocMilPlus16	0x41
+
+#define CDSN_CTRL_FR_B 		0x80
+#define CDSN_CTRL_FR_B0		0x40
+#define CDSN_CTRL_FR_B1		0x80
+
+#define CDSN_CTRL_ECC_IO 	0x20
+#define CDSN_CTRL_FLASH_IO 	0x10
+#define CDSN_CTRL_WP 		0x08
+#define CDSN_CTRL_ALE 		0x04
+#define CDSN_CTRL_CLE 		0x02
+#define CDSN_CTRL_CE 		0x01
+
+#define DOC_ECC_RESET 		0
+#define DOC_ECC_ERROR 		0x80
+#define DOC_ECC_RW 		0x20
+#define DOC_ECC__EN 		0x08
+#define DOC_TOGGLE_BIT 		0x04
+#define DOC_ECC_RESV 		0x02
+#define DOC_ECC_IGNORE		0x01
+
+#define DOC_FLASH_CE		0x80
+#define DOC_FLASH_WP		0x40
+#define DOC_FLASH_BANK		0x02
+
+/* We have to also set the reserved bit 1 for enable */
+#define DOC_ECC_EN (DOC_ECC__EN | DOC_ECC_RESV)
+#define DOC_ECC_DIS (DOC_ECC_RESV)
+
+struct Nand {
+	char floor, chip;
+	unsigned long curadr;
+	unsigned char curmode;
+	/* Also some erase/write/pipeline info when we get that far */
+};
+
+#define MAX_FLOORS 4
+#define MAX_CHIPS 4
+
+#define MAX_FLOORS_MIL 1
+#define MAX_CHIPS_MIL 1
+
+#define MAX_FLOORS_MPLUS 2
+#define MAX_CHIPS_MPLUS 1
+
+#define ADDR_COLUMN 1
+#define ADDR_PAGE 2
+#define ADDR_COLUMN_PAGE 3
+
+struct DiskOnChip {
+	unsigned long physadr;
+	void __iomem *virtadr;
+	unsigned long totlen;
+	unsigned char ChipID; /* Type of DiskOnChip */
+	int ioreg;
+
+	unsigned long mfr; /* Flash IDs - only one type of flash per device */
+	unsigned long id;
+	int chipshift;
+	char page256;
+	char pageadrlen;
+	char interleave; /* Internal interleaving - Millennium Plus style */
+	unsigned long erasesize;
+
+	int curfloor;
+	int curchip;
+
+	int numchips;
+	struct Nand *chips;
+	struct mtd_info *nextdoc;
+	struct mutex lock;
+};
+
+int doc_decode_ecc(unsigned char sector[512], unsigned char ecc1[6]);
+
+#endif /* __MTD_DOC2000_H__ */
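
An illustrative use of the accessor macros above, as a sketch only (it assumes a DiskOnChip whose register window is already ioremapped into doc->virtadr): reset the ECC logic and read back the chip ID register:

static u8 example_read_chipid(struct DiskOnChip *doc)
{
	/* ReadDOC/WriteDOC expand to ReadDOC_/WriteDOC_ with the DoC_xxx
	 * register offsets defined above. */
	WriteDOC(DOC_ECC_RESET, doc->virtadr, ECCConf);
	return ReadDOC(doc->virtadr, ChipID);
}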
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
new file mode 100644
index 0000000..3529683
--- /dev/null
+++ b/include/linux/mtd/flashchip.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright © 2000      Red Hat UK Limited
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#ifndef __MTD_FLASHCHIP_H__
+#define __MTD_FLASHCHIP_H__
+
+/* For spinlocks. sched.h includes spinlock.h from whichever directory it
+ * happens to be in - so we don't have to care whether we're on 2.2, which
+ * has asm/spinlock.h, or 2.4, which has linux/spinlock.h
+ */
+#include <linux/sched.h>
+#include <linux/mutex.h>
+
+typedef enum {
+	FL_READY,
+	FL_STATUS,
+	FL_CFI_QUERY,
+	FL_JEDEC_QUERY,
+	FL_ERASING,
+	FL_ERASE_SUSPENDING,
+	FL_ERASE_SUSPENDED,
+	FL_WRITING,
+	FL_WRITING_TO_BUFFER,
+	FL_OTP_WRITE,
+	FL_WRITE_SUSPENDING,
+	FL_WRITE_SUSPENDED,
+	FL_PM_SUSPENDED,
+	FL_SYNCING,
+	FL_UNLOADING,
+	FL_LOCKING,
+	FL_UNLOCKING,
+	FL_POINT,
+	FL_XIP_WHILE_ERASING,
+	FL_XIP_WHILE_WRITING,
+	FL_SHUTDOWN,
+	/* These 2 come from nand_state_t, which has been unified here */
+	FL_READING,
+	FL_CACHEDPRG,
+	/* These 4 come from onenand_state_t, which has been unified here */
+	FL_RESETING,
+	FL_OTPING,
+	FL_PREPARING_ERASE,
+	FL_VERIFYING_ERASE,
+
+	FL_UNKNOWN
+} flstate_t;
+
+
+
+/* NOTE: confusingly, this can be used to refer to more than one chip at a time,
+   if they're interleaved.  This can even refer to individual partitions on
+   the same physical chip when present. */
+
+struct flchip {
+	unsigned long start; /* Offset within the map */
+	//	unsigned long len;
+	/* We omit len for now, because when we group them together
+	   we insist that they're all of the same size, and the chip size
+	   is held in the next level up. If we get more versatile later,
+	   it'll make it a damn sight harder to find which chip we want from
+	   a given offset, and we'll want to add the per-chip length field
+	   back in.
+	*/
+	int ref_point_counter;
+	flstate_t state;
+	flstate_t oldstate;
+
+	unsigned int write_suspended:1;
+	unsigned int erase_suspended:1;
+	unsigned long in_progress_block_addr;
+	unsigned long in_progress_block_mask;
+
+	struct mutex mutex;
+	wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
+			     to be ready */
+	int word_write_time;
+	int buffer_write_time;
+	int erase_time;
+
+	int word_write_time_max;
+	int buffer_write_time_max;
+	int erase_time_max;
+
+	void *priv;
+};
+
+/* This is used to handle contention on write/erase operations
+   between partitions of the same physical chip. */
+struct flchip_shared {
+	struct mutex lock;
+	struct flchip *writing;
+	struct flchip *erasing;
+};
+
+
+#endif /* __MTD_FLASHCHIP_H__ */
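
A sketch of the usual wait idiom built on struct flchip, modeled on what the CFI chip drivers do rather than anything defined here: with chip->mutex held, sleep on chip->wq until the state machine returns to FL_READY, dropping the mutex while asleep:

static void example_wait_for_ready(struct flchip *chip)
{
	DECLARE_WAITQUEUE(wait, current);

	while (chip->state != FL_READY) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
	}
}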
diff --git a/include/linux/mtd/ftl.h b/include/linux/mtd/ftl.h
new file mode 100644
index 0000000..0555f7a
--- /dev/null
+++ b/include/linux/mtd/ftl.h
@@ -0,0 +1,74 @@
+/*
+ * Derived from (and probably identical to):
+ * ftl.h 1.7 1999/10/25 20:23:17
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>.  Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds.  All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above.  If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL.  If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_FTL_H
+#define _LINUX_FTL_H
+
+typedef struct erase_unit_header_t {
+    uint8_t	LinkTargetTuple[5];
+    uint8_t	DataOrgTuple[10];
+    uint8_t	NumTransferUnits;
+    uint32_t	EraseCount;
+    uint16_t	LogicalEUN;
+    uint8_t	BlockSize;
+    uint8_t	EraseUnitSize;
+    uint16_t	FirstPhysicalEUN;
+    uint16_t	NumEraseUnits;
+    uint32_t	FormattedSize;
+    uint32_t	FirstVMAddress;
+    uint16_t	NumVMPages;
+    uint8_t	Flags;
+    uint8_t	Code;
+    uint32_t	SerialNumber;
+    uint32_t	AltEUHOffset;
+    uint32_t	BAMOffset;
+    uint8_t	Reserved[12];
+    uint8_t	EndTuple[2];
+} erase_unit_header_t;
+
+/* Flags in erase_unit_header_t */
+#define HIDDEN_AREA		0x01
+#define REVERSE_POLARITY	0x02
+#define DOUBLE_BAI		0x04
+
+/* Definitions for block allocation information */
+
+#define BLOCK_FREE(b)		((b) == 0xffffffff)
+#define BLOCK_DELETED(b)	(((b) == 0) || ((b) == 0xfffffffe))
+
+#define BLOCK_TYPE(b)		((b) & 0x7f)
+#define BLOCK_ADDRESS(b)	((b) & ~0x7f)
+#define BLOCK_NUMBER(b)		((b) >> 9)
+#define BLOCK_CONTROL		0x30
+#define BLOCK_DATA		0x40
+#define BLOCK_REPLACEMENT	0x60
+#define BLOCK_BAD		0x70
+
+#endif /* _LINUX_FTL_H */
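
A worked example of the block allocation macros above (the entry value is invented for illustration): each 32-bit BAM entry carries the block type in its low 7 bits and an address in the bits above them:

/* Classify one BAM (Block Allocation Map) entry.
 * E.g. for bam == 0x00002440: BLOCK_TYPE() == 0x40 (BLOCK_DATA),
 * BLOCK_ADDRESS() == 0x2400 and BLOCK_NUMBER() == 0x12. */
static int example_bam_is_valid_data(uint32_t bam)
{
	if (BLOCK_FREE(bam) || BLOCK_DELETED(bam))
		return 0;
	return BLOCK_TYPE(bam) == BLOCK_DATA;
}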
diff --git a/include/linux/mtd/gen_probe.h b/include/linux/mtd/gen_probe.h
new file mode 100644
index 0000000..2c45605
--- /dev/null
+++ b/include/linux/mtd/gen_probe.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright © 2001      Red Hat UK Limited
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#ifndef __LINUX_MTD_GEN_PROBE_H__
+#define __LINUX_MTD_GEN_PROBE_H__
+
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/bitops.h>
+
+struct chip_probe {
+	char *name;
+	int (*probe_chip)(struct map_info *map, __u32 base,
+			  unsigned long *chip_map, struct cfi_private *cfi);
+};
+
+struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp);
+
+#endif /* __LINUX_MTD_GEN_PROBE_H__ */
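
A sketch of how a probe module plugs into mtd_do_chip_probe(), following the pattern of the in-tree CFI/JEDEC probes; probe_example_chip() is hypothetical and would identify chips, fill in *cfi and mark the found positions in chip_map:

static int probe_example_chip(struct map_info *map, __u32 base,
			      unsigned long *chip_map,
			      struct cfi_private *cfi);

static struct chip_probe example_chip_probe = {
	.name       = "EXAMPLE",
	.probe_chip = probe_example_chip,
};

static struct mtd_info *example_probe(struct map_info *map)
{
	return mtd_do_chip_probe(map, &example_chip_probe);
}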
diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h
new file mode 100644
index 0000000..fdfff87
--- /dev/null
+++ b/include/linux/mtd/inftl.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *	inftl.h -- defines to support the Inverse NAND Flash Translation Layer
+ *
+ *	(C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
+ */
+
+#ifndef __MTD_INFTL_H__
+#define __MTD_INFTL_H__
+
+#ifndef __KERNEL__
+#error This is a kernel header. Perhaps include nftl-user.h instead?
+#endif
+
+#include <linux/mtd/blktrans.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nftl.h>
+
+#include <mtd/inftl-user.h>
+
+#ifndef INFTL_MAJOR
+#define INFTL_MAJOR 96
+#endif
+#define INFTL_PARTN_BITS 4
+
+#ifdef __KERNEL__
+
+struct INFTLrecord {
+	struct mtd_blktrans_dev mbd;
+	__u16 MediaUnit;
+	__u32 EraseSize;
+	struct INFTLMediaHeader MediaHdr;
+	int usecount;
+	unsigned char heads;
+	unsigned char sectors;
+	unsigned short cylinders;
+	__u16 numvunits;
+	__u16 firstEUN;
+	__u16 lastEUN;
+	__u16 numfreeEUNs;
+	__u16 LastFreeEUN;		/* To speed up finding a free EUN */
+	int head,sect,cyl;
+	__u16 *PUtable;			/* Physical Unit Table */
+	__u16 *VUtable;			/* Virtual Unit Table */
+	unsigned int nb_blocks;		/* number of physical blocks */
+	unsigned int nb_boot_blocks;	/* number of blocks used by the bios */
+	struct erase_info instr;
+};
+
+int INFTL_mount(struct INFTLrecord *s);
+int INFTL_formatblock(struct INFTLrecord *s, int block);
+
+void INFTL_dumptables(struct INFTLrecord *s);
+void INFTL_dumpVUchains(struct INFTLrecord *s);
+
+int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+		   size_t *retlen, uint8_t *buf);
+int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+		    size_t *retlen, uint8_t *buf);
+
+#endif /* __KERNEL__ */
+
+#endif /* __MTD_INFTL_H__ */
diff --git a/include/linux/mtd/latch-addr-flash.h b/include/linux/mtd/latch-addr-flash.h
new file mode 100644
index 0000000..e94b8e1
--- /dev/null
+++ b/include/linux/mtd/latch-addr-flash.h
@@ -0,0 +1,29 @@
+/*
+ * Interface for NOR flash driver whose high address lines are latched
+ *
+ * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef __LATCH_ADDR_FLASH__
+#define __LATCH_ADDR_FLASH__
+
+struct map_info;
+struct mtd_partition;
+
+struct latch_addr_flash_data {
+	unsigned int		width;
+	unsigned int		size;
+
+	int			(*init)(void *data, int cs);
+	void			(*done)(void *data);
+	void			(*set_window)(unsigned long offset, void *data);
+	void			*data;
+
+	unsigned int		nr_parts;
+	struct mtd_partition	*parts;
+};
+
+#endif
diff --git a/include/linux/mtd/lpc32xx_mlc.h b/include/linux/mtd/lpc32xx_mlc.h
new file mode 100644
index 0000000..d91b1e3
--- /dev/null
+++ b/include/linux/mtd/lpc32xx_mlc.h
@@ -0,0 +1,20 @@
+/*
+ * Platform data for LPC32xx SoC MLC NAND controller
+ *
+ * Copyright © 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MTD_LPC32XX_MLC_H
+#define __LINUX_MTD_LPC32XX_MLC_H
+
+#include <linux/dmaengine.h>
+
+struct lpc32xx_mlc_platform_data {
+	bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+};
+
+#endif  /* __LINUX_MTD_LPC32XX_MLC_H */
diff --git a/include/linux/mtd/lpc32xx_slc.h b/include/linux/mtd/lpc32xx_slc.h
new file mode 100644
index 0000000..1169548
--- /dev/null
+++ b/include/linux/mtd/lpc32xx_slc.h
@@ -0,0 +1,20 @@
+/*
+ * Platform data for LPC32xx SoC SLC NAND controller
+ *
+ * Copyright © 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MTD_LPC32XX_SLC_H
+#define __LINUX_MTD_LPC32XX_SLC_H
+
+#include <linux/dmaengine.h>
+
+struct lpc32xx_slc_platform_data {
+	bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+};
+
+#endif  /* __LINUX_MTD_LPC32XX_SLC_H */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
new file mode 100644
index 0000000..01b990e
--- /dev/null
+++ b/include/linux/mtd/map.h
@@ -0,0 +1,478 @@
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+/* Overhauled routines for dealing with different mmap regions of flash */
+
+#ifndef __LINUX_MTD_MAP_H__
+#define __LINUX_MTD_MAP_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include <asm/unaligned.h>
+#include <asm/barrier.h>
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
+#define map_bankwidth(map) 1
+#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
+#define map_bankwidth_is_large(map) (0)
+#define map_words(map) (1)
+#define MAX_MAP_BANKWIDTH 1
+#else
+#define map_bankwidth_is_1(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
+# ifdef map_bankwidth
+#  undef map_bankwidth
+#  define map_bankwidth(map) ((map)->bankwidth)
+# else
+#  define map_bankwidth(map) 2
+#  define map_bankwidth_is_large(map) (0)
+#  define map_words(map) (1)
+# endif
+#define map_bankwidth_is_2(map) (map_bankwidth(map) == 2)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 2
+#else
+#define map_bankwidth_is_2(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
+# ifdef map_bankwidth
+#  undef map_bankwidth
+#  define map_bankwidth(map) ((map)->bankwidth)
+# else
+#  define map_bankwidth(map) 4
+#  define map_bankwidth_is_large(map) (0)
+#  define map_words(map) (1)
+# endif
+#define map_bankwidth_is_4(map) (map_bankwidth(map) == 4)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 4
+#else
+#define map_bankwidth_is_4(map) (0)
+#endif
+
+/* ensure we never evaluate anything shorter than an unsigned long
+ * to zero, and ensure we'll never miss the end of a comparison (bjd) */
+
+#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long))
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
+# ifdef map_bankwidth
+#  undef map_bankwidth
+#  define map_bankwidth(map) ((map)->bankwidth)
+#  if BITS_PER_LONG < 64
+#   undef map_bankwidth_is_large
+#   define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+#   undef map_words
+#   define map_words(map) map_calc_words(map)
+#  endif
+# else
+#  define map_bankwidth(map) 8
+#  define map_bankwidth_is_large(map) (BITS_PER_LONG < 64)
+#  define map_words(map) map_calc_words(map)
+# endif
+#define map_bankwidth_is_8(map) (map_bankwidth(map) == 8)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 8
+#else
+#define map_bankwidth_is_8(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
+# ifdef map_bankwidth
+#  undef map_bankwidth
+#  define map_bankwidth(map) ((map)->bankwidth)
+#  undef map_bankwidth_is_large
+#  define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+#  undef map_words
+#  define map_words(map) map_calc_words(map)
+# else
+#  define map_bankwidth(map) 16
+#  define map_bankwidth_is_large(map) (1)
+#  define map_words(map) map_calc_words(map)
+# endif
+#define map_bankwidth_is_16(map) (map_bankwidth(map) == 16)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 16
+#else
+#define map_bankwidth_is_16(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
+/* always use indirect access for 256-bit to preserve kernel stack */
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) map_calc_words(map)
+#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 32
+#else
+#define map_bankwidth_is_32(map) (0)
+#endif
+
+#ifndef map_bankwidth
+#ifdef CONFIG_MTD
+#warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work"
+#endif
+static inline int map_bankwidth(void *map)
+{
+	BUG();
+	return 0;
+}
+#define map_bankwidth_is_large(map) (0)
+#define map_words(map) (0)
+#define MAX_MAP_BANKWIDTH 1
+#endif
+
+static inline int map_bankwidth_supported(int w)
+{
+	switch (w) {
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
+	case 1:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
+	case 2:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
+	case 4:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
+	case 8:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
+	case 16:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
+	case 32:
+#endif
+		return 1;
+
+	default:
+		return 0;
+	}
+}
+
+#define MAX_MAP_LONGS (((MAX_MAP_BANKWIDTH * 8) + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
+typedef union {
+	unsigned long x[MAX_MAP_LONGS];
+} map_word;
+
+/* The map stuff is very simple. You fill in your struct map_info with
+   a handful of routines for accessing the device, making sure they handle
+   paging etc. correctly if your device needs it. Then you pass it off
+   to a chip probe routine -- either JEDEC or CFI probe or both -- via
+   do_map_probe(). If a chip is recognised, the probe code will invoke the
+   appropriate chip driver (if present) and return a struct mtd_info.
+   At which point, you fill in the mtd->module with your own module
+   address, and register it with the MTD core code. Or you could partition
+   it and register the partitions instead, or keep it for your own private
+   use; whatever.
+
+   The mtd->priv field will point to the struct map_info, and any further
+   private data required by the chip driver is linked from the
+   mtd->priv->fldrv_priv field. This allows the map driver to get at
+   the destructor function map->fldrv_destroy() when it's tired
+   of living.
+*/
+
+struct map_info {
+	const char *name;
+	unsigned long size;
+	resource_size_t phys;
+#define NO_XIP (-1UL)
+
+	void __iomem *virt;
+	void *cached;
+
+	int swap; /* this mapping's byte-swapping requirement */
+	int bankwidth; /* in octets. This isn't necessarily the width
+		       of actual bus cycles -- it's the repeat interval
+		      in bytes, before you are talking to the first chip again.
+		      */
+
+#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
+	map_word (*read)(struct map_info *, unsigned long);
+	void (*copy_from)(struct map_info *, void *, unsigned long, ssize_t);
+
+	void (*write)(struct map_info *, const map_word, unsigned long);
+	void (*copy_to)(struct map_info *, unsigned long, const void *, ssize_t);
+
+	/* We can perhaps put in 'point' and 'unpoint' methods, if we really
+	   want to enable XIP for non-linear mappings. Not yet though. */
+#endif
+	/* It's possible for the map driver to use cached memory in its
+	   copy_from implementation (and _only_ with copy_from).  However,
+	   when the chip driver knows some flash area has changed contents,
+	   it will signal it to the map driver through this routine to let
+	   the map driver invalidate the corresponding cache as needed.
+	   If there is no cache to care about this can be set to NULL. */
+	void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
+
+	/* This will be called with 1 as parameter when the first map user
+	 * needs VPP, and called with 0 when the last user exits. The map
+	 * core maintains a reference counter, and assumes that VPP is a
+	 * global resource applying to all mapped flash chips on the system.
+	 */
+	void (*set_vpp)(struct map_info *, int);
+
+	unsigned long pfow_base;
+	unsigned long map_priv_1;
+	unsigned long map_priv_2;
+	struct device_node *device_node;
+	void *fldrv_priv;
+	struct mtd_chip_driver *fldrv;
+};
+
+struct mtd_chip_driver {
+	struct mtd_info *(*probe)(struct map_info *map);
+	void (*destroy)(struct mtd_info *);
+	struct module *module;
+	char *name;
+	struct list_head list;
+};
+
+void register_mtd_chip_driver(struct mtd_chip_driver *);
+void unregister_mtd_chip_driver(struct mtd_chip_driver *);
+
+struct mtd_info *do_map_probe(const char *name, struct map_info *map);
+void map_destroy(struct mtd_info *mtd);
+
+#define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0)
+#define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0)
+
+#define INVALIDATE_CACHED_RANGE(map, from, size) \
+	do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
+
+#define map_word_equal(map, val1, val2)					\
+({									\
+	int i, ret = 1;							\
+	for (i = 0; i < map_words(map); i++)				\
+		if ((val1).x[i] != (val2).x[i]) {			\
+			ret = 0;					\
+			break;						\
+		}							\
+	ret;								\
+})
+
+#define map_word_and(map, val1, val2)					\
+({									\
+	map_word r;							\
+	int i;								\
+	for (i = 0; i < map_words(map); i++)				\
+		r.x[i] = (val1).x[i] & (val2).x[i];			\
+	r;								\
+})
+
+#define map_word_clr(map, val1, val2)					\
+({									\
+	map_word r;							\
+	int i;								\
+	for (i = 0; i < map_words(map); i++)				\
+		r.x[i] = (val1).x[i] & ~(val2).x[i];			\
+	r;								\
+})
+
+#define map_word_or(map, val1, val2)					\
+({									\
+	map_word r;							\
+	int i;								\
+	for (i = 0; i < map_words(map); i++)				\
+		r.x[i] = (val1).x[i] | (val2).x[i];			\
+	r;								\
+})
+
+#define map_word_andequal(map, val1, val2, val3)			\
+({									\
+	int i, ret = 1;							\
+	for (i = 0; i < map_words(map); i++) {				\
+		if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) {	\
+			ret = 0;					\
+			break;						\
+		}							\
+	}								\
+	ret;								\
+})
+
+#define map_word_bitsset(map, val1, val2)				\
+({									\
+	int i, ret = 0;							\
+	for (i = 0; i < map_words(map); i++) {				\
+		if ((val1).x[i] & (val2).x[i]) {			\
+			ret = 1;					\
+			break;						\
+		}							\
+	}								\
+	ret;								\
+})
+
+static inline map_word map_word_load(struct map_info *map, const void *ptr)
+{
+	map_word r;
+
+	if (map_bankwidth_is_1(map))
+		r.x[0] = *(unsigned char *)ptr;
+	else if (map_bankwidth_is_2(map))
+		r.x[0] = get_unaligned((uint16_t *)ptr);
+	else if (map_bankwidth_is_4(map))
+		r.x[0] = get_unaligned((uint32_t *)ptr);
+#if BITS_PER_LONG >= 64
+	else if (map_bankwidth_is_8(map))
+		r.x[0] = get_unaligned((uint64_t *)ptr);
+#endif
+	else if (map_bankwidth_is_large(map))
+		memcpy(r.x, ptr, map->bankwidth);
+	else
+		BUG();
+
+	return r;
+}
+
+static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len)
+{
+	int i;
+
+	if (map_bankwidth_is_large(map)) {
+		char *dest = (char *)&orig;
+
+		memcpy(dest+start, buf, len);
+	} else {
+		for (i = start; i < start+len; i++) {
+			int bitpos;
+
+#ifdef __LITTLE_ENDIAN
+			bitpos = i * 8;
+#else /* __BIG_ENDIAN */
+			bitpos = (map_bankwidth(map) - 1 - i) * 8;
+#endif
+			orig.x[0] &= ~(0xff << bitpos);
+			orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
+		}
+	}
+	return orig;
+}
+
+#if BITS_PER_LONG < 64
+#define MAP_FF_LIMIT 4
+#else
+#define MAP_FF_LIMIT 8
+#endif
+
+static inline map_word map_word_ff(struct map_info *map)
+{
+	map_word r;
+	int i;
+
+	if (map_bankwidth(map) < MAP_FF_LIMIT) {
+		int bw = 8 * map_bankwidth(map);
+
+		r.x[0] = (1UL << bw) - 1;
+	} else {
+		for (i = 0; i < map_words(map); i++)
+			r.x[i] = ~0UL;
+	}
+	return r;
+}
+
+static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
+{
+	map_word r;
+
+	if (map_bankwidth_is_1(map))
+		r.x[0] = __raw_readb(map->virt + ofs);
+	else if (map_bankwidth_is_2(map))
+		r.x[0] = __raw_readw(map->virt + ofs);
+	else if (map_bankwidth_is_4(map))
+		r.x[0] = __raw_readl(map->virt + ofs);
+#if BITS_PER_LONG >= 64
+	else if (map_bankwidth_is_8(map))
+		r.x[0] = __raw_readq(map->virt + ofs);
+#endif
+	else if (map_bankwidth_is_large(map))
+		memcpy_fromio(r.x, map->virt + ofs, map->bankwidth);
+	else
+		BUG();
+
+	return r;
+}
+
+static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
+{
+	if (map_bankwidth_is_1(map))
+		__raw_writeb(datum.x[0], map->virt + ofs);
+	else if (map_bankwidth_is_2(map))
+		__raw_writew(datum.x[0], map->virt + ofs);
+	else if (map_bankwidth_is_4(map))
+		__raw_writel(datum.x[0], map->virt + ofs);
+#if BITS_PER_LONG >= 64
+	else if (map_bankwidth_is_8(map))
+		__raw_writeq(datum.x[0], map->virt + ofs);
+#endif
+	else if (map_bankwidth_is_large(map))
+		memcpy_toio(map->virt+ofs, datum.x, map->bankwidth);
+	else
+		BUG();
+	mb();
+}
+
+static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+	if (map->cached)
+		memcpy(to, (char *)map->cached + from, len);
+	else
+		memcpy_fromio(to, map->virt + from, len);
+}
+
+static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+	memcpy_toio(map->virt + to, from, len);
+}
+
+#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
+#define map_read(map, ofs) (map)->read(map, ofs)
+#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
+#define map_write(map, datum, ofs) (map)->write(map, datum, ofs)
+#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)
+
+extern void simple_map_init(struct map_info *);
+#define map_is_linear(map) (map->phys != NO_XIP)
+
+#else
+#define map_read(map, ofs) inline_map_read(map, ofs)
+#define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len)
+#define map_write(map, datum, ofs) inline_map_write(map, datum, ofs)
+#define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len)
+
+
+#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth))
+#define map_is_linear(map) ({ (void)(map); 1; })
+
+#endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */
+
+#endif /* __LINUX_MTD_MAP_H__ */
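A minimal driver-side sketch of how these accessors fit together, assuming an already ioremap()'d window on a 16-bit bus; the example_ names are hypothetical, error handling is omitted, and NO_XIP and map_word_equal() come from earlier in this header:

	static int example_map_setup(struct map_info *map, void __iomem *base,
				     unsigned long size)
	{
		map_word first, erased;

		map->name = "example-flash";
		map->virt = base;
		map->size = size;
		map->bankwidth = 2;	/* 16-bit data bus */
		map->phys = NO_XIP;

		/* Fills the read/write/copy hooks when CONFIG_MTD_COMPLEX_MAPPINGS
		 * is set, or just checks the bankwidth otherwise. */
		simple_map_init(map);

		/* Treat an all-0xFF first word as "nothing there" in this sketch. */
		first = map_read(map, 0);
		erased = map_word_ff(map);

		return map_word_equal(map, first, erased) ? -ENODEV : 0;
	}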
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
new file mode 100644
index 0000000..cd0be91
--- /dev/null
+++ b/include/linux/mtd/mtd.h
@@ -0,0 +1,601 @@
+/*
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#ifndef __MTD_MTD_H__
+#define __MTD_MTD_H__
+
+#include <linux/types.h>
+#include <linux/uio.h>
+#include <linux/notifier.h>
+#include <linux/device.h>
+#include <linux/of.h>
+
+#include <mtd/mtd-abi.h>
+
+#include <asm/div64.h>
+
+#define MTD_FAIL_ADDR_UNKNOWN -1LL
+
+struct mtd_info;
+
+/*
+ * If the erase fails, fail_addr might indicate exactly which block failed. If
+ * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
+ * or was not specific to any particular block.
+ */
+struct erase_info {
+	uint64_t addr;
+	uint64_t len;
+	uint64_t fail_addr;
+};
+
+struct mtd_erase_region_info {
+	uint64_t offset;		/* At which this region starts, from the beginning of the MTD */
+	uint32_t erasesize;		/* For this region */
+	uint32_t numblocks;		/* Number of blocks of erasesize in this region */
+	unsigned long *lockmap;		/* If keeping bitmap of locks */
+};
+
+/**
+ * struct mtd_oob_ops - oob operation operands
+ * @mode:	operation mode
+ * @len:	number of data bytes to write/read
+ * @retlen:	number of data bytes written/read
+ * @ooblen:	number of oob bytes to write/read
+ * @oobretlen:	number of oob bytes written/read
+ * @ooboffs:	offset of oob data in the oob area (only relevant when
+ *		mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
+ * @datbuf:	data buffer - if NULL only oob data are read/written
+ * @oobbuf:	oob data buffer
+ *
+ * Note that some MTD drivers do not allow you to write more than one OOB area
+ * in one go. If you try to do that on such an MTD device, -EINVAL will be
+ * returned. To keep your implementation portable across all kinds of MTD
+ * devices, split the write request into several sub-requests when it crosses a
+ * page boundary.
+ */
+struct mtd_oob_ops {
+	unsigned int	mode;
+	size_t		len;
+	size_t		retlen;
+	size_t		ooblen;
+	size_t		oobretlen;
+	uint32_t	ooboffs;
+	uint8_t		*datbuf;
+	uint8_t		*oobbuf;
+};
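A minimal usage sketch, assuming MTD_OPS_AUTO_OOB from mtd-abi.h and a valid mtd pointer; the helper name is hypothetical and mtd_read_oob() is declared further down in this header:

	static int example_read_free_oob(struct mtd_info *mtd, loff_t page_addr,
					 u8 *oob, size_t ooblen)
	{
		struct mtd_oob_ops ops = { };

		ops.mode = MTD_OPS_AUTO_OOB;	/* only the free OOB bytes */
		ops.datbuf = NULL;		/* OOB only, no main-area data */
		ops.oobbuf = oob;
		ops.ooblen = ooblen;

		return mtd_read_oob(mtd, page_addr, &ops);
	}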
+
+#define MTD_MAX_OOBFREE_ENTRIES_LARGE	32
+#define MTD_MAX_ECCPOS_ENTRIES_LARGE	640
+/**
+ * struct mtd_oob_region - oob region definition
+ * @offset: region offset
+ * @length: region length
+ *
+ * This structure describes a region of the OOB area, and is used
+ * to retrieve ECC or free bytes sections.
+ * Each section is defined by an offset within the OOB area and a
+ * length.
+ */
+struct mtd_oob_region {
+	u32 offset;
+	u32 length;
+};
+
+/*
+ * struct mtd_ooblayout_ops - NAND OOB layout operations
+ * @ecc: function returning an ECC region in the OOB area.
+ *	 Should return -ERANGE if %section exceeds the total number of
+ *	 ECC sections.
+ * @free: function returning a free region in the OOB area.
+ *	  Should return -ERANGE if %section exceeds the total number of
+ *	  free sections.
+ */
+struct mtd_ooblayout_ops {
+	int (*ecc)(struct mtd_info *mtd, int section,
+		   struct mtd_oob_region *oobecc);
+	int (*free)(struct mtd_info *mtd, int section,
+		    struct mtd_oob_region *oobfree);
+};
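A sketch of a driver-provided layout with one free section and one ECC section; the offsets are made up for illustration, and the ops would be attached with mtd_set_ooblayout(), defined further down in this header:

	static int example_ooblayout_free(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *region)
	{
		if (section)
			return -ERANGE;

		region->offset = 2;			/* skip the BBM bytes */
		region->length = 4;
		return 0;
	}

	static int example_ooblayout_ecc(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *region)
	{
		if (section)
			return -ERANGE;

		region->offset = 6;
		region->length = mtd->oobsize - 6;	/* the rest is ECC */
		return 0;
	}

	static const struct mtd_ooblayout_ops example_ooblayout_ops = {
		.ecc = example_ooblayout_ecc,
		.free = example_ooblayout_free,
	};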
+
+/**
+ * struct mtd_pairing_info - page pairing information
+ *
+ * @pair: pair id
+ * @group: group id
+ *
+ * The term "pair" is used here, even though TLC NANDs might group pages by 3
+ * (3 bits in a single cell). A pair groups all pages that share the same
+ * cell. Pairs are then indexed in ascending order.
+ *
+ * @group defines the position of a page within a given pair. It can also be
+ * seen as the bit position in the cell: the page attached to bit 0 belongs to
+ * group 0, the page attached to bit 1 belongs to group 1, etc.
+ *
+ * Example:
+ * The H27UCG8T2BTR-BC datasheet describes the following pairing scheme:
+ *
+ *		group-0		group-1
+ *
+ *  pair-0	page-0		page-4
+ *  pair-1	page-1		page-5
+ *  pair-2	page-2		page-8
+ *  ...
+ *  pair-127	page-251	page-255
+ *
+ *
+ * Note that the "group" and "pair" terms were extracted from Samsung and
+ * Hynix datasheets, and might be referenced under other names in other
+ * datasheets (Micron describes this concept as "shared pages").
+ */
+struct mtd_pairing_info {
+	int pair;
+	int group;
+};
+
+/**
+ * struct mtd_pairing_scheme - page pairing scheme description
+ *
+ * @ngroups: number of groups. Should be related to the number of bits
+ *	     per cell.
+ * @get_info: converts a write-unit (page number within an erase block) into
+ *	      mtd_pairing information (pair + group). This function should
+ *	      fill the info parameter based on the wunit index or return
+ *	      -EINVAL if the wunit parameter is invalid.
+ * @get_wunit: converts pairing information into a write-unit (page) number.
+ *	       This function should return the wunit index pointed by the
+ *	       pairing information described in the info argument. It should
+ *	       return -EINVAL, if there's no wunit corresponding to the
+ *	       passed pairing information.
+ *
+ * See mtd_pairing_info documentation for a detailed explanation of the
+ * pair and group concepts.
+ *
+ * The mtd_pairing_scheme structure provides a generic solution to represent
+ * NAND page pairing schemes. Instead of exposing two big tables to do the
+ * write-unit <-> (pair + group) conversions, we ask the MTD drivers to
+ * implement the ->get_info() and ->get_wunit() functions.
+ *
+ * MTD users will then be able to query this information using the
+ * mtd_pairing_info_to_wunit() and mtd_wunit_to_pairing_info() helpers.
+ *
+ * @ngroups is here to help MTD users iterate over all the pages in a
+ * given pair. MTD users can retrieve this value using the
+ * mtd_pairing_groups() helper.
+ *
+ * Examples are given in the mtd_pairing_info_to_wunit() and
+ * mtd_wunit_to_pairing_info() documentation.
+ */
+struct mtd_pairing_scheme {
+	int ngroups;
+	int (*get_info)(struct mtd_info *mtd, int wunit,
+			struct mtd_pairing_info *info);
+	int (*get_wunit)(struct mtd_info *mtd,
+			 const struct mtd_pairing_info *info);
+};
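A sketch of the trivial scheme an SLC-like device could expose, where every page forms its own pair; the example_ names are hypothetical and mtd_wunit_per_eb() is defined further down in this header:

	static int example_get_info(struct mtd_info *mtd, int wunit,
				    struct mtd_pairing_info *info)
	{
		if (wunit < 0 || wunit >= mtd_wunit_per_eb(mtd))
			return -EINVAL;

		info->pair = wunit;
		info->group = 0;
		return 0;
	}

	static int example_get_wunit(struct mtd_info *mtd,
				     const struct mtd_pairing_info *info)
	{
		if (info->group || info->pair < 0 ||
		    info->pair >= mtd_wunit_per_eb(mtd))
			return -EINVAL;

		return info->pair;
	}

	static const struct mtd_pairing_scheme example_pairing_scheme = {
		.ngroups = 1,
		.get_info = example_get_info,
		.get_wunit = example_get_wunit,
	};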
+
+struct module;	/* only needed for owner field in mtd_info */
+
+/**
+ * struct mtd_debug_info - debugging information for an MTD device.
+ *
+ * @dfs_dir: direntry object of the MTD device debugfs directory
+ */
+struct mtd_debug_info {
+	struct dentry *dfs_dir;
+};
+
+struct mtd_info {
+	u_char type;
+	uint32_t flags;
+	uint64_t size;	 // Total size of the MTD
+
+	/* "Major" erase size for the device. Naïve users may take this
+	 * to be the only erase size available, or may use the more detailed
+	 * information below if they desire
+	 */
+	uint32_t erasesize;
+	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
+	 * though individual bits can be cleared), in case of NAND flash it is
+	 * one NAND page (or half, or one-fourth of it), in case of ECC-ed NOR
+	 * it is of ECC block size, etc. It is illegal to have writesize = 0.
+	 * Any driver registering a struct mtd_info must ensure a writesize of
+	 * 1 or larger.
+	 */
+	uint32_t writesize;
+
+	/*
+	 * Size of the write buffer used by the MTD. MTD devices having a write
+	 * buffer can write multiple writesize chunks at a time. E.g. while
+	 * writing 4 * writesize bytes to a device with 2 * writesize bytes
+	 * buffer the MTD driver can (but doesn't have to) do 2 writesize
+	 * operations, but not 4. Currently, all NANDs have writebufsize
+	 * equivalent to writesize (NAND page size). Some NOR flashes do have
+	 * writebufsize greater than writesize.
+	 */
+	uint32_t writebufsize;
+
+	uint32_t oobsize;   // Amount of OOB data per page (e.g. 16)
+	uint32_t oobavail;  // Available OOB bytes per page
+
+	/*
+	 * If erasesize is a power of 2 then the shift is stored in
+	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
+	 */
+	unsigned int erasesize_shift;
+	unsigned int writesize_shift;
+	/* Masks based on erasesize_shift and writesize_shift */
+	unsigned int erasesize_mask;
+	unsigned int writesize_mask;
+
+	/*
+	 * read ops return -EUCLEAN if max number of bitflips corrected on any
+	 * one region comprising an ecc step equals or exceeds this value.
+	 * Settable by driver, else defaults to ecc_strength.  User can override
+	 * in sysfs.  N.B. The meaning of the -EUCLEAN return code has changed;
+	 * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
+	 */
+	unsigned int bitflip_threshold;
+
+	/* Kernel-only stuff starts here. */
+	const char *name;
+	int index;
+
+	/* OOB layout description */
+	const struct mtd_ooblayout_ops *ooblayout;
+
+	/* NAND pairing scheme, only provided for MLC/TLC NANDs */
+	const struct mtd_pairing_scheme *pairing;
+
+	/* the ecc step size. */
+	unsigned int ecc_step_size;
+
+	/* max number of correctable bit errors per ecc step */
+	unsigned int ecc_strength;
+
+	/* Data for variable erase regions. If numeraseregions is zero,
+	 * it means that the whole device has erasesize as given above.
+	 */
+	int numeraseregions;
+	struct mtd_erase_region_info *eraseregions;
+
+	/*
+	 * Do not call via these pointers, use corresponding mtd_*()
+	 * wrappers instead.
+	 */
+	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
+	int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
+		       size_t *retlen, void **virt, resource_size_t *phys);
+	int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
+	int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
+		      size_t *retlen, u_char *buf);
+	int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
+		       size_t *retlen, const u_char *buf);
+	int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
+			     size_t *retlen, const u_char *buf);
+	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
+			  struct mtd_oob_ops *ops);
+	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
+			   struct mtd_oob_ops *ops);
+	int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len,
+				    size_t *retlen, struct otp_info *buf);
+	int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
+				    size_t len, size_t *retlen, u_char *buf);
+	int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len,
+				    size_t *retlen, struct otp_info *buf);
+	int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+				    size_t len, size_t *retlen, u_char *buf);
+	int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
+				     size_t len, size_t *retlen, u_char *buf);
+	int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+				    size_t len);
+	int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
+			unsigned long count, loff_t to, size_t *retlen);
+	void (*_sync) (struct mtd_info *mtd);
+	int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+	int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+	int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+	int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
+	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
+	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
+	int (*_max_bad_blocks) (struct mtd_info *mtd, loff_t ofs, size_t len);
+	int (*_suspend) (struct mtd_info *mtd);
+	void (*_resume) (struct mtd_info *mtd);
+	void (*_reboot) (struct mtd_info *mtd);
+	/*
+	 * If the driver is something smart, like UBI, it may need to maintain
+	 * its own reference counting. The functions below are for the driver only.
+	 */
+	int (*_get_device) (struct mtd_info *mtd);
+	void (*_put_device) (struct mtd_info *mtd);
+
+	struct notifier_block reboot_notifier;  /* default mode before reboot */
+
+	/* ECC status information */
+	struct mtd_ecc_stats ecc_stats;
+	/* Subpage shift (NAND) */
+	int subpage_sft;
+
+	void *priv;
+
+	struct module *owner;
+	struct device dev;
+	int usecount;
+	struct mtd_debug_info dbg;
+};
+
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
+		      struct mtd_oob_region *oobecc);
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
+				 int *section,
+				 struct mtd_oob_region *oobregion);
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
+			       const u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
+			       u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
+		       struct mtd_oob_region *oobfree);
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
+				const u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
+				u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);
+
+static inline void mtd_set_ooblayout(struct mtd_info *mtd,
+				     const struct mtd_ooblayout_ops *ooblayout)
+{
+	mtd->ooblayout = ooblayout;
+}
+
+static inline void mtd_set_pairing_scheme(struct mtd_info *mtd,
+				const struct mtd_pairing_scheme *pairing)
+{
+	mtd->pairing = pairing;
+}
+
+static inline void mtd_set_of_node(struct mtd_info *mtd,
+				   struct device_node *np)
+{
+	mtd->dev.of_node = np;
+	if (!mtd->name)
+		of_property_read_string(np, "label", &mtd->name);
+}
+
+static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
+{
+	return dev_of_node(&mtd->dev);
+}
+
+static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
+{
+	return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
+}
+
+static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
+				     loff_t ofs, size_t len)
+{
+	if (!mtd->_max_bad_blocks)
+		return -ENOTSUPP;
+
+	if (mtd->size < (len + ofs) || ofs < 0)
+		return -EINVAL;
+
+	return mtd->_max_bad_blocks(mtd, ofs, len);
+}
+
+int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
+			      struct mtd_pairing_info *info);
+int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
+			      const struct mtd_pairing_info *info);
+int mtd_pairing_groups(struct mtd_info *mtd);
+int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+	      void **virt, resource_size_t *phys);
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+				    unsigned long offset, unsigned long flags);
+int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+	     u_char *buf);
+int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+	      const u_char *buf);
+int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+		    const u_char *buf);
+
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
+int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);
+
+int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+			   struct otp_info *buf);
+int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+			   size_t *retlen, u_char *buf);
+int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+			   struct otp_info *buf);
+int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+			   size_t *retlen, u_char *buf);
+int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
+			    size_t *retlen, u_char *buf);
+int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
+
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+	       unsigned long count, loff_t to, size_t *retlen);
+
+static inline void mtd_sync(struct mtd_info *mtd)
+{
+	if (mtd->_sync)
+		mtd->_sync(mtd);
+}
+
+int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
+int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
+
+static inline int mtd_suspend(struct mtd_info *mtd)
+{
+	return mtd->_suspend ? mtd->_suspend(mtd) : 0;
+}
+
+static inline void mtd_resume(struct mtd_info *mtd)
+{
+	if (mtd->_resume)
+		mtd->_resume(mtd);
+}
+
+static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd->erasesize_shift)
+		return sz >> mtd->erasesize_shift;
+	do_div(sz, mtd->erasesize);
+	return sz;
+}
+
+static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd->erasesize_shift)
+		return sz & mtd->erasesize_mask;
+	return do_div(sz, mtd->erasesize);
+}
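A small sketch combining the two helpers to split an absolute offset into an eraseblock index and an offset inside that eraseblock:

	static void example_split_offset(struct mtd_info *mtd, loff_t ofs,
					 uint32_t *block, uint32_t *in_block)
	{
		*block = mtd_div_by_eb(ofs, mtd);
		*in_block = mtd_mod_by_eb(ofs, mtd);
	}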
+
+/**
+ * mtd_align_erase_req - Adjust an erase request to align things on eraseblock
+ *			 boundaries.
+ * @mtd: the MTD device this erase request applies on
+ * @req: the erase request to adjust
+ *
+ * This function will adjust @req->addr and @req->len to align them on
+ * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0.
+ */
+static inline void mtd_align_erase_req(struct mtd_info *mtd,
+				       struct erase_info *req)
+{
+	u32 mod;
+
+	if (WARN_ON(!mtd->erasesize))
+		return;
+
+	mod = mtd_mod_by_eb(req->addr, mtd);
+	if (mod) {
+		req->addr -= mod;
+		req->len += mod;
+	}
+
+	mod = mtd_mod_by_eb(req->addr + req->len, mtd);
+	if (mod)
+		req->len += mtd->erasesize - mod;
+}
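A minimal erase sketch built on struct erase_info and the helper above; a real caller would typically also check mtd_block_isbad() first and inspect fail_addr on failure:

	static int example_erase_block(struct mtd_info *mtd, loff_t ofs)
	{
		struct erase_info instr = { };

		instr.addr = ofs;
		instr.len = mtd->erasesize;
		mtd_align_erase_req(mtd, &instr);	/* snap to eraseblock boundaries */

		return mtd_erase(mtd, &instr);
	}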
+
+static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd->writesize_shift)
+		return sz >> mtd->writesize_shift;
+	do_div(sz, mtd->writesize);
+	return sz;
+}
+
+static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd->writesize_shift)
+		return sz & mtd->writesize_mask;
+	return do_div(sz, mtd->writesize);
+}
+
+static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
+{
+	return mtd->erasesize / mtd->writesize;
+}
+
+static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
+{
+	return mtd_div_by_ws(mtd_mod_by_eb(offs, mtd), mtd);
+}
+
+static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base,
+					 int wunit)
+{
+	return base + (wunit * mtd->writesize);
+}
+
+
+static inline int mtd_has_oob(const struct mtd_info *mtd)
+{
+	return mtd->_read_oob && mtd->_write_oob;
+}
+
+static inline int mtd_type_is_nand(const struct mtd_info *mtd)
+{
+	return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
+}
+
+static inline int mtd_can_have_bb(const struct mtd_info *mtd)
+{
+	return !!mtd->_block_isbad;
+}
+
+	/* Kernel-side ioctl definitions */
+
+struct mtd_partition;
+struct mtd_part_parser_data;
+
+extern int mtd_device_parse_register(struct mtd_info *mtd,
+				     const char * const *part_probe_types,
+				     struct mtd_part_parser_data *parser_data,
+				     const struct mtd_partition *defparts,
+				     int defnr_parts);
+#define mtd_device_register(master, parts, nr_parts)	\
+	mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
+extern int mtd_device_unregister(struct mtd_info *master);
+extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
+extern int __get_mtd_device(struct mtd_info *mtd);
+extern void __put_mtd_device(struct mtd_info *mtd);
+extern struct mtd_info *get_mtd_device_nm(const char *name);
+extern void put_mtd_device(struct mtd_info *mtd);
+
+
+struct mtd_notifier {
+	void (*add)(struct mtd_info *mtd);
+	void (*remove)(struct mtd_info *mtd);
+	struct list_head list;
+};
+
+
+extern void register_mtd_user (struct mtd_notifier *new);
+extern int unregister_mtd_user (struct mtd_notifier *old);
+void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
+
+static inline int mtd_is_bitflip(int err) {
+	return err == -EUCLEAN;
+}
+
+static inline int mtd_is_eccerr(int err) {
+	return err == -EBADMSG;
+}
+
+static inline int mtd_is_bitflip_or_eccerr(int err) {
+	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
+}
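A typical pattern around these helpers, shown as a sketch: corrected bitflips (-EUCLEAN) are treated as success while real ECC errors are propagated:

	static int example_read(struct mtd_info *mtd, loff_t from, size_t len,
				u_char *buf)
	{
		size_t retlen;
		int ret;

		ret = mtd_read(mtd, from, len, &retlen, buf);
		if (mtd_is_bitflip(ret))
			return 0;	/* data was corrected and is usable */

		return ret;
	}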
+
+unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
+
+#endif /* __MTD_MTD_H__ */
diff --git a/include/linux/mtd/mtdram.h b/include/linux/mtd/mtdram.h
new file mode 100644
index 0000000..ee8f956
--- /dev/null
+++ b/include/linux/mtd/mtdram.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MTD_MTDRAM_H__
+#define __MTD_MTDRAM_H__
+
+#include <linux/mtd/mtd.h>
+int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
+			unsigned long size, const char *name);
+
+#endif /* __MTD_MTDRAM_H__ */
diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h
new file mode 100644
index 0000000..7ab51bc
--- /dev/null
+++ b/include/linux/mtd/nand-gpio.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MTD_NAND_GPIO_H
+#define __LINUX_MTD_NAND_GPIO_H
+
+#include <linux/mtd/rawnand.h>
+
+struct gpio_nand_platdata {
+	void	(*adjust_parts)(struct gpio_nand_platdata *, size_t);
+	struct mtd_partition *parts;
+	unsigned int num_parts;
+	unsigned int options;
+	int	chip_delay;
+};
+
+#endif
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
new file mode 100644
index 0000000..7f53ece
--- /dev/null
+++ b/include/linux/mtd/nand.h
@@ -0,0 +1,733 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright 2017 - Free Electrons
+ *
+ *  Authors:
+ *	Boris Brezillon <boris.brezillon@free-electrons.com>
+ *	Peter Pan <peterpandong@micron.com>
+ */
+
+#ifndef __LINUX_MTD_NAND_H
+#define __LINUX_MTD_NAND_H
+
+#include <linux/mtd/mtd.h>
+
+/**
+ * struct nand_memory_organization - Memory organization structure
+ * @bits_per_cell: number of bits per NAND cell
+ * @pagesize: page size
+ * @oobsize: OOB area size
+ * @pages_per_eraseblock: number of pages per eraseblock
+ * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
+ * @planes_per_lun: number of planes per LUN
+ * @luns_per_target: number of LUNs per target (target is a synonym for die)
+ * @ntargets: total number of targets exposed by the NAND device
+ */
+struct nand_memory_organization {
+	unsigned int bits_per_cell;
+	unsigned int pagesize;
+	unsigned int oobsize;
+	unsigned int pages_per_eraseblock;
+	unsigned int eraseblocks_per_lun;
+	unsigned int planes_per_lun;
+	unsigned int luns_per_target;
+	unsigned int ntargets;
+};
+
+#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt)	\
+	{							\
+		.bits_per_cell = (bpc),				\
+		.pagesize = (ps),				\
+		.oobsize = (os),				\
+		.pages_per_eraseblock = (ppe),			\
+		.eraseblocks_per_lun = (epl),			\
+		.planes_per_lun = (ppl),			\
+		.luns_per_target = (lpt),			\
+		.ntargets = (nt),				\
+	}
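An example initializer for a hypothetical 128MiB SLC die: 2KiB pages, 64 bytes of OOB, 64 pages per eraseblock, 1024 eraseblocks per LUN, one plane, one LUN and one die:

	static const struct nand_memory_organization example_memorg =
		NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1);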
+
+/**
+ * struct nand_row_converter - Information needed to convert an absolute offset
+ *			       into a row address
+ * @lun_addr_shift: position of the LUN identifier in the row address
+ * @eraseblock_addr_shift: position of the eraseblock identifier in the row
+ *			   address
+ */
+struct nand_row_converter {
+	unsigned int lun_addr_shift;
+	unsigned int eraseblock_addr_shift;
+};
+
+/**
+ * struct nand_pos - NAND position object
+ * @target: the NAND target/die
+ * @lun: the LUN identifier
+ * @plane: the plane within the LUN
+ * @eraseblock: the eraseblock within the LUN
+ * @page: the page within the LUN
+ *
+ * This information is usually used by specific sub-layers to select the
+ * appropriate target/die and generate a row address to pass to the device.
+ */
+struct nand_pos {
+	unsigned int target;
+	unsigned int lun;
+	unsigned int plane;
+	unsigned int eraseblock;
+	unsigned int page;
+};
+
+/**
+ * struct nand_page_io_req - NAND I/O request object
+ * @pos: the position this I/O request is targeting
+ * @dataoffs: the offset within the page
+ * @datalen: number of data bytes to read from/write to this page
+ * @databuf: buffer to store data in or get data from
+ * @ooboffs: the OOB offset within the page
+ * @ooblen: the number of OOB bytes to read from/write to this page
+ * @oobbuf: buffer to store OOB data in or get OOB data from
+ * @mode: one of the %MTD_OPS_XXX modes
+ *
+ * This object is used to pass per-page I/O requests to NAND sub-layers. This
+ * way all the relevant information is already laid out in a convenient form
+ * and specific NAND layers can focus on translating it into specific
+ * commands/operations.
+ */
+struct nand_page_io_req {
+	struct nand_pos pos;
+	unsigned int dataoffs;
+	unsigned int datalen;
+	union {
+		const void *out;
+		void *in;
+	} databuf;
+	unsigned int ooboffs;
+	unsigned int ooblen;
+	union {
+		const void *out;
+		void *in;
+	} oobbuf;
+	int mode;
+};
+
+/**
+ * struct nand_ecc_req - NAND ECC requirements
+ * @strength: ECC strength
+ * @step_size: ECC step/block size
+ */
+struct nand_ecc_req {
+	unsigned int strength;
+	unsigned int step_size;
+};
+
+#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
+
+/**
+ * struct nand_bbt - bad block table object
+ * @cache: in memory BBT cache
+ */
+struct nand_bbt {
+	unsigned long *cache;
+};
+
+struct nand_device;
+
+/**
+ * struct nand_ops - NAND operations
+ * @erase: erase a specific block. There is no need to check whether the block
+ *	   is bad before erasing; this is taken care of by the generic NAND
+ *	   layer
+ * @markbad: mark a specific block bad. There is no need to check whether the
+ *	     block is already marked bad; this is taken care of by the generic
+ *	     NAND layer. This method should just write the BBM (Bad Block
+ *	     Marker) so that future calls to struct_nand_ops->isbad() return
+ *	     true
+ * @isbad: check whether a block is bad or not. This method should just read
+ *	   the BBM and return whether the block is bad based on what it reads
+ *
+ * These are all low level operations that should be implemented by specialized
+ * NAND layers (SPI NAND, raw NAND, ...).
+ */
+struct nand_ops {
+	int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
+	int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
+	bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
+};
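A skeleton implementation, as a specialized layer might provide; the hooks are stubs and a real driver would issue the corresponding device commands:

	static int example_op_erase(struct nand_device *nand,
				    const struct nand_pos *pos)
	{
		/* send an ERASE command for the row derived from @pos ... */
		return 0;
	}

	static int example_op_markbad(struct nand_device *nand,
				      const struct nand_pos *pos)
	{
		/* write a BBM in the OOB of the block's first page ... */
		return 0;
	}

	static bool example_op_isbad(struct nand_device *nand,
				     const struct nand_pos *pos)
	{
		/* read the BBM back ... */
		return false;
	}

	static const struct nand_ops example_nand_ops = {
		.erase = example_op_erase,
		.markbad = example_op_markbad,
		.isbad = example_op_isbad,
	};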
+
+/**
+ * struct nand_device - NAND device
+ * @mtd: MTD instance attached to the NAND device
+ * @memorg: memory layout
+ * @eccreq: ECC requirements
+ * @rowconv: position to row address converter
+ * @bbt: bad block table info
+ * @ops: NAND operations attached to the NAND device
+ *
+ * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
+ * should declare their own NAND object embedding a nand_device struct (that's
+ * how inheritance is done).
+ * struct_nand_device->memorg and struct_nand_device->eccreq should be filled
+ * at device detection time to reflect the NAND device
+ * capabilities/requirements. Once this is done nanddev_init() can be called.
+ * It will take care of converting NAND information into MTD ones, which means
+ * the specialized NAND layers should never manually tweak
+ * struct_nand_device->mtd except for the ->_read/write() hooks.
+ */
+struct nand_device {
+	struct mtd_info mtd;
+	struct nand_memory_organization memorg;
+	struct nand_ecc_req eccreq;
+	struct nand_row_converter rowconv;
+	struct nand_bbt bbt;
+	const struct nand_ops *ops;
+};
+
+/**
+ * struct nand_io_iter - NAND I/O iterator
+ * @req: current I/O request
+ * @oobbytes_per_page: maximum number of OOB bytes per page
+ * @dataleft: remaining number of data bytes to read/write
+ * @oobleft: remaining number of OOB bytes to read/write
+ *
+ * Can be used by specialized NAND layers to iterate over all pages covered
+ * by an MTD I/O request, which should greatly simplify the boilerplate
+ * code needed to read/write data from/to a NAND device.
+ */
+struct nand_io_iter {
+	struct nand_page_io_req req;
+	unsigned int oobbytes_per_page;
+	unsigned int dataleft;
+	unsigned int oobleft;
+};
+
+/**
+ * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the NAND device embedding @mtd.
+ */
+static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
+{
+	return container_of(mtd, struct nand_device, mtd);
+}
+
+/**
+ * nanddev_to_mtd() - Get the MTD device attached to a NAND device
+ * @nand: NAND device
+ *
+ * Return: the MTD device embedded in @nand.
+ */
+static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
+{
+	return &nand->mtd;
+}
+
+/**
+ * nanddev_bits_per_cell() - Get the number of bits per cell
+ * @nand: NAND device
+ *
+ * Return: the number of bits per cell.
+ */
+static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
+{
+	return nand->memorg.bits_per_cell;
+}
+
+/**
+ * nanddev_page_size() - Get NAND page size
+ * @nand: NAND device
+ *
+ * Return: the page size.
+ */
+static inline size_t nanddev_page_size(const struct nand_device *nand)
+{
+	return nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_per_page_oobsize() - Get NAND OOB size
+ * @nand: NAND device
+ *
+ * Return: the OOB size.
+ */
+static inline unsigned int
+nanddev_per_page_oobsize(const struct nand_device *nand)
+{
+	return nand->memorg.oobsize;
+}
+
+/**
+ * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
+ * @nand: NAND device
+ *
+ * Return: the number of pages per eraseblock.
+ */
+static inline unsigned int
+nanddev_pages_per_eraseblock(const struct nand_device *nand)
+{
+	return nand->memorg.pages_per_eraseblock;
+}
+
+/**
+ * nanddev_eraseblock_size() - Get NAND erase block size
+ * @nand: NAND device
+ *
+ * Return: the eraseblock size.
+ */
+static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
+{
+	return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
+}
+
+/**
+ * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
+ * @nand: NAND device
+ *
+ * Return: the number of eraseblocks per LUN.
+ */
+static inline unsigned int
+nanddev_eraseblocks_per_lun(const struct nand_device *nand)
+{
+	return nand->memorg.eraseblocks_per_lun;
+}
+
+/**
+ * nanddev_target_size() - Get the total size provided by a single target/die
+ * @nand: NAND device
+ *
+ * Return: the total size exposed by a single target/die in bytes.
+ */
+static inline u64 nanddev_target_size(const struct nand_device *nand)
+{
+	return (u64)nand->memorg.luns_per_target *
+	       nand->memorg.eraseblocks_per_lun *
+	       nand->memorg.pages_per_eraseblock *
+	       nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_ntargets() - Get the total number of targets
+ * @nand: NAND device
+ *
+ * Return: the number of targets/dies exposed by @nand.
+ */
+static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
+{
+	return nand->memorg.ntargets;
+}
+
+/**
+ * nanddev_neraseblocks() - Get the total number of eraseblocks
+ * @nand: NAND device
+ *
+ * Return: the total number of eraseblocks exposed by @nand.
+ */
+static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
+{
+	return nand->memorg.ntargets * nand->memorg.luns_per_target *
+	       nand->memorg.eraseblocks_per_lun;
+}
+
+/**
+ * nanddev_size() - Get NAND size
+ * @nand: NAND device
+ *
+ * Return: the total size (in bytes) exposed by @nand.
+ */
+static inline u64 nanddev_size(const struct nand_device *nand)
+{
+	return nanddev_target_size(nand) * nanddev_ntargets(nand);
+}
+
+/**
+ * nanddev_get_memorg() - Extract memory organization info from a NAND device
+ * @nand: NAND device
+ *
+ * This can be used by the upper layer to fill the memorg info before calling
+ * nanddev_init().
+ *
+ * Return: the memorg object embedded in the NAND device.
+ */
+static inline struct nand_memory_organization *
+nanddev_get_memorg(struct nand_device *nand)
+{
+	return &nand->memorg;
+}
+
+int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
+		 struct module *owner);
+void nanddev_cleanup(struct nand_device *nand);
+
+/**
+ * nanddev_register() - Register a NAND device
+ * @nand: NAND device
+ *
+ * Register a NAND device.
+ * This function is just a wrapper around mtd_device_register()
+ * registering the MTD device embedded in @nand.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static inline int nanddev_register(struct nand_device *nand)
+{
+	return mtd_device_register(&nand->mtd, NULL, 0);
+}
+
+/**
+ * nanddev_unregister() - Unregister a NAND device
+ * @nand: NAND device
+ *
+ * Unregister a NAND device.
+ * This function is just a wrapper around mtd_device_unregister()
+ * unregistering the MTD device embedded in @nand.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static inline int nanddev_unregister(struct nand_device *nand)
+{
+	return mtd_device_unregister(&nand->mtd);
+}
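A probe-time sketch tying the pieces together, reusing the hypothetical example_memorg and example_nand_ops from the earlier sketches; the ECC requirement values are made up and THIS_MODULE assumes the usual driver context:

	static int example_probe(struct nand_device *nand)
	{
		int ret;

		nand->memorg = example_memorg;
		nand->eccreq.strength = 8;	/* 8 bits per 512-byte step */
		nand->eccreq.step_size = 512;

		ret = nanddev_init(nand, &example_nand_ops, THIS_MODULE);
		if (ret)
			return ret;

		return nanddev_register(nand);
	}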
+
+/**
+ * nanddev_set_of_node() - Attach a DT node to a NAND device
+ * @nand: NAND device
+ * @np: DT node
+ *
+ * Attach a DT node to a NAND device.
+ */
+static inline void nanddev_set_of_node(struct nand_device *nand,
+				       struct device_node *np)
+{
+	mtd_set_of_node(&nand->mtd, np);
+}
+
+/**
+ * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
+ * @nand: NAND device
+ *
+ * Return: the DT node attached to @nand.
+ */
+static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
+{
+	return mtd_get_of_node(&nand->mtd);
+}
+
+/**
+ * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
+ * @nand: NAND device
+ * @offs: absolute NAND offset (usually passed by the MTD layer)
+ * @pos: a NAND position object to fill in
+ *
+ * Converts @offs into a nand_pos representation.
+ *
+ * Return: the offset within the NAND page pointed to by @pos.
+ */
+static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
+					       loff_t offs,
+					       struct nand_pos *pos)
+{
+	unsigned int pageoffs;
+	u64 tmp = offs;
+
+	pageoffs = do_div(tmp, nand->memorg.pagesize);
+	pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
+	pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
+	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
+	pos->lun = do_div(tmp, nand->memorg.luns_per_target);
+	pos->target = tmp;
+
+	return pageoffs;
+}
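A small sketch of the typical conversion pattern, here combined with nanddev_isbad(), which is declared further down in this header:

	static bool example_block_is_bad(struct nand_device *nand, loff_t offs)
	{
		struct nand_pos pos;

		nanddev_offs_to_pos(nand, offs, &pos);
		return nanddev_isbad(nand, &pos);
	}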
+
+/**
+ * nanddev_pos_cmp() - Compare two NAND positions
+ * @a: First NAND position
+ * @b: Second NAND position
+ *
+ * Compares two NAND positions.
+ *
+ * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
+ */
+static inline int nanddev_pos_cmp(const struct nand_pos *a,
+				  const struct nand_pos *b)
+{
+	if (a->target != b->target)
+		return a->target < b->target ? -1 : 1;
+
+	if (a->lun != b->lun)
+		return a->lun < b->lun ? -1 : 1;
+
+	if (a->eraseblock != b->eraseblock)
+		return a->eraseblock < b->eraseblock ? -1 : 1;
+
+	if (a->page != b->page)
+		return a->page < b->page ? -1 : 1;
+
+	return 0;
+}
+
+/**
+ * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
+ * @nand: NAND device
+ * @pos: the NAND position to convert
+ *
+ * Converts @pos NAND position into an absolute offset.
+ *
+ * Return: the absolute offset. Note that @pos points to the beginning of a
+ *	   page; if one wants to point to a specific offset within this page,
+ *	   the returned offset has to be adjusted manually.
+ */
+static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
+					 const struct nand_pos *pos)
+{
+	unsigned int npages;
+
+	npages = pos->page +
+		 ((pos->eraseblock +
+		   (pos->lun +
+		    (pos->target * nand->memorg.luns_per_target)) *
+		   nand->memorg.eraseblocks_per_lun) *
+		  nand->memorg.pages_per_eraseblock);
+
+	return (loff_t)npages * nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_pos_to_row() - Extract a row address from a NAND position
+ * @nand: NAND device
+ * @pos: the position to convert
+ *
+ * Converts a NAND position into a row address that can then be passed to the
+ * device.
+ *
+ * Return: the row address extracted from @pos.
+ */
+static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
+					      const struct nand_pos *pos)
+{
+	return (pos->lun << nand->rowconv.lun_addr_shift) |
+	       (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
+	       pos->page;
+}
+
+/**
+ * nanddev_pos_next_target() - Move a position to the next target/die
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next target/die. Useful when you
+ * want to iterate over all targets/dies of a NAND device.
+ */
+static inline void nanddev_pos_next_target(struct nand_device *nand,
+					   struct nand_pos *pos)
+{
+	pos->page = 0;
+	pos->plane = 0;
+	pos->eraseblock = 0;
+	pos->lun = 0;
+	pos->target++;
+}
+
+/**
+ * nanddev_pos_next_lun() - Move a position to the next LUN
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next LUN. Useful when you want to
+ * iterate over all LUNs of a NAND device.
+ */
+static inline void nanddev_pos_next_lun(struct nand_device *nand,
+					struct nand_pos *pos)
+{
+	if (pos->lun >= nand->memorg.luns_per_target - 1)
+		return nanddev_pos_next_target(nand, pos);
+
+	pos->lun++;
+	pos->page = 0;
+	pos->plane = 0;
+	pos->eraseblock = 0;
+}
+
+/**
+ * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next eraseblock. Useful when you
+ * want to iterate over all eraseblocks of a NAND device.
+ */
+static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
+					       struct nand_pos *pos)
+{
+	if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
+		return nanddev_pos_next_lun(nand, pos);
+
+	pos->eraseblock++;
+	pos->page = 0;
+	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
+}
+
+/**
+ * nanddev_pos_next_page() - Move a position to the next page
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next page. Useful when you want to
+ * iterate over all pages of a NAND device.
+ */
+static inline void nanddev_pos_next_page(struct nand_device *nand,
+					 struct nand_pos *pos)
+{
+	if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
+		return nanddev_pos_next_eraseblock(nand, pos);
+
+	pos->page++;
+}
+
+/**
+ * nanddev_io_iter_init - Initialize a NAND I/O iterator
+ * @nand: NAND device
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer.
+ */
+static inline void nanddev_io_iter_init(struct nand_device *nand,
+					loff_t offs, struct mtd_oob_ops *req,
+					struct nand_io_iter *iter)
+{
+	struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+	iter->req.mode = req->mode;
+	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+	iter->req.ooboffs = req->ooboffs;
+	iter->oobbytes_per_page = mtd_oobavail(mtd, req);
+	iter->dataleft = req->len;
+	iter->oobleft = req->ooblen;
+	iter->req.databuf.in = req->datbuf;
+	iter->req.datalen = min_t(unsigned int,
+				  nand->memorg.pagesize - iter->req.dataoffs,
+				  iter->dataleft);
+	iter->req.oobbuf.in = req->oobbuf;
+	iter->req.ooblen = min_t(unsigned int,
+				 iter->oobbytes_per_page - iter->req.ooboffs,
+				 iter->oobleft);
+}
+
+/**
+ * nanddev_io_iter_next_page - Move to the next page
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next page.
+ */
+static inline void nanddev_io_iter_next_page(struct nand_device *nand,
+					     struct nand_io_iter *iter)
+{
+	nanddev_pos_next_page(nand, &iter->req.pos);
+	iter->dataleft -= iter->req.datalen;
+	iter->req.databuf.in += iter->req.datalen;
+	iter->oobleft -= iter->req.ooblen;
+	iter->req.oobbuf.in += iter->req.ooblen;
+	iter->req.dataoffs = 0;
+	iter->req.ooboffs = 0;
+	iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
+				  iter->dataleft);
+	iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
+				 iter->oobleft);
+}
+
+/**
+ * nanddev_io_iter_end - Check whether the iteration should end
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Check whether @iter has reached the end of the NAND portion it was asked to
+ * iterate on.
+ *
+ * Return: true if @iter has reached the end of the iteration request, false
+ *	   otherwise.
+ */
+static inline bool nanddev_io_iter_end(struct nand_device *nand,
+				       const struct nand_io_iter *iter)
+{
+	if (iter->dataleft || iter->oobleft)
+		return false;
+
+	return true;
+}
+
+/**
+ * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
+ *			      I/O request
+ * @nand: NAND device
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used to iterate over the pages contained in an MTD request.
+ */
+#define nanddev_io_for_each_page(nand, start, req, iter)		\
+	for (nanddev_io_iter_init(nand, start, req, iter);		\
+	     !nanddev_io_iter_end(nand, iter);				\
+	     nanddev_io_iter_next_page(nand, iter))
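A sketch of how a specialized layer might drive the iterator for a read request; example_read_one_page() stands in for the layer's real per-page I/O routine and retlen/oobretlen are assumed to have been zeroed by the MTD core:

	static int example_read_one_page(struct nand_device *nand,
					 const struct nand_page_io_req *req);

	static int example_mtd_read(struct nand_device *nand, loff_t from,
				    struct mtd_oob_ops *ops)
	{
		struct nand_io_iter iter;
		int ret;

		nanddev_io_for_each_page(nand, from, ops, &iter) {
			ret = example_read_one_page(nand, &iter.req);
			if (ret)
				return ret;

			ops->retlen += iter.req.datalen;
			ops->oobretlen += iter.req.ooblen;
		}

		return 0;
	}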
+
+bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
+bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
+int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
+int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
+
+/* BBT related functions */
+enum nand_bbt_block_status {
+	NAND_BBT_BLOCK_STATUS_UNKNOWN,
+	NAND_BBT_BLOCK_GOOD,
+	NAND_BBT_BLOCK_WORN,
+	NAND_BBT_BLOCK_RESERVED,
+	NAND_BBT_BLOCK_FACTORY_BAD,
+	NAND_BBT_BLOCK_NUM_STATUS,
+};
+
+int nanddev_bbt_init(struct nand_device *nand);
+void nanddev_bbt_cleanup(struct nand_device *nand);
+int nanddev_bbt_update(struct nand_device *nand);
+int nanddev_bbt_get_block_status(const struct nand_device *nand,
+				 unsigned int entry);
+int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
+				 enum nand_bbt_block_status status);
+int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
+
+/**
+ * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
+ * @nand: NAND device
+ * @pos: the NAND position we want to get BBT entry for
+ *
+ * Return the BBT entry used to store information about the eraseblock
+ * pointed to by @pos.
+ *
+ * Return: the BBT entry storing information about the eraseblock pointed to
+ *	   by @pos.
+ */
+static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
+						    const struct nand_pos *pos)
+{
+	return pos->eraseblock +
+	       ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
+		nand->memorg.eraseblocks_per_lun);
+}
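For instance, a layer could query the cached state of one block like this (a sketch; nanddev_bbt_get_block_status() is declared above):

	static int example_cached_status(struct nand_device *nand,
					 const struct nand_pos *pos)
	{
		unsigned int entry = nanddev_bbt_pos_to_entry(nand, pos);

		return nanddev_bbt_get_block_status(nand, entry);
	}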
+
+/**
+ * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
+ * @nand: NAND device
+ *
+ * Return: true if the BBT has been initialized, false otherwise.
+ */
+static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
+{
+	return !!nand->bbt.cache;
+}
+
+/* MTD -> NAND helper functions. */
+int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
+
+#endif /* __LINUX_MTD_NAND_H */
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
new file mode 100644
index 0000000..98f20ef
--- /dev/null
+++ b/include/linux/mtd/nand_bch.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file is the header for the NAND BCH ECC implementation.
+ */
+
+#ifndef __MTD_NAND_BCH_H__
+#define __MTD_NAND_BCH_H__
+
+struct mtd_info;
+struct nand_bch_control;
+
+#if defined(CONFIG_MTD_NAND_ECC_BCH)
+
+static inline int mtd_nand_has_bch(void) { return 1; }
+
+/*
+ * Calculate BCH ecc code
+ */
+int nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+			   u_char *ecc_code);
+
+/*
+ * Detect and correct bit errors
+ */
+int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc,
+			  u_char *calc_ecc);
+/*
+ * Initialize BCH encoder/decoder
+ */
+struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
+/*
+ * Release BCH encoder/decoder resources
+ */
+void nand_bch_free(struct nand_bch_control *nbc);
+
+#else /* !CONFIG_MTD_NAND_ECC_BCH */
+
+static inline int mtd_nand_has_bch(void) { return 0; }
+
+static inline int
+nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+		       u_char *ecc_code)
+{
+	return -1;
+}
+
+static inline int
+nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
+		      unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+	return -ENOTSUPP;
+}
+
+static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
+{
+	return NULL;
+}
+
+static inline void nand_bch_free(struct nand_bch_control *nbc) {}
+
+#endif /* CONFIG_MTD_NAND_ECC_BCH */
+
+#endif /* __MTD_NAND_BCH_H__ */
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
new file mode 100644
index 0000000..8a2decf
--- /dev/null
+++ b/include/linux/mtd/nand_ecc.h
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
+ *			    David Woodhouse <dwmw2@infradead.org>
+ *			    Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file is the header for the ECC algorithm.
+ */
+
+#ifndef __MTD_NAND_ECC_H__
+#define __MTD_NAND_ECC_H__
+
+struct mtd_info;
+
+/*
+ * Calculate 3 byte ECC code for eccsize byte block
+ */
+void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
+				u_char *ecc_code);
+
+/*
+ * Calculate 3 byte ECC code for 256/512 byte block
+ */
+int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
+
+/*
+ * Detect and correct a 1 bit error for eccsize byte block
+ */
+int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
+			unsigned int eccsize);
+
+/*
+ * Detect and correct a 1 bit error for 256/512 byte block
+ */
+int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
+
+#endif /* __MTD_NAND_ECC_H__ */
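A sketch of the usual software-ECC round trip over one 256- or 512-byte block: compute the ECC for the data just read and let nand_correct_data() repair single-bit errors in place; a negative return indicates an uncorrectable error:

	static int example_ecc_fix(struct mtd_info *mtd, u_char *dat,
				   u_char *read_ecc)
	{
		u_char calc_ecc[3];

		nand_calculate_ecc(mtd, dat, calc_ecc);
		return nand_correct_data(mtd, dat, read_ecc, calc_ecc);
	}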
diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h
new file mode 100644
index 0000000..357e88b
--- /dev/null
+++ b/include/linux/mtd/ndfc.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2006 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Info:
+ *   Contains defines and data structures for the NDFC NAND controller
+ *
+ */
+#ifndef __LINUX_MTD_NDFC_H
+#define __LINUX_MTD_NDFC_H
+
+/* NDFC Register definitions */
+#define NDFC_CMD		0x00
+#define NDFC_ALE		0x04
+#define NDFC_DATA		0x08
+#define NDFC_ECC		0x10
+#define NDFC_BCFG0		0x30
+#define NDFC_BCFG1		0x34
+#define NDFC_BCFG2		0x38
+#define NDFC_BCFG3		0x3c
+#define NDFC_CCR		0x40
+#define NDFC_STAT		0x44
+#define NDFC_HWCTL		0x48
+#define NDFC_REVID		0x50
+
+#define NDFC_STAT_IS_READY	0x01000000
+
+#define NDFC_CCR_RESET_CE	0x80000000 /* CE Reset */
+#define NDFC_CCR_RESET_ECC	0x40000000 /* ECC Reset */
+#define NDFC_CCR_RIE		0x20000000 /* Interrupt Enable on Device Rdy */
+#define NDFC_CCR_REN		0x10000000 /* Enable wait for Rdy in LinearR */
+#define NDFC_CCR_ROMEN		0x08000000 /* Enable ROM In LinearR */
+#define NDFC_CCR_ARE		0x04000000 /* Auto-Read Enable */
+#define NDFC_CCR_BS(x)		(((x) & 0x3) << 24) /* Select Bank on CE[x] */
+#define NDFC_CCR_BS_MASK	0x03000000 /* Select Bank */
+#define NDFC_CCR_ARAC0		0x00000000 /* 3 Addr, 1 Col 2 Row 512b page */
+#define NDFC_CCR_ARAC1		0x00001000 /* 4 Addr, 1 Col 3 Row 512b page */
+#define NDFC_CCR_ARAC2		0x00002000 /* 4 Addr, 2 Col 2 Row 2K page */
+#define NDFC_CCR_ARAC3		0x00003000 /* 5 Addr, 2 Col 3 Row 2K page */
+#define NDFC_CCR_ARAC_MASK	0x00003000 /* Auto-Read mode Addr Cycles */
+#define NDFC_CCR_RPG		0x0000C000 /* Auto-Read Page */
+#define NDFC_CCR_EBCC		0x00000004 /* EBC Configuration Completed */
+#define NDFC_CCR_DHC		0x00000002 /* Direct Hardware Control Enable */
+
+#define NDFC_BxCFG_EN		0x80000000 /* Bank Enable */
+#define NDFC_BxCFG_CED		0x40000000 /* nCE Style */
+#define NDFC_BxCFG_SZ_MASK	0x08000000 /* Bank Size */
+#define NDFC_BxCFG_SZ_8BIT	0x00000000 /* 8bit */
+#define NDFC_BxCFG_SZ_16BIT	0x08000000 /* 16bit */
+
+#define NDFC_MAX_BANKS		4
+
+struct ndfc_controller_settings {
+	uint32_t	ccr_settings;
+	uint64_t	ndfc_erpn;
+};
+
+struct ndfc_chip_settings {
+	uint32_t	bank_settings;
+};
+
+#endif
diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h
new file mode 100644
index 0000000..044daa0
--- /dev/null
+++ b/include/linux/mtd/nftl.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#ifndef __MTD_NFTL_H__
+#define __MTD_NFTL_H__
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/blktrans.h>
+
+#include <mtd/nftl-user.h>
+
+/* these values are used in the ReplUnitTable */
+#define BLOCK_NIL          0xffff /* last block of a chain */
+#define BLOCK_FREE         0xfffe /* free block */
+#define BLOCK_NOTEXPLORED  0xfffd /* unexplored block, used only during mounting */
+#define BLOCK_RESERVED     0xfffc /* bios block or bad block */
+
+struct NFTLrecord {
+	struct mtd_blktrans_dev mbd;
+	__u16 MediaUnit, SpareMediaUnit;
+	__u32 EraseSize;
+	struct NFTLMediaHeader MediaHdr;
+	int usecount;
+	unsigned char heads;
+	unsigned char sectors;
+	unsigned short cylinders;
+	__u16 numvunits;
+	__u16 lastEUN;                  /* should be removed */
+	__u16 numfreeEUNs;
+	__u16 LastFreeEUN; 		/* To speed up finding a free EUN */
+	int head, sect, cyl;
+	__u16 *EUNtable;		/* [numvunits]: First EUN for each virtual unit */
+	__u16 *ReplUnitTable;		/* [numEUNs]: ReplUnitNumber for each */
+	unsigned int nb_blocks;		/* number of physical blocks */
+	unsigned int nb_boot_blocks;	/* number of blocks used by the bios */
+	struct erase_info instr;
+};
+
+int NFTL_mount(struct NFTLrecord *s);
+int NFTL_formatblock(struct NFTLrecord *s, int block);
+
+int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+		  size_t *retlen, uint8_t *buf);
+int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+		   size_t *retlen, uint8_t *buf);
+
+#ifndef NFTL_MAJOR
+#define NFTL_MAJOR 93
+#endif
+
+#define MAX_NFTLS 16
+#define MAX_SECTORS_PER_UNIT 64
+#define NFTL_PARTN_BITS 4
+
+#endif /* __MTD_NFTL_H__ */
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
new file mode 100644
index 0000000..0aaa98b
--- /dev/null
+++ b/include/linux/mtd/onenand.h
@@ -0,0 +1,240 @@
+/*
+ *  linux/include/linux/mtd/onenand.h
+ *
+ *  Copyright © 2005-2009 Samsung Electronics
+ *  Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MTD_ONENAND_H
+#define __LINUX_MTD_ONENAND_H
+
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/onenand_regs.h>
+#include <linux/mtd/bbm.h>
+
+#define MAX_DIES		2
+#define MAX_BUFFERRAM		2
+
+/* Scan and identify a OneNAND device */
+extern int onenand_scan(struct mtd_info *mtd, int max_chips);
+/* Free resources held by the OneNAND device */
+extern void onenand_release(struct mtd_info *mtd);
+
+/**
+ * struct onenand_bufferram - OneNAND BufferRAM Data
+ * @blockpage:		block & page address in BufferRAM
+ */
+struct onenand_bufferram {
+	int	blockpage;
+};
+
+/**
+ * struct onenand_chip - OneNAND Private Flash Chip Data
+ * @base:		[BOARDSPECIFIC] address to access OneNAND
+ * @dies:		[INTERN][FLEX-ONENAND] number of dies on chip
+ * @boundary:		[INTERN][FLEX-ONENAND] Boundary of the dies
+ * @diesize:		[INTERN][FLEX-ONENAND] Size of the dies
+ * @chipsize:		[INTERN] the size of one chip for multichip arrays
+ *			FIXME For Flex-OneNAND, chipsize holds maximum possible
+ *			device size, i.e. when all blocks are considered MLC
+ * @device_id:		[INTERN] device ID
+ * @density_mask:	chip density, used for DDP devices
+ * @version_id:	[INTERN] version ID
+ * @options:		[BOARDSPECIFIC] various chip options. They can
+ *			partly be set to inform onenand_scan about special
+ *			functionality
+ * @erase_shift:	[INTERN] number of address bits in a block
+ * @page_shift:		[INTERN] number of address bits in a page
+ * @page_mask:		[INTERN] a page per block mask
+ * @writesize:		[INTERN] a real page size
+ * @bufferram_index:	[INTERN] BufferRAM index
+ * @bufferram:		[INTERN] BufferRAM info
+ * @readw:		[REPLACEABLE] hardware specific function for read short
+ * @writew:		[REPLACEABLE] hardware specific function for write short
+ * @command:		[REPLACEABLE] hardware specific function for writing
+ *			commands to the chip
+ * @wait:		[REPLACEABLE] hardware specific function for wait on ready
+ * @bbt_wait:		[REPLACEABLE] hardware specific function for bbt wait on ready
+ * @unlock_all:		[REPLACEABLE] hardware specific function for unlock all
+ * @read_bufferram:	[REPLACEABLE] hardware specific function for BufferRAM Area
+ * @write_bufferram:	[REPLACEABLE] hardware specific function for BufferRAM Area
+ * @read_word:		[REPLACEABLE] hardware specific function for read
+ *			register of OneNAND
+ * @write_word:		[REPLACEABLE] hardware specific function for write
+ *			register of OneNAND
+ * @mmcontrol:		sync burst read function
+ * @chip_probe:		[REPLACEABLE] hardware specific function for chip probe
+ * @block_markbad:	function to mark a block as bad
+ * @scan_bbt:		[REPLACEABLE] hardware specific function for scanning
+ *			the Bad Block Table
+ * @chip_lock:		[INTERN] spinlock used to protect access to this
+ *			structure and the chip
+ * @wq:			[INTERN] wait queue to sleep on if a OneNAND
+ *			operation is in progress
+ * @state:		[INTERN] the current state of the OneNAND device
+ * @page_buf:		[INTERN] page main data buffer
+ * @oob_buf:		[INTERN] page oob data buffer
+ * @subpagesize:	[INTERN] holds the subpagesize
+ * @bbm:		[REPLACEABLE] pointer to Bad Block Management
+ * @priv:		[OPTIONAL] pointer to private chip data
+ */
+struct onenand_chip {
+	void __iomem		*base;
+	unsigned		dies;
+	unsigned		boundary[MAX_DIES];
+	loff_t			diesize[MAX_DIES];
+	unsigned int		chipsize;
+	unsigned int		device_id;
+	unsigned int		version_id;
+	unsigned int		technology;
+	unsigned int		density_mask;
+	unsigned int		options;
+
+	unsigned int		erase_shift;
+	unsigned int		page_shift;
+	unsigned int		page_mask;
+	unsigned int		writesize;
+
+	unsigned int		bufferram_index;
+	struct onenand_bufferram	bufferram[MAX_BUFFERRAM];
+
+	int (*command)(struct mtd_info *mtd, int cmd, loff_t address, size_t len);
+	int (*wait)(struct mtd_info *mtd, int state);
+	int (*bbt_wait)(struct mtd_info *mtd, int state);
+	void (*unlock_all)(struct mtd_info *mtd);
+	int (*read_bufferram)(struct mtd_info *mtd, int area,
+			unsigned char *buffer, int offset, size_t count);
+	int (*write_bufferram)(struct mtd_info *mtd, int area,
+			const unsigned char *buffer, int offset, size_t count);
+	unsigned short (*read_word)(void __iomem *addr);
+	void (*write_word)(unsigned short value, void __iomem *addr);
+	void (*mmcontrol)(struct mtd_info *mtd, int sync_read);
+	int (*chip_probe)(struct mtd_info *mtd);
+	int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
+	int (*scan_bbt)(struct mtd_info *mtd);
+	int (*enable)(struct mtd_info *mtd);
+	int (*disable)(struct mtd_info *mtd);
+
+	struct completion	complete;
+	int			irq;
+
+	spinlock_t		chip_lock;
+	wait_queue_head_t	wq;
+	flstate_t		state;
+	unsigned char		*page_buf;
+	unsigned char		*oob_buf;
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+	unsigned char		*verify_buf;
+#endif
+
+	int			subpagesize;
+
+	void			*bbm;
+
+	void			*priv;
+
+	/*
+	 * Shows that the current operation is composed
+	 * of sequence of commands. For example, cache program.
+	 * Such command status OnGo bit is checked at the end of
+	 * sequence.
+	 */
+	unsigned int		ongoing;
+};
+
+/*
+ * Helper macros
+ */
+#define ONENAND_PAGES_PER_BLOCK        (1<<6)
+
+#define ONENAND_CURRENT_BUFFERRAM(this)		(this->bufferram_index)
+#define ONENAND_NEXT_BUFFERRAM(this)		(this->bufferram_index ^ 1)
+#define ONENAND_SET_NEXT_BUFFERRAM(this)	(this->bufferram_index ^= 1)
+#define ONENAND_SET_PREV_BUFFERRAM(this)	(this->bufferram_index ^= 1)
+#define ONENAND_SET_BUFFERRAM0(this)		(this->bufferram_index = 0)
+#define ONENAND_SET_BUFFERRAM1(this)		(this->bufferram_index = 1)
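+
+/*
+ * Illustrative note (not part of the original header): OneNAND has two
+ * DataRAM buffers, so read-while-load code fills the "next" BufferRAM while
+ * data is copied out of the "current" one, then flips the index with
+ * ONENAND_SET_NEXT_BUFFERRAM() once the load completes.
+ */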
+
+#define FLEXONENAND(this)						\
+	(this->device_id & DEVICE_IS_FLEXONENAND)
+#define ONENAND_GET_SYS_CFG1(this)					\
+	(this->read_word(this->base + ONENAND_REG_SYS_CFG1))
+#define ONENAND_SET_SYS_CFG1(v, this)					\
+	(this->write_word(v, this->base + ONENAND_REG_SYS_CFG1))
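+
+/*
+ * Illustrative sketch (not part of the original header): a typical
+ * read-modify-write of System Configuration 1 through the accessors above,
+ * here enabling synchronous burst reads (ONENAND_SYS_CFG1_SYNC_READ comes
+ * from onenand_regs.h).
+ */
+#if 0	/* example only */
+static void onenand_example_enable_sync_read(struct onenand_chip *this)
+{
+	unsigned short cfg = ONENAND_GET_SYS_CFG1(this);
+
+	cfg |= ONENAND_SYS_CFG1_SYNC_READ;
+	ONENAND_SET_SYS_CFG1(cfg, this);
+}
+#endif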
+
+#define ONENAND_IS_DDP(this)						\
+	(this->device_id & ONENAND_DEVICE_IS_DDP)
+
+#define ONENAND_IS_MLC(this)						\
+	(this->technology & ONENAND_TECHNOLOGY_IS_MLC)
+
+#ifdef CONFIG_MTD_ONENAND_2X_PROGRAM
+#define ONENAND_IS_2PLANE(this)						\
+	(this->options & ONENAND_HAS_2PLANE)
+#else
+#define ONENAND_IS_2PLANE(this)			(0)
+#endif
+
+#define ONENAND_IS_CACHE_PROGRAM(this)					\
+	(this->options & ONENAND_HAS_CACHE_PROGRAM)
+
+#define ONENAND_IS_NOP_1(this)						\
+	(this->options & ONENAND_HAS_NOP_1)
+
+/* Check byte access in OneNAND */
+#define ONENAND_CHECK_BYTE_ACCESS(addr)		(addr & 0x1)
+
+/*
+ * Options bits
+ */
+#define ONENAND_HAS_CONT_LOCK		(0x0001)
+#define ONENAND_HAS_UNLOCK_ALL		(0x0002)
+#define ONENAND_HAS_2PLANE		(0x0004)
+#define ONENAND_HAS_4KB_PAGE		(0x0008)
+#define ONENAND_HAS_CACHE_PROGRAM	(0x0010)
+#define ONENAND_HAS_NOP_1		(0x0020)
+#define ONENAND_SKIP_UNLOCK_CHECK	(0x0100)
+#define ONENAND_PAGEBUF_ALLOC		(0x1000)
+#define ONENAND_OOBBUF_ALLOC		(0x2000)
+#define ONENAND_SKIP_INITIAL_UNLOCKING	(0x4000)
+
+#define ONENAND_IS_4KB_PAGE(this)					\
+	(this->options & ONENAND_HAS_4KB_PAGE)
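+
+/*
+ * Illustrative note (not part of the original header): drivers OR the
+ * ONENAND_HAS_* bits into this->options at probe time and later test them
+ * through the helpers above, e.g.
+ *
+ *	if (ONENAND_IS_4KB_PAGE(this))
+ *		mtd->writesize = 4096;
+ */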
+
+/*
+ * OneNAND Flash Manufacturer ID Codes
+ */
+#define ONENAND_MFR_SAMSUNG	0xec
+#define ONENAND_MFR_NUMONYX	0x20
+
+/**
+ * struct onenand_manufacturers - OneNAND Flash Manufacturer ID Structure
+ * @id:		manufacturer ID code of device.
+ * @name:	Manufacturer name
+ */
+struct onenand_manufacturers {
+	int id;
+	char *name;
+};
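+
+/*
+ * Illustrative sketch (not part of the original header): a lookup table
+ * built from the manufacturer ID codes above.
+ */
+#if 0	/* example only */
+static const struct onenand_manufacturers onenand_example_manuf_ids[] = {
+	{ ONENAND_MFR_SAMSUNG, "Samsung" },
+	{ ONENAND_MFR_NUMONYX, "Numonyx" },
+};
+#endif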
+
+int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
+			 struct mtd_oob_ops *ops);
+unsigned onenand_block(struct onenand_chip *this, loff_t addr);
+loff_t onenand_addr(struct onenand_chip *this, int block);
+int flexonenand_region(struct mtd_info *mtd, loff_t addr);
+
+struct mtd_partition;
+
+struct onenand_platform_data {
+	void		(*mmcontrol)(struct mtd_info *mtd, int sync_read);
+	int		(*read_bufferram)(struct mtd_info *mtd, int area,
+			unsigned char *buffer, int offset, size_t count);
+	struct mtd_partition *parts;
+	unsigned int	nr_parts;
+};
+
+#endif	/* __LINUX_MTD_ONENAND_H */
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
new file mode 100644
index 0000000..d60130f
--- /dev/null
+++ b/include/linux/mtd/onenand_regs.h
@@ -0,0 +1,223 @@
+/*
+ *  linux/include/linux/mtd/onenand_regs.h
+ *
+ *  OneNAND Register header file
+ *
+ *  Copyright (C) 2005-2007 Samsung Electronics
+ *  Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ONENAND_REG_H
+#define __ONENAND_REG_H
+
+/* Memory Address Map Translation (Word order) */
+#define ONENAND_MEMORY_MAP(x)		((x) << 1)
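+
+/*
+ * Illustrative note (not part of the original header): register offsets in
+ * the datasheet are given in 16-bit word units, and ONENAND_MEMORY_MAP()
+ * turns them into byte offsets; e.g. word address 0xF221 becomes byte offset
+ * 0x1E442, which is what gets added to the chip base address.
+ */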
+
+/*
+ * External BufferRAM area
+ */
+#define	ONENAND_BOOTRAM			ONENAND_MEMORY_MAP(0x0000)
+#define	ONENAND_DATARAM			ONENAND_MEMORY_MAP(0x0200)
+#define	ONENAND_SPARERAM		ONENAND_MEMORY_MAP(0x8010)
+
+/*
+ * OneNAND Registers
+ */
+#define ONENAND_REG_MANUFACTURER_ID	ONENAND_MEMORY_MAP(0xF000)
+#define ONENAND_REG_DEVICE_ID		ONENAND_MEMORY_MAP(0xF001)
+#define ONENAND_REG_VERSION_ID		ONENAND_MEMORY_MAP(0xF002)
+#define ONENAND_REG_DATA_BUFFER_SIZE	ONENAND_MEMORY_MAP(0xF003)
+#define ONENAND_REG_BOOT_BUFFER_SIZE	ONENAND_MEMORY_MAP(0xF004)
+#define ONENAND_REG_NUM_BUFFERS		ONENAND_MEMORY_MAP(0xF005)
+#define ONENAND_REG_TECHNOLOGY		ONENAND_MEMORY_MAP(0xF006)
+
+#define ONENAND_REG_START_ADDRESS1	ONENAND_MEMORY_MAP(0xF100)
+#define ONENAND_REG_START_ADDRESS2	ONENAND_MEMORY_MAP(0xF101)
+#define ONENAND_REG_START_ADDRESS3	ONENAND_MEMORY_MAP(0xF102)
+#define ONENAND_REG_START_ADDRESS4	ONENAND_MEMORY_MAP(0xF103)
+#define ONENAND_REG_START_ADDRESS5	ONENAND_MEMORY_MAP(0xF104)
+#define ONENAND_REG_START_ADDRESS6	ONENAND_MEMORY_MAP(0xF105)
+#define ONENAND_REG_START_ADDRESS7	ONENAND_MEMORY_MAP(0xF106)
+#define ONENAND_REG_START_ADDRESS8	ONENAND_MEMORY_MAP(0xF107)
+
+#define ONENAND_REG_START_BUFFER	ONENAND_MEMORY_MAP(0xF200)
+#define ONENAND_REG_COMMAND		ONENAND_MEMORY_MAP(0xF220)
+#define ONENAND_REG_SYS_CFG1		ONENAND_MEMORY_MAP(0xF221)
+#define ONENAND_REG_SYS_CFG2		ONENAND_MEMORY_MAP(0xF222)
+#define ONENAND_REG_CTRL_STATUS		ONENAND_MEMORY_MAP(0xF240)
+#define ONENAND_REG_INTERRUPT		ONENAND_MEMORY_MAP(0xF241)
+#define ONENAND_REG_START_BLOCK_ADDRESS	ONENAND_MEMORY_MAP(0xF24C)
+#define ONENAND_REG_END_BLOCK_ADDRESS	ONENAND_MEMORY_MAP(0xF24D)
+#define ONENAND_REG_WP_STATUS		ONENAND_MEMORY_MAP(0xF24E)
+
+#define ONENAND_REG_ECC_STATUS		ONENAND_MEMORY_MAP(0xFF00)
+#define ONENAND_REG_ECC_M0		ONENAND_MEMORY_MAP(0xFF01)
+#define ONENAND_REG_ECC_S0		ONENAND_MEMORY_MAP(0xFF02)
+#define ONENAND_REG_ECC_M1		ONENAND_MEMORY_MAP(0xFF03)
+#define ONENAND_REG_ECC_S1		ONENAND_MEMORY_MAP(0xFF04)
+#define ONENAND_REG_ECC_M2		ONENAND_MEMORY_MAP(0xFF05)
+#define ONENAND_REG_ECC_S2		ONENAND_MEMORY_MAP(0xFF06)
+#define ONENAND_REG_ECC_M3		ONENAND_MEMORY_MAP(0xFF07)
+#define ONENAND_REG_ECC_S3		ONENAND_MEMORY_MAP(0xFF08)
+
+/*
+ * Device ID Register F001h (R)
+ */
+#define DEVICE_IS_FLEXONENAND		(1 << 9)
+#define FLEXONENAND_PI_MASK		(0x3ff)
+#define FLEXONENAND_PI_UNLOCK_SHIFT	(14)
+#define ONENAND_DEVICE_DENSITY_MASK	(0xf)
+#define ONENAND_DEVICE_DENSITY_SHIFT	(4)
+#define ONENAND_DEVICE_IS_DDP		(1 << 3)
+#define ONENAND_DEVICE_IS_DEMUX		(1 << 2)
+#define ONENAND_DEVICE_VCC_MASK		(0x3)
+
+#define ONENAND_DEVICE_DENSITY_512Mb	(0x002)
+#define ONENAND_DEVICE_DENSITY_1Gb	(0x003)
+#define ONENAND_DEVICE_DENSITY_2Gb	(0x004)
+#define ONENAND_DEVICE_DENSITY_4Gb	(0x005)
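+
+/*
+ * Illustrative sketch (not part of the original header): extracting the
+ * density field from a Device ID register value with the masks above.
+ */
+#if 0	/* example only */
+static inline unsigned int onenand_example_density(unsigned int device_id)
+{
+	return (device_id >> ONENAND_DEVICE_DENSITY_SHIFT) &
+		ONENAND_DEVICE_DENSITY_MASK;
+}
+#endif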
+
+/*
+ * Version ID Register F002h (R)
+ */
+#define ONENAND_VERSION_PROCESS_SHIFT	(8)
+
+/*
+ * Technology Register F006h (R)
+ */
+#define ONENAND_TECHNOLOGY_IS_MLC	(1 << 0)
+
+/*
+ * Start Address 1 F100h (R/W) & Start Address 2 F101h (R/W)
+ */
+#define ONENAND_DDP_SHIFT		(15)
+#define ONENAND_DDP_CHIP0		(0)
+#define ONENAND_DDP_CHIP1		(1 << ONENAND_DDP_SHIFT)
+
+/*
+ * Start Address 8 F107h (R/W)
+ */
+/* Note: It's actually 0x3f in case of SLC */
+#define ONENAND_FPA_MASK		(0x7f)
+#define ONENAND_FPA_SHIFT		(2)
+#define ONENAND_FSA_MASK		(0x03)
+
+/*
+ * Start Buffer Register F200h (R/W)
+ */
+#define ONENAND_BSA_MASK		(0x03)
+#define ONENAND_BSA_SHIFT		(8)
+#define ONENAND_BSA_BOOTRAM		(0 << 2)
+#define ONENAND_BSA_DATARAM0		(2 << 2)
+#define ONENAND_BSA_DATARAM1		(3 << 2)
+/* Note: It's actually 0x03 in case of SLC */
+#define ONENAND_BSC_MASK		(0x07)
+
+/*
+ * Command Register F220h (R/W)
+ */
+#define ONENAND_CMD_READ		(0x00)
+#define ONENAND_CMD_READOOB		(0x13)
+#define ONENAND_CMD_PROG		(0x80)
+#define ONENAND_CMD_PROGOOB		(0x1A)
+#define ONENAND_CMD_2X_PROG		(0x7D)
+#define ONENAND_CMD_2X_CACHE_PROG	(0x7F)
+#define ONENAND_CMD_UNLOCK		(0x23)
+#define ONENAND_CMD_LOCK		(0x2A)
+#define ONENAND_CMD_LOCK_TIGHT		(0x2C)
+#define ONENAND_CMD_UNLOCK_ALL		(0x27)
+#define ONENAND_CMD_ERASE		(0x94)
+#define ONENAND_CMD_MULTIBLOCK_ERASE	(0x95)
+#define ONENAND_CMD_ERASE_VERIFY	(0x71)
+#define ONENAND_CMD_RESET		(0xF0)
+#define ONENAND_CMD_OTP_ACCESS		(0x65)
+#define ONENAND_CMD_READID		(0x90)
+#define FLEXONENAND_CMD_PI_UPDATE	(0x05)
+#define FLEXONENAND_CMD_PI_ACCESS	(0x66)
+#define FLEXONENAND_CMD_RECOVER_LSB	(0x05)
+
+/* NOTE: Those are not *REAL* commands */
+#define ONENAND_CMD_BUFFERRAM		(0x1978)
+#define FLEXONENAND_CMD_READ_PI		(0x1985)
+
+/*
+ * System Configuration 1 Register F221h (R, R/W)
+ */
+#define ONENAND_SYS_CFG1_SYNC_READ	(1 << 15)
+#define ONENAND_SYS_CFG1_BRL_7		(7 << 12)
+#define ONENAND_SYS_CFG1_BRL_6		(6 << 12)
+#define ONENAND_SYS_CFG1_BRL_5		(5 << 12)
+#define ONENAND_SYS_CFG1_BRL_4		(4 << 12)
+#define ONENAND_SYS_CFG1_BRL_3		(3 << 12)
+#define ONENAND_SYS_CFG1_BRL_10		(2 << 12)
+#define ONENAND_SYS_CFG1_BRL_9		(1 << 12)
+#define ONENAND_SYS_CFG1_BRL_8		(0 << 12)
+#define ONENAND_SYS_CFG1_BRL_SHIFT	(12)
+#define ONENAND_SYS_CFG1_BL_32		(4 << 9)
+#define ONENAND_SYS_CFG1_BL_16		(3 << 9)
+#define ONENAND_SYS_CFG1_BL_8		(2 << 9)
+#define ONENAND_SYS_CFG1_BL_4		(1 << 9)
+#define ONENAND_SYS_CFG1_BL_CONT	(0 << 9)
+#define ONENAND_SYS_CFG1_BL_SHIFT	(9)
+#define ONENAND_SYS_CFG1_NO_ECC		(1 << 8)
+#define ONENAND_SYS_CFG1_RDY		(1 << 7)
+#define ONENAND_SYS_CFG1_INT		(1 << 6)
+#define ONENAND_SYS_CFG1_IOBE		(1 << 5)
+#define ONENAND_SYS_CFG1_RDY_CONF	(1 << 4)
+#define ONENAND_SYS_CFG1_VHF		(1 << 3)
+#define ONENAND_SYS_CFG1_HF		(1 << 2)
+#define ONENAND_SYS_CFG1_SYNC_WRITE	(1 << 1)
+
+/*
+ * Controller Status Register F240h (R)
+ */
+#define ONENAND_CTRL_ONGO		(1 << 15)
+#define ONENAND_CTRL_LOCK		(1 << 14)
+#define ONENAND_CTRL_LOAD		(1 << 13)
+#define ONENAND_CTRL_PROGRAM		(1 << 12)
+#define ONENAND_CTRL_ERASE		(1 << 11)
+#define ONENAND_CTRL_ERROR		(1 << 10)
+#define ONENAND_CTRL_RSTB		(1 << 7)
+#define ONENAND_CTRL_OTP_L		(1 << 6)
+#define ONENAND_CTRL_OTP_BL		(1 << 5)
+
+/*
+ * Interrupt Status Register F241h (R)
+ */
+#define ONENAND_INT_MASTER		(1 << 15)
+#define ONENAND_INT_READ		(1 << 7)
+#define ONENAND_INT_WRITE		(1 << 6)
+#define ONENAND_INT_ERASE		(1 << 5)
+#define ONENAND_INT_RESET		(1 << 4)
+#define ONENAND_INT_CLEAR		(0 << 0)
+
+/*
+ * NAND Flash Write Protection Status Register F24Eh (R)
+ */
+#define ONENAND_WP_US			(1 << 2)
+#define ONENAND_WP_LS			(1 << 1)
+#define ONENAND_WP_LTS			(1 << 0)
+
+/*
+ * ECC Status Register FF00h (R)
+ */
+#define ONENAND_ECC_1BIT		(1 << 0)
+#define ONENAND_ECC_1BIT_ALL		(0x5555)
+#define ONENAND_ECC_2BIT		(1 << 1)
+#define ONENAND_ECC_2BIT_ALL		(0xAAAA)
+#define FLEXONENAND_UNCORRECTABLE_ERROR	(0x1010)
+#define ONENAND_ECC_3BIT		(1 << 2)
+#define ONENAND_ECC_4BIT		(1 << 3)
+#define ONENAND_ECC_4BIT_UNCORRECTABLE	(0x1010)
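+
+/*
+ * Illustrative sketch (not part of the original header): interpreting an ECC
+ * Status register value with the masks above. The SLC bit layout and the
+ * -EBADMSG convention (from <linux/errno.h>) are assumptions of the example.
+ */
+#if 0	/* example only */
+static int onenand_example_ecc_check(unsigned short ecc)
+{
+	if (ecc & ONENAND_ECC_2BIT_ALL)
+		return -EBADMSG;	/* uncorrectable error in some sector */
+
+	return (ecc & ONENAND_ECC_1BIT_ALL) ? 1 : 0;	/* corrected bitflips? */
+}
+#endif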
+
+/*
+ * One-Time Programmable (OTP)
+ */
+#define FLEXONENAND_OTP_LOCK_OFFSET		(2048)
+#define ONENAND_OTP_LOCK_OFFSET		(14)
+
+#endif	/* __ONENAND_REG_H */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
new file mode 100644
index 0000000..11cb0c5
--- /dev/null
+++ b/include/linux/mtd/partitions.h
@@ -0,0 +1,114 @@
+/*
+ * MTD partitioning layer definitions
+ *
+ * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
+ *
+ * This code is GPL
+ */
+
+#ifndef MTD_PARTITIONS_H
+#define MTD_PARTITIONS_H
+
+#include <linux/types.h>
+
+
+/*
+ * Partition definition structure:
+ *
+ * An array of struct mtd_partition is passed along with an MTD object to
+ * mtd_device_register() to create them.
+ *
+ * For each partition, these fields are available:
+ * name: string that will be used to label the partition's MTD device.
+ * types: some partitions can be containers using a specific format to describe
+ *	embedded subpartitions / volumes. E.g. many home routers use a "firmware"
+ *	partition that contains at least a kernel and a rootfs. In such a case an
+ *	extra parser is needed that will detect these dynamic partitions and
+ *	report them to the MTD subsystem. If set, this property stores an array
+ *	of parser names to use when looking for subpartitions.
+ * size: the partition size; if defined as MTDPART_SIZ_FULL, the partition
+ * 	will extend to the end of the master MTD device.
+ * offset: absolute starting position within the master MTD device; if
+ * 	defined as MTDPART_OFS_APPEND, the partition will start where the
+ *	previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block;
+ *	if MTDPART_OFS_RETAIN, consume as much as possible, leaving 'size'
+ *	bytes free after the end of the partition.
+ * mask_flags: contains flags that have to be masked (removed) from the
+ * 	master MTD flag set for the corresponding MTD partition.
+ * 	For example, to force a read-only partition, simply adding
+ * 	MTD_WRITEABLE to the mask_flags will do the trick.
+ *
+ * Note: writeable partitions require their size and offset to be
+ * erasesize aligned (e.g. use MTDPART_OFS_NXTBLK).
+ */
+
+struct mtd_partition {
+	const char *name;		/* identifier string */
+	const char *const *types;	/* names of parsers to use if any */
+	uint64_t size;			/* partition size */
+	uint64_t offset;		/* offset within the master MTD space */
+	uint32_t mask_flags;		/* master MTD flags to mask out for this partition */
+	struct device_node *of_node;
+};
+
+#define MTDPART_OFS_RETAIN	(-3)
+#define MTDPART_OFS_NXTBLK	(-2)
+#define MTDPART_OFS_APPEND	(-1)
+#define MTDPART_SIZ_FULL	(0)
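+
+/*
+ * Illustrative sketch (not part of the original header): a minimal static
+ * partition table. The names, the 1 MiB bootloader size (SZ_1M is assumed to
+ * come from <linux/sizes.h>) and MTD_WRITEABLE (from the MTD flags) are
+ * example values only.
+ */
+#if 0	/* example only */
+static const struct mtd_partition example_parts[] = {
+	{
+		.name		= "bootloader",
+		.offset		= 0,
+		.size		= SZ_1M,
+		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
+	},
+	{
+		.name		= "rootfs",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= MTDPART_SIZ_FULL,
+	},
+};
+#endif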
+
+
+struct mtd_info;
+struct device_node;
+
+/**
+ * struct mtd_part_parser_data - used to pass data to MTD partition parsers.
+ * @origin: for RedBoot, start address of MTD device
+ */
+struct mtd_part_parser_data {
+	unsigned long origin;
+};
+
+
+/*
+ * Functions dealing with the various ways of partitioning the space
+ */
+
+struct mtd_part_parser {
+	struct list_head list;
+	struct module *owner;
+	const char *name;
+	const struct of_device_id *of_match_table;
+	int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
+			struct mtd_part_parser_data *);
+	void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
+};
+
+/* Container for passing around a set of parsed partitions */
+struct mtd_partitions {
+	const struct mtd_partition *parts;
+	int nr_parts;
+	const struct mtd_part_parser *parser;
+};
+
+extern int __register_mtd_parser(struct mtd_part_parser *parser,
+				 struct module *owner);
+#define register_mtd_parser(parser) __register_mtd_parser(parser, THIS_MODULE)
+
+extern void deregister_mtd_parser(struct mtd_part_parser *parser);
+
+/*
+ * module_mtd_part_parser() - Helper macro for MTD partition parsers that don't
+ * do anything special in module init/exit. Each driver may only use this macro
+ * once, and calling it replaces module_init() and module_exit().
+ */
+#define module_mtd_part_parser(__mtd_part_parser) \
+	module_driver(__mtd_part_parser, register_mtd_parser, \
+		      deregister_mtd_parser)
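+
+/*
+ * Illustrative sketch (not part of the original header): the overall shape of
+ * a trivial partition parser. The function and parser names are placeholders.
+ */
+#if 0	/* example only */
+static int example_parse_fn(struct mtd_info *mtd,
+			    const struct mtd_partition **pparts,
+			    struct mtd_part_parser_data *data)
+{
+	/* allocate and fill *pparts here, return the number of partitions */
+	return 0;
+}
+
+static struct mtd_part_parser example_parser = {
+	.name		= "example",
+	.parse_fn	= example_parse_fn,
+};
+module_mtd_part_parser(example_parser);
+#endif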
+
+int mtd_is_partition(const struct mtd_info *mtd);
+int mtd_add_partition(struct mtd_info *master, const char *name,
+		      long long offset, long long length);
+int mtd_del_partition(struct mtd_info *master, int partno);
+uint64_t mtd_get_device_size(const struct mtd_info *mtd);
+
+#endif
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h
new file mode 100644
index 0000000..122f343
--- /dev/null
+++ b/include/linux/mtd/pfow.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Primary function overlay window definitions
+ * and service functions used by LPDDR chips
+ */
+#ifndef __LINUX_MTD_PFOW_H
+#define __LINUX_MTD_PFOW_H
+
+#include <linux/mtd/qinfo.h>
+
+/* PFOW registers addressing */
+/* Address of symbol "P" */
+#define PFOW_QUERY_STRING_P			0x0000
+/* Address of symbol "F" */
+#define PFOW_QUERY_STRING_F			0x0002
+/* Address of symbol "O" */
+#define PFOW_QUERY_STRING_O			0x0004
+/* Address of symbol "W" */
+#define PFOW_QUERY_STRING_W			0x0006
+/* Identification info for LPDDR chip */
+#define PFOW_MANUFACTURER_ID			0x0020
+#define PFOW_DEVICE_ID				0x0022
+/* Address in PFOW where prog buffer can be found */
+#define PFOW_PROGRAM_BUFFER_OFFSET		0x0040
+/* Size of program buffer in words */
+#define PFOW_PROGRAM_BUFFER_SIZE		0x0042
+/* Address command code register */
+#define PFOW_COMMAND_CODE			0x0080
+/* command data register */
+#define PFOW_COMMAND_DATA			0x0084
+/* command address register lower address bits */
+#define PFOW_COMMAND_ADDRESS_L			0x0088
+/* command address register upper address bits */
+#define PFOW_COMMAND_ADDRESS_H			0x008a
+/* number of bytes to be programmed lower address bits */
+#define PFOW_DATA_COUNT_L			0x0090
+/* number of bytes to be programmed upper address bits */
+#define PFOW_DATA_COUNT_H			0x0092
+/* command execution register, the only possible value is 0x01 */
+#define PFOW_COMMAND_EXECUTE			0x00c0
+/* 0x01 should be written at this address to clear buffer */
+#define PFOW_CLEAR_PROGRAM_BUFFER		0x00c4
+/* device program/erase suspend register */
+#define PFOW_PROGRAM_ERASE_SUSPEND		0x00c8
+/* device status register */
+#define PFOW_DSR				0x00cc
+
+/* LPDDR memory device command codes */
+/* They are possible values of PFOW command code register */
+#define LPDDR_WORD_PROGRAM		0x0041
+#define LPDDR_BUFF_PROGRAM		0x00E9
+#define LPDDR_BLOCK_ERASE		0x0020
+#define LPDDR_LOCK_BLOCK		0x0061
+#define LPDDR_UNLOCK_BLOCK		0x0062
+#define LPDDR_READ_BLOCK_LOCK_STATUS	0x0065
+#define LPDDR_INFO_QUERY		0x0098
+#define LPDDR_READ_OTP			0x0097
+#define LPDDR_PROG_OTP			0x00C0
+#define LPDDR_RESUME			0x00D0
+
+/* Defines possible value of PFOW command execution register */
+#define LPDDR_START_EXECUTION			0x0001
+
+/* Defines possible value of PFOW program/erase suspend register */
+#define LPDDR_SUSPEND				0x0001
+
+/* Possible values of PFOW device status register */
+/* access R - read; RC read & clearable */
+#define DSR_DPS			(1<<1) /* RC; device protect status
+					* 0 - not protected, 1 - locked */
+#define DSR_PSS			(1<<2) /* R; program suspend status;
+					* 0 - prog in progress/completed,
+					* 1 - prog suspended */
+#define DSR_VPPS		(1<<3) /* RC; 0 - Vpp OK, 1 - Vpp low */
+#define DSR_PROGRAM_STATUS	(1<<4) /* RC; 0 - successful, 1 - error */
+#define DSR_ERASE_STATUS	(1<<5) /* RC; erase or blank check status;
+					* 0 - successful erase/blank check,
+					* 1 - erase/blank check error */
+#define DSR_ESS			(1<<6) /* R; erase suspend status;
+					* 0 - erase in progress/complete,
+					* 1 - erase suspended */
+#define DSR_READY_STATUS	(1<<7) /* R; device status
+					* 0 - busy,
+					* 1 - ready */
+#define DSR_RPS			(0x3<<8) /* RC; region program status
+					* 00 - success,
+					* 01 - re-program attempt in region with
+					* object mode data,
+					* 10 - object mode program attempt in
+					* region with control mode data,
+					* 11 - attempt to program invalid half
+					* with 0x41 command */
+#define DSR_AOS			(1<<12) /* RC; 1 - AO related failure */
+#define DSR_AVAILABLE		(1<<15) /* R; device availability
+					* 1 - device available,
+					* 0 - not available */
+
+/* The superset of all possible error bits in DSR */
+#define DSR_ERR			0x133A
+
+static inline void send_pfow_command(struct map_info *map,
+				unsigned long cmd_code, unsigned long adr,
+				unsigned long len, map_word *datum)
+{
+	int bits_per_chip = map_bankwidth(map) * 8;
+
+	map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE);
+	map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)),
+				map->pfow_base + PFOW_COMMAND_ADDRESS_L);
+	map_write(map, CMD(adr>>bits_per_chip),
+				map->pfow_base + PFOW_COMMAND_ADDRESS_H);
+	if (len) {
+		map_write(map, CMD(len & ((1<<bits_per_chip) - 1)),
+					map->pfow_base + PFOW_DATA_COUNT_L);
+		map_write(map, CMD(len>>bits_per_chip),
+					map->pfow_base + PFOW_DATA_COUNT_H);
+	}
+	if (datum)
+		map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA);
+
+	/* Command execution start */
+	map_write(map, CMD(LPDDR_START_EXECUTION),
+			map->pfow_base + PFOW_COMMAND_EXECUTE);
+}
+
+static inline void print_drs_error(unsigned dsr)
+{
+	int prog_status = (dsr & DSR_RPS) >> 8;
+
+	if (!(dsr & DSR_AVAILABLE))
+		printk(KERN_NOTICE"DSR.15: (0) Device not Available\n");
+	if (prog_status & 0x03)
+		printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid "
+						"half with 41h command\n");
+	else if (prog_status & 0x02)
+		printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt "
+					"in region with Control Mode data\n");
+	else if (prog_status &  0x01)
+		printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region "
+						"with Object Mode data\n");
+	if (!(dsr & DSR_READY_STATUS))
+		printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n");
+	if (dsr & DSR_ESS)
+		printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n");
+	if (dsr & DSR_ERASE_STATUS)
+		printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n");
+	if (dsr & DSR_PROGRAM_STATUS)
+		printk(KERN_NOTICE"DSR.4: (1) Program Error\n");
+	if (dsr & DSR_VPPS)
+		printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation "
+					"aborted\n");
+	if (dsr & DSR_PSS)
+		printk(KERN_NOTICE"DSR.2: (1) Program suspended\n");
+	if (dsr & DSR_DPS)
+		printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt "
+					"on locked block\n");
+}
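+
+/*
+ * Illustrative sketch (not part of the original header): issuing a single
+ * word program through the overlay window and then checking the DSR with the
+ * helpers above. The polling loop is simplified (no timeout or cpu_relax()).
+ */
+#if 0	/* example only */
+static int lpddr_example_word_program(struct map_info *map, unsigned long adr,
+				      map_word datum)
+{
+	map_word dsr;
+
+	send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 1, &datum);
+
+	do {
+		dsr = map_read(map, map->pfow_base + PFOW_DSR);
+	} while (!(CMDVAL(dsr) & DSR_READY_STATUS));
+
+	if (CMDVAL(dsr) & DSR_ERR) {
+		print_drs_error(CMDVAL(dsr));
+		return -EIO;
+	}
+	return 0;
+}
+#endif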
+#endif /* __LINUX_MTD_PFOW_H */
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
new file mode 100644
index 0000000..aa6a263
--- /dev/null
+++ b/include/linux/mtd/physmap.h
@@ -0,0 +1,36 @@
+/*
+ * For boards with physically mapped flash and using
+ * drivers/mtd/maps/physmap.c mapping driver.
+ *
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MTD_PHYSMAP__
+#define __LINUX_MTD_PHYSMAP__
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+struct map_info;
+struct platform_device;
+
+struct physmap_flash_data {
+	unsigned int		width;
+	int			(*init)(struct platform_device *);
+	void			(*exit)(struct platform_device *);
+	void			(*set_vpp)(struct platform_device *, int);
+	unsigned int		nr_parts;
+	unsigned int		pfow_base;
+	char                    *probe_type;
+	struct mtd_partition	*parts;
+	const char * const	*part_probe_types;
+};
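+
+/*
+ * Illustrative sketch (not part of the original header): board code usually
+ * fills this structure in and hands it to the physmap platform device. The
+ * example_board_parts array is assumed to be defined elsewhere.
+ */
+#if 0	/* example only */
+static struct physmap_flash_data example_flash_data = {
+	.width		= 2,			/* 16-bit wide bus */
+	.parts		= example_board_parts,	/* struct mtd_partition array */
+	.nr_parts	= ARRAY_SIZE(example_board_parts),
+};
+#endif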
+
+#endif /* __LINUX_MTD_PHYSMAP__ */
diff --git a/include/linux/mtd/pismo.h b/include/linux/mtd/pismo.h
new file mode 100644
index 0000000..8dfb7e1
--- /dev/null
+++ b/include/linux/mtd/pismo.h
@@ -0,0 +1,17 @@
+/*
+ * PISMO memory driver - http://www.pismoworld.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+#ifndef __LINUX_MTD_PISMO_H
+#define __LINUX_MTD_PISMO_H
+
+struct pismo_pdata {
+	void			(*set_vpp)(void *, int);
+	void			*vpp_data;
+	phys_addr_t		cs_addrs[5];
+};
+
+#endif
diff --git a/include/linux/mtd/plat-ram.h b/include/linux/mtd/plat-ram.h
new file mode 100644
index 0000000..44212d6
--- /dev/null
+++ b/include/linux/mtd/plat-ram.h
@@ -0,0 +1,34 @@
+/* linux/include/linux/mtd/plat-ram.h
+ *
+ * (c) 2004 Simtec Electronics
+ *	http://www.simtec.co.uk/products/SWLINUX/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * Generic platform device based RAM map
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_MTD_PLATRAM_H
+#define __LINUX_MTD_PLATRAM_H __FILE__
+
+#define PLATRAM_RO (0)
+#define PLATRAM_RW (1)
+
+struct platdata_mtd_ram {
+	const char		*mapname;
+	const char * const      *map_probes;
+	const char * const      *probes;
+	struct mtd_partition	*partitions;
+	int			 nr_partitions;
+	int			 bankwidth;
+
+	/* control callbacks */
+
+	void	(*set_rw)(struct device *dev, int to);
+};
+
+#endif /* __LINUX_MTD_PLATRAM_H */
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
new file mode 100644
index 0000000..df5b9fd
--- /dev/null
+++ b/include/linux/mtd/qinfo.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MTD_QINFO_H
+#define __LINUX_MTD_QINFO_H
+
+#include <linux/mtd/map.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/partitions.h>
+
+/* lpddr_private describes lpddr flash chip in memory map
+ * @ManufactId - Chip Manufacture ID
+ * @DevId - Chip Device ID
+ * @qinfo - pointer to qinfo records describing the chip
+ * @numchips - number of chips including virtual RWW partitions
+ * @chipshift - Chip/partition size 2^chipshift
+ * @chips - per-chip data structure
+ */
+struct lpddr_private {
+	uint16_t ManufactId;
+	uint16_t DevId;
+	struct qinfo_chip *qinfo;
+	int numchips;
+	unsigned long chipshift;
+	struct flchip chips[0];
+};
+
+/* qinfo_query_info structure contains request information for
+ * each qinfo record
+ * @major - major number of qinfo record
+ * @minor - minor number of qinfo record
+ * @id_str - descriptive string to access the record
+ * @desc - detailed description for the qinfo record
+ */
+struct qinfo_query_info {
+	uint8_t	major;
+	uint8_t	minor;
+	char *id_str;
+	char *desc;
+};
+
+/*
+ * qinfo_chip structure contains necessary qinfo records data
+ * @DevSizeShift - Device size 2^n bytes
+ * @BufSizeShift - Program buffer size 2^n bytes
+ * @TotalBlocksNum - Total number of blocks
+ * @UniformBlockSizeShift - Uniform block size 2^UniformBlockSizeShift bytes
+ * @HWPartsNum - Number of hardware partitions
+ * @SuspEraseSupp - Suspend erase supported
+ * @SingleWordProgTime - Single word program 2^SingleWordProgTime u-sec
+ * @ProgBufferTime - Program buffer write 2^ProgBufferTime u-sec
+ * @BlockEraseTime - Block erase 2^BlockEraseTime m-sec
+ */
+struct qinfo_chip {
+	/* General device info */
+	uint16_t DevSizeShift;
+	uint16_t BufSizeShift;
+	/* Erase block information */
+	uint16_t TotalBlocksNum;
+	uint16_t UniformBlockSizeShift;
+	/* Partition information */
+	uint16_t HWPartsNum;
+	/* Optional features */
+	uint16_t SuspEraseSupp;
+	/* Operation typical time */
+	uint16_t SingleWordProgTime;
+	uint16_t ProgBufferTime;
+	uint16_t BlockEraseTime;
+};
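+
+/*
+ * Illustrative sketch (not part of the original header): turning the 2^n
+ * encoded qinfo fields back into byte sizes.
+ */
+#if 0	/* example only */
+static inline unsigned long lpddr_example_device_size(const struct qinfo_chip *q)
+{
+	return 1UL << q->DevSizeShift;		/* device size in bytes */
+}
+
+static inline unsigned long lpddr_example_block_size(const struct qinfo_chip *q)
+{
+	return 1UL << q->UniformBlockSizeShift;	/* erase block size in bytes */
+}
+#endif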
+
+/* defines for fixup usage */
+#define LPDDR_MFR_ANY		0xffff
+#define LPDDR_ID_ANY		0xffff
+#define NUMONYX_MFGR_ID		0x0089
+#define R18_DEVICE_ID_1G	0x893c
+
+static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map)
+{
+	map_word val = { {0} };
+	val.x[0] = cmd;
+	return val;
+}
+
+#define CMD(x) lpddr_build_cmd(x, map)
+#define CMDVAL(cmd) cmd.x[0]
+
+struct mtd_info *lpddr_cmdset(struct map_info *);
+
+#endif
+
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
new file mode 100644
index 0000000..efb2345
--- /dev/null
+++ b/include/linux/mtd/rawnand.h
@@ -0,0 +1,1754 @@
+/*
+ *  Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ *                        Steven J. Hill <sjhill@realitydiluted.com>
+ *		          Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Info:
+ *	Contains standard defines and IDs for NAND flash devices
+ *
+ * Changelog:
+ *	See git changelog.
+ */
+#ifndef __LINUX_MTD_RAWNAND_H
+#define __LINUX_MTD_RAWNAND_H
+
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/bbm.h>
+#include <linux/of.h>
+#include <linux/types.h>
+
+struct nand_flash_dev;
+
+/* Scan and identify a NAND device */
+int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
+		       struct nand_flash_dev *ids);
+
+static inline int nand_scan(struct mtd_info *mtd, int max_chips)
+{
+	return nand_scan_with_ids(mtd, max_chips, NULL);
+}
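+
+/*
+ * Illustrative sketch (not part of the original header): the tail of a typical
+ * controller probe, scanning for a single chip and then registering the MTD
+ * device (mtd_device_register() comes from <linux/mtd/mtd.h>).
+ */
+#if 0	/* example only */
+static int example_probe_tail(struct mtd_info *mtd)
+{
+	int ret = nand_scan(mtd, 1);
+
+	if (ret)
+		return ret;
+
+	return mtd_device_register(mtd, NULL, 0);
+}
+#endif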
+
+/* Internal helper for board drivers which need to override command function */
+void nand_wait_ready(struct mtd_info *mtd);
+
+/* The maximum number of NAND chips in an array */
+#define NAND_MAX_CHIPS		8
+
+/*
+ * Constants for hardware specific CLE/ALE/NCE function
+ *
+ * These are bits which can be or'ed to set/clear multiple
+ * bits in one go.
+ */
+/* Select the chip by setting nCE to low */
+#define NAND_NCE		0x01
+/* Select the command latch by setting CLE to high */
+#define NAND_CLE		0x02
+/* Select the address latch by setting ALE to high */
+#define NAND_ALE		0x04
+
+#define NAND_CTRL_CLE		(NAND_NCE | NAND_CLE)
+#define NAND_CTRL_ALE		(NAND_NCE | NAND_ALE)
+#define NAND_CTRL_CHANGE	0x80
+
+/*
+ * Standard NAND flash commands
+ */
+#define NAND_CMD_READ0		0
+#define NAND_CMD_READ1		1
+#define NAND_CMD_RNDOUT		5
+#define NAND_CMD_PAGEPROG	0x10
+#define NAND_CMD_READOOB	0x50
+#define NAND_CMD_ERASE1		0x60
+#define NAND_CMD_STATUS		0x70
+#define NAND_CMD_SEQIN		0x80
+#define NAND_CMD_RNDIN		0x85
+#define NAND_CMD_READID		0x90
+#define NAND_CMD_ERASE2		0xd0
+#define NAND_CMD_PARAM		0xec
+#define NAND_CMD_GET_FEATURES	0xee
+#define NAND_CMD_SET_FEATURES	0xef
+#define NAND_CMD_RESET		0xff
+
+/* Extended commands for large page devices */
+#define NAND_CMD_READSTART	0x30
+#define NAND_CMD_RNDOUTSTART	0xE0
+#define NAND_CMD_CACHEDPROG	0x15
+
+#define NAND_CMD_NONE		-1
+
+/* Status bits */
+#define NAND_STATUS_FAIL	0x01
+#define NAND_STATUS_FAIL_N1	0x02
+#define NAND_STATUS_TRUE_READY	0x20
+#define NAND_STATUS_READY	0x40
+#define NAND_STATUS_WP		0x80
+
+#define NAND_DATA_IFACE_CHECK_ONLY	-1
+
+/*
+ * Constants for ECC_MODES
+ */
+typedef enum {
+	NAND_ECC_NONE,
+	NAND_ECC_SOFT,
+	NAND_ECC_HW,
+	NAND_ECC_HW_SYNDROME,
+	NAND_ECC_HW_OOB_FIRST,
+	NAND_ECC_ON_DIE,
+} nand_ecc_modes_t;
+
+enum nand_ecc_algo {
+	NAND_ECC_UNKNOWN,
+	NAND_ECC_HAMMING,
+	NAND_ECC_BCH,
+	NAND_ECC_RS,
+};
+
+/*
+ * Constants for Hardware ECC
+ */
+/* Reset Hardware ECC for read */
+#define NAND_ECC_READ		0
+/* Reset Hardware ECC for write */
+#define NAND_ECC_WRITE		1
+/* Enable Hardware ECC before syndrome is read back from flash */
+#define NAND_ECC_READSYN	2
+
+/*
+ * Enable generic NAND 'page erased' check. This check is only done when
+ * ecc.correct() returns -EBADMSG.
+ * Set this flag if your implementation does not fix bitflips in erased
+ * pages and you want to rely on the default implementation.
+ */
+#define NAND_ECC_GENERIC_ERASED_CHECK	BIT(0)
+#define NAND_ECC_MAXIMIZE		BIT(1)
+
+/* Bit mask for flags passed to do_nand_read_ecc */
+#define NAND_GET_DEVICE		0x80
+
+
+/*
+ * Option constants for bizarre disfunctionality and real
+ * features.
+ */
+/* Buswidth is 16 bit */
+#define NAND_BUSWIDTH_16	0x00000002
+/* Chip has cache program function */
+#define NAND_CACHEPRG		0x00000008
+/*
+ * Chip requires ready check on read (for auto-incremented sequential read).
+ * True only for small page devices; large page devices do not support
+ * autoincrement.
+ */
+#define NAND_NEED_READRDY	0x00000100
+
+/* Chip does not allow subpage writes */
+#define NAND_NO_SUBPAGE_WRITE	0x00000200
+
+/* Device is one of 'new' xD cards that expose fake nand command set */
+#define NAND_BROKEN_XD		0x00000400
+
+/* Device behaves just like nand, but is readonly */
+#define NAND_ROM		0x00000800
+
+/* Device supports subpage reads */
+#define NAND_SUBPAGE_READ	0x00001000
+
+/*
+ * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
+ * patterns.
+ */
+#define NAND_NEED_SCRAMBLING	0x00002000
+
+/* Device needs 3rd row address cycle */
+#define NAND_ROW_ADDR_3		0x00004000
+
+/* Options valid for Samsung large page devices */
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+
+/* Macros to identify the above */
+#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
+#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
+#define NAND_HAS_SUBPAGE_WRITE(chip) !((chip)->options & NAND_NO_SUBPAGE_WRITE)
+
+/* Non chip related options */
+/* This option skips the bbt scan during initialization. */
+#define NAND_SKIP_BBTSCAN	0x00010000
+/* Chip may not exist, so silence any errors in scan */
+#define NAND_SCAN_SILENT_NODEV	0x00040000
+/*
+ * Autodetect nand buswidth with readid/onfi.
+ * This supposes that the driver will configure the hardware in 8-bit mode
+ * when calling nand_scan_ident, and update its configuration
+ * before calling nand_scan_tail.
+ */
+#define NAND_BUSWIDTH_AUTO      0x00080000
+/*
+ * This option could be defined by controller drivers to protect against
+ * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
+ */
+#define NAND_USE_BOUNCE_BUFFER	0x00100000
+
+/*
+ * In case your controller is implementing ->cmd_ctrl() and is relying on the
+ * default ->cmdfunc() implementation, you may want to let the core handle the
+ * tCCS delay which is required when a column change (RNDIN or RNDOUT) is
+ * requested.
+ * If your controller already takes care of this delay, you don't need to set
+ * this flag.
+ */
+#define NAND_WAIT_TCCS		0x00200000
+
+/*
+ * Whether the NAND chip is a boot medium. Drivers might use this information
+ * to select ECC algorithms supported by the boot ROM or similar restrictions.
+ */
+#define NAND_IS_BOOT_MEDIUM	0x00400000
+
+/* Options set by nand scan */
+/* Nand scan has allocated controller struct */
+#define NAND_CONTROLLER_ALLOC	0x80000000
+
+/* Cell info constants */
+#define NAND_CI_CHIPNR_MSK	0x03
+#define NAND_CI_CELLTYPE_MSK	0x0C
+#define NAND_CI_CELLTYPE_SHIFT	2
+
+/* Keep gcc happy */
+struct nand_chip;
+
+/* ONFI version bits */
+#define ONFI_VERSION_1_0		BIT(1)
+#define ONFI_VERSION_2_0		BIT(2)
+#define ONFI_VERSION_2_1		BIT(3)
+#define ONFI_VERSION_2_2		BIT(4)
+#define ONFI_VERSION_2_3		BIT(5)
+#define ONFI_VERSION_3_0		BIT(6)
+#define ONFI_VERSION_3_1		BIT(7)
+#define ONFI_VERSION_3_2		BIT(8)
+#define ONFI_VERSION_4_0		BIT(9)
+
+/* ONFI features */
+#define ONFI_FEATURE_16_BIT_BUS		(1 << 0)
+#define ONFI_FEATURE_EXT_PARAM_PAGE	(1 << 7)
+
+/* ONFI timing mode, used in both asynchronous and synchronous mode */
+#define ONFI_TIMING_MODE_0		(1 << 0)
+#define ONFI_TIMING_MODE_1		(1 << 1)
+#define ONFI_TIMING_MODE_2		(1 << 2)
+#define ONFI_TIMING_MODE_3		(1 << 3)
+#define ONFI_TIMING_MODE_4		(1 << 4)
+#define ONFI_TIMING_MODE_5		(1 << 5)
+#define ONFI_TIMING_MODE_UNKNOWN	(1 << 6)
+
+/* ONFI feature number/address */
+#define ONFI_FEATURE_NUMBER		256
+#define ONFI_FEATURE_ADDR_TIMING_MODE	0x1
+
+/* Vendor-specific feature address (Micron) */
+#define ONFI_FEATURE_ADDR_READ_RETRY	0x89
+#define ONFI_FEATURE_ON_DIE_ECC		0x90
+#define   ONFI_FEATURE_ON_DIE_ECC_EN	BIT(3)
+
+/* ONFI subfeature parameters length */
+#define ONFI_SUBFEATURE_PARAM_LEN	4
+
+/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_SET_GET_FEATURES	(1 << 2)
+
+struct nand_onfi_params {
+	/* rev info and features block */
+	/* 'O' 'N' 'F' 'I'  */
+	u8 sig[4];
+	__le16 revision;
+	__le16 features;
+	__le16 opt_cmd;
+	u8 reserved0[2];
+	__le16 ext_param_page_length; /* since ONFI 2.1 */
+	u8 num_of_param_pages;        /* since ONFI 2.1 */
+	u8 reserved1[17];
+
+	/* manufacturer information block */
+	char manufacturer[12];
+	char model[20];
+	u8 jedec_id;
+	__le16 date_code;
+	u8 reserved2[13];
+
+	/* memory organization block */
+	__le32 byte_per_page;
+	__le16 spare_bytes_per_page;
+	__le32 data_bytes_per_ppage;
+	__le16 spare_bytes_per_ppage;
+	__le32 pages_per_block;
+	__le32 blocks_per_lun;
+	u8 lun_count;
+	u8 addr_cycles;
+	u8 bits_per_cell;
+	__le16 bb_per_lun;
+	__le16 block_endurance;
+	u8 guaranteed_good_blocks;
+	__le16 guaranteed_block_endurance;
+	u8 programs_per_page;
+	u8 ppage_attr;
+	u8 ecc_bits;
+	u8 interleaved_bits;
+	u8 interleaved_ops;
+	u8 reserved3[13];
+
+	/* electrical parameter block */
+	u8 io_pin_capacitance_max;
+	__le16 async_timing_mode;
+	__le16 program_cache_timing_mode;
+	__le16 t_prog;
+	__le16 t_bers;
+	__le16 t_r;
+	__le16 t_ccs;
+	__le16 src_sync_timing_mode;
+	u8 src_ssync_features;
+	__le16 clk_pin_capacitance_typ;
+	__le16 io_pin_capacitance_typ;
+	__le16 input_pin_capacitance_typ;
+	u8 input_pin_capacitance_max;
+	u8 driver_strength_support;
+	__le16 t_int_r;
+	__le16 t_adl;
+	u8 reserved4[8];
+
+	/* vendor */
+	__le16 vendor_revision;
+	u8 vendor[88];
+
+	__le16 crc;
+} __packed;
+
+#define ONFI_CRC_BASE	0x4F4E
+
+/* Extended ECC information Block Definition (since ONFI 2.1) */
+struct onfi_ext_ecc_info {
+	u8 ecc_bits;
+	u8 codeword_size;
+	__le16 bb_per_lun;
+	__le16 block_endurance;
+	u8 reserved[2];
+} __packed;
+
+#define ONFI_SECTION_TYPE_0	0	/* Unused section. */
+#define ONFI_SECTION_TYPE_1	1	/* for additional sections. */
+#define ONFI_SECTION_TYPE_2	2	/* for ECC information. */
+struct onfi_ext_section {
+	u8 type;
+	u8 length;
+} __packed;
+
+#define ONFI_EXT_SECTION_MAX 8
+
+/* Extended Parameter Page Definition (since ONFI 2.1) */
+struct onfi_ext_param_page {
+	__le16 crc;
+	u8 sig[4];             /* 'E' 'P' 'P' 'S' */
+	u8 reserved0[10];
+	struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
+
+	/*
+	 * The actual size of the Extended Parameter Page is in
+	 * @ext_param_page_length of nand_onfi_params{}.
+	 * The following are the variable length sections.
+	 * So we do not add any fields below. Please see the ONFI spec.
+	 */
+} __packed;
+
+struct jedec_ecc_info {
+	u8 ecc_bits;
+	u8 codeword_size;
+	__le16 bb_per_lun;
+	__le16 block_endurance;
+	u8 reserved[2];
+} __packed;
+
+/* JEDEC features */
+#define JEDEC_FEATURE_16_BIT_BUS	(1 << 0)
+
+struct nand_jedec_params {
+	/* rev info and features block */
+	/* 'J' 'E' 'S' 'D'  */
+	u8 sig[4];
+	__le16 revision;
+	__le16 features;
+	u8 opt_cmd[3];
+	__le16 sec_cmd;
+	u8 num_of_param_pages;
+	u8 reserved0[18];
+
+	/* manufacturer information block */
+	char manufacturer[12];
+	char model[20];
+	u8 jedec_id[6];
+	u8 reserved1[10];
+
+	/* memory organization block */
+	__le32 byte_per_page;
+	__le16 spare_bytes_per_page;
+	u8 reserved2[6];
+	__le32 pages_per_block;
+	__le32 blocks_per_lun;
+	u8 lun_count;
+	u8 addr_cycles;
+	u8 bits_per_cell;
+	u8 programs_per_page;
+	u8 multi_plane_addr;
+	u8 multi_plane_op_attr;
+	u8 reserved3[38];
+
+	/* electrical parameter block */
+	__le16 async_sdr_speed_grade;
+	__le16 toggle_ddr_speed_grade;
+	__le16 sync_ddr_speed_grade;
+	u8 async_sdr_features;
+	u8 toggle_ddr_features;
+	u8 sync_ddr_features;
+	__le16 t_prog;
+	__le16 t_bers;
+	__le16 t_r;
+	__le16 t_r_multi_plane;
+	__le16 t_ccs;
+	__le16 io_pin_capacitance_typ;
+	__le16 input_pin_capacitance_typ;
+	__le16 clk_pin_capacitance_typ;
+	u8 driver_strength_support;
+	__le16 t_adl;
+	u8 reserved4[36];
+
+	/* ECC and endurance block */
+	u8 guaranteed_good_blocks;
+	__le16 guaranteed_block_endurance;
+	struct jedec_ecc_info ecc_info[4];
+	u8 reserved5[29];
+
+	/* reserved */
+	u8 reserved6[148];
+
+	/* vendor */
+	__le16 vendor_rev_num;
+	u8 reserved7[88];
+
+	/* CRC for Parameter Page */
+	__le16 crc;
+} __packed;
+
+/**
+ * struct onfi_params - ONFI specific parameters that will be reused
+ * @version: ONFI version (BCD encoded), 0 if ONFI is not supported
+ * @tPROG: Page program time
+ * @tBERS: Block erase time
+ * @tR: Page read time
+ * @tCCS: Change column setup time
+ * @async_timing_mode: Supported asynchronous timing mode
+ * @vendor_revision: Vendor specific revision number
+ * @vendor: Vendor specific data
+ */
+struct onfi_params {
+	int version;
+	u16 tPROG;
+	u16 tBERS;
+	u16 tR;
+	u16 tCCS;
+	u16 async_timing_mode;
+	u16 vendor_revision;
+	u8 vendor[88];
+};
+
+/**
+ * struct nand_parameters - NAND generic parameters from the parameter page
+ * @model: Model name
+ * @supports_set_get_features: The NAND chip supports setting/getting features
+ * @set_feature_list: Bitmap of features that can be set
+ * @get_feature_list: Bitmap of features that can be get
+ * @onfi: ONFI specific parameters
+ */
+struct nand_parameters {
+	/* Generic parameters */
+	const char *model;
+	bool supports_set_get_features;
+	DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
+	DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
+
+	/* ONFI parameters */
+	struct onfi_params *onfi;
+};
+
+/* The maximum expected count of bytes in the NAND ID sequence */
+#define NAND_MAX_ID_LEN 8
+
+/**
+ * struct nand_id - NAND id structure
+ * @data: buffer containing the id bytes.
+ * @len: ID length.
+ */
+struct nand_id {
+	u8 data[NAND_MAX_ID_LEN];
+	int len;
+};
+
+/**
+ * struct nand_controller_ops - Controller operations
+ *
+ * @attach_chip: this method is called after the NAND detection phase after
+ *		 flash ID and MTD fields such as erase size, page size and OOB
+ *		 size have been set up. ECC requirements are available if
+ *		 provided by the NAND chip or device tree. Typically used to
+ *		 choose the appropriate ECC configuration and allocate
+ *		 associated resources.
+ *		 This hook is optional.
+ * @detach_chip: free all resources allocated/claimed in
+ *		 nand_controller_ops->attach_chip().
+ *		 This hook is optional.
+ */
+struct nand_controller_ops {
+	int (*attach_chip)(struct nand_chip *chip);
+	void (*detach_chip)(struct nand_chip *chip);
+};
+
+/**
+ * struct nand_controller - Structure used to describe a NAND controller
+ *
+ * @lock:               protection lock
+ * @active:		the NAND chip which currently holds the controller
+ * @wq:			wait queue to sleep on if a NAND operation is in
+ *			progress; used instead of the per-chip wait queue
+ *			when a hardware controller is available.
+ * @ops:		NAND controller operations.
+ */
+struct nand_controller {
+	spinlock_t lock;
+	struct nand_chip *active;
+	wait_queue_head_t wq;
+	const struct nand_controller_ops *ops;
+};
+
+static inline void nand_controller_init(struct nand_controller *nfc)
+{
+	nfc->active = NULL;
+	spin_lock_init(&nfc->lock);
+	init_waitqueue_head(&nfc->wq);
+}
+
+/**
+ * struct nand_ecc_step_info - ECC step information of ECC engine
+ * @stepsize: data bytes per ECC step
+ * @strengths: array of supported strengths
+ * @nstrengths: number of supported strengths
+ */
+struct nand_ecc_step_info {
+	int stepsize;
+	const int *strengths;
+	int nstrengths;
+};
+
+/**
+ * struct nand_ecc_caps - capability of ECC engine
+ * @stepinfos: array of ECC step information
+ * @nstepinfos: number of ECC step information
+ * @calc_ecc_bytes: driver's hook to calculate ECC bytes per step
+ */
+struct nand_ecc_caps {
+	const struct nand_ecc_step_info *stepinfos;
+	int nstepinfos;
+	int (*calc_ecc_bytes)(int step_size, int strength);
+};
+
+/* a shorthand to generate struct nand_ecc_caps with only one ECC stepsize */
+#define NAND_ECC_CAPS_SINGLE(__name, __calc, __step, ...)	\
+static const int __name##_strengths[] = { __VA_ARGS__ };	\
+static const struct nand_ecc_step_info __name##_stepinfo = {	\
+	.stepsize = __step,					\
+	.strengths = __name##_strengths,			\
+	.nstrengths = ARRAY_SIZE(__name##_strengths),		\
+};								\
+static const struct nand_ecc_caps __name = {			\
+	.stepinfos = &__name##_stepinfo,			\
+	.nstepinfos = 1,					\
+	.calc_ecc_bytes = __calc,				\
+}
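+
+/*
+ * Illustrative sketch (not part of the original header): declaring the
+ * capabilities of a controller whose ECC engine works on 512-byte steps at a
+ * strength of 1, 4 or 8 bits. The 13-bit-per-symbol BCH byte count is an
+ * assumption of the example.
+ */
+#if 0	/* example only */
+static int example_calc_ecc_bytes(int step_size, int strength)
+{
+	return DIV_ROUND_UP(strength * 13, 8);
+}
+
+NAND_ECC_CAPS_SINGLE(example_ecc_caps, example_calc_ecc_bytes, 512, 1, 4, 8);
+#endif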
+
+/**
+ * struct nand_ecc_ctrl - Control structure for ECC
+ * @mode:	ECC mode
+ * @algo:	ECC algorithm
+ * @steps:	number of ECC steps per page
+ * @size:	data bytes per ECC step
+ * @bytes:	ECC bytes per step
+ * @strength:	max number of correctible bits per ECC step
+ * @total:	total number of ECC bytes per page
+ * @prepad:	padding information for syndrome based ECC generators
+ * @postpad:	padding information for syndrome based ECC generators
+ * @options:	ECC specific options (see NAND_ECC_XXX flags defined above)
+ * @priv:	pointer to private ECC control data
+ * @calc_buf:	buffer for calculated ECC, size is oobsize.
+ * @code_buf:	buffer for ECC read from flash, size is oobsize.
+ * @hwctl:	function to control hardware ECC generator. Must only
+ *		be provided if a hardware ECC is available
+ * @calculate:	function for ECC calculation or readback from ECC hardware
+ * @correct:	function for ECC correction, matching to ECC generator (sw/hw).
+ *		Should return a positive number representing the number of
+ *		corrected bitflips, -EBADMSG if the number of bitflips exceeds
+ *		the ECC strength, or any other error code if the error is not
+ *		directly related to correction.
+ *		If -EBADMSG is returned the input buffers should be left
+ *		untouched.
+ * @read_page_raw:	function to read a raw page without ECC. This function
+ *			should hide the specific layout used by the ECC
+ *			controller and always return contiguous in-band and
+ *			out-of-band data even if they're not stored
+ *			contiguously on the NAND chip (e.g.
+ *			NAND_ECC_HW_SYNDROME interleaves in-band and
+ *			out-of-band data).
+ * @write_page_raw:	function to write a raw page without ECC. This function
+ *			should hide the specific layout used by the ECC
+ *			controller and consider the passed data as contiguous
+ *			in-band and out-of-band data. ECC controller is
+ *			responsible for doing the appropriate transformations
+ *			to adapt to its specific layout (e.g.
+ *			NAND_ECC_HW_SYNDROME interleaves in-band and
+ *			out-of-band data).
+ * @read_page:	function to read a page according to the ECC generator
+ *		requirements; returns maximum number of bitflips corrected in
+ *		any single ECC step, or -EIO on a hardware error
+ * @read_subpage:	function to read parts of the page covered by ECC;
+ *			returns same as read_page()
+ * @write_subpage:	function to write parts of the page covered by ECC.
+ * @write_page:	function to write a page according to the ECC generator
+ *		requirements.
+ * @write_oob_raw:	function to write chip OOB data without ECC
+ * @read_oob_raw:	function to read chip OOB data without ECC
+ * @read_oob:	function to read chip OOB data
+ * @write_oob:	function to write chip OOB data
+ */
+struct nand_ecc_ctrl {
+	nand_ecc_modes_t mode;
+	enum nand_ecc_algo algo;
+	int steps;
+	int size;
+	int bytes;
+	int total;
+	int strength;
+	int prepad;
+	int postpad;
+	unsigned int options;
+	void *priv;
+	u8 *calc_buf;
+	u8 *code_buf;
+	void (*hwctl)(struct mtd_info *mtd, int mode);
+	int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
+			uint8_t *ecc_code);
+	int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc,
+			uint8_t *calc_ecc);
+	int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
+			uint8_t *buf, int oob_required, int page);
+	int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
+			const uint8_t *buf, int oob_required, int page);
+	int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
+			uint8_t *buf, int oob_required, int page);
+	int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
+			uint32_t offs, uint32_t len, uint8_t *buf, int page);
+	int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
+			uint32_t offset, uint32_t data_len,
+			const uint8_t *data_buf, int oob_required, int page);
+	int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
+			const uint8_t *buf, int oob_required, int page);
+	int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
+			int page);
+	int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
+			int page);
+	int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page);
+	int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
+			int page);
+};
+
+/**
+ * struct nand_sdr_timings - SDR NAND chip timings
+ *
+ * This struct defines the timing requirements of an SDR NAND chip.
+ * This information can be found in every NAND datasheet, and the meaning of
+ * each timing is described in the ONFI specification:
+ * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
+ * Parameters)
+ *
+ * All these timings are expressed in picoseconds.
+ *
+ * @tBERS_max: Block erase time
+ * @tCCS_min: Change column setup time
+ * @tPROG_max: Page program time
+ * @tR_max: Page read time
+ * @tALH_min: ALE hold time
+ * @tADL_min: ALE to data loading time
+ * @tALS_min: ALE setup time
+ * @tAR_min: ALE to RE# delay
+ * @tCEA_max: CE# access time
+ * @tCEH_min: CE# high hold time
+ * @tCH_min:  CE# hold time
+ * @tCHZ_max: CE# high to output hi-Z
+ * @tCLH_min: CLE hold time
+ * @tCLR_min: CLE to RE# delay
+ * @tCLS_min: CLE setup time
+ * @tCOH_min: CE# high to output hold
+ * @tCS_min: CE# setup time
+ * @tDH_min: Data hold time
+ * @tDS_min: Data setup time
+ * @tFEAT_max: Busy time for Set Features and Get Features
+ * @tIR_min: Output hi-Z to RE# low
+ * @tITC_max: Interface and Timing Mode Change time
+ * @tRC_min: RE# cycle time
+ * @tREA_max: RE# access time
+ * @tREH_min: RE# high hold time
+ * @tRHOH_min: RE# high to output hold
+ * @tRHW_min: RE# high to WE# low
+ * @tRHZ_max: RE# high to output hi-Z
+ * @tRLOH_min: RE# low to output hold
+ * @tRP_min: RE# pulse width
+ * @tRR_min: Ready to RE# low (data only)
+ * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
+ *	      rising edge of R/B#.
+ * @tWB_max: WE# high to SR[6] low
+ * @tWC_min: WE# cycle time
+ * @tWH_min: WE# high hold time
+ * @tWHR_min: WE# high to RE# low
+ * @tWP_min: WE# pulse width
+ * @tWW_min: WP# transition to WE# low
+ */
+struct nand_sdr_timings {
+	u64 tBERS_max;
+	u32 tCCS_min;
+	u64 tPROG_max;
+	u64 tR_max;
+	u32 tALH_min;
+	u32 tADL_min;
+	u32 tALS_min;
+	u32 tAR_min;
+	u32 tCEA_max;
+	u32 tCEH_min;
+	u32 tCH_min;
+	u32 tCHZ_max;
+	u32 tCLH_min;
+	u32 tCLR_min;
+	u32 tCLS_min;
+	u32 tCOH_min;
+	u32 tCS_min;
+	u32 tDH_min;
+	u32 tDS_min;
+	u32 tFEAT_max;
+	u32 tIR_min;
+	u32 tITC_max;
+	u32 tRC_min;
+	u32 tREA_max;
+	u32 tREH_min;
+	u32 tRHOH_min;
+	u32 tRHW_min;
+	u32 tRHZ_max;
+	u32 tRLOH_min;
+	u32 tRP_min;
+	u32 tRR_min;
+	u64 tRST_max;
+	u32 tWB_max;
+	u32 tWC_min;
+	u32 tWH_min;
+	u32 tWHR_min;
+	u32 tWP_min;
+	u32 tWW_min;
+};
+
+/**
+ * enum nand_data_interface_type - NAND interface timing type
+ * @NAND_SDR_IFACE:	Single Data Rate interface
+ */
+enum nand_data_interface_type {
+	NAND_SDR_IFACE,
+};
+
+/**
+ * struct nand_data_interface - NAND interface timing
+ * @type:	 type of the timing
+ * @timings:	 The timing, type according to @type
+ * @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
+ */
+struct nand_data_interface {
+	enum nand_data_interface_type type;
+	union {
+		struct nand_sdr_timings sdr;
+	} timings;
+};
+
+/**
+ * nand_get_sdr_timings - get SDR timing from data interface
+ * @conf:	The data interface
+ */
+static inline const struct nand_sdr_timings *
+nand_get_sdr_timings(const struct nand_data_interface *conf)
+{
+	if (conf->type != NAND_SDR_IFACE)
+		return ERR_PTR(-EINVAL);
+
+	return &conf->timings.sdr;
+}
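+
+/*
+ * Illustrative sketch (not part of the original header): a controller driver
+ * would typically fetch the SDR timings from the negotiated data interface
+ * and derive its timing register values from the t*_min/t*_max fields.
+ */
+#if 0	/* example only */
+static int example_apply_timings(const struct nand_data_interface *conf)
+{
+	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
+
+	if (IS_ERR(sdr))
+		return PTR_ERR(sdr);
+
+	/* ... convert e.g. sdr->tWP_min (picoseconds) into clock cycles ... */
+	return 0;
+}
+#endif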
+
+/**
+ * struct nand_manufacturer_ops - NAND Manufacturer operations
+ * @detect: detect the NAND memory organization and capabilities
+ * @init: initialize all vendor specific fields (like the ->read_retry()
+ *	  implementation) if any.
+ * @cleanup: the ->init() function may have allocated resources, ->cleanup()
+ *	     is here to let vendor specific code release those resources.
+ * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
+ *			   page. This is called after the checksum is verified.
+ */
+struct nand_manufacturer_ops {
+	void (*detect)(struct nand_chip *chip);
+	int (*init)(struct nand_chip *chip);
+	void (*cleanup)(struct nand_chip *chip);
+	void (*fixup_onfi_param_page)(struct nand_chip *chip,
+				      struct nand_onfi_params *p);
+};
+
+/**
+ * struct nand_op_cmd_instr - Definition of a command instruction
+ * @opcode: the command to issue in one cycle
+ */
+struct nand_op_cmd_instr {
+	u8 opcode;
+};
+
+/**
+ * struct nand_op_addr_instr - Definition of an address instruction
+ * @naddrs: length of the @addrs array
+ * @addrs: array containing the address cycles to issue
+ */
+struct nand_op_addr_instr {
+	unsigned int naddrs;
+	const u8 *addrs;
+};
+
+/**
+ * struct nand_op_data_instr - Definition of a data instruction
+ * @len: number of data bytes to move
+ * @buf: buffer to fill
+ * @buf.in: buffer to fill when reading from the NAND chip
+ * @buf.out: buffer to read from when writing to the NAND chip
+ * @force_8bit: force 8-bit access
+ *
+ * Please note that "in" and "out" are inverted from the ONFI specification
+ * and are from the controller perspective, so an "in" is a read from the NAND
+ * chip while an "out" is a write to the NAND chip.
+ */
+struct nand_op_data_instr {
+	unsigned int len;
+	union {
+		void *in;
+		const void *out;
+	} buf;
+	bool force_8bit;
+};
+
+/**
+ * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
+ * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
+ */
+struct nand_op_waitrdy_instr {
+	unsigned int timeout_ms;
+};
+
+/**
+ * enum nand_op_instr_type - Definition of all instruction types
+ * @NAND_OP_CMD_INSTR: command instruction
+ * @NAND_OP_ADDR_INSTR: address instruction
+ * @NAND_OP_DATA_IN_INSTR: data in instruction
+ * @NAND_OP_DATA_OUT_INSTR: data out instruction
+ * @NAND_OP_WAITRDY_INSTR: wait ready instruction
+ */
+enum nand_op_instr_type {
+	NAND_OP_CMD_INSTR,
+	NAND_OP_ADDR_INSTR,
+	NAND_OP_DATA_IN_INSTR,
+	NAND_OP_DATA_OUT_INSTR,
+	NAND_OP_WAITRDY_INSTR,
+};
+
+/**
+ * struct nand_op_instr - Instruction object
+ * @type: the instruction type
+ * @ctx:  extra data associated to the instruction. You'll have to use the
+ *        appropriate element depending on @type
+ * @ctx.cmd: use it if @type is %NAND_OP_CMD_INSTR
+ * @ctx.addr: use it if @type is %NAND_OP_ADDR_INSTR
+ * @ctx.data: use it if @type is %NAND_OP_DATA_IN_INSTR
+ *	      or %NAND_OP_DATA_OUT_INSTR
+ * @ctx.waitrdy: use it if @type is %NAND_OP_WAITRDY_INSTR
+ * @delay_ns: delay the controller should apply after the instruction has been
+ *	      issued on the bus. Most modern controllers have internal timings
+ *	      control logic, and in this case, the controller driver can ignore
+ *	      this field.
+ */
+struct nand_op_instr {
+	enum nand_op_instr_type type;
+	union {
+		struct nand_op_cmd_instr cmd;
+		struct nand_op_addr_instr addr;
+		struct nand_op_data_instr data;
+		struct nand_op_waitrdy_instr waitrdy;
+	} ctx;
+	unsigned int delay_ns;
+};
+
+/*
+ * Special handling must be done for the WAITRDY timeout parameter as it usually
+ * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
+ * tBERS (during an erase), all of which are u64 values that cannot be
+ * divided by the usual kernel macros and must be handled with the special
+ * DIV_ROUND_UP_ULL() macro.
+ *
+ * Cast to type of dividend is needed here to guarantee that the result won't
+ * be an unsigned long long when the dividend is an unsigned long (or smaller),
+ * which is what the compiler does when it sees ternary operator with 2
+ * different return types (picks the largest type to make sure there's no
+ * loss).
+ */
+#define __DIVIDE(dividend, divisor) ({						\
+	(__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ?	\
+			       DIV_ROUND_UP(dividend, divisor) :		\
+			       DIV_ROUND_UP_ULL(dividend, divisor)); 		\
+	})
+#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
+#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
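+
+/*
+ * Illustrative note (not part of the original header): struct nand_sdr_timings
+ * values are in picoseconds, so e.g. a tPROG_max of 600000000 ps gives
+ * PSEC_TO_MSEC(600000000) == 1 (rounded up), suitable as a WAITRDY timeout
+ * in milliseconds.
+ */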
+
+#define NAND_OP_CMD(id, ns)						\
+	{								\
+		.type = NAND_OP_CMD_INSTR,				\
+		.ctx.cmd.opcode = id,					\
+		.delay_ns = ns,						\
+	}
+
+#define NAND_OP_ADDR(ncycles, cycles, ns)				\
+	{								\
+		.type = NAND_OP_ADDR_INSTR,				\
+		.ctx.addr = {						\
+			.naddrs = ncycles,				\
+			.addrs = cycles,				\
+		},							\
+		.delay_ns = ns,						\
+	}
+
+#define NAND_OP_DATA_IN(l, b, ns)					\
+	{								\
+		.type = NAND_OP_DATA_IN_INSTR,				\
+		.ctx.data = {						\
+			.len = l,					\
+			.buf.in = b,					\
+			.force_8bit = false,				\
+		},							\
+		.delay_ns = ns,						\
+	}
+
+#define NAND_OP_DATA_OUT(l, b, ns)					\
+	{								\
+		.type = NAND_OP_DATA_OUT_INSTR,				\
+		.ctx.data = {						\
+			.len = l,					\
+			.buf.out = b,					\
+			.force_8bit = false,				\
+		},							\
+		.delay_ns = ns,						\
+	}
+
+#define NAND_OP_8BIT_DATA_IN(l, b, ns)					\
+	{								\
+		.type = NAND_OP_DATA_IN_INSTR,				\
+		.ctx.data = {						\
+			.len = l,					\
+			.buf.in = b,					\
+			.force_8bit = true,				\
+		},							\
+		.delay_ns = ns,						\
+	}
+
+#define NAND_OP_8BIT_DATA_OUT(l, b, ns)					\
+	{								\
+		.type = NAND_OP_DATA_OUT_INSTR,				\
+		.ctx.data = {						\
+			.len = l,					\
+			.buf.out = b,					\
+			.force_8bit = true,				\
+		},							\
+		.delay_ns = ns,						\
+	}
+
+#define NAND_OP_WAIT_RDY(tout_ms, ns)					\
+	{								\
+		.type = NAND_OP_WAITRDY_INSTR,				\
+		.ctx.waitrdy.timeout_ms = tout_ms,			\
+		.delay_ns = ns,						\
+	}
+
+/**
+ * struct nand_subop - a sub operation
+ * @instrs: array of instructions
+ * @ninstrs: length of the @instrs array
+ * @first_instr_start_off: offset to start from for the first instruction
+ *			   of the sub-operation
+ * @last_instr_end_off: offset to end at (excluded) for the last instruction
+ *			of the sub-operation
+ *
+ * Both @first_instr_start_off and @last_instr_end_off only apply to data or
+ * address instructions.
+ *
+ * When an operation cannot be handled as is by the NAND controller, it will
+ * be split by the parser into sub-operations which will be passed to the
+ * controller driver.
+ */
+struct nand_subop {
+	const struct nand_op_instr *instrs;
+	unsigned int ninstrs;
+	unsigned int first_instr_start_off;
+	unsigned int last_instr_end_off;
+};
+
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+					   unsigned int op_id);
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+					 unsigned int op_id);
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+					   unsigned int op_id);
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+				     unsigned int op_id);
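+
+/*
+ * Illustrative sketch only: inside a pattern ->exec() hook, the helpers above
+ * are used on the address/data instructions of the sub-operation. The loop
+ * below just extracts the relevant offsets/lengths; the variable names are
+ * assumptions and not part of this header.
+ *
+ *	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ *		const struct nand_op_instr *instr = &subop->instrs[op_id];
+ *
+ *		switch (instr->type) {
+ *		case NAND_OP_ADDR_INSTR:
+ *			addr_off = nand_subop_get_addr_start_off(subop, op_id);
+ *			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ *			break;
+ *		case NAND_OP_DATA_IN_INSTR:
+ *		case NAND_OP_DATA_OUT_INSTR:
+ *			data_off = nand_subop_get_data_start_off(subop, op_id);
+ *			len = nand_subop_get_data_len(subop, op_id);
+ *			break;
+ *		default:
+ *			break;
+ *		}
+ *	}
+ */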
+
+/**
+ * struct nand_op_parser_addr_constraints - Constraints for address instructions
+ * @maxcycles: maximum number of address cycles the controller can issue in a
+ *	       single step
+ */
+struct nand_op_parser_addr_constraints {
+	unsigned int maxcycles;
+};
+
+/**
+ * struct nand_op_parser_data_constraints - Constraints for data instructions
+ * @maxlen: maximum data length that the controller can handle in a single step
+ */
+struct nand_op_parser_data_constraints {
+	unsigned int maxlen;
+};
+
+/**
+ * struct nand_op_parser_pattern_elem - One element of a pattern
+ * @type: the instruction type
+ * @optional: whether this element of the pattern is optional or mandatory
+ * @ctx: address or data constraint
+ * @ctx.addr: address constraint (number of cycles)
+ * @ctx.data: data constraint (data length)
+ */
+struct nand_op_parser_pattern_elem {
+	enum nand_op_instr_type type;
+	bool optional;
+	union {
+		struct nand_op_parser_addr_constraints addr;
+		struct nand_op_parser_data_constraints data;
+	} ctx;
+};
+
+#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt)			\
+	{							\
+		.type = NAND_OP_CMD_INSTR,			\
+		.optional = _opt,				\
+	}
+
+#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles)		\
+	{							\
+		.type = NAND_OP_ADDR_INSTR,			\
+		.optional = _opt,				\
+		.ctx.addr.maxcycles = _maxcycles,		\
+	}
+
+#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen)		\
+	{							\
+		.type = NAND_OP_DATA_IN_INSTR,			\
+		.optional = _opt,				\
+		.ctx.data.maxlen = _maxlen,			\
+	}
+
+#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen)		\
+	{							\
+		.type = NAND_OP_DATA_OUT_INSTR,			\
+		.optional = _opt,				\
+		.ctx.data.maxlen = _maxlen,			\
+	}
+
+#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt)			\
+	{							\
+		.type = NAND_OP_WAITRDY_INSTR,			\
+		.optional = _opt,				\
+	}
+
+/**
+ * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
+ * @elems: array of pattern elements
+ * @nelems: number of pattern elements in @elems array
+ * @exec: the function that will issue a sub-operation
+ *
+ * A pattern is a list of elements, each element representing one instruction
+ * with its constraints. The pattern itself is used by the core to match NAND
+ * chip operations with NAND controller operations.
+ * Once a match between a NAND controller operation pattern and a NAND chip
+ * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
+ * hook is called so that the controller driver can issue the operation on the
+ * bus.
+ *
+ * Controller drivers should declare as many patterns as they support and pass
+ * this list of patterns (created with the help of the following macro) to
+ * the nand_op_parser_exec_op() helper.
+ */
+struct nand_op_parser_pattern {
+	const struct nand_op_parser_pattern_elem *elems;
+	unsigned int nelems;
+	int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
+};
+
+#define NAND_OP_PARSER_PATTERN(_exec, ...)							\
+	{											\
+		.exec = _exec,									\
+		.elems = (struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ },		\
+		.nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) /	\
+			  sizeof(struct nand_op_parser_pattern_elem),				\
+	}
+
+/**
+ * struct nand_op_parser - NAND controller operation parser descriptor
+ * @patterns: array of supported patterns
+ * @npatterns: length of the @patterns array
+ *
+ * The parser descriptor is just an array of supported patterns which will be
+ * iterated over by nand_op_parser_exec_op() every time it tries to execute a
+ * NAND operation (or tries to determine if a specific operation is supported).
+ *
+ * It is worth mentioning that patterns will be tested in their declaration
+ * order, and the first match will be taken, so it's important to order patterns
+ * appropriately so that simple/inefficient patterns are placed at the end of
+ * the list. Usually, this is where you put single instruction patterns.
+ */
+struct nand_op_parser {
+	const struct nand_op_parser_pattern *patterns;
+	unsigned int npatterns;
+};
+
+#define NAND_OP_PARSER(...)									\
+	{											\
+		.patterns = (struct nand_op_parser_pattern[]) { __VA_ARGS__ },			\
+		.npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) /	\
+			     sizeof(struct nand_op_parser_pattern),				\
+	}
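+
+/*
+ * Illustrative sketch only: a controller driver would typically describe what
+ * it supports with a static parser built from the macros above. The pattern
+ * below (one command, up to 5 address cycles, an optional second command, an
+ * optional wait-ready and up to 4096 data-in bytes) and the
+ * example_exec_pattern() hook are assumptions chosen for illustration.
+ *
+ *	static const struct nand_op_parser example_parser = NAND_OP_PARSER(
+ *		NAND_OP_PARSER_PATTERN(
+ *			example_exec_pattern,
+ *			NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ *			NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+ *			NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ *			NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ *			NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4096)));
+ */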
+
+/**
+ * struct nand_operation - NAND operation descriptor
+ * @instrs: array of instructions to execute
+ * @ninstrs: length of the @instrs array
+ *
+ * The actual operation structure that will be passed to chip->exec_op().
+ */
+struct nand_operation {
+	const struct nand_op_instr *instrs;
+	unsigned int ninstrs;
+};
+
+#define NAND_OPERATION(_instrs)					\
+	{							\
+		.instrs = _instrs,				\
+		.ninstrs = ARRAY_SIZE(_instrs),			\
+	}
+
+int nand_op_parser_exec_op(struct nand_chip *chip,
+			   const struct nand_op_parser *parser,
+			   const struct nand_operation *op, bool check_only);
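+
+/*
+ * Illustrative sketch only: the parser is usually wired into the controller
+ * driver's ->exec_op() implementation, which forwards the operation as-is.
+ * example_parser and example_exec_op() are assumptions, not part of the API.
+ *
+ *	static int example_exec_op(struct nand_chip *chip,
+ *				   const struct nand_operation *op,
+ *				   bool check_only)
+ *	{
+ *		return nand_op_parser_exec_op(chip, &example_parser, op,
+ *					      check_only);
+ *	}
+ */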
+
+/**
+ * struct nand_chip - NAND Private Flash Chip Data
+ * @mtd:		MTD device registered to the MTD framework
+ * @IO_ADDR_R:		[BOARDSPECIFIC] address to read the 8 I/O lines of the
+ *			flash device
+ * @IO_ADDR_W:		[BOARDSPECIFIC] address to write the 8 I/O lines of the
+ *			flash device.
+ * @read_byte:		[REPLACEABLE] read one byte from the chip
+ * @read_word:		[REPLACEABLE] read one word from the chip
+ * @write_byte:		[REPLACEABLE] write a single byte to the chip on the
+ *			low 8 I/O lines
+ * @write_buf:		[REPLACEABLE] write data from the buffer to the chip
+ * @read_buf:		[REPLACEABLE] read data from the chip into the buffer
+ * @select_chip:	[REPLACEABLE] select chip nr
+ * @block_bad:		[REPLACEABLE] check if a block is bad, using OOB markers
+ * @block_markbad:	[REPLACEABLE] mark a block bad
+ * @cmd_ctrl:		[BOARDSPECIFIC] hardware-specific function for controlling
+ *			ALE/CLE/nCE. Also used to write command and address
+ * @dev_ready:		[BOARDSPECIFIC] hardware-specific function for accessing
+ *			the device ready/busy line. If set to NULL no access to
+ *			ready/busy is available and the ready/busy information
+ *			is read from the chip status register.
+ * @cmdfunc:		[REPLACEABLE] hardware-specific function for writing
+ *			commands to the chip.
+ * @waitfunc:		[REPLACEABLE] hardware-specific function for waiting on
+ *			ready.
+ * @exec_op:		controller specific method to execute NAND operations.
+ *			This method replaces ->cmdfunc(),
+ *			->{read,write}_{buf,byte,word}(), ->dev_ready() and
+ *			->waitfunc().
+ * @setup_read_retry:	[FLASHSPECIFIC] flash (vendor) specific function for
+ *			setting the read-retry mode. Mostly needed for MLC NAND.
+ * @ecc:		[BOARDSPECIFIC] ECC control structure
+ * @buf_align:		minimum buffer alignment required by a platform
+ * @dummy_controller:	dummy controller implementation for drivers that can
+ *			only control a single chip
+ * @erase:		[REPLACEABLE] erase function
+ * @chip_delay:		[BOARDSPECIFIC] chip dependent delay for transferring
+ *			data from array to read regs (tR).
+ * @state:		[INTERN] the current state of the NAND device
+ * @oob_poi:		"poison value buffer," used for laying out OOB data
+ *			before writing
+ * @page_shift:		[INTERN] number of address bits in a page (column
+ *			address bits).
+ * @phys_erase_shift:	[INTERN] number of address bits in a physical eraseblock
+ * @bbt_erase_shift:	[INTERN] number of address bits in a bbt entry
+ * @chip_shift:		[INTERN] number of address bits in one chip
+ * @options:		[BOARDSPECIFIC] various chip options. They can partly
+ *			be set to inform nand_scan about special functionality.
+ *			See the defines for further explanation.
+ * @bbt_options:	[INTERN] bad block specific options. All options used
+ *			here must come from bbm.h. By default, these options
+ *			will be copied to the appropriate nand_bbt_descr's.
+ * @badblockpos:	[INTERN] position of the bad block marker in the oob
+ *			area.
+ * @badblockbits:	[INTERN] minimum number of set bits in a good block's
+ *			bad block marker position; i.e., BBM == 11110111b is
+ *			not bad when badblockbits == 7
+ * @bits_per_cell:	[INTERN] number of bits per cell. i.e., 1 means SLC.
+ * @ecc_strength_ds:	[INTERN] ECC correctability from the datasheet.
+ *			Minimum amount of bit errors per @ecc_step_ds guaranteed
+ *			to be correctable. If unknown, set to zero.
+ * @ecc_step_ds:	[INTERN] ECC step required by the @ecc_strength_ds,
+ *			also from the datasheet. It is the recommended ECC step
+ *			size, if known; if unknown, set to zero.
+ * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
+ *			      set to the actually used ONFI mode if the chip is
+ *			      ONFI compliant or deduced from the datasheet if
+ *			      the NAND chip is not ONFI compliant.
+ * @numchips:		[INTERN] number of physical chips
+ * @chipsize:		[INTERN] the size of one chip for multichip arrays
+ * @pagemask:		[INTERN] page number mask = number of (pages / chip) - 1
+ * @data_buf:		[INTERN] buffer for data, size is (page size + oobsize).
+ * @pagebuf:		[INTERN] holds the pagenumber which is currently in
+ *			data_buf.
+ * @pagebuf_bitflips:	[INTERN] holds the bitflip count for the page which is
+ *			currently in data_buf.
+ * @subpagesize:	[INTERN] holds the subpagesize
+ * @id:			[INTERN] holds NAND ID
+ * @parameters:		[INTERN] holds generic parameters under an easily
+ *			readable form.
+ * @max_bb_per_die:	[INTERN] the max number of bad blocks each die of this
+ *			NAND device may encounter during its lifetime.
+ * @blocks_per_die:	[INTERN] The number of PEBs in a die
+ * @data_interface:	[INTERN] NAND interface timing information
+ * @read_retries:	[INTERN] the number of read retry modes supported
+ * @set_features:	[REPLACEABLE] set the NAND chip features
+ * @get_features:	[REPLACEABLE] get the NAND chip features
+ * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If
+ *			  chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
+ *			  means the configuration should not be applied but
+ *			  only checked.
+ * @bbt:		[INTERN] bad block table pointer
+ * @bbt_td:		[REPLACEABLE] bad block table descriptor for flash
+ *			lookup.
+ * @bbt_md:		[REPLACEABLE] bad block table mirror descriptor
+ * @badblock_pattern:	[REPLACEABLE] bad block scan pattern used for initial
+ *			bad block scan.
+ * @controller:		[REPLACEABLE] a pointer to a hardware controller
+ *			structure which is shared among multiple independent
+ *			devices.
+ * @priv:		[OPTIONAL] pointer to private chip data
+ * @manufacturer:	[INTERN] Contains manufacturer information
+ * @manufacturer.desc:	[INTERN] Contains manufacturer's description
+ * @manufacturer.priv:	[INTERN] Contains manufacturer private information
+ */
+
+struct nand_chip {
+	struct mtd_info mtd;
+	void __iomem *IO_ADDR_R;
+	void __iomem *IO_ADDR_W;
+
+	uint8_t (*read_byte)(struct mtd_info *mtd);
+	u16 (*read_word)(struct mtd_info *mtd);
+	void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
+	void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
+	void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
+	void (*select_chip)(struct mtd_info *mtd, int chip);
+	int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
+	int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
+	void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
+	int (*dev_ready)(struct mtd_info *mtd);
+	void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
+			int page_addr);
+	int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
+	int (*exec_op)(struct nand_chip *chip,
+		       const struct nand_operation *op,
+		       bool check_only);
+	int (*erase)(struct mtd_info *mtd, int page);
+	int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip,
+			    int feature_addr, uint8_t *subfeature_para);
+	int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip,
+			    int feature_addr, uint8_t *subfeature_para);
+	int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
+	int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
+				    const struct nand_data_interface *conf);
+
+	int chip_delay;
+	unsigned int options;
+	unsigned int bbt_options;
+
+	int page_shift;
+	int phys_erase_shift;
+	int bbt_erase_shift;
+	int chip_shift;
+	int numchips;
+	uint64_t chipsize;
+	int pagemask;
+	u8 *data_buf;
+	int pagebuf;
+	unsigned int pagebuf_bitflips;
+	int subpagesize;
+	uint8_t bits_per_cell;
+	uint16_t ecc_strength_ds;
+	uint16_t ecc_step_ds;
+	int onfi_timing_mode_default;
+	int badblockpos;
+	int badblockbits;
+
+	struct nand_id id;
+	struct nand_parameters parameters;
+	u16 max_bb_per_die;
+	u32 blocks_per_die;
+
+	struct nand_data_interface data_interface;
+
+	int read_retries;
+
+	flstate_t state;
+
+	uint8_t *oob_poi;
+	struct nand_controller *controller;
+
+	struct nand_ecc_ctrl ecc;
+	unsigned long buf_align;
+	struct nand_controller dummy_controller;
+
+	uint8_t *bbt;
+	struct nand_bbt_descr *bbt_td;
+	struct nand_bbt_descr *bbt_md;
+
+	struct nand_bbt_descr *badblock_pattern;
+
+	void *priv;
+
+	struct {
+		const struct nand_manufacturer *desc;
+		void *priv;
+	} manufacturer;
+};
+
+static inline int nand_exec_op(struct nand_chip *chip,
+			       const struct nand_operation *op)
+{
+	if (!chip->exec_op)
+		return -ENOTSUPP;
+
+	return chip->exec_op(chip, op, false);
+}
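+
+/*
+ * Illustrative sketch only: building an operation out of the NAND_OP_*
+ * instruction macros and issuing it through nand_exec_op(). The opcode,
+ * buffer size and zero delays are assumptions chosen for the example.
+ *
+ *	u8 addr = 0;
+ *	u8 id[8];
+ *	struct nand_op_instr instrs[] = {
+ *		NAND_OP_CMD(NAND_CMD_READID, 0),
+ *		NAND_OP_ADDR(1, &addr, 0),
+ *		NAND_OP_8BIT_DATA_IN(sizeof(id), id, 0),
+ *	};
+ *	struct nand_operation op = NAND_OPERATION(instrs);
+ *
+ *	ret = nand_exec_op(chip, &op);
+ */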
+
+extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
+extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
+
+static inline void nand_set_flash_node(struct nand_chip *chip,
+				       struct device_node *np)
+{
+	mtd_set_of_node(&chip->mtd, np);
+}
+
+static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
+{
+	return mtd_get_of_node(&chip->mtd);
+}
+
+static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
+{
+	return container_of(mtd, struct nand_chip, mtd);
+}
+
+static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
+{
+	return &chip->mtd;
+}
+
+static inline void *nand_get_controller_data(struct nand_chip *chip)
+{
+	return chip->priv;
+}
+
+static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
+{
+	chip->priv = priv;
+}
+
+static inline void nand_set_manufacturer_data(struct nand_chip *chip,
+					      void *priv)
+{
+	chip->manufacturer.priv = priv;
+}
+
+static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
+{
+	return chip->manufacturer.priv;
+}
+
+/*
+ * NAND Flash Manufacturer ID Codes
+ */
+#define NAND_MFR_TOSHIBA	0x98
+#define NAND_MFR_ESMT		0xc8
+#define NAND_MFR_SAMSUNG	0xec
+#define NAND_MFR_FUJITSU	0x04
+#define NAND_MFR_NATIONAL	0x8f
+#define NAND_MFR_RENESAS	0x07
+#define NAND_MFR_STMICRO	0x20
+#define NAND_MFR_HYNIX		0xad
+#define NAND_MFR_MICRON		0x2c
+#define NAND_MFR_AMD		0x01
+#define NAND_MFR_MACRONIX	0xc2
+#define NAND_MFR_EON		0x92
+#define NAND_MFR_SANDISK	0x45
+#define NAND_MFR_INTEL		0x89
+#define NAND_MFR_ATO		0x9b
+#define NAND_MFR_WINBOND	0xef
+
+
+/*
+ * A helper for defining older NAND chips where the second ID byte fully
+ * defined the chip, including the geometry (chip size, eraseblock size, page
+ * size). All these chips have 512 bytes NAND page size.
+ */
+#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts)          \
+	{ .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
+	  .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }
+
+/*
+ * A helper for defining newer chips which report their page size and
+ * eraseblock size via the extended ID bytes.
+ *
+ * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with
+ * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the
+ * device ID now only represented a particular total chip size (and voltage,
+ * buswidth), and the page size, eraseblock size, and OOB size could vary while
+ * using the same device ID.
+ */
+#define EXTENDED_ID_NAND(nm, devid, chipsz, opts)                      \
+	{ .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
+	  .options = (opts) }
+
+#define NAND_ECC_INFO(_strength, _step)	\
+			{ .strength_ds = (_strength), .step_ds = (_step) }
+#define NAND_ECC_STRENGTH(type)		((type)->ecc.strength_ds)
+#define NAND_ECC_STEP(type)		((type)->ecc.step_ds)
+
+/**
+ * struct nand_flash_dev - NAND Flash Device ID Structure
+ * @name: a human-readable name of the NAND chip
+ * @mfr_id: manufacturer ID part of the full chip ID array (refers to the same
+ *          memory address as @id[0])
+ * @dev_id: device ID part of the full chip ID array (refers to the same memory
+ *          address as @id[1]); this is the second byte of the full chip ID
+ * @id: full device ID array
+ * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
+ *            well as the eraseblock size) is determined from the extended NAND
+ *            chip ID array
+ * @chipsize: total chip size in MiB
+ * @erasesize: eraseblock size in bytes (determined from the extended ID if 0)
+ * @options: stores various chip bit options
+ * @id_len: The valid length of the @id.
+ * @oobsize: OOB size
+ * @ecc: ECC correctability and step information from the datasheet.
+ * @ecc.strength_ds: The ECC correctability from the datasheet, same as the
+ *                   @ecc_strength_ds in nand_chip{}.
+ * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the
+ *               @ecc_step_ds in nand_chip{}, also from the datasheet.
+ *               For example, the "4bit ECC for each 512Byte" can be set with
+ *               NAND_ECC_INFO(4, 512).
+ * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND
+ *			      reset. Should be deduced from timings described
+ *			      in the datasheet.
+ *
+ */
+struct nand_flash_dev {
+	char *name;
+	union {
+		struct {
+			uint8_t mfr_id;
+			uint8_t dev_id;
+		};
+		uint8_t id[NAND_MAX_ID_LEN];
+	};
+	unsigned int pagesize;
+	unsigned int chipsize;
+	unsigned int erasesize;
+	unsigned int options;
+	uint16_t id_len;
+	uint16_t oobsize;
+	struct {
+		uint16_t strength_ds;
+		uint16_t step_ds;
+	} ecc;
+	int onfi_timing_mode_default;
+};
+
+/**
+ * struct nand_manufacturer - NAND Flash Manufacturer structure
+ * @name:	Manufacturer name
+ * @id:		manufacturer ID code of device.
+ * @ops:	manufacturer operations
+ */
+struct nand_manufacturer {
+	int id;
+	char *name;
+	const struct nand_manufacturer_ops *ops;
+};
+
+const struct nand_manufacturer *nand_get_manufacturer(u8 id);
+
+static inline const char *
+nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
+{
+	return manufacturer ? manufacturer->name : "Unknown";
+}
+
+extern struct nand_flash_dev nand_flash_ids[];
+
+extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
+extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
+extern const struct nand_manufacturer_ops hynix_nand_manuf_ops;
+extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
+extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
+extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
+
+int nand_create_bbt(struct nand_chip *chip);
+int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
+int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
+int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
+int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
+		    int allowbbt);
+
+/**
+ * struct platform_nand_chip - chip level device structure
+ * @nr_chips:		max. number of chips to scan for
+ * @chip_offset:	chip number offset
+ * @nr_partitions:	number of partitions pointed to by partitions (or zero)
+ * @partitions:		mtd partition list
+ * @chip_delay:		R/B delay value in us
+ * @options:		Option flags, e.g. 16bit buswidth
+ * @bbt_options:	BBT option flags, e.g. NAND_BBT_USE_FLASH
+ * @part_probe_types:	NULL-terminated array of probe types
+ */
+struct platform_nand_chip {
+	int nr_chips;
+	int chip_offset;
+	int nr_partitions;
+	struct mtd_partition *partitions;
+	int chip_delay;
+	unsigned int options;
+	unsigned int bbt_options;
+	const char **part_probe_types;
+};
+
+/* Keep gcc happy */
+struct platform_device;
+
+/**
+ * struct platform_nand_ctrl - controller level device structure
+ * @probe:		platform specific function to probe/setup hardware
+ * @remove:		platform specific function to remove/teardown hardware
+ * @dev_ready:		platform specific function to read ready/busy pin
+ * @select_chip:	platform specific chip select function
+ * @cmd_ctrl:		platform specific function for controlling
+ *			ALE/CLE/nCE. Also used to write command and address
+ * @write_buf:		platform specific function for write buffer
+ * @read_buf:		platform specific function for read buffer
+ * @priv:		private data to transport driver specific settings
+ *
+ * All fields are optional and depend on the hardware driver requirements
+ */
+struct platform_nand_ctrl {
+	int (*probe)(struct platform_device *pdev);
+	void (*remove)(struct platform_device *pdev);
+	int (*dev_ready)(struct mtd_info *mtd);
+	void (*select_chip)(struct mtd_info *mtd, int chip);
+	void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
+	void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
+	void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
+	void *priv;
+};
+
+/**
+ * struct platform_nand_data - container structure for platform-specific data
+ * @chip:		chip level chip structure
+ * @ctrl:		controller level device structure
+ */
+struct platform_nand_data {
+	struct platform_nand_chip chip;
+	struct platform_nand_ctrl ctrl;
+};
+
+/* return the supported asynchronous timing mode. */
+static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
+{
+	if (!chip->parameters.onfi)
+		return ONFI_TIMING_MODE_UNKNOWN;
+
+	return chip->parameters.onfi->async_timing_mode;
+}
+
+int onfi_fill_data_interface(struct nand_chip *chip,
+			     enum nand_data_interface_type type,
+			     int timing_mode);
+
+/*
+ * Check if it is an SLC NAND.
+ * !nand_is_slc() can be used to check for MLC/TLC NAND chips.
+ * We do not distinguish between MLC and TLC for now.
+ */
+static inline bool nand_is_slc(struct nand_chip *chip)
+{
+	WARN(chip->bits_per_cell == 0,
+	     "chip->bits_per_cell is used uninitialized\n");
+	return chip->bits_per_cell == 1;
+}
+
+/**
+ * nand_opcode_8bits - Check if the opcode's address should be sent only on
+ *		       the lower 8 bits
+ * @command: opcode to check
+ */
+static inline int nand_opcode_8bits(unsigned int command)
+{
+	switch (command) {
+	case NAND_CMD_READID:
+	case NAND_CMD_PARAM:
+	case NAND_CMD_GET_FEATURES:
+	case NAND_CMD_SET_FEATURES:
+		return 1;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/* get timing characteristics from ONFI timing mode. */
+const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
+
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+				void *ecc, int ecclen,
+				void *extraoob, int extraooblen,
+				int threshold);
+
+int nand_ecc_choose_conf(struct nand_chip *chip,
+			 const struct nand_ecc_caps *caps, int oobavail);
+
+/* Default write_oob implementation */
+int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
+
+/* Default write_oob syndrome implementation */
+int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+			    int page);
+
+/* Default read_oob implementation */
+int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
+
+/* Default read_oob syndrome implementation */
+int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+			   int page);
+
+/* Wrappers used by controllers/vendors to issue GET/SET FEATURES operations */
+int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
+int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
+/* Stub used by drivers that do not support GET/SET FEATURES operations */
+int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+				  int addr, u8 *subfeature_param);
+
+/* Default read_page_raw implementation */
+int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+		       uint8_t *buf, int oob_required, int page);
+int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+			       u8 *buf, int oob_required, int page);
+
+/* Default write_page_raw implementation */
+int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+			const uint8_t *buf, int oob_required, int page);
+int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+				const u8 *buf, int oob_required, int page);
+
+/* Reset and initialize a NAND device */
+int nand_reset(struct nand_chip *chip, int chipnr);
+
+/* NAND operation helpers */
+int nand_reset_op(struct nand_chip *chip);
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+		   unsigned int len);
+int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_exit_status_op(struct nand_chip *chip);
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+		      unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_change_read_column_op(struct nand_chip *chip,
+			       unsigned int offset_in_page, void *buf,
+			       unsigned int len, bool force_8bit);
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+		     unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+			    unsigned int offset_in_page, const void *buf,
+			    unsigned int len);
+int nand_prog_page_end_op(struct nand_chip *chip);
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+		      unsigned int offset_in_page, const void *buf,
+		      unsigned int len);
+int nand_change_write_column_op(struct nand_chip *chip,
+				unsigned int offset_in_page, const void *buf,
+				unsigned int len, bool force_8bit);
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+		      bool force_8bit);
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+		       unsigned int len, bool force_8bit);
+
+/*
+ * Free resources held by the NAND device; must be called when an error occurs
+ * after a successful nand_scan().
+ */
+void nand_cleanup(struct nand_chip *chip);
+/* Unregister the MTD device and call nand_cleanup() */
+void nand_release(struct mtd_info *mtd);
+
+/* Default extended ID decoding function */
+void nand_decode_ext_id(struct nand_chip *chip);
+
+/*
+ * External helper for controller drivers that have to implement the WAITRDY
+ * instruction and have no physical pin to check it.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
+
+#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
new file mode 100644
index 0000000..c759d40
--- /dev/null
+++ b/include/linux/mtd/sh_flctl.h
@@ -0,0 +1,192 @@
+/*
+ * SuperH FLCTL nand controller
+ *
+ * Copyright © 2008 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef __SH_FLCTL_H__
+#define __SH_FLCTL_H__
+
+#include <linux/completion.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/pm_qos.h>
+
+/* FLCTL registers */
+#define FLCMNCR(f)		(f->reg + 0x0)
+#define FLCMDCR(f)		(f->reg + 0x4)
+#define FLCMCDR(f)		(f->reg + 0x8)
+#define FLADR(f)		(f->reg + 0xC)
+#define FLADR2(f)		(f->reg + 0x3C)
+#define FLDATAR(f)		(f->reg + 0x10)
+#define FLDTCNTR(f)		(f->reg + 0x14)
+#define FLINTDMACR(f)		(f->reg + 0x18)
+#define FLBSYTMR(f)		(f->reg + 0x1C)
+#define FLBSYCNT(f)		(f->reg + 0x20)
+#define FLDTFIFO(f)		(f->reg + 0x24)
+#define FLECFIFO(f)		(f->reg + 0x28)
+#define FLTRCR(f)		(f->reg + 0x2C)
+#define FLHOLDCR(f)		(f->reg + 0x38)
+#define	FL4ECCRESULT0(f)	(f->reg + 0x80)
+#define	FL4ECCRESULT1(f)	(f->reg + 0x84)
+#define	FL4ECCRESULT2(f)	(f->reg + 0x88)
+#define	FL4ECCRESULT3(f)	(f->reg + 0x8C)
+#define	FL4ECCCR(f)		(f->reg + 0x90)
+#define	FL4ECCCNT(f)		(f->reg + 0x94)
+#define	FLERRADR(f)		(f->reg + 0x98)
+
+/* FLCMNCR control bits */
+#define _4ECCCNTEN	(0x1 << 24)
+#define _4ECCEN		(0x1 << 23)
+#define _4ECCCORRECT	(0x1 << 22)
+#define SHBUSSEL	(0x1 << 20)
+#define SEL_16BIT	(0x1 << 19)
+#define SNAND_E		(0x1 << 18)	/* SNAND (0=512 1=2048)*/
+#define QTSEL_E		(0x1 << 17)
+#define ENDIAN		(0x1 << 16)	/* 1 = little endian */
+#define FCKSEL_E	(0x1 << 15)
+#define ACM_SACCES_MODE	(0x01 << 10)
+#define NANWF_E		(0x1 << 9)
+#define SE_D		(0x1 << 8)	/* Spare area disable */
+#define	CE1_ENABLE	(0x1 << 4)	/* Chip Enable 1 */
+#define	CE0_ENABLE	(0x1 << 3)	/* Chip Enable 0 */
+#define	TYPESEL_SET	(0x1 << 0)
+
+/*
+ * Clock settings using the PULSEx registers from FLCMNCR
+ *
+ * Some hardware uses bits called PULSEx instead of FCKSEL_E and QTSEL_E
+ * to control the clock divider used between the High-Speed Peripheral Clock
+ * and the FLCTL internal clock. If so, use the CLK_8B_xxx values for
+ * connecting 8 bit and the CLK_16B_xxx values for connecting 16 bit bus
+ * bandwidth NAND chips. For the 16 bit version the divider is separate for
+ * the pulse width of the high and low signals.
+ */
+#define PULSE3	(0x1 << 27)
+#define PULSE2	(0x1 << 17)
+#define PULSE1	(0x1 << 15)
+#define PULSE0	(0x1 << 9)
+#define CLK_8B_0_5			PULSE1
+#define CLK_8B_1			0x0
+#define CLK_8B_1_5			(PULSE1 | PULSE2)
+#define CLK_8B_2			PULSE0
+#define CLK_8B_3			(PULSE0 | PULSE1 | PULSE2)
+#define CLK_8B_4			(PULSE0 | PULSE2)
+#define CLK_16B_6L_2H			PULSE0
+#define CLK_16B_9L_3H			(PULSE0 | PULSE1 | PULSE2)
+#define CLK_16B_12L_4H			(PULSE0 | PULSE2)
+
+/* FLCMDCR control bits */
+#define ADRCNT2_E	(0x1 << 31)	/* 5byte address enable */
+#define ADRMD_E		(0x1 << 26)	/* Sector address access */
+#define CDSRC_E		(0x1 << 25)	/* Data buffer selection */
+#define DOSR_E		(0x1 << 24)	/* Status read check */
+#define SELRW		(0x1 << 21)	/*  0:read 1:write */
+#define DOADR_E		(0x1 << 20)	/* Address stage execute */
+#define ADRCNT_1	(0x00 << 18)	/* Address data bytes: 1byte */
+#define ADRCNT_2	(0x01 << 18)	/* Address data bytes: 2byte */
+#define ADRCNT_3	(0x02 << 18)	/* Address data bytes: 3byte */
+#define ADRCNT_4	(0x03 << 18)	/* Address data bytes: 4byte */
+#define DOCMD2_E	(0x1 << 17)	/* 2nd cmd stage execute */
+#define DOCMD1_E	(0x1 << 16)	/* 1st cmd stage execute */
+
+/* FLINTDMACR control bits */
+#define ESTERINTE	(0x1 << 24)	/* ECC error interrupt enable */
+#define AC1CLR		(0x1 << 19)	/* ECC FIFO clear */
+#define AC0CLR		(0x1 << 18)	/* Data FIFO clear */
+#define DREQ0EN		(0x1 << 16)	/* FLDTFIFODMA Request Enable */
+#define ECERB		(0x1 << 9)	/* ECC error */
+#define STERB		(0x1 << 8)	/* Status error */
+#define STERINTE	(0x1 << 4)	/* Status error enable */
+
+/* FLTRCR control bits */
+#define TRSTRT		(0x1 << 0)	/* translation start */
+#define TREND		(0x1 << 1)	/* translation end */
+
+/*
+ * FLHOLDCR control bits
+ *
+ * HOLDEN: Bus Occupancy Enable (inverted)
+ * Enable this bit when the external bus might be used in between transfers.
+ * If not set and the bus gets used by other modules, a deadlock occurs.
+ */
+#define HOLDEN		(0x1 << 0)
+
+/* FL4ECCCR control bits */
+#define	_4ECCFA		(0x1 << 2)	/* 4 symbols correct fault */
+#define	_4ECCEND	(0x1 << 1)	/* 4 symbols end */
+#define	_4ECCEXST	(0x1 << 0)	/* 4 symbols exist */
+
+#define LOOP_TIMEOUT_MAX	0x00010000
+
+enum flctl_ecc_res_t {
+	FL_SUCCESS,
+	FL_REPAIRABLE,
+	FL_ERROR,
+	FL_TIMEOUT
+};
+
+struct dma_chan;
+
+struct sh_flctl {
+	struct nand_chip	chip;
+	struct platform_device	*pdev;
+	struct dev_pm_qos_request pm_qos;
+	void __iomem		*reg;
+	resource_size_t		fifo;
+
+	uint8_t	done_buff[2048 + 64];	/* max size 2048 + 64 */
+	int	read_bytes;
+	unsigned int index;
+	int	seqin_column;		/* column in SEQIN cmd */
+	int	seqin_page_addr;	/* page_addr in SEQIN cmd */
+	uint32_t seqin_read_cmd;		/* read cmd in SEQIN cmd */
+	int	erase1_page_addr;	/* page_addr in ERASE1 cmd */
+	uint32_t erase_ADRCNT;		/* bits of FLCMDCR in ERASE1 cmd */
+	uint32_t rw_ADRCNT;	/* bits of FLCMDCR in READ WRITE cmd */
+	uint32_t flcmncr_base;	/* base value of FLCMNCR */
+	uint32_t flintdmacr_base;	/* irq enable bits */
+
+	unsigned page_size:1;	/* NAND page size (0 = 512, 1 = 2048) */
+	unsigned hwecc:1;	/* Hardware ECC (0 = disabled, 1 = enabled) */
+	unsigned holden:1;	/* Hardware has FLHOLDCR and HOLDEN is set */
+	unsigned qos_request:1;	/* QoS request to prevent deep power shutdown */
+
+	/* DMA related objects */
+	struct dma_chan		*chan_fifo0_rx;
+	struct dma_chan		*chan_fifo0_tx;
+	struct completion	dma_complete;
+};
+
+struct sh_flctl_platform_data {
+	struct mtd_partition	*parts;
+	int			nr_parts;
+	unsigned long		flcmncr_val;
+
+	unsigned has_hwecc:1;
+	unsigned use_holden:1;
+
+	unsigned int            slave_id_fifo0_tx;
+	unsigned int            slave_id_fifo0_rx;
+};
+
+static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
+{
+	return container_of(mtd_to_nand(mtdinfo), struct sh_flctl, chip);
+}
+
+#endif	/* __SH_FLCTL_H__ */
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
new file mode 100644
index 0000000..e1845fc
--- /dev/null
+++ b/include/linux/mtd/sharpsl.h
@@ -0,0 +1,21 @@
+/*
+ * SharpSL NAND support
+ *
+ * Copyright (C) 2008 Dmitry Baryshkov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+struct sharpsl_nand_platform_data {
+	struct nand_bbt_descr	*badblock_pattern;
+	const struct mtd_ooblayout_ops *ecc_layout;
+	struct mtd_partition	*partitions;
+	unsigned int		nr_partitions;
+	const char *const	*part_parsers;
+};
diff --git a/include/linux/mtd/spear_smi.h b/include/linux/mtd/spear_smi.h
new file mode 100644
index 0000000..581603a
--- /dev/null
+++ b/include/linux/mtd/spear_smi.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2010 ST Microelectronics
+ * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MTD_SPEAR_SMI_H
+#define __MTD_SPEAR_SMI_H
+
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+/* max possible slots for serial-nor flash chip in the SMI controller */
+#define MAX_NUM_FLASH_CHIP	4
+
+/* macro to define partitions for flash devices */
+#define DEFINE_PARTS(n, of, s)		\
+{					\
+	.name = n,			\
+	.offset = of,			\
+	.size = s,			\
+}
+
+/**
+ * struct spear_smi_flash_info - platform structure for passing flash
+ * information
+ *
+ * name: name of the serial nor flash for identification
+ * mem_base: the memory base on which the flash is mapped
+ * size: size of the flash in bytes
+ * partitions: partition details
+ * nr_partitions: number of partitions
+ * fast_mode: whether flash supports fast mode
+ */
+
+struct spear_smi_flash_info {
+	char *name;
+	unsigned long mem_base;
+	unsigned long size;
+	struct mtd_partition *partitions;
+	int nr_partitions;
+	u8 fast_mode;
+};
+
+/**
+ * struct spear_smi_plat_data - platform structure for configuring smi
+ *
+ * clk_rate: clk rate at which SMI must operate
+ * num_flashes: number of flashes present on board
+ * board_flash_info: specific details of each flash present on board
+ */
+struct spear_smi_plat_data {
+	unsigned long clk_rate;
+	int num_flashes;
+	struct spear_smi_flash_info *board_flash_info;
+	struct device_node *np[MAX_NUM_FLASH_CHIP];
+};
+
+#endif /* __MTD_SPEAR_SMI_H */
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
new file mode 100644
index 0000000..c922e97
--- /dev/null
+++ b/include/linux/mtd/spi-nor.h
@@ -0,0 +1,415 @@
+/*
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_MTD_SPI_NOR_H
+#define __LINUX_MTD_SPI_NOR_H
+
+#include <linux/bitops.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/mtd.h>
+
+/*
+ * Manufacturer IDs
+ *
+ * The first byte returned from the flash after sending opcode SPINOR_OP_RDID.
+ * Sometimes these are the same as CFI IDs, but sometimes they aren't.
+ */
+#define SNOR_MFR_ATMEL		CFI_MFR_ATMEL
+#define SNOR_MFR_GIGADEVICE	0xc8
+#define SNOR_MFR_INTEL		CFI_MFR_INTEL
+#define SNOR_MFR_MICRON		CFI_MFR_ST /* ST Micro <--> Micron */
+#define SNOR_MFR_MACRONIX	CFI_MFR_MACRONIX
+#define SNOR_MFR_SPANSION	CFI_MFR_AMD
+#define SNOR_MFR_SST		CFI_MFR_SST
+#define SNOR_MFR_WINBOND	0xef /* Also used by some Spansion */
+
+/*
+ * Note on opcode nomenclature: some opcodes have a format like
+ * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number
+ * of I/O lines used for the opcode, address, and data (respectively). The
+ * FUNCTION has an optional suffix of '4', to represent an opcode which
+ * requires a 4-byte (32-bit) address.
+ */
+
+/* Flash opcodes. */
+#define SPINOR_OP_WREN		0x06	/* Write enable */
+#define SPINOR_OP_RDSR		0x05	/* Read status register */
+#define SPINOR_OP_WRSR		0x01	/* Write status register 1 byte */
+#define SPINOR_OP_RDSR2		0x3f	/* Read status register 2 */
+#define SPINOR_OP_WRSR2		0x3e	/* Write status register 2 */
+#define SPINOR_OP_READ		0x03	/* Read data bytes (low frequency) */
+#define SPINOR_OP_READ_FAST	0x0b	/* Read data bytes (high frequency) */
+#define SPINOR_OP_READ_1_1_2	0x3b	/* Read data bytes (Dual Output SPI) */
+#define SPINOR_OP_READ_1_2_2	0xbb	/* Read data bytes (Dual I/O SPI) */
+#define SPINOR_OP_READ_1_1_4	0x6b	/* Read data bytes (Quad Output SPI) */
+#define SPINOR_OP_READ_1_4_4	0xeb	/* Read data bytes (Quad I/O SPI) */
+#define SPINOR_OP_PP		0x02	/* Page program (up to 256 bytes) */
+#define SPINOR_OP_PP_1_1_4	0x32	/* Quad page program */
+#define SPINOR_OP_PP_1_4_4	0x38	/* Quad page program */
+#define SPINOR_OP_BE_4K		0x20	/* Erase 4KiB block */
+#define SPINOR_OP_BE_4K_PMC	0xd7	/* Erase 4KiB block on PMC chips */
+#define SPINOR_OP_BE_32K	0x52	/* Erase 32KiB block */
+#define SPINOR_OP_CHIP_ERASE	0xc7	/* Erase whole flash chip */
+#define SPINOR_OP_SE		0xd8	/* Sector erase (usually 64KiB) */
+#define SPINOR_OP_RDID		0x9f	/* Read JEDEC ID */
+#define SPINOR_OP_RDSFDP	0x5a	/* Read SFDP */
+#define SPINOR_OP_RDCR		0x35	/* Read configuration register */
+#define SPINOR_OP_RDFSR		0x70	/* Read flag status register */
+#define SPINOR_OP_CLFSR		0x50	/* Clear flag status register */
+#define SPINOR_OP_RDEAR		0xc8	/* Read Extended Address Register */
+#define SPINOR_OP_WREAR		0xc5	/* Write Extended Address Register */
+
+/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
+#define SPINOR_OP_READ_4B	0x13	/* Read data bytes (low frequency) */
+#define SPINOR_OP_READ_FAST_4B	0x0c	/* Read data bytes (high frequency) */
+#define SPINOR_OP_READ_1_1_2_4B	0x3c	/* Read data bytes (Dual Output SPI) */
+#define SPINOR_OP_READ_1_2_2_4B	0xbc	/* Read data bytes (Dual I/O SPI) */
+#define SPINOR_OP_READ_1_1_4_4B	0x6c	/* Read data bytes (Quad Output SPI) */
+#define SPINOR_OP_READ_1_4_4_4B	0xec	/* Read data bytes (Quad I/O SPI) */
+#define SPINOR_OP_PP_4B		0x12	/* Page program (up to 256 bytes) */
+#define SPINOR_OP_PP_1_1_4_4B	0x34	/* Quad page program */
+#define SPINOR_OP_PP_1_4_4_4B	0x3e	/* Quad page program */
+#define SPINOR_OP_BE_4K_4B	0x21	/* Erase 4KiB block */
+#define SPINOR_OP_BE_32K_4B	0x5c	/* Erase 32KiB block */
+#define SPINOR_OP_SE_4B		0xdc	/* Sector erase (usually 64KiB) */
+
+/* Double Transfer Rate opcodes - defined in JEDEC JESD216B. */
+#define SPINOR_OP_READ_1_1_1_DTR	0x0d
+#define SPINOR_OP_READ_1_2_2_DTR	0xbd
+#define SPINOR_OP_READ_1_4_4_DTR	0xed
+
+#define SPINOR_OP_READ_1_1_1_DTR_4B	0x0e
+#define SPINOR_OP_READ_1_2_2_DTR_4B	0xbe
+#define SPINOR_OP_READ_1_4_4_DTR_4B	0xee
+
+/* Used for SST flashes only. */
+#define SPINOR_OP_BP		0x02	/* Byte program */
+#define SPINOR_OP_WRDI		0x04	/* Write disable */
+#define SPINOR_OP_AAI_WP	0xad	/* Auto address increment word program */
+
+/* Used for S3AN flashes only */
+#define SPINOR_OP_XSE		0x50	/* Sector erase */
+#define SPINOR_OP_XPP		0x82	/* Page program */
+#define SPINOR_OP_XRDSR		0xd7	/* Read status register */
+
+#define XSR_PAGESIZE		BIT(0)	/* Page size in Po2 or Linear */
+#define XSR_RDY			BIT(7)	/* Ready */
+
+
+/* Used for Macronix and Winbond flashes. */
+#define SPINOR_OP_EN4B		0xb7	/* Enter 4-byte mode */
+#define SPINOR_OP_EX4B		0xe9	/* Exit 4-byte mode */
+
+/* Used for Spansion flashes only. */
+#define SPINOR_OP_BRWR		0x17	/* Bank register write */
+#define SPINOR_OP_CLSR		0x30	/* Clear status register 1 */
+
+/* Used for Micron flashes only. */
+#define SPINOR_OP_RD_EVCR      0x65    /* Read EVCR register */
+#define SPINOR_OP_WD_EVCR      0x61    /* Write EVCR register */
+
+/* Status Register bits. */
+#define SR_WIP			BIT(0)	/* Write in progress */
+#define SR_WEL			BIT(1)	/* Write enable latch */
+/* meaning of other SR_* bits may differ between vendors */
+#define SR_BP0			BIT(2)	/* Block protect 0 */
+#define SR_BP1			BIT(3)	/* Block protect 1 */
+#define SR_BP2			BIT(4)	/* Block protect 2 */
+#define SR_TB			BIT(5)	/* Top/Bottom protect */
+#define SR_SRWD			BIT(7)	/* SR write protect */
+/* Spansion/Cypress specific status bits */
+#define SR_E_ERR		BIT(5)
+#define SR_P_ERR		BIT(6)
+
+#define SR_QUAD_EN_MX		BIT(6)	/* Macronix Quad I/O */
+
+/* Enhanced Volatile Configuration Register bits */
+#define EVCR_QUAD_EN_MICRON	BIT(7)	/* Micron Quad I/O */
+
+/* Flag Status Register bits */
+#define FSR_READY		BIT(7)	/* Device status, 0 = Busy, 1 = Ready */
+#define FSR_E_ERR		BIT(5)	/* Erase operation status */
+#define FSR_P_ERR		BIT(4)	/* Program operation status */
+#define FSR_PT_ERR		BIT(1)	/* Protection error bit */
+
+/* Configuration Register bits. */
+#define CR_QUAD_EN_SPAN		BIT(1)	/* Spansion Quad I/O */
+
+/* Status Register 2 bits. */
+#define SR2_QUAD_EN_BIT7	BIT(7)
+
+/* Supported SPI protocols */
+#define SNOR_PROTO_INST_MASK	GENMASK(23, 16)
+#define SNOR_PROTO_INST_SHIFT	16
+#define SNOR_PROTO_INST(_nbits)	\
+	((((unsigned long)(_nbits)) << SNOR_PROTO_INST_SHIFT) & \
+	 SNOR_PROTO_INST_MASK)
+
+#define SNOR_PROTO_ADDR_MASK	GENMASK(15, 8)
+#define SNOR_PROTO_ADDR_SHIFT	8
+#define SNOR_PROTO_ADDR(_nbits)	\
+	((((unsigned long)(_nbits)) << SNOR_PROTO_ADDR_SHIFT) & \
+	 SNOR_PROTO_ADDR_MASK)
+
+#define SNOR_PROTO_DATA_MASK	GENMASK(7, 0)
+#define SNOR_PROTO_DATA_SHIFT	0
+#define SNOR_PROTO_DATA(_nbits)	\
+	((((unsigned long)(_nbits)) << SNOR_PROTO_DATA_SHIFT) & \
+	 SNOR_PROTO_DATA_MASK)
+
+#define SNOR_PROTO_IS_DTR	BIT(24)	/* Double Transfer Rate */
+
+#define SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits)	\
+	(SNOR_PROTO_INST(_inst_nbits) |				\
+	 SNOR_PROTO_ADDR(_addr_nbits) |				\
+	 SNOR_PROTO_DATA(_data_nbits))
+#define SNOR_PROTO_DTR(_inst_nbits, _addr_nbits, _data_nbits)	\
+	(SNOR_PROTO_IS_DTR |					\
+	 SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits))
+
+enum spi_nor_protocol {
+	SNOR_PROTO_1_1_1 = SNOR_PROTO_STR(1, 1, 1),
+	SNOR_PROTO_1_1_2 = SNOR_PROTO_STR(1, 1, 2),
+	SNOR_PROTO_1_1_4 = SNOR_PROTO_STR(1, 1, 4),
+	SNOR_PROTO_1_1_8 = SNOR_PROTO_STR(1, 1, 8),
+	SNOR_PROTO_1_2_2 = SNOR_PROTO_STR(1, 2, 2),
+	SNOR_PROTO_1_4_4 = SNOR_PROTO_STR(1, 4, 4),
+	SNOR_PROTO_1_8_8 = SNOR_PROTO_STR(1, 8, 8),
+	SNOR_PROTO_2_2_2 = SNOR_PROTO_STR(2, 2, 2),
+	SNOR_PROTO_4_4_4 = SNOR_PROTO_STR(4, 4, 4),
+	SNOR_PROTO_8_8_8 = SNOR_PROTO_STR(8, 8, 8),
+
+	SNOR_PROTO_1_1_1_DTR = SNOR_PROTO_DTR(1, 1, 1),
+	SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2),
+	SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4),
+	SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8),
+};
+
+static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto)
+{
+	return !!(proto & SNOR_PROTO_IS_DTR);
+}
+
+static inline u8 spi_nor_get_protocol_inst_nbits(enum spi_nor_protocol proto)
+{
+	return ((unsigned long)(proto & SNOR_PROTO_INST_MASK)) >>
+		SNOR_PROTO_INST_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_addr_nbits(enum spi_nor_protocol proto)
+{
+	return ((unsigned long)(proto & SNOR_PROTO_ADDR_MASK)) >>
+		SNOR_PROTO_ADDR_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_data_nbits(enum spi_nor_protocol proto)
+{
+	return ((unsigned long)(proto & SNOR_PROTO_DATA_MASK)) >>
+		SNOR_PROTO_DATA_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
+{
+	return spi_nor_get_protocol_data_nbits(proto);
+}
+
+#define SPI_NOR_MAX_CMD_SIZE	8
+enum spi_nor_ops {
+	SPI_NOR_OPS_READ = 0,
+	SPI_NOR_OPS_WRITE,
+	SPI_NOR_OPS_ERASE,
+	SPI_NOR_OPS_LOCK,
+	SPI_NOR_OPS_UNLOCK,
+};
+
+enum spi_nor_option_flags {
+	SNOR_F_USE_FSR		= BIT(0),
+	SNOR_F_HAS_SR_TB	= BIT(1),
+	SNOR_F_NO_OP_CHIP_ERASE	= BIT(2),
+	SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
+	SNOR_F_READY_XSR_RDY	= BIT(4),
+	SNOR_F_USE_CLSR		= BIT(5),
+	SNOR_F_BROKEN_RESET	= BIT(6),
+};
+
+/**
+ * struct flash_info - Forward declaration of a structure used internally by
+ *		       spi_nor_scan()
+ */
+struct flash_info;
+
+/**
+ * struct spi_nor - Structure for defining the SPI NOR layer
+ * @mtd:		an mtd_info structure
+ * @lock:		the lock for the read/write/erase/lock/unlock operations
+ * @dev:		points to a SPI device or a SPI NOR controller device
+ * @info:		SPI NOR part JEDEC MFR ID and other info
+ * @page_size:		the page size of the SPI NOR
+ * @addr_width:		number of address bytes
+ * @erase_opcode:	the opcode for erasing a sector
+ * @read_opcode:	the read opcode
+ * @read_dummy:		the dummy needed by the read operation
+ * @program_opcode:	the program opcode
+ * @sst_write_second:	used by the SST write operation
+ * @flags:		flag options for the current SPI-NOR (SNOR_F_*)
+ * @read_proto:		the SPI protocol for read operations
+ * @write_proto:	the SPI protocol for write operations
+ * @reg_proto:		the SPI protocol for read_reg/write_reg/erase operations
+ * @cmd_buf:		used by the write_reg operation
+ * @prepare:		[OPTIONAL] do some preparations for the
+ *			read/write/erase/lock/unlock operations
+ * @unprepare:		[OPTIONAL] do some post work after the
+ *			read/write/erase/lock/unlock operations
+ * @read_reg:		[DRIVER-SPECIFIC] read out the register
+ * @write_reg:		[DRIVER-SPECIFIC] write data to the register
+ * @read:		[DRIVER-SPECIFIC] read data from the SPI NOR
+ * @write:		[DRIVER-SPECIFIC] write data to the SPI NOR
+ * @erase:		[DRIVER-SPECIFIC] erase a sector of the SPI NOR
+ *			at the offset @offs; if not provided by the driver,
+ *			spi-nor will send the erase opcode via write_reg()
+ * @flash_lock:		[FLASH-SPECIFIC] lock a region of the SPI NOR
+ * @flash_unlock:	[FLASH-SPECIFIC] unlock a region of the SPI NOR
+ * @flash_is_locked:	[FLASH-SPECIFIC] check if a region of the SPI NOR is
+ *			completely locked
+ * @quad_enable:	[FLASH-SPECIFIC] enables SPI NOR quad mode
+ * @priv:		the private data
+ */
+struct spi_nor {
+	struct mtd_info		mtd;
+	struct mutex		lock;
+	struct device		*dev;
+	const struct flash_info	*info;
+	u32			page_size;
+	u8			addr_width;
+	u8			erase_opcode;
+	u8			read_opcode;
+	u8			read_dummy;
+	u8			program_opcode;
+	enum spi_nor_protocol	read_proto;
+	enum spi_nor_protocol	write_proto;
+	enum spi_nor_protocol	reg_proto;
+	bool			sst_write_second;
+	u32			flags;
+	u8			cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+
+	int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+	void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+	int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
+	int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
+
+	ssize_t (*read)(struct spi_nor *nor, loff_t from,
+			size_t len, u_char *read_buf);
+	ssize_t (*write)(struct spi_nor *nor, loff_t to,
+			size_t len, const u_char *write_buf);
+	int (*erase)(struct spi_nor *nor, loff_t offs);
+
+	int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+	int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+	int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+	int (*quad_enable)(struct spi_nor *nor);
+
+	void *priv;
+};
+
+static inline void spi_nor_set_flash_node(struct spi_nor *nor,
+					  struct device_node *np)
+{
+	mtd_set_of_node(&nor->mtd, np);
+}
+
+static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
+{
+	return mtd_get_of_node(&nor->mtd);
+}
+
+/**
+ * struct spi_nor_hwcaps - Structure for describing the hardware capabilities
+ * supported by the SPI controller (bus master).
+ * @mask:		the bitmask listing all the supported hw capabilities
+ */
+struct spi_nor_hwcaps {
+	u32	mask;
+};
+
+/*
+ * (Fast) Read capabilities.
+ * MUST be ordered by priority: the higher the bit position, the higher the
+ * priority.
+ * For performance reasons, it is best to use Octo SPI protocols first, then
+ * Quad SPI protocols before Dual SPI protocols, Fast Read and lastly
+ * (Slow) Read.
+ */
+#define SNOR_HWCAPS_READ_MASK		GENMASK(14, 0)
+#define SNOR_HWCAPS_READ		BIT(0)
+#define SNOR_HWCAPS_READ_FAST		BIT(1)
+#define SNOR_HWCAPS_READ_1_1_1_DTR	BIT(2)
+
+#define SNOR_HWCAPS_READ_DUAL		GENMASK(6, 3)
+#define SNOR_HWCAPS_READ_1_1_2		BIT(3)
+#define SNOR_HWCAPS_READ_1_2_2		BIT(4)
+#define SNOR_HWCAPS_READ_2_2_2		BIT(5)
+#define SNOR_HWCAPS_READ_1_2_2_DTR	BIT(6)
+
+#define SNOR_HWCAPS_READ_QUAD		GENMASK(10, 7)
+#define SNOR_HWCAPS_READ_1_1_4		BIT(7)
+#define SNOR_HWCAPS_READ_1_4_4		BIT(8)
+#define SNOR_HWCAPS_READ_4_4_4		BIT(9)
+#define SNOR_HWCAPS_READ_1_4_4_DTR	BIT(10)
+
+#define SNOR_HWCPAS_READ_OCTO		GENMASK(14, 11)
+#define SNOR_HWCAPS_READ_1_1_8		BIT(11)
+#define SNOR_HWCAPS_READ_1_8_8		BIT(12)
+#define SNOR_HWCAPS_READ_8_8_8		BIT(13)
+#define SNOR_HWCAPS_READ_1_8_8_DTR	BIT(14)
+
+/*
+ * Page Program capabilities.
+ * MUST be ordered by priority: the higher bit position, the higher priority.
+ * Like (Fast) Read capabilities, Octo/Quad SPI protocols are preferred to the
+ * legacy SPI 1-1-1 protocol.
+ * Note that Dual Page Programs are not supported because there is no existing
+ * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory
+ * implements such commands.
+ */
+#define SNOR_HWCAPS_PP_MASK	GENMASK(22, 16)
+#define SNOR_HWCAPS_PP		BIT(16)
+
+#define SNOR_HWCAPS_PP_QUAD	GENMASK(19, 17)
+#define SNOR_HWCAPS_PP_1_1_4	BIT(17)
+#define SNOR_HWCAPS_PP_1_4_4	BIT(18)
+#define SNOR_HWCAPS_PP_4_4_4	BIT(19)
+
+#define SNOR_HWCAPS_PP_OCTO	GENMASK(22, 20)
+#define SNOR_HWCAPS_PP_1_1_8	BIT(20)
+#define SNOR_HWCAPS_PP_1_8_8	BIT(21)
+#define SNOR_HWCAPS_PP_8_8_8	BIT(22)
+
+/**
+ * spi_nor_scan() - scan the SPI NOR
+ * @nor:	the spi_nor structure
+ * @name:	the chip type name
+ * @hwcaps:	the hardware capabilities supported by the controller driver
+ *
+ * Drivers can use this function to scan the SPI NOR.
+ * While scanning, it will try to get all the necessary information to
+ * fill the mtd_info{} and the spi_nor{}.
+ *
+ * The chip type name can be provided through the @name parameter.
+ *
+ * Return: 0 for success, others for failure.
+ */
+int spi_nor_scan(struct spi_nor *nor, const char *name,
+		 const struct spi_nor_hwcaps *hwcaps);
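+
+/*
+ * Illustrative sketch only: a SPI controller driver would describe its
+ * capabilities with struct spi_nor_hwcaps before scanning. The selected
+ * capability bits below are arbitrary and "nor" is assumed to be an already
+ * initialized struct spi_nor (dev, priv and the driver hooks filled in).
+ *
+ *	const struct spi_nor_hwcaps hwcaps = {
+ *		.mask = SNOR_HWCAPS_READ |
+ *			SNOR_HWCAPS_READ_FAST |
+ *			SNOR_HWCAPS_READ_1_1_2 |
+ *			SNOR_HWCAPS_PP,
+ *	};
+ *	int ret;
+ *
+ *	ret = spi_nor_scan(nor, NULL, &hwcaps);
+ *	if (ret)
+ *		return ret;
+ *
+ *	return mtd_device_register(&nor->mtd, NULL, 0);
+ */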
+
+/**
+ * spi_nor_restore() - restore the status of the SPI NOR
+ * @nor:	the spi_nor structure
+ */
+void spi_nor_restore(struct spi_nor *nor);
+
+#endif
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
new file mode 100644
index 0000000..088ff96
--- /dev/null
+++ b/include/linux/mtd/spinand.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2017 Micron Technology, Inc.
+ *
+ *  Authors:
+ *	Peter Pan <peterpandong@micron.com>
+ */
+#ifndef __LINUX_MTD_SPINAND_H
+#define __LINUX_MTD_SPINAND_H
+
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/**
+ * Standard SPI NAND flash operations
+ */
+
+#define SPINAND_RESET_OP						\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1),				\
+		   SPI_MEM_OP_NO_ADDR,					\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_WR_EN_DIS_OP(enable)					\
+	SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1),		\
+		   SPI_MEM_OP_NO_ADDR,					\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_READID_OP(ndummy, buf, len)				\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),				\
+		   SPI_MEM_OP_NO_ADDR,					\
+		   SPI_MEM_OP_DUMMY(ndummy, 1),				\
+		   SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_SET_FEATURE_OP(reg, valptr)				\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1),				\
+		   SPI_MEM_OP_ADDR(1, reg, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_DATA_OUT(1, valptr, 1))
+
+#define SPINAND_GET_FEATURE_OP(reg, valptr)				\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1),				\
+		   SPI_MEM_OP_ADDR(1, reg, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_DATA_IN(1, valptr, 1))
+
+#define SPINAND_BLK_ERASE_OP(addr)					\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1),				\
+		   SPI_MEM_OP_ADDR(3, addr, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_OP(addr)					\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1),				\
+		   SPI_MEM_OP_ADDR(3, addr, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len)	\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1),		\
+		   SPI_MEM_OP_ADDR(2, addr, 1),				\
+		   SPI_MEM_OP_DUMMY(ndummy, 1),				\
+		   SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len)	\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1),				\
+		   SPI_MEM_OP_ADDR(2, addr, 1),				\
+		   SPI_MEM_OP_DUMMY(ndummy, 1),				\
+		   SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len)	\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),				\
+		   SPI_MEM_OP_ADDR(2, addr, 1),				\
+		   SPI_MEM_OP_DUMMY(ndummy, 1),				\
+		   SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len)	\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1),				\
+		   SPI_MEM_OP_ADDR(2, addr, 2),				\
+		   SPI_MEM_OP_DUMMY(ndummy, 2),				\
+		   SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len)	\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1),				\
+		   SPI_MEM_OP_ADDR(2, addr, 4),				\
+		   SPI_MEM_OP_DUMMY(ndummy, 4),				\
+		   SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PROG_EXEC_OP(addr)					\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1),				\
+		   SPI_MEM_OP_ADDR(3, addr, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PROG_LOAD(reset, addr, buf, len)			\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1),		\
+		   SPI_MEM_OP_ADDR(2, addr, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_DATA_OUT(len, buf, 1))
+
+#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len)			\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1),		\
+		   SPI_MEM_OP_ADDR(2, addr, 1),				\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_DATA_OUT(len, buf, 4))
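
These macros expand to struct spi_mem_op initializers, so a driver can hand them straight to the spi-mem layer. A hedged sketch follows; the function name, the column address and the single dummy byte are illustrative values, not taken from this patch.

	static int example_read_from_cache(struct spi_mem *spimem, u32 column,
					   void *buf, unsigned int len)
	{
		/* Fast read (0x0b) from the cache, one dummy byte, single I/O. */
		struct spi_mem_op op = SPINAND_PAGE_READ_FROM_CACHE_OP(true, column,
								       1, buf, len);

		return spi_mem_exec_op(spimem, &op);
	}
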
+
+/*
+ * Standard SPI NAND flash commands
+ */
+#define SPINAND_CMD_PROG_LOAD_X4		0x32
+#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4	0x34
+
+/* feature register */
+#define REG_BLOCK_LOCK		0xa0
+#define BL_ALL_UNLOCKED		0x00
+
+/* configuration register */
+#define REG_CFG			0xb0
+#define CFG_OTP_ENABLE		BIT(6)
+#define CFG_ECC_ENABLE		BIT(4)
+#define CFG_QUAD_ENABLE		BIT(0)
+
+/* status register */
+#define REG_STATUS		0xc0
+#define STATUS_BUSY		BIT(0)
+#define STATUS_ERASE_FAILED	BIT(2)
+#define STATUS_PROG_FAILED	BIT(3)
+#define STATUS_ECC_MASK		GENMASK(5, 4)
+#define STATUS_ECC_NO_BITFLIPS	(0 << 4)
+#define STATUS_ECC_HAS_BITFLIPS	(1 << 4)
+#define STATUS_ECC_UNCOR_ERROR	(2 << 4)
+
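By way of illustration only (not part of this patch), the program/erase outcome encoded in these bits is usually folded into an errno along these lines; the helper name is a placeholder.

	/* Hedged sketch: map the status byte of a finished program/erase
	 * operation onto an error code. */
	static int example_check_op_status(u8 status)
	{
		if (status & STATUS_BUSY)
			return -EBUSY;		/* operation still in progress */

		if (status & (STATUS_ERASE_FAILED | STATUS_PROG_FAILED))
			return -EIO;		/* the chip reported a failure */

		return 0;
	}
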
+struct spinand_op;
+struct spinand_device;
+
+#define SPINAND_MAX_ID_LEN	4
+
+/**
+ * struct spinand_id - SPI NAND id structure
+ * @data: buffer containing the id bytes. Currently 4 bytes large, but can
+ *	  be extended if required
+ * @len: ID length
+ *
+ * struct_spinand_id->data contains all bytes returned after a READ_ID command,
+ * including dummy bytes if the chip does not emit ID bytes right after the
+ * READ_ID command. The responsibility to extract the real ID bytes is left to
+ * spinand_manufacturer_ops->detect().
+ */
+struct spinand_id {
+	u8 data[SPINAND_MAX_ID_LEN];
+	int len;
+};
+
+/**
+ * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
+ * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
+ *	    the core calls the spinand_manufacturer_ops->detect() hook of each
+ *	    registered manufacturer until one of them returns 1. Note that
+ *	    the first thing to check in this hook is that the manufacturer ID
+ *	    in struct_spinand_device->id matches the manufacturer whose
+ *	    ->detect() hook has been called. Should return 1 if there's a
+ *	    match, 0 if the manufacturer ID does not match and a negative
+ *	    error code otherwise. When 1 is returned, the core assumes
+ *	    that properties of the NAND chip (spinand->base.memorg and
+ *	    spinand->base.eccreq) have been filled
+ * @init: initialize a SPI NAND device
+ * @cleanup: cleanup a SPI NAND device
+ *
+ * Each SPI NAND manufacturer driver should implement this interface so that
+ * NAND chips coming from this vendor can be detected and initialized properly.
+ */
+struct spinand_manufacturer_ops {
+	int (*detect)(struct spinand_device *spinand);
+	int (*init)(struct spinand_device *spinand);
+	void (*cleanup)(struct spinand_device *spinand);
+};
+
+/**
+ * struct spinand_manufacturer - SPI NAND manufacturer instance
+ * @id: manufacturer ID
+ * @name: manufacturer name
+ * @ops: manufacturer operations
+ */
+struct spinand_manufacturer {
+	u8 id;
+	char *name;
+	const struct spinand_manufacturer_ops *ops;
+};
+
+/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer macronix_spinand_manufacturer;
+extern const struct spinand_manufacturer micron_spinand_manufacturer;
+extern const struct spinand_manufacturer winbond_spinand_manufacturer;
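
A hedged sketch of what such a manufacturer instance looks like in a vendor driver. The 0xEF ID, the "example" names and example_spinand_table are placeholders (the table would be a spinand_info array as sketched further down), and spinand_match_and_init() is the helper declared near the end of this header.

	static int example_spinand_detect(struct spinand_device *spinand)
	{
		u8 *id = spinand->id.data;
		int ret;

		/* Which ID byte carries the manufacturer/device ID is
		 * vendor-specific; id[0] is often a dummy byte. */
		if (id[1] != 0xEF)
			return 0;

		ret = spinand_match_and_init(spinand, example_spinand_table,
					     ARRAY_SIZE(example_spinand_table),
					     id[2]);
		if (ret)
			return ret;

		return 1;
	}

	static const struct spinand_manufacturer_ops example_spinand_manuf_ops = {
		.detect = example_spinand_detect,
	};

	const struct spinand_manufacturer example_spinand_manufacturer = {
		.id = 0xEF,
		.name = "example",
		.ops = &example_spinand_manuf_ops,
	};
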
+
+/**
+ * struct spinand_op_variants - SPI NAND operation variants
+ * @ops: the list of variants for a given operation
+ * @nops: the number of variants
+ *
+ * Some operations like read-from-cache/write-to-cache have several variants
+ * depending on the number of IO lines you use to transfer data or address
+ * cycles. This structure is a way to describe the different variants supported
+ * by a chip and let the core pick the best one based on the SPI mem controller
+ * capabilities.
+ */
+struct spinand_op_variants {
+	const struct spi_mem_op *ops;
+	unsigned int nops;
+};
+
+#define SPINAND_OP_VARIANTS(name, ...)					\
+	const struct spinand_op_variants name = {			\
+		.ops = (struct spi_mem_op[]) { __VA_ARGS__ },		\
+		.nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) /	\
+			sizeof(struct spi_mem_op),			\
+	}
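
A hedged usage sketch (the variable name is a placeholder): vendor drivers typically list the read-from-cache variants from fastest to slowest so the core can pick the best one the controller supports.

	static SPINAND_OP_VARIANTS(example_read_cache_variants,
			SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
			SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
			SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
			SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
			SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
			SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
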
+
+/**
+ * struct spinand_ecc_info - description of the on-die ECC implemented by a
+ *			      SPI NAND chip
+ * @get_status: get the ECC status. Should return a positive number encoding
+ *		the number of corrected bitflips if correction was possible or
+ *		-EBADMSG if there are uncorrectable errors. It can also return
+ *		other negative error codes if the error is not caused by
+ *		uncorrectable bitflips
+ * @ooblayout: the OOB layout used by the on-die ECC implementation
+ */
+struct spinand_ecc_info {
+	int (*get_status)(struct spinand_device *spinand, u8 status);
+	const struct mtd_ooblayout_ops *ooblayout;
+};
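
A hedged sketch of a ->get_status() implementation following the contract above, built on the STATUS_ECC_* bits defined earlier; the function name and the "8 bitflips" worst-case value are assumptions (real chips often expose a finer-grained count through a vendor-specific extended status).

	static int example_ecc_get_status(struct spinand_device *spinand,
					  u8 status)
	{
		switch (status & STATUS_ECC_MASK) {
		case STATUS_ECC_NO_BITFLIPS:
			return 0;

		case STATUS_ECC_HAS_BITFLIPS:
			/* Corrected: report the assumed worst case the ECC
			 * engine can fix. */
			return 8;

		case STATUS_ECC_UNCOR_ERROR:
			return -EBADMSG;

		default:
			break;
		}

		return -EINVAL;
	}
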
+
+#define SPINAND_HAS_QE_BIT		BIT(0)
+
+/**
+ * struct spinand_info - Structure used to describe SPI NAND chips
+ * @model: model name
+ * @devid: device ID
+ * @flags: OR-ing of the SPINAND_XXX flags
+ * @memorg: memory organization
+ * @eccreq: ECC requirements
+ * @eccinfo: on-die ECC info
+ * @op_variants: operations variants
+ * @op_variants.read_cache: variants of the read-cache operation
+ * @op_variants.write_cache: variants of the write-cache operation
+ * @op_variants.update_cache: variants of the update-cache operation
+ * @select_target: function used to select a target/die. Required only for
+ *		   multi-die chips
+ *
+ * Each SPI NAND manufacturer driver should have a spinand_info table
+ * describing all the chips supported by the driver.
+ */
+struct spinand_info {
+	const char *model;
+	u8 devid;
+	u32 flags;
+	struct nand_memory_organization memorg;
+	struct nand_ecc_req eccreq;
+	struct spinand_ecc_info eccinfo;
+	struct {
+		const struct spinand_op_variants *read_cache;
+		const struct spinand_op_variants *write_cache;
+		const struct spinand_op_variants *update_cache;
+	} op_variants;
+	int (*select_target)(struct spinand_device *spinand,
+			     unsigned int target);
+};
+
+#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update)		\
+	{								\
+		.read_cache = __read,					\
+		.write_cache = __write,					\
+		.update_cache = __update,				\
+	}
+
+#define SPINAND_ECCINFO(__ooblayout, __get_status)			\
+	.eccinfo = {							\
+		.ooblayout = __ooblayout,				\
+		.get_status = __get_status,				\
+	}
+
+#define SPINAND_SELECT_TARGET(__func)					\
+	.select_target = __func,
+
+#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants,	\
+		     __flags, ...)					\
+	{								\
+		.model = __model,					\
+		.devid = __id,						\
+		.memorg = __memorg,					\
+		.eccreq = __eccreq,					\
+		.op_variants = __op_variants,				\
+		.flags = __flags,					\
+		__VA_ARGS__						\
+	}
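
Putting the helpers above together, a vendor driver's chip table might look roughly like this. The model string, IDs and geometry are placeholders, the example_* symbols are the hypothetical variants/ooblayout/get_status objects sketched earlier, and NAND_MEMORG()/NAND_ECCREQ() are assumed to be the initializer helpers from <linux/mtd/nand.h>.

	static const struct spinand_info example_spinand_table[] = {
		SPINAND_INFO("EXAMPLE2G", 0xA1,
			     /* 2048+128 byte pages, 64 pages/block, 2048 blocks. */
			     NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
			     NAND_ECCREQ(8, 512),
			     SPINAND_INFO_OP_VARIANTS(&example_read_cache_variants,
						      &example_write_cache_variants,
						      &example_update_cache_variants),
			     SPINAND_HAS_QE_BIT,
			     SPINAND_ECCINFO(&example_ooblayout,
					     example_ecc_get_status)),
	};
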
+
+/**
+ * struct spinand_device - SPI NAND device instance
+ * @base: NAND device instance
+ * @spimem: pointer to the SPI mem object
+ * @lock: lock used to serialize accesses to the NAND
+ * @id: NAND ID as returned by READ_ID
+ * @flags: NAND flags
+ * @op_templates: various SPI mem op templates
+ * @op_templates.read_cache: read cache op template
+ * @op_templates.write_cache: write cache op template
+ * @op_templates.update_cache: update cache op template
+ * @select_target: select a specific target/die. Usually called before sending
+ *		   a command addressing a page or an eraseblock embedded in
+ *		   this die. Only required if your chip exposes several dies
+ * @cur_target: currently selected target/die
+ * @eccinfo: on-die ECC information
+ * @cfg_cache: config register cache. One entry per die
+ * @databuf: bounce buffer for data
+ * @oobbuf: bounce buffer for OOB data
+ * @scratchbuf: buffer used for everything but page accesses. This is needed
+ *		because the spi-mem interface explicitly requests that buffers
+ *		passed in spi_mem_op be DMA-able, so we can't place the buffers
+ *		on the stack
+ * @manufacturer: SPI NAND manufacturer information
+ * @priv: manufacturer private data
+ */
+struct spinand_device {
+	struct nand_device base;
+	struct spi_mem *spimem;
+	struct mutex lock;
+	struct spinand_id id;
+	u32 flags;
+
+	struct {
+		const struct spi_mem_op *read_cache;
+		const struct spi_mem_op *write_cache;
+		const struct spi_mem_op *update_cache;
+	} op_templates;
+
+	int (*select_target)(struct spinand_device *spinand,
+			     unsigned int target);
+	unsigned int cur_target;
+
+	struct spinand_ecc_info eccinfo;
+
+	u8 *cfg_cache;
+	u8 *databuf;
+	u8 *oobbuf;
+	u8 *scratchbuf;
+	const struct spinand_manufacturer *manufacturer;
+	void *priv;
+};
+
+/**
+ * mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the SPI NAND device attached to @mtd.
+ */
+static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nanddev(mtd), struct spinand_device, base);
+}
+
+/**
+ * spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device
+ * @spinand: SPI NAND device
+ *
+ * Return: the MTD device embedded in @spinand.
+ */
+static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand)
+{
+	return nanddev_to_mtd(&spinand->base);
+}
+
+/**
+ * nand_to_spinand() - Get the SPI NAND device embedding a NAND object
+ * @nand: NAND object
+ *
+ * Return: the SPI NAND device embedding @nand.
+ */
+static inline struct spinand_device *nand_to_spinand(struct nand_device *nand)
+{
+	return container_of(nand, struct spinand_device, base);
+}
+
+/**
+ * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object
+ * @spinand: SPI NAND device
+ *
+ * Return: the NAND device embedded in @spinand.
+ */
+static inline struct nand_device *
+spinand_to_nand(struct spinand_device *spinand)
+{
+	return &spinand->base;
+}
+
+/**
+ * spinand_set_of_node - Attach a DT node to a SPI NAND device
+ * @spinand: SPI NAND device
+ * @np: DT node
+ *
+ * Attach a DT node to a SPI NAND device.
+ */
+static inline void spinand_set_of_node(struct spinand_device *spinand,
+				       struct device_node *np)
+{
+	nanddev_set_of_node(&spinand->base, np);
+}
+
+int spinand_match_and_init(struct spinand_device *dev,
+			   const struct spinand_info *table,
+			   unsigned int table_size, u8 devid);
+
+int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_select_target(struct spinand_device *spinand, unsigned int target);
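
For instance (a hedged sketch, not taken from this patch), toggling on-die ECC via the configuration register boils down to an spinand_upd_cfg() call; the helper name is a placeholder.

	static int example_enable_ecc(struct spinand_device *spinand, bool enable)
	{
		/* Update only the ECC bit of the cached configuration register. */
		return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
				       enable ? CFG_ECC_ENABLE : 0);
	}
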
+
+#endif /* __LINUX_MTD_SPINAND_H */
diff --git a/include/linux/mtd/super.h b/include/linux/mtd/super.h
new file mode 100644
index 0000000..f456230
--- /dev/null
+++ b/include/linux/mtd/super.h
@@ -0,0 +1,29 @@
+/* MTD-based superblock handling
+ *
+ * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __MTD_SUPER_H__
+#define __MTD_SUPER_H__
+
+#ifdef __KERNEL__
+
+#include <linux/mtd/mtd.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+
+extern struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
+		      const char *dev_name, void *data,
+		      int (*fill_super)(struct super_block *, void *, int));
+extern void kill_mtd_super(struct super_block *sb);
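
A hedged sketch of how a flash filesystem typically wires these two helpers into its file_system_type; "examplefs" and examplefs_fill_super() are placeholders for the real filesystem's names.

	/* Placeholder fill_super: a real filesystem would set up the
	 * superblock from the MTD device contents here. */
	static int examplefs_fill_super(struct super_block *sb, void *data,
					int silent)
	{
		return -EINVAL;
	}

	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
					      int flags, const char *dev_name,
					      void *data)
	{
		/* Delegate device lookup and superblock setup to the MTD helper. */
		return mount_mtd(fs_type, flags, dev_name, data,
				 examplefs_fill_super);
	}

	static struct file_system_type examplefs_fs_type = {
		.owner		= THIS_MODULE,
		.name		= "examplefs",
		.mount		= examplefs_mount,
		.kill_sb	= kill_mtd_super,
		.fs_flags	= FS_REQUIRES_DEV,
	};
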
+
+
+#endif /* __KERNEL__ */
+
+#endif /* __MTD_SUPER_H__ */
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
new file mode 100644
index 0000000..1e271cb
--- /dev/null
+++ b/include/linux/mtd/ubi.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#ifndef __LINUX_UBI_H__
+#define __LINUX_UBI_H__
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <mtd/ubi-user.h>
+
+/* All volumes/LEBs */
+#define UBI_ALL -1
+
+/*
+ * Maximum number of scatter gather list entries;
+ * we use only 64 to keep the memory footprint low.
+ */
+#define UBI_MAX_SG_COUNT 64
+
+/*
+ * enum ubi_open_mode - UBI volume open mode constants.
+ *
+ * UBI_READONLY: read-only mode
+ * UBI_READWRITE: read-write mode
+ * UBI_EXCLUSIVE: exclusive mode
+ * UBI_METAONLY: modify only the volume meta-data,
+ *  i.e. the data stored in the volume table, but not in any of the volume's LEBs.
+ */
+enum {
+	UBI_READONLY = 1,
+	UBI_READWRITE,
+	UBI_EXCLUSIVE,
+	UBI_METAONLY
+};
+
+/**
+ * struct ubi_volume_info - UBI volume description data structure.
+ * @vol_id: volume ID
+ * @ubi_num: UBI device number this volume belongs to
+ * @size: how many physical eraseblocks are reserved for this volume
+ * @used_bytes: how many bytes of data this volume contains
+ * @used_ebs: how many physical eraseblocks of this volume actually contain any
+ *            data
+ * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
+ * @corrupted: non-zero if the volume is corrupted (static volumes only)
+ * @upd_marker: non-zero if the volume has update marker set
+ * @alignment: volume alignment
+ * @usable_leb_size: how many bytes are available in logical eraseblocks of
+ *                   this volume
+ * @name_len: volume name length
+ * @name: volume name
+ * @cdev: UBI volume character device major and minor numbers
+ *
+ * The @corrupted flag is only relevant to static volumes and is always zero
+ * for dynamic ones. This is because UBI does not care about dynamic volume
+ * data protection and only cares about protecting static volume data.
+ *
+ * The @upd_marker flag is set if the volume update operation was interrupted.
+ * Before touching the volume data during the update operation, UBI first sets
+ * the update marker flag for this volume. If the volume update operation was
+ * further interrupted, the update marker indicates this. If the update marker
+ * is set, the contents of the volume are certainly damaged and a new volume
+ * update operation has to be started.
+ *
+ * To put it differently, @corrupted and @upd_marker fields have different
+ * semantics:
+ *     o the @corrupted flag means that this static volume is corrupted for some
+ *       reason, but not because of an interrupted volume update
+ *     o the @upd_marker field means that the volume is damaged because of an
+ *       interrupted update operation.
+ *
+ * I.e., the @corrupted flag is never set if the @upd_marker flag is set.
+ *
+ * The @used_bytes and @used_ebs fields are only really needed for static
+ * volumes and contain the number of bytes stored in this static volume and how
+ * many eraseblocks this data occupies. In case of dynamic volumes, the
+ * @used_bytes field is equivalent to @size*@usable_leb_size, and the @used_ebs
+ * field is equivalent to @size.
+ *
+ * In general, logical eraseblock size is a property of the UBI device, not
+ * of the UBI volume. Indeed, the logical eraseblock size depends on the
+ * physical eraseblock size and on how many bytes the UBI headers consume. But
+ * because of the volume alignment (@alignment), the usable size of logical
+ * eraseblocks of a volume may be less. The following equation is true:
+ *	@usable_leb_size = LEB size - (LEB size mod @alignment),
+ * where LEB size is the logical eraseblock size defined by the UBI device.
+ *
+ * The alignment is a multiple of the minimal flash input/output unit size, or
+ * %1 if all the available space is used.
+ *
+ * To put this differently, alignment may be considered as a way to change
+ * the volume's logical eraseblock size.
+ */
+struct ubi_volume_info {
+	int ubi_num;
+	int vol_id;
+	int size;
+	long long used_bytes;
+	int used_ebs;
+	int vol_type;
+	int corrupted;
+	int upd_marker;
+	int alignment;
+	int usable_leb_size;
+	int name_len;
+	const char *name;
+	dev_t cdev;
+};
+
+/**
+ * struct ubi_sgl - UBI scatter gather list data structure.
+ * @list_pos: current position in @sg[]
+ * @page_pos: current position in @sg[@list_pos]
+ * @sg: the scatter gather list itself
+ *
+ * ubi_sgl is a wrapper around a scatter list which keeps track of the
+ * current position in the list and the current list item such that
+ * it can be used across multiple ubi_leb_read_sg() calls.
+ */
+struct ubi_sgl {
+	int list_pos;
+	int page_pos;
+	struct scatterlist sg[UBI_MAX_SG_COUNT];
+};
+
+/**
+ * ubi_sgl_init - initialize an UBI scatter gather list data structure.
+ * @usgl: the UBI scatter gather struct itself
+ *
+ * Please note that you still have to use sg_init_table() or any adequate
+ * function to initialize the underlying struct scatterlist.
+ */
+static inline void ubi_sgl_init(struct ubi_sgl *usgl)
+{
+	usgl->list_pos = 0;
+	usgl->page_pos = 0;
+}
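
For example (a minimal sketch; the helper name and the single-entry list are assumptions, and buffer setup is simplified), a reader using the scatter gather interface would do something along these lines, with ubi_leb_read_sg() as declared later in this header.

	/* Hedged sketch: read @len bytes from the start of LEB @lnum into a
	 * single-entry scatter gather list. Real code would usually not put
	 * the whole ubi_sgl on the stack. */
	static int example_sg_read(struct ubi_volume_desc *desc, int lnum,
				   void *buf, int len)
	{
		struct ubi_sgl usgl;

		sg_init_table(usgl.sg, 1);	/* underlying scatterlist */
		sg_set_buf(&usgl.sg[0], buf, len);
		ubi_sgl_init(&usgl);		/* reset the UBI positions */

		return ubi_leb_read_sg(desc, lnum, &usgl, 0, len, 0);
	}
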
+
+/**
+ * struct ubi_device_info - UBI device description data structure.
+ * @ubi_num: ubi device number
+ * @leb_size: logical eraseblock size on this UBI device
+ * @leb_start: starting offset of logical eraseblocks within physical
+ *             eraseblocks
+ * @min_io_size: minimal I/O unit size
+ * @max_write_size: maximum amount of bytes the underlying flash can write at a
+ *                  time (MTD write buffer size)
+ * @ro_mode: if this device is in read-only mode
+ * @cdev: UBI character device major and minor numbers
+ *
+ * Note, @leb_size is the logical eraseblock size offered by the UBI device.
+ * Volumes of this UBI device may have a smaller logical eraseblock size if
+ * their alignment is not equal to %1.
+ *
+ * The @max_write_size field describes the maximum write unit of the flash. For
+ * example, NOR flash allows for changing individual bytes, so @min_io_size is
+ * %1. However, it does not mean that NOR flash has to write data byte-by-byte.
+ * Instead, CFI NOR flashes have a write-buffer of, e.g., 64 bytes, and when
+ * writing large chunks of data, they write 64 bytes at a time. Obviously, this
+ * improves write throughput.
+ *
+ * Also, the MTD device may have N interleaved (striped) flash chips
+ * underneath, in which case @min_io_size can be the physical min. I/O size of
+ * a single flash chip, while @max_write_size can be N * @min_io_size.
+ *
+ * The @max_write_size field is always greater than or equal to @min_io_size.
+ * E.g., some NOR flashes may have (@min_io_size = 1, @max_write_size = 64). In
+ * contrast, NAND flashes usually have @min_io_size = @max_write_size = NAND
+ * page size.
+ */
+struct ubi_device_info {
+	int ubi_num;
+	int leb_size;
+	int leb_start;
+	int min_io_size;
+	int max_write_size;
+	int ro_mode;
+	dev_t cdev;
+};
+
+/*
+ * Volume notification types.
+ * @UBI_VOLUME_ADDED: a volume has been added (an UBI device was attached or a
+ *                    volume was created)
+ * @UBI_VOLUME_REMOVED: a volume has been removed (an UBI device was detached
+ *			or a volume was removed)
+ * @UBI_VOLUME_RESIZED: a volume has been re-sized
+ * @UBI_VOLUME_RENAMED: a volume has been re-named
+ * @UBI_VOLUME_UPDATED: data has been written to a volume
+ *
+ * These constants define which type of event has happened when a volume
+ * notification function is invoked.
+ */
+enum {
+	UBI_VOLUME_ADDED,
+	UBI_VOLUME_REMOVED,
+	UBI_VOLUME_RESIZED,
+	UBI_VOLUME_RENAMED,
+	UBI_VOLUME_UPDATED,
+};
+
+/*
+ * struct ubi_notification - UBI notification description structure.
+ * @di: UBI device description object
+ * @vi: UBI volume description object
+ *
+ * UBI notifiers are called with a pointer to an object of this type. The
+ * object describes the notification. Namely, it provides a description of the
+ * UBI device and UBI volume the notification informs about.
+ */
+struct ubi_notification {
+	struct ubi_device_info di;
+	struct ubi_volume_info vi;
+};
+
+/* UBI descriptor given to users when they open UBI volumes */
+struct ubi_volume_desc;
+
+int ubi_get_device_info(int ubi_num, struct ubi_device_info *di);
+void ubi_get_volume_info(struct ubi_volume_desc *desc,
+			 struct ubi_volume_info *vi);
+struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode);
+struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
+					   int mode);
+struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode);
+
+int ubi_register_volume_notifier(struct notifier_block *nb,
+				 int ignore_existing);
+int ubi_unregister_volume_notifier(struct notifier_block *nb);
+
+void ubi_close_volume(struct ubi_volume_desc *desc);
+int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
+		 int len, int check);
+int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
+		   int offset, int len, int check);
+int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
+		  int offset, int len);
+int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
+		   int len);
+int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum);
+int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum);
+int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
+int ubi_sync(int ubi_num);
+int ubi_flush(int ubi_num, int vol_id, int lnum);
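
A hedged end-to-end sketch of the kernel API above (UBI device 0, volume 0; the function name and the fixed numbers are illustrative only).

	static int example_read_leb0(char *buf, int len)
	{
		struct ubi_volume_desc *desc;
		int err;

		desc = ubi_open_volume(0, 0, UBI_READONLY);
		if (IS_ERR(desc))
			return PTR_ERR(desc);

		/* Read @len bytes from the start of LEB 0, with CRC checking. */
		err = ubi_leb_read(desc, 0, buf, 0, len, 1);

		ubi_close_volume(desc);
		return err;
	}
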
+
+/*
+ * This function is the same as the 'ubi_leb_read()' function, but it does not
+ * provide the checking capability.
+ */
+static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf,
+			   int offset, int len)
+{
+	return ubi_leb_read(desc, lnum, buf, offset, len, 0);
+}
+
+/*
+ * This function is the same as the 'ubi_leb_read_sg()' function, but it does
+ * not provide the checking capability.
+ */
+static inline int ubi_read_sg(struct ubi_volume_desc *desc, int lnum,
+			      struct ubi_sgl *sgl, int offset, int len)
+{
+	return ubi_leb_read_sg(desc, lnum, sgl, offset, len, 0);
+}
+#endif /* !__LINUX_UBI_H__ */
diff --git a/include/linux/mtd/xip.h b/include/linux/mtd/xip.h
new file mode 100644
index 0000000..e373690
--- /dev/null
+++ b/include/linux/mtd/xip.h
@@ -0,0 +1,101 @@
+/*
+ * MTD primitives for XIP support
+ *
+ * Author:	Nicolas Pitre
+ * Created:	Nov 2, 2004
+ * Copyright:	(C) 2004 MontaVista Software, Inc.
+ *
+ * This XIP support for MTD has been loosely inspired
+ * by an earlier patch authored by David Woodhouse.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MTD_XIP_H__
+#define __LINUX_MTD_XIP_H__
+
+
+#ifdef CONFIG_MTD_XIP
+
+/*
+ * We really don't want gcc to guess anything.
+ * We absolutely _need_ proper inlining.
+ */
+#include <linux/compiler.h>
+
+/*
+ * Functions that modify the flash state away from array mode must obviously
+ * not be running from flash.  The __xipram annotation therefore marks those
+ * functions so they get relocated to RAM.
+ */
+#ifdef CONFIG_XIP_KERNEL
+#define __xipram noinline __attribute__ ((__section__ (".xiptext")))
+#endif
+
+/*
+ * Each architecture has to provide the following macros.  They must access
+ * the hardware directly and not rely on any other (XIP) functions since they
+ * won't be available when used (flash not in array mode).
+ *
+ * xip_irqpending()
+ *
+ * 	return non-zero when any hardware interrupt is pending.
+ *
+ * xip_currtime()
+ *
+ * 	return a platform specific time reference to be used with
+ * 	xip_elapsed_since().
+ *
+ * xip_elapsed_since(x)
+ *
+ * 	return in usecs the elapsed time between now and the reference x as
+ * 	returned by xip_currtime().
+ *
+ * 	note 1: conversion to usec can be approximated, as long as the
+ * 		returned value is <= the real elapsed time.
+ * 	note 2: this should be able to cope with a few seconds without
+ * 		overflowing.
+ *
+ * xip_iprefetch()
+ *
+ *	Macro to fill the instruction prefetch queue,
+ *	e.g. with a series of nops:  asm volatile (".rep 8; nop; .endr");
+ */
+
+#include <asm/mtd-xip.h>
+
+#ifndef xip_irqpending
+
+#warning "missing IRQ and timer primitives for XIP MTD support"
+#warning "some of the XIP MTD support code will be disabled"
+#warning "your system will therefore be unresponsive when writing or erasing flash"
+
+#define xip_irqpending()	(0)
+#define xip_currtime()		(0)
+#define xip_elapsed_since(x)	(0)
+
+#endif
+
+#ifndef xip_iprefetch
+#define xip_iprefetch()		do { } while (0)
+#endif
+
+/*
+ * xip_cpu_idle() is used when waiting for a delay equal to or larger than
+ * the system timer tick period.  This should put the CPU into idle mode
+ * to save power and to be woken up only when some interrupts are pending.
+ * This should not rely upon standard kernel code.
+ */
+#ifndef xip_cpu_idle
+#define xip_cpu_idle()  do { } while (0)
+#endif
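
A hedged, simplified sketch of the polling pattern these primitives support, loosely modelled on the CFI chip drivers; example_wait_ready() and example_flash_ready() are placeholders, and real drivers do considerably more work when an interrupt is pending.

	static int __xipram example_wait_ready(unsigned long timeout_usec)
	{
		unsigned long start = xip_currtime();

		while (!example_flash_ready()) {	/* placeholder status check */
			if (xip_irqpending()) {
				/*
				 * A real driver would suspend the flash
				 * operation and put the chip back into array
				 * mode before letting the IRQ be serviced.
				 */
				return -EINTR;
			}

			if (xip_elapsed_since(start) > timeout_usec)
				return -ETIMEDOUT;

			xip_cpu_idle();
		}

		return 0;
	}
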
+
+#endif /* CONFIG_MTD_XIP */
+
+#ifndef __xipram
+#define __xipram
+#endif
+
+#endif /* __LINUX_MTD_XIP_H__ */