author     Sascha Hauer <s.hauer@pengutronix.de>  2013-08-05 12:49:58 +0200
committer  Sascha Hauer <s.hauer@pengutronix.de>  2013-08-05 12:49:58 +0200
commit     feefc3ef3b4d201845480b17cb9e6c6729eb1a96 (patch)
tree       355bae733a4bb876425a06549b8e1bdd393a5020 /drivers
parent     9ebb0554cd0e19f3757f4f5eea723eaf3b6dab78 (diff)
parent     ae5eeb06ed37914d6a47a1f414a9165763ac5677 (diff)
Merge branch 'for-next/mtd'
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/Kconfig                     |    3
-rw-r--r--  drivers/dma/apbh_dma.c                  |  176
-rw-r--r--  drivers/mci/mxs.c                       |    4
-rw-r--r--  drivers/mtd/Makefile                    |    2
-rw-r--r--  drivers/mtd/core.c                      |   21
-rw-r--r--  drivers/mtd/devices/docg3.c             |   26
-rw-r--r--  drivers/mtd/mtdoob.c                    |    8
-rw-r--r--  drivers/mtd/mtdraw.c                    |   10
-rw-r--r--  drivers/mtd/nand/Kconfig                |   31
-rw-r--r--  drivers/mtd/nand/Makefile               |    5
-rw-r--r--  drivers/mtd/nand/atmel_nand.c           |   15
-rw-r--r--  drivers/mtd/nand/nand.h                 |    3
-rw-r--r--  drivers/mtd/nand/nand_base.c            | 2770
-rw-r--r--  drivers/mtd/nand/nand_bbt.c             | 1049
-rw-r--r--  drivers/mtd/nand/nand_bch.c             |  243
-rw-r--r--  drivers/mtd/nand/nand_hwecc.c           |  103
-rw-r--r--  drivers/mtd/nand/nand_hwecc_syndrome.c  |  225
-rw-r--r--  drivers/mtd/nand/nand_ids.c             |  266
-rw-r--r--  drivers/mtd/nand/nand_imx.c             |   99
-rw-r--r--  drivers/mtd/nand/nand_mxs.c             |  175
-rw-r--r--  drivers/mtd/nand/nand_omap_gpmc.c       |   18
-rw-r--r--  drivers/mtd/nand/nand_s3c24xx.c         |    2
-rw-r--r--  drivers/mtd/nand/nand_swecc.c           |   94
-rw-r--r--  drivers/mtd/nand/nand_write.c           |  747
-rw-r--r--  drivers/mtd/nand/nomadik_nand.c         |    1
-rw-r--r--  drivers/mtd/ubi/Kconfig                 |   82
-rw-r--r--  drivers/mtd/ubi/Makefile                |    6
-rw-r--r--  drivers/mtd/ubi/attach.c                | 1728
-rw-r--r--  drivers/mtd/ubi/build.c                 |  862
-rw-r--r--  drivers/mtd/ubi/cdev.c                  |   27
-rw-r--r--  drivers/mtd/ubi/crc32defs.h             |   32
-rw-r--r--  drivers/mtd/ubi/debug.c                 |  223
-rw-r--r--  drivers/mtd/ubi/debug.h                 |  178
-rw-r--r--  drivers/mtd/ubi/eba.c                   |  485
-rw-r--r--  drivers/mtd/ubi/fastmap.c               | 1514
-rw-r--r--  drivers/mtd/ubi/io.c                    |  815
-rw-r--r--  drivers/mtd/ubi/kapi.c                  |  177
-rw-r--r--  drivers/mtd/ubi/misc.c                  |   64
-rw-r--r--  drivers/mtd/ubi/scan.c                  | 1359
-rw-r--r--  drivers/mtd/ubi/scan.h                  |  162
-rw-r--r--  drivers/mtd/ubi/ubi-barebox.h           |  135
-rw-r--r--  drivers/mtd/ubi/ubi-media.h             |  515
-rw-r--r--  drivers/mtd/ubi/ubi.h                   |  632
-rw-r--r--  drivers/mtd/ubi/upd.c                   |  101
-rw-r--r--  drivers/mtd/ubi/vmt.c                   |  513
-rw-r--r--  drivers/mtd/ubi/vtbl.c                  |  365
-rw-r--r--  drivers/mtd/ubi/wl.c                    | 1655
-rw-r--r--  drivers/of/Makefile                     |    1
-rw-r--r--  drivers/of/of_mtd.c                     |   82
-rw-r--r--  drivers/serial/serial_auart.c           |    2
-rw-r--r--  drivers/spi/mxs_spi.c                   |   27
-rw-r--r--  drivers/video/stm.c                     |   11
52 files changed, 10821 insertions, 7028 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a30fa375d1..c75fc8b981 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -2,7 +2,8 @@ menu "DMA support"
config MXS_APBH_DMA
tristate "MXS APBH DMA ENGINE"
- depends on ARCH_IMX23 || ARCH_IMX28
+ depends on ARCH_IMX23 || ARCH_IMX28 || ARCH_IMX6
+ select STMP_DEVICE
help
Experimental!
endmenu
diff --git a/drivers/dma/apbh_dma.c b/drivers/dma/apbh_dma.c
index d30b8fb193..665d394086 100644
--- a/drivers/dma/apbh_dma.c
+++ b/drivers/dma/apbh_dma.c
@@ -15,17 +15,17 @@
* (at your option) any later version.
*/
+#include <dma/apbh-dma.h>
+#include <stmp-device.h>
#include <linux/list.h>
-
#include <common.h>
+#include <driver.h>
#include <malloc.h>
#include <errno.h>
+#include <init.h>
+#include <io.h>
+
#include <asm/mmu.h>
-#include <asm/io.h>
-#include <mach/clock.h>
-#include <mach/imx-regs.h>
-#include <mach/dma.h>
-#include <mach/mxs.h>
#define HW_APBHX_CTRL0 0x000
#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
@@ -37,18 +37,29 @@
#define HW_APBHX_CTRL2 0x020
#define HW_APBHX_CHANNEL_CTRL 0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
-#define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800)
-#define HW_APBX_VERSION 0x800
#define BP_APBHX_VERSION_MAJOR 24
-#define HW_APBHX_CHn_NXTCMDAR(n) \
- ((apbh_is_old ? 0x050 : 0x110) + (n) * 0x70)
-#define HW_APBHX_CHn_SEMA(n) \
- ((apbh_is_old ? 0x080 : 0x140) + (n) * 0x70)
+#define HW_APBHX_CHn_NXTCMDAR_MX23(n) (0x050 + (n) * 0x70)
+#define HW_APBHX_CHn_NXTCMDAR_MX28(n) (0x110 + (n) * 0x70)
+#define HW_APBHX_CHn_SEMA_MX23(n) (0x080 + (n) * 0x70)
+#define HW_APBHX_CHn_SEMA_MX28(n) (0x140 + (n) * 0x70)
#define BM_APBHX_CHn_SEMA_PHORE (0xff << 16)
#define BP_APBHX_CHn_SEMA_PHORE 16
static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];
-static bool apbh_is_old;
+
+enum mxs_dma_id {
+ IMX23_DMA,
+ IMX28_DMA,
+};
+
+struct apbh_dma {
+ void __iomem *regs;
+ enum mxs_dma_id id;
+};
+
+#define apbh_dma_is_imx23(apbh) ((apbh)->id == IMX23_DMA)
+
+static struct apbh_dma *apbh_dma;
/*
* Test if the DMA channel is a valid channel
@@ -91,7 +102,7 @@ static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
*/
static int mxs_dma_read_semaphore(int channel)
{
- void __iomem *apbh_regs = (void *)MXS_APBH_BASE;
+ struct apbh_dma *apbh = apbh_dma;
uint32_t tmp;
int ret;
@@ -99,7 +110,10 @@ static int mxs_dma_read_semaphore(int channel)
if (ret)
return ret;
- tmp = readl(apbh_regs + HW_APBHX_CHn_SEMA(channel));
+ if (apbh_dma_is_imx23(apbh))
+ tmp = readl(apbh->regs + HW_APBHX_CHn_SEMA_MX23(channel));
+ else
+ tmp = readl(apbh->regs + HW_APBHX_CHn_SEMA_MX28(channel));
tmp &= BM_APBHX_CHn_SEMA_PHORE;
tmp >>= BP_APBHX_CHn_SEMA_PHORE;
@@ -118,7 +132,7 @@ static int mxs_dma_read_semaphore(int channel)
*/
static int mxs_dma_enable(int channel)
{
- void __iomem *apbh_regs = (void *)MXS_APBH_BASE;
+ struct apbh_dma *apbh = apbh_dma;
unsigned int sem;
struct mxs_dma_chan *pchan;
struct mxs_dma_desc *pdesc;
@@ -150,22 +164,40 @@ static int mxs_dma_enable(int channel)
if (sem == 1) {
pdesc = list_entry(pdesc->node.next,
struct mxs_dma_desc, node);
- writel(mxs_dma_cmd_address(pdesc),
- apbh_regs + HW_APBHX_CHn_NXTCMDAR(channel));
+ if (apbh_dma_is_imx23(apbh))
+ writel(mxs_dma_cmd_address(pdesc),
+ apbh->regs + HW_APBHX_CHn_NXTCMDAR_MX23(channel));
+ else
+ writel(mxs_dma_cmd_address(pdesc),
+ apbh->regs + HW_APBHX_CHn_NXTCMDAR_MX28(channel));
}
- writel(pchan->pending_num,
- apbh_regs + HW_APBHX_CHn_SEMA(channel));
+
+ if (apbh_dma_is_imx23(apbh))
+ writel(pchan->pending_num,
+ apbh->regs + HW_APBHX_CHn_SEMA_MX23(channel));
+ else
+ writel(pchan->pending_num,
+ apbh->regs + HW_APBHX_CHn_SEMA_MX28(channel));
+
pchan->active_num += pchan->pending_num;
pchan->pending_num = 0;
} else {
pchan->active_num += pchan->pending_num;
pchan->pending_num = 0;
- writel(mxs_dma_cmd_address(pdesc),
- apbh_regs + HW_APBHX_CHn_NXTCMDAR(channel));
- writel(pchan->active_num,
- apbh_regs + HW_APBHX_CHn_SEMA(channel));
- channel_bit = channel + (apbh_is_old ? BP_APBH_CTRL0_CLKGATE_CHANNEL : 0);
- writel(1 << channel_bit, apbh_regs + HW_APBHX_CTRL0 + BIT_CLR);
+ if (apbh_dma_is_imx23(apbh)) {
+ writel(mxs_dma_cmd_address(pdesc),
+ apbh->regs + HW_APBHX_CHn_NXTCMDAR_MX23(channel));
+ writel(pchan->active_num,
+ apbh->regs + HW_APBHX_CHn_SEMA_MX23(channel));
+ channel_bit = channel + BP_APBH_CTRL0_CLKGATE_CHANNEL;
+ } else {
+ writel(mxs_dma_cmd_address(pdesc),
+ apbh->regs + HW_APBHX_CHn_NXTCMDAR_MX28(channel));
+ writel(pchan->active_num,
+ apbh->regs + HW_APBHX_CHn_SEMA_MX28(channel));
+ channel_bit = channel;
+ }
+ writel(1 << channel_bit, apbh->regs + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
}
pchan->flags |= MXS_DMA_FLAGS_BUSY;
@@ -189,7 +221,7 @@ static int mxs_dma_enable(int channel)
static int mxs_dma_disable(int channel)
{
struct mxs_dma_chan *pchan;
- void __iomem *apbh_regs = (void *)MXS_APBH_BASE;
+ struct apbh_dma *apbh = apbh_dma;
int channel_bit, ret;
ret = mxs_dma_validate_chan(channel);
@@ -201,8 +233,12 @@ static int mxs_dma_disable(int channel)
if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
return -EINVAL;
- channel_bit = channel + (apbh_is_old ? BP_APBH_CTRL0_CLKGATE_CHANNEL : 0);
- writel(1 << channel_bit, apbh_regs + HW_APBHX_CTRL0 + BIT_SET);
+ if (apbh_dma_is_imx23(apbh))
+ channel_bit = channel + BP_APBH_CTRL0_CLKGATE_CHANNEL;
+ else
+ channel_bit = channel + 0;
+
+ writel(1 << channel_bit, apbh->regs + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
pchan->active_num = 0;
@@ -217,19 +253,19 @@ static int mxs_dma_disable(int channel)
*/
static int mxs_dma_reset(int channel)
{
- void __iomem *apbh_regs = (void *)MXS_APBH_BASE;
+ struct apbh_dma *apbh = apbh_dma;
int ret;
ret = mxs_dma_validate_chan(channel);
if (ret)
return ret;
- if (apbh_is_old)
+ if (apbh_dma_is_imx23(apbh))
writel(1 << (channel + BP_APBH_CTRL0_RESET_CHANNEL),
- apbh_regs + HW_APBHX_CTRL0 + BIT_SET);
+ apbh->regs + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
else
writel(1 << (channel + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
- apbh_regs + HW_APBHX_CHANNEL_CTRL + BIT_SET);
+ apbh->regs + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
return 0;
}
@@ -241,7 +277,7 @@ static int mxs_dma_reset(int channel)
*/
static int mxs_dma_enable_irq(int channel, int enable)
{
- void __iomem *apbh_regs = (void *)MXS_APBH_BASE;
+ struct apbh_dma *apbh = apbh_dma;
int ret;
ret = mxs_dma_validate_chan(channel);
@@ -250,10 +286,10 @@ static int mxs_dma_enable_irq(int channel, int enable)
if (enable)
writel(1 << (channel + BP_APBHX_CTRL1_CH_CMDCMPLT_IRQ_EN),
- apbh_regs + HW_APBHX_CTRL1 + BIT_SET);
+ apbh->regs + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);
else
writel(1 << (channel + BP_APBHX_CTRL1_CH_CMDCMPLT_IRQ_EN),
- apbh_regs + HW_APBHX_CTRL1 + BIT_CLR);
+ apbh->regs + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
return 0;
}
@@ -266,15 +302,15 @@ static int mxs_dma_enable_irq(int channel, int enable)
*/
static int mxs_dma_ack_irq(int channel)
{
- void __iomem *apbh_regs = (void *)MXS_APBH_BASE;
+ struct apbh_dma *apbh = apbh_dma;
int ret;
ret = mxs_dma_validate_chan(channel);
if (ret)
return ret;
- writel(1 << channel, apbh_regs + HW_APBHX_CTRL1 + BIT_CLR);
- writel(1 << channel, apbh_regs + HW_APBHX_CTRL2 + BIT_CLR);
+ writel(1 << channel, apbh->regs + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
+ writel(1 << channel, apbh->regs + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
return 0;
}
@@ -496,7 +532,7 @@ static int mxs_dma_finish(int channel, struct list_head *head)
*/
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
- void __iomem *apbh_regs = (void *)MXS_APBH_BASE;
+ struct apbh_dma *apbh = apbh_dma;
int ret;
ret = mxs_dma_validate_chan(chan);
@@ -504,7 +540,7 @@ static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
return ret;
while (--timeout) {
- if (readl(apbh_regs + HW_APBHX_CTRL1) & (1 << chan))
+ if (readl(apbh->regs + HW_APBHX_CTRL1) & (1 << chan))
break;
udelay(1);
}
@@ -548,31 +584,33 @@ int mxs_dma_go(int chan)
/*
* Initialize the DMA hardware
*/
-int mxs_dma_init(void)
+static int apbh_dma_probe(struct device_d *dev)
{
- void __iomem *apbh_regs = (void *)MXS_APBH_BASE;
+ struct apbh_dma *apbh;
struct mxs_dma_chan *pchan;
+ enum mxs_dma_id id;
int ret, channel;
- u32 val, reg;
- ret = mxs_reset_block(apbh_regs, 0);
+ ret = dev_get_drvdata(dev, (unsigned long *)&id);
if (ret)
return ret;
- /* HACK: Get CPUID and determine APBH version */
- val = readl(0x8001c310) >> 16;
- if (val == 0x2800)
- reg = MXS_APBH_BASE + 0x0800;
- else
- reg = MXS_APBH_BASE + 0x03f0;
+ apbh_dma = apbh = xzalloc(sizeof(*apbh));
+ apbh->regs = dev_request_mem_region(dev, 0);
+ if (!apbh->regs)
+ return -EBUSY;
+
+ apbh->id = id;
- apbh_is_old = (readl((void *)reg) >> 24) < 3;
+ ret = stmp_reset_block(apbh->regs, 0);
+ if (ret)
+ return ret;
writel(BM_APBH_CTRL0_APB_BURST8_EN,
- apbh_regs + HW_APBHX_CTRL0 + BIT_SET);
+ apbh->regs + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
writel(BM_APBH_CTRL0_APB_BURST_EN,
- apbh_regs + HW_APBHX_CTRL0 + BIT_SET);
+ apbh->regs + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
for (channel = 0; channel < MXS_MAX_DMA_CHANNELS; channel++) {
pchan = mxs_dma_channels + channel;
@@ -598,3 +636,33 @@ err:
mxs_dma_release(channel);
return ret;
}
+
+static struct platform_device_id apbh_ids[] = {
+ {
+ .name = "imx23-dma-apbh",
+ .driver_data = (unsigned long)IMX23_DMA,
+ }, {
+ .name = "imx28-dma-apbh",
+ .driver_data = (unsigned long)IMX28_DMA,
+ },
+};
+
+static __maybe_unused struct of_device_id apbh_dt_ids[] = {
+ {
+ .compatible = "fsl,imx23-dma-apbh",
+ .data = (unsigned long)IMX23_DMA,
+ }, {
+ .compatible = "fsl,imx28-dma-apbh",
+ .data = (unsigned long)IMX28_DMA,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct driver_d apbh_dma_driver = {
+ .name = "dma-apbh",
+ .id_table = apbh_ids,
+ .of_compatible = DRV_OF_COMPAT(apbh_dt_ids),
+ .probe = apbh_dma_probe,
+};
+coredevice_platform_driver(apbh_dma_driver);
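
The probe above resolves the SoC flavour from the platform/OF match tables instead of peeking at a CPU ID register. For orientation, here is a minimal consumer-side sketch of the queue/run API this file exports; the descriptor helpers and MXS_DMA_DESC_* flag names are taken from dma/apbh-dma.h as used by nand_mxs.c and are assumptions here, not part of this diff:

#include <dma/apbh-dma.h>
#include <errno.h>

/* Sketch only: enqueue one device-to-memory descriptor and run the channel. */
static int example_apbh_read(int channel, void *buf, unsigned int len)
{
	struct mxs_dma_desc *desc;
	int ret;

	desc = mxs_dma_desc_alloc();	/* DMA-able command descriptor */
	if (!desc)
		return -ENOMEM;

	desc->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE |	/* DMA writes to memory */
			 MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM |
			 (len << MXS_DMA_DESC_BYTES_OFFSET);
	desc->cmd.address = (dma_addr_t)buf;

	ret = mxs_dma_desc_append(channel, desc);	/* queue on the channel */
	if (!ret)
		ret = mxs_dma_go(channel);	/* kick DMA and wait for completion */

	mxs_dma_desc_free(desc);
	return ret;
}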
diff --git a/drivers/mci/mxs.c b/drivers/mci/mxs.c
index 023f92236a..1b935f7ce6 100644
--- a/drivers/mci/mxs.c
+++ b/drivers/mci/mxs.c
@@ -36,10 +36,10 @@
#include <errno.h>
#include <clock.h>
#include <io.h>
+#include <stmp-device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <asm/bitops.h>
-#include <mach/mxs.h>
#include <mach/imx-regs.h>
#include <mach/mci.h>
#include <mach/clock.h>
@@ -457,7 +457,7 @@ static int mxs_mci_initialize(struct mci_host *host, struct device_d *mci_dev)
writel(SSP_CTRL0_CLKGATE, mxs_mci->regs + HW_SSP_CTRL0 + 8);
/* reset the unit */
- mxs_reset_block(mxs_mci->regs + HW_SSP_CTRL0, 0);
+ stmp_reset_block(mxs_mci->regs + HW_SSP_CTRL0, 0);
/* restore the last settings */
mxs_mci_setup_timeout(mxs_mci, 0xffff);
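
The mxs_reset_block() to stmp_reset_block() switch here (and in apbh_dma.c above) moves to a shared helper keyed off the STMP register layout, where every control register has SET/CLR/TOG shadows at +0x4/+0x8/+0xc. A simplified sketch of the reset sequence such a helper performs; the SFTRST/CLKGATE bit names are local assumptions (bits 31 and 30 of every STMP control register), and the shared implementation additionally polls with timeouts and returns errors:

#include <io.h>
#include <stmp-device.h>

#define SFTRST	(1U << 31)	/* soft-reset bit of a STMP ctrl register */
#define CLKGATE	(1U << 30)	/* clock-gate bit */

/* Sketch of the classic STMP block reset dance. */
static void stmp_reset_block_sketch(void __iomem *reg)
{
	/* bring the block out of reset and ungate its clock */
	writel(SFTRST, reg + STMP_OFFSET_REG_CLR);
	writel(CLKGATE, reg + STMP_OFFSET_REG_CLR);

	/* assert reset; the block gates its own clock when it is done */
	writel(SFTRST, reg + STMP_OFFSET_REG_SET);
	while (!(readl(reg) & CLKGATE))
		;

	/* release reset and ungate the clock again */
	writel(SFTRST, reg + STMP_OFFSET_REG_CLR);
	writel(CLKGATE, reg + STMP_OFFSET_REG_CLR);
}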
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 82b2cc9e68..d8312642c5 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_NAND) += nand/
obj-$(CONFIG_DRIVER_CFI) += nor/
-obj-$(CONFIG_UBI) += ubi/
+obj-$(CONFIG_MTD_UBI) += ubi/
obj-y += devices/
obj-$(CONFIG_PARTITION_NEED_MTD) += partition.o
obj-$(CONFIG_MTD) += core.o
diff --git a/drivers/mtd/core.c b/drivers/mtd/core.c
index 37f4428dc3..fc345470f2 100644
--- a/drivers/mtd/core.c
+++ b/drivers/mtd/core.c
@@ -327,6 +327,27 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
return mtd->erase(mtd, instr);
}
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ int ret_code;
+
+ ops->retlen = ops->oobretlen = 0;
+ if (!mtd->read_oob)
+ return -EOPNOTSUPP;
+ /*
+ * In cases where ops->datbuf != NULL, mtd->read_oob() has semantics
+ * similar to mtd->read(), returning a non-negative integer
+ * representing max bitflips. In other cases, mtd->read_oob() may
+ * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
+ */
+ ret_code = mtd->read_oob(mtd, from, ops);
+ if (unlikely(ret_code < 0))
+ return ret_code;
+ if (mtd->ecc_strength == 0)
+ return 0; /* device lacks ecc */
+ return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+
static struct file_operations mtd_ops = {
.read = mtd_op_read,
#ifdef CONFIG_MTD_WRITE
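
The new mtd_read_oob() wrapper gives callers uniform bitflip accounting: a read whose maximum per-step bitflip count reaches mtd->bitflip_threshold returns -EUCLEAN even though the returned data is correct. A hedged caller-side sketch (function and buffer names are illustrative, not part of this diff):

/* Sketch: read one page plus its OOB and treat -EUCLEAN as a
 * "data OK, but consider scrubbing this block" hint. */
static int example_read_page(struct mtd_info *mtd, loff_t off,
			     void *data, void *oob)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_PLACE_OOB,
		.datbuf	= data,
		.len	= mtd->writesize,
		.oobbuf	= oob,
		.ooblen	= mtd->oobsize,
	};
	int ret = mtd_read_oob(mtd, off, &ops);

	if (ret == -EUCLEAN)
		return 0;	/* bitflips were corrected by ECC */

	return ret;
}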
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index af3d174cd6..e15c809520 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -468,7 +468,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
else
ooblen = 0;
- if (oobbuf && ops->mode == MTD_OOB_PLACE)
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
oobbuf += ops->ooboffs;
doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
@@ -537,7 +537,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) &&
(eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) &&
(eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) &&
- (ops->mode != MTD_OOB_RAW) &&
+ (ops->mode != MTD_OPS_RAW) &&
(nbdata == DOC_LAYOUT_PAGE_SIZE)) {
ret = doc_ecc_bch_fix_data(docg3, buf, hwecc);
if (ret < 0) {
@@ -577,7 +577,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
memset(&ops, 0, sizeof(ops));
ops.datbuf = buf;
ops.len = len;
- ops.mode = MTD_OOB_AUTO;
+ ops.mode = MTD_OPS_AUTO_OOB;
ret = doc_read_oob(mtd, from, &ops);
*retlen = ops.retlen;
@@ -631,11 +631,11 @@ static int doc_guess_autoecc(struct mtd_oob_ops *ops)
int autoecc;
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_AUTO:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
autoecc = 1;
break;
- case MTD_OOB_RAW:
+ case MTD_OPS_RAW:
autoecc = 0;
break;
default:
@@ -663,7 +663,7 @@ static int doc_backup_oob(struct docg3 *docg3, loff_t to,
docg3->oob_write_ofs = to;
docg3->oob_autoecc = autoecc;
- if (ops->mode == MTD_OOB_AUTO) {
+ if (ops->mode == MTD_OPS_AUTO_OOB) {
doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf);
ops->oobretlen = 8;
} else {
@@ -960,17 +960,17 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
else
ooblen = 0;
- if (oobbuf && ops->mode == MTD_OOB_PLACE)
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
oobbuf += ops->ooboffs;
doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
ofs, ops->mode, buf, len, oobbuf, ooblen);
switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_RAW:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
oobdelta = mtd->oobsize;
break;
- case MTD_OOB_AUTO:
+ case MTD_OPS_AUTO_OOB:
oobdelta = mtd->ecclayout->oobavail;
break;
default:
@@ -1005,7 +1005,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
memset(oob, 0, sizeof(oob));
if (ofs == docg3->oob_write_ofs)
memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE);
- else if (ooblen > 0 && ops->mode == MTD_OOB_AUTO)
+ else if (ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB)
doc_fill_autooob(oob, oobbuf);
else if (ooblen > 0)
memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE);
@@ -1036,7 +1036,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
ops.datbuf = (char *)buf;
ops.len = len;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.oobbuf = NULL;
ops.ooblen = 0;
ops.ooboffs = 0;
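
The MTD_OOB_* to MTD_OPS_* renames in this file are purely mechanical and track the constants adopted from the Linux mtd-abi.h naming; for reference:

/* MTD_OOB_* becomes MTD_OPS_*; values as in Linux mtd-abi.h: */
enum {
	MTD_OPS_PLACE_OOB = 0,	/* OOB data placed at the given OOB offset */
	MTD_OPS_AUTO_OOB  = 1,	/* OOB packed into the free OOB areas only */
	MTD_OPS_RAW	  = 2,	/* raw access, ECC bypassed */
};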
diff --git a/drivers/mtd/mtdoob.c b/drivers/mtd/mtdoob.c
index e5d803903a..1e88b534ff 100644
--- a/drivers/mtd/mtdoob.c
+++ b/drivers/mtd/mtdoob.c
@@ -37,7 +37,7 @@ static struct mtd_info *to_mtd(struct cdev *cdev)
return mtdoob->mtd;
}
-static ssize_t mtd_read_oob(struct cdev *cdev, void *buf, size_t count,
+static ssize_t mtd_op_read_oob(struct cdev *cdev, void *buf, size_t count,
loff_t _offset, ulong flags)
{
struct mtd_info *mtd = to_mtd(cdev);
@@ -48,7 +48,7 @@ static ssize_t mtd_read_oob(struct cdev *cdev, void *buf, size_t count,
if (count < mtd->oobsize)
return -EINVAL;
- ops.mode = MTD_OOB_RAW;
+ ops.mode = MTD_OPS_RAW;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.oobbuf = buf;
@@ -56,7 +56,7 @@ static ssize_t mtd_read_oob(struct cdev *cdev, void *buf, size_t count,
ops.len = mtd->oobsize;
offset /= mtd->oobsize;
- ret = mtd->read_oob(mtd, offset * mtd->writesize, &ops);
+ ret = mtd_read_oob(mtd, offset * mtd->writesize, &ops);
if (ret)
return ret;
@@ -64,7 +64,7 @@ static ssize_t mtd_read_oob(struct cdev *cdev, void *buf, size_t count,
}
static struct file_operations mtd_ops_oob = {
- .read = mtd_read_oob,
+ .read = mtd_op_read_oob,
.ioctl = mtd_ioctl,
.lseek = dev_lseek_default,
};
diff --git a/drivers/mtd/mtdraw.c b/drivers/mtd/mtdraw.c
index c289e8d48e..1a4711e7dd 100644
--- a/drivers/mtd/mtdraw.c
+++ b/drivers/mtd/mtdraw.c
@@ -58,7 +58,7 @@
* - no actual mtd->write if done
* A second write of 512 bytes triggers:
* - copy of the 16 first bytes into writebuf
- * - a mtd->write_oob() from writebuf
+ * - a mtd_write_oob() from writebuf
* - empty writebuf
* - copy the remaining 496 bytes into writebuf
* => write_fill = 496, write_ofs = offset + 528
@@ -97,13 +97,13 @@ static ssize_t mtdraw_read_unaligned(struct mtd_info *mtd, void *dst,
tmp = malloc(mtd->writesize + mtd->oobsize);
if (!tmp)
return -ENOMEM;
- ops.mode = MTD_OOB_RAW;
+ ops.mode = MTD_OPS_RAW;
ops.ooboffs = 0;
ops.datbuf = tmp;
ops.len = mtd->writesize;
ops.oobbuf = tmp + mtd->writesize;
ops.ooblen = mtd->oobsize;
- ret = mtd->read_oob(mtd, offset, &ops);
+ ret = mtd_read_oob(mtd, offset, &ops);
if (ret)
goto err;
if (partial)
@@ -152,13 +152,13 @@ static ssize_t mtdraw_blkwrite(struct mtd_info *mtd, const void *buf,
struct mtd_oob_ops ops;
int ret;
- ops.mode = MTD_OOB_RAW;
+ ops.mode = MTD_OPS_RAW;
ops.ooboffs = 0;
ops.datbuf = (void *)buf;
ops.len = mtd->writesize;
ops.oobbuf = (void *)buf + mtd->writesize;
ops.ooblen = mtd->oobsize;
- ret = mtd->write_oob(mtd, offset, &ops);
+ ret = mtd_write_oob(mtd, offset, &ops);
if (!ret)
ret = ops.retlen + ops.oobretlen;
return ret;
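
mtdraw exposes each page as writesize + oobsize consecutive bytes, which is why the buffering described in the comment above is needed. A sketch of the offset translation involved, mirroring the arithmetic in mtdraw_read_unaligned() under that assumption:

/* Sketch: map an offset in the raw (page + OOB) byte stream back to the
 * flash page offset that mtd_read_oob()/mtd_write_oob() expect. */
static loff_t mtdraw_to_flash_offset(struct mtd_info *mtd, loff_t raw)
{
	unsigned int rawsize = mtd->writesize + mtd->oobsize;

	return (loff_t)(raw / rawsize) * mtd->writesize;
}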
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index de8fb5e18e..04fe3c80bf 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -13,11 +13,20 @@ config NAND_ECC_SOFT
default y
prompt "Support software ecc"
+config NAND_ECC_BCH
+ select BCH
+ bool
+ prompt "Support software BCH ecc"
+
config NAND_ECC_HW
bool
default y
prompt "Support hardware ecc"
+config NAND_ECC_HW_OOB_FIRST
+ bool
+ prompt "Support hardware ecc (oob first)"
+
config NAND_ECC_HW_SYNDROME
bool
default y
@@ -64,13 +73,14 @@ config NAND_IMX
config NAND_IMX_BBM
bool
- prompt "i.MX NAND flash bbt creation command"
+ depends on NAND_BBT
depends on NAND_IMX
+ prompt "i.MX NAND flash bbt creation command"
config NAND_MXS
bool
select NAND_BBT
- prompt "i.MX23/28 NAND driver"
+ prompt "i.MX23/28/6 NAND driver"
depends on MXS_APBH_DMA
config NAND_OMAP_GPMC
@@ -100,15 +110,6 @@ config NAND_S3C24XX
help
Add support for processor's NAND device controller.
-config MTD_NAND_VERIFY_WRITE
- bool "Verify NAND page writes"
- help
- This adds an extra check when data is written to the flash. The
- NAND flash device internally checks only bits transitioning
- from 1 to 0. There is a rare possibility that even though the
- device thinks the write was successful, a bit could have been
- flipped accidentally due to device wear or something else.
-
config MTD_NAND_ECC_SMC
bool "NAND ECC Smart Media byte order"
default n
@@ -116,14 +117,6 @@ config MTD_NAND_ECC_SMC
Software ECC according to the Smart Media Specification.
The original Linux implementation had byte 0 and 1 swapped.
-config MTD_NAND_MUSEUM_IDS
- bool "Enable chip ids for obsolete ancient NAND devices"
- default n
- help
- Enable this option only when your board has first generation
- NAND chips (page size 256 byte, erase size 4-8KiB). The IDs
- of these chips were reused by later, larger chips.
-
config MTD_NAND_IDS
tristate
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 0c7c8e255b..a1414e1fb2 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -1,10 +1,7 @@
# Generic NAND options
obj-$(CONFIG_NAND) += nand_ecc.o
-obj-$(CONFIG_MTD_WRITE) += nand_write.o
-obj-$(CONFIG_NAND_ECC_SOFT) += nand_ecc.o nand_swecc.o
-obj-$(CONFIG_NAND_ECC_HW) += nand_hwecc.o
-obj-$(CONFIG_NAND_ECC_HW_SYNDROME) += nand_hwecc_syndrome.o
+obj-$(CONFIG_NAND_ECC_BCH) += nand_bch.o
obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
obj-$(CONFIG_NAND) += nand_base.o nand-bb.o
obj-$(CONFIG_NAND_BBT) += nand_bbt.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 3eb78b771b..a0f0966b04 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -624,7 +624,7 @@ normal_check:
}
static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf)
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
struct atmel_nand_host *host = chip->priv;
int eccsize = chip->ecc.size;
@@ -659,8 +659,9 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
return 0;
}
-static void atmel_nand_pmecc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf,
+ int oob_required)
{
struct atmel_nand_host *host = chip->priv;
uint32_t *eccpos = chip->ecc.layout->eccpos;
@@ -681,7 +682,7 @@ static void atmel_nand_pmecc_write_page(struct mtd_info *mtd,
!(pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY));
if (ret) {
dev_err(host->dev, "PMECC: Timeout to get ECC value.\n");
- return;
+ return -ETIMEDOUT;
}
for (i = 0; i < host->pmecc_sector_number; i++) {
@@ -694,6 +695,8 @@ static void atmel_nand_pmecc_write_page(struct mtd_info *mtd,
}
}
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
static void atmel_pmecc_core_init(struct mtd_info *mtd)
@@ -881,7 +884,7 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
* buf: buffer to store read data
*/
static int atmel_nand_read_page(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf)
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1201,7 +1204,7 @@ static int __init atmel_nand_probe(struct device_d *dev)
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
res = -ENXIO;
goto err_scan_ident;
}
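
These atmel changes track the updated ECC hooks that the rewritten nand_base.c expects: read_page/write_page gain an oob_required flag, read_page also takes the page number, write_page now returns a status, and nand_scan_ident() grows a third parameter for an optional flash ID table (NULL selects the built-in one). A sketch of the new hook shapes; the authoritative definitions live in the NAND header:

/* Excerpt-style sketch of the new ECC callback signatures: */
struct nand_ecc_hooks_sketch {
	int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
			 uint8_t *buf, int oob_required, int page);
	int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
			  const uint8_t *buf, int oob_required);
};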
diff --git a/drivers/mtd/nand/nand.h b/drivers/mtd/nand/nand.h
index 123258d7cb..eb6652c14f 100644
--- a/drivers/mtd/nand/nand.h
+++ b/drivers/mtd/nand/nand.h
@@ -17,7 +17,8 @@ void multi_erase_cmd(struct mtd_info *mtd, int page);
void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf);
int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int page, int cached, int raw);
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw);
int nand_erase(struct mtd_info *mtd, struct erase_info *instr);
int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 829ab4292e..c252a2a2dc 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4,7 +4,6 @@
* Overview:
* This is the generic MTD driver for NAND flash devices. It should be
* capable of working with almost all NAND chips currently available.
- * Basic support for AG-AND chips is provided.
*
* Additional technical information is available on
* http://www.linux-mtd.infradead.org/doc/nand.html
@@ -21,9 +20,7 @@
* TODO:
* Enable cached programming for 2k page size chips
* Check, if mtd->ecctype should be set to MTD_ECC_HW
- * if we have HW ecc support.
- * The AG-AND chips have nice features for speed improvement,
- * which are not supported yet. Read / program 4 pages in one go.
+ * if we have HW ECC support.
* BBT table is not serialized, has to be fixed
*
* This program is free software; you can redistribute it and/or modify
@@ -45,10 +42,7 @@
#include <io.h>
#include <malloc.h>
#include <module.h>
-
-#include "nand.h"
-
-#ifndef DOXYGEN_SHOULD_SKIP_THIS
+#include <linux/mtd/nand_bch.h>
/* Define default oob placement schemes for large and small page devices */
static struct nand_ecclayout nand_oob_8 = {
@@ -58,7 +52,7 @@ static struct nand_ecclayout nand_oob_8 = {
{.offset = 3,
.length = 2},
{.offset = 6,
- .length = 2}}
+ .length = 2} }
};
static struct nand_ecclayout nand_oob_16 = {
@@ -66,7 +60,7 @@ static struct nand_ecclayout nand_oob_16 = {
.eccpos = {0, 1, 2, 3, 6, 7},
.oobfree = {
{.offset = 8,
- . length = 8}}
+ . length = 8} }
};
static struct nand_ecclayout nand_oob_64 = {
@@ -77,26 +71,69 @@ static struct nand_ecclayout nand_oob_64 = {
56, 57, 58, 59, 60, 61, 62, 63},
.oobfree = {
{.offset = 2,
- .length = 38}}
+ .length = 38} }
};
-#define DEFINE_LED_TRIGGER(x)
-#define DEFINE_LED_TRIGGER_GLOBAL(x)
-#define led_trigger_register_simple(x, y) do {} while(0)
-#define led_trigger_unregister_simple(x) do {} while(0)
-#define led_trigger_event(x, y) do {} while(0)
+static struct nand_ecclayout nand_oob_128 = {
+ .eccbytes = 48,
+ .eccpos = {
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127},
+ .oobfree = {
+ {.offset = 2,
+ .length = 78} }
+};
-/*
- * For devices which display every fart in the system on a separate LED. Is
- * compiled away when LED support is disabled.
+static int nand_get_device(struct mtd_info *mtd, int new_state);
+
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops);
+
+static int check_offs_len(struct mtd_info *mtd,
+ loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = 0;
+
+ /* Start address must align on block boundary */
+ if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: unaligned address\n", __func__);
+ ret = -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (len & ((1 << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: length not block aligned\n", __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_release_device - [GENERIC] release chip
+ * @mtd: MTD device structure
+ *
+ * Release chip lock and wake up anyone waiting on the device.
*/
-DEFINE_LED_TRIGGER(nand_led_trigger);
+static void nand_release_device(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ /* Release the controller and the chip */
+ chip->controller->active = NULL;
+ chip->state = FL_READY;
+}
/**
* nand_read_byte - [DEFAULT] read one byte from the chip
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*
- * Default read function for 8bit buswith
+ * Default read function for 8bit buswidth
*/
static uint8_t nand_read_byte(struct mtd_info *mtd)
{
@@ -105,11 +142,12 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
}
/**
- * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
- * @mtd: MTD device structure
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth with endianness conversion.
*
- * Default read function for 16bit buswith with
- * endianess conversion
*/
static uint8_t nand_read_byte16(struct mtd_info *mtd)
{
@@ -119,10 +157,9 @@ static uint8_t nand_read_byte16(struct mtd_info *mtd)
/**
* nand_read_word - [DEFAULT] read one word from the chip
- * @mtd: MTD device structure
+ * @mtd: MTD device structure
*
- * Default read function for 16bit buswith without
- * endianess conversion
+ * Default read function for 16bit buswidth without endianness conversion.
*/
static u16 nand_read_word(struct mtd_info *mtd)
{
@@ -132,8 +169,8 @@ static u16 nand_read_word(struct mtd_info *mtd)
/**
* nand_select_chip - [DEFAULT] control CE line
- * @mtd: MTD device structure
- * @chipnr: chipnumber to select, -1 for deselect
+ * @mtd: MTD device structure
+ * @chipnr: chipnumber to select, -1 for deselect
*
* Default select function for 1 chip devices.
*/
@@ -147,56 +184,57 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr)
break;
case 0:
break;
+
default:
- printf("%s: illegal chip number %d\n", __func__, chipnr);
+ BUG();
}
}
/**
- * nand_read_buf - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
*
- * Default read function for 8bit buswith
+ * Default write function for 8bit buswidth.
*/
-static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static __maybe_unused void nand_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
{
int i;
struct nand_chip *chip = mtd->priv;
for (i = 0; i < len; i++)
- buf[i] = readb(chip->IO_ADDR_R);
+ writeb(buf[i], chip->IO_ADDR_W);
}
/**
- * nand_verify_buf - [DEFAULT] Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
*
- * Default verify function for 8bit buswith
+ * Default read function for 8bit buswidth.
*/
-static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
int i;
struct nand_chip *chip = mtd->priv;
for (i = 0; i < len; i++)
- if (buf[i] != readb(chip->IO_ADDR_R))
- return -EFAULT;
- return 0;
+ buf[i] = readb(chip->IO_ADDR_R);
}
/**
- * nand_read_buf16 - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
*
- * Default read function for 16bit buswith
+ * Default write function for 16bit buswidth.
*/
-static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+static __maybe_unused void nand_write_buf16(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
{
int i;
struct nand_chip *chip = mtd->priv;
@@ -204,18 +242,19 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
len >>= 1;
for (i = 0; i < len; i++)
- p[i] = readw(chip->IO_ADDR_R);
+ writew(p[i], chip->IO_ADDR_W);
+
}
/**
- * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
*
- * Default verify function for 16bit buswith
+ * Default read function for 16bit buswidth.
*/
-static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
int i;
struct nand_chip *chip = mtd->priv;
@@ -223,63 +262,185 @@ static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
len >>= 1;
for (i = 0; i < len; i++)
- if (p[i] != readw(chip->IO_ADDR_R))
- return -EFAULT;
-
- return 0;
+ p[i] = readw(chip->IO_ADDR_R);
}
/**
* nand_block_bad - [DEFAULT] Read bad block marker from the chip
- * @mtd: MTD device structure
- * @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
*
* Check, if the block is bad.
*/
static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
- int page, chipnr, res = 0;
+ int page, chipnr, res = 0, i = 0;
struct nand_chip *chip = mtd->priv;
u16 bad;
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
if (getchip) {
chipnr = (int)(ofs >> chip->chip_shift);
+ nand_get_device(mtd, FL_READING);
+
/* Select the NAND device */
chip->select_chip(mtd, chipnr);
}
- if (chip->options & NAND_BUSWIDTH_16) {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE,
- page);
- bad = cpu_to_le16(chip->read_word(mtd));
- if (chip->badblockpos & 0x1)
- bad >>= 8;
- if ((bad & 0xFF) != 0xff)
- res = 1;
- } else {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
- if (chip->read_byte(mtd) != 0xff)
- res = 1;
+ do {
+ if (chip->options & NAND_BUSWIDTH_16) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB,
+ chip->badblockpos & 0xFE, page);
+ bad = cpu_to_le16(chip->read_word(mtd));
+ if (chip->badblockpos & 0x1)
+ bad >>= 8;
+ else
+ bad &= 0xFF;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
+ page);
+ bad = chip->read_byte(mtd);
+ }
+
+ if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;
+ ofs += mtd->writesize;
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ i++;
+ } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
+
+ if (getchip) {
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
}
return res;
}
/**
+ * nand_default_block_markbad - [DEFAULT] mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This is the default implementation, which can be overridden by a hardware
+ * specific driver. We try operations in the following order, according to our
+ * bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH):
+ * (1) erase the affected block, to allow OOB marker to be written cleanly
+ * (2) update in-memory BBT
+ * (3) write bad block marker to OOB area of affected block
+ * (4) update flash-based BBT
+ * Note that we retain the first error encountered in (3) or (4), finish the
+ * procedures, and dump the error in the end.
+*/
+static __maybe_unused int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ uint8_t buf[2] = { 0, 0 };
+ int block, res, ret = 0, i = 0;
+ int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
+
+ if (write_oob) {
+ struct erase_info einfo;
+
+ /* Attempt erase before marking OOB */
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = ofs;
+ einfo.len = 1 << chip->phys_erase_shift;
+ nand_erase_nand(mtd, &einfo, 0);
+ }
+
+ /* Get block number */
+ block = (int)(ofs >> chip->bbt_erase_shift);
+ /* Mark block bad in memory-based BBT */
+ if (chip->bbt)
+ chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+
+ /* Write bad block marker to OOB */
+ if (write_oob) {
+ struct mtd_oob_ops ops;
+ loff_t wr_ofs = ofs;
+
+ nand_get_device(mtd, FL_WRITING);
+
+ ops.datbuf = NULL;
+ ops.oobbuf = buf;
+ ops.ooboffs = chip->badblockpos;
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ops.ooboffs &= ~0x01;
+ ops.len = ops.ooblen = 2;
+ } else {
+ ops.len = ops.ooblen = 1;
+ }
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ /* Write to first/last page(s) if necessary */
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+ wr_ofs += mtd->erasesize - mtd->writesize;
+ do {
+ res = nand_do_write_oob(mtd, wr_ofs, &ops);
+ if (!ret)
+ ret = res;
+
+ i++;
+ wr_ofs += mtd->writesize;
+ } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
+
+ nand_release_device(mtd);
+ }
+
+ /* Update flash-based bad block table */
+ if (IS_ENABLED(CONFIG_NAND_BBT) && chip->bbt_options & NAND_BBT_USE_FLASH) {
+ res = nand_update_bbt(mtd, ofs);
+ if (!ret)
+ ret = res;
+ }
+
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+
+/**
+ * nand_check_wp - [GENERIC] check if the chip is write protected
+ * @mtd: MTD device structure
+ *
+ * Check, if the device is write protected. The function expects, that the
+ * device is already selected.
+ */
+static int nand_check_wp(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ /* Broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
+ /* Check the WP bit */
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
+}
+
+/**
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
- * @mtd: MTD device structure
- * @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
- * @allowbbt: 1, if its allowed to access the bbt area
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
+ * @allowbbt: 1, if its allowed to access the bbt area
*
* Check, if the block is bad. Either by reading the bad block table or
* calling of the scan function.
*/
-int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
+static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
int allowbbt)
{
struct nand_chip *chip = mtd->priv;
@@ -287,6 +448,7 @@ int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
#ifdef CONFIG_NAND_BBT
if (!chip->bbt)
return chip->block_bad(mtd, ofs, getchip);
+
/* Return info from the table */
return nand_isbad_bbt(mtd, ofs, allowbbt);
#else
@@ -294,34 +456,28 @@ int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
#endif
}
-/*
- * Wait for the ready pin, after a command
- * The timeout is catched later.
- */
+/* Wait for the ready pin, after a command. The timeout is caught later. */
void nand_wait_ready(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
uint64_t start = get_time_ns();
- led_trigger_event(nand_led_trigger, LED_FULL);
/* wait until command is processed or timeout occurs */
do {
if (chip->dev_ready(mtd))
break;
} while (!is_timeout(start, SECOND * 2));
- led_trigger_event(nand_led_trigger, LED_OFF);
}
-EXPORT_SYMBOL(nand_wait_ready);
/**
* nand_command - [DEFAULT] Send command to NAND device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
*
- * Send command to NAND device. This function is used for small page
- * devices (256/512 Bytes per page)
+ * Send command to NAND device. This function is used for small page devices
+ * (512 Bytes per page).
*/
static void nand_command(struct mtd_info *mtd, unsigned int command,
int column, int page_addr)
@@ -329,10 +485,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
register struct nand_chip *chip = mtd->priv;
int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
- /*
- * Write out the command to the device.
- */
- if (command == NAND_CMD_SEQIN) {
+ /* Write out the command to the device */
+ if (IS_ENABLED(CONFIG_MTD_WRITE) && command == NAND_CMD_SEQIN) {
int readcmd;
if (column >= mtd->writesize) {
@@ -351,9 +505,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
}
chip->cmd_ctrl(mtd, command, ctrl);
- /*
- * Address cycle, when necessary
- */
+ /* Address cycle, when necessary */
ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
/* Serially input address */
if (column != -1) {
@@ -374,8 +526,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
/*
- * program and erase have their own busy handlers
- * status and sequential in needs no delay
+ * Program and erase have their own busy handlers status and sequential
+ * in needs no delay
*/
switch (command) {
@@ -394,7 +546,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
NAND_CTRL_CLE | NAND_CTRL_CHANGE);
chip->cmd_ctrl(mtd,
NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
- while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) ;
+ while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
+ ;
return;
/* This applies to read commands */
@@ -408,8 +561,10 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
return;
}
}
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
ndelay(100);
nand_wait_ready(mtd);
@@ -417,14 +572,14 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
/**
* nand_command_lp - [DEFAULT] Send command to NAND large page device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
*
* Send command to NAND device. This is the version for the new large page
- * devices We dont have the separate regions as we have in the small page
- * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
*/
static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
int column, int page_addr)
@@ -438,8 +593,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
/* Command latch cycle */
- chip->cmd_ctrl(mtd, command & 0xff,
- NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -466,8 +620,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
/*
- * program and erase have their own busy handlers
- * status, sequential in, and deplete1 need no delay
+ * Program and erase have their own busy handlers status, sequential
+ * in, and deplete1 need no delay.
*/
switch (command) {
@@ -478,18 +632,6 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
case NAND_CMD_SEQIN:
case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
- case NAND_CMD_DEPLETE1:
- return;
-
- /*
- * read error status commands require only a short delay
- */
- case NAND_CMD_STATUS_ERROR:
- case NAND_CMD_STATUS_ERROR0:
- case NAND_CMD_STATUS_ERROR1:
- case NAND_CMD_STATUS_ERROR2:
- case NAND_CMD_STATUS_ERROR3:
- udelay(chip->chip_delay);
return;
case NAND_CMD_RESET:
@@ -500,7 +642,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->cmd_ctrl(mtd, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
- while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) ;
+ while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
+ ;
return;
case NAND_CMD_RNDOUT:
@@ -521,7 +664,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
default:
/*
* If we don't have access to the busy pin, we apply the given
- * command delay
+ * command delay.
*/
if (!chip->dev_ready) {
udelay(chip->chip_delay);
@@ -529,14 +672,46 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
}
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
ndelay(100);
nand_wait_ready(mtd);
}
/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int
+nand_get_device(struct mtd_info *mtd, int new_state)
+{
+ struct nand_chip *chip = mtd->priv;
+retry:
+
+ /* Hardware controller shared among independent devices */
+ if (!chip->controller->active)
+ chip->controller->active = chip;
+
+ if (chip->controller->active == chip && chip->state == FL_READY) {
+ chip->state = new_state;
+ return 0;
+ }
+ if (new_state == FL_PM_SUSPENDED) {
+ if (chip->controller->active->state == FL_PM_SUSPENDED) {
+ chip->state = FL_PM_SUSPENDED;
+ return 0;
+ }
+ }
+ goto retry;
+}
+
+/**
* nand_wait - [DEFAULT] wait until the command is done
* @mtd: MTD device structure
* @chip: NAND chip structure
@@ -557,16 +732,11 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
else
timeo = 20 * MSECOND;
- led_trigger_event(nand_led_trigger, LED_FULL);
-
/* Apply this short delay always to ensure that we do wait tWB in
* any case on any machine. */
ndelay(100);
- if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
- chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1);
- else
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
while (!is_timeout(start, timeo)) {
if (chip->dev_ready) {
@@ -577,51 +747,557 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
break;
}
}
- led_trigger_event(nand_led_trigger, LED_OFF);
status = (int)chip->read_byte(mtd);
return status;
}
/**
- * nand_read_page_raw - [Intern] read raw page data without ecc
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
+ * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ * @invert: when = 0, unlock the range of blocks within the lower and
+ * upper boundary address
+ * when = 1, unlock the range of blocks outside the boundaries
+ * of the lower and upper boundary address
+ *
+ * Returns unlock status.
+ */
+static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len, int invert)
+{
+ int ret = 0;
+ int status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ /* Submit address of first page to unlock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
+
+ /* Submit address of last page to unlock */
+ page = (ofs + len) >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
+ (page | invert) & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ /* See if device thinks it succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ pr_debug("%s: error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ *
+ * Returns unlock status.
+ */
+int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr;
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ ret = -EINVAL;
+
+ /* Align to last block address if size addresses end of the device */
+ if (ofs + len == mtd->size)
+ len -= mtd->erasesize;
+
+ nand_get_device(mtd, FL_UNLOCKING);
+
+ /* Shift to get chip number */
+ chipnr = ofs >> chip->chip_shift;
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0);
+
+out:
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+
+ return ret;
+}
+EXPORT_SYMBOL(nand_unlock);
+
+/**
+ * nand_lock - [REPLACEABLE] locks all blocks present in the device
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ *
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts do
+ * have this feature, but it allows only to lock all blocks, not for specified
+ * range for block. Implementing 'lock' feature by making use of 'unlock', for
+ * now.
+ *
+ * Returns lock status.
*/
-static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf)
+int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr, status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ ret = -EINVAL;
+
+ nand_get_device(mtd, FL_LOCKING);
+
+ /* Shift to get chip number */
+ chipnr = ofs >> chip->chip_shift;
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ status = MTD_ERASE_FAILED;
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Submit address of first page to lock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ /* See if device thinks it succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ pr_debug("%s: error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0x1);
+
+out:
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+
+ return ret;
+}
+EXPORT_SYMBOL(nand_lock);
+
+/**
+ * nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static __maybe_unused int nand_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
chip->read_buf(mtd, buf, mtd->writesize);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ if (oob_required)
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
}
/**
- * nand_transfer_oob - [Internal] Transfer oob to client buffer
- * @chip: nand chip structure
- * @oob: oob destination address
- * @ops: oob ops structure
- * @len: size of oob to transfer
+ * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * We need a special oob layout and handling even when OOB isn't used.
+ */
+static __maybe_unused int nand_read_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ chip->read_buf(mtd, buf, eccsize);
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ chip->read_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->read_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->read_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size)
+ chip->read_buf(mtd, oob, size);
+
+ return 0;
+}
+
+/**
+ * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ */
+static __maybe_unused int nand_read_page_swecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
+
+ chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
+ */
+static __maybe_unused int nand_read_subpage(struct mtd_info *mtd,
+ struct nand_chip *chip, uint32_t data_offs, uint32_t readlen,
+ uint8_t *bufpoi)
+{
+ int start_step, end_step, num_steps;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *p;
+ int data_col_addr, i, gaps = 0;
+ int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
+ int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ int index = 0;
+ unsigned int max_bitflips = 0;
+
+ /*
+ * Currently we have no users in barebox, so disable this for now
+ */
+ return -ENOTSUPP;
+
+ /* Column address within the page aligned to ECC size (256 bytes) */
+ start_step = data_offs / chip->ecc.size;
+ end_step = (data_offs + readlen - 1) / chip->ecc.size;
+ num_steps = end_step - start_step + 1;
+
+ /* Data size aligned to ECC ecc.size */
+ datafrag_len = num_steps * chip->ecc.size;
+ eccfrag_len = num_steps * chip->ecc.bytes;
+
+ data_col_addr = start_step * chip->ecc.size;
+ /* If we read not a page aligned data */
+ if (data_col_addr != 0)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
+
+ p = bufpoi + data_col_addr;
+ chip->read_buf(mtd, p, datafrag_len);
+
+ /* Calculate ECC */
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
+ chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
+
+ /*
+ * The performance is faster if we position offsets according to
+ * ecc.pos. Let's make sure that there are no gaps in ECC positions.
+ */
+ for (i = 0; i < eccfrag_len - 1; i++) {
+ if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
+ eccpos[i + start_step * chip->ecc.bytes + 1]) {
+ gaps = 1;
+ break;
+ }
+ }
+ if (gaps) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ } else {
+		/*
+		 * Send the command to read the particular ECC bytes; take
+		 * care of buswidth alignment in read_buf.
+		 */
+ index = start_step * chip->ecc.bytes;
+
+ aligned_pos = eccpos[index] & ~(busw - 1);
+ aligned_len = eccfrag_len;
+ if (eccpos[index] & (busw - 1))
+ aligned_len++;
+ if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
+ aligned_len++;
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + aligned_pos, -1);
+ chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
+ }
+
+ for (i = 0; i < eccfrag_len; i++)
+ chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
+
+ p = bufpoi + data_col_addr;
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p,
+ &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
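+
+/*
+ * Worked example for the step arithmetic above (hypothetical values):
+ * with ecc.size = 256, a 600 byte read at data_offs = 300 gives
+ * start_step = 300 / 256 = 1 and end_step = (300 + 600 - 1) / 256 = 3,
+ * so num_steps = 3 and datafrag_len = 3 * 256 = 768 bytes are fetched
+ * starting at column 256.
+ */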
+
+/**
+ * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers which need a special oob layout.
+ */
+static __maybe_unused int nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ }
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
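+
+/*
+ * Convention used by the read path above: chip->ecc.correct() returns
+ * the number of bitflips corrected in one ECC step, or a negative value
+ * if the step was uncorrectable. Uncorrectable steps bump
+ * mtd->ecc_stats.failed (nand_do_read_ops() turns that into -EBADMSG),
+ * while corrected counts feed max_bitflips, which is propagated to the
+ * MTD layer.
+ */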
+
+/**
+ * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Hardware ECC for large page chips which require the OOB to be read first.
+ * For this ECC mode, the write_page method is re-used from ECC_HW. These
+ * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
+ * support which, with multiple ECC steps, follows the "infix ECC" scheme and
+ * reads/writes ECC from the data area, overwriting the NAND manufacturer's
+ * bad block markings.
+ */
+static __maybe_unused int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ unsigned int max_bitflips = 0;
+
+ /* Read the OOB area first */
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static __maybe_unused int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ unsigned int max_bitflips = 0;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+
+ if (chip->ecc.prepad) {
+ chip->read_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
+ chip->read_buf(mtd, oob, eccbytes);
+ stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->read_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i)
+ chip->read_buf(mtd, oob, i);
+
+ return max_bitflips;
+}
+
+/**
+ * nand_transfer_oob - [INTERN] Transfer oob to client buffer
+ * @chip: nand chip structure
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
*/
-#ifdef CONFIG_NAND_READ_OOB
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
struct mtd_oob_ops *ops, size_t len)
{
- switch(ops->mode) {
+ switch (ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_RAW:
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
memcpy(oob, chip->oob_poi + ops->ooboffs, len);
return oob + len;
- case MTD_OOB_AUTO: {
+ case MTD_OPS_AUTO_OOB: {
struct nand_oobfree *free = chip->ecc.layout->oobfree;
uint32_t boffs = 0, roffs = ops->ooboffs;
size_t bytes = 0;
- for(; free->length && len; free++, len -= bytes) {
- /* Read request not from offset 0 ? */
+ for (; free->length && len; free++, len -= bytes) {
+ /* Read request not from offset 0? */
if (unlikely(roffs)) {
if (roffs >= free->length) {
roffs -= free->length;
@@ -645,29 +1321,29 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
}
return NULL;
}
-#endif
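+
+/*
+ * MTD_OPS_AUTO_OOB example for nand_transfer_oob() above (hypothetical
+ * layout): with oobfree = { {.offset = 2, .length = 4},
+ * {.offset = 8, .length = 8} }, a 6 byte read at ooboffs 0 copies OOB
+ * bytes 2..5 followed by bytes 8..9 into the client buffer, skipping the
+ * ECC and bad block marker bytes in between.
+ */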
/**
- * nand_do_read_ops - [Internal] Read data with ECC
- *
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob ops structure
+ * nand_do_read_ops - [INTERN] Read data with ECC
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob ops structure
*
* Internal function. Called with chip held.
*/
static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- int chipnr, page, realpage, col, bytes, aligned;
+ int chipnr, page, realpage, col, bytes, aligned, oob_required;
struct nand_chip *chip = mtd->priv;
struct mtd_ecc_stats stats;
- int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
- int sndcmd = 1;
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
+ uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
+ mtd->oobavail : mtd->oobsize;
+
uint8_t *bufpoi, *oob, *buf;
+ unsigned int max_bitflips = 0;
stats = mtd->ecc_stats;
@@ -681,60 +1357,70 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
buf = ops->datbuf;
oob = ops->oobbuf;
+ oob_required = oob ? 1 : 0;
- while(1) {
+ while (1) {
bytes = min(mtd->writesize - col, readlen);
aligned = (bytes == mtd->writesize);
- /* Is the current page in the buffer ? */
+ /* Is the current page in the buffer? */
if (realpage != chip->pagebuf || oob) {
bufpoi = aligned ? buf : chip->buffers->databuf;
- if (likely(sndcmd)) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- sndcmd = 0;
- }
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- /* Now read the page into the buffer */
- if (unlikely(ops->mode == MTD_OOB_RAW))
- ret = chip->ecc.read_page_raw(mtd, chip, bufpoi);
+ /*
+ * Now read the page into the buffer. Absent an error,
+ * the read methods return max bitflips per ecc step.
+ */
+ if (unlikely(ops->mode == MTD_OPS_RAW))
+ ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
+ oob_required,
+ page);
+ else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
+ !oob)
+ ret = chip->ecc.read_subpage(mtd, chip,
+ col, bytes, bufpoi);
else
- ret = chip->ecc.read_page(mtd, chip, bufpoi);
- if (ret < 0)
+ ret = chip->ecc.read_page(mtd, chip, bufpoi,
+ oob_required, page);
+ if (ret < 0) {
+ if (!aligned)
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
break;
+ }
+
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
/* Transfer not aligned data */
if (!aligned) {
- chip->pagebuf = realpage;
+ if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
+ !(mtd->ecc_stats.failed - stats.failed) &&
+ (ops->mode != MTD_OPS_RAW)) {
+ chip->pagebuf = realpage;
+ chip->pagebuf_bitflips = ret;
+ } else {
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+ }
memcpy(buf, chip->buffers->databuf + col, bytes);
}
buf += bytes;
-#ifdef CONFIG_NAND_READ_OOB
if (unlikely(oob)) {
- /* Raw mode does data:oob:data:oob */
- if (ops->mode != MTD_OOB_RAW) {
- int toread = min(oobreadlen,
- chip->ecc.layout->oobavail);
- if (toread) {
- oob = nand_transfer_oob(chip,
- oob, ops, toread);
- oobreadlen -= toread;
- }
- } else
- buf = nand_transfer_oob(chip,
- buf, ops, mtd->oobsize);
+ int toread = min(oobreadlen, max_oobsize);
+
+ if (toread) {
+ oob = nand_transfer_oob(chip,
+ oob, ops, toread);
+ oobreadlen -= toread;
+ }
}
-#endif
- if (!(chip->options & NAND_NO_READRDY)) {
- /*
- * Apply delay or wait for ready/busy pin. Do
- * this before the AUTOINCR check, so no
- * problems arise if a chip which does auto
- * increment is marked as NOAUTOINCR by the
- * board driver.
- */
+
+ if (chip->options & NAND_NEED_READRDY) {
+ /* Apply delay or wait for ready/busy pin */
if (!chip->dev_ready)
udelay(chip->chip_delay);
else
@@ -743,6 +1429,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
} else {
memcpy(buf, chip->buffers->databuf + col, bytes);
buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ chip->pagebuf_bitflips);
}
readlen -= bytes;
@@ -750,7 +1438,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
if (!readlen)
break;
- /* For subsequent reads align to page boundary. */
+ /* For subsequent reads align to page boundary */
col = 0;
/* Increment page address */
realpage++;
@@ -762,108 +1450,230 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, -1);
chip->select_chip(mtd, chipnr);
}
-
- /* Check, if the chip supports auto page increment
- * or if we have hit a block boundary.
- */
- if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
- sndcmd = 1;
}
+ chip->select_chip(mtd, -1);
ops->retlen = ops->len - (size_t) readlen;
if (oob)
ops->oobretlen = ops->ooblen - oobreadlen;
- if (ret)
+ if (ret < 0)
return ret;
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return 0;
+ return max_bitflips;
}
/**
- * nand_read - [MTD Interface] MTD compability function for nand_do_read_ecc
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
+ * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ops
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
*
- * Get hold of the chip and call nand_do_read
+ * Get hold of the chip and call nand_do_read_ops().
*/
static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, uint8_t *buf)
{
- struct nand_chip *chip = mtd->priv;
+ struct mtd_oob_ops ops;
int ret;
- /* Do not allow reads past end of device */
- if ((from + len) > mtd->size)
- return -EINVAL;
- if (!len)
- return 0;
+ nand_get_device(mtd, FL_READING);
+ ops.len = len;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ret = nand_do_read_ops(mtd, from, &ops);
+ *retlen = ops.retlen;
+ nand_release_device(mtd);
+ return ret;
+}
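+
+/*
+ * Sketch of a typical caller (hypothetical, not part of this file):
+ * reading one page through the MTD core ends up in nand_read() above
+ * via mtd->read():
+ *
+ *	size_t retlen;
+ *	void *data = xmalloc(mtd->writesize);
+ *	int ret = mtd->read(mtd, 0, mtd->writesize, &retlen, data);
+ *	if (ret < 0)
+ *		printf("read failed: %d\n", ret);
+ */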
+
+/**
+ * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+static __maybe_unused int nand_read_oob_std(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ if (!IS_ENABLED(CONFIG_NAND_READ_OOB))
+ return -ENOTSUPP;
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/**
+ * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
+ * with syndromes
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+static __maybe_unused int nand_read_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ uint8_t *buf = chip->oob_poi;
+ int length = mtd->oobsize;
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size;
+ uint8_t *bufpoi = buf;
+ int i, toread, sndrnd = 0, pos;
+
+ if (!IS_ENABLED(CONFIG_NAND_READ_OOB))
+ return -ENOTSUPP;
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ if (sndrnd) {
+ pos = eccsize + i * (eccsize + chunk);
+ if (mtd->writesize > 512)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
+ else
+ chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
+ } else
+ sndrnd = 1;
+ toread = min_t(int, length, chunk);
+ chip->read_buf(mtd, bufpoi, toread);
+ bufpoi += toread;
+ length -= toread;
+ }
+ if (length > 0)
+ chip->read_buf(mtd, bufpoi, length);
+
+ return 0;
+}
+
+/**
+ * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static __maybe_unused int nand_write_oob_std(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ int status = 0;
+ const uint8_t *buf = chip->oob_poi;
+ int length = mtd->oobsize;
- chip->ops.len = len;
- chip->ops.datbuf = buf;
- chip->ops.oobbuf = NULL;
+ if (!IS_ENABLED(CONFIG_NAND_READ_OOB) || !IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
- ret = nand_do_read_ops(mtd, from, &chip->ops);
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ chip->write_buf(mtd, buf, length);
+ /* Send command to program the OOB data */
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- *retlen = chip->ops.retlen;
+ status = chip->waitfunc(mtd, chip);
- return ret;
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
}
/**
- * nand_read_oob_std - [REPLACABLE] the most common OOB data read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to read
- * @sndcmd: flag whether to issue read command or not
+ * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
+ * with syndrome - only for large page flash
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
*/
-int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+static __maybe_unused int nand_write_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
{
- if (sndcmd) {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- sndcmd = 0;
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size, length = mtd->oobsize;
+ int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
+ const uint8_t *bufpoi = chip->oob_poi;
+
+ if (!IS_ENABLED(CONFIG_NAND_READ_OOB) || !IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ /*
+ * data-ecc-data-ecc ... ecc-oob
+ * or
+ * data-pad-ecc-pad-data-pad .... ecc-pad-oob
+ */
+ if (!chip->ecc.prepad && !chip->ecc.postpad) {
+ pos = steps * (eccsize + chunk);
+ steps = 0;
+ } else
+ pos = eccsize;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+ for (i = 0; i < steps; i++) {
+ if (sndcmd) {
+ if (mtd->writesize <= 512) {
+ uint32_t fill = 0xFFFFFFFF;
+
+ len = eccsize;
+ while (len > 0) {
+ int num = min_t(int, len, 4);
+ chip->write_buf(mtd, (uint8_t *)&fill,
+ num);
+ len -= num;
+ }
+ } else {
+ pos = eccsize + i * (eccsize + chunk);
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
+ }
+ } else
+ sndcmd = 1;
+ len = min_t(int, length, chunk);
+ chip->write_buf(mtd, bufpoi, len);
+ bufpoi += len;
+ length -= len;
}
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return sndcmd;
+ if (length > 0)
+ chip->write_buf(mtd, bufpoi, length);
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
}
/**
- * nand_do_read_oob - [Intern] NAND read out-of-band
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob operations description structure
+ * nand_do_read_oob - [INTERN] NAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operations description structure
*
- * NAND read out-of-band data from the spare area
+ * NAND read out-of-band data from the spare area.
*/
-#ifdef CONFIG_NAND_READ_OOB
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- int page, realpage, chipnr, sndcmd = 1;
+ int page, realpage, chipnr;
struct nand_chip *chip = mtd->priv;
- int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+ struct mtd_ecc_stats stats;
int readlen = ops->ooblen;
int len;
uint8_t *buf = ops->oobbuf;
+ int ret = 0;
+
+ if (!IS_ENABLED(CONFIG_NAND_READ_OOB))
+ return -ENOTSUPP;
- MTD_DEBUG(MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08Lx, len = %i\n",
- (unsigned long long)from, readlen);
+ pr_debug("%s: from = 0x%08Lx, len = %i\n",
+ __func__, (unsigned long long)from, readlen);
- if (ops->mode == MTD_OOB_AUTO)
+ stats = mtd->ecc_stats;
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
len = chip->ecc.layout->oobavail;
else
len = mtd->oobsize;
if (unlikely(ops->ooboffs >= len)) {
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
- "Attempt to start read outside oob\n");
+ pr_debug("%s: attempt to start read outside oob\n",
+ __func__);
return -EINVAL;
}
@@ -871,8 +1681,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
if (unlikely(from >= mtd->size ||
ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
(from >> chip->page_shift)) * len)) {
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
- "Attempt read beyond end of device\n");
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
return -EINVAL;
}
@@ -883,19 +1693,20 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
realpage = (int)(from >> chip->page_shift);
page = realpage & chip->pagemask;
- while(1) {
- sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
+ while (1) {
+ if (ops->mode == MTD_OPS_RAW)
+ ret = chip->ecc.read_oob_raw(mtd, chip, page);
+ else
+ ret = chip->ecc.read_oob(mtd, chip, page);
+
+ if (ret < 0)
+ break;
len = min(len, readlen);
buf = nand_transfer_oob(chip, buf, ops, len);
- if (!(chip->options & NAND_NO_READRDY)) {
- /*
- * Apply delay or wait for ready/busy pin. Do this
- * before the AUTOINCR check, so no problems arise if a
- * chip which does auto increment is marked as
- * NOAUTOINCR by the board driver.
- */
+ if (chip->options & NAND_NEED_READRDY) {
+ /* Apply delay or wait for ready/busy pin */
if (!chip->dev_ready)
udelay(chip->chip_delay);
else
@@ -916,44 +1727,51 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, -1);
chip->select_chip(mtd, chipnr);
}
-
- /* Check, if the chip supports auto page increment
- * or if we have hit a block boundary.
- */
- if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
- sndcmd = 1;
}
+ chip->select_chip(mtd, -1);
- ops->oobretlen = ops->ooblen;
- return 0;
+ ops->oobretlen = ops->ooblen - readlen;
+
+ if (ret < 0)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}
/**
* nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob operation description structure
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
*
- * NAND read data and/or out-of-band data
+ * NAND read data and/or out-of-band data.
*/
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- int ret = -ENOSYS;
+ int ret = -ENOTSUPP;
+
+ if (!IS_ENABLED(CONFIG_NAND_READ_OOB))
+ return -ENOTSUPP;
ops->retlen = 0;
/* Do not allow reads past end of device */
if (ops->datbuf && (from + ops->len) > mtd->size) {
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
- "Attempt read beyond end of device\n");
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
return -EINVAL;
}
- switch(ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_AUTO:
- case MTD_OOB_RAW:
+ nand_get_device(mtd, FL_READING);
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
break;
default:
@@ -965,28 +1783,914 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
else
ret = nand_do_read_ops(mtd, from, ops);
- out:
+out:
+ nand_release_device(mtd);
return ret;
}
-#endif
+
/**
- * nand_block_isbad - [MTD Interface] Check if block at offset is bad
- * @mtd: MTD device structure
- * @offs: offset relative to mtd start
+ * nand_write_page_raw - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
-int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+static __maybe_unused int nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
- /* Check for invalid offset */
- if (offs > mtd->size)
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ chip->write_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/**
+ * nand_write_page_raw_syndrome - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * We need a special oob layout and handling even when ECC isn't checked.
+ */
+static __maybe_unused int nand_write_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ chip->write_buf(mtd, buf, eccsize);
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ chip->write_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+		chip->write_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->write_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size)
+ chip->write_buf(mtd, oob, size);
+
+ return 0;
+}
+
+/**
+ * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static __maybe_unused int nand_write_page_swecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ const uint8_t *p = buf;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ /* Software ECC calculation */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ return chip->ecc.write_page_raw(mtd, chip, buf, 1);
+}
+
+/**
+ * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static __maybe_unused int nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ const uint8_t *p = buf;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->write_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ }
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/**
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @data_buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static __maybe_unused int nand_write_subpage_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *data_buf,
+ int oob_required)
+{
+ uint8_t *oob_buf = chip->oob_poi;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint32_t start_step = offset / ecc_size;
+ uint32_t end_step = (offset + data_len - 1) / ecc_size;
+ int oob_bytes = mtd->oobsize / ecc_steps;
+ int step, i;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* configure controller for WRITE access */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* write data (untouched subpages already masked by 0xFF) */
+ chip->write_buf(mtd, data_buf, ecc_size);
+
+ /* mask ECC of un-touched subpages by padding 0xFF */
+ if ((step < start_step) || (step > end_step))
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ chip->ecc.calculate(mtd, data_buf, ecc_calc);
+
+		/*
+		 * Mask OOB of un-touched subpages by padding 0xFF; if
+		 * oob_required, preserve the OOB metadata of the written
+		 * subpage.
+		 */
+ if (!oob_required || (step < start_step) || (step > end_step))
+ memset(oob_buf, 0xff, oob_bytes);
+
+ data_buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ oob_buf += oob_bytes;
+ }
+
+	/*
+	 * Copy the calculated ECC for the whole page to chip->oob_poi; this
+	 * includes the masked value (0xFF) for unwritten subpages.
+	 */
+ ecc_calc = chip->buffers->ecccalc;
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ /* write OOB buffer to NAND device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
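+/*
+ * Masking example for nand_write_subpage_hwecc() below (hypothetical
+ * geometry): with mtd->writesize = 2048 and ecc.size = 512 there are
+ * four ECC steps; writing 512 bytes at offset 1024 gives
+ * start_step = end_step = 2, so the ECC and OOB bytes of steps 0, 1 and
+ * 3 are padded with 0xFF and the flash keeps their previous contents.
+ */
+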
+/**
+ * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static __maybe_unused int nand_write_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->write_buf(mtd, p, eccsize);
+
+ if (chip->ecc.prepad) {
+ chip->write_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.calculate(mtd, p, oob);
+ chip->write_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->write_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i)
+ chip->write_buf(mtd, oob, i);
+
+ return 0;
+}
+
+/**
+ * nand_write_page - [REPLACEABLE] write one page
+ * @mtd: MTD device structure
+ * @chip: NAND chip descriptor
+ * @offset: address offset within the page
+ * @data_len: length of actual data to be written
+ * @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ * @cached: cached programming
+ * @raw: use _raw version of write_page
+ */
+static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
+{
+ int status, subpage;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+ chip->ecc.write_subpage)
+ subpage = offset || (data_len < mtd->writesize);
+ else
+ subpage = 0;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+
+ if (unlikely(raw))
+ status = chip->ecc.write_page_raw(mtd, chip, buf,
+ oob_required);
+ else if (subpage)
+ status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
+ buf, oob_required);
+ else
+ status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+
+ if (status < 0)
+ return status;
+
+	/*
+	 * Cached programming is disabled for now. It's not clear whether it
+	 * is worth the trouble; the speed gain is modest (2.3 -> 2.6 MiB/s).
+	 */
+ cached = 0;
+
+ if (!cached || !NAND_HAS_CACHEPROG(chip)) {
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ /*
+ * See if operation failed and additional status checks are
+ * available.
+ */
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_WRITING, status,
+ page);
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ }
+
+ return 0;
+}
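+
+/*
+ * Example of the subpage decision above: on a 2048 byte page, writing
+ * 2048 bytes at offset 0 takes the full-page ecc.write_page() path,
+ * while 512 bytes at offset 0 (or any write at a non-zero offset) takes
+ * ecc.write_subpage(), provided the chip allows subpage writes and the
+ * driver supplies the hook.
+ */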
+
+/**
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @mtd: MTD device structure
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
+ */
+static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ /*
+ * Initialise to all 0xFF, to avoid the possibility of left over OOB
+ * data from a previous OOB read.
+ */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(chip->oob_poi + ops->ooboffs, oob, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB: {
+ struct nand_oobfree *free = chip->ecc.layout->oobfree;
+ uint32_t boffs = 0, woffs = ops->ooboffs;
+ size_t bytes = 0;
+
+ for (; free->length && len; free++, len -= bytes) {
+ /* Write request not from offset 0? */
+ if (unlikely(woffs)) {
+ if (woffs >= free->length) {
+ woffs -= free->length;
+ continue;
+ }
+ boffs = free->offset + woffs;
+ bytes = min_t(size_t, len,
+ (free->length - woffs));
+ woffs = 0;
+ } else {
+ bytes = min_t(size_t, len, free->length);
+ boffs = free->offset;
+ }
+ memcpy(chip->oob_poi + boffs, oob, bytes);
+ oob += bytes;
+ }
+ return oob;
+ }
+ default:
+ BUG();
+ }
+ return NULL;
+}
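+
+/*
+ * Offset example for the MTD_OPS_AUTO_OOB branch above (hypothetical
+ * layout): with oobfree = { {.offset = 2, .length = 4},
+ * {.offset = 8, .length = 8} } and ops->ooboffs = 3, the first chunk
+ * lands at OOB byte 2 + 3 = 5 and is only 4 - 3 = 1 byte long; the
+ * remaining data continues at byte 8.
+ */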
+
+#define NOTALIGNED(x) (((x) & (chip->subpagesize - 1)) != 0)
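+
+/*
+ * Example: with chip->subpagesize = 512, NOTALIGNED(1024) evaluates to 0
+ * while NOTALIGNED(1030) is non-zero, i.e. 1030 is not subpage aligned.
+ */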
+
+/**
+ * nand_do_write_ops - [INTERN] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operations description structure
+ *
+ * NAND write with ECC.
+ */
+static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, realpage, page, blockmask, column;
+ struct nand_chip *chip = mtd->priv;
+ uint32_t writelen = ops->len;
+
+ uint32_t oobwritelen = ops->ooblen;
+ uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
+ mtd->oobavail : mtd->oobsize;
+
+ uint8_t *oob = ops->oobbuf;
+ uint8_t *buf = ops->datbuf;
+	int ret = 0;
+ int oob_required = oob ? 1 : 0;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ ops->retlen = 0;
+ if (!writelen)
+ return 0;
+
+ column = to & (mtd->writesize - 1);
+
+ chipnr = (int)(to >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ realpage = (int)(to >> chip->page_shift);
+ page = realpage & chip->pagemask;
+ blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+
+ /* Invalidate the page cache, when we write to the cached page */
+ if (to <= (chip->pagebuf << chip->page_shift) &&
+ (chip->pagebuf << chip->page_shift) < (to + ops->len))
+ chip->pagebuf = -1;
+
+ /* Don't allow multipage oob writes with offset */
+ if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ while (1) {
+ int bytes = mtd->writesize;
+ int cached = writelen > bytes && page != blockmask;
+ uint8_t *wbuf = buf;
+
+ /* Partial page write? */
+		if (unlikely(column || writelen < mtd->writesize)) {
+ cached = 0;
+ bytes = min_t(int, bytes - column, (int) writelen);
+ chip->pagebuf = -1;
+ memset(chip->buffers->databuf, 0xff, mtd->writesize);
+ memcpy(&chip->buffers->databuf[column], buf, bytes);
+ wbuf = chip->buffers->databuf;
+ }
+
+ if (unlikely(oob)) {
+ size_t len = min(oobwritelen, oobmaxlen);
+ oob = nand_fill_oob(mtd, oob, len, ops);
+ oobwritelen -= len;
+ } else {
+ /* We still need to erase leftover OOB data */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+ }
+
+ if (oob || !mtd_all_ff(wbuf, mtd->writesize)) {
+ ret = chip->write_page(mtd, chip, column, bytes, wbuf,
+ oob_required, page, cached,
+ (ops->mode == MTD_OPS_RAW));
+ if (ret)
+ break;
+ }
+
+ writelen -= bytes;
+ if (!writelen)
+ break;
+
+ column = 0;
+ buf += bytes;
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+ }
+
+ ops->retlen = ops->len - writelen;
+ if (unlikely(oob))
+ ops->oobretlen = ops->ooblen;
+
+err_out:
+ chip->select_chip(mtd, -1);
+ return ret;
+}
+
+/**
+ * nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC.
+ */
+static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ nand_get_device(mtd, FL_WRITING);
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.oobbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ret = nand_do_write_ops(mtd, to, &ops);
+ *retlen = ops.retlen;
+ nand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * nand_do_write_oob - [INTERN] NAND write out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * NAND write out-of-band.
+ */
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, page, status, len;
+ struct nand_chip *chip = mtd->priv;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ pr_debug("%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int)to, (int)ops->ooblen);
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ len = chip->ecc.layout->oobavail;
+ else
+ len = mtd->oobsize;
+
+ /* Do not allow write past end of page */
+ if ((ops->ooboffs + ops->ooblen) > len) {
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->ooboffs >= len)) {
+ pr_debug("%s: attempt to start write outside oob\n",
+ __func__);
return -EINVAL;
+ }
+
+ /* Do not allow write past end of device */
+ if (unlikely(to >= mtd->size ||
+ ops->ooboffs + ops->ooblen >
+ ((mtd->size >> chip->page_shift) -
+ (to >> chip->page_shift)) * len)) {
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ chipnr = (int)(to >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
+
+ /*
+ * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
+ * of my DiskOnChip 2000 test units) will clear the whole data page too
+ * if we don't do this. I have no clue why, but I seem to have 'fixed'
+ * it in the doc2000 driver in August 1999. dwmw2.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ chip->select_chip(mtd, -1);
+ return -EROFS;
+ }
+
+ /* Invalidate the page cache, if we write to the cached page */
+ if (page == chip->pagebuf)
+ chip->pagebuf = -1;
+
+ nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
+
+ if (ops->mode == MTD_OPS_RAW)
+ status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
+ else
+ status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
+
+ chip->select_chip(mtd, -1);
+ if (status)
+ return status;
+
+ ops->oobretlen = ops->ooblen;
+
+ return 0;
+}
+
+/**
+ * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ */
+static int nand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ret = -ENOTSUPP;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ ops->retlen = 0;
+
+ /* Do not allow writes past end of device */
+ if (ops->datbuf && (to + ops->len) > mtd->size) {
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ nand_get_device(mtd, FL_WRITING);
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = nand_do_write_oob(mtd, to, ops);
+ else
+ ret = nand_do_write_ops(mtd, to, ops);
+
+out:
+ nand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * single_erase_cmd - [GENERIC] NAND standard block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
+ *
+ * Standard erase command for NAND chips.
+ */
+static void single_erase_cmd(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return;
+
+ /* Send commands to erase a block */
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+}
+
+/**
+ * nand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ *
+ * Erase one or more blocks.
+ */
+static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ return nand_erase_nand(mtd, instr, 0);
+}
+
+/**
+ * nand_erase_nand - [INTERN] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
+ *
+ * Erase one or more blocks.
+ */
+int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
+ int allowbbt)
+{
+ int page, status, pages_per_block, ret, chipnr;
+ struct nand_chip *chip = mtd->priv;
+ loff_t len;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
+
+ if (check_offs_len(mtd, instr->addr, instr->len))
+ return -EINVAL;
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(mtd, FL_ERASING);
+
+ /* Shift to get first page */
+ page = (int)(instr->addr >> chip->page_shift);
+ chipnr = (int)(instr->addr >> chip->chip_shift);
+
+ /* Calculate pages in each block */
+ pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+
+ /* Select the NAND device */
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /* Loop through the pages */
+ len = instr->len;
+
+ instr->state = MTD_ERASING;
+
+ while (len) {
+ /* Check if we have a bad block, we do not erase bad blocks! */
+ if (!mtd->allow_erasebad &&
+ nand_block_checkbad(mtd, ((loff_t) page) <<
+ chip->page_shift, 0, allowbbt)) {
+ pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
+ __func__, page);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /*
+ * Invalidate the page cache, if we erase the block which
+ * contains the current cached page.
+ */
+ if (page <= chip->pagebuf && chip->pagebuf <
+ (page + pages_per_block))
+ chip->pagebuf = -1;
+
+ chip->erase_cmd(mtd, page & chip->pagemask);
+
+ status = chip->waitfunc(mtd, chip);
+
+ /*
+ * See if operation failed and additional status checks are
+ * available
+ */
+ if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+ status = chip->errstat(mtd, chip, FL_ERASING,
+ status, page);
+
+ /* See if block erase succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ pr_debug("%s: failed erase, page 0x%08x\n",
+ __func__, page);
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr =
+ ((loff_t)page << chip->page_shift);
+ goto erase_exit;
+ }
+
+ /* Increment page address and decrement length */
+ len -= (1 << chip->phys_erase_shift);
+ page += pages_per_block;
+
+ /* Check, if we cross a chip boundary */
+ if (len && !(page & chip->pagemask)) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+ }
+ instr->state = MTD_ERASE_DONE;
+
+erase_exit:
+
+ ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+
+ /* Deselect and wake up anyone waiting on the device */
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+
+ /* Do call back function */
+ if (!ret)
+ mtd_erase_callback(instr);
+
+ /* Return more or less happy */
+ return ret;
+}
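+
+/*
+ * Sketch of a typical erase request (hypothetical caller, not part of
+ * this file): erasing the first eraseblock ends up in nand_erase_nand()
+ * via mtd->erase():
+ *
+ *	struct erase_info instr = {};
+ *
+ *	instr.mtd = mtd;
+ *	instr.addr = 0;
+ *	instr.len = mtd->erasesize;
+ *	ret = mtd->erase(mtd, &instr);
+ */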
+
+/**
+ * nand_sync - [MTD Interface] sync
+ * @mtd: MTD device structure
+ *
+ * Sync is actually a wait for chip ready function.
+ */
+static void nand_sync(struct mtd_info *mtd)
+{
+ pr_debug("%s: called\n", __func__);
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(mtd, FL_SYNCING);
+ /* Release it and go back */
+ nand_release_device(mtd);
+}
+
+/**
+ * nand_block_isbad - [MTD Interface] Check if block at offset is bad
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
+ */
+static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
return nand_block_checkbad(mtd, offs, 1, 0);
}
-/*
- * Set default functions
+/**
+ * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
*/
+static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ return -ENOTSUPP;
+
+ ret = nand_block_isbad(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ return chip->block_markbad(mtd, ofs);
+}
+
+/**
+ * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four bytes array.
+ */
+static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, uint8_t *subfeature_param)
+{
+ int status;
+
+ if (!chip->onfi_version)
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
+ chip->write_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ return 0;
+}
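+
+/*
+ * Example: selecting an ONFI timing mode through the function above.
+ * Feature address 0x01 is the ONFI "Timing Mode" feature; whether the
+ * hook is wired up as chip->onfi_set_features is an assumption here.
+ *
+ *	uint8_t timing[ONFI_SUBFEATURE_PARAM_LEN] = { 3, 0, 0, 0 };
+ *
+ *	ret = chip->onfi_set_features(mtd, chip, 0x01, timing);
+ */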
+
+/**
+ * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four bytes array.
+ */
+static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, uint8_t *subfeature_param)
+{
+ if (!chip->onfi_version)
+ return -EINVAL;
+
+ /* clear the sub feature parameters */
+ memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
+
+ chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
+ chip->read_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
+ return 0;
+}
+
+/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip, int busw)
{
/* check for proper chip_delay setup, set 20us if not */
@@ -1003,7 +2707,7 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
if (!chip->select_chip)
chip->select_chip = nand_select_chip;
- if (!chip->read_byte || chip->read_byte == nand_read_byte)
+ if (!chip->read_byte)
chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
if (!chip->read_word)
chip->read_word = nand_read_word;
@@ -1012,13 +2716,11 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
#ifdef CONFIG_MTD_WRITE
if (!chip->block_markbad)
chip->block_markbad = nand_default_block_markbad;
- if (!chip->write_buf || chip->write_buf == nand_write_buf)
+ if (!chip->write_buf)
chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
#endif
- if (!chip->read_buf || chip->read_buf == nand_read_buf)
+ if (!chip->read_buf)
chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
- if (!chip->verify_buf || chip->verify_buf == nand_verify_buf)
- chip->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
#ifdef CONFIG_NAND_BBT
if (!chip->scan_bbt)
chip->scan_bbt = nand_default_bbt;
@@ -1029,23 +2731,21 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
}
-/*
- * sanitize ONFI strings so we can safely print them
- */
-static void sanitize_string(char *s, size_t len)
+/* Sanitize ONFI strings so we can safely print them */
+static void sanitize_string(uint8_t *s, size_t len)
{
ssize_t i;
- /* null terminate */
+ /* Null terminate */
s[len - 1] = 0;
- /* remove non printable chars */
+ /* Remove non printable chars */
for (i = 0; i < len - 1; i++) {
if (s[i] < ' ' || s[i] > 127)
s[i] = '?';
}
- /* remove trailing spaces */
+ /* Remove trailing spaces */
strim(s);
}
@@ -1062,7 +2762,7 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
}
/*
- * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise
+ * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
*/
static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
int *busw)
@@ -1071,7 +2771,12 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
int i;
int val;
- /* try ONFI for unknow chip or LP */
+	/*
+	 * ONFI must be probed in 8-bit mode; 16-bit mode should be selected
+	 * with NAND_BUSWIDTH_AUTO.
+	 */
+	if (chip->options & NAND_BUSWIDTH_16) {
+		pr_err("Trying ONFI probe in 16-bit mode, aborting!\n");
+		return 0;
+	}
+ /* Try ONFI for unknown chip or LP */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
@@ -1090,7 +2795,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
if (i == 3)
return 0;
- /* check version */
+ /* Check version */
val = le16_to_cpu(p->revision);
if (val & (1 << 5))
chip->onfi_version = 23;
@@ -1102,11 +2807,9 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
chip->onfi_version = 20;
else if (val & (1 << 1))
chip->onfi_version = 10;
- else
- chip->onfi_version = 0;
if (!chip->onfi_version) {
- pr_info("unsupported ONFI version: %d\n", val);
+ pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
return 0;
}
@@ -1123,10 +2826,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
if (le16_to_cpu(p->features) & 1)
*busw = NAND_BUSWIDTH_16;
- chip->options &= ~NAND_CHIPOPTIONS_MSK;
- chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
-
- pr_info("ONFI flash detected ...\n");
+ pr_info("ONFI flash detected\n");
return 1;
}
@@ -1371,15 +3071,40 @@ static void nand_decode_bbm_options(struct mtd_info *mtd,
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
}
+static inline bool is_full_id_nand(struct nand_flash_dev *type)
+{
+ return type->id_len;
+}
+
+static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
+ struct nand_flash_dev *type, u8 *id_data, int *busw)
+{
+ if (!strncmp(type->id, id_data, type->id_len)) {
+ mtd->writesize = type->pagesize;
+ mtd->erasesize = type->erasesize;
+ mtd->oobsize = type->oobsize;
+
+ chip->cellinfo = id_data[2];
+ chip->chipsize = (uint64_t)type->chipsize << 20;
+ chip->options |= type->options;
+
+ *busw = type->options & NAND_BUSWIDTH_16;
+
+ return true;
+ }
+ return false;
+}
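+
+/*
+ * A full-id table entry matched above looks roughly like this
+ * (hypothetical device; fields as used by find_full_id_nand()):
+ *
+ *	{ .name = "example 4GiB NAND", .id = { 0x2c, 0x68, 0x04, 0x4a },
+ *	  .id_len = 4, .chipsize = 4096, .pagesize = 4096,
+ *	  .erasesize = 512 * 1024, .oobsize = 224, .options = 0 },
+ */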
+
/*
- * Get the flash and manufacturer id and lookup if the type is supported
+ * Get the flash and manufacturer id and lookup if the type is supported.
*/
static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
struct nand_chip *chip,
- int busw, int *maf_id)
+ int busw,
+ int *maf_id, int *dev_id,
+ struct nand_flash_dev *type)
{
- struct nand_flash_dev *type = NULL;
- int i, dev_id, maf_idx;
+ int i, maf_idx;
u8 id_data[8];
/* Select the device */
@@ -1387,7 +3112,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/*
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
- * after power-up
+ * after power-up.
*/
chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
@@ -1396,9 +3121,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/* Read manufacturer and device IDs */
*maf_id = chip->read_byte(mtd);
- dev_id = chip->read_byte(mtd);
+ *dev_id = chip->read_byte(mtd);
- /* Try again to make sure, as some systems the bus-hold or other
+ /*
+	 * Try again to make sure, as on some systems the bus-hold or other
* interface concerns can cause random data which looks like a
* possibly credible NAND flash to appear. If the two results do
* not match, ignore the device completely.
@@ -1410,19 +3136,24 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
for (i = 0; i < 8; i++)
id_data[i] = chip->read_byte(mtd);
- if (id_data[0] != *maf_id || id_data[1] != dev_id) {
- pr_err("%s: second ID read did not match "
- "%02x,%02x against %02x,%02x\n", __func__,
- *maf_id, dev_id, id_data[0], id_data[1]);
+ if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
+ pr_info("%s: second ID read did not match "
+ "%02x,%02x against %02x,%02x\n", __func__,
+ *maf_id, *dev_id, id_data[0], id_data[1]);
return ERR_PTR(-ENODEV);
}
if (!type)
type = nand_flash_ids;
- for (; type->name != NULL; type++)
- if (dev_id == type->id)
- break;
+ for (; type->name != NULL; type++) {
+ if (is_full_id_nand(type)) {
+ if (find_full_id_nand(mtd, chip, type, id_data, &busw))
+ goto ident_done;
+ } else if (*dev_id == type->dev_id) {
+ break;
+ }
+ }
chip->onfi_version = 0;
if (!type->name || !type->pagesize) {
@@ -1439,7 +3170,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->chipsize = (uint64_t)type->chipsize << 20;
- if (!type->pagesize) {
+ if (!type->pagesize && chip->init_size) {
+ /* Set the pagesize, oobsize, erasesize by the driver */
+ busw = chip->init_size(mtd, chip, id_data);
+ } else if (!type->pagesize) {
/* Decode parameters from extended ID */
nand_decode_ext_id(mtd, chip, id_data, &busw);
} else {
@@ -1455,10 +3189,6 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
ident_done:
- /*
- * Set chip as a default. Board drivers can override it, if necessary
- */
- chip->options |= NAND_NO_AUTOINCR;
/* Try to identify manufacturer */
for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
@@ -1467,21 +3197,20 @@ ident_done:
}
if (chip->options & NAND_BUSWIDTH_AUTO) {
+ WARN_ON(chip->options & NAND_BUSWIDTH_16);
chip->options |= busw;
nand_set_defaults(chip, busw);
- if (chip->set_buswidth)
- chip->set_buswidth(mtd, chip, busw);
- } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
/*
* Check, if buswidth is correct. Hardware drivers should set
- * chip correct !
+ * chip correct!
*/
pr_info("NAND device: Manufacturer ID:"
" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
- dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
- pr_warning("NAND bus width %d instead %d bit\n",
- (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
- busw ? 16 : 8);
+ *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
+ pr_warn("NAND bus width %d instead %d bit\n",
+ (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
+ busw ? 16 : 8);
return ERR_PTR(-EINVAL);
}
@@ -1489,7 +3218,7 @@ ident_done:
/* Calculate the address shift from the page size */
chip->page_shift = ffs(mtd->writesize) - 1;
- /* Convert chipsize to number of pages per chip -1. */
+ /* Convert chipsize to number of pages per chip -1 */
chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
chip->bbt_erase_shift = chip->phys_erase_shift =
@@ -1501,74 +3230,72 @@ ident_done:
chip->chip_shift += 32 - 1;
}
- /* Set the bad block position */
- chip->badblockpos = mtd->writesize > 512 ?
- NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
+ chip->badblockbits = 8;
+ chip->erase_cmd = single_erase_cmd;
-#ifdef CONFIG_MTD_WRITE
- /* Check for AND chips with 4 page planes */
- if (chip->options & NAND_4PAGE_ARRAY)
- chip->erase_cmd = multi_erase_cmd;
- else
- chip->erase_cmd = single_erase_cmd;
-#endif
- /* Do not replace user supplied command function ! */
+ /* Do not replace user supplied command function! */
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
chip->cmdfunc = nand_command_lp;
- pr_notice("Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
- " page size: %d, OOB size: %d\n",
- *maf_id, dev_id, nand_manuf_ids[maf_idx].name,
+ pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
+ " %dMiB, page size: %d, OOB size: %d\n",
+ *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
chip->onfi_version ? chip->onfi_params.model : type->name,
- mtd->writesize, mtd->oobsize);
+ (int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize);
return type;
}
/**
* nand_scan_ident - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: Number of chips to scan for
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
+ * @table: alternative NAND ID table
*
- * This is the first phase of the normal nand_scan() function. It
- * reads the flash ID and sets up MTD fields accordingly.
+ * This is the first phase of the normal nand_scan() function. It reads the
+ * flash ID and sets up MTD fields accordingly.
*
* The mtd->owner field must be set to the module of the caller.
*/
-int nand_scan_ident(struct mtd_info *mtd, int maxchips)
+int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ struct nand_flash_dev *table)
{
- int i, busw, nand_maf_id;
+ int i, busw, nand_maf_id, nand_dev_id;
struct nand_chip *chip = mtd->priv;
struct nand_flash_dev *type;
- if (chip->options & NAND_BUSWIDTH_AUTO && !chip->set_buswidth) {
- pr_err("buswidth detection but no buswidth callback\n");
- return -EINVAL;
- }
-
/* Get buswidth to select the correct functions */
busw = chip->options & NAND_BUSWIDTH_16;
/* Set the default functions */
nand_set_defaults(chip, busw);
/* Read the flash type */
- type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
+ type = nand_get_flash_type(mtd, chip, busw,
+ &nand_maf_id, &nand_dev_id, table);
if (IS_ERR(type)) {
- pr_warning("No NAND device found (%ld)!\n", PTR_ERR(type));
+ if (!(chip->options & NAND_SCAN_SILENT_NODEV))
+ pr_warn("No NAND device found\n");
chip->select_chip(mtd, -1);
return PTR_ERR(type);
}
+ chip->select_chip(mtd, -1);
+
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
chip->select_chip(mtd, i);
+ /* See comment in nand_get_flash_type for reset */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
/* Send the command for reading device ID */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
/* Read manufacturer and device IDs */
if (nand_maf_id != chip->read_byte(mtd) ||
- type->id != chip->read_byte(mtd))
+ nand_dev_id != chip->read_byte(mtd)) {
+ chip->select_chip(mtd, -1);
break;
+ }
+ chip->select_chip(mtd, -1);
}
if (i > 1)
pr_info("%d NAND chips detected\n", i);
@@ -1579,39 +3306,26 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
return 0;
}
+EXPORT_SYMBOL(nand_scan_ident);
-static void __maybe_unused nand_check_hwecc(struct mtd_info *mtd, struct nand_chip *chip)
-{
- if ((!chip->ecc.calculate || !chip->ecc.correct ||
- !chip->ecc.hwctl) &&
- (!chip->ecc.read_page || !chip->ecc.write_page)) {
- pr_warning("No ECC functions supplied, "
- "Hardware ECC not possible\n");
- BUG();
- }
-
- if (mtd->writesize < chip->ecc.size) {
- pr_warning("%d byte HW ECC not possible on "
- "%d byte page size\n",
- chip->ecc.size, mtd->writesize);
- BUG();
- }
-}
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: Number of chips to scan for
+ * @mtd: MTD device structure
*
- * This is the second phase of the normal nand_scan() function. It
- * fills out all the uninitialized function pointers with the defaults
- * and scans for a bad block table if appropriate.
+ * This is the second phase of the normal nand_scan() function. It fills out
+ * all the uninitialized function pointers with the defaults and scans for a
+ * bad block table if appropriate.
*/
int nand_scan_tail(struct mtd_info *mtd)
{
int i;
struct nand_chip *chip = mtd->priv;
+ /* New bad blocks should be marked in OOB, flash-based BBT, or both */
+ BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+ !(chip->bbt_options & NAND_BBT_USE_FLASH));
+
if (!(chip->options & NAND_OWN_BUFFERS))
chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
if (!chip->buffers)
@@ -1621,9 +3335,9 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->oob_poi = chip->buffers->databuf + mtd->writesize;
/*
- * If no default placement scheme is given, select an appropriate one
+ * If no default placement scheme is given, select an appropriate one.
*/
- if (!chip->ecc.layout) {
+ if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
switch (mtd->oobsize) {
case 8:
chip->ecc.layout = &nand_oob_8;
@@ -1634,99 +3348,208 @@ int nand_scan_tail(struct mtd_info *mtd)
case 64:
chip->ecc.layout = &nand_oob_64;
break;
+ case 128:
+ chip->ecc.layout = &nand_oob_128;
+ break;
default:
- pr_warning("No oob scheme defined for "
- "oobsize %d\n", mtd->oobsize);
+ pr_warn("No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
BUG();
}
}
-#ifdef CONFIG_MTD_WRITE
if (!chip->write_page)
chip->write_page = nand_write_page;
-#endif
+
+ /* set for ONFI nand */
+ if (!chip->onfi_set_features)
+ chip->onfi_set_features = nand_onfi_set_features;
+ if (!chip->onfi_get_features)
+ chip->onfi_get_features = nand_onfi_get_features;
/*
- * check ECC mode, default to software if 3byte/512byte hardware ECC is
+ * Check ECC mode; if 3-byte/512-byte hardware ECC is selected but the
* page size is only 256 bytes, fall back to software ECC
*/
- if (!chip->ecc.read_page_raw)
- chip->ecc.read_page_raw = nand_read_page_raw;
-#ifdef CONFIG_MTD_WRITE
- if (!chip->ecc.write_page_raw)
- chip->ecc.write_page_raw = nand_write_page_raw;
-#endif
+
switch (chip->ecc.mode) {
+#ifdef CONFIG_NAND_ECC_HW_OOB_FIRST
+ case NAND_ECC_HW_OOB_FIRST:
+ /* Similar to NAND_ECC_HW, but a separate read_page handle */
+ if (!chip->ecc.calculate || !chip->ecc.correct ||
+ !chip->ecc.hwctl) {
+ pr_warn("No ECC functions supplied; "
+ "hardware ECC not possible\n");
+ BUG();
+ }
+ if (!chip->ecc.read_page)
+ chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+ if (!chip->ecc.write_page)
+ chip->ecc.write_page = nand_write_page_hwecc;
+ if (!chip->ecc.read_page_raw)
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ if (!chip->ecc.write_page_raw)
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ if (!chip->ecc.read_oob)
+ chip->ecc.read_oob = nand_read_oob_std;
+ if (!chip->ecc.write_oob)
+ chip->ecc.write_oob = nand_write_oob_std;
+ if (!chip->ecc.read_subpage)
+ chip->ecc.read_subpage = nand_read_subpage;
+ if (!chip->ecc.write_subpage)
+ chip->ecc.write_subpage = nand_write_subpage_hwecc;
+ break;
+#endif
#ifdef CONFIG_NAND_ECC_HW
case NAND_ECC_HW:
- nand_check_hwecc(mtd, chip);
- nand_init_ecc_hw(chip);
+ /* Use standard hwecc read page function? */
+ if (!chip->ecc.read_page)
+ chip->ecc.read_page = nand_read_page_hwecc;
+ if (!chip->ecc.write_page)
+ chip->ecc.write_page = nand_write_page_hwecc;
+ if (!chip->ecc.read_page_raw)
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ if (!chip->ecc.write_page_raw)
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ if (!chip->ecc.read_oob)
+ chip->ecc.read_oob = nand_read_oob_std;
+ if (!chip->ecc.write_oob)
+ chip->ecc.write_oob = nand_write_oob_std;
+ if (!chip->ecc.read_subpage)
+ chip->ecc.read_subpage = nand_read_subpage;
+ if (!chip->ecc.write_subpage)
+ chip->ecc.write_subpage = nand_write_subpage_hwecc;
break;
#endif
#ifdef CONFIG_NAND_ECC_HW_SYNDROME
case NAND_ECC_HW_SYNDROME:
- nand_check_hwecc(mtd, chip);
- nand_init_ecc_hw_syndrome(chip);
+ /* Use standard syndrome read/write page function? */
+ if (!chip->ecc.read_page)
+ chip->ecc.read_page = nand_read_page_syndrome;
+ if (!chip->ecc.write_page)
+ chip->ecc.write_page = nand_write_page_syndrome;
+ if (!chip->ecc.read_page_raw)
+ chip->ecc.read_page_raw = nand_read_page_raw_syndrome;
+ if (!chip->ecc.write_page_raw)
+ chip->ecc.write_page_raw = nand_write_page_raw_syndrome;
+ if (!chip->ecc.read_oob)
+ chip->ecc.read_oob = nand_read_oob_syndrome;
+ if (!chip->ecc.write_oob)
+ chip->ecc.write_oob = nand_write_oob_syndrome;
break;
#endif
#ifdef CONFIG_NAND_ECC_SOFT
case NAND_ECC_SOFT:
- nand_init_ecc_soft(chip);
+ chip->ecc.calculate = nand_calculate_ecc;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.read_page = nand_read_page_swecc;
+ chip->ecc.read_subpage = nand_read_subpage;
+ chip->ecc.write_page = nand_write_page_swecc;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ chip->ecc.read_oob = nand_read_oob_std;
+ chip->ecc.write_oob = nand_write_oob_std;
+ if (!chip->ecc.size)
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
+ break;
+#endif
+#ifdef CONFIG_NAND_ECC_BCH
+ case NAND_ECC_SOFT_BCH:
+ if (!mtd_nand_has_bch()) {
+ pr_warn("CONFIG_MTD_ECC_BCH not enabled\n");
+ BUG();
+ }
+ chip->ecc.calculate = nand_bch_calculate_ecc;
+ chip->ecc.correct = nand_bch_correct_data;
+ chip->ecc.read_page = nand_read_page_swecc;
+ chip->ecc.read_subpage = nand_read_subpage;
+ chip->ecc.write_page = nand_write_page_swecc;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ chip->ecc.read_oob = nand_read_oob_std;
+ chip->ecc.write_oob = nand_write_oob_std;
+ /*
+ * Board driver should supply ecc.size and ecc.bytes values to
+ * select how many bits are correctable; see nand_bch_init()
+ * for details. Otherwise, default to 4 bits for large page
+ * devices.
+ */
+ if (!chip->ecc.size && (mtd->oobsize >= 64)) {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 7;
+ }
+ chip->ecc.priv = nand_bch_init(mtd,
+ chip->ecc.size,
+ chip->ecc.bytes,
+ &chip->ecc.layout);
+ if (!chip->ecc.priv) {
+ pr_warn("BCH ECC initialization failed!\n");
+ BUG();
+ }
+ chip->ecc.strength =
+ chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
break;
#endif
#ifdef CONFIG_NAND_ECC_NONE
case NAND_ECC_NONE:
- pr_warning("NAND_ECC_NONE selected by board driver. "
- "This is not recommended !!\n");
+ pr_warn("NAND_ECC_NONE selected by board driver. "
+ "This is not recommended!\n");
chip->ecc.read_page = nand_read_page_raw;
-#ifdef CONFIG_MTD_WRITE
chip->ecc.write_page = nand_write_page_raw;
- chip->ecc.write_oob = nand_write_oob_std;
-#endif
chip->ecc.read_oob = nand_read_oob_std;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ chip->ecc.write_oob = nand_write_oob_std;
chip->ecc.size = mtd->writesize;
chip->ecc.bytes = 0;
+ chip->ecc.strength = 0;
break;
#endif
default:
- pr_warning("Invalid NAND_ECC_MODE %d\n",
- chip->ecc.mode);
+ pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode);
BUG();
}
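The ecc.strength line in the BCH case rewards a worked example: fls() returns the 1-based index of the highest set bit, so for the 512-byte/7-byte default above, fls(8 * 512) = fls(4096) = 13, and the strength comes out as 7 * 8 / 13 = 4 correctable bits per step. A standalone sketch with a plain-C stand-in for fls():

```c
#include <stdio.h>

/* Rough stand-in for the kernel's fls(): 1-based index of the highest
 * set bit; fls(0) == 0. */
static int fls_sketch(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int ecc_size = 512;	/* data bytes per ECC step */
	unsigned int ecc_bytes = 7;	/* ECC bytes per step */

	/* Same formula as nand_scan_tail(): fls(8 * 512) == 13, so
	 * 7 * 8 / 13 == 4 correctable bits per 512-byte step. */
	unsigned int strength = ecc_bytes * 8 / fls_sketch(8 * ecc_size);

	printf("BCH strength: %u bit(s) per %u-byte step\n",
	       strength, ecc_size);
	return 0;
}
```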
+ /* For many systems, the standard OOB write also works for raw */
+ if (!chip->ecc.read_oob_raw)
+ chip->ecc.read_oob_raw = chip->ecc.read_oob;
+ if (!chip->ecc.write_oob_raw)
+ chip->ecc.write_oob_raw = chip->ecc.write_oob;
+
/*
* The number of bytes available for a client to place data into
- * the out of band area
+ * the out of band area.
*/
chip->ecc.layout->oobavail = 0;
- for (i = 0; chip->ecc.layout->oobfree[i].length; i++)
+ for (i = 0; chip->ecc.layout->oobfree[i].length
+ && i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++)
chip->ecc.layout->oobavail +=
chip->ecc.layout->oobfree[i].length;
mtd->oobavail = chip->ecc.layout->oobavail;
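The oobavail accumulation above simply sums the oobfree entries of whatever layout ended up in chip->ecc.layout, now with an added bound on i so a layout filling every slot cannot run past the array. A standalone sketch with cut-down structures (the layout numbers are invented for the example; the bounds check is placed first here, which is the safer ordering):

```c
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct oobfree_sketch { unsigned offset, length; };

int main(void)
{
	/* Hypothetical free regions in a 64-byte OOB: bytes 2..39 are
	 * usable, 0/1 hold the bad block marker, 40..63 hold ECC. */
	struct oobfree_sketch oobfree[8] = {
		{ .offset = 2, .length = 38 },
	};
	unsigned oobavail = 0;
	unsigned i;

	/* Same loop shape as nand_scan_tail(): stop at the first
	 * zero-length entry or at the end of the array. */
	for (i = 0; i < ARRAY_SIZE(oobfree) && oobfree[i].length; i++)
		oobavail += oobfree[i].length;

	printf("oobavail = %u bytes\n", oobavail);	/* 38 */
	return 0;
}
```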
/*
* Set the number of read / write steps for one page depending on ECC
- * mode
+ * mode.
*/
chip->ecc.steps = mtd->writesize / chip->ecc.size;
- if(chip->ecc.steps * chip->ecc.size != mtd->writesize) {
- pr_warning("Invalid ecc parameters\n");
+ if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
+ pr_warn("Invalid ECC parameters\n");
BUG();
}
chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
- /*
- * Allow subpage writes up to ecc.steps. Not possible for MLC
- * FLASH.
- */
+ /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
!(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
- switch(chip->ecc.steps) {
+ switch (chip->ecc.steps) {
case 2:
mtd->subpage_sft = 1;
break;
case 4:
case 8:
+ case 16:
mtd->subpage_sft = 2;
break;
}
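subpage_sft expresses how finely a page may be split for partial writes: a shift of 2 means 2^2 subpages, so the smallest programmable unit is writesize >> 2. A quick standalone check of the mapping above, using example geometry:

```c
#include <stdio.h>

int main(void)
{
	unsigned int writesize = 2048;	/* example large page */
	unsigned int ecc_size = 512;
	unsigned int steps = writesize / ecc_size;	/* 4 */
	unsigned int sft;

	/* Mirror of the switch in nand_scan_tail() */
	switch (steps) {
	case 2:
		sft = 1;
		break;
	case 4:
	case 8:
	case 16:
		sft = 2;
		break;
	default:
		sft = 0;
		break;
	}

	printf("%u ECC steps -> subpage_sft=%u -> min write %u bytes\n",
	       steps, sft, writesize >> sft);	/* 4 -> 2 -> 512 */
	return 0;
}
```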
@@ -1736,87 +3559,94 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Initialize state */
chip->state = FL_READY;
- /* De-select the device */
- chip->select_chip(mtd, -1);
-
/* Invalidate the pagebuffer reference */
chip->pagebuf = -1;
+ /* Large page NAND with SOFT_ECC should support subpage reads */
+ if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
+ chip->options |= NAND_SUBPAGE_READ;
+
/* Fill in remaining MTD driver data */
mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
-#ifdef CONFIG_MTD_WRITE
+ mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
+ MTD_CAP_NANDFLASH;
mtd->erase = nand_erase;
- mtd->write = nand_write;
- mtd->write_oob = nand_write_oob;
-#endif
mtd->read = nand_read;
-#ifdef CONFIG_NAND_READ_OOB
+ mtd->write = nand_write;
mtd->read_oob = nand_read_oob;
-#endif
+ mtd->write_oob = nand_write_oob;
+ mtd->sync = nand_sync;
mtd->lock = NULL;
mtd->unlock = NULL;
mtd->block_isbad = nand_block_isbad;
-#ifdef CONFIG_MTD_WRITE
mtd->block_markbad = nand_block_markbad;
-#endif
- /* propagate ecc.layout to mtd_info */
+ mtd->writebufsize = mtd->writesize;
+
+ /* propagate ecc info to mtd_info */
mtd->ecclayout = chip->ecc.layout;
+ mtd->ecc_strength = chip->ecc.strength;
+ /*
+ * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
+ * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
+ * properly set.
+ */
+ if (!mtd->bitflip_threshold)
+ mtd->bitflip_threshold = mtd->ecc_strength;
/* Check, if we should skip the bad block table scan */
if (chip->options & NAND_SKIP_BBTSCAN)
return 0;
-#ifdef CONFIG_NAND_BBT
+
/* Build bad block table */
return chip->scan_bbt(mtd);
-#else
- return 0;
-#endif
}
+EXPORT_SYMBOL(nand_scan_tail);
/**
* nand_scan - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: Number of chips to scan for
- *
- * This fills out all the uninitialized function pointers
- * with the defaults.
- * The flash ID is read and the mtd/chip structures are
- * filled with the appropriate values.
- * The mtd->owner field must be set to the module of the caller
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
*
+ * This fills out all the uninitialized function pointers with the defaults.
+ * The flash ID is read and the mtd/chip structures are filled with the
+ * appropriate values. The mtd->owner field must be set to the module of the
+ * caller.
*/
int nand_scan(struct mtd_info *mtd, int maxchips)
{
int ret;
- ret = nand_scan_ident(mtd, maxchips);
+ ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
return ret;
}
+EXPORT_SYMBOL(nand_scan);
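With the table parameter added to nand_scan_ident(), the two-phase variant lets a board driver adjust ECC settings between identification and the tail scan. A minimal sketch of that pattern (barebox/kernel context assumed; my_setup_ecc() is a hypothetical board hook invented for the example, not part of this patch):

```c
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

/* Placeholder board hook, invented for this sketch */
static int my_setup_ecc(struct nand_chip *chip, int writesize, int oobsize)
{
	chip->ecc.mode = NAND_ECC_SOFT;	/* e.g. fall back to soft ECC */
	return 0;
}

/* Hypothetical board probe: identify first, tweak ECC, then finish */
static int my_nand_probe(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	int ret;

	/* Phase 1: read the flash ID and fill in the geometry
	 * (NULL = use the built-in ID table). */
	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;

	/* Geometry is now known; pick ECC to match the page size */
	ret = my_setup_ecc(chip, mtd->writesize, mtd->oobsize);
	if (ret)
		return ret;

	/* Phase 2: install defaults and build the BBT */
	return nand_scan_tail(mtd);
}
```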
/**
* nand_release - [NAND Interface] Free resources held by the NAND device
- * @mtd: MTD device structure
-*/
+ * @mtd: MTD device structure
+ */
void nand_release(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- /* Deregister the device */
+ if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
+ nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
+
del_mtd_device(mtd);
/* Free bad block table memory */
kfree(chip->bbt);
if (!(chip->options & NAND_OWN_BUFFERS))
kfree(chip->buffers);
-}
-EXPORT_SYMBOL(nand_scan);
-EXPORT_SYMBOL(nand_scan_ident);
-EXPORT_SYMBOL(nand_scan_tail);
-EXPORT_SYMBOL(nand_release);
+ /* Free bad block descriptor memory */
+ if (chip->badblock_pattern && chip->badblock_pattern->options
+ & NAND_BBT_DYNAMICSTRUCT)
+ kfree(chip->badblock_pattern);
+}
+EXPORT_SYMBOL_GPL(nand_release);
static int mtd_set_erasebad(struct param_d *param, void *priv)
{
@@ -1869,5 +3699,3 @@ int add_mtd_nand_device(struct mtd_info *mtd, char *devname)
return ret;
}
-
-#endif /* DOXYGEN_SHOULD_SKIP_THIS */
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index ba51e0bfe0..b0548a3e96 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -4,9 +4,7 @@
* Overview:
* Bad block table support for the NAND driver
*
- * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
- *
- * $Id: nand_bbt.c,v 1.36 2005/11/07 11:14:30 gleixner Exp $
+ * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -15,28 +13,37 @@
* Description:
*
* When nand_scan_bbt is called, then it tries to find the bad block table
- * depending on the options in the bbt descriptor(s). If a bbt is found
- * then the contents are read and the memory based bbt is created. If a
- * mirrored bbt is selected then the mirror is searched too and the
- * versions are compared. If the mirror has a greater version number
- * than the mirror bbt is used to build the memory based bbt.
+ * depending on the options in the BBT descriptor(s). If no flash based BBT
+ * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
+ * marked good / bad blocks. This information is used to create a memory BBT.
+ * Once a new bad block is discovered then the "factory" information is updated
+ * on the device.
+ * If a flash based BBT is specified then the function first tries to find the
+ * BBT on flash. If a BBT is found then the contents are read and the memory
+ * based BBT is created. If a mirrored BBT is selected then the mirror is
+ * searched too and the versions are compared. If the mirror has a greater
+ * version number, then the mirror BBT is used to build the memory based BBT.
* If the tables are not versioned, then we "or" the bad block information.
- * If one of the bbt's is out of date or does not exist it is (re)created.
- * If no bbt exists at all then the device is scanned for factory marked
+ * If one of the BBTs is out of date or does not exist it is (re)created.
+ * If no BBT exists at all then the device is scanned for factory marked
* good / bad blocks and the bad block tables are created.
*
- * For manufacturer created bbts like the one found on M-SYS DOC devices
- * the bbt is searched and read but never created
+ * For manufacturer created BBTs like the one found on M-SYS DOC devices
+ * the BBT is searched and read but never created
*
- * The autogenerated bad block table is located in the last good blocks
+ * The auto generated bad block table is located in the last good blocks
* of the device. The table is mirrored, so it can be updated eventually.
- * The table is marked in the oob area with an ident pattern and a version
- * number which indicates which of both tables is more up to date.
+ * The table is marked in the OOB area with an ident pattern and a version
+ * number which indicates which of both tables is more up to date. If the NAND
+ * controller needs the complete OOB area for the ECC information then the
+ * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
+ * course): it moves the ident pattern and the version byte into the data area
+ * and the OOB area will remain untouched.
*
* The table uses 2 bits per block
- * 11b: block is good
- * 00b: block is factory marked bad
- * 01b, 10b: block is marked bad due to wear
+ * 11b: block is good
+ * 00b: block is factory marked bad
+ * 01b, 10b: block is marked bad due to wear
*
* The memory bad block table uses the following scheme:
* 00b: block is good
@@ -52,8 +59,6 @@
*
*/
-#define pr_fmt(fmt) "nand: " fmt
-
#include <common.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
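The 2-bits-per-block packing described in the header comment above is compact enough to sketch standalone: block b lives in bbt[b >> 2] at bit offset (b & 3) * 2. (The driver itself often walks the table with a doubled index, which is why expressions like i >> 3 and i & 0x6 appear later in this file.) A rough illustration of the in-memory scheme:

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Memory BBT scheme from the comment above:
 * 00b good, 01b worn bad, 10b reserved, 11b factory bad. */
enum {
	BBT_GOOD	= 0x0,
	BBT_WORN	= 0x1,
	BBT_RESERVED	= 0x2,
	BBT_FACTORY_BAD	= 0x3,
};

static void bbt_set(uint8_t *bbt, unsigned block, uint8_t mark)
{
	unsigned shift = (block & 0x3) * 2;

	bbt[block >> 2] &= ~(0x3 << shift);
	bbt[block >> 2] |= mark << shift;
}

static int bbt_get(const uint8_t *bbt, unsigned block)
{
	return (bbt[block >> 2] >> ((block & 0x3) * 2)) & 0x3;
}

int main(void)
{
	uint8_t bbt[256 / 4];	/* 256 blocks, 2 bits each */

	memset(bbt, 0, sizeof(bbt));	/* all good */
	bbt_set(bbt, 5, BBT_FACTORY_BAD);
	bbt_set(bbt, 42, BBT_RESERVED);

	printf("block 5: %d, block 42: %d, block 7: %d\n",
	       bbt_get(bbt, 5), bbt_get(bbt, 42), bbt_get(bbt, 7));
	return 0;
}
```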
@@ -64,109 +69,139 @@
#include <errno.h>
#include <malloc.h>
-#ifndef DOXYGEN_SHOULD_SKIP_THIS
+static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ if (memcmp(buf, td->pattern, td->len))
+ return -1;
+ return 0;
+}
/**
* check_pattern - [GENERIC] check if a pattern is in the buffer
- * @buf: the buffer to search
- * @len: the length of buffer to search
- * @paglen: the pagelength
- * @td: search pattern descriptor
- *
- * Check for a pattern at the given place. Used to search bad block
- * tables and good / bad block identifiers.
- * If the SCAN_EMPTY option is set then check, if all bytes except the
- * pattern area contain 0xff
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
*
-*/
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. If the SCAN_EMPTY option is set then check, if
+ * all bytes except the pattern area contain 0xff.
+ */
static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
{
- int i, end = 0;
+ int end = 0;
uint8_t *p = buf;
+ if (td->options & NAND_BBT_NO_OOB)
+ return check_pattern_no_oob(buf, td);
+
end = paglen + td->offs;
- if (td->options & NAND_BBT_SCANEMPTY) {
- for (i = 0; i < end; i++) {
- if (p[i] != 0xff)
- return -1;
- }
- }
+ if (td->options & NAND_BBT_SCANEMPTY)
+ if (memchr_inv(p, 0xff, end))
+ return -1;
p += end;
/* Compare the pattern */
- for (i = 0; i < td->len; i++) {
- if (p[i] != td->pattern[i])
- return -1;
- }
+ if (memcmp(p, td->pattern, td->len))
+ return -1;
if (td->options & NAND_BBT_SCANEMPTY) {
p += td->len;
end += td->len;
- for (i = end; i < len; i++) {
- if (*p++ != 0xff)
- return -1;
- }
+ if (memchr_inv(p, 0xff, len - end))
+ return -1;
}
return 0;
}
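check_pattern() now leans on memcmp() and the kernel helper memchr_inv(), which returns a pointer to the first byte that differs from the given value, or NULL when the whole range matches. A naive standalone equivalent, just to pin down the semantics (not the kernel's optimized implementation):

```c
#include <stdio.h>
#include <string.h>

/* Rough stand-in for the kernel's memchr_inv(): first byte != c,
 * or NULL when the whole buffer consists of c. */
static void *memchr_inv_sketch(const void *start, int c, size_t bytes)
{
	const unsigned char *p = start;
	size_t i;

	for (i = 0; i < bytes; i++)
		if (p[i] != (unsigned char)c)
			return (void *)&p[i];
	return NULL;
}

int main(void)
{
	unsigned char erased[8], oob[8];

	memset(erased, 0xff, sizeof(erased));
	memset(oob, 0xff, sizeof(oob));
	oob[5] = 0x00;	/* a bad block marker */

	/* An erased area scans clean; the marked one does not */
	printf("erased: %s\n",
	       memchr_inv_sketch(erased, 0xff, 8) ? "dirty" : "clean");
	printf("marked: %s\n",
	       memchr_inv_sketch(oob, 0xff, 8) ? "dirty" : "clean");
	return 0;
}
```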
/**
* check_short_pattern - [GENERIC] check if a pattern is in the buffer
- * @buf: the buffer to search
- * @td: search pattern descriptor
- *
- * Check for a pattern at the given place. Used to search bad block
- * tables and good / bad block identifiers. Same as check_pattern, but
- * no optional empty check
+ * @buf: the buffer to search
+ * @td: search pattern descriptor
*
-*/
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. Same as check_pattern, but no optional empty
+ * check.
+ */
static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
{
- int i;
- uint8_t *p = buf;
-
/* Compare the pattern */
- for (i = 0; i < td->len; i++) {
- if (p[td->offs + i] != td->pattern[i])
- return -1;
- }
+ if (memcmp(buf + td->offs, td->pattern, td->len))
+ return -1;
return 0;
}
/**
+ * add_marker_len - compute the length of the marker in data area
+ * @td: BBT descriptor used for computation
+ *
+ * The length will be 0 if the marker is located in the OOB area.
+ */
+static u32 add_marker_len(struct nand_bbt_descr *td)
+{
+ u32 len;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ return 0;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+ return len;
+}
+
+/**
* read_bbt - [GENERIC] Read the bad block table starting from page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @page: the starting page
- * @num: the number of bbt descriptors to read
- * @bits: number of bits per block
- * @offs: offset in the memory table
- * @reserved_block_code: Pattern to identify reserved blocks
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt description table
+ * @offs: offset in the memory table
*
* Read the bad block table starting from page.
- *
*/
static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
- int bits, int offs, int reserved_block_code)
+ struct nand_bbt_descr *td, int offs)
{
- int res, i, j, act = 0;
+ int res, ret = 0, i, j, act = 0;
struct nand_chip *this = mtd->priv;
size_t retlen, len, totlen;
loff_t from;
- uint8_t msk = (uint8_t) ((1 << bits) - 1);
+ int bits = td->options & NAND_BBT_NRBITS_MSK;
+ uint8_t msk = (uint8_t)((1 << bits) - 1);
+ u32 marker_len;
+ int reserved_block_code = td->reserved_block_code;
totlen = (num * bits) >> 3;
- from = ((loff_t) page) << this->page_shift;
+ marker_len = add_marker_len(td);
+ from = ((loff_t)page) << this->page_shift;
while (totlen) {
- len = min(totlen, (size_t) (1 << this->bbt_erase_shift));
+ len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
+ if (marker_len) {
+ /*
+ * In case the BBT marker is not in the OOB area it
+ * will be just in the first page.
+ */
+ len -= marker_len;
+ from += marker_len;
+ marker_len = 0;
+ }
res = mtd_read(mtd, from, len, &retlen, buf);
if (res < 0) {
- if (retlen != len) {
- pr_info("nand_bbt: Error reading bad block table\n");
+ if (mtd_is_eccerr(res)) {
+ pr_info("nand_bbt: ECC error in BBT at "
+ "0x%012llx\n", from & ~mtd->writesize);
+ return res;
+ } else if (mtd_is_bitflip(res)) {
+ pr_info("nand_bbt: corrected error in BBT at "
+ "0x%012llx\n", from & ~mtd->writesize);
+ ret = res;
+ } else {
+ pr_info("nand_bbt: error reading BBT\n");
return res;
}
- pr_warning("nand_bbt: ECC error while reading bad block table\n");
}
/* Analyse data */
@@ -177,17 +212,15 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
if (tmp == msk)
continue;
if (reserved_block_code && (tmp == reserved_block_code)) {
- pr_debug("nand_read_bbt: Reserved block at 0x%08x\n",
- ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
+ (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
mtd->ecc_stats.bbtblocks++;
continue;
}
- /* Leave it for now, if its matured we can move this
- * message to MTD_DEBUG_LEVEL0 */
- pr_debug("nand_read_bbt: Bad block at 0x%08x\n",
- ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
- /* Factory marked bad or worn out ? */
+ pr_debug("nand_read_bbt: bad block at 0x%012llx\n",
+ (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ /* Factory marked bad or worn out? */
if (tmp == 0)
this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
else
@@ -198,143 +231,190 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
totlen -= len;
from += len;
}
- return 0;
+ return ret;
}
/**
* read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @chip: read the table for a specific chip, -1 read all chips.
- * Applies only if NAND_BBT_PERCHIP option is set
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips; applies only if
+ * NAND_BBT_PERCHIP option is set
*
- * Read the bad block table for all chips starting at a given page
- * We assume that the bbt bits are in consecutive order.
-*/
+ * Read the bad block table for all chips starting at a given page. We assume
+ * that the bbt bits are in consecutive order.
+ */
static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
{
struct nand_chip *this = mtd->priv;
int res = 0, i;
- int bits;
- bits = td->options & NAND_BBT_NRBITS_MSK;
if (td->options & NAND_BBT_PERCHIP) {
int offs = 0;
for (i = 0; i < this->numchips; i++) {
if (chip == -1 || chip == i)
- res = read_bbt (mtd, buf, td->pages[i], this->chipsize >> this->bbt_erase_shift, bits, offs, td->reserved_block_code);
+ res = read_bbt(mtd, buf, td->pages[i],
+ this->chipsize >> this->bbt_erase_shift,
+ td, offs);
if (res)
return res;
offs += this->chipsize >> (this->bbt_erase_shift + 2);
}
} else {
- res = read_bbt (mtd, buf, td->pages[0], mtd->size >> this->bbt_erase_shift, bits, 0, td->reserved_block_code);
+ res = read_bbt(mtd, buf, td->pages[0],
+ mtd->size >> this->bbt_erase_shift, td, 0);
if (res)
return res;
}
return 0;
}
-/*
- * Scan read raw data from flash
+/* BBT marker is in the first page, no OOB */
+static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ struct nand_bbt_descr *td)
+{
+ size_t retlen;
+ size_t len;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+
+ return mtd_read(mtd, offs, len, &retlen, buf);
+}
+
+/**
+ * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @offs: offset at which to scan
+ * @len: length of data region to read
+ *
+ * Scan read data from data+OOB. May traverse multiple pages, interleaving
+ * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
+ * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
*/
-static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_oob_ops ops;
+ int res, ret = 0;
- ops.mode = MTD_OOB_RAW;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
- ops.oobbuf = buf;
- ops.datbuf = buf;
- ops.len = len;
- return mtd->read_oob(mtd, offs, &ops);
+ while (len > 0) {
+ ops.datbuf = buf;
+ ops.len = min(len, (size_t)mtd->writesize);
+ ops.oobbuf = buf + ops.len;
+
+ res = mtd_read_oob(mtd, offs, &ops);
+ if (res) {
+ if (!mtd_is_bitflip_or_eccerr(res))
+ return res;
+ else if (mtd_is_eccerr(res) || !ret)
+ ret = res;
+ }
+
+ buf += mtd->oobsize + mtd->writesize;
+ len -= mtd->writesize;
+ offs += mtd->writesize;
+ }
+ return ret;
}
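scan_read_oob() above interleaves page data and OOB in the caller's buffer, page,OOB,page,OOB,... so for page j the data begins at j * (writesize + oobsize) and its OOB immediately follows at + writesize. A trivial standalone check of those offsets (the geometry values are examples):

```c
#include <stdio.h>

int main(void)
{
	unsigned int writesize = 2048, oobsize = 64;
	unsigned int stride = writesize + oobsize;
	unsigned int j;

	/* Offsets of each page's data and OOB in the interleaved buffer */
	for (j = 0; j < 3; j++)
		printf("page %u: data at %u, oob at %u\n",
		       j, j * stride, j * stride + writesize);
	return 0;
}
```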
-/*
- * Scan write data with oob to flash
- */
-#ifdef CONFIG_MTD_WRITE
+static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ size_t len, struct nand_bbt_descr *td)
+{
+ if (td->options & NAND_BBT_NO_OOB)
+ return scan_read_data(mtd, buf, offs, td);
+ else
+ return scan_read_oob(mtd, buf, offs, len);
+}
+
+/* Scan write data with oob to flash */
static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
uint8_t *buf, uint8_t *oob)
{
struct mtd_oob_ops ops;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.datbuf = buf;
ops.oobbuf = oob;
ops.len = len;
- return mtd->write_oob(mtd, offs, &ops);
+ return mtd_write_oob(mtd, offs, &ops);
+}
+
+static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ u32 ver_offs = td->veroffs;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ ver_offs += mtd->writesize;
+ return ver_offs;
}
-#endif
/**
* read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
- *
- * Read the bad block table(s) for all chips starting at a given page
- * We assume that the bbt bits are in consecutive order.
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
*
-*/
-static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
- struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+ * Read the bad block table(s) for all chips starting at a given page. We
+ * assume that the bbt bits are in consecutive order.
+ */
+static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md)
{
struct nand_chip *this = mtd->priv;
/* Read the primary version, if available */
if (td->options & NAND_BBT_VERSION) {
- scan_read_raw(mtd, buf, td->pages[0] << this->page_shift,
- mtd->writesize);
- td->version[0] = buf[mtd->writesize + td->veroffs];
- pr_debug("Bad block table at page %d, version 0x%02X\n",
- td->pages[0], td->version[0]);
+ scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
+ mtd->writesize, td);
+ td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ td->pages[0], td->version[0]);
}
/* Read the mirror version, if available */
if (md && (md->options & NAND_BBT_VERSION)) {
- scan_read_raw(mtd, buf, md->pages[0] << this->page_shift,
- mtd->writesize);
- md->version[0] = buf[mtd->writesize + md->veroffs];
- pr_debug("Bad block table at page %d, version 0x%02X\n",
- md->pages[0], md->version[0]);
+ scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+ mtd->writesize, md);
+ md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
}
- return 1;
}
-/*
- * Scan a given block full
- */
+/* Scan a given block full */
static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
loff_t offs, uint8_t *buf, size_t readlen,
- int scanlen, int len)
+ int scanlen, int numpages)
{
int ret, j;
- ret = scan_read_raw(mtd, buf, offs, readlen);
- if (ret)
+ ret = scan_read_oob(mtd, buf, offs, readlen);
+ /* Ignore ECC errors when checking for BBM */
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
- for (j = 0; j < len; j++, buf += scanlen) {
+ for (j = 0; j < numpages; j++, buf += scanlen) {
if (check_pattern(buf, scanlen, mtd->writesize, bd))
return 1;
}
return 0;
}
-/*
- * Scan a given block partially
- */
+/* Scan a given block partially */
static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
- loff_t offs, uint8_t *buf, int len)
+ loff_t offs, uint8_t *buf, int numpages)
{
struct mtd_oob_ops ops;
int j, ret;
@@ -343,16 +423,16 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
ops.oobbuf = buf;
ops.ooboffs = 0;
ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
- for (j = 0; j < len; j++) {
+ for (j = 0; j < numpages; j++) {
/*
- * Read the full oob until read_oob is fixed to
- * handle single byte reads for 16 bit
- * buswidth
+ * Read the full oob until read_oob is fixed to handle single
+ * byte reads for 16 bit buswidth.
*/
- ret = mtd->read_oob(mtd, offs, &ops);
- if (ret)
+ ret = mtd_read_oob(mtd, offs, &ops);
+ /* Ignore ECC errors when checking for BBM */
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
if (check_short_pattern(buf, bd))
@@ -365,20 +445,20 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
/**
* create_bbt - [GENERIC] Create a bad block table by scanning the device
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @bd: descriptor for the good/bad block search pattern
- * @chip: create the table for a specific chip, -1 read all chips.
- * Applies only if NAND_BBT_PERCHIP option is set
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips; applies only
+ * if NAND_BBT_PERCHIP option is set
*
- * Create a bad block table by scanning the device
- * for the given good/bad block identify pattern
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identify pattern.
*/
static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *bd, int chip)
{
struct nand_chip *this = mtd->priv;
- int i, numblocks, len, scanlen;
+ int i, numblocks, numpages, scanlen;
int startblock;
loff_t from;
size_t readlen;
@@ -386,13 +466,11 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
pr_info("Scanning device for bad blocks\n");
if (bd->options & NAND_BBT_SCANALLPAGES)
- len = 1 << (this->bbt_erase_shift - this->page_shift);
- else {
- if (bd->options & NAND_BBT_SCAN2NDPAGE)
- len = 2;
- else
- len = 1;
- }
+ numpages = 1 << (this->bbt_erase_shift - this->page_shift);
+ else if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ numpages = 2;
+ else
+ numpages = 1;
if (!(bd->options & NAND_BBT_SCANEMPTY)) {
/* We need only read few bytes from the OOB area */
@@ -401,43 +479,50 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
} else {
/* Full page content should be read */
scanlen = mtd->writesize + mtd->oobsize;
- readlen = len * mtd->writesize;
+ readlen = numpages * mtd->writesize;
}
if (chip == -1) {
- /* Note that numblocks is 2 * (real numblocks) here, see i+=2
- * below as it makes shifting and masking less painful */
+ /*
+ * Note that numblocks is 2 * (real numblocks) here, see i+=2
+ * below as it makes shifting and masking less painful
+ */
numblocks = mtd->size >> (this->bbt_erase_shift - 1);
startblock = 0;
from = 0;
} else {
if (chip >= this->numchips) {
- pr_warning("create_bbt(): chipnr (%d) > available chips (%d)\n",
+ pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
chip + 1, this->numchips);
return -EINVAL;
}
numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
startblock = chip * numblocks;
numblocks += startblock;
- from = startblock << (this->bbt_erase_shift - 1);
+ from = (loff_t)startblock << (this->bbt_erase_shift - 1);
}
+ if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
+ from += mtd->erasesize - (mtd->writesize * numpages);
+
for (i = startblock; i < numblocks;) {
int ret;
+ BUG_ON(bd->options & NAND_BBT_NO_OOB);
+
if (bd->options & NAND_BBT_SCANALLPAGES)
ret = scan_block_full(mtd, bd, from, buf, readlen,
- scanlen, len);
+ scanlen, numpages);
else
- ret = scan_block_fast(mtd, bd, from, buf, len);
+ ret = scan_block_fast(mtd, bd, from, buf, numpages);
if (ret < 0)
return ret;
if (ret) {
this->bbt[i >> 3] |= 0x03 << (i & 0x6);
- pr_warning("Bad eraseblock %d at 0x%08x\n",
- i >> 1, (unsigned int)from);
+ pr_warn("Bad eraseblock %d at 0x%012llx\n",
+ i >> 1, (unsigned long long)from);
mtd->ecc_stats.badblocks++;
}
@@ -449,20 +534,18 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
/**
* search_bbt - [GENERIC] scan the device for a specific bad block table
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
*
- * Read the bad block table by searching for a given ident pattern.
- * Search is preformed either from the beginning up or from the end of
- * the device downwards. The search starts always at the start of a
- * block.
- * If the option NAND_BBT_PERCHIP is given, each chip is searched
- * for a bbt, which contains the bad block information of this chip.
- * This is necessary to provide support for certain DOC devices.
+ * Read the bad block table by searching for a given ident pattern. Search is
+ * performed either from the beginning up or from the end of the device
+ * downwards. The search always starts at the start of a block. If the option
+ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
+ * the bad block information of this chip. This is necessary to provide support
+ * for certain DOC devices.
*
- * The bbt ident pattern resides in the oob area of the first page
- * in a block.
+ * The bbt ident pattern resides in the oob area of the first page in a block.
*/
static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
{
@@ -473,7 +556,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
int bbtblocks;
int blocktopage = this->bbt_erase_shift - this->page_shift;
- /* Search direction top -> down ? */
+ /* Search direction top -> down? */
if (td->options & NAND_BBT_LASTBLOCK) {
startblock = (mtd->size >> this->bbt_erase_shift) - 1;
dir = -1;
@@ -482,7 +565,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
dir = 1;
}
- /* Do we have a bbt per chip ? */
+ /* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chips = this->numchips;
bbtblocks = this->chipsize >> this->bbt_erase_shift;
@@ -503,14 +586,15 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
for (block = 0; block < td->maxblocks; block++) {
int actblock = startblock + dir * block;
- loff_t offs = actblock << this->bbt_erase_shift;
+ loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
/* Read first page */
- scan_read_raw(mtd, buf, offs, mtd->writesize);
+ scan_read(mtd, buf, offs, mtd->writesize, td);
if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
td->pages[i] = actblock << blocktopage;
if (td->options & NAND_BBT_VERSION) {
- td->version[i] = buf[mtd->writesize + td->veroffs];
+ offs = bbt_get_ver_offs(mtd, td);
+ td->version[i] = buf[offs];
}
break;
}
@@ -520,24 +604,26 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
/* Check, if we found a bbt for each requested chip */
for (i = 0; i < chips; i++) {
if (td->pages[i] == -1)
- pr_warning("Bad block table not found for chip %d\n", i);
+ pr_warn("Bad block table not found for chip %d\n", i);
else
- pr_debug("Bad block table found at page %d, version 0x%02X\n", td->pages[i],
- td->version[i]);
+ pr_info("Bad block table found at page %d, version "
+ "0x%02X\n", td->pages[i], td->version[i]);
}
return 0;
}
/**
* search_read_bbts - [GENERIC] scan the device for bad block table(s)
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
*
- * Search and read the bad block table(s)
-*/
-static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+ * Search and read the bad block table(s).
+ */
+static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td,
+ struct nand_bbt_descr *md)
{
/* Search the primary table */
search_bbt(mtd, buf, td);
@@ -545,24 +631,18 @@ static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt
/* Search the mirror table */
if (md)
search_bbt(mtd, buf, md);
-
- /* Force result check */
- return 1;
}
/**
* write_bbt - [GENERIC] (Re)write the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
*
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
- * @chipsel: selector for a specific chip, -1 for all
- *
- * (Re)write the bad block table
- *
-*/
-#ifdef CONFIG_MTD_WRITE
+ * (Re)write the bad block table.
+ */
static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md,
int chipsel)
@@ -581,14 +661,14 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
ops.ooblen = mtd->oobsize;
ops.ooboffs = 0;
ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ops.mode = MTD_OPS_PLACE_OOB;
if (!rcode)
rcode = 0xff;
- /* Write bad block table per chip rather than per device ? */
+ /* Write bad block table per chip rather than per device? */
if (td->options & NAND_BBT_PERCHIP) {
numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
- /* Full device write or specific chip ? */
+ /* Full device write or specific chip? */
if (chipsel == -1) {
nrchips = this->numchips;
} else {
@@ -602,8 +682,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/* Loop through the chips */
for (; chip < nrchips; chip++) {
-
- /* There was already a version of the table, reuse the page
+ /*
+ * There was already a version of the table, reuse the page
* This applies for absolute placement too, as we have the
* page nr. in td->pages.
*/
@@ -612,8 +692,10 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
goto write;
}
- /* Automatic placement of the bad block table */
- /* Search direction top -> down ? */
+ /*
+ * Automatic placement of the bad block table. Search direction
+ * top -> down?
+ */
if (td->options & NAND_BBT_LASTBLOCK) {
startblock = numblocks * (chip + 1) - 1;
dir = -1;
@@ -662,29 +744,27 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
bbtoffs = chip * (numblocks >> 2);
- to = ((loff_t) page) << this->page_shift;
+ to = ((loff_t)page) << this->page_shift;
- /* Must we save the block contents ? */
+ /* Must we save the block contents? */
if (td->options & NAND_BBT_SAVECONTENT) {
/* Make it block aligned */
- to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1));
+ to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
len = 1 << this->bbt_erase_shift;
res = mtd_read(mtd, to, len, &retlen, buf);
if (res < 0) {
if (retlen != len) {
- pr_info("nand_bbt: Error "
- "reading block for writing "
- "the bad block table\n");
+ pr_info("nand_bbt: error reading block "
+ "for writing the bad block table\n");
return res;
}
- pr_warning("nand_bbt: ECC error "
- "while reading block for writing "
- "bad block table\n");
+ pr_warn("nand_bbt: ECC error while reading "
+ "block for writing bad block table\n");
}
/* Read oob data */
ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
ops.oobbuf = &buf[len];
- res = mtd->read_oob(mtd, to + mtd->writesize, &ops);
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
if (res < 0 || ops.oobretlen != ops.ooblen)
goto outerr;
@@ -692,15 +772,29 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
pageoffs = page - (int)(to >> this->page_shift);
offs = pageoffs << this->page_shift;
/* Preset the bbt area with 0xff */
- memset(&buf[offs], 0xff, (size_t) (numblocks >> sft));
+ memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
ooboffs = len + (pageoffs * mtd->oobsize);
+ } else if (td->options & NAND_BBT_NO_OOB) {
+ ooboffs = 0;
+ offs = td->len;
+ /* The version byte */
+ if (td->options & NAND_BBT_VERSION)
+ offs++;
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ len += offs;
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len);
+ /* Pattern is located at the beginning of the first page */
+ memcpy(buf, td->pattern, td->len);
} else {
/* Calc length */
- len = (size_t) (numblocks >> sft);
- /* Make it page aligned ! */
- len = (len + (mtd->writesize - 1)) &
- ~(mtd->writesize - 1);
+ len = (size_t)(numblocks >> sft);
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
/* Preset the buffer with 0xff */
memset(buf, 0xff, len +
(len >> this->page_shift) * mtd->oobsize);
@@ -713,13 +807,13 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
if (td->options & NAND_BBT_VERSION)
buf[ooboffs + td->veroffs] = td->version[chip];
- /* walk through the memory table */
+ /* Walk through the memory table */
for (i = 0; i < numblocks;) {
uint8_t dat;
dat = this->bbt[bbtoffs + (i >> 2)];
for (j = 0; j < 4; j++, i++) {
int sftcnt = (i << (3 - sft)) & sftmsk;
- /* Do not store the reserved bbt blocks ! */
+ /* Do not store the reserved bbt blocks! */
buf[offs + (i >> sft)] &=
~(msk[dat & 0x03] << sftcnt);
dat >>= 2;
@@ -728,18 +822,20 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
memset(&einfo, 0, sizeof(einfo));
einfo.mtd = mtd;
- einfo.addr = (unsigned long)to;
+ einfo.addr = to;
einfo.len = 1 << this->bbt_erase_shift;
res = nand_erase_nand(mtd, &einfo, 1);
if (res < 0)
goto outerr;
- res = scan_write_bbt(mtd, to, len, buf, &buf[len]);
+ res = scan_write_bbt(mtd, to, len, buf,
+ td->options & NAND_BBT_NO_OOB ? NULL :
+ &buf[len]);
if (res < 0)
goto outerr;
- pr_debug("Bad block table written to 0x%08x, version "
- "0x%02X\n", (unsigned int)to, td->version[chip]);
+ pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);
/* Mark it as used */
td->pages[chip] = page;
@@ -747,27 +843,18 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
return 0;
outerr:
- pr_warning(
- "nand_bbt: Error while writing bad block table %d\n", res);
+ pr_warn("nand_bbt: error while writing bad block table %d\n", res);
return res;
}
-#else
-static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
- struct nand_bbt_descr *td, struct nand_bbt_descr *md,
- int chipsel)
-{
- return 0;
-}
-#endif
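For the NAND_BBT_NO_OOB branch in write_bbt() above, the table length is the ident marker plus an optional version byte plus the packed block bits, rounded up to a whole page with ALIGN(). A standalone rerun of that arithmetic (the 4-byte pattern length and the geometry are example values):

```c
#include <stdio.h>

/* Kernel-style round-up to a power-of-two boundary */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int numblocks = 2048;	/* e.g. 256MiB / 128KiB blocks */
	unsigned int sft = 2;		/* 2 bits per block -> 4 per byte */
	unsigned int writesize = 2048;
	unsigned int pattern_len = 4;	/* e.g. a 4-byte "Bbt0" marker */
	unsigned int len;

	len = numblocks >> sft;		/* 512 bytes of table data */
	len += pattern_len + 1;		/* marker + version byte */
	len = ALIGN(len, writesize);	/* one full 2048-byte page */

	printf("BBT occupies %u byte(s) = %u page(s)\n",
	       len, len / writesize);
	return 0;
}
```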
/**
* nand_memory_bbt - [GENERIC] create a memory based bad block table
- * @mtd: MTD device structure
- * @bd: descriptor for the good/bad block search pattern
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
*
- * The function creates a memory based bbt by scanning the device
- * for manufacturer / software marked good / bad blocks
-*/
+ * The function creates a memory based bbt by scanning the device for
+ * manufacturer / software marked good / bad blocks.
+ */
static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct nand_chip *this = mtd->priv;
@@ -778,25 +865,24 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
/**
* check_create - [GENERIC] create and write bbt(s) if necessary
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @bd: descriptor for the good/bad block search pattern
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
*
- * The function checks the results of the previous call to read_bbt
- * and creates / updates the bbt(s) if necessary
- * Creation is necessary if no bbt was found for the chip/device
- * Update is necessary if one of the tables is missing or the
- * version nr. of one table is less than the other
-*/
+ * The function checks the results of the previous call to read_bbt and creates
+ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
+ * for the chip/device. Update is necessary if one of the tables is missing or
+ * the version nr. of one table is less than the other.
+ */
static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
{
- int i, chips, writeops, chipsel, res;
+ int i, chips, writeops, create, chipsel, res, res2;
struct nand_chip *this = mtd->priv;
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
struct nand_bbt_descr *rd, *rd2;
- /* Do we have a bbt per chip ? */
+ /* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP)
chips = this->numchips;
else
@@ -804,85 +890,98 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
for (i = 0; i < chips; i++) {
writeops = 0;
+ create = 0;
rd = NULL;
rd2 = NULL;
- /* Per chip or per device ? */
+ res = res2 = 0;
+ /* Per chip or per device? */
chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
- /* Mirrored table avilable ? */
+ /* Mirrored table available? */
if (md) {
if (td->pages[i] == -1 && md->pages[i] == -1) {
+ create = 1;
writeops = 0x03;
- goto create;
- }
-
- if (td->pages[i] == -1) {
+ } else if (td->pages[i] == -1) {
rd = md;
- td->version[i] = md->version[i];
- writeops = 1;
- goto writecheck;
- }
-
- if (md->pages[i] == -1) {
+ writeops = 0x01;
+ } else if (md->pages[i] == -1) {
rd = td;
- md->version[i] = td->version[i];
- writeops = 2;
- goto writecheck;
- }
-
- if (td->version[i] == md->version[i]) {
+ writeops = 0x02;
+ } else if (td->version[i] == md->version[i]) {
rd = td;
if (!(td->options & NAND_BBT_VERSION))
rd2 = md;
- goto writecheck;
- }
-
- if (((int8_t) (td->version[i] - md->version[i])) > 0) {
+ } else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
rd = td;
- md->version[i] = td->version[i];
- writeops = 2;
+ writeops = 0x02;
} else {
rd = md;
- td->version[i] = md->version[i];
- writeops = 1;
+ writeops = 0x01;
}
-
- goto writecheck;
-
} else {
if (td->pages[i] == -1) {
+ create = 1;
writeops = 0x01;
- goto create;
+ } else {
+ rd = td;
}
- rd = td;
- goto writecheck;
}
- create:
- /* Create the bad block table by scanning the device ? */
- if (!(td->options & NAND_BBT_CREATE))
- continue;
- /* Create the table in memory by scanning the chip(s) */
- create_bbt(mtd, buf, bd, chipsel);
-
- td->version[i] = 1;
- if (md)
- md->version[i] = 1;
- writecheck:
- /* read back first ? */
- if (rd)
- read_abs_bbt(mtd, buf, rd, chipsel);
- /* If they weren't versioned, read both. */
- if (rd2)
- read_abs_bbt(mtd, buf, rd2, chipsel);
-
- /* Write the bad block table to the device ? */
+ if (create) {
+ /* Create the bad block table by scanning the device? */
+ if (!(td->options & NAND_BBT_CREATE))
+ continue;
+
+ /* Create the table in memory by scanning the chip(s) */
+ if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
+ create_bbt(mtd, buf, bd, chipsel);
+
+ td->version[i] = 1;
+ if (md)
+ md->version[i] = 1;
+ }
+
+ /* Read back first? */
+ if (rd) {
+ res = read_abs_bbt(mtd, buf, rd, chipsel);
+ if (mtd_is_eccerr(res)) {
+ /* Mark table as invalid */
+ rd->pages[i] = -1;
+ rd->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+ /* If they weren't versioned, read both */
+ if (rd2) {
+ res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
+ if (mtd_is_eccerr(res2)) {
+ /* Mark table as invalid */
+ rd2->pages[i] = -1;
+ rd2->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+
+ /* Scrub the flash table(s)? */
+ if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
+ writeops = 0x03;
+
+ /* Update version numbers before writing */
+ if (md) {
+ td->version[i] = max(td->version[i], md->version[i]);
+ md->version[i] = td->version[i];
+ }
+
+ /* Write the bad block table to the device? */
if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
res = write_bbt(mtd, buf, td, md, chipsel);
if (res < 0)
return res;
}
- /* Write the mirror bad block table to the device ? */
+ /* Write the mirror bad block table to the device? */
if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
res = write_bbt(mtd, buf, md, td, chipsel);
if (res < 0)
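The version comparison in check_create() above, ((int8_t)(td->version[i] - md->version[i])) > 0, is serial-number arithmetic: a version byte that wraps from 0xff back to 0x00 still compares as newer. A standalone demonstration:

```c
#include <stdio.h>
#include <stdint.h>

/* 1 if version a is strictly newer than b, modulo-256 style */
static int bbt_version_newer(uint8_t a, uint8_t b)
{
	return (int8_t)(a - b) > 0;
}

int main(void)
{
	/* Plain case: 5 is newer than 4 */
	printf("%d\n", bbt_version_newer(5, 4));	/* 1 */
	/* Wrapped case: 0x00 follows 0xff, so it is newer */
	printf("%d\n", bbt_version_newer(0x00, 0xff));	/* 1 */
	/* And not the other way round */
	printf("%d\n", bbt_version_newer(0xff, 0x00));	/* 0 */
	return 0;
}
```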
@@ -894,20 +993,19 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
/**
* mark_bbt_regions - [GENERIC] mark the bad block table regions
- * @mtd: MTD device structure
- * @td: bad block table descriptor
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
*
- * The bad block table regions are marked as "bad" to prevent
- * accidental erasures / writes. The regions are identified by
- * the mark 0x02.
-*/
+ * The bad block table regions are marked as "bad" to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x02.
+ */
static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
{
struct nand_chip *this = mtd->priv;
int i, j, chips, block, nrblocks, update;
uint8_t oldval, newval;
- /* Do we have a bbt per chip ? */
+ /* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chips = this->numchips;
nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
@@ -927,7 +1025,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
newval = oldval | (0x2 << (block & 0x06));
this->bbt[(block >> 3)] = newval;
if ((oldval != newval) && td->reserved_block_code)
- nand_update_bbt(mtd, block << (this->bbt_erase_shift - 1));
+ nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
continue;
}
update = 0;
@@ -944,28 +1042,76 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
update = 1;
block += 2;
}
- /* If we want reserved blocks to be recorded to flash, and some
- new ones have been marked, then we need to update the stored
- bbts. This should only happen once. */
+ /*
+ * If we want reserved blocks to be recorded to flash, and some
+ * new ones have been marked, then we need to update the stored
+ * bbts. This should only happen once.
+ */
if (update && td->reserved_block_code)
- nand_update_bbt(mtd, (block - 2) << (this->bbt_erase_shift - 1));
+ nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
}
}
/**
- * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
- * @mtd: MTD device structure
- * @bd: descriptor for the good/bad block search pattern
+ * verify_bbt_descr - verify the bad block description
+ * @mtd: MTD device structure
+ * @bd: the table to verify
*
- * The function checks, if a bad block table(s) is/are already
- * available. If not it scans the device for manufacturer
- * marked good / bad blocks and writes the bad block table(s) to
- * the selected place.
+ * This function performs a few sanity checks on the bad block description
+ * table.
+ */
+static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd->priv;
+ u32 pattern_len;
+ u32 bits;
+ u32 table_size;
+
+ if (!bd)
+ return;
+
+ pattern_len = bd->len;
+ bits = bd->options & NAND_BBT_NRBITS_MSK;
+
+ BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
+ !(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!bits);
+
+ if (bd->options & NAND_BBT_VERSION)
+ pattern_len++;
+
+ if (bd->options & NAND_BBT_NO_OOB) {
+ BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
+ BUG_ON(bd->offs);
+ if (bd->options & NAND_BBT_VERSION)
+ BUG_ON(bd->veroffs != bd->len);
+ BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
+ }
+
+ if (bd->options & NAND_BBT_PERCHIP)
+ table_size = this->chipsize >> this->bbt_erase_shift;
+ else
+ table_size = mtd->size >> this->bbt_erase_shift;
+ table_size >>= 3;
+ table_size *= bits;
+ if (bd->options & NAND_BBT_NO_OOB)
+ table_size += pattern_len;
+ BUG_ON(table_size > (1 << this->bbt_erase_shift));
+}
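The closing BUG_ON asserts that the packed table (plus the marker, in the NO_OOB case) fits inside one eraseblock. Rerunning that arithmetic standalone with example geometry:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mtd_size = 256ULL << 20;	/* 256MiB device */
	unsigned int bbt_erase_shift = 17;	/* 128KiB blocks */
	unsigned int bits = 2;			/* 2 bits per block */
	unsigned int pattern_len = 4 + 1;	/* marker + version, NO_OOB */
	uint64_t table_size;

	/* Same steps as verify_bbt_descr() */
	table_size = mtd_size >> bbt_erase_shift;	/* 2048 blocks */
	table_size >>= 3;				/* bits -> bytes */
	table_size *= bits;				/* 512 bytes */
	table_size += pattern_len;

	printf("table %llu bytes, block %u bytes: %s\n",
	       (unsigned long long)table_size, 1u << bbt_erase_shift,
	       table_size <= (1u << bbt_erase_shift) ? "fits" : "too big");
	return 0;
}
```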
+
+/**
+ * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
*
- * The bad block table memory is allocated here. It must be freed
- * by calling the nand_free_bbt function.
+ * The function checks, if a bad block table(s) is/are already available. If
+ * not it scans the device for manufacturer marked good / bad blocks and writes
+ * the bad block table(s) to the selected place.
*
-*/
+ * The bad block table memory is allocated here. It must be freed by calling
+ * the nand_free_bbt function.
+ */
int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct nand_chip *this = mtd->priv;
@@ -975,46 +1121,48 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
struct nand_bbt_descr *md = this->bbt_md;
len = mtd->size >> (this->bbt_erase_shift + 2);
- /* Allocate memory (2bit per block) and clear the memory bad block table */
+ /*
+ * Allocate memory (2 bits per block) and clear the memory bad block
+ * table.
+ */
this->bbt = kzalloc(len, GFP_KERNEL);
- if (!this->bbt) {
- pr_err("nand_scan_bbt: Out of memory\n");
+ if (!this->bbt)
return -ENOMEM;
- }
- /* If no primary table decriptor is given, scan the device
- * to build a memory based bad block table
+ /*
+ * If no primary table descriptor is given, scan the device to build a
+ * memory based bad block table.
*/
if (!td) {
if ((res = nand_memory_bbt(mtd, bd))) {
- pr_err("nand_bbt: Can't scan flash and build the RAM-based BBT\n");
+ pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
kfree(this->bbt);
this->bbt = NULL;
}
return res;
}
+ verify_bbt_descr(mtd, td);
+ verify_bbt_descr(mtd, md);
/* Allocate a temporary buffer for one eraseblock incl. oob */
len = (1 << this->bbt_erase_shift);
len += (len >> this->page_shift) * mtd->oobsize;
buf = vmalloc(len);
if (!buf) {
- pr_err("nand_bbt: Out of memory\n");
kfree(this->bbt);
this->bbt = NULL;
return -ENOMEM;
}
- /* Is the bbt at a given page ? */
+ /* Is the bbt at a given page? */
if (td->options & NAND_BBT_ABSPAGE) {
- res = read_abs_bbts(mtd, buf, td, md);
+ read_abs_bbts(mtd, buf, td, md);
} else {
/* Search the bad block table using a pattern in oob */
- res = search_read_bbts(mtd, buf, td, md);
+ search_read_bbts(mtd, buf, td, md);
}
- if (res)
- res = check_create(mtd, buf, bd);
+ res = check_create(mtd, buf, bd);
/* Prevent the bbt regions from erasing / writing */
mark_bbt_region(mtd, td);
@@ -1027,15 +1175,15 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
/**
* nand_update_bbt - [NAND Interface] update bad block table(s)
- * @mtd: MTD device structure
- * @offs: the offset of the newly marked block
+ * @mtd: MTD device structure
+ * @offs: the offset of the newly marked block
*
- * The function updates the bad block table(s)
-*/
+ * The function updates the bad block table(s).
+ */
int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
{
struct nand_chip *this = mtd->priv;
- int len, res = 0, writeops = 0;
+ int len, res = 0;
int chip, chipsel;
uint8_t *buf;
struct nand_bbt_descr *td = this->bbt_td;
@@ -1044,19 +1192,14 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
if (!this->bbt || !td)
return -EINVAL;
- len = mtd->size >> (this->bbt_erase_shift + 2);
/* Allocate a temporary buffer for one eraseblock incl. oob */
len = (1 << this->bbt_erase_shift);
len += (len >> this->page_shift) * mtd->oobsize;
buf = kmalloc(len, GFP_KERNEL);
- if (!buf) {
- pr_err("nand_update_bbt: Out of memory\n");
+ if (!buf)
return -ENOMEM;
- }
- writeops = md != NULL ? 0x03 : 0x01;
-
- /* Do we have a bbt per chip ? */
+ /* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chip = (int)(offs >> this->chip_shift);
chipsel = chip;
@@ -1069,14 +1212,14 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
if (md)
md->version[chip]++;
- /* Write the bad block table to the device ? */
- if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
res = write_bbt(mtd, buf, td, md, chipsel);
if (res < 0)
goto out;
}
- /* Write the mirror bad block table to the device ? */
- if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE)) {
res = write_bbt(mtd, buf, md, td, chipsel);
}
@@ -1085,49 +1228,13 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
return res;
}
-/* Define some generic bad / good block scan pattern which are used
- * while scanning a device for factory marked good / bad blocks. */
+/*
+ * Define some generic bad / good block scan patterns which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-static struct nand_bbt_descr smallpage_memorybased = {
- .options = NAND_BBT_SCAN2NDPAGE,
- .offs = 5,
- .len = 1,
- .pattern = scan_ff_pattern
-};
-
-static struct nand_bbt_descr largepage_memorybased = {
- .options = 0,
- .offs = 0,
- .len = 2,
- .pattern = scan_ff_pattern
-};
-
-static struct nand_bbt_descr smallpage_flashbased = {
- .options = NAND_BBT_SCAN2NDPAGE,
- .offs = 5,
- .len = 1,
- .pattern = scan_ff_pattern
-};
-
-static struct nand_bbt_descr largepage_flashbased = {
- .options = NAND_BBT_SCAN2NDPAGE,
- .offs = 0,
- .len = 2,
- .pattern = scan_ff_pattern
-};
-
-static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
-
-static struct nand_bbt_descr agand_flashbased = {
- .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
- .offs = 0x20,
- .len = 6,
- .pattern = scan_agand_pattern
-};
-
-/* Generic flash bbt decriptors
-*/
+/* Generic flash bbt descriptors */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
@@ -1137,7 +1244,7 @@ static struct nand_bbt_descr bbt_main_descr = {
.offs = 8,
.len = 4,
.veroffs = 12,
- .maxblocks = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = bbt_pattern
};
@@ -1147,67 +1254,99 @@ static struct nand_bbt_descr bbt_mirror_descr = {
.offs = 8,
.len = 4,
.veroffs = 12,
- .maxblocks = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = mirror_pattern
};
+static struct nand_bbt_descr bbt_main_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
+#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
/**
- * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
- * @mtd: MTD device structure
+ * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
+ * @this: NAND chip to create descriptor for
*
- * This function selects the default bad block table
- * support for the device and calls the nand_scan_bbt function
+ * This function allocates and initializes a nand_bbt_descr for BBM detection
+ * based on the properties of @this. The new descriptor is stored in
+ * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
+ * passed to this function.
+ */
+static int nand_create_badblock_pattern(struct nand_chip *this)
+{
+ struct nand_bbt_descr *bd;
+ if (this->badblock_pattern) {
+ pr_warn("Bad block pattern already allocated; not replacing\n");
+ return -EINVAL;
+ }
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ return -ENOMEM;
+ bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
+ bd->offs = this->badblockpos;
+ bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ bd->pattern = scan_ff_pattern;
+ bd->options |= NAND_BBT_DYNAMICSTRUCT;
+ this->badblock_pattern = bd;
+ return 0;
+}
+
+/**
+ * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
+ * @mtd: MTD device structure
*
-*/
+ * This function selects the default bad block table support for the device and
+ * calls the nand_scan_bbt function.
+ */
int nand_default_bbt(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
- /* Default for AG-AND. We must use a flash based
- * bad block table as the devices have factory marked
- * _good_ blocks. Erasing those blocks leads to loss
- * of the good / bad information, so we _must_ store
- * this information in a good / bad table during
- * startup
- */
- if (this->options & NAND_IS_AND) {
- /* Use the default pattern descriptors */
- if (!this->bbt_td) {
- this->bbt_td = &bbt_main_descr;
- this->bbt_md = &bbt_mirror_descr;
- }
- this->bbt_options |= NAND_BBT_USE_FLASH;
- return nand_scan_bbt(mtd, &agand_flashbased);
- }
-
- /* Is a flash based bad block table requested ? */
+ /* Is a flash based bad block table requested? */
if (this->bbt_options & NAND_BBT_USE_FLASH) {
/* Use the default pattern descriptors */
if (!this->bbt_td) {
- this->bbt_td = &bbt_main_descr;
- this->bbt_md = &bbt_mirror_descr;
- }
- if (!this->badblock_pattern) {
- this->badblock_pattern = (mtd->writesize > 512) ? &largepage_flashbased : &smallpage_flashbased;
+ if (this->bbt_options & NAND_BBT_NO_OOB) {
+ this->bbt_td = &bbt_main_no_oob_descr;
+ this->bbt_md = &bbt_mirror_no_oob_descr;
+ } else {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
}
} else {
this->bbt_td = NULL;
this->bbt_md = NULL;
- if (!this->badblock_pattern) {
- this->badblock_pattern = (mtd->writesize > 512) ?
- &largepage_memorybased : &smallpage_memorybased;
- }
}
+
+ if (!this->badblock_pattern)
+ nand_create_badblock_pattern(this);
+
return nand_scan_bbt(mtd, this->badblock_pattern);
}
/**
* nand_isbad_bbt - [NAND Interface] Check if a block is bad
- * @mtd: MTD device structure
- * @offs: offset in the device
- * @allowbbt: allow access to bad block table region
- *
-*/
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
{
struct nand_chip *this = mtd->priv;
@@ -1218,8 +1357,9 @@ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
block = (int)(offs >> (this->bbt_erase_shift - 1));
res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
- MTD_DEBUG(MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
- (unsigned int)offs, block >> 1, res);
+ pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: "
+ "(block %d) 0x%02x\n",
+ (unsigned int)offs, block >> 1, res);
switch ((int)res) {
case 0x00:
@@ -1234,5 +1374,4 @@ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
EXPORT_SYMBOL(nand_scan_bbt);
EXPORT_SYMBOL(nand_default_bbt);
-
-#endif /* DOXYGEN_SHOULD_SKIP_THIS */
+EXPORT_SYMBOL_GPL(nand_update_bbt);
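
The nand_isbad_bbt() lookup above packs four 2-bit block entries into each
BBT byte. A standalone sketch of that decoding, with made-up table contents
for illustration:

#include <stdint.h>
#include <stdio.h>

static int bbt_status(const uint8_t *bbt, unsigned int bbt_erase_shift,
		      uint64_t offs)
{
	/* each block occupies 2 bits, so scale the offset accordingly */
	unsigned int block = (unsigned int)(offs >> (bbt_erase_shift - 1));

	/* 4 blocks per byte; (block & 0x06) is the field's bit offset */
	return (bbt[block >> 3] >> (block & 0x06)) & 0x03;
}

int main(void)
{
	/* fields from LSB: block0=00 (good), block1=01 (bad), block2=10 */
	uint8_t bbt[] = { 0x24 };
	unsigned int shift = 17;	/* 128 KiB eraseblocks */

	printf("block 1 status: 0x%02x\n",
	       bbt_status(bbt, shift, 1ULL << shift));	/* prints 0x01 */
	return 0;
}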
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
new file mode 100644
index 0000000000..cba5285545
--- /dev/null
+++ b/drivers/mtd/nand/nand_bch.c
@@ -0,0 +1,243 @@
+/*
+ * This file provides ECC correction for more than 1 bit per block of data,
+ * using binary BCH codes. It relies on the generic BCH library lib/bch.c.
+ *
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this file; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/bch.h>
+
+/**
+ * struct nand_bch_control - private NAND BCH control structure
+ * @bch: BCH control structure
+ * @ecclayout: private ecc layout for this BCH configuration
+ * @errloc: error location array
+ * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
+ */
+struct nand_bch_control {
+ struct bch_control *bch;
+ struct nand_ecclayout ecclayout;
+ unsigned int *errloc;
+ unsigned char *eccmask;
+};
+
+/**
+ * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
+ * @mtd: MTD block structure
+ * @buf: input buffer with raw data
+ * @code: output buffer with ECC
+ */
+int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+ unsigned char *code)
+{
+ const struct nand_chip *chip = mtd->priv;
+ struct nand_bch_control *nbc = chip->ecc.priv;
+ unsigned int i;
+
+ memset(code, 0, chip->ecc.bytes);
+ encode_bch(nbc->bch, buf, chip->ecc.size, code);
+
+ /* apply mask so that an erased page is a valid codeword */
+ for (i = 0; i < chip->ecc.bytes; i++)
+ code[i] ^= nbc->eccmask[i];
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_bch_calculate_ecc);
+
+/**
+ * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd: MTD block structure
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct bit errors for a data byte block
+ */
+int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ const struct nand_chip *chip = mtd->priv;
+ struct nand_bch_control *nbc = chip->ecc.priv;
+ unsigned int *errloc = nbc->errloc;
+ int i, count;
+
+ count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
+ NULL, errloc);
+ if (count > 0) {
+ for (i = 0; i < count; i++) {
+ if (errloc[i] < (chip->ecc.size*8))
+ /* error is located in data, correct it */
+ buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
+ /* else error in ecc, no action needed */
+
+ pr_debug("%s: corrected bitflip %u\n", __func__,
+ errloc[i]);
+ }
+ } else if (count < 0) {
+ printk(KERN_ERR "ecc unrecoverable error\n");
+ count = -1;
+ }
+ return count;
+}
+EXPORT_SYMBOL(nand_bch_correct_data);
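
A tiny standalone sketch of the errloc arithmetic above: the bit index
returned by the decoder is split into a byte offset and a bit offset before
the corrective XOR.

#include <stdio.h>

int main(void)
{
	unsigned char buf[4] = { 0x00, 0x00, 0x40, 0x00 };
	unsigned int errloc = 22;	/* bit 6 of byte 2 is flipped */

	buf[errloc >> 3] ^= 1 << (errloc & 7);

	printf("byte 2 after correction: 0x%02x\n", buf[2]); /* 0x00 */
	return 0;
}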
+
+/**
+ * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
+ * @mtd: MTD block structure
+ * @eccsize: ecc block size in bytes
+ * @eccbytes: ecc length in bytes
+ * @ecclayout: output default layout
+ *
+ * Returns:
+ * a pointer to a new NAND BCH control structure, or NULL upon failure
+ *
+ * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes
+ * are used to compute BCH parameters m (Galois field order) and t (error
+ * correction capability). @eccbytes should be equal to the number of bytes
+ * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8.
+ *
+ * Example: to configure 4 bit correction per 512 bytes, you should pass
+ * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
+ * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
+ */
+struct nand_bch_control *
+nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
+ struct nand_ecclayout **ecclayout)
+{
+ unsigned int m, t, eccsteps, i;
+ struct nand_ecclayout *layout;
+ struct nand_bch_control *nbc = NULL;
+ unsigned char *erased_page;
+
+ if (!eccsize || !eccbytes) {
+ printk(KERN_WARNING "ecc parameters not supplied\n");
+ goto fail;
+ }
+
+ m = fls(1+8*eccsize);
+ t = (eccbytes*8)/m;
+
+ nbc = kzalloc(sizeof(*nbc), GFP_KERNEL);
+ if (!nbc)
+ goto fail;
+
+ nbc->bch = init_bch(m, t, 0);
+ if (!nbc->bch)
+ goto fail;
+
+ /* verify that eccbytes has the expected value */
+ if (nbc->bch->ecc_bytes != eccbytes) {
+ printk(KERN_WARNING "invalid eccbytes %u, should be %u\n",
+ eccbytes, nbc->bch->ecc_bytes);
+ goto fail;
+ }
+
+ eccsteps = mtd->writesize/eccsize;
+
+ /* if no ecc placement scheme was provided, build one */
+ if (!*ecclayout) {
+
+ /* handle large page devices only */
+ if (mtd->oobsize < 64) {
+ printk(KERN_WARNING "must provide an oob scheme for "
+ "oobsize %d\n", mtd->oobsize);
+ goto fail;
+ }
+
+ layout = &nbc->ecclayout;
+ layout->eccbytes = eccsteps*eccbytes;
+
+ /* reserve 2 bytes for bad block marker */
+ if (layout->eccbytes+2 > mtd->oobsize) {
+ printk(KERN_WARNING "no suitable oob scheme available "
+ "for oobsize %d eccbytes %u\n", mtd->oobsize,
+ eccbytes);
+ goto fail;
+ }
+ /* put ecc bytes at oob tail */
+ for (i = 0; i < layout->eccbytes; i++)
+ layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+
+ layout->oobfree[0].offset = 2;
+ layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
+
+ *ecclayout = layout;
+ }
+
+ /* sanity checks */
+ if (8*(eccsize+eccbytes) >= (1 << m)) {
+ printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
+ goto fail;
+ }
+ if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) {
+ printk(KERN_WARNING "invalid ecc layout\n");
+ goto fail;
+ }
+
+ nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
+ nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL);
+ if (!nbc->eccmask || !nbc->errloc)
+ goto fail;
+ /*
+ * compute and store the inverted ecc of an erased ecc block
+ */
+ erased_page = kmalloc(eccsize, GFP_KERNEL);
+ if (!erased_page)
+ goto fail;
+
+ memset(erased_page, 0xff, eccsize);
+ memset(nbc->eccmask, 0, eccbytes);
+ encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
+ kfree(erased_page);
+
+ for (i = 0; i < eccbytes; i++)
+ nbc->eccmask[i] ^= 0xff;
+
+ return nbc;
+fail:
+ nand_bch_free(nbc);
+ return NULL;
+}
+EXPORT_SYMBOL(nand_bch_init);
+
+/**
+ * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources
+ * @nbc: NAND BCH control structure
+ */
+void nand_bch_free(struct nand_bch_control *nbc)
+{
+ if (nbc) {
+ free_bch(nbc->bch);
+ kfree(nbc->errloc);
+ kfree(nbc->eccmask);
+ kfree(nbc);
+ }
+}
+EXPORT_SYMBOL(nand_bch_free);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
+MODULE_DESCRIPTION("NAND software BCH ECC support");
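
The nand_bch_init() kernel-doc above works through eccsize=512/eccbytes=7;
the same m/t derivation as a standalone sketch, open-coding fls():

#include <stdio.h>

int main(void)
{
	unsigned int eccsize = 512, eccbytes = 7;
	unsigned int m = 0, t, v = 1 + 8 * eccsize;

	while (v) {		/* fls(): index of the highest set bit */
		v >>= 1;
		m++;
	}
	t = (eccbytes * 8) / m;

	/* eccsize=512 -> m=13, eccbytes=7 -> t=4 (4-bit correction) */
	printf("m=%u t=%u\n", m, t);
	return 0;
}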
diff --git a/drivers/mtd/nand/nand_hwecc.c b/drivers/mtd/nand/nand_hwecc.c
deleted file mode 100644
index a48efa1074..0000000000
--- a/drivers/mtd/nand/nand_hwecc.c
+++ /dev/null
@@ -1,103 +0,0 @@
-#include <common.h>
-#include <errno.h>
-#include <clock.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/err.h>
-#include <linux/mtd/nand_ecc.h>
-#include <asm/byteorder.h>
-#include <io.h>
-#include <malloc.h>
-
-#include "nand.h"
-
-/**
- * nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- *
- * Not for syndrome calculating ecc controllers which need a special oob layout
- */
-static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf)
-{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
- chip->ecc.calculate(mtd, p, &ecc_calc[i]);
- }
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- for (i = 0; i < chip->ecc.total; i++)
- ecc_code[i] = chip->oob_poi[eccpos[i]];
-
- eccsteps = chip->ecc.steps;
- p = buf;
-
- for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- int stat;
-
- stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0)
- mtd->ecc_stats.failed++;
- else
- mtd->ecc_stats.corrected += stat;
- }
- return 0;
-}
-
-/**
- * nand_write_page_hwecc - [REPLACABLE] hardware ecc based page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- */
-#ifdef CONFIG_MTD_WRITE
-static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
-{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- const uint8_t *p = buf;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
- chip->ecc.calculate(mtd, p, &ecc_calc[i]);
- }
-
- for (i = 0; i < chip->ecc.total; i++)
- chip->oob_poi[eccpos[i]] = ecc_calc[i];
-
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-}
-#endif
-
-void nand_init_ecc_hw(struct nand_chip *chip)
-{
- /* Use standard hwecc read page function ? */
- if (!chip->ecc.read_page)
- chip->ecc.read_page = nand_read_page_hwecc;
-#ifdef CONFIG_NAND_READ_OOB
- if (!chip->ecc.read_oob)
- chip->ecc.read_oob = nand_read_oob_std;
-#endif
-#ifdef CONFIG_MTD_WRITE
- if (!chip->ecc.write_oob)
- chip->ecc.write_oob = nand_write_oob_std;
- if (!chip->ecc.write_page)
- chip->ecc.write_page = nand_write_page_hwecc;
-#endif
-}
diff --git a/drivers/mtd/nand/nand_hwecc_syndrome.c b/drivers/mtd/nand/nand_hwecc_syndrome.c
deleted file mode 100644
index 1493b88439..0000000000
--- a/drivers/mtd/nand/nand_hwecc_syndrome.c
+++ /dev/null
@@ -1,225 +0,0 @@
-#include <common.h>
-#include <errno.h>
-#include <clock.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/err.h>
-#include <linux/mtd/nand_ecc.h>
-#include <asm/byteorder.h>
-#include <io.h>
-#include <malloc.h>
-#include <module.h>
-
-/**
- * nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- *
- * The hw generator calculates the error syndrome automatically. Therefor
- * we need a special oob layout and handling.
- */
-static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf)
-{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- uint8_t *oob = chip->oob_poi;
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- int stat;
-
- chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
-
- if (chip->ecc.prepad) {
- chip->read_buf(mtd, oob, chip->ecc.prepad);
- oob += chip->ecc.prepad;
- }
-
- chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
- chip->read_buf(mtd, oob, eccbytes);
- stat = chip->ecc.correct(mtd, p, oob, NULL);
-
- if (stat < 0)
- mtd->ecc_stats.failed++;
- else
- mtd->ecc_stats.corrected += stat;
-
- oob += eccbytes;
-
- if (chip->ecc.postpad) {
- chip->read_buf(mtd, oob, chip->ecc.postpad);
- oob += chip->ecc.postpad;
- }
- }
-
- /* Calculate remaining oob bytes */
- i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->read_buf(mtd, oob, i);
-
- return 0;
-}
-/**
- * nand_write_page_syndrome - [REPLACABLE] hardware ecc syndrom based page write
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- *
- * The hw generator calculates the error syndrome automatically. Therefor
- * we need a special oob layout and handling.
- */
-#ifdef CONFIG_MTD_WRITE
-static void nand_write_page_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
-{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- const uint8_t *p = buf;
- uint8_t *oob = chip->oob_poi;
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-
- chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
-
- if (chip->ecc.prepad) {
- chip->write_buf(mtd, oob, chip->ecc.prepad);
- oob += chip->ecc.prepad;
- }
-
- chip->ecc.calculate(mtd, p, oob);
- chip->write_buf(mtd, oob, eccbytes);
- oob += eccbytes;
-
- if (chip->ecc.postpad) {
- chip->write_buf(mtd, oob, chip->ecc.postpad);
- oob += chip->ecc.postpad;
- }
- }
-
- /* Calculate remaining oob bytes */
- i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->write_buf(mtd, oob, i);
-}
-#endif
-
-/**
- * nand_read_oob_syndrome - [REPLACABLE] OOB data read function for HW ECC
- * with syndromes
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to read
- * @sndcmd: flag whether to issue read command or not
- */
-static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
-{
- uint8_t *buf = chip->oob_poi;
- int length = mtd->oobsize;
- int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
- int eccsize = chip->ecc.size;
- uint8_t *bufpoi = buf;
- int i, toread, sndrnd = 0, pos;
-
- chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
- for (i = 0; i < chip->ecc.steps; i++) {
- if (sndrnd) {
- pos = eccsize + i * (eccsize + chunk);
- if (mtd->writesize > 512)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
- else
- chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
- } else
- sndrnd = 1;
- toread = min_t(int, length, chunk);
- chip->read_buf(mtd, bufpoi, toread);
- bufpoi += toread;
- length -= toread;
- }
- if (length > 0)
- chip->read_buf(mtd, bufpoi, length);
-
- return 1;
-}
-
-/**
- * nand_write_oob_syndrome - [REPLACABLE] OOB data write function for HW ECC
- * with syndrome - only for large page flash !
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to write
- */
-#ifdef CONFIG_MTD_WRITE
-static int nand_write_oob_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip, int page)
-{
- int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
- int eccsize = chip->ecc.size, length = mtd->oobsize;
- int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
- const uint8_t *bufpoi = chip->oob_poi;
-
- /*
- * data-ecc-data-ecc ... ecc-oob
- * or
- * data-pad-ecc-pad-data-pad .... ecc-pad-oob
- */
- if (!chip->ecc.prepad && !chip->ecc.postpad) {
- pos = steps * (eccsize + chunk);
- steps = 0;
- } else
- pos = eccsize;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
- for (i = 0; i < steps; i++) {
- if (sndcmd) {
- if (mtd->writesize <= 512) {
- uint32_t fill = 0xFFFFFFFF;
-
- len = eccsize;
- while (len > 0) {
- int num = min_t(int, len, 4);
- chip->write_buf(mtd, (uint8_t *)&fill,
- num);
- len -= num;
- }
- } else {
- pos = eccsize + i * (eccsize + chunk);
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
- }
- } else
- sndcmd = 1;
- len = min_t(int, length, chunk);
- chip->write_buf(mtd, bufpoi, len);
- bufpoi += len;
- length -= len;
- }
- if (length > 0)
- chip->write_buf(mtd, bufpoi, length);
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-#endif
-
-void nand_init_ecc_hw_syndrome(struct nand_chip *chip)
-{
- /* Use standard syndrome read/write page function ? */
- if (!chip->ecc.read_page)
- chip->ecc.read_page = nand_read_page_syndrome;
- if (!chip->ecc.read_oob)
- chip->ecc.read_oob = nand_read_oob_syndrome;
-#ifdef CONFIG_MTD_WRITE
- if (!chip->ecc.write_page)
- chip->ecc.write_page = nand_write_page_syndrome;
- if (!chip->ecc.write_oob)
- chip->ecc.write_oob = nand_write_oob_syndrome;
-#endif
-}
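
For reference, the deleted syndrome helpers assumed data and pad+ECC chunks
interleaved within the page; a sketch of the column arithmetic they used,
with assumed geometry:

#include <stdio.h>

int main(void)
{
	int eccsize = 512, eccbytes = 8, prepad = 1, postpad = 1;
	int chunk = eccbytes + prepad + postpad;
	int steps = 4, i;	/* e.g. a 2048-byte page */

	for (i = 0; i < steps; i++)
		/* each step stores data first, then its pad+ECC chunk */
		printf("step %d: oob chunk at column %d\n",
		       i, eccsize + i * (eccsize + chunk));
	return 0;
}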
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 4f8fb021a9..52b0da5927 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -3,184 +3,178 @@
*
* Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
*
- * $Id: nand_ids.c,v 1.16 2005/11/07 11:14:31 gleixner Exp $
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <common.h>
+#include <sizes.h>
#include <linux/mtd/nand.h>
#ifdef CONFIG_NAND_INFO
-#define __NANDSTR(str) str
+#define __STR(str) str
#else
-#define __NANDSTR(str) ""
-#endif
-
-/*
-* Chip ID list
-*
-* Name. ID code, pagesize, chipsize in MegaByte, eraseblock size,
-* options
-*
-* Pagesize; 0, 256, 512
-* 0 get this information from the extended chip ID
-+ 256 256 Byte page size
-* 512 512 Byte page size
-*/
-struct nand_flash_dev nand_flash_ids[] = {
-
-#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
- {__NANDSTR("NAND 1MiB 5V 8-bit"), 0x6e, 256, 1, 0x1000, 0},
- {__NANDSTR("NAND 2MiB 5V 8-bit"), 0x64, 256, 2, 0x1000, 0},
- {__NANDSTR("NAND 4MiB 5V 8-bit"), 0x6b, 512, 4, 0x2000, 0},
- {__NANDSTR("NAND 1MiB 3,3V 8-bit"), 0xe8, 256, 1, 0x1000, 0},
- {__NANDSTR("NAND 1MiB 3,3V 8-bit"), 0xec, 256, 1, 0x1000, 0},
- {__NANDSTR("NAND 2MiB 3,3V 8-bit"), 0xea, 256, 2, 0x1000, 0},
- {__NANDSTR("NAND 4MiB 3,3V 8-bit"), 0xd5, 512, 4, 0x2000, 0},
- {__NANDSTR("NAND 4MiB 3,3V 8-bit"), 0xe3, 512, 4, 0x2000, 0},
- {__NANDSTR("NAND 4MiB 3,3V 8-bit"), 0xe5, 512, 4, 0x2000, 0},
- {__NANDSTR("NAND 8MiB 3,3V 8-bit"), 0xd6, 512, 8, 0x2000, 0},
-
- {__NANDSTR("NAND 8MiB 1,8V 8-bit"), 0x39, 512, 8, 0x2000, 0},
- {__NANDSTR("NAND 8MiB 3,3V 8-bit"), 0xe6, 512, 8, 0x2000, 0},
- {__NANDSTR("NAND 8MiB 1,8V 16-bit"), 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
- {__NANDSTR("NAND 8MiB 3,3V 16-bit"), 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
+#define __STR(str) ""
#endif
- {__NANDSTR("NAND 16MiB 1,8V 8-bit"), 0x33, 512, 16, 0x4000, 0},
- {__NANDSTR("NAND 16MiB 3,3V 8-bit"), 0x73, 512, 16, 0x4000, 0},
- {__NANDSTR("NAND 16MiB 1,8V 16-bit"), 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
- {__NANDSTR("NAND 16MiB 3,3V 16-bit"), 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
-
- {__NANDSTR("NAND 32MiB 1,8V 8-bit"), 0x35, 512, 32, 0x4000, 0},
- {__NANDSTR("NAND 32MiB 3,3V 8-bit"), 0x75, 512, 32, 0x4000, 0},
- {__NANDSTR("NAND 32MiB 1,8V 16-bit"), 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
- {__NANDSTR("NAND 32MiB 3,3V 16-bit"), 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},
-
- {__NANDSTR("NAND 64MiB 1,8V 8-bit"), 0x36, 512, 64, 0x4000, 0},
- {__NANDSTR("NAND 64MiB 3,3V 8-bit"), 0x76, 512, 64, 0x4000, 0},
- {__NANDSTR("NAND 64MiB 1,8V 16-bit"), 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
- {__NANDSTR("NAND 64MiB 3,3V 16-bit"), 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},
+#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
+#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
- {__NANDSTR("NAND 128MiB 1,8V 8-bit"), 0x78, 512, 128, 0x4000, 0},
- {__NANDSTR("NAND 128MiB 1,8V 8-bit"), 0x39, 512, 128, 0x4000, 0},
- {__NANDSTR("NAND 128MiB 3,3V 8-bit"), 0x79, 512, 128, 0x4000, 0},
- {__NANDSTR("NAND 128MiB 1,8V 16-bit"), 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
- {__NANDSTR("NAND 128MiB 1,8V 16-bit"), 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16},
- {__NANDSTR("NAND 128MiB 3,3V 16-bit"), 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
- {__NANDSTR("NAND 128MiB 3,3V 16-bit"), 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+#define SP_OPTIONS NAND_NEED_READRDY
+#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
- {__NANDSTR("NAND 256MiB 3,3V 8-bit"), 0x71, 512, 256, 0x4000, 0},
+/*
+ * The chip ID list:
+ * name, device ID, page size, chip size in MiB, eraseblock size, options
+ *
+ * If page size and eraseblock size are 0, the sizes are taken from the
+ * extended chip ID.
+ */
+struct nand_flash_dev nand_flash_ids[] = {
+ /*
+ * Some incompatible NAND chips share device IDs and so must be
+ * listed by full ID. We list them first so that we can easily identify
+ * the most specific match.
+ */
+ {__STR("TC58NVG2S0F 4G 3.3V 8-bit"),
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 224},
+ {__STR("TC58NVG3S0F 8G 3.3V 8-bit"),
+ { .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
+ SZ_4K, SZ_1K, SZ_256K, 0, 8, 232},
+ {__STR("TC58NVG5D2 32G 3.3V 8-bit"),
+ { .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
+ SZ_8K, SZ_4K, SZ_1M, 0, 8, 640},
+ {__STR("TC58NVG6D2 64G 3.3V 8-bit"),
+ { .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
+ SZ_8K, SZ_8K, SZ_2M, 0, 8, 640},
+
+ LEGACY_ID_NAND(__STR("NAND 4MiB 5V 8-bit"), 0x6B, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 4MiB 3,3V 8-bit"), 0xE3, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 4MiB 3,3V 8-bit"), 0xE5, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 8MiB 3,3V 8-bit"), 0xD6, 8, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 8MiB 3,3V 8-bit"), 0xE6, 8, SZ_8K, SP_OPTIONS),
+
+ LEGACY_ID_NAND(__STR("NAND 16MiB 1,8V 8-bit"), 0x33, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 16MiB 3,3V 8-bit"), 0x73, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 16MiB 1,8V 16-bit"), 0x43, 16, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND(__STR("NAND 16MiB 3,3V 16-bit"), 0x53, 16, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND(__STR("NAND 32MiB 1,8V 8-bit"), 0x35, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 32MiB 3,3V 8-bit"), 0x75, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 32MiB 1,8V 16-bit"), 0x45, 32, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND(__STR("NAND 32MiB 3,3V 16-bit"), 0x55, 32, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND(__STR("NAND 64MiB 1,8V 8-bit"), 0x36, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 64MiB 3,3V 8-bit"), 0x76, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 64MiB 1,8V 16-bit"), 0x46, 64, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND(__STR("NAND 64MiB 3,3V 16-bit"), 0x56, 64, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND(__STR("NAND 128MiB 1,8V 8-bit"), 0x78, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 128MiB 1,8V 8-bit"), 0x39, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 128MiB 3,3V 8-bit"), 0x79, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND(__STR("NAND 128MiB 1,8V 16-bit"), 0x72, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND(__STR("NAND 128MiB 1,8V 16-bit"), 0x49, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND(__STR("NAND 128MiB 3,3V 16-bit"), 0x74, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND(__STR("NAND 128MiB 3,3V 16-bit"), 0x59, 128, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND(__STR("NAND 256MiB 3,3V 8-bit"), 0x71, 256, SZ_16K, SP_OPTIONS),
/*
- * These are the new chips with large page size. The pagesize and the
- * erasesize is determined from the extended id bytes
+ * These are the new chips with large page size. Their page size and
+ * eraseblock size are determined from the extended ID bytes.
*/
-#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
-#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
- /*512 Megabit */
- {__NANDSTR("NAND 64MiB 1,8V 8-bit"), 0xA2, 0, 64, 0, LP_OPTIONS},
- {__NANDSTR("NAND 64MiB 3,3V 8-bit"), 0xF2, 0, 64, 0, LP_OPTIONS},
- {__NANDSTR("NAND 64MiB 1,8V 16-bit"), 0xB2, 0, 64, 0, LP_OPTIONS16},
- {__NANDSTR("NAND 64MiB 3,3V 16-bit"), 0xC2, 0, 64, 0, LP_OPTIONS16},
+ /* 512 Megabit */
+ EXTENDED_ID_NAND(__STR("NAND 64MiB 1,8V 8-bit"), 0xA2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 64MiB 1,8V 8-bit"), 0xA0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 64MiB 3,3V 8-bit"), 0xF2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 64MiB 3,3V 8-bit"), 0xD0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 64MiB 3,3V 8-bit"), 0xF0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 64MiB 1,8V 16-bit"), 0xB2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 64MiB 1,8V 16-bit"), 0xB0, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 64MiB 3,3V 16-bit"), 0xC2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 64MiB 3,3V 16-bit"), 0xC0, 64, LP_OPTIONS16),
/* 1 Gigabit */
- {__NANDSTR("NAND 128MiB 1,8V 8-bit"), 0xA1, 0, 128, 0, LP_OPTIONS},
- {__NANDSTR("NAND 128MiB 3,3V 8-bit"), 0xF1, 0, 128, 0, LP_OPTIONS},
- {__NANDSTR("NAND 128MiB 1,8V 16-bit"), 0xB1, 0, 128, 0, LP_OPTIONS16},
- {__NANDSTR("NAND 128MiB 3,3V 16-bit"), 0xC1, 0, 128, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND(__STR("NAND 128MiB 1,8V 8-bit"), 0xA1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 128MiB 3,3V 8-bit"), 0xF1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 128MiB 3,3V 8-bit"), 0xD1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 128MiB 1,8V 16-bit"), 0xB1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 128MiB 3,3V 16-bit"), 0xC1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 128MiB 1,8V 16-bit"), 0xAD, 128, LP_OPTIONS16),
/* 2 Gigabit */
- {__NANDSTR("NAND 256MiB 1,8V 8-bit"), 0xAA, 0, 256, 0, LP_OPTIONS},
- {__NANDSTR("NAND 256MiB 3,3V 8-bit"), 0xDA, 0, 256, 0, LP_OPTIONS},
- {__NANDSTR("NAND 256MiB 1,8V 16-bit"), 0xBA, 0, 256, 0, LP_OPTIONS16},
- {__NANDSTR("NAND 256MiB 3,3V 16-bit"), 0xCA, 0, 256, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND(__STR("NAND 256MiB 1,8V 8-bit"), 0xAA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 256MiB 3,3V 8-bit"), 0xDA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 256MiB 1,8V 16-bit"), 0xBA, 256, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 256MiB 3,3V 16-bit"), 0xCA, 256, LP_OPTIONS16),
/* 4 Gigabit */
- {__NANDSTR("NAND 512MiB 1,8V 8-bit"), 0xAC, 0, 512, 0, LP_OPTIONS},
- {__NANDSTR("NAND 512MiB 3,3V 8-bit"), 0xDC, 0, 512, 0, LP_OPTIONS},
- {__NANDSTR("NAND 512MiB 1,8V 16-bit"), 0xBC, 0, 512, 0, LP_OPTIONS16},
- {__NANDSTR("NAND 512MiB 3,3V 16-bit"), 0xCC, 0, 512, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND(__STR("NAND 512MiB 1,8V 8-bit"), 0xAC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 512MiB 3,3V 8-bit"), 0xDC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 512MiB 1,8V 16-bit"), 0xBC, 512, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 512MiB 3,3V 16-bit"), 0xCC, 512, LP_OPTIONS16),
/* 8 Gigabit */
- {__NANDSTR("NAND 1GiB 1,8V 8-bit"), 0xA3, 0, 1024, 0, LP_OPTIONS},
- {__NANDSTR("NAND 1GiB 3,3V 8-bit"), 0xD3, 0, 1024, 0, LP_OPTIONS},
- {__NANDSTR("NAND 1GiB 1,8V 16-bit"), 0xB3, 0, 1024, 0, LP_OPTIONS16},
- {__NANDSTR("NAND 1GiB 3,3V 16-bit"), 0xC3, 0, 1024, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND(__STR("NAND 1GiB 1,8V 8-bit"), 0xA3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 1GiB 3,3V 8-bit"), 0xD3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 1GiB 1,8V 16-bit"), 0xB3, 1024, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 1GiB 3,3V 16-bit"), 0xC3, 1024, LP_OPTIONS16),
/* 16 Gigabit */
- {__NANDSTR("NAND 2GiB 1,8V 8-bit"), 0xA5, 0, 2048, 0, LP_OPTIONS},
- {__NANDSTR("NAND 2GiB 3,3V 8-bit"), 0xD5, 0, 2048, 0, LP_OPTIONS},
- {__NANDSTR("NAND 2GiB 1,8V 16-bit"), 0xB5, 0, 2048, 0, LP_OPTIONS16},
- {__NANDSTR("NAND 2GiB 3,3V 16-bit"), 0xC5, 0, 2048, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND(__STR("NAND 2GiB 1,8V 8-bit"), 0xA5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 2GiB 3,3V 8-bit"), 0xD5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 2GiB 1,8V 16-bit"), 0xB5, 2048, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 2GiB 3,3V 16-bit"), 0xC5, 2048, LP_OPTIONS16),
/* 32 Gigabit */
- {__NANDSTR("NAND 4GiB 1,8V 8-bit"), 0xA7, 0, 4096, 0, LP_OPTIONS},
- {__NANDSTR("NAND 4GiB 3,3V 8-bit"), 0xD7, 0, 4096, 0, LP_OPTIONS},
- {__NANDSTR("NAND 4GiB 1,8V 16-bit"), 0xB7, 0, 4096, 0, LP_OPTIONS16},
- {__NANDSTR("NAND 4GiB 3,3V 16-bit"), 0xC7, 0, 4096, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND(__STR("NAND 4GiB 1,8V 8-bit"), 0xA7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 4GiB 3,3V 8-bit"), 0xD7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 4GiB 1,8V 16-bit"), 0xB7, 4096, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 4GiB 3,3V 16-bit"), 0xC7, 4096, LP_OPTIONS16),
/* 64 Gigabit */
- {__NANDSTR("NAND 8GiB 1,8V 8-bit"), 0xAE, 0, 8192, 0, LP_OPTIONS},
- {__NANDSTR("NAND 8GiB 3,3V 8-bit"), 0xDE, 0, 8192, 0, LP_OPTIONS},
- {__NANDSTR("NAND 8GiB 1,8V 16-bit"), 0xBE, 0, 8192, 0, LP_OPTIONS16},
- {__NANDSTR("NAND 8GiB 3,3V 16-bit"), 0xCE, 0, 8192, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND(__STR("NAND 8GiB 1,8V 8-bit"), 0xAE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 8GiB 3,3V 8-bit"), 0xDE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 8GiB 1,8V 16-bit"), 0xBE, 8192, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 8GiB 3,3V 16-bit"), 0xCE, 8192, LP_OPTIONS16),
/* 128 Gigabit */
- {__NANDSTR("NAND 16GiB 1,8V 8-bit"), 0x1A, 0, 16384, 0, LP_OPTIONS},
- {__NANDSTR("NAND 16GiB 3,3V 8-bit"), 0x3A, 0, 16384, 0, LP_OPTIONS},
- {__NANDSTR("NAND 16GiB 1,8V 16-bit"), 0x2A, 0, 16384, 0, LP_OPTIONS16},
- {__NANDSTR("NAND 16GiB 3,3V 16-bit"), 0x4A, 0, 16384, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND(__STR("NAND 16GiB 1,8V 8-bit"), 0x1A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 16GiB 3,3V 8-bit"), 0x3A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 16GiB 1,8V 16-bit"), 0x2A, 16384, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 16GiB 3,3V 16-bit"), 0x4A, 16384, LP_OPTIONS16),
/* 256 Gigabit */
- {__NANDSTR("NAND 32GiB 1,8V 8-bit"), 0x1C, 0, 32768, 0, LP_OPTIONS},
- {__NANDSTR("NAND 32GiB 3,3V 8-bit"), 0x3C, 0, 32768, 0, LP_OPTIONS},
- {__NANDSTR("NAND 32GiB 1,8V 16-bit"), 0x2C, 0, 32768, 0, LP_OPTIONS16},
- {__NANDSTR("NAND 32GiB 3,3V 16-bit"), 0x4C, 0, 32768, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND(__STR("NAND 32GiB 1,8V 8-bit"), 0x1C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 32GiB 3,3V 8-bit"), 0x3C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 32GiB 1,8V 16-bit"), 0x2C, 32768, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 32GiB 3,3V 16-bit"), 0x4C, 32768, LP_OPTIONS16),
/* 512 Gigabit */
- {__NANDSTR("NAND 64GiB 1,8V 8-bit"), 0x1E, 0, 65536, 0, LP_OPTIONS},
- {__NANDSTR("NAND 64GiB 3,3V 8-bit"), 0x3E, 0, 65536, 0, LP_OPTIONS},
- {__NANDSTR("NAND 64GiB 1,8V 16-bit"), 0x2E, 0, 65536, 0, LP_OPTIONS16},
- {__NANDSTR("NAND 64GiB 3,3V 16-bit"), 0x4E, 0, 65536, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND(__STR("NAND 64GiB 1,8V 8-bit"), 0x1E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 64GiB 3,3V 8-bit"), 0x3E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND(__STR("NAND 64GiB 1,8V 16-bit"), 0x2E, 65536, LP_OPTIONS16),
+ EXTENDED_ID_NAND(__STR("NAND 64GiB 3,3V 16-bit"), 0x4E, 65536, LP_OPTIONS16),
- /*
- * Renesas AND 1 Gigabit. Those chips do not support extended id and
- * have a strange page/block layout ! The chosen minimum erasesize is
- * 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page
- * planes 1 block = 2 pages, but due to plane arrangement the blocks
- * 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 Anyway JFFS2 would
- * increase the eraseblock size so we chose a combined one which can be
- * erased in one go There are more speed improvements for reads and
- * writes possible, but not implemented now
- */
- {__NANDSTR("AND 128MiB 3,3V 8-bit"), 0x01, 2048, 128, 0x4000,
- NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
- BBT_AUTO_REFRESH
- },
-
- {NULL,}
+ {NULL}
};
-/*
-* Manufacturer ID list
-*/
+/* Manufacturer IDs */
struct nand_manufacturers nand_manuf_ids[] = {
- {NAND_MFR_TOSHIBA, __NANDSTR("Toshiba")},
- {NAND_MFR_SAMSUNG, __NANDSTR("Samsung")},
- {NAND_MFR_FUJITSU, __NANDSTR("Fujitsu")},
- {NAND_MFR_NATIONAL, __NANDSTR("National")},
- {NAND_MFR_RENESAS, __NANDSTR("Renesas")},
- {NAND_MFR_STMICRO, __NANDSTR("ST Micro")},
- {NAND_MFR_HYNIX, __NANDSTR("Hynix")},
- {NAND_MFR_MICRON, __NANDSTR("Micron")},
- {NAND_MFR_AMD, __NANDSTR("AMD/Spansion")},
- {NAND_MFR_MACRONIX, __NANDSTR("Macronix")},
- {NAND_MFR_EON, __NANDSTR("Eon")},
+ {NAND_MFR_TOSHIBA, "Toshiba"},
+ {NAND_MFR_SAMSUNG, "Samsung"},
+ {NAND_MFR_FUJITSU, "Fujitsu"},
+ {NAND_MFR_NATIONAL, "National"},
+ {NAND_MFR_RENESAS, "Renesas"},
+ {NAND_MFR_STMICRO, "ST Micro"},
+ {NAND_MFR_HYNIX, "Hynix"},
+ {NAND_MFR_MICRON, "Micron"},
+ {NAND_MFR_AMD, "AMD/Spansion"},
+ {NAND_MFR_MACRONIX, "Macronix"},
+ {NAND_MFR_EON, "Eon"},
{0x0, "Unknown"}
};
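
Ordering matters in the table above because lookup stops at the first hit;
a simplified, illustrative sketch (not the in-tree matcher) of why the
8-byte full-ID entries must precede the one-byte legacy IDs:

#include <stdio.h>
#include <string.h>

struct flash_dev {
	unsigned char id[8];
	unsigned int id_len;	/* 8 for full-ID entries, 1 for legacy */
};

static int find_dev(const struct flash_dev *tbl, int n,
		    const unsigned char *id)
{
	int i;

	for (i = 0; i < n; i++) {
		/* legacy entries key on the device-ID byte, id[1] */
		const unsigned char *p = tbl[i].id_len == 8 ? id : id + 1;

		if (!memcmp(tbl[i].id, p, tbl[i].id_len))
			return i;	/* first (most specific) hit */
	}
	return -1;
}

int main(void)
{
	static const struct flash_dev tbl[] = {
		/* TC58NVG2S0F, listed by full ID in the table above */
		{ { 0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08 }, 8 },
		{ { 0xdc }, 1 },	/* generic 0xDC device ID */
	};
	const unsigned char id[] =
		{ 0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08 };

	printf("matched entry %d\n", find_dev(tbl, 2, id)); /* prints 0 */
	return 0;
}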
diff --git a/drivers/mtd/nand/nand_imx.c b/drivers/mtd/nand/nand_imx.c
index 75aefd6e86..965b41e866 100644
--- a/drivers/mtd/nand/nand_imx.c
+++ b/drivers/mtd/nand/nand_imx.c
@@ -27,6 +27,7 @@
#include <mach/generic.h>
#include <mach/imx-nand.h>
#include <io.h>
+#include <of_mtd.h>
#include <errno.h>
#define NFC_V3_FLASH_CMD (host->regs_axi + 0x00)
@@ -100,6 +101,10 @@ struct imx_nand_host {
int spare_len;
int eccsize;
+ int hw_ecc;
+ int data_width;
+ int flash_bbt;
+
void (*preset)(struct mtd_info *);
void (*send_cmd)(struct imx_nand_host *, uint16_t);
void (*send_addr)(struct imx_nand_host *, uint16_t);
@@ -682,23 +687,6 @@ static void copy_spare(struct mtd_info *mtd, int bfrom)
}
/*
- * This function is used by the upper layer to verify the data in NAND Flash
- * with the data in the \b buf.
- *
- * @param mtd MTD structure for the NAND Flash
- * @param buf data to be verified
- * @param len length of the data to be verified
- *
- * @return -EFAULT if error else 0
- *
- */
-static int
-imx_nand_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
-{
- return -EFAULT;
-}
-
-/*
* This function is used by upper layer for select and deselect of the NAND
* chip
*
@@ -1087,6 +1075,31 @@ static struct nand_bbt_descr bbt_mirror_descr = {
.pattern = mirror_pattern,
};
+static int __init mxcnd_probe_dt(struct imx_nand_host *host)
+{
+ struct device_node *np = host->dev->device_node;
+ int buswidth;
+
+ if (!IS_ENABLED(CONFIG_OFDEVICE))
+ return 1;
+
+ if (!np)
+ return 1;
+
+ if (of_get_nand_ecc_mode(np) == NAND_ECC_HW)
+ host->hw_ecc = 1;
+
+ host->flash_bbt = of_get_nand_on_flash_bbt(np);
+
+ buswidth = of_get_nand_bus_width(np);
+ if (buswidth < 0)
+ return buswidth;
+
+ host->data_width = buswidth / 8;
+
+ return 0;
+}
+
/*
* This function is called during the driver binding process.
*
@@ -1101,7 +1114,6 @@ static int __init imxnd_probe(struct device_d *dev)
{
struct nand_chip *this;
struct mtd_info *mtd;
- struct imx_nand_platform_data *pdata = dev->platform_data;
struct imx_nand_host *host;
struct nand_ecclayout *oob_smallpage, *oob_largepage, *oob_4kpage;
int err = 0;
@@ -1112,6 +1124,21 @@ static int __init imxnd_probe(struct device_d *dev)
if (!host)
return -ENOMEM;
+ host->dev = dev;
+
+ err = mxcnd_probe_dt(host);
+ if (err < 0)
+ goto escan;
+
+ if (err > 0) {
+ struct imx_nand_platform_data *pdata;
+
+ pdata = dev->platform_data;
+ host->flash_bbt = pdata->flash_bbt;
+ host->data_width = pdata->width;
+ host->hw_ecc = pdata->hw_ecc;
+ }
+
host->data_buf = (uint8_t *)(host + 1);
if (nfc_is_v1() || nfc_is_v21()) {
@@ -1173,7 +1200,6 @@ static int __init imxnd_probe(struct device_d *dev)
goto escan;
}
- host->dev = dev;
/* structures must be linked */
this = &host->nand;
mtd = &host->mtd;
@@ -1192,9 +1218,8 @@ static int __init imxnd_probe(struct device_d *dev)
this->read_word = imx_nand_read_word;
this->write_buf = imx_nand_write_buf;
this->read_buf = imx_nand_read_buf;
- this->verify_buf = imx_nand_verify_buf;
- if (pdata->hw_ecc) {
+ if (host->hw_ecc) {
this->ecc.calculate = imx_nand_calculate_ecc;
this->ecc.hwctl = imx_nand_enable_hwecc;
if (nfc_is_v1())
@@ -1211,13 +1236,13 @@ static int __init imxnd_probe(struct device_d *dev)
this->ecc.layout = oob_smallpage;
/* NAND bus width determines access functions used by upper layer */
- if (pdata->width == 2) {
+ if (host->data_width == 2) {
this->options |= NAND_BUSWIDTH_16;
this->ecc.layout = &nandv1_hw_eccoob_smallpage;
imx_nand_set_layout(0, 16);
}
- if (pdata->flash_bbt) {
+ if (host->flash_bbt) {
this->bbt_td = &bbt_main_descr;
this->bbt_md = &bbt_mirror_descr;
/* update flash based bbt */
@@ -1225,7 +1250,7 @@ static int __init imxnd_probe(struct device_d *dev)
}
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
err = -ENXIO;
goto escan;
}
@@ -1233,10 +1258,10 @@ static int __init imxnd_probe(struct device_d *dev)
/* Call preset again, with correct writesize this time */
host->preset(mtd);
- imx_nand_set_layout(mtd->writesize, pdata->width == 2 ? 16 : 8);
+ imx_nand_set_layout(mtd->writesize, host->data_width == 2 ? 16 : 8);
if (mtd->writesize >= 2048) {
- if (!pdata->flash_bbt)
+ if (!host->flash_bbt)
dev_warn(dev, "2k or 4k flash detected without flash_bbt. "
"You will loose factory bad block markers!\n");
@@ -1255,13 +1280,16 @@ static int __init imxnd_probe(struct device_d *dev)
writew(NFC_V2_SPAS_SPARESIZE(16), host->regs + NFC_V2_SPAS);
}
+ if (this->ecc.mode == NAND_ECC_HW)
+ this->ecc.strength = host->eccsize;
+
/* second phase scan */
if (nand_scan_tail(mtd)) {
err = -ENXIO;
goto escan;
}
- if (pdata->flash_bbt && this->bbt_td->pages[0] == -1 && this->bbt_md->pages[0] == -1) {
+ if (host->flash_bbt && this->bbt_td->pages[0] == -1 && this->bbt_md->pages[0] == -1) {
dev_warn(dev, "no BBT found. create one using the imx_nand_bbm command\n");
} else {
bbt_main_descr.options |= NAND_BBT_WRITE | NAND_BBT_CREATE;
@@ -1281,9 +1309,26 @@ escan:
}
+static __maybe_unused struct of_device_id imx_nand_compatible[] = {
+ {
+ .compatible = "fsl,imx21-nand",
+ }, {
+ .compatible = "fsl,imx25-nand",
+ }, {
+ .compatible = "fsl,imx27-nand",
+ }, {
+ .compatible = "fsl,imx51-nand",
+ }, {
+ .compatible = "fsl,imx53-nand",
+ }, {
+ /* sentinel */
+ }
+};
+
static struct driver_d imx_nand_driver = {
.name = "imx_nand",
.probe = imxnd_probe,
+ .of_compatible = DRV_OF_COMPAT(imx_nand_compatible),
};
device_platform_driver(imx_nand_driver);
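
A standalone sketch of the tri-state probe convention used by
mxcnd_probe_dt() above (<0 error, 0 device tree handled it, >0 fall back to
platform_data); names and values here are illustrative only:

#include <stdio.h>

static int probe_dt(int have_node)
{
	if (!have_node)
		return 1;	/* caller should use platform_data */
	/* ... parse nand-bus-width, nand-on-flash-bbt, ECC mode ... */
	return 0;
}

int main(void)
{
	int err = probe_dt(0);

	if (err < 0)
		return err;	/* hard DT parse error */
	if (err > 0)
		printf("falling back to platform_data\n");
	else
		printf("configured from device tree\n");
	return 0;
}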
diff --git a/drivers/mtd/nand/nand_mxs.c b/drivers/mtd/nand/nand_mxs.c
index 56d5ecfe26..8e5c83cef3 100644
--- a/drivers/mtd/nand/nand_mxs.c
+++ b/drivers/mtd/nand/nand_mxs.c
@@ -23,17 +23,16 @@
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <of_mtd.h>
#include <common.h>
#include <malloc.h>
#include <errno.h>
#include <driver.h>
#include <init.h>
+#include <io.h>
+#include <dma/apbh-dma.h>
+#include <stmp-device.h>
#include <asm/mmu.h>
-#include <asm/io.h>
-#include <mach/clock.h>
-#include <mach/imx-regs.h>
-#include <mach/dma.h>
-#include <mach/mxs.h>
#define MX28_BLOCK_SFTRST (1 << 31)
#define MX28_BLOCK_CLKGATE (1 << 30)
@@ -123,12 +122,14 @@
#define BCH_FLASHLAYOUT0_META_SIZE_OFFSET 16
#define BCH_FLASHLAYOUT0_ECC0_MASK (0xf << 12)
#define BCH_FLASHLAYOUT0_ECC0_OFFSET 12
+#define IMX6_BCH_FLASHLAYOUT0_ECC0_OFFSET 11
#define BCH_FLASH0LAYOUT1 0x00000090
#define BCH_FLASHLAYOUT1_PAGE_SIZE_MASK (0xffff << 16)
#define BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET 16
#define BCH_FLASHLAYOUT1_ECCN_MASK (0xf << 12)
#define BCH_FLASHLAYOUT1_ECCN_OFFSET 12
+#define IMX6_BCH_FLASHLAYOUT1_ECCN_OFFSET 11
#define MXS_NAND_DMA_DESCRIPTOR_COUNT 4
@@ -139,11 +140,19 @@
#define MXS_NAND_BCH_TIMEOUT 10000
+enum gpmi_type {
+ GPMI_MXS,
+ GPMI_IMX6,
+};
+
struct mxs_nand_info {
struct nand_chip nand_chip;
void __iomem *io_base;
+ void __iomem *bch_base;
struct clk *clk;
struct mtd_info mtd;
+ enum gpmi_type type;
+ int dma_channel_base;
u32 version;
int cur_chip;
@@ -172,6 +181,11 @@ struct mxs_nand_info {
struct nand_ecclayout fake_ecc_layout;
+static inline int mxs_nand_is_imx6(struct mxs_nand_info *info)
+{
+ return info->type == GPMI_IMX6;
+}
+
static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
struct mxs_dma_desc *desc;
@@ -299,9 +313,9 @@ static uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
/*
* Wait for BCH complete IRQ and clear the IRQ
*/
-static int mxs_nand_wait_for_bch_complete(void)
+static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
{
- void __iomem *bch_regs = (void __iomem *)MXS_BCH_BASE;
+ void __iomem *bch_regs = nand_info->bch_base;
int timeout = MXS_NAND_BCH_TIMEOUT;
int ret;
@@ -313,7 +327,7 @@ static int mxs_nand_wait_for_bch_complete(void)
ret = (timeout == 0) ? -ETIMEDOUT : 0;
- writel(BCH_CTRL_COMPLETE_IRQ, bch_regs + BCH_CTRL + BIT_CLR);
+ writel(BCH_CTRL_COMPLETE_IRQ, bch_regs + BCH_CTRL + STMP_OFFSET_REG_CLR);
return ret;
}
@@ -333,7 +347,7 @@ static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
struct nand_chip *nand = mtd->priv;
struct mxs_nand_info *nand_info = nand->priv;
struct mxs_dma_desc *d;
- uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
+ uint32_t channel = nand_info->dma_channel_base + nand_info->cur_chip;
int ret;
/*
@@ -408,7 +422,7 @@ static int mxs_nand_device_ready(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
struct mxs_nand_info *nand_info = chip->priv;
- void __iomem *gpmi_regs = (void *)MXS_GPMI_BASE;
+ void __iomem *gpmi_regs = nand_info->io_base;
uint32_t tmp;
if (nand_info->version > GPMI_VERSION_TYPE_MX23) {
@@ -486,7 +500,7 @@ static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
struct nand_chip *nand = mtd->priv;
struct mxs_nand_info *nand_info = nand->priv;
struct mxs_dma_desc *d;
- uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
+ uint32_t channel = nand_info->dma_channel_base + nand_info->cur_chip;
int ret;
if (length > NAND_MAX_PAGESIZE) {
@@ -564,7 +578,7 @@ static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
struct nand_chip *nand = mtd->priv;
struct mxs_nand_info *nand_info = nand->priv;
struct mxs_dma_desc *d;
- uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
+ uint32_t channel = nand_info->dma_channel_base + nand_info->cur_chip;
int ret;
if (length > NAND_MAX_PAGESIZE) {
@@ -620,11 +634,11 @@ static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
* Read a page from NAND.
*/
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
- uint8_t *buf)
+ uint8_t *buf, int oob_required, int page)
{
struct mxs_nand_info *nand_info = nand->priv;
struct mxs_dma_desc *d;
- uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
+ uint32_t channel = nand_info->dma_channel_base + nand_info->cur_chip;
uint32_t corrected = 0, failed = 0;
uint8_t *status;
int i, ret;
@@ -708,7 +722,7 @@ static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
goto rtn;
}
- ret = mxs_nand_wait_for_bch_complete();
+ ret = mxs_nand_wait_for_bch_complete(nand_info);
if (ret) {
printf("MXS NAND: BCH read timeout\n");
goto rtn;
@@ -762,13 +776,14 @@ rtn:
/*
* Write a page to NAND.
*/
-static void mxs_nand_ecc_write_page(struct mtd_info *mtd,
- struct nand_chip *nand, const uint8_t *buf)
+static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *nand, const uint8_t *buf,
+ int oob_required)
{
struct mxs_nand_info *nand_info = nand->priv;
struct mxs_dma_desc *d;
- uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
- int ret;
+ uint32_t channel = nand_info->dma_channel_base + nand_info->cur_chip;
+ int ret = 0;
memcpy(nand_info->data_buf, buf, mtd->writesize);
memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);
@@ -808,7 +823,7 @@ static void mxs_nand_ecc_write_page(struct mtd_info *mtd,
goto rtn;
}
- ret = mxs_nand_wait_for_bch_complete();
+ ret = mxs_nand_wait_for_bch_complete(nand_info);
if (ret) {
printf("MXS NAND: BCH write timeout\n");
goto rtn;
@@ -816,6 +831,8 @@ static void mxs_nand_ecc_write_page(struct mtd_info *mtd,
rtn:
mxs_nand_return_dma_descs(nand_info);
+
+ return ret;
}
/*
@@ -831,7 +848,7 @@ static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
struct mxs_nand_info *nand_info = chip->priv;
int ret;
- if (ops->mode == MTD_OOB_RAW)
+ if (ops->mode == MTD_OPS_RAW)
nand_info->raw_oob_mode = 1;
else
nand_info->raw_oob_mode = 0;
@@ -856,7 +873,7 @@ static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
struct mxs_nand_info *nand_info = chip->priv;
int ret;
- if (ops->mode == MTD_OOB_RAW)
+ if (ops->mode == MTD_OPS_RAW)
nand_info->raw_oob_mode = 1;
else
nand_info->raw_oob_mode = 0;
@@ -934,7 +951,7 @@ static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
* what to do.
*/
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
- int page, int cmd)
+ int page)
{
struct mxs_nand_info *nand_info = nand->priv;
int column;
@@ -1040,37 +1057,52 @@ static int mxs_nand_scan_bbt(struct mtd_info *mtd)
{
struct nand_chip *nand = mtd->priv;
struct mxs_nand_info *nand_info = nand->priv;
- void __iomem *bch_regs = (void __iomem *)MXS_BCH_BASE;
- uint32_t tmp;
+ void __iomem *bch_regs = nand_info->bch_base;
+ uint32_t layout0, layout1;
int ret;
/* Reset BCH. Don't use SFTRST on MX23 due to Errata #2847 */
- ret = mxs_reset_block(bch_regs + BCH_CTRL,
+ ret = stmp_reset_block(bch_regs + BCH_CTRL,
nand_info->version == GPMI_VERSION_TYPE_MX23);
if (ret)
return ret;
- /* Configure layout 0 */
- tmp = (mxs_nand_ecc_chunk_cnt(mtd->writesize) - 1)
- << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
- tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
- tmp |= (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
- << BCH_FLASHLAYOUT0_ECC0_OFFSET;
- tmp |= MXS_NAND_CHUNK_DATA_CHUNK_SIZE;
- writel(tmp, bch_regs + BCH_FLASH0LAYOUT0);
-
- tmp = (mtd->writesize + mtd->oobsize)
- << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
- tmp |= (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
- << BCH_FLASHLAYOUT1_ECCN_OFFSET;
- tmp |= MXS_NAND_CHUNK_DATA_CHUNK_SIZE;
- writel(tmp, bch_regs + BCH_FLASH0LAYOUT1);
+ if (mxs_nand_is_imx6(nand_info)) {
+ layout0 = (mxs_nand_ecc_chunk_cnt(mtd->writesize) - 1)
+ << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET |
+ MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET |
+ (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
+ << IMX6_BCH_FLASHLAYOUT0_ECC0_OFFSET |
+ MXS_NAND_CHUNK_DATA_CHUNK_SIZE >> 2;
+
+ layout1 = (mtd->writesize + mtd->oobsize)
+ << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET |
+ (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
+ << IMX6_BCH_FLASHLAYOUT1_ECCN_OFFSET |
+ MXS_NAND_CHUNK_DATA_CHUNK_SIZE >> 2;
+ } else {
+ layout0 = (mxs_nand_ecc_chunk_cnt(mtd->writesize) - 1)
+ << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET |
+ MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET |
+ (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
+ << BCH_FLASHLAYOUT0_ECC0_OFFSET |
+ MXS_NAND_CHUNK_DATA_CHUNK_SIZE;
+
+ layout1 = (mtd->writesize + mtd->oobsize)
+ << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET |
+ (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
+ << BCH_FLASHLAYOUT1_ECCN_OFFSET |
+ MXS_NAND_CHUNK_DATA_CHUNK_SIZE;
+ }
+
+ writel(layout0, bch_regs + BCH_FLASH0LAYOUT0);
+ writel(layout1, bch_regs + BCH_FLASH0LAYOUT1);
/* Set *all* chip selects to use layout 0 */
writel(0, bch_regs + BCH_LAYOUTSELECT);
/* Enable BCH complete interrupt */
- writel(BCH_CTRL_COMPLETE_IRQ_EN, bch_regs + BCH_CTRL + BIT_SET);
+ writel(BCH_CTRL_COMPLETE_IRQ_EN, bch_regs + BCH_CTRL + STMP_OFFSET_REG_SET);
/* Hook some operations at the MTD level. */
if (mtd->read_oob != mxs_nand_hook_read_oob) {
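
The two branches program the same fields; on i.MX6 the ECC strength sits at a different offset and the data chunk size is given in 32-bit words rather than bytes, hence the >> 2. A worked example, assuming the driver's usual constants (MXS_NAND_CHUNK_DATA_CHUNK_SIZE = 512, MXS_NAND_METADATA_SIZE = 10) and a 2048+64 page, which matches the ecc.strength = 8 default set in the probe further down:

    /*
     * chunks           = 2048 / 512 = 4    ->  NBLOCKS field   = 3
     * ecc strength     = 8                 ->  ECC0/ECCN field = 8 >> 1 = 4
     * i.MX6 chunk size = 512 >> 2   = 128  (in 32-bit words)
     * MXS chunk size   = 512              (in bytes)
     */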
@@ -1130,8 +1162,8 @@ int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
*/
int mxs_nand_hw_init(struct mxs_nand_info *info)
{
- void __iomem *gpmi_regs = (void *)MXS_GPMI_BASE;
- void __iomem *bch_regs = (void __iomem *)MXS_BCH_BASE;
+ void __iomem *gpmi_regs = info->io_base;
+ void __iomem *bch_regs = info->bch_base;
int i = 0, ret;
u32 val;
@@ -1147,11 +1179,8 @@ int mxs_nand_hw_init(struct mxs_nand_info *info)
goto err2;
}
- /* Init the DMA controller. */
- mxs_dma_init();
-
/* Reset the GPMI block. */
- ret = mxs_reset_block(gpmi_regs + GPMI_CTRL0, 0);
+ ret = stmp_reset_block(gpmi_regs + GPMI_CTRL0, 0);
if (ret)
return ret;
@@ -1159,7 +1188,7 @@ int mxs_nand_hw_init(struct mxs_nand_info *info)
info->version = val >> GPMI_VERSION_MINOR_OFFSET;
/* Reset BCH. Don't use SFTRST on MX23 due to Errata #2847 */
- ret = mxs_reset_block(bch_regs + BCH_CTRL,
+ ret = stmp_reset_block(bch_regs + BCH_CTRL,
info->version == GPMI_VERSION_TYPE_MX23);
if (ret)
return ret;
@@ -1185,27 +1214,52 @@ err1:
return -ENOMEM;
}
+static void mxs_nand_probe_dt(struct device_d *dev, struct mxs_nand_info *nand_info)
+{
+ struct nand_chip *nand = &nand_info->nand_chip;
+
+ if (!IS_ENABLED(CONFIG_OFTREE))
+ return;
+
+ if (of_get_nand_on_flash_bbt(dev->device_node))
+ nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+}
+
static int mxs_nand_probe(struct device_d *dev)
{
struct mxs_nand_info *nand_info;
struct nand_chip *nand;
struct mtd_info *mtd;
+ enum gpmi_type type;
int err;
+ err = dev_get_drvdata(dev, (unsigned long *)&type);
+ if (err)
+ type = GPMI_MXS;
+
nand_info = kzalloc(sizeof(struct mxs_nand_info), GFP_KERNEL);
if (!nand_info) {
printf("MXS NAND: Failed to allocate private data\n");
return -ENOMEM;
}
- /* XXX: Remove u-boot specific access pointers and use io_base instead? */
+ mxs_nand_probe_dt(dev, nand_info);
+
+ nand_info->type = type;
nand_info->io_base = dev_request_mem_region(dev, 0);
+ nand_info->bch_base = dev_request_mem_region(dev, 1);
nand_info->clk = clk_get(dev, NULL);
if (IS_ERR(nand_info->clk))
return PTR_ERR(nand_info->clk);
- clk_enable(nand_info->clk);
+ if (mxs_nand_is_imx6(nand_info)) {
+ clk_set_rate(nand_info->clk, 96000000);
+ clk_enable(nand_info->clk);
+ nand_info->dma_channel_base = 0;
+ } else {
+ nand_info->dma_channel_base = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
+ }
err = mxs_nand_alloc_buffers(nand_info);
if (err)
@@ -1246,9 +1300,10 @@ static int mxs_nand_probe(struct device_d *dev)
nand->ecc.mode = NAND_ECC_HW;
nand->ecc.bytes = 9;
nand->ecc.size = 512;
+ nand->ecc.strength = 8;
/* first scan to find the device and get the page size */
- err = nand_scan_ident(mtd, 1);
+ err = nand_scan_ident(mtd, 1, NULL);
if (err)
goto err2;
@@ -1268,9 +1323,25 @@ err1:
return err;
}
+static __maybe_unused struct of_device_id gpmi_dt_ids[] = {
+ {
+ .compatible = "fsl,imx23-gpmi-nand",
+ .data = GPMI_MXS,
+ }, {
+ .compatible = "fsl,imx28-gpmi-nand",
+ .data = GPMI_MXS,
+ }, {
+ .compatible = "fsl,imx6q-gpmi-nand",
+ .data = GPMI_IMX6,
+ }, {
+ /* sentinel */
+ }
+};
+
static struct driver_d mxs_nand_driver = {
.name = "mxs_nand",
.probe = mxs_nand_probe,
+ .of_compatible = DRV_OF_COMPAT(gpmi_dt_ids),
};
device_platform_driver(mxs_nand_driver);
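
The .data member of the matched gpmi_dt_ids entry comes back through dev_get_drvdata(), which is why the probe can fall back to GPMI_MXS when no driver data is attached (legacy, non-DT platform devices). A minimal sketch of the lookup, mirroring the probe code:

    enum gpmi_type type;

    /* .data of the matched of_device_id entry, if probed via DT */
    if (dev_get_drvdata(dev, (unsigned long *)&type))
        type = GPMI_MXS;        /* no match data: assume MXS-style GPMI */

    if (type == GPMI_IMX6) {
        /* 96 MHz GPMI clock; APBH DMA channels start at 0 */
    }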
diff --git a/drivers/mtd/nand/nand_omap_gpmc.c b/drivers/mtd/nand/nand_omap_gpmc.c
index 8168193156..448e7b9797 100644
--- a/drivers/mtd/nand/nand_omap_gpmc.c
+++ b/drivers/mtd/nand/nand_omap_gpmc.c
@@ -85,6 +85,9 @@
#define GPMC_ECC_SIZE_CONFIG_ECCSIZE0(x) ((x) << 12)
#define GPMC_ECC_SIZE_CONFIG_ECCSIZE1(x) ((x) << 22)
+#define BCH8_MAX_ERROR 8 /* up to 8 bits correctable */
+#define BCH4_MAX_ERROR 4 /* up to 4 bits correctable */
+
int omap_gpmc_decode_bch(int select_4_8, unsigned char *ecc, unsigned int *err_loc);
static char *ecc_mode_strings[] = {
@@ -694,7 +697,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
* generate dummy eccs for the unprotected oob area.
*/
static int omap_gpmc_read_page_bch_rom_mode(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf)
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
struct gpmc_nand_info *oinfo = chip->priv;
int dev_width = chip->options & NAND_BUSWIDTH_16 ? GPMC_ECC_CONFIG_ECC16B : 0;
@@ -785,6 +788,7 @@ static int omap_gpmc_eccmode(struct gpmc_nand_info *oinfo,
case OMAP_ECC_HAMMING_CODE_HW_ROMCODE:
oinfo->nand.ecc.bytes = 3;
oinfo->nand.ecc.size = 512;
+ oinfo->nand.ecc.strength = 1;
oinfo->ecc_parity_pairs = 12;
if (oinfo->nand.options & NAND_BUSWIDTH_16) {
offset = 2;
@@ -802,6 +806,7 @@ static int omap_gpmc_eccmode(struct gpmc_nand_info *oinfo,
case OMAP_ECC_BCH4_CODE_HW:
oinfo->nand.ecc.bytes = 4 * 7;
oinfo->nand.ecc.size = 4 * 512;
+ oinfo->nand.ecc.strength = BCH4_MAX_ERROR;
omap_oobinfo.oobfree->offset = offset;
omap_oobinfo.oobfree->length = minfo->oobsize -
offset - omap_oobinfo.eccbytes;
@@ -812,6 +817,7 @@ static int omap_gpmc_eccmode(struct gpmc_nand_info *oinfo,
case OMAP_ECC_BCH8_CODE_HW:
oinfo->nand.ecc.bytes = 4 * 13;
oinfo->nand.ecc.size = 4 * 512;
+ oinfo->nand.ecc.strength = BCH8_MAX_ERROR;
omap_oobinfo.oobfree->offset = offset;
omap_oobinfo.oobfree->length = minfo->oobsize -
offset - omap_oobinfo.eccbytes;
@@ -822,6 +828,7 @@ static int omap_gpmc_eccmode(struct gpmc_nand_info *oinfo,
case OMAP_ECC_BCH8_CODE_HW_ROMCODE:
oinfo->nand.ecc.bytes = 4 * 13;
oinfo->nand.ecc.size = 4 * 512;
+ oinfo->nand.ecc.strength = BCH8_MAX_ERROR;
nand->ecc.read_page = omap_gpmc_read_page_bch_rom_mode;
omap_oobinfo.oobfree->length = 0;
j = 0;
@@ -837,6 +844,7 @@ static int omap_gpmc_eccmode(struct gpmc_nand_info *oinfo,
case OMAP_ECC_SOFT:
nand->ecc.layout = NULL;
nand->ecc.mode = NAND_ECC_SOFT;
+ oinfo->nand.ecc.strength = 1;
break;
default:
return -EINVAL;
@@ -878,7 +886,7 @@ static int omap_gpmc_eccmode_set(struct device_d *dev, struct param_d *param, co
return omap_gpmc_eccmode(oinfo, i);
}
-static int gpmc_set_buswidth(struct mtd_info *mtd, struct nand_chip *chip, int buswidth)
+static int gpmc_set_buswidth(struct nand_chip *chip, int buswidth)
{
struct gpmc_nand_info *oinfo = chip->priv;
@@ -999,8 +1007,6 @@ static int gpmc_nand_probe(struct device_d *pdev)
nand->options |= NAND_OWN_BUFFERS;
nand->buffers = xzalloc(sizeof(*nand->buffers));
- nand->set_buswidth = gpmc_set_buswidth;
-
/* State my controller */
nand->controller = &oinfo->controller;
@@ -1023,11 +1029,13 @@ static int gpmc_nand_probe(struct device_d *pdev)
mdelay(1);
/* first scan to find the device and get the page size */
- if (nand_scan_ident(minfo, 1)) {
+ if (nand_scan_ident(minfo, 1, NULL)) {
err = -ENXIO;
goto out_release_mem;
}
+ gpmc_set_buswidth(nand, nand->options & NAND_BUSWIDTH_16);
+
if (nand->options & NAND_BUSWIDTH_16) {
lsp = &ecc_sp_x16;
llp = &ecc_lp_x16;
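
With the set_buswidth hook removed from the NAND core, the driver programs the GPMC bus width itself - and it can only do so after nand_scan_ident() has read the chip ID and possibly set NAND_BUSWIDTH_16. A sketch of the resulting probe order:

    if (nand_scan_ident(minfo, 1, NULL))    /* detects x8 vs. x16 */
        goto out_release_mem;

    gpmc_set_buswidth(nand, nand->options & NAND_BUSWIDTH_16);

    /* only now choose the x8 or x16 ECC layouts (lsp/llp) */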
diff --git a/drivers/mtd/nand/nand_s3c24xx.c b/drivers/mtd/nand/nand_s3c24xx.c
index a3f947a60e..44063523f9 100644
--- a/drivers/mtd/nand/nand_s3c24xx.c
+++ b/drivers/mtd/nand/nand_s3c24xx.c
@@ -456,6 +456,8 @@ static int s3c24x0_nand_probe(struct device_d *dev)
*/
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.bytes = 3; /* always 24 bit ECC per turn */
+ chip->ecc.strength = 1;
+
#ifdef CONFIG_CPU_S3C2440
if (readl(host->base) & 0x8) {
/* large page (2048 bytes per page) */
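
Each hardware-ECC driver in this series gains an ecc.strength assignment (1 for single-bit Hamming here and in nomadik_nand.c below, 4 or 8 for the BCH modes above) because the imported NAND core validates the field in nand_scan_tail(). Roughly the check the core performs - a sketch, not the literal code:

    if (mtd->writesize >= chip->ecc.size && !chip->ecc.strength) {
        pr_warn("Driver must set ecc.strength when using hardware ECC\n");
        BUG();
    }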
diff --git a/drivers/mtd/nand/nand_swecc.c b/drivers/mtd/nand/nand_swecc.c
deleted file mode 100644
index 95dfbd8083..0000000000
--- a/drivers/mtd/nand/nand_swecc.c
+++ /dev/null
@@ -1,94 +0,0 @@
-#include <common.h>
-#include <errno.h>
-#include <clock.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/err.h>
-#include <linux/mtd/nand_ecc.h>
-#include <asm/byteorder.h>
-#include <io.h>
-#include <malloc.h>
-
-#include "nand.h"
-
-/**
- * nand_read_page_swecc - [REPLACABLE] software ecc based page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- */
-static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf)
-{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
-
- chip->ecc.read_page_raw(mtd, chip, buf);
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
- chip->ecc.calculate(mtd, p, &ecc_calc[i]);
-
- for (i = 0; i < chip->ecc.total; i++)
- ecc_code[i] = chip->oob_poi[eccpos[i]];
-
- eccsteps = chip->ecc.steps;
- p = buf;
-
- for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- int stat;
-
- stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0)
- mtd->ecc_stats.failed++;
- else
- mtd->ecc_stats.corrected += stat;
- }
- return 0;
-}
-
-/**
- * nand_write_page_swecc - [REPLACABLE] software ecc based page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- */
-#ifdef CONFIG_MTD_WRITE
-static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
-{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- const uint8_t *p = buf;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
-
- /* Software ecc calculation */
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
- chip->ecc.calculate(mtd, p, &ecc_calc[i]);
-
- for (i = 0; i < chip->ecc.total; i++)
- chip->oob_poi[eccpos[i]] = ecc_calc[i];
-
- chip->ecc.write_page_raw(mtd, chip, buf);
-}
-#endif
-
-void nand_init_ecc_soft(struct nand_chip *chip)
-{
- chip->ecc.calculate = nand_calculate_ecc;
- chip->ecc.correct = nand_correct_data;
- chip->ecc.read_page = nand_read_page_swecc;
- chip->ecc.read_oob = nand_read_oob_std;
-#ifdef CONFIG_MTD_WRITE
- chip->ecc.write_page = nand_write_page_swecc;
- chip->ecc.write_oob = nand_write_oob_std;
-#endif
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
-}
diff --git a/drivers/mtd/nand/nand_write.c b/drivers/mtd/nand/nand_write.c
deleted file mode 100644
index 61b52ba484..0000000000
--- a/drivers/mtd/nand/nand_write.c
+++ /dev/null
@@ -1,747 +0,0 @@
-#define pr_fmt(fmt) "nand: " fmt
-
-#include <common.h>
-#include <errno.h>
-#include <clock.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/err.h>
-#include <linux/mtd/nand_ecc.h>
-#include <asm/byteorder.h>
-#include <io.h>
-#include <malloc.h>
-#include <module.h>
-
-#include "nand.h"
-
-static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops);
-
-/**
- * nand_write_buf - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- *
- * Default write function for 8bit buswith
- */
-void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
- int i;
- struct nand_chip *chip = mtd->priv;
-
- for (i = 0; i < len; i++)
- writeb(buf[i], chip->IO_ADDR_W);
-}
-
-/**
- * nand_write_buf16 - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- *
- * Default write function for 16bit buswith
- */
-void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
- int i;
- struct nand_chip *chip = mtd->priv;
- u16 *p = (u16 *) buf;
- len >>= 1;
-
- for (i = 0; i < len; i++)
- writew(p[i], chip->IO_ADDR_W);
-
-}
-
-/**
- * nand_default_block_markbad - [DEFAULT] mark a block bad
- * @mtd: MTD device structure
- * @ofs: offset from device start
- *
- * This is the default implementation, which can be overridden by
- * a hardware specific driver.
- */
-int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
- struct nand_chip *chip = mtd->priv;
- uint8_t buf[2] = { 0, 0 };
- int block, ret;
-
- /* Get block number */
- block = (int)(ofs >> chip->bbt_erase_shift);
- if (chip->bbt)
- chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
-
- /* Do we have a flash based bad block table ? */
- if (IS_ENABLED(CONFIG_NAND_BBT) && chip->bbt_options & NAND_BBT_USE_FLASH)
- ret = nand_update_bbt(mtd, ofs);
- else {
- /* We write two bytes, so we dont have to mess with 16 bit
- * access
- */
- ofs += mtd->oobsize;
- chip->ops.len = chip->ops.ooblen = 2;
- chip->ops.datbuf = NULL;
- chip->ops.oobbuf = buf;
- chip->ops.ooboffs = chip->badblockpos & ~0x01;
-
- ret = nand_do_write_oob(mtd, ofs, &chip->ops);
- }
- if (!ret)
- mtd->ecc_stats.badblocks++;
-
- return ret;
-}
-
-/**
- * nand_check_wp - [GENERIC] check if the chip is write protected
- * @mtd: MTD device structure
- * Check, if the device is write protected
- *
- * The function expects, that the device is already selected
- */
-static int nand_check_wp(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd->priv;
- /* Check the WP bit */
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
-}
-
-/**
- * nand_write_oob_std - [REPLACABLE] the most common OOB data write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to write
- */
-int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
-{
- int status = 0;
- const uint8_t *buf = chip->oob_poi;
- int length = mtd->oobsize;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, buf, length);
- /* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-/**
- * nand_write_page_raw - [Intern] raw page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- */
-void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
-{
- chip->write_buf(mtd, buf, mtd->writesize);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-}
-
-/**
- * nand_write_page - [REPLACEABLE] write one page
- * @mtd: MTD device structure
- * @chip: NAND chip descriptor
- * @buf: the data to write
- * @page: page number to write
- * @cached: cached programming
- * @raw: use _raw version of write_page
- */
-int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int page, int cached, int raw)
-{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
- if (unlikely(raw))
- chip->ecc.write_page_raw(mtd, chip, buf);
- else
- chip->ecc.write_page(mtd, chip, buf);
-
- /*
- * Cached progamming disabled for now, Not sure if its worth the
- * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s)
- */
- cached = 0;
-
- if (!cached || !(chip->options & NAND_CACHEPRG)) {
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
- /*
- * See if operation failed and additional status checks are
- * available
- */
- if ((status & NAND_STATUS_FAIL) && (chip->errstat))
- status = chip->errstat(mtd, chip, FL_WRITING, status,
- page);
-
- if (status & NAND_STATUS_FAIL) {
- return -EIO;
- }
- } else {
- chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
- }
-
-#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
- /* Send command to read back the data */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
- if (chip->verify_buf(mtd, buf, mtd->writesize))
- return -EIO;
-#endif
- return 0;
-}
-
-/**
- * nand_fill_oob - [Internal] Transfer client buffer to oob
- * @chip: nand chip structure
- * @oob: oob data buffer
- * @ops: oob ops structure
- */
-static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob,
- struct mtd_oob_ops *ops)
-{
- size_t len = ops->ooblen;
-
- switch(ops->mode) {
-
- case MTD_OOB_PLACE:
- case MTD_OOB_RAW:
- memcpy(chip->oob_poi + ops->ooboffs, oob, len);
- return oob + len;
-
- case MTD_OOB_AUTO: {
- struct nand_oobfree *free = chip->ecc.layout->oobfree;
- uint32_t boffs = 0, woffs = ops->ooboffs;
- size_t bytes = 0;
-
- for(; free->length && len; free++, len -= bytes) {
- /* Write request not from offset 0 ? */
- if (unlikely(woffs)) {
- if (woffs >= free->length) {
- woffs -= free->length;
- continue;
- }
- boffs = free->offset + woffs;
- bytes = min_t(size_t, len,
- (free->length - woffs));
- woffs = 0;
- } else {
- bytes = min_t(size_t, len, free->length);
- boffs = free->offset;
- }
- memcpy(chip->oob_poi + boffs, oob, bytes);
- oob += bytes;
- }
- return oob;
- }
- default:
- BUG();
- }
- return NULL;
-}
-
-#define NOTALIGNED(x) (x & (chip->subpagesize - 1)) != 0
-
-/**
- * nand_do_write_ops - [Internal] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operations description structure
- *
- * NAND write with ECC
- */
-int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
-{
- int chipnr, realpage, page, blockmask, column;
- struct nand_chip *chip = mtd->priv;
- uint32_t writelen = ops->len;
- uint8_t *oob = ops->oobbuf;
- uint8_t *buf = ops->datbuf;
- int ret = 0, subpage;
-
- ops->retlen = 0;
- if (!writelen)
- return 0;
-
- column = to & (mtd->writesize - 1);
- subpage = column || (writelen & (mtd->writesize - 1));
-
- if (subpage && oob)
- return -EINVAL;
-
- chipnr = (int)(to >> chip->chip_shift);
- chip->select_chip(mtd, chipnr);
-
- /* Check, if it is write protected */
- if (nand_check_wp(mtd)) {
- return -EIO;
- }
-
- realpage = (int)(to >> chip->page_shift);
- page = realpage & chip->pagemask;
- blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
-
- /* Invalidate the page cache, when we write to the cached page */
- if (to <= (chip->pagebuf << chip->page_shift) &&
- (chip->pagebuf << chip->page_shift) < (to + ops->len))
- chip->pagebuf = -1;
-
- while(1) {
- int bytes = mtd->writesize;
- int cached = writelen > bytes && page != blockmask;
- uint8_t *wbuf = buf;
-
- /* Partial page write ? */
- if (unlikely(column || writelen < (mtd->writesize - 1))) {
- cached = 0;
- bytes = min_t(int, bytes - column, (int) writelen);
- chip->pagebuf = -1;
- memset(chip->buffers->databuf, 0xff, mtd->writesize);
- memcpy(&chip->buffers->databuf[column], buf, bytes);
- wbuf = chip->buffers->databuf;
- }
-
- if (unlikely(oob)) {
- oob = nand_fill_oob(chip, oob, ops);
- } else {
- /* We still need to erase leftover OOB data */
- memset(chip->oob_poi, 0xff, mtd->oobsize);
- }
-
- if (oob || !mtd_all_ff(wbuf, mtd->writesize)) {
- ret = chip->write_page(mtd, chip, wbuf, page, cached,
- (ops->mode == MTD_OOB_RAW));
- if (ret)
- break;
- }
-
- writelen -= bytes;
- if (!writelen)
- break;
-
- column = 0;
- buf += bytes;
- realpage++;
-
- page = realpage & chip->pagemask;
- /* Check, if we cross a chip boundary */
- if (!page) {
- chipnr++;
- chip->select_chip(mtd, -1);
- chip->select_chip(mtd, chipnr);
- }
- }
-
- ops->retlen = ops->len - writelen;
- if (unlikely(oob))
- ops->oobretlen = ops->ooblen;
- return ret;
-}
-
-/**
- * nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
- *
- * NAND write with ECC
- */
-int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const uint8_t *buf)
-{
- struct nand_chip *chip = mtd->priv;
- int ret;
-
- /* Do not allow reads past end of device */
- if ((to + len) > mtd->size)
- return -EINVAL;
- if (!len)
- return 0;
-
- chip->ops.len = len;
- chip->ops.datbuf = (uint8_t *)buf;
- chip->ops.oobbuf = NULL;
-
- ret = nand_do_write_ops(mtd, to, &chip->ops);
-
- *retlen = chip->ops.retlen;
-
- return ret;
-}
-
-/**
- * nand_do_write_oob - [MTD Interface] NAND write out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operation description structure
- *
- * NAND write out-of-band
- */
-static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
-{
- int chipnr, page, status, len;
- struct nand_chip *chip = mtd->priv;
-
- MTD_DEBUG(MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n",
- (unsigned int)to, (int)ops->ooblen);
-
- if (ops->mode == MTD_OOB_AUTO)
- len = chip->ecc.layout->oobavail;
- else
- len = mtd->oobsize;
-
- /* Do not allow write past end of page */
- if ((ops->ooboffs + ops->ooblen) > len) {
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: "
- "Attempt to write past end of page\n");
- return -EINVAL;
- }
-
- if (unlikely(ops->ooboffs >= len)) {
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
- "Attempt to start write outside oob\n");
- return -EINVAL;
- }
-
- /* Do not allow reads past end of device */
- if (unlikely(to >= mtd->size ||
- ops->ooboffs + ops->ooblen >
- ((mtd->size >> chip->page_shift) -
- (to >> chip->page_shift)) * len)) {
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
- "Attempt write beyond end of device\n");
- return -EINVAL;
- }
-
- chipnr = (int)(to >> chip->chip_shift);
- chip->select_chip(mtd, chipnr);
-
- /* Shift to get page */
- page = (int)(to >> chip->page_shift);
-
- /*
- * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
- * of my DiskOnChip 2000 test units) will clear the whole data page too
- * if we don't do this. I have no clue why, but I seem to have 'fixed'
- * it in the doc2000 driver in August 1999. dwmw2.
- */
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-
- /* Check, if it is write protected */
- if (nand_check_wp(mtd))
- return -EROFS;
-
- /* Invalidate the page cache, if we write to the cached page */
- if (page == chip->pagebuf)
- chip->pagebuf = -1;
-
- memset(chip->oob_poi, 0xff, mtd->oobsize);
- nand_fill_oob(chip, ops->oobbuf, ops);
- status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
- memset(chip->oob_poi, 0xff, mtd->oobsize);
-
- if (status)
- return status;
-
- ops->oobretlen = ops->ooblen;
-
- return 0;
-}
-
-/**
- * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operation description structure
- */
-int nand_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
-{
- int ret = -ENOSYS;
-
- ops->retlen = 0;
-
- /* Do not allow writes past end of device */
- if (ops->datbuf && (to + ops->len) > mtd->size) {
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
- "Attempt read beyond end of device\n");
- return -EINVAL;
- }
-
- switch(ops->mode) {
- case MTD_OOB_PLACE:
- case MTD_OOB_AUTO:
- case MTD_OOB_RAW:
- break;
-
- default:
- goto out;
- }
-
- if (!ops->datbuf)
- ret = nand_do_write_oob(mtd, to, ops);
- else
- ret = nand_do_write_ops(mtd, to, ops);
-
- out:
- return ret;
-}
-
-/**
- * single_erease_cmd - [GENERIC] NAND standard block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
- *
- * Standard erase command for NAND chips
- */
-void single_erase_cmd(struct mtd_info *mtd, int page)
-{
- struct nand_chip *chip = mtd->priv;
- /* Send commands to erase a block */
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-}
-
-/**
- * multi_erease_cmd - [GENERIC] AND specific block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
- *
- * AND multi block erase command function
- * Erase 4 consecutive blocks
- */
-void multi_erase_cmd(struct mtd_info *mtd, int page)
-{
- struct nand_chip *chip = mtd->priv;
- /* Send commands to erase a block */
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-}
-
-/**
- * nand_erase - [MTD Interface] erase block(s)
- * @mtd: MTD device structure
- * @instr: erase instruction
- *
- * Erase one ore more blocks
- */
-int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- return nand_erase_nand(mtd, instr, 0);
-}
-
-#define BBT_PAGE_MASK 0xffffff3f
-/**
- * nand_erase_nand - [Internal] erase block(s)
- * @mtd: MTD device structure
- * @instr: erase instruction
- * @allowbbt: allow erasing the bbt area
- *
- * Erase one ore more blocks
- */
-int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
- int allowbbt)
-{
- int page, len, status, pages_per_block, ret, chipnr;
- struct nand_chip *chip = mtd->priv;
- int rewrite_bbt[NAND_MAX_CHIPS]={0};
- unsigned int bbt_masked_page = 0xffffffff;
-
- MTD_DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%08x, len = %i\n",
- (unsigned int)instr->addr, (unsigned int)instr->len);
-
- /* Start address must align on block boundary */
- if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n");
- return -EINVAL;
- }
-
- /* Length must align on block boundary */
- if (instr->len & ((1 << chip->phys_erase_shift) - 1)) {
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
- "Length not block aligned\n");
- return -EINVAL;
- }
-
- /* Do not allow erase past end of device */
- if ((instr->len + instr->addr) > mtd->size) {
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
- "Erase past end of device\n");
- return -EINVAL;
- }
-
- instr->fail_addr = 0xffffffff;
-
- /* Shift to get first page */
- page = (int)(instr->addr >> chip->page_shift);
- chipnr = (int)(instr->addr >> chip->chip_shift);
-
- /* Calculate pages in each block */
- pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
-
- /* Select the NAND device */
- chip->select_chip(mtd, chipnr);
-
- /* Check, if it is write protected */
- if (nand_check_wp(mtd)) {
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
- "Device is write protected!!!\n");
- instr->state = MTD_ERASE_FAILED;
- goto erase_exit;
- }
-
- /*
- * If BBT requires refresh, set the BBT page mask to see if the BBT
- * should be rewritten. Otherwise the mask is set to 0xffffffff which
- * can not be matched. This is also done when the bbt is actually
- * erased to avoid recusrsive updates
- */
- if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
- bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
-
- /* Loop through the pages */
- len = instr->len;
-
- instr->state = MTD_ERASING;
-
- while (len) {
- /*
- * heck if we have a bad block, we do not erase bad blocks !
- */
- if (!mtd->allow_erasebad &&
- nand_block_checkbad(mtd, ((loff_t) page) <<
- chip->page_shift, 0, allowbbt)) {
- pr_warn("nand_erase: attempt to erase a "
- "bad block at page 0x%08x\n", page);
- instr->state = MTD_ERASE_FAILED;
- goto erase_exit;
- }
-
- /*
- * Invalidate the page cache, if we erase the block which
- * contains the current cached page
- */
- if (page <= chip->pagebuf && chip->pagebuf <
- (page + pages_per_block))
- chip->pagebuf = -1;
-
- chip->erase_cmd(mtd, page & chip->pagemask);
-
- status = chip->waitfunc(mtd, chip);
-
- /*
- * See if operation failed and additional status checks are
- * available
- */
- if ((status & NAND_STATUS_FAIL) && (chip->errstat))
- status = chip->errstat(mtd, chip, FL_ERASING,
- status, page);
-
- /* See if block erase succeeded */
- if (status & NAND_STATUS_FAIL) {
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
- "Failed erase, page 0x%08x\n", page);
- instr->state = MTD_ERASE_FAILED;
- instr->fail_addr = (page << chip->page_shift);
- goto erase_exit;
- }
-
- /*
- * If BBT requires refresh, set the BBT rewrite flag to the
- * page being erased
- */
- if (bbt_masked_page != 0xffffffff &&
- (page & BBT_PAGE_MASK) == bbt_masked_page)
- rewrite_bbt[chipnr] = (page << chip->page_shift);
-
- /* Increment page address and decrement length */
- len -= (1 << chip->phys_erase_shift);
- page += pages_per_block;
-
- /* Check, if we cross a chip boundary */
- if (len && !(page & chip->pagemask)) {
- chipnr++;
- chip->select_chip(mtd, -1);
- chip->select_chip(mtd, chipnr);
-
- /*
- * If BBT requires refresh and BBT-PERCHIP, set the BBT
- * page mask to see if this BBT should be rewritten
- */
- if (bbt_masked_page != 0xffffffff &&
- (chip->bbt_td->options & NAND_BBT_PERCHIP))
- bbt_masked_page = chip->bbt_td->pages[chipnr] &
- BBT_PAGE_MASK;
- }
- }
- instr->state = MTD_ERASE_DONE;
-
- erase_exit:
-
- ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
-
- /* Do call back function */
- if (!ret)
- mtd_erase_callback(instr);
-
- /*
- * If BBT requires refresh and erase was successful, rewrite any
- * selected bad block tables
- */
- if (bbt_masked_page == 0xffffffff || ret)
- return ret;
-
- if (!IS_ENABLED(CONFIG_NAND_BBT))
- return ret;
-
- for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
- if (!rewrite_bbt[chipnr])
- continue;
- /* update the BBT for chip */
- MTD_DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt "
- "(%d:0x%0x 0x%0x)\n", chipnr, rewrite_bbt[chipnr],
- chip->bbt_td->pages[chipnr]);
- nand_update_bbt(mtd, rewrite_bbt[chipnr]);
- }
-
- /* Return more or less happy */
- return ret;
-}
-
-/**
- * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
- * @mtd: MTD device structure
- * @ofs: offset relative to mtd start
- */
-int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
- struct nand_chip *chip = mtd->priv;
- int ret;
-
- if ((ret = nand_block_isbad(mtd, ofs))) {
- /* If it was bad already, return success and do nothing. */
- if (ret > 0)
- return 0;
- return ret;
- }
-
- return chip->block_markbad(mtd, ofs);
-}
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index afdbef12fd..fbd8ecd487 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -209,6 +209,7 @@ static int nomadik_nand_probe(struct device_d *dev)
nand->ecc.hwctl = nomadik_ecc_control;
nand->ecc.size = 512;
nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
nand->options = pdata->options;
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 6df954cbe8..926d31c77f 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -1,7 +1,81 @@
-config UBI
- bool "UBI support"
- select PARTITION_NEED_MTD
+menuconfig MTD_UBI
+ tristate "Enable UBI - Unsorted block images"
select CRC32
+ select PARTITION_NEED_MTD
+ help
+ UBI is a software layer above the MTD layer which provides LVM-like
+ logical volumes on top of MTD devices, hides some complexities of
+ flash chips like wear and bad blocks and provides some other useful
+ capabilities. Please consult the MTD web site for more details
+ (www.linux-mtd.infradead.org).
+
+if MTD_UBI
+
+config MTD_UBI_WL_THRESHOLD
+ int "UBI wear-leveling threshold"
+ default 4096
+ range 2 65536
+ help
+ This parameter defines the maximum difference between the highest
+ erase counter value and the lowest erase counter value of eraseblocks
+ of UBI devices. When this threshold is exceeded, UBI starts performing
+ wear leveling by means of moving data from eraseblock with low erase
+ counter to eraseblocks with high erase counter.
+
+ The default value should be OK for SLC NAND flashes, NOR flashes and
+ other flashes which have an eraseblock life-cycle of 100000 or more.
+ However, in case of MLC NAND flashes, which typically have an
+ eraseblock life-cycle of less than 10000, the threshold should be
+ lowered (e.g., to 128 or 256, although it does not have to be a
+ power of 2).
+
+config MTD_UBI_BEB_LIMIT
+ int "Maximum expected bad eraseblock count per 1024 eraseblocks"
+ default 20
+ range 0 768
help
- This enables support for UBI (unsorted block images)
+ This option specifies the maximum number of bad physical eraseblocks
+ UBI expects on the MTD device (per 1024 eraseblocks). If the
+ underlying flash cannot have bad eraseblocks (e.g. NOR flash), this
+ value is ignored.
+
+ NAND datasheets often specify the minimum and maximum NVB (Number of
+ Valid Blocks) for the flashes' endurance lifetime. The maximum
+ expected bad eraseblocks per 1024 eraseblocks can then be calculated
+ as "1024 * (1 - MinNVB / MaxNVB)", which gives 20 for most NANDs
+ (MaxNVB is basically the total count of eraseblocks on the chip).
+
+ To put it differently, if this value is 20, UBI will try to reserve
+ about 1.9% of physical eraseblocks for bad block handling. And that
+ will be 1.9% of eraseblocks on the entire NAND chip, not just the MTD
+ partition UBI attaches. This means that if you have, say, a NAND
+ flash chip that admits a maximum of 40 bad eraseblocks, and it is
+ split into two MTD partitions of the same size, UBI will reserve 40
+ eraseblocks when attaching a partition.
+
+ This option can be overridden by the "mtd=" UBI module parameter or
+ by the "attach" ioctl.
+
+ Leave the default value if unsure.
+
+config MTD_UBI_FASTMAP
+ bool "UBI Fastmap (Experimental feature)"
+ default n
+ help
+ Important: this feature is experimental so far and the on-flash
+ format for fastmap may change in future kernel versions.
+
+ Fastmap is a mechanism which allows attaching a UBI device
+ in nearly constant time. Instead of scanning the whole MTD device it
+ only has to locate a checkpoint (called fastmap) on the device.
+ The on-flash fastmap contains all information needed to attach
+ the device. Using fastmap only makes sense on large devices where
+ attaching by scanning takes a long time. UBI will not automatically
+ install a fastmap on old images, but you can set the UBI module
+ parameter fm_autoconvert to 1 if you want one. Please note that
+ fastmap-enabled images are still usable with UBI implementations
+ without fastmap support. On typical flash devices the whole fastmap
+ fits into one PEB. UBI will reserve PEBs to hold two fastmaps.
+
+ If in doubt, say "N".
+endif # MTD_UBI
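
To make the NVB formula above concrete, take a chip whose datasheet guarantees at least 4016 valid blocks out of 4096 (illustrative numbers):

    1024 * (1 - MinNVB / MaxNVB) = 1024 * (1 - 4016/4096) = 1024 * 80/4096 = 20

which is exactly the default, i.e. 20/1024, about 1.9% of the chip reserved for bad-block handling.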
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index cef11416a1..795b116ecc 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -1,3 +1,5 @@
-obj-y += build.o vtbl.o vmt.o upd.o kapi.o eba.o io.o wl.o scan.o misc.o debug.o cdev.o
-
+obj-$(CONFIG_MTD_UBI) += ubi.o
+ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
+ubi-y += misc.o debug.o
+ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
new file mode 100644
index 0000000000..62d611310c
--- /dev/null
+++ b/drivers/mtd/ubi/attach.c
@@ -0,0 +1,1728 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * UBI attaching sub-system.
+ *
+ * This sub-system is responsible for attaching MTD devices and it also
+ * implements flash media scanning.
+ *
+ * The attaching information is represented by a &struct ubi_attach_info
+ * object. Information about volumes is represented by &struct ubi_ainf_volume
+ * objects which are kept in volume RB-tree with root at the @volumes field.
+ * The RB-tree is indexed by the volume ID.
+ *
+ * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
+ * objects are kept in per-volume RB-trees with the root at the corresponding
+ * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
+ * per-volume objects and each of these objects is the root of RB-tree of
+ * per-LEB objects.
+ *
+ * Corrupted physical eraseblocks are put to the @corr list, free physical
+ * eraseblocks are put to the @free list and the physical eraseblock to be
+ * erased are put to the @erase list.
+ *
+ * About corruptions
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
+ * whether the headers are corrupted or not. Sometimes UBI also protects the
+ * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
+ * when it moves the contents of a PEB for wear-leveling purposes.
+ *
+ * UBI tries to distinguish between 2 types of corruptions.
+ *
+ * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
+ * tries to handle them gracefully, without printing too many warnings and
+ * error messages. The idea is that we do not lose important data in these
+ * cases - we may lose only the data which were being written to the media just
+ * before the power cut happened, and the upper layers (e.g., UBIFS) are
+ * supposed to handle such data losses (e.g., by using the FS journal).
+ *
+ * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
+ * the reason is a power cut, UBI puts this PEB to the @erase list, and all
+ * PEBs in the @erase list are scheduled for erasure later.
+ *
+ * 2. Unexpected corruptions which are not caused by power cuts. During
+ * attaching, such PEBs are put to the @corr list and UBI preserves them.
+ * Obviously, this lessens the amount of available PEBs, and if at some point
+ * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
+ * about such PEBs every time the MTD device is attached.
+ *
+ * However, it is difficult to reliably distinguish between these types of
+ * corruptions and UBI's strategy is as follows (in case of attaching by
+ * scanning). UBI assumes corruption type 2 if the VID header is corrupted and
+ * the data area does not contain all 0xFFs, and there were no bit-flips or
+ * integrity errors (e.g., ECC errors in case of NAND) while reading the data
+ * area. Otherwise UBI assumes corruption type 1. So the decision criteria
+ * are as follows.
+ * o If the data area contains only 0xFFs, there are no data, and it is safe
+ * to just erase this PEB - this is corruption type 1.
+ * o If the data area has bit-flips or data integrity errors (ECC errors on
+ * NAND), it is probably a PEB which was being erased when power cut
+ * happened, so this is corruption type 1. However, this is just a guess,
+ * which might be wrong.
+ * o Otherwise this is corruption type 2.
+ */
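
Condensed into pseudo-C, the decision criteria above look roughly like this (a sketch of the logic, not the literal scan_peb() code; the predicate names are illustrative):

    if (data_area_is_all_0xff)
        schedule_for_erase();       /* type 1: nothing was written, safe to erase */
    else if (read_saw_bitflips_or_ecc_errors)
        schedule_for_erase();       /* type 1 (a guess): PEB was being erased */
    else
        preserve_peb();             /* type 2: keep the PEB for inspection */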
+
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <stdlib.h>
+#include "ubi.h"
+
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
+
+/* Temporary variables used during scanning */
+static struct ubi_ec_hdr *ech;
+static struct ubi_vid_hdr *vidh;
+
+/**
+ * add_to_list - add physical eraseblock to a list.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to add
+ * @vol_id: the last used volume id for the PEB
+ * @lnum: the last used LEB number for the PEB
+ * @ec: erase counter of the physical eraseblock
+ * @to_head: if not zero, add to the head of the list
+ * @list: the list to add to
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for physical
+ * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
+ * It stores the @lnum and @vol_id alongside, which can both be
+ * %UBI_UNKNOWN if they are not available, not readable, or not assigned.
+ * If @to_head is not zero, PEB will be added to the head of the list, which
+ * basically means it will be processed first later. E.g., we add corrupted
+ * PEBs (corrupted due to power cuts) to the head of the erase list to make
+ * sure we erase them first and get rid of corruptions ASAP. This function
+ * returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
+ int lnum, int ec, int to_head, struct list_head *list)
+{
+ struct ubi_ainf_peb *aeb;
+
+ if (list == &ai->free) {
+ dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
+ } else if (list == &ai->erase) {
+ dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
+ } else if (list == &ai->alien) {
+ dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
+ ai->alien_peb_count += 1;
+ } else
+ BUG();
+
+ aeb = kzalloc(sizeof(*aeb), GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->ec = ec;
+ if (to_head)
+ list_add(&aeb->u.list, list);
+ else
+ list_add_tail(&aeb->u.list, list);
+ return 0;
+}
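
A typical call from the scanning code, following the comment above: a PEB corrupted by a power cut goes to the head of the erase list so it is recycled first (sketch; UBI_UNKNOWN stands in for a vol_id/lnum that could not be read):

    err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, ec, 1, &ai->erase);
    if (err)
        return err;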
+
+/**
+ * add_corrupted - add a corrupted physical eraseblock.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to add
+ * @ec: erase counter of the physical eraseblock
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
+ * physical eraseblock @pnum and adds it to the 'corr' list. The corruption
+ * was presumably not caused by a power cut. Returns zero in case of success
+ * and a negative error code in case of failure.
+ */
+static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
+{
+ struct ubi_ainf_peb *aeb;
+
+ dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
+
+ aeb = kzalloc(sizeof(*aeb), GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ ai->corr_peb_count += 1;
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ list_add(&aeb->u.list, &ai->corr);
+ return 0;
+}
+
+/**
+ * validate_vid_hdr - check volume identifier header.
+ * @vid_hdr: the volume identifier header to check
+ * @av: information about the volume this logical eraseblock belongs to
+ * @pnum: physical eraseblock number the VID header came from
+ *
+ * This function checks that data stored in @vid_hdr is consistent. Returns
+ * non-zero if an inconsistency was found and zero if not.
+ *
+ * Note, UBI does a sanity check of everything it reads from the flash media.
+ * Most of the checks are done in the I/O sub-system. Here we check that the
+ * information in the VID header is consistent with the information in other
+ * VID headers of the same volume.
+ */
+static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
+ const struct ubi_ainf_volume *av, int pnum)
+{
+ int vol_type = vid_hdr->vol_type;
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
+
+ if (av->leb_count != 0) {
+ int av_vol_type;
+
+ /*
+ * This is not the first logical eraseblock belonging to this
+ * volume. Ensure that the data in its VID header is consistent
+ * to the data in previous logical eraseblock headers.
+ */
+
+ if (vol_id != av->vol_id) {
+ ubi_err("inconsistent vol_id");
+ goto bad;
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av_vol_type = UBI_VID_STATIC;
+ else
+ av_vol_type = UBI_VID_DYNAMIC;
+
+ if (vol_type != av_vol_type) {
+ ubi_err("inconsistent vol_type");
+ goto bad;
+ }
+
+ if (used_ebs != av->used_ebs) {
+ ubi_err("inconsistent used_ebs");
+ goto bad;
+ }
+
+ if (data_pad != av->data_pad) {
+ ubi_err("inconsistent data_pad");
+ goto bad;
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err("inconsistent VID header at PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ ubi_dump_av(av);
+ return -EINVAL;
+}
+
+/**
+ * add_volume - add volume to the attaching information.
+ * @ai: attaching information
+ * @vol_id: ID of the volume to add
+ * @pnum: physical eraseblock number
+ * @vid_hdr: volume identifier header
+ *
+ * If the volume corresponding to the @vid_hdr logical eraseblock is already
+ * present in the attaching information, this function does nothing. Otherwise
+ * it adds the corresponding volume to the attaching information. Returns a pointer
+ * to the allocated "av" object in case of success and a negative error code in
+ * case of failure.
+ */
+static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
+ int vol_id, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
+
+ /* Walk the volume RB-tree to look if this volume is already present */
+ while (*p) {
+ parent = *p;
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (vol_id == av->vol_id)
+ return av;
+
+ if (vol_id > av->vol_id)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ /* The volume is absent - add it */
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
+ return ERR_PTR(-ENOMEM);
+
+ av->highest_lnum = av->leb_count = 0;
+ av->vol_id = vol_id;
+ av->root = RB_ROOT;
+ av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ av->data_pad = be32_to_cpu(vid_hdr->data_pad);
+ av->compat = vid_hdr->compat;
+ av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
+ : UBI_STATIC_VOLUME;
+ if (vol_id > ai->highest_vol_id)
+ ai->highest_vol_id = vol_id;
+
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+ ai->vols_found += 1;
+ dbg_bld("added volume %d", vol_id);
+ return av;
+}
+
+/**
+ * ubi_compare_lebs - find out which logical eraseblock is newer.
+ * @ubi: UBI device description object
+ * @aeb: first logical eraseblock to compare
+ * @pnum: physical eraseblock number of the second logical eraseblock to
+ * compare
+ * @vid_hdr: volume identifier header of the second logical eraseblock
+ *
+ * This function compares 2 copies of a LEB and reports which one is newer. In
+ * case of success this function returns a positive value, in case of failure a
+ * negative error code is returned. The success return codes use the following
+ * bits:
+ * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
+ * second PEB (described by @pnum and @vid_hdr);
+ * o bit 0 is set: the second PEB is newer;
+ * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
+ * o bit 1 is set: bit-flips were detected in the newer LEB;
+ * o bit 2 is cleared: the older LEB is not corrupted;
+ * o bit 2 is set: the older LEB is corrupted.
+ */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr)
+{
+ int len, err, second_is_newer, bitflips = 0, corrupted = 0;
+ uint32_t data_crc, crc;
+ struct ubi_vid_hdr *vh = NULL;
+ unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
+
+ if (sqnum2 == aeb->sqnum) {
+ /*
+ * This must be a really ancient UBI image which has been
+ * created before sequence numbers support has been added. At
+ * that times we used 32-bit LEB versions stored in logical
+ * eraseblocks. That was before UBI got into mainline. We do not
+ * support these images anymore. Well, those images still work,
+ * but only if no unclean reboots happened.
+ */
+ ubi_err("unsupported on-flash UBI format");
+ return -EINVAL;
+ }
+
+ /* Obviously the LEB with lower sequence counter is older */
+ second_is_newer = (sqnum2 > aeb->sqnum);
+
+ /*
+ * Now we know which copy is newer. If the copy flag of the PEB with
+ * newer version is not set, then we just return, otherwise we have to
+ * check data CRC. For the second PEB we already have the VID header,
+ * for the first one - we'll need to re-read it from flash.
+ *
+ * Note: this may be optimized so that we wouldn't read twice.
+ */
+
+ if (second_is_newer) {
+ if (!vid_hdr->copy_flag) {
+ /* It is not a copy, so it is newer */
+ dbg_bld("second PEB %d is newer, copy_flag is unset",
+ pnum);
+ return 1;
+ }
+ } else {
+ if (!aeb->copy_flag) {
+ /* It is not a copy, so it is newer */
+ dbg_bld("first PEB %d is newer, copy_flag is unset",
+ pnum);
+ return bitflips << 1;
+ }
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh)
+ return -ENOMEM;
+
+ pnum = aeb->pnum;
+ err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (err) {
+ if (err == UBI_IO_BITFLIPS)
+ bitflips = 1;
+ else {
+ ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
+ pnum, err);
+ if (err > 0)
+ err = -EIO;
+
+ goto out_free_vidh;
+ }
+ }
+
+ vid_hdr = vh;
+ }
+
+ /* Read the data of the copy and check the CRC */
+
+ len = be32_to_cpu(vid_hdr->data_size);
+
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+ goto out_unlock;
+
+ data_crc = be32_to_cpu(vid_hdr->data_crc);
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
+ if (crc != data_crc) {
+ dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
+ pnum, crc, data_crc);
+ corrupted = 1;
+ bitflips = 0;
+ second_is_newer = !second_is_newer;
+ } else {
+ dbg_bld("PEB %d CRC is OK", pnum);
+ bitflips = !!err;
+ }
+
+ ubi_free_vid_hdr(ubi, vh);
+
+ if (second_is_newer)
+ dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
+ else
+ dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
+
+ return second_is_newer | (bitflips << 1) | (corrupted << 2);
+
+out_unlock:
+out_free_vidh:
+ ubi_free_vid_hdr(ubi, vh);
+ return err;
+}
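
ubi_add_to_av() below decodes the bit-coded result like this (sketch):

    int cmp_res, second_is_newer, had_bitflips, old_corrupted;

    cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
    if (cmp_res < 0)
        return cmp_res;                     /* read failure, not a verdict */

    second_is_newer = cmp_res & 1;          /* keep the PEB described by @pnum */
    had_bitflips    = !!(cmp_res & 2);      /* schedule the newer copy for scrubbing */
    old_corrupted   = !!(cmp_res & 4);      /* put the older PEB at the head of @erase */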
+
+/**
+ * ubi_add_to_av - add used physical eraseblock to the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+ * @ec: erase counter
+ * @vid_hdr: the volume identifier header
+ * @bitflips: if bit-flips were detected when this physical eraseblock was read
+ *
+ * This function adds information about a used physical eraseblock to the
+ * 'used' tree of the corresponding volume. The function is rather complex
+ * because it has to handle cases when this is not the first physical
+ * eraseblock belonging to the same logical eraseblock, and the newer one has
+ * to be picked, while the older one has to be dropped. This function returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
+{
+ int err, vol_id, lnum;
+ unsigned long long sqnum;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node **p, *parent = NULL;
+
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+ sqnum = be64_to_cpu(vid_hdr->sqnum);
+
+ dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
+ pnum, vol_id, lnum, ec, sqnum, bitflips);
+
+ av = add_volume(ai, vol_id, pnum, vid_hdr);
+ if (IS_ERR(av))
+ return PTR_ERR(av);
+
+ if (ai->max_sqnum < sqnum)
+ ai->max_sqnum = sqnum;
+
+ /*
+ * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
+ * if this is the first instance of this logical eraseblock or not.
+ */
+ p = &av->root.rb_node;
+ while (*p) {
+ int cmp_res;
+
+ parent = *p;
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (lnum != aeb->lnum) {
+ if (lnum < aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ continue;
+ }
+
+ /*
+ * There is already a physical eraseblock describing the same
+ * logical eraseblock present.
+ */
+
+ dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
+ aeb->pnum, aeb->sqnum, aeb->ec);
+
+ /*
+ * Make sure that the logical eraseblocks have different
+ * sequence numbers. Otherwise the image is bad.
+ *
+ * However, if the sequence number is zero, we assume it must
+ * be an ancient UBI image from the era when UBI did not have
+ * sequence numbers. We still can attach these images, unless
+ * there is a need to distinguish between old and new
+ * eraseblocks, in which case we'll refuse the image in
+ * 'ubi_compare_lebs()'. In other words, we attach old clean
+ * images, but refuse attaching old images with duplicated
+ * logical eraseblocks because there was an unclean reboot.
+ */
+ if (aeb->sqnum == sqnum && sqnum != 0) {
+ ubi_err("two LEBs with same sequence number %llu",
+ sqnum);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_vid_hdr(vid_hdr);
+ return -EINVAL;
+ }
+
+ /*
+ * Now we have to drop the older one and preserve the newer
+ * one.
+ */
+ cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
+ if (cmp_res < 0)
+ return cmp_res;
+
+ if (cmp_res & 1) {
+ /*
+ * This logical eraseblock is newer than the one
+ * found earlier.
+ */
+ err = validate_vid_hdr(vid_hdr, av, pnum);
+ if (err)
+ return err;
+
+ err = add_to_list(ai, aeb->pnum, aeb->vol_id,
+ aeb->lnum, aeb->ec, cmp_res & 4,
+ &ai->erase);
+ if (err)
+ return err;
+
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = ((cmp_res & 2) || bitflips);
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
+
+ if (av->highest_lnum == lnum)
+ av->last_data_size =
+ be32_to_cpu(vid_hdr->data_size);
+
+ return 0;
+ } else {
+ /*
+ * This logical eraseblock is older than the one found
+ * previously.
+ */
+ return add_to_list(ai, pnum, vol_id, lnum, ec,
+ cmp_res & 4, &ai->erase);
+ }
+ }
+
+ /*
+ * We've met this logical eraseblock for the first time, add it to the
+ * attaching information.
+ */
+
+ err = validate_vid_hdr(vid_hdr, av, pnum);
+ if (err)
+ return err;
+
+ aeb = kzalloc(sizeof(*aeb), GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = bitflips;
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
+
+ if (av->highest_lnum <= lnum) {
+ av->highest_lnum = lnum;
+ av->last_data_size = be32_to_cpu(vid_hdr->data_size);
+ }
+
+ av->leb_count += 1;
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
+ return 0;
+}
+
+/**
+ * ubi_find_av - find volume in the attaching information.
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ *
+ * This function returns a pointer to the volume description or %NULL if there
+ * are no data about this volume in the attaching information.
+ */
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node *p = ai->volumes.rb_node;
+
+ while (p) {
+ av = rb_entry(p, struct ubi_ainf_volume, rb);
+
+ if (vol_id == av->vol_id)
+ return av;
+
+ if (vol_id > av->vol_id)
+ p = p->rb_left;
+ else
+ p = p->rb_right;
+ }
+
+ return NULL;
+}
+
+/**
+ * ubi_remove_av - delete attaching information about a volume.
+ * @ai: attaching information
+ * @av: the volume attaching information to delete
+ */
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+ struct rb_node *rb;
+ struct ubi_ainf_peb *aeb;
+
+ dbg_bld("remove attaching information about volume %d", av->vol_id);
+
+ while ((rb = rb_first(&av->root))) {
+ aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, &ai->erase);
+ }
+
+ rb_erase(&av->rb, &ai->volumes);
+ kfree(av);
+ ai->vols_found -= 1;
+}
+
+/**
+ * early_erase_peb - erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to erase;
+ * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
+ *
+ * This function erases physical eraseblock 'pnum', and writes the erase
+ * counter header to it. This function should only be used during the UBI device
+ * initialization stages, when the EBA sub-system has not yet been initialized.
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int early_erase_peb(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai, int pnum, int ec)
+{
+ int err;
+ struct ubi_ec_hdr *ec_hdr;
+
+ if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. Upgrade UBI and use 64-bit
+ * erase counters internally.
+ */
+ ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
+ return -EINVAL;
+ }
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ ec_hdr->ec = cpu_to_be64(ec);
+
+ err = ubi_io_sync_erase(ubi, pnum, 0);
+ if (err < 0)
+ goto out_free;
+
+ err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * ubi_early_get_peb - get a free physical eraseblock.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns a free physical eraseblock. It is supposed to be
+ * called during the UBI initialization stages, when the wear-leveling sub-system
+ * is not initialized yet. This function picks a physical eraseblock from one of
+ * the lists, writes the EC header if needed, and removes it from the
+ * list.
+ *
+ * This function returns a pointer to the "aeb" of the found free PEB in case
+ * of success and an error code in case of failure.
+ */
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ int err = 0;
+ struct ubi_ainf_peb *aeb, *tmp_aeb;
+
+ if (!list_empty(&ai->free)) {
+ aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
+ list_del(&aeb->u.list);
+ dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
+ }
+
+ /*
+ * We try to erase the first physical eraseblock from the erase list
+ * and pick it if we succeed, or try to erase the next one if not, and
+ * so forth. We do not want to deal with bad eraseblocks here - they
+ * will be handled later.
+ */
+ list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+
+ err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
+ if (err)
+ continue;
+
+ aeb->ec += 1;
+ list_del(&aeb->u.list);
+ dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
+ }
+
+ ubi_err("no free eraseblocks");
+ return ERR_PTR(-ENOSPC);
+}
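+
+/*
+ * Illustrative use (a sketch, not part of this change): early callers such
+ * as the volume table code obtain a blank PEB along these lines:
+ *
+ *    struct ubi_ainf_peb *new_aeb = ubi_early_get_peb(ubi, ai);
+ *    if (IS_ERR(new_aeb))
+ *        return PTR_ERR(new_aeb);
+ */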
+
+/**
+ * check_corruption - check the data area of PEB.
+ * @ubi: UBI device description object
+ * @vid_hdr: the (corrupted) VID header of this PEB
+ * @pnum: the physical eraseblock number to check
+ *
+ * This is a helper function which is used to distinguish between VID header
+ * corruptions caused by power cuts and other reasons. If the PEB contains only
+ * 0xFF bytes in the data area, the VID header is most probably corrupted
+ * because of a power cut (%0 is returned in this case). Otherwise, it was
+ * probably corrupted for some other reason (%1 is returned in this case). A
+ * negative error code is returned if a read error occurred.
+ *
+ * If the corruption reason was a power cut, UBI can safely erase this PEB.
+ * Otherwise, it should preserve it to avoid possibly destroying important
+ * information.
+ */
+static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
+ int pnum)
+{
+ int err;
+
+ memset(ubi->peb_buf, 0x00, ubi->leb_size);
+
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
+ ubi->leb_size);
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
+ /*
+ * Bit-flips or integrity errors while reading the data area.
+ * It is difficult to say for sure which type of corruption this
+ * is, but presumably a power cut happened while this PEB was
+ * being erased, so it became unstable and corrupted, and should
+ * be erased.
+ */
+ err = 0;
+ goto out;
+ }
+
+ if (err)
+ goto out;
+
+ if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
+ goto out;
+
+ ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+ pnum);
+ ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
+ ubi_dump_vid_hdr(vid_hdr);
+ pr_err("hexdump of PEB %d offset %d, length %d",
+ pnum, ubi->leb_start, ubi->leb_size);
+ ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ ubi->peb_buf, ubi->leb_size, 1);
+ err = 1;
+
+out:
+ return err;
+}
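+
+/*
+ * A sketch of the helper used above: ubi_check_pattern() returns %1 when
+ * every byte of the buffer equals the pattern byte, e.g.
+ *
+ *    uint8_t buf[4] = { 0xFF, 0xFF, 0xFF, 0xFF };
+ *    ubi_check_pattern(buf, 0xFF, 4); - returns 1, i.e. looks erased
+ */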
+
+/**
+ * scan_peb - scan and process UBI headers of a PEB.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+ * @vid: The volume ID of the found volume will be stored in this pointer
+ * @sqnum: The sqnum of the found volume will be stored in this pointer
+ *
+ * This function reads UBI headers of PEB @pnum, checks them, and adds
+ * information about this PEB to the corresponding list or RB-tree in the
+ * "attaching info" structure. Returns zero if the physical eraseblock was
+ * successfully handled and a negative error code in case of failure.
+ */
+static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int pnum, int *vid, unsigned long long *sqnum)
+{
+ long long uninitialized_var(ec);
+ int err, bitflips = 0, vol_id = -1, ec_err = 0;
+
+ dbg_bld("scan PEB %d", pnum);
+
+ /* Skip bad physical eraseblocks */
+ err = ubi_io_is_bad(ubi, pnum);
+ if (err < 0)
+ return err;
+ else if (err) {
+ ai->bad_peb_count += 1;
+ return 0;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err < 0)
+ return err;
+ switch (err) {
+ case 0:
+ break;
+ case UBI_IO_BITFLIPS:
+ bitflips = 1;
+ break;
+ case UBI_IO_FF:
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 0, &ai->erase);
+ case UBI_IO_FF_BITFLIPS:
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 1, &ai->erase);
+ case UBI_IO_BAD_HDR_EBADMSG:
+ case UBI_IO_BAD_HDR:
+ /*
+ * We also have to look at the VID header - possibly it is not
+ * corrupted. Set the %bitflips flag in order to have this PEB
+ * moved and its EC header re-created.
+ */
+ ec_err = err;
+ ec = UBI_UNKNOWN;
+ bitflips = 1;
+ break;
+ default:
+ ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err);
+ return -EINVAL;
+ }
+
+ if (!ec_err) {
+ int image_seq;
+
+ /* Make sure UBI version is OK */
+ if (ech->version != UBI_VERSION) {
+ ubi_err("this UBI version is %d, image version is %d",
+ UBI_VERSION, (int)ech->version);
+ return -EINVAL;
+ }
+
+ ec = be64_to_cpu(ech->ec);
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. The EC headers have 64 bits
+ * reserved, but we only make use of 31-bit values
+ * anyway, as this seems to be enough for any existing
+ * flash. Upgrade UBI and use 64-bit erase counters
+ * internally.
+ */
+ ubi_err("erase counter overflow, max is %d",
+ UBI_MAX_ERASECOUNTER);
+ ubi_dump_ec_hdr(ech);
+ return -EINVAL;
+ }
+
+ /*
+ * Make sure that all PEBs have the same image sequence number.
+ * This allows us to detect situations when users flash UBI
+ * images incorrectly, so that the flash has the new UBI image
+ * and leftovers from the old one. This feature was added
+ * relatively recently, and the sequence number was always
+ * zero, because old UBI implementations always set it to zero.
+ * For this reason, we do not panic if some PEBs have zero
+ * sequence number, while other PEBs have non-zero sequence
+ * number.
+ */
+ image_seq = be32_to_cpu(ech->image_seq);
+ if (!ubi->image_seq && image_seq)
+ ubi->image_seq = image_seq;
+ if (ubi->image_seq && image_seq &&
+ ubi->image_seq != image_seq) {
+ ubi_err("bad image sequence number %d in PEB %d, expected %d",
+ image_seq, pnum, ubi->image_seq);
+ ubi_dump_ec_hdr(ech);
+ return -EINVAL;
+ }
+ }
+
+ /* OK, we are done with the EC header, let's look at the VID header */
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
+ if (err < 0)
+ return err;
+ switch (err) {
+ case 0:
+ break;
+ case UBI_IO_BITFLIPS:
+ bitflips = 1;
+ break;
+ case UBI_IO_BAD_HDR_EBADMSG:
+ if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
+ /*
+ * Both EC and VID headers are corrupted and were read
+ * with a data integrity error; probably this is a bad
+ * PEB, but it is not marked as bad yet. This may also
+ * be a result of a power cut during erasure.
+ */
+ ai->maybe_bad_peb_count += 1;
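+ /* fall through */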
+ case UBI_IO_BAD_HDR:
+ if (ec_err)
+ /*
+ * Both headers are corrupted. There is a possibility
+ * that this is a valid UBI PEB which has a corresponding
+ * LEB, but the headers are corrupted. However, it is
+ * impossible to distinguish it from a PEB which just
+ * contains garbage because of a power cut during erase
+ * operation. So we just schedule this PEB for erasure.
+ *
+ * Besides, in case of NOR flash, we deliberately
+ * corrupt both headers because NOR flash erasure is
+ * slow and can start from the end.
+ */
+ err = 0;
+ else
+ /*
+ * The EC was OK, but the VID header is corrupted. We
+ * have to check what is in the data area.
+ */
+ err = check_corruption(ubi, vidh, pnum);
+
+ if (err < 0)
+ return err;
+ else if (!err)
+ /* This corruption is caused by a power cut */
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
+ else
+ /* This is an unexpected corruption */
+ err = add_corrupted(ai, pnum, ec);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ case UBI_IO_FF_BITFLIPS:
+ err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ ec, 1, &ai->erase);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ case UBI_IO_FF:
+ if (ec_err || bitflips)
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
+ else
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 0, &ai->free);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ default:
+ ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d",
+ err);
+ return -EINVAL;
+ }
+
+ vol_id = be32_to_cpu(vidh->vol_id);
+ if (vid)
+ *vid = vol_id;
+ if (sqnum)
+ *sqnum = be64_to_cpu(vidh->sqnum);
+ if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
+ int lnum = be32_to_cpu(vidh->lnum);
+
+ /* Unsupported internal volume */
+ switch (vidh->compat) {
+ case UBI_COMPAT_DELETE:
+ if (vol_id != UBI_FM_SB_VOLUME_ID
+ && vol_id != UBI_FM_DATA_VOLUME_ID) {
+ ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
+ vol_id, lnum);
+ }
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 1, &ai->erase);
+ if (err)
+ return err;
+ return 0;
+
+ case UBI_COMPAT_RO:
+ ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
+ vol_id, lnum);
+ ubi->ro_mode = 1;
+ break;
+
+ case UBI_COMPAT_PRESERVE:
+ ubi_msg("\"preserve\" compatible internal volume %d:%d found",
+ vol_id, lnum);
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 0, &ai->alien);
+ if (err)
+ return err;
+ return 0;
+
+ case UBI_COMPAT_REJECT:
+ ubi_err("incompatible internal volume %d:%d found",
+ vol_id, lnum);
+ return -EINVAL;
+ }
+ }
+
+ if (ec_err)
+ ubi_warn("valid VID header but corrupted EC header at PEB %d",
+ pnum);
+ err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
+ if (err)
+ return err;
+
+adjust_mean_ec:
+ if (!ec_err) {
+ ai->ec_sum += ec;
+ ai->ec_count += 1;
+ if (ec > ai->max_ec)
+ ai->max_ec = ec;
+ if (ec < ai->min_ec)
+ ai->min_ec = ec;
+ }
+
+ return 0;
+}
+
+/**
+ * late_analysis - analyze the overall situation with PEB.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This is a helper function which takes a look at what PEBs we have after we
+ * gather information about all of them (@ai is complete). It decides whether
+ * the flash is empty and should be formatted, or whether there are too many
+ * corrupted PEBs and we should not attach this MTD device. Returns zero if we
+ * should proceed with attaching the MTD device, and %-EINVAL if we should not.
+ */
+static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb;
+ int max_corr, peb_count;
+
+ peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
+ max_corr = peb_count / 20 ?: 8;
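+ /* i.e. 5% of the usable PEBs, but at least 8 */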
+
+ /*
+ * A few corrupted PEBs are not a problem and may just be the result
+ * of unclean reboots. However, many of them may indicate problems
+ * with the flash HW or the driver.
+ */
+ if (ai->corr_peb_count) {
+ ubi_err("%d PEBs are corrupted and preserved",
+ ai->corr_peb_count);
+ pr_err("Corrupted PEBs are:");
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ pr_err(" %d", aeb->pnum);
+ pr_err("\n");
+
+ /*
+ * If too many PEBs are corrupted, we refuse attaching,
+ * otherwise, only print a warning.
+ */
+ if (ai->corr_peb_count >= max_corr) {
+ ubi_err("too many corrupted PEBs, refusing");
+ return -EINVAL;
+ }
+ }
+
+ if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
+ /*
+ * All PEBs are empty, or almost all - a couple of PEBs look like
+ * they may be bad PEBs which were not marked as bad yet.
+ *
+ * This piece of code basically tries to distinguish between
+ * the following situations:
+ *
+ * 1. Flash is empty, but there are a few bad PEBs, which are not
+ * marked as bad so far, and which were read with error. We
+ * want to go ahead and format this flash. While formatting,
+ * the faulty PEBs will probably be marked as bad.
+ *
+ * 2. Flash contains non-UBI data and we do not want to format
+ * it and destroy possibly important information.
+ */
+ if (ai->maybe_bad_peb_count <= 2) {
+ ai->is_empty = 1;
+ ubi_msg("empty MTD device detected");
+ get_random_bytes(&ubi->image_seq,
+ sizeof(ubi->image_seq));
+ } else {
+ ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * destroy_av - free volume attaching information.
+ * @av: volume attaching information
+ * @ai: attaching information
+ *
+ * This function destroys the volume attaching information.
+ */
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *this = av->root.rb_node;
+
+ while (this) {
+ if (this->rb_left)
+ this = this->rb_left;
+ else if (this->rb_right)
+ this = this->rb_right;
+ else {
+ aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
+ this = rb_parent(this);
+ if (this) {
+ if (this->rb_left == &aeb->u.rb)
+ this->rb_left = NULL;
+ else
+ this->rb_right = NULL;
+ }
+
+ kfree(aeb);
+ }
+ }
+ kfree(av);
+}
+
+/**
+ * destroy_ai - destroy attaching information.
+ * @ai: attaching information
+ */
+static void destroy_ai(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb, *aeb_tmp;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb;
+
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
+ list_del(&aeb->u.list);
+ kfree(aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
+ list_del(&aeb->u.list);
+ kfree(aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
+ list_del(&aeb->u.list);
+ kfree(aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
+ list_del(&aeb->u.list);
+ kfree(aeb);
+ }
+
+ /* Destroy the volume RB-tree */
+ rb = ai->volumes.rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ av = rb_entry(rb, struct ubi_ainf_volume, rb);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &av->rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ destroy_av(ai, av);
+ }
+ }
+
+ kfree(ai);
+}
+
+/**
+ * scan_all - scan entire MTD device.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ * @start: start scanning at this PEB
+ *
+ * This function does full scanning of an MTD device and returns complete
+ * information about it in form of a "struct ubi_attach_info" object. In case
+ * of failure, an error code is returned.
+ */
+static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int start)
+{
+ int err, pnum;
+ struct rb_node *rb1, *rb2;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
+
+ err = -ENOMEM;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ return err;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ goto out_ech;
+
+ for (pnum = start; pnum < ubi->peb_count; pnum++) {
+ dbg_gen("process PEB %d", pnum);
+ err = scan_peb(ubi, ai, pnum, NULL, NULL);
+ if (err < 0)
+ goto out_vidh;
+ }
+
+ ubi_msg("scanning is finished");
+
+ /* Calculate mean erase counter */
+ if (ai->ec_count)
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
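+ /* e.g. an ec_sum of 10000 over ec_count = 50 PEBs gives mean_ec = 200 */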
+
+ err = late_analysis(ubi, ai);
+ if (err)
+ goto out_vidh;
+
+ /*
+ * In case of unknown erase counter we use the mean erase counter
+ * value.
+ */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+ }
+
+ list_for_each_entry(aeb, &ai->free, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+ }
+
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+
+ err = self_check_ai(ubi, ai);
+ if (err)
+ goto out_vidh;
+
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+ return 0;
+
+out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+ kfree(ech);
+ return err;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+
+/**
+ * scan_fast - try to find a fastmap and attach from it.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ *
+ * Returns zero on success; negative return values indicate an internal
+ * error. %UBI_NO_FASTMAP denotes that no fastmap was found, and
+ * %UBI_BAD_FASTMAP denotes that the fastmap found was invalid.
+ */
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int err, pnum, fm_anchor = -1;
+ unsigned long long max_sqnum = 0;
+
+ err = -ENOMEM;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ goto out;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ goto out_ech;
+
+ for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
+ int vol_id = -1;
+ unsigned long long sqnum = -1;
+
+ dbg_gen("process PEB %d", pnum);
+ err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
+ if (err < 0)
+ goto out_vidh;
+
+ if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
+ max_sqnum = sqnum;
+ fm_anchor = pnum;
+ }
+ }
+
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+ if (fm_anchor < 0)
+ return UBI_NO_FASTMAP;
+
+ return ubi_scan_fastmap(ubi, ai, fm_anchor);
+
+out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+ kfree(ech);
+out:
+ return err;
+}
+
+#endif
+
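+/*
+ * Note: @slab_name is kept for compatibility with the Linux sources; this
+ * port allocates the structure with kzalloc() and creates no slab cache.
+ */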
+static struct ubi_attach_info *alloc_ai(const char *slab_name)
+{
+ struct ubi_attach_info *ai;
+
+ ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+ if (!ai)
+ return ai;
+
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
+
+ return ai;
+}
+
+/**
+ * ubi_attach - attach an MTD device.
+ * @ubi: UBI device descriptor
+ * @force_scan: if set to non-zero attach by scanning
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_attach(struct ubi_device *ubi, int force_scan)
+{
+ int err;
+ struct ubi_attach_info *ai;
+
+ ai = alloc_ai("ubi_aeb_slab_cache");
+ if (!ai)
+ return -ENOMEM;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* On small flash devices we disable fastmap in any case. */
+ if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
+ ubi->fm_disabled = 1;
+ force_scan = 1;
+ }
+
+ if (force_scan)
+ err = scan_all(ubi, ai, 0);
+ else {
+ err = scan_fast(ubi, ai);
+ if (err > 0) {
+ if (err != UBI_NO_FASTMAP) {
+ destroy_ai(ai);
+ ai = alloc_ai("ubi_aeb_slab_cache2");
+ if (!ai)
+ return -ENOMEM;
+ }
+
+ err = scan_all(ubi, ai, UBI_FM_MAX_START);
+ }
+ }
+#else
+ err = scan_all(ubi, ai, 0);
+#endif
+ if (err)
+ goto out_ai;
+
+ ubi->bad_peb_count = ai->bad_peb_count;
+ ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
+ ubi->corr_peb_count = ai->corr_peb_count;
+ ubi->max_ec = ai->max_ec;
+ ubi->mean_ec = ai->mean_ec;
+ dbg_gen("max. sequence number: %llu", ai->max_sqnum);
+
+ err = ubi_read_volume_table(ubi, ai);
+ if (err)
+ goto out_ai;
+
+ err = ubi_wl_init(ubi, ai);
+ if (err)
+ goto out_vtbl;
+
+ err = ubi_eba_init(ubi, ai);
+ if (err)
+ goto out_wl;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
+ struct ubi_attach_info *scan_ai;
+
+ scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
+ if (!scan_ai) {
+ err = -ENOMEM;
+ goto out_wl;
+ }
+
+ err = scan_all(ubi, scan_ai, 0);
+ if (err) {
+ destroy_ai(scan_ai);
+ goto out_wl;
+ }
+
+ err = self_check_eba(ubi, ai, scan_ai);
+ destroy_ai(scan_ai);
+
+ if (err)
+ goto out_wl;
+ }
+#endif
+
+ destroy_ai(ai);
+ return 0;
+
+out_wl:
+ ubi_wl_close(ubi);
+out_vtbl:
+ ubi_free_internal_volumes(ubi);
+ vfree(ubi->vtbl);
+out_ai:
+ destroy_ai(ai);
+ return err;
+}
+
+/**
+ * self_check_ai - check the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns zero if the attaching information is all right, and a
+ * negative error code if not or if an error occurred.
+ */
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int pnum, err, vols_found = 0;
+ struct rb_node *rb1, *rb2;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *last_aeb;
+ uint8_t *buf;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ /*
+ * At first, check that attaching information is OK.
+ */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ int leb_count = 0;
+
+ vols_found += 1;
+
+ if (ai->is_empty) {
+ ubi_err("bad is_empty flag");
+ goto bad_av;
+ }
+
+ if (av->vol_id < 0 || av->highest_lnum < 0 ||
+ av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
+ av->data_pad < 0 || av->last_data_size < 0) {
+ ubi_err("negative values");
+ goto bad_av;
+ }
+
+ if (av->vol_id >= UBI_MAX_VOLUMES &&
+ av->vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err("bad vol_id");
+ goto bad_av;
+ }
+
+ if (av->vol_id > ai->highest_vol_id) {
+ ubi_err("highest_vol_id is %d, but vol_id %d is there",
+ ai->highest_vol_id, av->vol_id);
+ goto out;
+ }
+
+ if (av->vol_type != UBI_DYNAMIC_VOLUME &&
+ av->vol_type != UBI_STATIC_VOLUME) {
+ ubi_err("bad vol_type");
+ goto bad_av;
+ }
+
+ if (av->data_pad > ubi->leb_size / 2) {
+ ubi_err("bad data_pad");
+ goto bad_av;
+ }
+
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+ last_aeb = aeb;
+ leb_count += 1;
+
+ if (aeb->pnum < 0 || aeb->ec < 0) {
+ ubi_err("negative values");
+ goto bad_aeb;
+ }
+
+ if (aeb->ec < ai->min_ec) {
+ ubi_err("bad ai->min_ec (%d), %d found",
+ ai->min_ec, aeb->ec);
+ goto bad_aeb;
+ }
+
+ if (aeb->ec > ai->max_ec) {
+ ubi_err("bad ai->max_ec (%d), %d found",
+ ai->max_ec, aeb->ec);
+ goto bad_aeb;
+ }
+
+ if (aeb->pnum >= ubi->peb_count) {
+ ubi_err("too high PEB number %d, total PEBs %d",
+ aeb->pnum, ubi->peb_count);
+ goto bad_aeb;
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME) {
+ if (aeb->lnum >= av->used_ebs) {
+ ubi_err("bad lnum or used_ebs");
+ goto bad_aeb;
+ }
+ } else {
+ if (av->used_ebs != 0) {
+ ubi_err("non-zero used_ebs");
+ goto bad_aeb;
+ }
+ }
+
+ if (aeb->lnum > av->highest_lnum) {
+ ubi_err("incorrect highest_lnum or lnum");
+ goto bad_aeb;
+ }
+ }
+
+ if (av->leb_count != leb_count) {
+ ubi_err("bad leb_count, %d objects in the tree",
+ leb_count);
+ goto bad_av;
+ }
+
+ if (!last_aeb)
+ continue;
+
+ aeb = last_aeb;
+
+ if (aeb->lnum != av->highest_lnum) {
+ ubi_err("bad highest_lnum");
+ goto bad_aeb;
+ }
+ }
+
+ if (vols_found != ai->vols_found) {
+ ubi_err("bad ai->vols_found %d, should be %d",
+ ai->vols_found, vols_found);
+ goto out;
+ }
+
+ /* Check that attaching information is correct */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+ int vol_type;
+
+ last_aeb = aeb;
+
+ err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err("VID header is not OK (%d)", err);
+ if (err > 0)
+ err = -EIO;
+ return err;
+ }
+
+ vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
+ UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
+ if (av->vol_type != vol_type) {
+ ubi_err("bad vol_type");
+ goto bad_vid_hdr;
+ }
+
+ if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
+ ubi_err("bad sqnum %llu", aeb->sqnum);
+ goto bad_vid_hdr;
+ }
+
+ if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
+ ubi_err("bad vol_id %d", av->vol_id);
+ goto bad_vid_hdr;
+ }
+
+ if (av->compat != vidh->compat) {
+ ubi_err("bad compat %d", vidh->compat);
+ goto bad_vid_hdr;
+ }
+
+ if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err("bad lnum %d", aeb->lnum);
+ goto bad_vid_hdr;
+ }
+
+ if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
+ ubi_err("bad used_ebs %d", av->used_ebs);
+ goto bad_vid_hdr;
+ }
+
+ if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
+ ubi_err("bad data_pad %d", av->data_pad);
+ goto bad_vid_hdr;
+ }
+ }
+
+ if (!last_aeb)
+ continue;
+
+ if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err("bad highest_lnum %d", av->highest_lnum);
+ goto bad_vid_hdr;
+ }
+
+ if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
+ ubi_err("bad last_data_size %d", av->last_data_size);
+ goto bad_vid_hdr;
+ }
+ }
+
+ /*
+ * Make sure that all the physical eraseblocks are in one of the lists
+ * or trees.
+ */
+ buf = kzalloc(ubi->peb_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ err = ubi_io_is_bad(ubi, pnum);
+ if (err < 0) {
+ kfree(buf);
+ return err;
+ } else if (err)
+ buf[pnum] = 1;
+ }
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->free, u.list)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->alien, u.list)
+ buf[aeb->pnum] = 1;
+
+ err = 0;
+ for (pnum = 0; pnum < ubi->peb_count; pnum++)
+ if (!buf[pnum]) {
+ ubi_err("PEB %d is not referred", pnum);
+ err = 1;
+ }
+
+ kfree(buf);
+ if (err)
+ goto out;
+ return 0;
+
+bad_aeb:
+ ubi_err("bad attaching information about LEB %d", aeb->lnum);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_av(av);
+ goto out;
+
+bad_av:
+ ubi_err("bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
+ goto out;
+
+bad_vid_hdr:
+ ubi_err("bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
+ ubi_dump_vid_hdr(vidh);
+
+out:
+ dump_stack();
+ return -EINVAL;
+}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 1ea12095c2..c4bffae7f6 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -12,6 +12,9 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём),
* Frank Haverkamp
@@ -24,107 +27,73 @@
* module load parameters or the kernel boot parameters. If MTD devices were
* specified, UBI does not attach any MTD device, but it is possible to do
* later using the "UBI control device".
- *
- * At the moment we only attach UBI devices by scanning, which will become a
- * bottleneck when flashes reach certain large size. Then one may improve UBI
- * and add other methods, although it does not seem to be easy to do.
*/
-#ifdef UBI_LINUX
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/stat.h>
-#include <linux/miscdevice.h>
#include <linux/log2.h>
-#include <linux/kthread.h>
-#endif
-#include "ubi-barebox.h"
#include "ubi.h"
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
+/* Maximum value for the number of bad PEBs per 1024 PEBs */
+#define MAX_MTD_UBI_BEB_LIMIT 768
+
/**
* struct mtd_dev_param - MTD device parameter description data structure.
- * @name: MTD device name or number string
+ * @name: MTD character device node path, MTD device name, or MTD device number
+ * string
* @vid_hdr_offs: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
*/
-struct mtd_dev_param
-{
+struct mtd_dev_param {
char name[MTD_PARAM_LEN_MAX];
int vid_hdr_offs;
+ int max_beb_per1024;
};
-/* Numbers of elements set in the @mtd_dev_param array */
-static int mtd_devs = 0;
-
/* MTD devices specification parameters */
-static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
-
-/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
-struct class *ubi_class;
-
-#ifdef UBI_LINUX
-/* Slab cache for wear-leveling entries */
-struct kmem_cache *ubi_wl_entry_slab;
-
-/* UBI control character device */
-static struct miscdevice ubi_ctrl_cdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "ubi_ctrl",
- .fops = &ubi_ctrl_cdev_operations,
-};
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/* UBI module parameter to enable fastmap automatically on non-fastmap images */
+static bool fm_autoconvert = 1;
#endif
/* All UBI devices in system */
struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
-#ifdef UBI_LINUX
-/* Serializes UBI devices creations and removals */
-DEFINE_MUTEX(ubi_devices_mutex);
-
-/* Protects @ubi_devices and @ubi->ref_count */
-static DEFINE_SPINLOCK(ubi_devices_lock);
-
-/* "Show" method for files in '/<sysfs>/class/ubi/' */
-static ssize_t ubi_version_show(struct class *class, char *buf)
+/**
+ * ubi_volume_notify - send a volume change notification.
+ * @ubi: UBI device description object
+ * @vol: volume description object of the changed volume
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ *
+ * This is a helper function which notifies all subscribers about a volume
+ * change event (creation, removal, re-sizing, re-naming, updating). Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
- return sprintf(buf, "%d\n", UBI_VERSION);
-}
-
-/* UBI version attribute ('/<sysfs>/class/ubi/version') */
-static struct class_attribute ubi_version =
- __ATTR(version, S_IRUGO, ubi_version_show, NULL);
-
-static ssize_t dev_attribute_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
-static struct device_attribute dev_eraseblock_size =
- __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
-static struct device_attribute dev_avail_eraseblocks =
- __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
-static struct device_attribute dev_total_eraseblocks =
- __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
-static struct device_attribute dev_volumes_count =
- __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
-static struct device_attribute dev_max_ec =
- __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
-static struct device_attribute dev_reserved_for_bad =
- __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
-static struct device_attribute dev_bad_peb_count =
- __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
-static struct device_attribute dev_max_vol_count =
- __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
-static struct device_attribute dev_min_io_size =
- __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
-static struct device_attribute dev_bgt_enabled =
- __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
-static struct device_attribute dev_mtd_num =
- __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+ struct ubi_notification nt;
+
+ ubi_do_get_device_info(ubi, &nt.di);
+ ubi_do_get_volume_info(ubi, vol, &nt.vi);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ switch (ntype) {
+ case UBI_VOLUME_ADDED:
+ case UBI_VOLUME_REMOVED:
+ case UBI_VOLUME_RESIZED:
+ case UBI_VOLUME_RENAMED:
+ if (ubi_update_fastmap(ubi)) {
+ ubi_err("Unable to update fastmap!");
+ ubi_ro_mode(ubi);
+ }
+ }
#endif
+ return 0;
+}
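+
+/*
+ * Note: this port has no notifier chain, so the helper above only triggers
+ * a fastmap update on volume changes and always returns zero.
+ */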
/**
* ubi_get_device - get UBI device.
@@ -139,14 +108,9 @@ struct ubi_device *ubi_get_device(int ubi_num)
{
struct ubi_device *ubi;
- spin_lock(&ubi_devices_lock);
ubi = ubi_devices[ubi_num];
- if (ubi) {
- ubi_assert(ubi->ref_count >= 0);
- ubi->ref_count += 1;
- get_device(&ubi->dev);
- }
- spin_unlock(&ubi_devices_lock);
+
+ if (ubi)
+ ubi->ref_count++;
return ubi;
}
@@ -157,198 +121,11 @@ struct ubi_device *ubi_get_device(int ubi_num)
*/
void ubi_put_device(struct ubi_device *ubi)
{
- spin_lock(&ubi_devices_lock);
- ubi->ref_count -= 1;
- put_device(&ubi->dev);
- spin_unlock(&ubi_devices_lock);
-}
-
-/**
- * ubi_get_by_major - get UBI device description object by character device
- * major number.
- * @major: major number
- *
- * This function is similar to 'ubi_get_device()', but it searches the device
- * by its major number.
- */
-struct ubi_device *ubi_get_by_major(int major)
-{
- int i;
- struct ubi_device *ubi;
-
- spin_lock(&ubi_devices_lock);
- for (i = 0; i < UBI_MAX_DEVICES; i++) {
- ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
- ubi_assert(ubi->ref_count >= 0);
- ubi->ref_count += 1;
- get_device(&ubi->dev);
- spin_unlock(&ubi_devices_lock);
- return ubi;
- }
- }
- spin_unlock(&ubi_devices_lock);
-
- return NULL;
-}
-
-/**
- * ubi_major2num - get UBI device number by character device major number.
- * @major: major number
- *
- * This function searches UBI device number object by its major number. If UBI
- * device was not found, this function returns -ENODEV, otherwise the UBI device
- * number is returned.
- */
-int ubi_major2num(int major)
-{
- int i, ubi_num = -ENODEV;
-
- spin_lock(&ubi_devices_lock);
- for (i = 0; i < UBI_MAX_DEVICES; i++) {
- struct ubi_device *ubi = ubi_devices[i];
-
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
- ubi_num = ubi->ubi_num;
- break;
- }
- }
- spin_unlock(&ubi_devices_lock);
-
- return ubi_num;
-}
-
-#ifdef UBI_LINUX
-/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
-static ssize_t dev_attribute_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- ssize_t ret;
- struct ubi_device *ubi;
-
- /*
- * The below code looks weird, but it actually makes sense. We get the
- * UBI device reference from the contained 'struct ubi_device'. But it
- * is unclear if the device was removed or not yet. Indeed, if the
- * device was removed before we increased its reference count,
- * 'ubi_get_device()' will return -ENODEV and we fail.
- *
- * Remember, 'struct ubi_device' is freed in the release function, so
- * we still can use 'ubi->ubi_num'.
- */
- ubi = container_of(dev, struct ubi_device, dev);
- ubi = ubi_get_device(ubi->ubi_num);
- if (!ubi)
- return -ENODEV;
-
- if (attr == &dev_eraseblock_size)
- ret = sprintf(buf, "%d\n", ubi->leb_size);
- else if (attr == &dev_avail_eraseblocks)
- ret = sprintf(buf, "%d\n", ubi->avail_pebs);
- else if (attr == &dev_total_eraseblocks)
- ret = sprintf(buf, "%d\n", ubi->good_peb_count);
- else if (attr == &dev_volumes_count)
- ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
- else if (attr == &dev_max_ec)
- ret = sprintf(buf, "%d\n", ubi->max_ec);
- else if (attr == &dev_reserved_for_bad)
- ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
- else if (attr == &dev_bad_peb_count)
- ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
- else if (attr == &dev_max_vol_count)
- ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
- else if (attr == &dev_min_io_size)
- ret = sprintf(buf, "%d\n", ubi->min_io_size);
- else if (attr == &dev_bgt_enabled)
- ret = sprintf(buf, "%d\n", ubi->thread_enabled);
- else if (attr == &dev_mtd_num)
- ret = sprintf(buf, "%d\n", ubi->mtd->index);
- else
- ret = -EINVAL;
-
- ubi_put_device(ubi);
- return ret;
-}
-
-/* Fake "release" method for UBI devices */
-static void dev_release(struct device *dev) { }
-
-/**
- * ubi_sysfs_init - initialize sysfs for an UBI device.
- * @ubi: UBI device description object
- *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
- */
-static int ubi_sysfs_init(struct ubi_device *ubi)
-{
- int err;
-
- ubi->dev.release = dev_release;
- ubi->dev.devt = ubi->cdev.dev;
- ubi->dev.class = ubi_class;
- sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
- err = device_register(&ubi->dev);
- if (err)
- return err;
-
- err = device_create_file(&ubi->dev, &dev_eraseblock_size);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_volumes_count);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_max_ec);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_bad_peb_count);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_max_vol_count);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_min_io_size);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_bgt_enabled);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_mtd_num);
- return err;
-}
-
-/**
- * ubi_sysfs_close - close sysfs for an UBI device.
- * @ubi: UBI device description object
- */
-static void ubi_sysfs_close(struct ubi_device *ubi)
-{
- device_remove_file(&ubi->dev, &dev_mtd_num);
- device_remove_file(&ubi->dev, &dev_bgt_enabled);
- device_remove_file(&ubi->dev, &dev_min_io_size);
- device_remove_file(&ubi->dev, &dev_max_vol_count);
- device_remove_file(&ubi->dev, &dev_bad_peb_count);
- device_remove_file(&ubi->dev, &dev_reserved_for_bad);
- device_remove_file(&ubi->dev, &dev_max_ec);
- device_remove_file(&ubi->dev, &dev_volumes_count);
- device_remove_file(&ubi->dev, &dev_total_eraseblocks);
- device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
- device_remove_file(&ubi->dev, &dev_eraseblock_size);
- device_unregister(&ubi->dev);
+ ubi->ref_count--;
}
-#endif
/**
- * kill_volumes - destroy all volumes.
+ * kill_volumes - destroy all user volumes.
* @ubi: UBI device description object
*/
static void kill_volumes(struct ubi_device *ubi)
@@ -363,49 +140,34 @@ static void kill_volumes(struct ubi_device *ubi)
/**
* uif_init - initialize user interfaces for an UBI device.
* @ubi: UBI device description object
+ * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
+ * taken, otherwise set to %0
+ *
+ * This function initializes various user interfaces for an UBI device. If the
+ * initialization fails at an early stage, this function frees all the
+ * resources it allocated, returns an error, and @ref is set to %0. However,
+ * if the initialization fails after the UBI device was registered in the
+ * driver core subsystem, this function takes a reference to @ubi->dev, because
+ * otherwise the release function ('dev_release()') would free whole @ubi
+ * object. The @ref argument is set to %1 in this case. The caller has to put
+ * this reference.
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int uif_init(struct ubi_device *ubi)
+static int uif_init(struct ubi_device *ubi, int *ref)
{
int i, err;
-#ifdef UBI_LINUX
- dev_t dev;
-#endif
+ *ref = 0;
sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
- /*
- * Major numbers for the UBI character devices are allocated
- * dynamically. Major numbers of volume character devices are
- * equivalent to ones of the corresponding UBI character device. Minor
- * numbers of UBI character devices are 0, while minor numbers of
- * volume character devices start from 1. Thus, we allocate one major
- * number and ubi->vtbl_slots + 1 minor numbers.
- */
- err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
- if (err) {
- ubi_err("cannot register UBI character devices");
- return err;
- }
-
- ubi_assert(MINOR(dev) == 0);
- cdev_init(&ubi->cdev, &ubi_cdev_operations);
- dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev));
-#ifdef UBI_LINUX
- ubi->cdev.owner = THIS_MODULE;
-#endif
err = ubi_cdev_add(ubi);
if (err) {
ubi_err("cannot add character device");
goto out_unreg;
}
- err = ubi_sysfs_init(ubi);
- if (err)
- goto out_sysfs;
-
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i]) {
err = ubi_add_volume(ubi, ubi->volumes[i]);
@@ -419,11 +181,8 @@ static int uif_init(struct ubi_device *ubi)
out_volumes:
kill_volumes(ubi);
-out_sysfs:
- ubi_sysfs_close(ubi);
- ubi_cdev_remove(ubi);
+ devfs_remove(&ubi->cdev);
out_unreg:
- unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
return err;
}
@@ -431,68 +190,64 @@ out_unreg:
/**
* uif_close - close user interfaces for an UBI device.
* @ubi: UBI device description object
+ *
+ * Note, since this function un-registers UBI volume device objects (@vol->dev),
+ * the memory allocated for the volumes is freed as well (in the release
+ * function).
*/
static void uif_close(struct ubi_device *ubi)
{
kill_volumes(ubi);
- ubi_sysfs_close(ubi);
ubi_cdev_remove(ubi);
- unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}
/**
- * attach_by_scanning - attach an MTD device using scanning method.
- * @ubi: UBI device descriptor
- *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
- *
- * Note, currently this is the only method to attach UBI devices. Hopefully in
- * the future we'll have more scalable attaching methods and avoid full media
- * scanning. But even in this case scanning will be needed as a fall-back
- * attaching method if there are some on-flash table corruptions.
+ * ubi_free_internal_volumes - free internal volumes.
+ * @ubi: UBI device description object
*/
-static int attach_by_scanning(struct ubi_device *ubi)
+void ubi_free_internal_volumes(struct ubi_device *ubi)
{
- int err;
- struct ubi_scan_info *si;
-
- si = ubi_scan(ubi);
- if (IS_ERR(si))
- return PTR_ERR(si);
+ int i;
- ubi->bad_peb_count = si->bad_peb_count;
- ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
- ubi->max_ec = si->max_ec;
- ubi->mean_ec = si->mean_ec;
+ for (i = ubi->vtbl_slots;
+ i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ kfree(ubi->volumes[i]->eba_tbl);
+ kfree(ubi->volumes[i]);
+ }
+}
- err = ubi_read_volume_table(ubi, si);
- if (err)
- goto out_si;
+static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
+{
+ int limit, device_pebs;
+ uint64_t device_size;
- err = ubi_wl_init_scan(ubi, si);
- if (err)
- goto out_vtbl;
+ if (!max_beb_per1024)
+ return 0;
- err = ubi_eba_init_scan(ubi, si);
- if (err)
- goto out_wl;
+ /*
+ * Here we are using the size of the entire flash chip and
+ * not just the MTD partition size because the maximum
+ * number of bad eraseblocks is a percentage of the
+ * whole device and bad eraseblocks are not fairly
+ * distributed over the flash chip. So the worst case
+ * is that all the bad eraseblocks of the chip are in
+ * the MTD partition we are attaching (ubi->mtd).
+ */
+ device_size = ubi->mtd->size;
+ device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
+ limit = mult_frac(device_pebs, max_beb_per1024, 1024);
- ubi_scan_destroy_si(si);
- return 0;
+ /* Round it up */
+ if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
+ limit += 1;
-out_wl:
- ubi_wl_close(ubi);
-out_vtbl:
- vfree(ubi->vtbl);
-out_si:
- ubi_scan_destroy_si(si);
- return err;
+ return limit;
}
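+
+/*
+ * Worked example (illustrative, not taken from the code): a 256 MiB chip
+ * with 128 KiB eraseblocks has 2048 PEBs; max_beb_per1024 = 20 then gives
+ * limit = mult_frac(2048, 20, 1024) = 40, and no round-up is needed since
+ * mult_frac(40, 1024, 20) equals device_pebs exactly.
+ */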
/**
- * io_init - initialize I/O unit for a given UBI device.
+ * io_init - initialize I/O sub-system for a given UBI device.
* @ubi: UBI device description object
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
*
* If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
* assumed:
@@ -505,8 +260,11 @@ out_si:
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int io_init(struct ubi_device *ubi)
+static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
+ dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+ dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
if (ubi->mtd->numeraseregions != 0) {
/*
* Some flashes have several erase regions. Different regions
@@ -533,8 +291,15 @@ static int io_init(struct ubi_device *ubi)
ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
ubi->flash_size = ubi->mtd->size;
- if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
+ if (mtd_can_have_bb(ubi->mtd)) {
ubi->bad_allowed = 1;
+ ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
+ }
+
+ if (ubi->mtd->type == MTD_NORFLASH) {
+ ubi_assert(ubi->mtd->writesize == 1);
+ ubi->nor_flash = 1;
+ }
ubi->min_io_size = ubi->mtd->writesize;
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
@@ -554,14 +319,28 @@ static int io_init(struct ubi_device *ubi)
ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
+ ubi->max_write_size = ubi->mtd->writesize; /* FIXME: writebufsize */
+ /*
+ * The maximum write size has to be greater than or equal to the
+ * min. I/O size, and it has to be a multiple of the min. I/O size.
+ */
+ if (ubi->max_write_size < ubi->min_io_size ||
+ ubi->max_write_size % ubi->min_io_size ||
+ !is_power_of_2(ubi->max_write_size)) {
+ ubi_err("bad write buffer size %d for %d min. I/O unit",
+ ubi->max_write_size, ubi->min_io_size);
+ return -EINVAL;
+ }
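+ /*
+ * Example (illustrative): with min_io_size = 2048, max_write_size
+ * values of 2048 or 4096 pass the check above, while 3072 would
+ * fail both the multiple-of and power-of-2 tests.
+ */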
+
/* Calculate default aligned sizes of EC and VID headers */
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
- dbg_msg("min_io_size %d", ubi->min_io_size);
- dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
- dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
- dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
+ dbg_gen("min_io_size %d", ubi->min_io_size);
+ dbg_gen("max_write_size %d", ubi->max_write_size);
+ dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+ dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
+ dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
if (ubi->vid_hdr_offset == 0)
/* Default offset */
@@ -575,13 +354,13 @@ static int io_init(struct ubi_device *ubi)
}
/* Similar for the data offset */
- ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
+ ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
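+ /*
+ * Example (illustrative): with vid_hdr_offset = 512, a 64-byte VID
+ * header and min_io_size = 2048, leb_start becomes
+ * ALIGN(512 + 64, 2048) = 2048.
+ */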
- dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
- dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
- dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
- dbg_msg("leb_start %d", ubi->leb_start);
+ dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
+ dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
+ dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
+ dbg_gen("leb_start %d", ubi->leb_start);
/* The shift must be aligned to 32-bit boundary */
if (ubi->vid_hdr_shift % 4) {
@@ -601,41 +380,38 @@ static int io_init(struct ubi_device *ubi)
}
/*
+ * Set the maximum number of erroneous physical eraseblocks to 10%.
+ * Erroneous PEBs are those which have read errors.
+ */
+ ubi->max_erroneous = ubi->peb_count / 10;
+ if (ubi->max_erroneous < 16)
+ ubi->max_erroneous = 16;
+ dbg_gen("max_erroneous %d", ubi->max_erroneous);
+
+ /*
* It may happen that EC and VID headers are situated in one minimal
* I/O unit. In this case we can only accept this UBI image in
* read-only mode.
*/
if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
- ubi_warn("EC and VID headers are in the same minimal I/O unit, "
- "switch to read-only mode");
+ ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
ubi->ro_mode = 1;
}
ubi->leb_size = ubi->peb_size - ubi->leb_start;
if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
- ubi_msg("MTD device %d is write-protected, attach in "
- "read-only mode", ubi->mtd->index);
+ ubi_msg("MTD device %d is write-protected, attach in read-only mode",
+ ubi->mtd->index);
ubi->ro_mode = 1;
}
- ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
- ubi->peb_size, ubi->peb_size >> 10);
- ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
- ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
- if (ubi->hdrs_min_io_size != ubi->min_io_size)
- ubi_msg("sub-page size: %d",
- ubi->hdrs_min_io_size);
- ubi_msg("VID header offset: %d (aligned %d)",
- ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
- ubi_msg("data offset: %d", ubi->leb_start);
-
/*
- * Note, ideally, we have to initialize ubi->bad_peb_count here. But
+ * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
* unfortunately, MTD does not provide this information. We should loop
* over all physical eraseblocks and invoke mtd->block_is_bad() for
- * each physical eraseblock. So, we skip ubi->bad_peb_count
- * uninitialized and initialize it after scanning.
+ * each physical eraseblock. So, we leave @ubi->bad_peb_count
+ * uninitialized so far.
*/
return 0;
@@ -646,7 +422,7 @@ static int io_init(struct ubi_device *ubi)
* @ubi: UBI device description object
* @vol_id: ID of the volume to re-size
*
- * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
+ * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
* the volume table to the largest possible size. See comments in ubi-header.h
* for more description of the flag. Returns zero in case of success and a
* negative error code in case of failure.
@@ -657,9 +433,14 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
struct ubi_volume *vol = ubi->volumes[vol_id];
int err, old_reserved_pebs = vol->reserved_pebs;
+ if (ubi->ro_mode) {
+ ubi_warn("skip auto-resize because of R/O mode");
+ return 0;
+ }
+
/*
* Clear the auto-resize flag in the volume in-memory copy of the
- * volume table, and 'ubi_resize_volume()' will propogate this change
+ * volume table, and 'ubi_resize_volume()' will propagate this change
* to the flash.
*/
ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
@@ -668,11 +449,10 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
struct ubi_vtbl_record vtbl_rec;
/*
- * No avalilable PEBs to re-size the volume, clear the flag on
+ * No available PEBs to re-size the volume, clear the flag on
* flash and exit.
*/
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
ubi_err("cannot clean auto-resize flag for volume %d",
@@ -695,9 +475,10 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
/**
* ubi_attach_mtd_dev - attach an MTD device.
- * @mtd_dev: MTD device description object
+ * @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
* @vid_hdr_offset: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
*
 * This function attaches MTD device @mtd to UBI and assigns @ubi_num to the
 * newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -708,10 +489,17 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
 * Note, the invocations of this function have to be serialized by the
* @ubi_devices_mutex.
*/
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024)
{
struct ubi_device *ubi;
- int i, err;
+ int i, err, ref = 0;
+
+ if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
+ return -EINVAL;
+
+ if (!max_beb_per1024)
+ max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
/*
* Check if we already have the same MTD device attached.
@@ -722,7 +510,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && mtd == ubi->mtd) {
- dbg_err("mtd%d is already attached to ubi%d",
+ ubi_err("mtd%d is already attached to ubi%d",
mtd->index, i);
return -EEXIST;
}
@@ -737,8 +525,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
* no sense to attach emulated MTD devices, so we prohibit this.
*/
if (mtd->type == MTD_UBIVOLUME) {
- ubi_err("refuse attaching mtd%d - it is already emulated on "
- "top of UBI", mtd->index);
+ ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
+ mtd->index);
return -EINVAL;
}
@@ -748,7 +536,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (!ubi_devices[ubi_num])
break;
if (ubi_num == UBI_MAX_DEVICES) {
- dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES);
+ ubi_err("only %d UBI devices may be created",
+ UBI_MAX_DEVICES);
return -ENFILE;
}
} else {
@@ -757,7 +546,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
/* Make sure ubi_num is not busy */
if (ubi_devices[ubi_num]) {
- dbg_err("ubi%d already exists", ubi_num);
+ ubi_err("ubi%d already exists", ubi_num);
return -EEXIST;
}
}
@@ -771,36 +560,55 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
ubi->vid_hdr_offset = vid_hdr_offset;
ubi->autoresize_vol_id = -1;
- mutex_init(&ubi->buf_mutex);
- mutex_init(&ubi->ckvol_mutex);
- mutex_init(&ubi->volumes_mutex);
- spin_lock_init(&ubi->volumes_lock);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_pool.used = ubi->fm_pool.size = 0;
+ ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
+
+ /*
+ * fm_pool.max_size is 5% of the total number of PEBs but it's also
+ * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
+ */
+ ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
+ ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
+ if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
+ ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
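+ /*
+ * Example (illustrative): a device with 2000 usable PEBs gets a
+ * fastmap pool of (2000 / 100) * 5 = 100 PEBs, subject to the
+ * UBI_FM_MIN/MAX_POOL_SIZE clamps above.
+ */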
+
+ ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
+ ubi->fm_disabled = !fm_autoconvert;
+
+ if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
+ <= UBI_FM_MAX_START) {
+ ubi_err("More than %i PEBs are needed for fastmap, sorry.",
+ UBI_FM_MAX_START);
+ ubi->fm_disabled = 1;
+ }
+
+ ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+#else
+ ubi->fm_disabled = 1;
+#endif
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
- err = io_init(ubi);
+ err = io_init(ubi, max_beb_per1024);
if (err)
goto out_free;
err = -ENOMEM;
- ubi->peb_buf1 = vmalloc(ubi->peb_size);
- if (!ubi->peb_buf1)
+ ubi->peb_buf = vmalloc(ubi->peb_size);
+ if (!ubi->peb_buf)
goto out_free;
- ubi->peb_buf2 = vmalloc(ubi->peb_size);
- if (!ubi->peb_buf2)
- goto out_free;
-
-#ifdef CONFIG_MTD_UBI_DEBUG
- mutex_init(&ubi->dbg_buf_mutex);
- ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
- if (!ubi->dbg_peb_buf)
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_size = ubi_calc_fm_size(ubi);
+ ubi->fm_buf = kzalloc(ubi->fm_size, GFP_KERNEL);
+ if (!ubi->fm_buf)
goto out_free;
#endif
-
- err = attach_by_scanning(ubi);
+ err = ubi_attach(ubi, 0);
if (err) {
- dbg_err("failed to attach by scanning, error %d", err);
+ ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
goto out_free;
}
@@ -810,55 +618,47 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
goto out_detach;
}
- err = uif_init(ubi);
+ err = uif_init(ubi, &ref);
if (err)
goto out_detach;
- ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
- if (IS_ERR(ubi->bgt_thread)) {
- err = PTR_ERR(ubi->bgt_thread);
- ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
- err);
- goto out_uif;
- }
+ ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
+ mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
+ ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+ ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
+ ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
+ ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
+ ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
+ ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
+ ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+ ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
+ ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
+ ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
+ ubi->vtbl_slots);
+ ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+ ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
+ ubi->image_seq);
+ ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+ ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
- ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
- ubi_msg("MTD device name: \"%s\"", mtd->name);
- ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
- ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
- ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
- ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
- ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
- ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
- ubi_msg("number of user volumes: %d",
- ubi->vol_count - UBI_INT_VOL_COUNT);
- ubi_msg("available PEBs: %d", ubi->avail_pebs);
- ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
- ubi_msg("number of PEBs reserved for bad PEB handling: %d",
- ubi->beb_rsvd_pebs);
- ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
-
- /* Enable the background thread */
- if (!DBG_DISABLE_BGT) {
- ubi->thread_enabled = 1;
- wake_up_process(ubi->bgt_thread);
- }
+ /*
+ * Enable the thread flag before waking the background thread up, so
+ * that 'ubi_thread()' sees @ubi->thread_enabled set.
+ */
+ ubi->thread_enabled = 1;
+ wake_up_process(ubi->bgt_thread);
ubi_devices[ubi_num] = ubi;
+
return ubi_num;
-out_uif:
- uif_close(ubi);
out_detach:
- ubi_eba_close(ubi);
ubi_wl_close(ubi);
+ ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
out_free:
- vfree(ubi->peb_buf1);
- vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG
- vfree(ubi->dbg_peb_buf);
-#endif
+ vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
kfree(ubi);
return err;
}
@@ -876,181 +676,43 @@ out_free:
 * Note, the invocations of this function have to be serialized by the
* @ubi_devices_mutex.
*/
-int ubi_detach_mtd_dev(struct mtd_info *mtd, int anyway)
+int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
struct ubi_device *ubi;
- int ubi_num = 0, i;
-
- spin_lock(&ubi_devices_lock);
- for (i = 0; i < UBI_MAX_DEVICES; i++) {
- ubi = ubi_devices[i];
- if (ubi && mtd == ubi->mtd) {
- ubi_num = i;
- break;
- }
- }
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
- if (!ubi) {
- spin_unlock(&ubi_devices_lock);
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
return -EINVAL;
- }
- if (ubi->ref_count) {
- if (!anyway) {
- spin_unlock(&ubi_devices_lock);
- return -EBUSY;
- }
- /* This may only happen if there is a bug */
- ubi_err("%s reference count %d, destroy anyway",
- ubi->ubi_name, ubi->ref_count);
- }
+ ubi->ref_count--;
+
+ if (ubi->ref_count)
+ return -EBUSY;
+
ubi_devices[ubi_num] = NULL;
- spin_unlock(&ubi_devices_lock);
ubi_assert(ubi_num == ubi->ubi_num);
- dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
- /*
- * Before freeing anything, we have to stop the background thread to
- * prevent it from doing anything on this device while we are freeing.
- */
- if (ubi->bgt_thread)
- kthread_stop(ubi->bgt_thread);
+ ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	/*
+	 * If we don't write a new fastmap at detach time we lose all
+	 * EC updates that have been made since the last written fastmap.
+	 */
+ ubi_update_fastmap(ubi);
+ ubi_free_fastmap(ubi);
+#endif
uif_close(ubi);
- ubi_eba_close(ubi);
+
ubi_wl_close(ubi);
+ ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
- vfree(ubi->peb_buf1);
- vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG
- vfree(ubi->dbg_peb_buf);
-#endif
+ vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
kfree(ubi);
- return 0;
-}
-
-/**
- * bytes_str_to_int - convert a string representing number of bytes to an
- * integer.
- * @str: the string to convert
- *
- * This function returns positive resulting integer in case of success and a
- * negative error code in case of failure.
- */
-static int __init bytes_str_to_int(const char *str)
-{
- char *endp;
- unsigned long result;
- result = simple_strtoul(str, &endp, 0);
- if (str == endp || result < 0) {
- printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
- str);
- return -EINVAL;
- }
-
- switch (*endp) {
- case 'G':
- result *= 1024;
- case 'M':
- result *= 1024;
- case 'K':
- result *= 1024;
- if (endp[1] == 'i' && endp[2] == 'B')
- endp += 2;
- case '\0':
- break;
- default:
- printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
- str);
- return -EINVAL;
- }
-
- return result;
-}
-
-/**
- * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
- * @val: the parameter value to parse
- * @kp: not used
- *
- * This function returns zero in case of success and a negative error code in
- * case of error.
- */
-int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
-{
- int i, len;
- struct mtd_dev_param *p;
- char buf[MTD_PARAM_LEN_MAX];
- char *pbuf = &buf[0];
- char *tokens[2] = {NULL, NULL};
-
- if (!val)
- return -EINVAL;
-
- if (mtd_devs == UBI_MAX_DEVICES) {
- printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
- UBI_MAX_DEVICES);
- return -EINVAL;
- }
-
- len = strnlen(val, MTD_PARAM_LEN_MAX);
- if (len == MTD_PARAM_LEN_MAX) {
- printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
- "max. is %d\n", val, MTD_PARAM_LEN_MAX);
- return -EINVAL;
- }
-
- if (len == 0) {
- printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
- "ignored\n");
- return 0;
- }
-
- strcpy(buf, val);
-
- /* Get rid of the final newline */
- if (buf[len - 1] == '\n')
- buf[len - 1] = '\0';
-
- for (i = 0; i < 2; i++)
- tokens[i] = strsep(&pbuf, ",");
-
- if (pbuf) {
- printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
- val);
- return -EINVAL;
- }
-
- p = &mtd_dev_param[mtd_devs];
- strcpy(&p->name[0], tokens[0]);
-
- if (tokens[1])
- p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
-
- if (p->vid_hdr_offs < 0)
- return p->vid_hdr_offs;
-
- mtd_devs += 1;
return 0;
}
-
-module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
-MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
- "mtd=<name|num>[,<vid_hdr_offs>].\n"
- "Multiple \"mtd\" parameters may be specified.\n"
- "MTD devices may be specified by their number or name.\n"
- "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
- "header position and data starting position to be used "
- "by UBI.\n"
- "Example: mtd=content,1984 mtd=4 - attach MTD device"
- "with name \"content\" using VID header offset 1984, and "
- "MTD device number 4 with default VID header offset.");
-
-MODULE_VERSION(__stringify(UBI_VERSION));
-MODULE_DESCRIPTION("UBI - Unsorted Block Images");
-MODULE_AUTHOR("Artem Bityutskiy");
-MODULE_LICENSE("GPL");
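
The removed bytes_str_to_int() leaned on deliberate switch fall-through:
a 'G' suffix multiplies by 1024 three times, 'M' twice, 'K' once, and a
trailing "iB" is swallowed. A minimal standalone sketch of that parsing
(parse_bytes is an illustrative name, not a barebox symbol):

	#include <stdlib.h>

	/* Sketch of the suffix parsing removed above; the fall-through
	 * between the 'G', 'M' and 'K' cases is intentional, exactly as
	 * in bytes_str_to_int(). */
	static long parse_bytes(const char *str)
	{
		char *endp;
		unsigned long result = strtoul(str, &endp, 0);

		if (str == endp)
			return -1;		/* no digits at all */

		switch (*endp) {
		case 'G':
			result *= 1024;		/* fall through */
		case 'M':
			result *= 1024;		/* fall through */
		case 'K':
			result *= 1024;
			if (endp[1] == 'i' && endp[2] == 'B')
				endp += 2;	/* accept KiB/MiB/GiB */
		case '\0':
			break;
		default:
			return -1;		/* unknown suffix */
		}
		return result;
	}

	/* parse_bytes("1984") == 1984, parse_bytes("2KiB") == 2048,
	 * parse_bytes("1M") == 1048576 */
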
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 4bc4a99019..3907673386 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -23,7 +23,7 @@ static ssize_t ubi_volume_cdev_read(struct cdev *cdev, void *buf, size_t size,
loff_t offp = offset;
int usable_leb_size = vol->usable_leb_size;
- printf("%s: %zd @ 0x%08llx\n", __func__, size, offset);
+ debug("%s: %zd @ 0x%08llx\n", __func__, size, offset);
len = size > usable_leb_size ? usable_leb_size : size;
@@ -103,13 +103,16 @@ static int ubi_volume_cdev_close(struct cdev *cdev)
(priv->written % vol->usable_leb_size);
if (remaining) {
- void *buf = xzalloc(remaining);
+ void *buf = kmalloc(remaining, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
memset(buf, 0xff, remaining);
err = ubi_more_update_data(ubi, vol, buf, remaining);
- free(buf);
+ kfree(buf);
if (err < 0) {
printf("Couldnt or partially wrote data \n");
@@ -134,7 +137,7 @@ static int ubi_volume_cdev_close(struct cdev *cdev)
}
vol->checked = 1;
- ubi_gluebi_updated(vol);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
}
return 0;
@@ -165,7 +168,7 @@ int ubi_volume_cdev_add(struct ubi_device *ubi, struct ubi_volume *vol)
struct ubi_volume_cdev_priv *priv;
int ret;
- priv = xzalloc(sizeof(*priv));
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
priv->vol = vol;
priv->ubi = ubi;
@@ -177,7 +180,7 @@ int ubi_volume_cdev_add(struct ubi_device *ubi, struct ubi_volume *vol)
printf("registering %s as /dev/%s\n", vol->name, cdev->name);
ret = devfs_create(cdev);
if (ret) {
- free(priv);
+ kfree(priv);
free(cdev->name);
}
@@ -190,8 +193,8 @@ void ubi_volume_cdev_remove(struct ubi_volume *vol)
struct ubi_volume_cdev_priv *priv = cdev->priv;
devfs_remove(cdev);
- free(cdev->name);
- free(priv);
+ kfree(cdev->name);
+ kfree(priv);
}
static int ubi_cdev_ioctl(struct cdev *cdev, int cmd, void *buf)
@@ -206,7 +209,8 @@ static int ubi_cdev_ioctl(struct cdev *cdev, int cmd, void *buf)
UBI_EXCLUSIVE);
if (IS_ERR(desc))
return PTR_ERR(desc);
- ubi_remove_volume(desc);
+ ubi_remove_volume(desc, 0);
+ ubi_close_volume(desc);
break;
case UBI_IOCMKVOL:
if (!req->bytes)
@@ -217,7 +221,6 @@ static int ubi_cdev_ioctl(struct cdev *cdev, int cmd, void *buf)
return 0;
}
-
static struct file_operations ubi_fops = {
.ioctl = ubi_cdev_ioctl,
};
@@ -235,7 +238,7 @@ int ubi_cdev_add(struct ubi_device *ubi)
printf("registering /dev/%s\n", cdev->name);
ret = devfs_create(cdev);
if (ret)
- free(cdev->name);
+ kfree(cdev->name);
return ret;
}
@@ -247,5 +250,5 @@ void ubi_cdev_remove(struct ubi_device *ubi)
printf("removing %s\n", cdev->name);
devfs_remove(cdev);
- free(cdev->name);
+ kfree(cdev->name);
}
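
The close path above pads a partially written volume update out to a
full LEB with 0xff, the erased state of NAND, so the padding is inert
on flash. A condensed sketch of that step, reusing the names from the
hunk (pad_leb_tail itself is an illustrative helper, not barebox API):

	/* Sketch of the tail padding done in ubi_volume_cdev_close();
	 * 0xff matches erased flash, so the pad bytes cost nothing. */
	static int pad_leb_tail(struct ubi_device *ubi,
				struct ubi_volume *vol, size_t written)
	{
		int remaining = vol->usable_leb_size -
				(written % vol->usable_leb_size);
		void *buf = kmalloc(remaining, GFP_KERNEL);
		int err;

		if (!buf)
			return -ENOMEM;
		memset(buf, 0xff, remaining);
		err = ubi_more_update_data(ubi, vol, buf, remaining);
		kfree(buf);
		return err < 0 ? err : 0;
	}
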
diff --git a/drivers/mtd/ubi/crc32defs.h b/drivers/mtd/ubi/crc32defs.h
deleted file mode 100644
index f5a5401765..0000000000
--- a/drivers/mtd/ubi/crc32defs.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * There are multiple 16-bit CRC polynomials in common use, but this is
- * *the* standard CRC-32 polynomial, first popularized by Ethernet.
- * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
- */
-#define CRCPOLY_LE 0xedb88320
-#define CRCPOLY_BE 0x04c11db7
-
-/* How many bits at a time to use. Requires a table of 4<<CRC_xx_BITS bytes. */
-/* For less performance-sensitive, use 4 */
-#ifndef CRC_LE_BITS
-# define CRC_LE_BITS 8
-#endif
-#ifndef CRC_BE_BITS
-# define CRC_BE_BITS 8
-#endif
-
-/*
- * Little-endian CRC computation. Used with serial bit streams sent
- * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC.
- */
-#if CRC_LE_BITS > 8 || CRC_LE_BITS < 1 || CRC_LE_BITS & CRC_LE_BITS-1
-# error CRC_LE_BITS must be a power of 2 between 1 and 8
-#endif
-
-/*
- * Big-endian CRC computation. Used with serial bit streams sent
- * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC.
- */
-#if CRC_BE_BITS > 8 || CRC_BE_BITS < 1 || CRC_BE_BITS & CRC_BE_BITS-1
-# error CRC_BE_BITS must be a power of 2 between 1 and 8
-#endif
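
For reference, CRCPOLY_LE from the deleted header is the reflected
CRC-32 polynomial that UBI's checksums (seeded with UBI_CRC32_INIT,
0xffffffff) are built on; the table-driven lib/crc32 code now supplies
it. A bit-at-a-time sketch, equivalent but slower (illustrative only):

	#include <stdint.h>
	#include <stddef.h>

	#define CRCPOLY_LE 0xedb88320

	/* Processes one input bit per inner iteration; lib/crc32 gets
	 * the same result from an 8-bit lookup table per byte. */
	static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p,
					 size_t len)
	{
		int i;

		while (len--) {
			crc ^= *p++;
			for (i = 0; i < 8; i++)
				crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
		}
		return crc;
	}
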
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 42e56687a7..59c6048349 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -11,179 +11,204 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
-/*
- * Here we keep all the UBI debugging stuff which should normally be disabled
- * and compiled-out, but it is extremely helpful when hunting bugs or doing big
- * changes.
+#include "ubi.h"
+
+/**
+ * ubi_dump_flash - dump a region of flash.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to dump
+ * @offset: the starting offset within the physical eraseblock to dump
+ * @len: the length of the region to dump
*/
-#include "ubi-barebox.h"
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+ int err;
+ size_t read;
+ void *buf;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG
+ buf = vmalloc(len);
+ if (!buf)
+ return;
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err && err != -EUCLEAN) {
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
+ goto out;
+ }
-#include "ubi.h"
+ ubi_msg("dumping %d bytes of data from PEB %d, offset %d",
+ len, pnum, offset);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+out:
+ vfree(buf);
+ return;
+}
/**
- * ubi_dbg_dump_ec_hdr - dump an erase counter header.
+ * ubi_dump_ec_hdr - dump an erase counter header.
* @ec_hdr: the erase counter header to dump
*/
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
{
- dbg_msg("erase counter header dump:");
- dbg_msg("magic %#08x", be32_to_cpu(ec_hdr->magic));
- dbg_msg("version %d", (int)ec_hdr->version);
- dbg_msg("ec %llu", (long long)be64_to_cpu(ec_hdr->ec));
- dbg_msg("vid_hdr_offset %d", be32_to_cpu(ec_hdr->vid_hdr_offset));
- dbg_msg("data_offset %d", be32_to_cpu(ec_hdr->data_offset));
- dbg_msg("hdr_crc %#08x", be32_to_cpu(ec_hdr->hdr_crc));
- dbg_msg("erase counter header hexdump:");
+ pr_err("Erase counter header dump:\n");
+ pr_err("\tmagic %#08x\n", be32_to_cpu(ec_hdr->magic));
+ pr_err("\tversion %d\n", (int)ec_hdr->version);
+ pr_err("\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
+ pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
+ pr_err("\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset));
+ pr_err("\timage_seq %d\n", be32_to_cpu(ec_hdr->image_seq));
+ pr_err("\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
+ pr_err("erase counter header hexdump:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ec_hdr, UBI_EC_HDR_SIZE, 1);
}
/**
- * ubi_dbg_dump_vid_hdr - dump a volume identifier header.
+ * ubi_dump_vid_hdr - dump a volume identifier header.
* @vid_hdr: the volume identifier header to dump
*/
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
{
- dbg_msg("volume identifier header dump:");
- dbg_msg("magic %08x", be32_to_cpu(vid_hdr->magic));
- dbg_msg("version %d", (int)vid_hdr->version);
- dbg_msg("vol_type %d", (int)vid_hdr->vol_type);
- dbg_msg("copy_flag %d", (int)vid_hdr->copy_flag);
- dbg_msg("compat %d", (int)vid_hdr->compat);
- dbg_msg("vol_id %d", be32_to_cpu(vid_hdr->vol_id));
- dbg_msg("lnum %d", be32_to_cpu(vid_hdr->lnum));
- dbg_msg("leb_ver %u", be32_to_cpu(vid_hdr->leb_ver));
- dbg_msg("data_size %d", be32_to_cpu(vid_hdr->data_size));
- dbg_msg("used_ebs %d", be32_to_cpu(vid_hdr->used_ebs));
- dbg_msg("data_pad %d", be32_to_cpu(vid_hdr->data_pad));
- dbg_msg("sqnum %llu",
+ pr_err("Volume identifier header dump:\n");
+ pr_err("\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
+ pr_err("\tversion %d\n", (int)vid_hdr->version);
+ pr_err("\tvol_type %d\n", (int)vid_hdr->vol_type);
+ pr_err("\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
+ pr_err("\tcompat %d\n", (int)vid_hdr->compat);
+ pr_err("\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
+ pr_err("\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
+ pr_err("\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
+ pr_err("\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
+ pr_err("\tsqnum %llu\n",
(unsigned long long)be64_to_cpu(vid_hdr->sqnum));
- dbg_msg("hdr_crc %08x", be32_to_cpu(vid_hdr->hdr_crc));
- dbg_msg("volume identifier header hexdump:");
+ pr_err("\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
+ pr_err("Volume identifier header hexdump:\n");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ vid_hdr, UBI_VID_HDR_SIZE, 1);
}
/**
- * ubi_dbg_dump_vol_info- dump volume information.
+ * ubi_dump_vol_info - dump volume information.
* @vol: UBI volume description object
*/
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
+void ubi_dump_vol_info(const struct ubi_volume *vol)
{
- dbg_msg("volume information dump:");
- dbg_msg("vol_id %d", vol->vol_id);
- dbg_msg("reserved_pebs %d", vol->reserved_pebs);
- dbg_msg("alignment %d", vol->alignment);
- dbg_msg("data_pad %d", vol->data_pad);
- dbg_msg("vol_type %d", vol->vol_type);
- dbg_msg("name_len %d", vol->name_len);
- dbg_msg("usable_leb_size %d", vol->usable_leb_size);
- dbg_msg("used_ebs %d", vol->used_ebs);
- dbg_msg("used_bytes %lld", vol->used_bytes);
- dbg_msg("last_eb_bytes %d", vol->last_eb_bytes);
- dbg_msg("corrupted %d", vol->corrupted);
- dbg_msg("upd_marker %d", vol->upd_marker);
+ pr_err("Volume information dump:\n");
+ pr_err("\tvol_id %d\n", vol->vol_id);
+ pr_err("\treserved_pebs %d\n", vol->reserved_pebs);
+ pr_err("\talignment %d\n", vol->alignment);
+ pr_err("\tdata_pad %d\n", vol->data_pad);
+ pr_err("\tvol_type %d\n", vol->vol_type);
+ pr_err("\tname_len %d\n", vol->name_len);
+ pr_err("\tusable_leb_size %d\n", vol->usable_leb_size);
+ pr_err("\tused_ebs %d\n", vol->used_ebs);
+ pr_err("\tused_bytes %lld\n", vol->used_bytes);
+ pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
+ pr_err("\tcorrupted %d\n", vol->corrupted);
+ pr_err("\tupd_marker %d\n", vol->upd_marker);
if (vol->name_len <= UBI_VOL_NAME_MAX &&
strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
- dbg_msg("name %s", vol->name);
+ pr_err("\tname %s\n", vol->name);
} else {
- dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c",
- vol->name[0], vol->name[1], vol->name[2],
- vol->name[3], vol->name[4]);
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
+ vol->name[0], vol->name[1], vol->name[2],
+ vol->name[3], vol->name[4]);
}
}
/**
- * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
+ * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
* @r: the object to dump
* @idx: volume table index
*/
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
{
int name_len = be16_to_cpu(r->name_len);
- dbg_msg("volume table record %d dump:", idx);
- dbg_msg("reserved_pebs %d", be32_to_cpu(r->reserved_pebs));
- dbg_msg("alignment %d", be32_to_cpu(r->alignment));
- dbg_msg("data_pad %d", be32_to_cpu(r->data_pad));
- dbg_msg("vol_type %d", (int)r->vol_type);
- dbg_msg("upd_marker %d", (int)r->upd_marker);
- dbg_msg("name_len %d", name_len);
+ pr_err("Volume table record %d dump:\n", idx);
+ pr_err("\treserved_pebs %d\n", be32_to_cpu(r->reserved_pebs));
+ pr_err("\talignment %d\n", be32_to_cpu(r->alignment));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(r->data_pad));
+ pr_err("\tvol_type %d\n", (int)r->vol_type);
+ pr_err("\tupd_marker %d\n", (int)r->upd_marker);
+ pr_err("\tname_len %d\n", name_len);
if (r->name[0] == '\0') {
- dbg_msg("name NULL");
+ pr_err("\tname NULL\n");
return;
}
if (name_len <= UBI_VOL_NAME_MAX &&
strnlen(&r->name[0], name_len + 1) == name_len) {
- dbg_msg("name %s", &r->name[0]);
+ pr_err("\tname %s\n", &r->name[0]);
} else {
- dbg_msg("1st 5 characters of the name: %c%c%c%c%c",
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
r->name[0], r->name[1], r->name[2], r->name[3],
r->name[4]);
}
- dbg_msg("crc %#08x", be32_to_cpu(r->crc));
+ pr_err("\tcrc %#08x\n", be32_to_cpu(r->crc));
}
/**
- * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object.
- * @sv: the object to dump
+ * ubi_dump_av - dump a &struct ubi_ainf_volume object.
+ * @av: the object to dump
*/
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv)
+void ubi_dump_av(const struct ubi_ainf_volume *av)
{
- dbg_msg("volume scanning information dump:");
- dbg_msg("vol_id %d", sv->vol_id);
- dbg_msg("highest_lnum %d", sv->highest_lnum);
- dbg_msg("leb_count %d", sv->leb_count);
- dbg_msg("compat %d", sv->compat);
- dbg_msg("vol_type %d", sv->vol_type);
- dbg_msg("used_ebs %d", sv->used_ebs);
- dbg_msg("last_data_size %d", sv->last_data_size);
- dbg_msg("data_pad %d", sv->data_pad);
+ pr_err("Volume attaching information dump:\n");
+ pr_err("\tvol_id %d\n", av->vol_id);
+ pr_err("\thighest_lnum %d\n", av->highest_lnum);
+ pr_err("\tleb_count %d\n", av->leb_count);
+ pr_err("\tcompat %d\n", av->compat);
+ pr_err("\tvol_type %d\n", av->vol_type);
+ pr_err("\tused_ebs %d\n", av->used_ebs);
+ pr_err("\tlast_data_size %d\n", av->last_data_size);
+ pr_err("\tdata_pad %d\n", av->data_pad);
}
/**
- * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object.
- * @seb: the object to dump
+ * ubi_dump_aeb - dump a &struct ubi_ainf_peb object.
+ * @aeb: the object to dump
* @type: object type: 0 - not corrupted, 1 - corrupted
*/
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type)
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
{
- dbg_msg("eraseblock scanning information dump:");
- dbg_msg("ec %d", seb->ec);
- dbg_msg("pnum %d", seb->pnum);
+ pr_err("eraseblock attaching information dump:\n");
+ pr_err("\tec %d\n", aeb->ec);
+ pr_err("\tpnum %d\n", aeb->pnum);
if (type == 0) {
- dbg_msg("lnum %d", seb->lnum);
- dbg_msg("scrub %d", seb->scrub);
- dbg_msg("sqnum %llu", seb->sqnum);
- dbg_msg("leb_ver %u", seb->leb_ver);
+ pr_err("\tlnum %d\n", aeb->lnum);
+ pr_err("\tscrub %d\n", aeb->scrub);
+ pr_err("\tsqnum %llu\n", aeb->sqnum);
}
}
/**
- * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
+ * ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
* @req: the object to dump
*/
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
{
char nm[17];
- dbg_msg("volume creation request dump:");
- dbg_msg("vol_id %d", req->vol_id);
- dbg_msg("alignment %d", req->alignment);
- dbg_msg("bytes %lld", (long long)req->bytes);
- dbg_msg("vol_type %d", req->vol_type);
- dbg_msg("name_len %d", req->name_len);
+ pr_err("Volume creation request dump:\n");
+ pr_err("\tvol_id %d\n", req->vol_id);
+ pr_err("\talignment %d\n", req->alignment);
+ pr_err("\tbytes %lld\n", (long long)req->bytes);
+ pr_err("\tvol_type %d\n", req->vol_type);
+ pr_err("\tname_len %d\n", req->name_len);
memcpy(nm, req->name, 16);
nm[16] = 0;
- dbg_msg("the 1st 16 characters of the name: %s", nm);
+ pr_err("\t1st 16 characters of name: %s\n", nm);
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG_MSG */
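
The renamed dump helpers are meant to be called unconditionally from
error paths instead of being compiled out. A hypothetical call site
(the real checks live in io.c, not in this hunk) showing the intended
pattern:

	/* check_ec_hdr_crc is illustrative, not a function in this
	 * patch: on a CRC mismatch, dump the parsed header and the
	 * raw PEB contents before reporting the bad header. */
	static int check_ec_hdr_crc(struct ubi_device *ubi, int pnum,
				    const struct ubi_ec_hdr *ec_hdr)
	{
		uint32_t crc = crc32(UBI_CRC32_INIT, ec_hdr,
				     UBI_EC_HDR_SIZE_CRC);

		if (crc != be32_to_cpu(ec_hdr->hdr_crc)) {
			ubi_err("bad EC header CRC at PEB %d: calculated %#08x, read %#08x",
				pnum, crc, be32_to_cpu(ec_hdr->hdr_crc));
			ubi_dump_ec_hdr(ec_hdr);
			ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
			return UBI_IO_BAD_HDR;
		}
		return 0;
	}
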
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index b6ffa456d4..6bcb995b4d 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -11,6 +11,9 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
@@ -18,132 +21,109 @@
#ifndef __UBI_DEBUG_H__
#define __UBI_DEBUG_H__
-#ifdef CONFIG_MTD_UBI_DEBUG
-#ifdef UBI_LINUX
-#include <linux/random.h>
-#endif
-
-#define ubi_assert(expr) BUG_ON(!(expr))
-#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
-#else
-#define ubi_assert(expr) ({})
-#define dbg_err(fmt, ...) ({})
-#endif
-
-#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
-#define DBG_DISABLE_BGT 1
-#else
-#define DBG_DISABLE_BGT 0
-#endif
-
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG
-/* Generic debugging message */
-#define dbg_msg(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", \
- __FUNCTION__, ##__VA_ARGS__)
-
-#define ubi_dbg_dump_stack() dump_stack()
-
-struct ubi_ec_hdr;
-struct ubi_vid_hdr;
-struct ubi_volume;
-struct ubi_vtbl_record;
-struct ubi_scan_volume;
-struct ubi_scan_leb;
-struct ubi_mkvol_req;
-
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
-
-#else
-
-#define dbg_msg(fmt, ...) ({})
-#define ubi_dbg_dump_stack() ({})
-#define ubi_dbg_dump_ec_hdr(ec_hdr) ({})
-#define ubi_dbg_dump_vid_hdr(vid_hdr) ({})
-#define ubi_dbg_dump_vol_info(vol) ({})
-#define ubi_dbg_dump_vtbl_record(r, idx) ({})
-#define ubi_dbg_dump_sv(sv) ({})
-#define ubi_dbg_dump_seb(seb, type) ({})
-#define ubi_dbg_dump_mkvol_req(req) ({})
-
-#endif /* CONFIG_MTD_UBI_DEBUG_MSG */
-
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
-/* Messages from the eraseblock association unit */
-#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_eba(fmt, ...) ({})
-#endif
-
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
-/* Messages from the wear-leveling unit */
-#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_wl(fmt, ...) ({})
-#endif
-
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
-/* Messages from the input/output unit */
-#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_io(fmt, ...) ({})
-#endif
-
-#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
+#include <stdlib.h>
+
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
+
+#define ubi_assert(expr) do { \
+ if (unlikely(!(expr))) { \
+ pr_crit("UBI assert failed in %s at %u\n", \
+ __func__, __LINE__); \
+ dump_stack(); \
+ } \
+} while (0)
+
+#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
+ print_hex_dump(l, ps, pt, r, g, b, len, a)
+
+#define ubi_dbg_msg(type, fmt, ...) \
+ pr_debug("UBI DBG " type ": " fmt "\n", \
+ ##__VA_ARGS__)
+
+/* General debugging messages */
+#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
+/* Messages from the eraseblock association sub-system */
+#define dbg_eba(fmt, ...) ubi_dbg_msg("eba", fmt, ##__VA_ARGS__)
+/* Messages from the wear-leveling sub-system */
+#define dbg_wl(fmt, ...) ubi_dbg_msg("wl", fmt, ##__VA_ARGS__)
+/* Messages from the input/output sub-system */
+#define dbg_io(fmt, ...) ubi_dbg_msg("io", fmt, ##__VA_ARGS__)
/* Initialization and build messages */
-#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
-#else
-#define dbg_bld(fmt, ...) ({})
-#endif
+#define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__)
+
+void ubi_dump_vol_info(const struct ubi_volume *vol);
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
+void ubi_dump_av(const struct ubi_ainf_volume *av);
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
+ int len);
+int ubi_debugfs_init(void);
+void ubi_debugfs_exit(void);
+int ubi_debugfs_init_dev(struct ubi_device *ubi);
+
+/**
+ * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if the UBI background thread is disabled for testing
+ * purposes.
+ */
+static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+{
+ return ubi->dbg.disable_bgt;
+}
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS
/**
* ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
+ * @ubi: UBI device description object
*
* Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
*/
-static inline int ubi_dbg_is_bitflip(void)
+static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
- return !(random32() % 200);
+ if (ubi->dbg.emulate_bitflips)
+ return !(random32() % 200);
+ return 0;
}
-#else
-#define ubi_dbg_is_bitflip() 0
-#endif
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES
/**
* ubi_dbg_is_write_failure - if it is time to emulate a write failure.
+ * @ubi: UBI device description object
*
* Returns non-zero if a write failure should be emulated, otherwise returns
* zero.
*/
-static inline int ubi_dbg_is_write_failure(void)
+static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
- return !(random32() % 500);
+ if (ubi->dbg.emulate_io_failures)
+ return !(random32() % 500);
+ return 0;
}
-#else
-#define ubi_dbg_is_write_failure() 0
-#endif
-#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES
/**
 * ubi_dbg_is_erase_failure - if it's time to emulate an erase failure.
+ * @ubi: UBI device description object
*
* Returns non-zero if an erase failure should be emulated, otherwise returns
* zero.
*/
-static inline int ubi_dbg_is_erase_failure(void)
+static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
+ if (ubi->dbg.emulate_io_failures)
return !(random32() % 400);
+ return 0;
+}
+
+static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_io;
}
-#else
-#define ubi_dbg_is_erase_failure() 0
-#endif
+static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_gen;
+}
#endif /* !__UBI_DEBUG_H__ */
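
With the compile-time emulation options gone, the hooks above gate
fault injection at run time through @ubi->dbg. A sketch of how a read
path can consume them (the real call sites are in io.c; the wrapper
name is illustrative):

	/* With dbg.emulate_bitflips set, roughly one read in 200
	 * reports UBI_IO_BITFLIPS, exercising the scrubbing path
	 * without real flash wear. */
	static int dbg_read_data(struct ubi_device *ubi, void *buf,
				 int pnum, int offset, int len)
	{
		int err = ubi_io_read_data(ubi, buf, pnum, offset, len);

		if (!err && ubi_dbg_is_bitflip(ubi)) {
			dbg_gen("bit-flip emulated at PEB %d", pnum);
			err = UBI_IO_BITFLIPS;
		}
		return err;
	}
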
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index fe4aae208d..37b14f07af 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -11,25 +11,28 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
- * The UBI Eraseblock Association (EBA) unit.
+ * The UBI Eraseblock Association (EBA) sub-system.
*
- * This unit is responsible for I/O to/from logical eraseblock.
+ * This sub-system is responsible for I/O to/from logical eraseblock.
*
* Although in this implementation the EBA table is fully kept and managed in
* RAM, which assumes poor scalability, it might be (partially) maintained on
* flash in future implementations.
*
- * The EBA unit implements per-logical eraseblock locking. Before accessing a
- * logical eraseblock it is locked for reading or writing. The per-logical
- * eraseblock locking is implemented by means of the lock tree. The lock tree
- * is an RB-tree which refers all the currently locked logical eraseblocks. The
- * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
- * (@vol_id, @lnum) pairs.
+ * The EBA sub-system implements per-logical eraseblock locking. Before
+ * accessing a logical eraseblock it is locked for reading or writing. The
+ * per-logical eraseblock locking is implemented by means of the lock tree. The
+ * lock tree is an RB-tree which refers all the currently locked logical
+ * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
+ * They are indexed by (@vol_id, @lnum) pairs.
*
* EBA also maintains the global sequence counter which is incremented each
* time a logical eraseblock is mapped to a physical eraseblock and it is
@@ -38,13 +41,7 @@
* 64 bits is enough to never overflow.
*/
-#ifdef UBI_LINUX
-#include <linux/slab.h>
-#include <linux/crc32.h>
#include <linux/err.h>
-#endif
-
-#include "ubi-barebox.h"
#include "ubi.h"
/* Number of physical eraseblocks reserved for atomic LEB change operation */
@@ -58,13 +55,11 @@
* global sequence counter value. It also increases the global sequence
* counter.
*/
-static unsigned long long next_sqnum(struct ubi_device *ubi)
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
unsigned long long sqnum;
- spin_lock(&ubi->ltree_lock);
sqnum = ubi->global_sqnum++;
- spin_unlock(&ubi->ltree_lock);
return sqnum;
}
@@ -143,11 +138,9 @@ static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
return ERR_PTR(-ENOMEM);
le->users = 0;
- init_rwsem(&le->mutex);
le->vol_id = vol_id;
le->lnum = lnum;
- spin_lock(&ubi->ltree_lock);
le1 = ltree_lookup(ubi, vol_id, lnum);
if (le1) {
@@ -188,11 +181,8 @@ static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
rb_insert_color(&le->rb, &ubi->ltree);
}
le->users += 1;
- spin_unlock(&ubi->ltree_lock);
-
- if (le_free)
- kfree(le_free);
+ kfree(le_free);
return le;
}
@@ -212,7 +202,6 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
return PTR_ERR(le);
- down_read(&le->mutex);
return 0;
}
@@ -224,22 +213,15 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
*/
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
- int _free = 0;
struct ubi_ltree_entry *le;
- spin_lock(&ubi->ltree_lock);
le = ltree_lookup(ubi, vol_id, lnum);
le->users -= 1;
ubi_assert(le->users >= 0);
if (le->users == 0) {
rb_erase(&le->rb, &ubi->ltree);
- _free = 1;
- }
- spin_unlock(&ubi->ltree_lock);
-
- up_read(&le->mutex);
- if (_free)
kfree(le);
+ }
}
/**
@@ -258,7 +240,6 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
return PTR_ERR(le);
- down_write(&le->mutex);
return 0;
}
@@ -275,27 +256,19 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
*/
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
- int _free;
struct ubi_ltree_entry *le;
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
return PTR_ERR(le);
- if (down_write_trylock(&le->mutex))
- return 0;
/* Contention, cancel */
- spin_lock(&ubi->ltree_lock);
le->users -= 1;
ubi_assert(le->users >= 0);
if (le->users == 0) {
rb_erase(&le->rb, &ubi->ltree);
- _free = 1;
- } else
- _free = 0;
- spin_unlock(&ubi->ltree_lock);
- if (_free)
kfree(le);
+ }
return 1;
}
@@ -308,23 +281,15 @@ static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
*/
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
- int _free;
struct ubi_ltree_entry *le;
- spin_lock(&ubi->ltree_lock);
le = ltree_lookup(ubi, vol_id, lnum);
le->users -= 1;
ubi_assert(le->users >= 0);
if (le->users == 0) {
rb_erase(&le->rb, &ubi->ltree);
- _free = 1;
- } else
- _free = 0;
- spin_unlock(&ubi->ltree_lock);
-
- up_write(&le->mutex);
- if (_free)
kfree(le);
+ }
}
/**
@@ -357,7 +322,7 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
- err = ubi_wl_put_peb(ubi, pnum, 0);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
out_unlock:
leb_write_unlock(ubi, vol_id, lnum);
@@ -434,9 +399,10 @@ retry:
* may try to recover data. FIXME: but this is
* not implemented.
*/
- if (err == UBI_IO_BAD_VID_HDR) {
- ubi_warn("bad VID header at PEB %d, LEB"
- "%d:%d", pnum, vol_id, lnum);
+ if (err == UBI_IO_BAD_HDR_EBADMSG ||
+ err == UBI_IO_BAD_HDR) {
+ ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
+ pnum, vol_id, lnum);
err = -EBADMSG;
} else
ubi_ro_mode(ubi);
@@ -457,7 +423,7 @@ retry:
if (err == UBI_IO_BITFLIPS) {
scrub = 1;
err = 0;
- } else if (err == -EBADMSG) {
+ } else if (mtd_is_eccerr(err)) {
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
goto out_unlock;
scrub = 1;
@@ -517,16 +483,12 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
struct ubi_vid_hdr *vid_hdr;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
- if (!vid_hdr) {
+ if (!vid_hdr)
return -ENOMEM;
- }
-
- mutex_lock(&ubi->buf_mutex);
retry:
- new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
+ new_pnum = ubi_wl_get_peb(ubi);
if (new_pnum < 0) {
- mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
return new_pnum;
}
@@ -540,39 +502,38 @@ retry:
goto out_put;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
if (err)
goto write_error;
data_size = offset + len;
- memset(ubi->peb_buf1 + offset, 0xFF, len);
+ memset(ubi->peb_buf + offset, 0xFF, len);
/* Read everything before the area where the write failure happened */
if (offset > 0) {
- err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
if (err && err != UBI_IO_BITFLIPS)
- goto out_put;
+ goto out_unlock;
}
- memcpy(ubi->peb_buf1 + offset, buf, len);
+ memcpy(ubi->peb_buf + offset, buf, len);
- err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
+ err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
if (err)
goto write_error;
- mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
vol->eba_tbl[lnum] = new_pnum;
- ubi_wl_put_peb(ubi, pnum, 1);
+ ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi_msg("data was successfully recovered");
return 0;
+out_unlock:
out_put:
- mutex_unlock(&ubi->buf_mutex);
- ubi_wl_put_peb(ubi, new_pnum, 1);
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -582,9 +543,8 @@ write_error:
* get another one.
*/
ubi_warn("failed to write to PEB %d", new_pnum);
- ubi_wl_put_peb(ubi, new_pnum, 1);
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
if (++tries > UBI_IO_RETRIES) {
- mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
}
@@ -600,7 +560,6 @@ write_error:
* @buf: the data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
- * @dtype: data type
*
* This function writes data to logical eraseblock @lnum of a dynamic volume
* @vol. Returns zero in case of success and a negative error code in case
@@ -608,7 +567,7 @@ write_error:
* written to the flash media, but may be some garbage.
*/
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
- const void *buf, int offset, int len, int dtype)
+ const void *buf, int offset, int len)
{
int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -649,14 +608,14 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
}
vid_hdr->vol_type = UBI_VID_DYNAMIC;
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
@@ -676,9 +635,8 @@ retry:
if (len) {
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
- ubi_warn("failed to write %d bytes at offset %d of "
- "LEB %d:%d, PEB %d", len, offset, vol_id,
- lnum, pnum);
+ ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
goto write_error;
}
}
@@ -702,7 +660,7 @@ write_error:
* eraseblock, so just put it and request a new one. We assume that if
* this physical eraseblock went bad, the erase code will handle that.
*/
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
@@ -710,7 +668,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -722,7 +680,6 @@ write_error:
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
- * @dtype: data type
* @used_ebs: how many logical eraseblocks will this volume contain
*
* This function writes data to logical eraseblock @lnum of static volume
@@ -734,13 +691,12 @@ write_error:
* to the real data size, although the @buf buffer has to contain the
* alignment. In all other cases, @len has to be aligned.
*
- * It is prohibited to write more then once to logical eraseblocks of static
+ * It is prohibited to write more than once to logical eraseblocks of static
* volumes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype,
- int used_ebs)
+ int lnum, const void *buf, int len, int used_ebs)
{
int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -765,7 +721,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -778,7 +734,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->data_crc = cpu_to_be32(crc);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
@@ -822,7 +778,7 @@ write_error:
return err;
}
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
@@ -830,7 +786,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -842,7 +798,6 @@ write_error:
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
- * @dtype: data type
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
@@ -854,7 +809,7 @@ write_error:
* LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
*/
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype)
+ int lnum, const void *buf, int len)
{
int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -871,19 +826,18 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
return err;
- return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
return -ENOMEM;
- mutex_lock(&ubi->alc_mutex);
err = leb_write_lock(ubi, vol_id, lnum);
if (err)
goto out_mutex;
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -896,7 +850,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->data_crc = cpu_to_be32(crc);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
err = pnum;
goto out_leb_unlock;
@@ -920,7 +874,7 @@ retry:
}
if (vol->eba_tbl[lnum] >= 0) {
- err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
if (err)
goto out_leb_unlock;
}
@@ -930,7 +884,6 @@ retry:
out_leb_unlock:
leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
- mutex_unlock(&ubi->alc_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -945,18 +898,45 @@ write_error:
goto out_leb_unlock;
}
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
goto out_leb_unlock;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
/**
+ * is_error_sane - check whether a read error is sane.
+ * @err: code of the error happened during reading
+ *
+ * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
+ * cannot read data from the target PEB (an error @err happened). If the error
+ * code is sane, then we treat this error as non-fatal. Otherwise the error is
+ * fatal and UBI will be switched to R/O mode later.
+ *
+ * The idea is that we try not to switch to R/O mode if the read error is
+ * something which suggests there was a real read problem. E.g., %-EIO. Or a
+ * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O
+ * memory allocation failed (%-ENOMEM). Otherwise, it is safer to switch to R/O
+ * cannot handle this. E.g., the underlying driver may have become crazy, and
+ * it is safer to switch to R/O mode to preserve the data.
+ *
+ * And bear in mind, this is about reading from the target PEB, i.e. the PEB
+ * which we have just written.
+ */
+static int is_error_sane(int err)
+{
+ if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
+ err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
+ return 0;
+ return 1;
+}
+
+/**
* ubi_eba_copy_leb - copy logical eraseblock.
* @ubi: UBI device description object
* @from: physical eraseblock number from where to copy
@@ -966,10 +946,9 @@ write_error:
* This function copies logical eraseblock from physical eraseblock @from to
* physical eraseblock @to. The @vid_hdr buffer may be changed by this
* function. Returns:
- * o %0 in case of success;
- * o %1 if the operation was canceled and should be tried later (e.g.,
- * because a bit-flip was detected at the target PEB);
- * o %2 if the volume is being deleted and this LEB should not be moved.
+ * o %0 in case of success;
+ * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
+ * o a negative error code in case of failure.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr)
@@ -981,7 +960,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
- dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
+ dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
if (vid_hdr->vol_type == UBI_VID_STATIC) {
data_size = be32_to_cpu(vid_hdr->data_size);
@@ -991,21 +970,18 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
idx = vol_id2idx(ubi, vol_id);
- spin_lock(&ubi->volumes_lock);
/*
* Note, we may race with volume deletion, which means that the volume
* this logical eraseblock belongs to might be being deleted. Since the
- * volume deletion unmaps all the volume's logical eraseblocks, it will
+ * volume deletion un-maps all the volume's logical eraseblocks, it will
* be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
*/
vol = ubi->volumes[idx];
if (!vol) {
/* No need to do further work, cancel */
- dbg_eba("volume %d is being removed, cancel", vol_id);
- spin_unlock(&ubi->volumes_lock);
- return 2;
+ dbg_wl("volume %d is being removed, cancel", vol_id);
+ return MOVE_CANCEL_RACE;
}
- spin_unlock(&ubi->volumes_lock);
/*
* We do not want anybody to write to this logical eraseblock while we
@@ -1017,12 +993,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* (@from). This task locks the LEB and goes sleep in the
* 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
* holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
- * LEB is already locked, we just do not move it and return %1.
+ * LEB is already locked, we just do not move it and return
+ * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+ * we do not know the reasons of the contention - it may be just a
+ * normal I/O on this LEB, so we want to re-try.
*/
err = leb_write_trylock(ubi, vol_id, lnum);
if (err) {
- dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
- return err;
+ dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
+ return MOVE_RETRY;
}
/*
@@ -1031,30 +1010,29 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* cancel it.
*/
if (vol->eba_tbl[lnum] != from) {
- dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
- "PEB %d, cancel", vol_id, lnum, from,
- vol->eba_tbl[lnum]);
- err = 1;
+ dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
+ vol_id, lnum, from, vol->eba_tbl[lnum]);
+ err = MOVE_CANCEL_RACE;
goto out_unlock_leb;
}
/*
- * OK, now the LEB is locked and we can safely start moving iy. Since
- * this function utilizes thie @ubi->peb1_buf buffer which is shared
- * with some other functions, so lock the buffer by taking the
+ * OK, now the LEB is locked and we can safely start moving it. Since
+ * this function utilizes the @ubi->peb_buf buffer which is shared
+ * with some other functions - we lock the buffer by taking the
* @ubi->buf_mutex.
*/
- mutex_lock(&ubi->buf_mutex);
- dbg_eba("read %d bytes of data", aldata_size);
- err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
+ dbg_wl("read %d bytes of data", aldata_size);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
if (err && err != UBI_IO_BITFLIPS) {
ubi_warn("error %d while reading data from PEB %d",
err, from);
+ err = MOVE_SOURCE_RD_ERR;
goto out_unlock_buf;
}
/*
- * Now we have got to calculate how much data we have to to copy. In
+ * Now we have got to calculate how much data we have to copy. In
* case of a static volume it is fairly easy - the VID header contains
* the data size. In case of a dynamic volume it is more difficult - we
* have to read the contents, cut 0xFF bytes from the end and copy only
@@ -1065,14 +1043,12 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
*/
if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
aldata_size = data_size =
- ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);
+ ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
- cond_resched();
- crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
- cond_resched();
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
/*
- * It may turn out to me that the whole @from physical eraseblock
+ * It may turn out to be that the whole @from physical eraseblock
* contains only 0xFF bytes. Then we have to only write the VID header
* and do not write any data. This also means we should not set
* @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
@@ -1082,51 +1058,57 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vid_hdr->data_size = cpu_to_be32(data_size);
vid_hdr->data_crc = cpu_to_be32(crc);
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
- if (err)
+ if (err) {
+ if (err == -EIO)
+ err = MOVE_TARGET_WR_ERR;
goto out_unlock_buf;
-
- cond_resched();
+ }
/* Read the VID header back and check if it was written correctly */
err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
if (err) {
- if (err != UBI_IO_BITFLIPS)
- ubi_warn("cannot read VID header back from PEB %d", to);
- else
- err = 1;
+ if (err != UBI_IO_BITFLIPS) {
+ ubi_warn("error %d while reading VID header back from PEB %d",
+ err, to);
+ if (is_error_sane(err))
+ err = MOVE_TARGET_RD_ERR;
+ } else
+ err = MOVE_TARGET_BITFLIPS;
goto out_unlock_buf;
}
if (data_size > 0) {
- err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
- if (err)
+ err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
+ if (err) {
+ if (err == -EIO)
+ err = MOVE_TARGET_WR_ERR;
goto out_unlock_buf;
-
- cond_resched();
+ }
/*
* We've written the data and are going to read it back to make
* sure it was written correctly.
*/
-
- err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
+ memset(ubi->peb_buf, 0xFF, aldata_size);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
if (err) {
- if (err != UBI_IO_BITFLIPS)
- ubi_warn("cannot read data back from PEB %d",
- to);
- else
- err = 1;
+ if (err != UBI_IO_BITFLIPS) {
+ ubi_warn("error %d while reading data back from PEB %d",
+ err, to);
+ if (is_error_sane(err))
+ err = MOVE_TARGET_RD_ERR;
+ } else
+ err = MOVE_TARGET_BITFLIPS;
goto out_unlock_buf;
}
- cond_resched();
-
- if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
- ubi_warn("read data back from PEB %d - it is different",
+ if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
+ ubi_warn("read data back from PEB %d and it is different",
to);
+ err = -EINVAL;
goto out_unlock_buf;
}
}
@@ -1135,35 +1117,169 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vol->eba_tbl[lnum] = to;
out_unlock_buf:
- mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
leb_write_unlock(ubi, vol_id, lnum);
return err;
}
/**
- * ubi_eba_init_scan - initialize the EBA unit using scanning information.
+ * print_rsvd_warning - warn about not having enough reserved PEBs.
* @ubi: UBI device description object
- * @si: scanning information
+ *
+ * This is a helper function for 'ubi_eba_init()' which is called when UBI
+ * cannot reserve enough PEBs for bad block handling. This function makes a
+ * decision whether we have to print a warning or not. The algorithm is as
+ * follows:
+ * o if this is a new UBI image, then just print the warning
+ * o if this is an UBI image which has already been used for some time, print
+ *   a warning only if we can reserve less than 10% of the expected number of
+ *   reserved PEBs.
+ *
+ * The idea is that when UBI is used, PEBs become bad, and the reserved pool
+ * of PEBs becomes smaller, which is normal and we do not want to scare users
+ * with a warning every time they attach the MTD device. This was an issue
+ * reported by real users.
+ */
+static void print_rsvd_warning(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ /*
+ * The 1 << 18 (256KiB) number is picked randomly, just a reasonably
+ * large number to distinguish between newly flashed and used images.
+ */
+ if (ai->max_sqnum > (1 << 18)) {
+ int min = ubi->beb_rsvd_level / 10;
+
+ if (!min)
+ min = 1;
+ if (ubi->beb_rsvd_pebs > min)
+ return;
+ }
+
+ ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+ ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+ if (ubi->corr_peb_count)
+ ubi_warn("%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+}
+
+/**
+ * self_check_eba - run a self check on the EBA table constructed by fastmap.
+ * @ubi: UBI device description object
+ * @ai_fastmap: UBI attach info object created by fastmap
+ * @ai_scan: UBI attach info object created by scanning
+ *
+ * Returns < 0 in case of an internal error, 0 otherwise.
+ * If a bad EBA table entry was found it will be printed out and
+ * ubi_assert() triggers.
+ */
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan)
+{
+ int i, j, num_volumes, ret = 0;
+ int **scan_eba, **fm_eba;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *rb;
+
+ num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+ scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
+ if (!scan_eba)
+ return -ENOMEM;
+
+ fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
+ if (!fm_eba) {
+ kfree(scan_eba);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_volumes; i++) {
+ vol = ubi->volumes[i];
+ if (!vol)
+ continue;
+
+ scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
+ GFP_KERNEL);
+ if (!scan_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
+ GFP_KERNEL);
+ if (!fm_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
+
+ av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ scan_eba[i][aeb->lnum] = aeb->pnum;
+
+ av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ fm_eba[i][aeb->lnum] = aeb->pnum;
+
+ for (j = 0; j < vol->reserved_pebs; j++) {
+ if (scan_eba[i][j] != fm_eba[i][j]) {
+ if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
+ fm_eba[i][j] == UBI_LEB_UNMAPPED)
+ continue;
+
+ ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
+ vol->vol_id, i, fm_eba[i][j],
+ scan_eba[i][j]);
+ ubi_assert(0);
+ }
+ }
+ }
+
+out_free:
+ for (i = 0; i < num_volumes; i++) {
+ if (!ubi->volumes[i])
+ continue;
+
+ kfree(scan_eba[i]);
+ kfree(fm_eba[i]);
+ }
+
+ kfree(scan_eba);
+ kfree(fm_eba);
+ return ret;
+}
+
+/**
+ * ubi_eba_init - initialize the EBA sub-system using attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int i, j, err, num_volumes;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
struct rb_node *rb;
- dbg_eba("initialize EBA unit");
+ dbg_eba("initialize EBA sub-system");
- spin_lock_init(&ubi->ltree_lock);
- mutex_init(&ubi->alc_mutex);
ubi->ltree = RB_ROOT;
- ubi->global_sqnum = si->max_sqnum + 1;
+ ubi->global_sqnum = ai->max_sqnum + 1;
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
for (i = 0; i < num_volumes; i++) {
@@ -1171,8 +1287,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
if (!vol)
continue;
- cond_resched();
-
vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
GFP_KERNEL);
if (!vol->eba_tbl) {
@@ -1183,24 +1297,27 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
for (j = 0; j < vol->reserved_pebs; j++)
vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
- sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
- if (!sv)
+ av = ubi_find_av(ai, idx2vol_id(ubi, i));
+ if (!av)
continue;
- ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- if (seb->lnum >= vol->reserved_pebs)
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ if (aeb->lnum >= vol->reserved_pebs)
/*
* This may happen in case of an unclean reboot
* during re-size.
*/
- ubi_scan_move_to_list(sv, seb, &si->erase);
- vol->eba_tbl[seb->lnum] = seb->pnum;
+ ubi_move_aeb_to_list(av, aeb, &ai->erase);
+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
}
}
if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
ubi_err("no enough physical eraseblocks (%d, need %d)",
ubi->avail_pebs, EBA_RESERVED_PEBS);
+ if (ubi->corr_peb_count)
+ ubi_err("%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
err = -ENOSPC;
goto out_free;
}
@@ -1213,9 +1330,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
if (ubi->avail_pebs < ubi->beb_rsvd_level) {
		/* Not enough free physical eraseblocks */
ubi->beb_rsvd_pebs = ubi->avail_pebs;
- ubi_warn("cannot reserve enough PEBs for bad PEB "
- "handling, reserved %d, need %d",
- ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+ print_rsvd_warning(ubi, ai);
} else
ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
@@ -1223,7 +1338,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
}
- dbg_eba("EBA unit is initialized");
+ dbg_eba("EBA sub-system is initialized");
return 0;
out_free:
@@ -1231,23 +1346,7 @@ out_free:
if (!ubi->volumes[i])
continue;
kfree(ubi->volumes[i]->eba_tbl);
+ ubi->volumes[i]->eba_tbl = NULL;
}
return err;
}
-
-/**
- * ubi_eba_close - close EBA unit.
- * @ubi: UBI device description object
- */
-void ubi_eba_close(const struct ubi_device *ubi)
-{
- int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
-
- dbg_eba("close EBA unit");
-
- for (i = 0; i < num_volumes; i++) {
- if (!ubi->volumes[i])
- continue;
- kfree(ubi->volumes[i]->eba_tbl);
- }
-}
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
new file mode 100644
index 0000000000..939e5346ff
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap.c
@@ -0,0 +1,1514 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+#include "ubi.h"
+#include <linux/math64.h>
+
+/**
+ * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
+ * @ubi: UBI device description object
+ */
+size_t ubi_calc_fm_size(struct ubi_device *ubi)
+{
+ size_t size;
+
+ size = sizeof(struct ubi_fm_hdr) + \
+ sizeof(struct ubi_fm_scan_pool) + \
+ sizeof(struct ubi_fm_scan_pool) + \
+ (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
+ (sizeof(struct ubi_fm_eba) + \
+ (ubi->peb_count * sizeof(__be32))) + \
+ sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ return roundup(size, ubi->leb_size);
+}
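+
+/*
+ * Editor's illustration (guarded out, never built into the driver): the same
+ * computation for a hypothetical device with 2048 PEBs and a 126 KiB usable
+ * LEB size. The record sizes below are made-up stand-ins for the ubi_fm_*
+ * structs from ubi-media.h; only the shape of the arithmetic matters.
+ */
+#if 0
+#include <stdio.h>
+
+int main(void)
+{
+	const size_t peb_count = 2048, leb_size = 126 * 1024;
+	const size_t fm_hdr = 36, pool = 72, fm_ec = 8;
+	const size_t fm_eba = 8, fm_volhdr = 32, max_volumes = 128;
+	size_t size = fm_hdr + 2 * pool + peb_count * fm_ec +
+		      (fm_eba + peb_count * 4) + fm_volhdr * max_volumes;
+
+	/* round up to whole LEBs, as ubi_calc_fm_size() does */
+	size = (size + leb_size - 1) / leb_size * leb_size;
+	printf("fastmap needs %zu bytes = %zu LEB(s)\n",
+	       size, size / leb_size);
+	return 0;
+}
+#endif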
+
+
+/**
+ * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * @ubi: UBI device description object
+ * @vol_id: the VID of the new header
+ *
+ * Returns a new struct ubi_vid_hdr on success.
+ * NULL indicates out of memory.
+ */
+static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
+{
+ struct ubi_vid_hdr *new;
+
+ new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!new)
+ goto out;
+
+ new->vol_type = UBI_VID_DYNAMIC;
+ new->vol_id = cpu_to_be32(vol_id);
+
+ /* UBI implementations without fastmap support have to delete the
+ * fastmap.
+ */
+ new->compat = UBI_COMPAT_DELETE;
+
+out:
+ return new;
+}
+
+/**
+ * add_aeb - create and add an attach erase block to a given list.
+ * @ai: UBI attach info object
+ * @list: the target list
+ * @pnum: PEB number of the new attach erase block
+ * @ec: erase counter of the new PEB
+ * @scrub: scrub this PEB after attaching
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+ int pnum, int ec, int scrub)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = kzalloc(sizeof(*aeb), GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ aeb->lnum = -1;
+ aeb->scrub = scrub;
+ aeb->copy_flag = aeb->sqnum = 0;
+
+ ai->ec_sum += aeb->ec;
+ ai->ec_count++;
+
+ if (ai->max_ec < aeb->ec)
+ ai->max_ec = aeb->ec;
+
+ if (ai->min_ec > aeb->ec)
+ ai->min_ec = aeb->ec;
+
+ list_add_tail(&aeb->u.list, list);
+
+ return 0;
+}
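+
+/*
+ * Editor's sketch (guarded out) of the ec_sum/ec_count/min/max bookkeeping
+ * done above, reduced to a standalone toy. The field names mirror
+ * ubi_attach_info, the values are invented. The mean derived from
+ * ec_sum/ec_count is what the attach code later assigns to PEBs whose own
+ * erase counter could not be read.
+ */
+#if 0
+#include <stdio.h>
+
+struct toy_ai { long long ec_sum; int ec_count, min_ec, max_ec; };
+
+static void toy_add(struct toy_ai *ai, int ec)
+{
+	ai->ec_sum += ec;
+	ai->ec_count++;
+	if (ai->max_ec < ec)
+		ai->max_ec = ec;
+	if (ai->min_ec > ec)
+		ai->min_ec = ec;
+}
+
+int main(void)
+{
+	struct toy_ai ai = { 0, 0, 0x7fffffff, 0 };
+	int ecs[] = { 10, 14, 9, 13 }, i;
+
+	for (i = 0; i < 4; i++)
+		toy_add(&ai, ecs[i]);
+	/* prints: min=9 max=14 mean=11 */
+	printf("min=%d max=%d mean=%lld\n", ai.min_ec, ai.max_ec,
+	       ai.ec_sum / ai.ec_count);
+	return 0;
+}
+#endif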
+
+/**
+ * add_vol - create and add a new volume to ubi_attach_info.
+ * @ai: ubi_attach_info object
+ * @vol_id: VID of the new volume
+ * @used_ebs: number of used EBs
+ * @data_pad: data padding value of the new volume
+ * @vol_type: volume type
+ * @last_eb_bytes: number of bytes in the last LEB
+ *
+ * Returns the new struct ubi_ainf_volume on success.
+ * NULL indicates an error.
+ */
+static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
+ int used_ebs, int data_pad, u8 vol_type,
+ int last_eb_bytes)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ while (*p) {
+ parent = *p;
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (vol_id > av->vol_id)
+ p = &(*p)->rb_left;
+		else if (vol_id < av->vol_id)
+ p = &(*p)->rb_right;
+ }
+
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
+ goto out;
+
+ av->highest_lnum = av->leb_count = 0;
+ av->vol_id = vol_id;
+ av->used_ebs = used_ebs;
+ av->data_pad = data_pad;
+ av->last_data_size = last_eb_bytes;
+ av->compat = 0;
+ av->vol_type = vol_type;
+ av->root = RB_ROOT;
+
+ dbg_bld("found volume (ID %i)", vol_id);
+
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+
+out:
+ return av;
+}
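+
+/*
+ * Note on the ordering above: this tree keeps larger volume IDs to the
+ * left, which is unusual but harmless as long as every lookup agrees, and
+ * the descent in process_pool_aeb() below follows the same convention.
+ */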
+
+/**
+ * assign_aeb_to_av - assigns an AEB to a given ainf_volume and removes it
+ * from its original list.
+ * @ai: ubi_attach_info object
+ * @aeb: the to be assigned SEB
+ * @av: target scan volume
+ */
+static void assign_aeb_to_av(struct ubi_attach_info *ai,
+ struct ubi_ainf_peb *aeb,
+ struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *tmp_aeb;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ p = &av->root.rb_node;
+ while (*p) {
+ parent = *p;
+
+ tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (aeb->lnum != tmp_aeb->lnum) {
+ if (aeb->lnum < tmp_aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ } else
+ break;
+ }
+
+ list_del(&aeb->u.list);
+ av->leb_count++;
+
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
+}
+
+/**
+ * update_vol - inserts or updates a LEB which was found in a pool.
+ * @ubi: the UBI device object
+ * @ai: attach info object
+ * @av: the volume this LEB belongs to
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
+ struct ubi_ainf_peb *aeb, *victim;
+ int cmp_res;
+
+ while (*p) {
+ parent = *p;
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+
+ if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
+ if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ }
+
+ /* This case can happen if the fastmap gets written
+ * because of a volume change (creation, deletion, ..).
+ * Then a PEB can be within the persistent EBA and the pool.
+ */
+ if (aeb->pnum == new_aeb->pnum) {
+ ubi_assert(aeb->lnum == new_aeb->lnum);
+ kfree(new_aeb);
+
+ return 0;
+ }
+
+ cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
+ if (cmp_res < 0)
+ return cmp_res;
+
+ /* new_aeb is newer */
+ if (cmp_res & 1) {
+ victim = kzalloc(sizeof(*victim), GFP_KERNEL);
+ if (!victim)
+ return -ENOMEM;
+
+ victim->ec = aeb->ec;
+ victim->pnum = aeb->pnum;
+ list_add_tail(&victim->u.list, &ai->erase);
+
+ if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
+ av->last_data_size = \
+ be32_to_cpu(new_vh->data_size);
+
+			dbg_bld("vol %i: AEB %i's PEB %i is newer",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+
+ aeb->ec = new_aeb->ec;
+ aeb->pnum = new_aeb->pnum;
+ aeb->copy_flag = new_vh->copy_flag;
+ aeb->scrub = new_aeb->scrub;
+ kfree(new_aeb);
+
+ /* new_aeb is older */
+ } else {
+ dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+ list_add_tail(&new_aeb->u.list, &ai->erase);
+ }
+
+ return 0;
+ }
+ /* This LEB is new, let's add it to the volume */
+
+ if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
+ av->highest_lnum = be32_to_cpu(new_vh->lnum);
+ av->last_data_size = be32_to_cpu(new_vh->data_size);
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av->used_ebs = be32_to_cpu(new_vh->used_ebs);
+
+ av->leb_count++;
+
+ rb_link_node(&new_aeb->u.rb, parent, p);
+ rb_insert_color(&new_aeb->u.rb, &av->root);
+
+ return 0;
+}
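+
+/*
+ * Editor's sketch (guarded out) of the conflict resolution above: when two
+ * PEBs claim the same LEB, the copy with the higher sequence number wins and
+ * the loser is queued for erasure. The real decision is made by
+ * ubi_compare_lebs(), which additionally checks copy_flag and the data CRC;
+ * this toy keeps only the sqnum comparison.
+ */
+#if 0
+/* returns nonzero when the new copy supersedes the old one */
+static int toy_second_is_newer(unsigned long long old_sqnum,
+			       unsigned long long new_sqnum)
+{
+	return new_sqnum > old_sqnum;
+}
+#endif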
+
+/**
+ * process_pool_aeb - we found a non-empty PEB in a pool.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct ubi_ainf_volume *av, *tmp_av = NULL;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+ int found = 0;
+
+ if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
+ be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
+ kfree(new_aeb);
+
+ return 0;
+ }
+
+	/* Find the volume this AEB belongs to */
+ while (*p) {
+ parent = *p;
+ tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
+ p = &(*p)->rb_left;
+ else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
+ p = &(*p)->rb_right;
+ else {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ av = tmp_av;
+ else {
+ ubi_err("orphaned volume in fastmap pool!");
+ return UBI_BAD_FASTMAP;
+ }
+
+ ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
+
+ return update_vol(ubi, ai, av, new_vh, new_aeb);
+}
+
+/**
+ * unmap_peb - unmap a PEB.
+ * If fastmap detects a free PEB in the pool it has to check whether
+ * this PEB has been unmapped after writing the fastmap.
+ *
+ * @ai: UBI attach info object
+ * @pnum: The PEB to be unmapped
+ */
+static void unmap_peb(struct ubi_attach_info *ai, int pnum)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node *node, *node2;
+ struct ubi_ainf_peb *aeb;
+
+ for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
+ av = rb_entry(node, struct ubi_ainf_volume, rb);
+
+ for (node2 = rb_first(&av->root); node2;
+ node2 = rb_next(node2)) {
+ aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
+ if (aeb->pnum == pnum) {
+ rb_erase(&aeb->u.rb, &av->root);
+ kfree(aeb);
+ return;
+ }
+ }
+ }
+}
+
+/**
+ * scan_pool - scans a pool for changed (no longer empty) PEBs.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @pebs: an array of all PEB numbers in the to be scanned pool
+ * @pool_size: size of the pool (number of entries in @pebs)
+ * @max_sqnum: pointer to the maximal sequence number
+ * @eba_orphans: list of PEBs which need to be scanned
+ * @lfree: list of PEBs which are most likely free (and go into @ai->free)
+ *
+ * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
+ * < 0 indicates an internal error.
+ */
+static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int *pebs, int pool_size, unsigned long long *max_sqnum,
+ struct list_head *eba_orphans, struct list_head *lfree)
+{
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_ainf_peb *new_aeb, *tmp_aeb;
+ int i, pnum, err, found_orphan, ret = 0;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ return -ENOMEM;
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh) {
+ kfree(ech);
+ return -ENOMEM;
+ }
+
+ dbg_bld("scanning fastmap pool: size = %i", pool_size);
+
+ /*
+ * Now scan all PEBs in the pool to find changes which have been made
+ * after the creation of the fastmap
+ */
+ for (i = 0; i < pool_size; i++) {
+ int scrub = 0;
+
+ pnum = be32_to_cpu(pebs[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ubi_err("bad PEB in fastmap pool!");
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read EC header! PEB:%i err:%i",
+ pnum, err);
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+		} else if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+ ubi_err("bad image seq: 0x%x, expected: 0x%x",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
+			ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
+ unsigned long long ec = be64_to_cpu(ech->ec);
+ unmap_peb(ai, pnum);
+ dbg_bld("Adding PEB to free: %i", pnum);
+ if (err == UBI_IO_FF_BITFLIPS)
+ add_aeb(ai, lfree, pnum, ec, 1);
+ else
+ add_aeb(ai, lfree, pnum, ec, 0);
+ continue;
+ } else if (err == 0 || err == UBI_IO_BITFLIPS) {
+ dbg_bld("Found non empty PEB:%i in pool", pnum);
+
+ if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ found_orphan = 0;
+ list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
+ if (tmp_aeb->pnum == pnum) {
+ found_orphan = 1;
+ break;
+ }
+ }
+ if (found_orphan) {
+				list_del(&tmp_aeb->u.list);
+				kfree(tmp_aeb);
+ }
+
+ new_aeb = kzalloc(sizeof(*new_aeb), GFP_KERNEL);
+ if (!new_aeb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ new_aeb->ec = be64_to_cpu(ech->ec);
+ new_aeb->pnum = pnum;
+ new_aeb->lnum = be32_to_cpu(vh->lnum);
+ new_aeb->sqnum = be64_to_cpu(vh->sqnum);
+ new_aeb->copy_flag = vh->copy_flag;
+ new_aeb->scrub = scrub;
+
+ if (*max_sqnum < new_aeb->sqnum)
+ *max_sqnum = new_aeb->sqnum;
+
+ err = process_pool_aeb(ubi, ai, vh, new_aeb);
+ if (err) {
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+ } else {
+ /* We are paranoid and fall back to scanning mode */
+			ubi_err("fastmap pool contains damaged PEBs!");
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+
+ }
+
+out:
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+ return ret;
+}
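+
+/*
+ * Summary of the per-PEB classification done in the loop above:
+ *
+ *   VID header is all 0xFF (UBI_IO_FF*)   -> PEB is free, goes to @lfree
+ *   VID header valid (or with bit-flips)  -> carries data, process_pool_aeb()
+ *   anything else                         -> pool unusable, fall back to scan
+ */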
+
+/**
+ * count_fastmap_pebs - Counts the PEBs found by fastmap.
+ * @ai: The UBI attach info object
+ */
+static int count_fastmap_pebs(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb1, *rb2;
+ int n = 0;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ n++;
+
+ list_for_each_entry(aeb, &ai->free, u.list)
+ n++;
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ n++;
+
+ return n;
+}
+
+/**
+ * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info object
+ * @fm: the fastmap to be attached
+ *
+ * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
+ * < 0 indicates an internal error.
+ */
+static int ubi_attach_fastmap(struct ubi_device *ubi,
+ struct ubi_attach_info *ai,
+ struct ubi_fastmap_layout *fm)
+{
+ struct list_head used, eba_orphans, lfree;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmhdr;
+ struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_ec *fmec;
+ struct ubi_fm_volhdr *fmvhdr;
+ struct ubi_fm_eba *fm_eba;
+ int ret, i, j, pool_size, wl_pool_size;
+ size_t fm_pos = 0, fm_size = ubi->fm_size;
+ unsigned long long max_sqnum = 0;
+ void *fm_raw = ubi->fm_buf;
+
+ INIT_LIST_HEAD(&used);
+ INIT_LIST_HEAD(&lfree);
+ INIT_LIST_HEAD(&eba_orphans);
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
+ ai->min_ec = UBI_MAX_ERASECOUNTER;
+
+ fmsb = (struct ubi_fm_sb *)(fm_raw);
+ ai->max_sqnum = fmsb->sqnum;
+ fm_pos += sizeof(struct ubi_fm_sb);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
+ ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl1);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl2);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ pool_size = be16_to_cpu(fmpl1->size);
+ wl_pool_size = be16_to_cpu(fmpl2->size);
+ fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
+ fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
+
+ if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
+ ubi_err("bad pool size: %i", pool_size);
+ goto fail_bad;
+ }
+
+ if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
+ ubi_err("bad WL pool size: %i", wl_pool_size);
+ goto fail_bad;
+ }
+
+
+ if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_pool_size < 0) {
+ ubi_err("bad maximal pool size: %i", fm->max_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_wl_pool_size < 0) {
+ ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
+ goto fail_bad;
+ }
+
+ /* read EC values from free list */
+ for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from used list */
+ for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from scrub list */
+ for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+
+ /* read EC values from erase list */
+ for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+ ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
+
+ /* Iterate over all volumes and read their EBA table */
+ for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
+ fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmvhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
+ ubi_err("bad fastmap vol header magic: 0x%x, " \
+ "expected: 0x%x",
+ be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
+ goto fail_bad;
+ }
+
+ av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
+ be32_to_cpu(fmvhdr->used_ebs),
+ be32_to_cpu(fmvhdr->data_pad),
+ fmvhdr->vol_type,
+ be32_to_cpu(fmvhdr->last_eb_bytes));
+
+ if (!av)
+ goto fail_bad;
+
+ ai->vols_found++;
+ if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
+ ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
+
+ fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fm_eba);
+ fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
+ ubi_err("bad fastmap EBA header magic: 0x%x, " \
+ "expected: 0x%x",
+ be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
+ goto fail_bad;
+ }
+
+ for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
+ int pnum = be32_to_cpu(fm_eba->pnum[j]);
+
+ if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
+ continue;
+
+ aeb = NULL;
+ list_for_each_entry(tmp_aeb, &used, u.list) {
+ if (tmp_aeb->pnum == pnum) {
+ aeb = tmp_aeb;
+ break;
+ }
+ }
+
+ /* This can happen if a PEB is already in an EBA known
+ * by this fastmap but the PEB itself is not in the used
+ * list.
+ * In this case the PEB can be within the fastmap pool
+ * or while writing the fastmap it was in the protection
+ * queue.
+ */
+ if (!aeb) {
+ aeb = kzalloc(sizeof(*aeb), GFP_KERNEL);
+ if (!aeb) {
+ ret = -ENOMEM;
+
+ goto fail;
+ }
+
+ aeb->lnum = j;
+ aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
+ aeb->ec = -1;
+ aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
+ list_add_tail(&aeb->u.list, &eba_orphans);
+ continue;
+ }
+
+ aeb->lnum = j;
+
+ if (av->highest_lnum <= aeb->lnum)
+ av->highest_lnum = aeb->lnum;
+
+ assign_aeb_to_av(ai, aeb, av);
+
+ dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
+ aeb->pnum, aeb->lnum, av->vol_id);
+ }
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
+ u.list) {
+ int err;
+
+ if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
+ ubi_err("bad PEB in fastmap EBA orphan list");
+ ret = UBI_BAD_FASTMAP;
+ kfree(ech);
+ goto fail;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read EC header! PEB:%i " \
+ "err:%i", tmp_aeb->pnum, err);
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ kfree(ech);
+
+ goto fail;
+ } else if (err == UBI_IO_BITFLIPS)
+ tmp_aeb->scrub = 1;
+
+ tmp_aeb->ec = be64_to_cpu(ech->ec);
+ assign_aeb_to_av(ai, tmp_aeb, av);
+ }
+
+ kfree(ech);
+ }
+
+ ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
+ &eba_orphans, &lfree);
+ if (ret)
+ goto fail;
+
+ ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
+ &eba_orphans, &lfree);
+ if (ret)
+ goto fail;
+
+ if (max_sqnum > ai->max_sqnum)
+ ai->max_sqnum = max_sqnum;
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &lfree, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->free);
+
+ /*
+ * If fastmap is leaking PEBs (must not happen), raise a
+ * fat warning and fall back to scanning mode.
+ * We do this here because in ubi_wl_init() it's too late
+ * and we cannot fall back to scanning.
+ */
+ if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
+ ai->bad_peb_count - fm->used_blocks))
+ goto fail_bad;
+
+ return 0;
+
+fail_bad:
+ ret = UBI_BAD_FASTMAP;
+fail:
+ return ret;
+}
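+
+/*
+ * For reference, the on-flash record sequence walked above (all integer
+ * fields big-endian):
+ *
+ *   ubi_fm_sb | ubi_fm_hdr | pool | WL pool | free ECs | used ECs |
+ *   scrub ECs | erase ECs | per volume: ubi_fm_volhdr + ubi_fm_eba
+ *
+ * fm_pos is advanced past each record and compared against fm_size so that
+ * a truncated fastmap is rejected before anything behind it is parsed.
+ */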
+
+/**
+ * ubi_scan_fastmap - scan the fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info to be filled
+ * @fm_anchor: The fastmap starts at this PEB
+ *
+ * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
+ * UBI_BAD_FASTMAP if one was found but is not usable.
+ * < 0 indicates an internal error.
+ */
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor)
+{
+ struct ubi_fm_sb *fmsb, *fmsb2;
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fastmap_layout *fm;
+ int i, used_blocks, pnum, ret = 0;
+ size_t fm_size;
+ __be32 crc, tmp_crc;
+ unsigned long long sqnum = 0;
+
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
+ if (!fmsb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ if (!fm) {
+ ret = -ENOMEM;
+ kfree(fmsb);
+ goto out;
+ }
+
+ ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
+ if (ret && ret != UBI_IO_BITFLIPS)
+ goto free_fm_sb;
+ else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[0] = 1;
+
+ if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
+ ubi_err("bad super block magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ if (fmsb->version != UBI_FM_FMT_VERSION) {
+ ubi_err("bad fastmap version: %i, expected: %i",
+ fmsb->version, UBI_FM_FMT_VERSION);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ used_blocks = be32_to_cpu(fmsb->used_blocks);
+ if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
+ ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ fm_size = ubi->leb_size * used_blocks;
+ if (fm_size != ubi->fm_size) {
+ ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
+ ubi->fm_size);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech) {
+ ret = -ENOMEM;
+ goto free_fm_sb;
+ }
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh) {
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ pnum = be32_to_cpu(fmsb->block_loc[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
+ i, pnum);
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ } else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[i] = 1;
+
+ if (!ubi->image_seq)
+ ubi->image_seq = be32_to_cpu(ech->image_seq);
+
+ if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i (PEB: %i)",
+ i, pnum);
+ goto free_hdr;
+ }
+
+ if (i == 0) {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
+ ubi_err("bad fastmap anchor vol_id: 0x%x," \
+ " expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_SB_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ } else {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
+ ubi_err("bad fastmap data vol_id: 0x%x," \
+ " expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_DATA_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ }
+
+ if (sqnum < be64_to_cpu(vh->sqnum))
+ sqnum = be64_to_cpu(vh->sqnum);
+
+ ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
+ ubi->leb_start, ubi->leb_size);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i (PEB: %i, " \
+ "err: %i)", i, pnum, ret);
+ goto free_hdr;
+ }
+ }
+
+ kfree(fmsb);
+ fmsb = NULL;
+
+ fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
+ tmp_crc = be32_to_cpu(fmsb2->data_crc);
+ fmsb2->data_crc = 0;
+ crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
+ if (crc != tmp_crc) {
+ ubi_err("fastmap data CRC is invalid");
+ ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ fmsb2->sqnum = sqnum;
+
+ fm->used_blocks = used_blocks;
+
+ ret = ubi_attach_fastmap(ubi, ai, fm);
+ if (ret) {
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ struct ubi_wl_entry *e;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e) {
+ while (i--)
+ kfree(fm->e[i]);
+
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
+ e->ec = be32_to_cpu(fmsb2->block_ec[i]);
+ fm->e[i] = e;
+ }
+
+ ubi->fm = fm;
+ ubi->fm_pool.max_size = ubi->fm->max_pool_size;
+ ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
+ ubi_msg("attached by fastmap");
+ ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+ ubi->fm_disabled = 0;
+
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+out:
+ if (ret == UBI_BAD_FASTMAP)
+ ubi_err("Attach by fastmap failed, doing a full scan!");
+ return ret;
+
+free_hdr:
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+free_fm_sb:
+ kfree(fmsb);
+ kfree(fm);
+ goto out;
+}
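+
+/*
+ * Editor's sketch (guarded out) of the CRC discipline used above and
+ * mirrored in ubi_write_fastmap(): data_crc is zeroed before the checksum is
+ * taken over the whole image, so writer and reader compute over identical
+ * bytes. The toy CRC below is a plain reflected CRC-32 seeded with
+ * 0xFFFFFFFF, intended to mirror crc32(UBI_CRC32_INIT, ...); treat it as
+ * illustrative, not as the kernel implementation.
+ */
+#if 0
+#include <stdint.h>
+#include <string.h>
+
+static uint32_t toy_crc32(uint32_t crc, const void *buf, size_t len)
+{
+	const uint8_t *p = buf;
+	int i;
+
+	while (len--) {
+		crc ^= *p++;
+		for (i = 0; i < 8; i++)
+			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
+	}
+	return crc;
+}
+
+struct toy_sb { uint32_t data_crc; uint8_t payload[60]; };
+
+static int toy_sb_is_valid(struct toy_sb *sb)
+{
+	uint32_t stored = sb->data_crc, calc;
+
+	sb->data_crc = 0;	/* the CRC field itself is not covered */
+	calc = toy_crc32(0xFFFFFFFFu, sb, sizeof(*sb));
+	sb->data_crc = stored;	/* restore for the caller */
+	return calc == stored;
+}
+#endif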
+
+/**
+ * ubi_write_fastmap - writes a fastmap.
+ * @ubi: UBI device object
+ * @new_fm: the fastmap to be written
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int ubi_write_fastmap(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *new_fm)
+{
+ size_t fm_pos = 0;
+ void *fm_raw;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmh;
+ struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_ec *fec;
+ struct ubi_fm_volhdr *fvh;
+ struct ubi_fm_eba *feba;
+ struct rb_node *node;
+ struct ubi_wl_entry *wl_e;
+ struct ubi_volume *vol;
+ struct ubi_vid_hdr *avhdr, *dvhdr;
+ struct ubi_work *ubi_wrk;
+ int ret, i, j, free_peb_count, used_peb_count, vol_count;
+ int scrub_peb_count, erase_peb_count;
+
+ fm_raw = ubi->fm_buf;
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!avhdr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
+ if (!dvhdr) {
+ ret = -ENOMEM;
+ goto out_kfree;
+ }
+
+ fmsb = (struct ubi_fm_sb *)fm_raw;
+ fm_pos += sizeof(*fmsb);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
+ fmsb->version = UBI_FM_FMT_VERSION;
+ fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
+ /* the max sqnum will be filled in while *reading* the fastmap */
+ fmsb->sqnum = 0;
+
+ fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
+ free_peb_count = 0;
+ used_peb_count = 0;
+ scrub_peb_count = 0;
+ erase_peb_count = 0;
+ vol_count = 0;
+
+ fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl1);
+ fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
+ fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
+
+ for (i = 0; i < ubi->fm_pool.size; i++)
+ fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+
+ fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl2);
+ fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
+ fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
+
+ for (i = 0; i < ubi->fm_wl_pool.size; i++)
+ fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+
+ for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
+ wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ free_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->free_peb_count = cpu_to_be32(free_peb_count);
+
+ for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
+ wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->used_peb_count = cpu_to_be32(used_peb_count);
+
+ for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
+ wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ scrub_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
+
+
+ list_for_each_entry(ubi_wrk, &ubi->works, list) {
+ if (ubi_is_erase_work(ubi_wrk)) {
+ wl_e = ubi_wrk->e;
+ ubi_assert(wl_e);
+
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ erase_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ }
+ fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
+
+ for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
+ vol = ubi->volumes[i];
+
+ if (!vol)
+ continue;
+
+ vol_count++;
+
+ fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fvh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
+ fvh->vol_id = cpu_to_be32(vol->vol_id);
+ fvh->vol_type = vol->vol_type;
+ fvh->used_ebs = cpu_to_be32(vol->used_ebs);
+ fvh->data_pad = cpu_to_be32(vol->data_pad);
+ fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
+
+ ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
+ vol->vol_type == UBI_STATIC_VOLUME);
+
+ feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
+
+ feba->reserved_pebs = cpu_to_be32(j);
+ feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
+ }
+ fmh->vol_count = cpu_to_be32(vol_count);
+ fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
+
+ avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ avhdr->lnum = 0;
+
+ dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
+ if (ret) {
+ ubi_err("unable to write vid_hdr to fastmap SB!");
+ goto out_kfree;
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
+ fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
+ }
+
+ fmsb->data_crc = 0;
+ fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
+ ubi->fm_size));
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ dvhdr->lnum = cpu_to_be32(i);
+ dbg_bld("writing fastmap data to PEB %i sqnum %llu",
+ new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
+ if (ret) {
+ ubi_err("unable to write vid_hdr to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_kfree;
+ }
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
+ new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
+ if (ret) {
+ ubi_err("unable to write fastmap to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_kfree;
+ }
+ }
+
+ ubi_assert(new_fm);
+ ubi->fm = new_fm;
+
+ dbg_bld("fastmap written!");
+
+out_kfree:
+ ubi_free_vid_hdr(ubi, avhdr);
+ ubi_free_vid_hdr(ubi, dvhdr);
+out:
+ return ret;
+}
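+
+/*
+ * Note the write order above: the anchor VID header goes out first, then
+ * the data-block VID headers, and only then the fastmap payload of every
+ * block. The CRC covers only the in-memory image, so fmsb->block_loc[] and
+ * fmsb->block_ec[] must be filled in before it is computed, as done above.
+ */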
+
+void ubi_free_fastmap(struct ubi_device *ubi)
+{
+ struct ubi_fastmap_layout *fm = ubi->fm;
+ int i;
+
+ if (!fm)
+ return;
+
+ for (i = 0; i < fm->used_blocks; i++)
+ kfree(fm->e[i]);
+
+ kfree(ubi->fm);
+}
+
+/**
+ * erase_block - Manually erase a PEB.
+ * @ubi: UBI device object
+ * @pnum: PEB to be erased
+ *
+ * Returns the new EC value on success, < 0 indicates an internal error.
+ */
+static int erase_block(struct ubi_device *ubi, int pnum)
+{
+ int ret;
+ struct ubi_ec_hdr *ec_hdr;
+ long long ec;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+ if (ret < 0)
+ goto out;
+ else if (ret && ret != UBI_IO_BITFLIPS) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ubi_io_sync_erase(ubi, pnum, 0);
+ if (ret < 0)
+ goto out;
+
+ ec = be64_to_cpu(ec_hdr->ec);
+ ec += ret;
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ec_hdr->ec = cpu_to_be64(ec);
+ ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+ if (ret < 0)
+ goto out;
+
+ ret = ec;
+out:
+ kfree(ec_hdr);
+ return ret;
+}
+
+/**
+ * invalidate_fastmap - destroys a fastmap.
+ * @ubi: UBI device object
+ * @fm: the fastmap to be destroyed
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int invalidate_fastmap(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *fm)
+{
+ int ret, i;
+ struct ubi_vid_hdr *vh;
+
+ ret = erase_block(ubi, fm->e[0]->pnum);
+ if (ret < 0)
+ return ret;
+
+ vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!vh)
+ return -ENOMEM;
+
+ /* deleting the current fastmap SB is not enough, an old SB may exist,
+ * so create a (corrupted) SB such that fastmap will find it and fall
+ * back to scanning mode in any case */
+ vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
+
+ for (i = 0; i < fm->used_blocks; i++)
+ ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
+
+ return ret;
+}
+
+/**
+ * ubi_update_fastmap - will be called by UBI if a volume changes or
+ * a fastmap pool becomes full.
+ * @ubi: UBI device object
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+int ubi_update_fastmap(struct ubi_device *ubi)
+{
+ int ret, i;
+ struct ubi_fastmap_layout *new_fm, *old_fm;
+ struct ubi_wl_entry *tmp_e;
+
+ ubi_refill_pools(ubi);
+
+ if (ubi->ro_mode || ubi->fm_disabled)
+ return 0;
+
+ ret = ubi_ensure_anchor_pebs(ubi);
+ if (ret)
+ return ret;
+
+ new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ if (!new_fm)
+ return -ENOMEM;
+
+ new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
+
+	/* validate the size before new_fm->e[] is indexed with it */
+	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
+		ubi_err("fastmap too large");
+		kfree(new_fm);
+		return -ENOSPC;
+	}
+
+	for (i = 0; i < new_fm->used_blocks; i++) {
+		new_fm->e[i] = kzalloc(sizeof(struct ubi_wl_entry), GFP_KERNEL);
+		if (!new_fm->e[i]) {
+			while (i--)
+				kfree(new_fm->e[i]);
+
+			kfree(new_fm);
+			return -ENOMEM;
+		}
+	}
+
+	old_fm = ubi->fm;
+	ubi->fm = NULL;
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ tmp_e = ubi_wl_get_fm_peb(ubi, 0);
+
+ if (!tmp_e && !old_fm) {
+ int j;
+ ubi_err("could not get any free erase block");
+
+ for (j = 1; j < i; j++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
+
+ ret = -ENOSPC;
+ goto err;
+ } else if (!tmp_e && old_fm) {
+ ret = erase_block(ubi, old_fm->e[i]->pnum);
+ if (ret < 0) {
+ int j;
+
+ for (j = 1; j < i; j++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j],
+ j, 0);
+
+ ubi_err("could not erase old fastmap PEB");
+ goto err;
+ }
+
+ new_fm->e[i]->pnum = old_fm->e[i]->pnum;
+ new_fm->e[i]->ec = old_fm->e[i]->ec;
+ } else {
+ new_fm->e[i]->pnum = tmp_e->pnum;
+ new_fm->e[i]->ec = tmp_e->ec;
+
+ if (old_fm)
+ ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+ old_fm->to_be_tortured[i]);
+ }
+ }
+
+ tmp_e = ubi_wl_get_fm_peb(ubi, 1);
+
+ if (old_fm) {
+ /* no fresh anchor PEB was found, reuse the old one */
+ if (!tmp_e) {
+ ret = erase_block(ubi, old_fm->e[0]->pnum);
+ if (ret < 0) {
+ int i;
+ ubi_err("could not erase old anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i],
+ i, 0);
+ goto err;
+ }
+
+ new_fm->e[0]->pnum = old_fm->e[0]->pnum;
+ new_fm->e[0]->ec = ret;
+ } else {
+ /* we've got a new anchor PEB, return the old one */
+ ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
+ old_fm->to_be_tortured[0]);
+
+ new_fm->e[0]->pnum = tmp_e->pnum;
+ new_fm->e[0]->ec = tmp_e->ec;
+ }
+ } else {
+ if (!tmp_e) {
+ int i;
+ ubi_err("could not find any anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
+
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ new_fm->e[0]->pnum = tmp_e->pnum;
+ new_fm->e[0]->ec = tmp_e->ec;
+ }
+
+ ret = ubi_write_fastmap(ubi, new_fm);
+
+ if (ret)
+ goto err;
+
+out:
+ kfree(old_fm);
+ return ret;
+
+err:
+ kfree(new_fm);
+
+ ubi_warn("Unable to write new fastmap, err=%i", ret);
+
+ ret = 0;
+ if (old_fm) {
+ ret = invalidate_fastmap(ubi, old_fm);
+ if (ret < 0)
+			ubi_err("Unable to invalidate current fastmap!");
+ else if (ret)
+ ret = 0;
+ }
+	goto out;
+}
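+
+/*
+ * Decision table implemented above for every fastmap PEB slot i (sketch):
+ *
+ *   a fresh PEB is available      -> take it, return the old slot to WL
+ *   none free, old fastmap exists -> erase and reuse old_fm->e[i]
+ *   none free, no old fastmap     -> undo the slots taken so far, -ENOSPC
+ */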
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 000fc5d921..87852d32f1 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -12,20 +12,23 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
- * UBI input/output unit.
+ * UBI input/output sub-system.
*
- * This unit provides a uniform way to work with all kinds of the underlying
- * MTD devices. It also implements handy functions for reading and writing UBI
- * headers.
+ * This sub-system provides a uniform way to work with all kinds of the
+ * underlying MTD devices. It also implements handy functions for reading and
+ * writing UBI headers.
*
* We are trying to have a paranoid mindset and not to trust to what we read
- * from the flash media in order to be more secure and robust. So this unit
- * validates every single header it reads from the flash media.
+ * from the flash media in order to be more secure and robust. So this
+ * sub-system validates every single header it reads from the flash media.
*
* Some words about how the eraseblock headers are stored.
*
@@ -61,9 +64,9 @@
* device, e.g., make @ubi->min_io_size = 512 in the example above?
*
* A: because when writing a sub-page, MTD still writes a full 2K page but the
- * bytes which are no relevant to the sub-page are 0xFF. So, basically, writing
- * 4x512 sub-pages is 4 times slower then writing one 2KiB NAND page. Thus, we
- * prefer to use sub-pages only for EV and VID headers.
+ * bytes which are not relevant to the sub-page are 0xFF. So, basically,
+ * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
+ * Thus, we prefer to use sub-pages only for EC and VID headers.
*
* As it was noted above, the VID header may start at a non-aligned offset.
* For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
@@ -76,39 +79,25 @@
* 512-byte chunks, we have to allocate one more buffer and copy our VID header
* to offset 448 of this buffer.
*
- * The I/O unit does the following trick in order to avoid this extra copy.
- * It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID header
- * and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. When the
- * VID header is being written out, it shifts the VID header pointer back and
- * writes the whole sub-page.
+ * The I/O sub-system does the following trick in order to avoid this extra
+ * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID
+ * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer.
+ * When the VID header is being written out, it shifts the VID header pointer
+ * back and writes the whole sub-page.
*/
-#ifdef UBI_LINUX
-#include <linux/crc32.h>
#include <linux/err.h>
-#endif
-
-#include "ubi-barebox.h"
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_ec_hdr *ec_hdr);
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_vid_hdr *vid_hdr);
-static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
- int len);
-#else
-#define paranoid_check_not_bad(ubi, pnum) 0
-#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
-#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0
-#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
-#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
-#define paranoid_check_all_ff(ubi, pnum, offset, len) 0
-#endif
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr);
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr);
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len);
/**
* ubi_io_read - read data from a physical eraseblock.
@@ -145,51 +134,76 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
ubi_assert(len > 0);
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err)
- return err > 0 ? -EINVAL : err;
+ return err;
+
+ /*
+ * Deliberately corrupt the buffer to improve robustness. Indeed, if we
+ * do not do this, the following may happen:
+ * 1. The buffer contains data from previous operation, e.g., read from
+ * another PEB previously. The data looks like expected, e.g., if we
+ * just do not read anything and return - the caller would not
+ * notice this. E.g., if we are reading a VID header, the buffer may
+ * contain a valid VID header from another PEB.
+ * 2. The driver is buggy and returns us success or -EBADMSG or
+ * -EUCLEAN, but it does not actually put any data to the buffer.
+ *
+ * This may confuse UBI or upper layers - they may think the buffer
+ * contains valid data while in fact it is just old data. This is
+ * especially possible because UBI (and UBIFS) relies on CRC, and
+ * treats data as correct even in case of ECC errors if the CRC is
+ * correct.
+ *
+ * Try to prevent this situation by changing the first byte of the
+ * buffer.
+ */
+ *((uint8_t *)buf) ^= 0xFF;
addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err) {
- if (err == -EUCLEAN) {
+ const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
+
+ if (mtd_is_bitflip(err)) {
/*
* -EUCLEAN is reported if there was a bit-flip which
* was corrected, so this is harmless.
+ *
+ * We do not report about it here unless debugging is
+ * enabled. A corresponding message will be printed
+			 * later, when it has been scrubbed.
*/
ubi_msg("fixable bit-flip detected at PEB %d", pnum);
ubi_assert(len == read);
return UBI_IO_BITFLIPS;
}
- if (read != len && retries++ < UBI_IO_RETRIES) {
- dbg_io("error %d while reading %d bytes from PEB %d:%d, "
- "read only %zd bytes, retry",
- err, len, pnum, offset, read);
- yield();
+ if (retries++ < UBI_IO_RETRIES) {
+ ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
+ err, errstr, len, pnum, offset, read);
goto retry;
}
- ubi_err("error %d while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, len, pnum, offset, read);
- ubi_dbg_dump_stack();
+ ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, errstr, len, pnum, offset, read);
+ dump_stack();
/*
* The driver should never return -EBADMSG if it failed to read
* all the requested data. But some buggy drivers might do
* this, so we change it to -EIO.
*/
- if (read != len && err == -EBADMSG) {
+ if (read != len && mtd_is_eccerr(err)) {
ubi_assert(0);
- printk("%s[%d] not here\n", __func__, __LINE__);
-/* err = -EIO; */
+ err = -EIO;
}
} else {
ubi_assert(len == read);
- if (ubi_dbg_is_bitflip()) {
- dbg_msg("bit-flip (emulated)");
+ if (ubi_dbg_is_bitflip(ubi)) {
+ dbg_gen("bit-flip (emulated)");
err = UBI_IO_BITFLIPS;
}
}
@@ -233,46 +247,60 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
return -EROFS;
}
- /* The below has to be compiled out if paranoid checks are disabled */
-
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err)
- return err > 0 ? -EINVAL : err;
+ return err;
/* The area we are writing to has to contain all 0xFF bytes */
- err = paranoid_check_all_ff(ubi, pnum, offset, len);
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
if (err)
- return err > 0 ? -EINVAL : err;
+ return err;
if (offset >= ubi->leb_start) {
/*
* We write to the data area of the physical eraseblock. Make
* sure it has valid EC and VID headers.
*/
- err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
- return err > 0 ? -EINVAL : err;
- err = paranoid_check_peb_vid_hdr(ubi, pnum);
+ return err;
+ err = self_check_peb_vid_hdr(ubi, pnum);
if (err)
- return err > 0 ? -EINVAL : err;
+ return err;
}
- if (ubi_dbg_is_write_failure()) {
- dbg_err("cannot write %d bytes to PEB %d:%d "
- "(emulated)", len, pnum, offset);
- ubi_dbg_dump_stack();
+ if (ubi_dbg_is_write_failure(ubi)) {
+ ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
+ len, pnum, offset);
+ dump_stack();
return -EIO;
}
addr = (loff_t)pnum * ubi->peb_size + offset;
err = mtd_write(ubi->mtd, addr, len, &written, buf);
if (err) {
- ubi_err("error %d while writing %d bytes to PEB %d:%d, written"
- " %zd bytes", err, len, pnum, offset, written);
- ubi_dbg_dump_stack();
+ ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+ err, len, pnum, offset, written);
+ dump_stack();
+ ubi_dump_flash(ubi, pnum, offset, len);
} else
ubi_assert(written == len);
+ if (!err) {
+ err = self_check_write(ubi, buf, pnum, offset, len);
+ if (err)
+ return err;
+
+ /*
+ * Since we always write sequentially, the rest of the PEB has
+ * to contain only 0xFF bytes.
+ */
+ offset += len;
+ len = ubi->peb_size - offset;
+ if (len)
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
+ }
+
return err;
}
@@ -285,7 +313,6 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
*/
static void erase_callback(struct erase_info *ei)
{
- wake_up_interruptible((wait_queue_head_t *)ei->priv);
}
/**
@@ -301,82 +328,57 @@ static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
int err, retries = 0;
struct erase_info ei;
- wait_queue_head_t wq;
dbg_io("erase PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ if (ubi->ro_mode) {
+ ubi_err("read-only mode");
+ return -EROFS;
+ }
retry:
- init_waitqueue_head(&wq);
memset(&ei, 0, sizeof(struct erase_info));
ei.mtd = ubi->mtd;
ei.addr = (loff_t)pnum * ubi->peb_size;
ei.len = ubi->peb_size;
ei.callback = erase_callback;
- ei.priv = (unsigned long)&wq;
err = mtd_erase(ubi->mtd, &ei);
if (err) {
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error %d while erasing PEB %d, retry",
- err, pnum);
- yield();
+ ubi_warn("error %d while erasing PEB %d, retry",
+ err, pnum);
goto retry;
}
ubi_err("cannot erase PEB %d, error %d", pnum, err);
- ubi_dbg_dump_stack();
+ dump_stack();
return err;
}
- err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
- ei.state == MTD_ERASE_FAILED);
- if (err) {
- ubi_err("interrupted PEB %d erasure", pnum);
- return -EINTR;
- }
-
if (ei.state == MTD_ERASE_FAILED) {
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error while erasing PEB %d, retry", pnum);
- yield();
+ ubi_warn("error while erasing PEB %d, retry", pnum);
goto retry;
}
ubi_err("cannot erase PEB %d", pnum);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EIO;
}
- err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+ err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
if (err)
- return err > 0 ? -EINVAL : err;
+ return err;
- if (ubi_dbg_is_erase_failure() && !err) {
- dbg_err("cannot erase PEB %d (emulated)", pnum);
+ if (ubi_dbg_is_erase_failure(ubi)) {
+ ubi_err("cannot erase PEB %d (emulated)", pnum);
return -EIO;
}
return 0;
}
-/**
- * check_pattern - check if buffer contains only a certain byte pattern.
- * @buf: buffer to check
- * @patt: the pattern to check
- * @size: buffer size in bytes
- *
- * This function returns %1 in there are only @patt bytes in @buf, and %0 if
- * something else was also found.
- */
-static int check_pattern(const void *buf, uint8_t patt, int size)
-{
- int i;
-
- for (i = 0; i < size; i++)
- if (((const uint8_t *)buf)[i] != patt)
- return 0;
- return 1;
-}
-
/* Patterns to write to a physical eraseblock when torturing it */
static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
@@ -393,21 +395,21 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
{
int err, i, patt_count;
+ ubi_msg("run torture test for PEB %d", pnum);
patt_count = ARRAY_SIZE(patterns);
ubi_assert(patt_count > 0);
- mutex_lock(&ubi->buf_mutex);
for (i = 0; i < patt_count; i++) {
err = do_sync_erase(ubi, pnum);
if (err)
goto out;
/* Make sure the PEB contains only 0xFF bytes */
- err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
if (err)
goto out;
- err = check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size);
+ err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
if (err == 0) {
ubi_err("erased PEB %d, but a non-0xFF byte found",
pnum);
@@ -416,17 +418,18 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
}
/* Write a pattern and check it */
- memset(ubi->peb_buf1, patterns[i], ubi->peb_size);
- err = ubi_io_write(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+ memset(ubi->peb_buf, patterns[i], ubi->peb_size);
+ err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
if (err)
goto out;
- memset(ubi->peb_buf1, ~patterns[i], ubi->peb_size);
- err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+ memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
if (err)
goto out;
- err = check_pattern(ubi->peb_buf1, patterns[i], ubi->peb_size);
+ err = ubi_check_pattern(ubi->peb_buf, patterns[i],
+ ubi->peb_size);
if (err == 0) {
ubi_err("pattern %x checking failed for PEB %d",
patterns[i], pnum);
@@ -436,10 +439,10 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
}
err = patt_count;
+ ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum);
out:
- mutex_unlock(&ubi->buf_mutex);
- if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
/*
* If a bit-flip or data integrity error was detected, the test
* has not passed because it happened on a freshly erased
@@ -453,6 +456,90 @@ out:
}
/**
+ * nor_erase_prepare - prepare a NOR flash PEB for erasure.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to prepare
+ *
+ * NOR flash, or at least some of them, have peculiar embedded PEB erasure
+ * algorithm: the PEB is first filled with zeroes, then it is erased. And
+ * filling with zeroes starts from the end of the PEB. This was observed with
+ * Spansion S29GL512N NOR flash.
+ *
+ * This means that in case of a power cut we may end up with intact data at the
+ * beginning of the PEB, and all zeroes at the end of PEB. In other words, the
+ * EC and VID headers are OK, but a large chunk of data at the end of PEB is
+ * zeroed. This makes UBI mistakenly treat this PEB as used and associate it
+ * with an LEB, which leads to subsequent failures (e.g., UBIFS fails).
+ *
+ * This function is called before erasing NOR PEBs and it zeroes out EC and VID
+ * magic numbers in order to invalidate them and prevent the failures. Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
+{
+ int err, err1;
+ size_t written;
+ loff_t addr;
+ uint32_t data = 0;
+ /*
+ * Note, we cannot generally define VID header buffers on stack,
+ * because of the way we deal with these buffers (see the header
+ * comment in this file). But we know this is a NOR-specific piece of
+ * code, so we can do this. But yes, this is error-prone and we should
+ * (pre-)allocate VID header buffer instead.
+ */
+ struct ubi_vid_hdr vid_hdr;
+
+ /*
+ * It is important to first invalidate the EC header, and then the VID
+ * header. Otherwise a power cut may lead to valid EC header and
+ * invalid VID header, in which case UBI will treat this PEB as
+ * corrupted and will try to preserve it, and print scary warnings.
+ */
+ addr = (loff_t)pnum * ubi->peb_size;
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (!err) {
+ addr += ubi->vid_hdr_aloffset;
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (!err)
+ return 0;
+ }
+
+ /*
+ * We failed to write to the media. This was observed with Spansion
+ * S29GL512N NOR flash. Most probably the previously eraseblock erasure
+ * was interrupted at a very inappropriate moment, so it became
+ * unwritable. In this case we probably anyway have garbage in this
+ * PEB.
+ */
+ err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
+ if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
+ err1 == UBI_IO_FF) {
+ struct ubi_ec_hdr ec_hdr;
+
+ err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
+ if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR ||
+ err1 == UBI_IO_FF)
+ /*
+ * Both VID and EC headers are corrupted, so we can
+			 * safely erase this PEB and not be afraid that it will be
+ * treated as a valid PEB in case of an unclean reboot.
+ */
+ return 0;
+ }
+
+ /*
+ * The PEB contains a valid VID header, but we cannot invalidate it.
+ * Supposedly the flash media or the driver is screwed up, so return an
+ * error.
+ */
+ ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
+ pnum, err, err1);
+ ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
+ return -EIO;
+}
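+
+/*
+ * Editor's illustration (guarded out) of why a 4-byte write of zeroes is
+ * enough here: NOR programming can only clear bits, and clearing all 32
+ * bits of the magic guarantees the header no longer validates. 0x55424923
+ * is the real UBI_EC_HDR_MAGIC ("UBI#"); everything else is a toy.
+ */
+#if 0
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	uint32_t magic = 0x55424923;	/* "UBI#" */
+	uint32_t after = magic & 0;	/* programming 0 clears every bit */
+
+	printf("magic %#x -> %#x, header check now fails\n", magic, after);
+	return 0;
+}
+#endif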
+
+/**
* ubi_io_sync_erase - synchronously erase a physical eraseblock.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to erase
@@ -461,7 +548,7 @@ out:
* This function synchronously erases physical eraseblock @pnum. If @torture
* flag is not zero, the physical eraseblock is checked by means of writing
* different patterns to it and reading them back. If the torturing is enabled,
- * the physical eraseblock is erased more then once.
+ * the physical eraseblock is erased more than once.
*
* This function returns the number of erasures made in case of success, %-EIO
* if the erasure failed or the torturing test failed, and other negative error
@@ -474,15 +561,21 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err != 0)
- return err > 0 ? -EINVAL : err;
+ return err;
if (ubi->ro_mode) {
ubi_err("read-only mode");
return -EROFS;
}
+ if (ubi->nor_flash) {
+ err = nor_erase_prepare(ubi, pnum);
+ if (err)
+ return err;
+ }
+
if (torture) {
ret = torture_peb(ubi, pnum);
if (ret < 0)
@@ -548,7 +641,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
if (!ubi->bad_allowed)
return 0;
- err = mtd->block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
+ err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
if (err)
ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
return err;
@@ -573,8 +666,7 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
leb_start = be32_to_cpu(ec_hdr->data_offset);
if (ec_hdr->version != UBI_VERSION) {
- ubi_err("node with incompatible UBI version found: "
- "this UBI version is %d, image version is %d",
+ ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
UBI_VERSION, (int)ec_hdr->version);
goto bad;
}
@@ -600,8 +692,8 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
bad:
ubi_err("bad EC header");
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
return 1;
}
@@ -621,67 +713,58 @@ bad:
* o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
* and corrected by the flash driver; this is harmless but may indicate that
 *   this eraseblock may become bad soon (but may not);
- * o %UBI_IO_BAD_EC_HDR if the erase counter header is corrupted (a CRC error);
- * o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty;
+ * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error);
+ * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also was
+ * a data integrity error (uncorrectable ECC error in case of NAND);
+ * o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty)
* o a negative error code in case of failure.
*/
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
struct ubi_ec_hdr *ec_hdr, int verbose)
{
- int err, read_err = 0;
+ int err, read_err;
uint32_t crc, magic, hdr_crc;
dbg_io("read EC header from PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- if (UBI_IO_DEBUG)
- verbose = 1;
- err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
- if (err) {
- if (err != UBI_IO_BITFLIPS && err != -EBADMSG)
- return err;
+ read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
+ if (read_err) {
+ if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
+ return read_err;
/*
* We read all the data, but either a correctable bit-flip
- * occurred, or MTD reported about some data integrity error,
- * like an ECC error in case of NAND. The former is harmless,
- * the later may mean that the read data is corrupted. But we
- * have a CRC check-sum and we will detect this. If the EC
- * header is still OK, we just report this as there was a
- * bit-flip.
+ * occurred, or MTD reported a data integrity error
+ * (uncorrectable ECC error in case of NAND). The former is
+		 * harmless, the latter may mean that the read data is
+ * corrupted. But we have a CRC check-sum and we will detect
+		 * this. If the EC header is still OK, we just report this as if
+ * there was a bit-flip, to force scrubbing.
*/
- read_err = err;
}
magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
+ if (mtd_is_eccerr(read_err))
+ return UBI_IO_BAD_HDR_EBADMSG;
+
/*
* The magic field is wrong. Let's check if we have read all
* 0xFF. If yes, this physical eraseblock is assumed to be
* empty.
- *
- * But if there was a read error, we do not test it for all
- * 0xFFs. Even if it does contain all 0xFFs, this error
- * indicates that something is still wrong with this physical
- * eraseblock and we anyway cannot treat it as empty.
*/
- if (read_err != -EBADMSG &&
- check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
+ if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
/* The physical eraseblock is supposedly empty */
-
- /*
- * The below is just a paranoid check, it has to be
- * compiled out if paranoid checks are disabled.
- */
- err = paranoid_check_all_ff(ubi, pnum, 0,
- ubi->peb_size);
- if (err)
- return err > 0 ? UBI_IO_BAD_EC_HDR : err;
-
if (verbose)
- ubi_warn("no EC header found at PEB %d, "
- "only 0xFF bytes", pnum);
- return UBI_IO_PEB_EMPTY;
+ ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
+ if (!read_err)
+ return UBI_IO_FF;
+ else
+ return UBI_IO_FF_BITFLIPS;
}
/*
@@ -689,11 +772,13 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
* 0xFF bytes. Report that the header is corrupted.
*/
if (verbose) {
- ubi_warn("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
- ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
+ ubi_dump_ec_hdr(ec_hdr);
}
- return UBI_IO_BAD_EC_HDR;
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
+ return UBI_IO_BAD_HDR;
}
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
@@ -701,11 +786,17 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (hdr_crc != crc) {
if (verbose) {
- ubi_warn("bad EC header CRC at PEB %d, calculated %#08x,"
- " read %#08x", pnum, crc, hdr_crc);
- ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_ec_hdr(ec_hdr);
}
- return UBI_IO_BAD_EC_HDR;
+ dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+
+ if (!read_err)
+ return UBI_IO_BAD_HDR;
+ else
+ return UBI_IO_BAD_HDR_EBADMSG;
}
/* And of course validate what has just been read from the media */
@@ -715,6 +806,10 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
return -EINVAL;
}
+ /*
+	 * If there was %-EBADMSG but the header CRC is still OK, report a
+	 * bit-flip to force scrubbing on this PEB.
+ */
return read_err ? UBI_IO_BITFLIPS : 0;
}
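
/*
 * Illustrative sketch, not part of the patch: how a caller might dispatch on
 * the reworked ubi_io_read_ec_hdr() return codes. The helper names
 * mark_for_erase() and mark_corrupted() are hypothetical.
 *
 *	static int classify_peb(struct ubi_device *ubi, int pnum,
 *				struct ubi_ec_hdr *ech)
 *	{
 *		int err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
 *
 *		switch (err) {
 *		case 0:				/\* header valid *\/
 *		case UBI_IO_BITFLIPS:		/\* valid, but schedule scrubbing *\/
 *			return 0;
 *		case UBI_IO_FF:			/\* all 0xFF - supposedly empty *\/
 *		case UBI_IO_FF_BITFLIPS:
 *			return mark_for_erase(ubi, pnum);	/\* hypothetical *\/
 *		case UBI_IO_BAD_HDR:		/\* CRC error *\/
 *		case UBI_IO_BAD_HDR_EBADMSG:	/\* CRC error plus ECC error *\/
 *			return mark_corrupted(ubi, pnum);	/\* hypothetical *\/
 *		default:
 *			return err;	/\* negative error from the lower layer *\/
 *		}
 *	}
 */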
@@ -746,12 +841,13 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
ec_hdr->version = UBI_VERSION;
ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
+ ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
ec_hdr->hdr_crc = cpu_to_be32(crc);
- err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
if (err)
- return -EINVAL;
+ return err;
err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
return err;
@@ -780,40 +876,40 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
int usable_leb_size = ubi->leb_size - data_pad;
if (copy_flag != 0 && copy_flag != 1) {
- dbg_err("bad copy_flag");
+ ubi_err("bad copy_flag");
goto bad;
}
if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
data_pad < 0) {
- dbg_err("negative values");
+ ubi_err("negative values");
goto bad;
}
if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
- dbg_err("bad vol_id");
+ ubi_err("bad vol_id");
goto bad;
}
if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
- dbg_err("bad compat");
+ ubi_err("bad compat");
goto bad;
}
if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
compat != UBI_COMPAT_REJECT) {
- dbg_err("bad compat");
+ ubi_err("bad compat");
goto bad;
}
if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
- dbg_err("bad vol_type");
+ ubi_err("bad vol_type");
goto bad;
}
if (data_pad >= ubi->leb_size / 2) {
- dbg_err("bad data_pad");
+ ubi_err("bad data_pad");
goto bad;
}
@@ -825,45 +921,45 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
* mapped logical eraseblocks.
*/
if (used_ebs == 0) {
- dbg_err("zero used_ebs");
+ ubi_err("zero used_ebs");
goto bad;
}
if (data_size == 0) {
- dbg_err("zero data_size");
+ ubi_err("zero data_size");
goto bad;
}
if (lnum < used_ebs - 1) {
if (data_size != usable_leb_size) {
- dbg_err("bad data_size");
+ ubi_err("bad data_size");
goto bad;
}
} else if (lnum == used_ebs - 1) {
if (data_size == 0) {
- dbg_err("bad data_size at last LEB");
+ ubi_err("bad data_size at last LEB");
goto bad;
}
} else {
- dbg_err("too high lnum");
+ ubi_err("too high lnum");
goto bad;
}
} else {
if (copy_flag == 0) {
if (data_crc != 0) {
- dbg_err("non-zero data CRC");
+ ubi_err("non-zero data CRC");
goto bad;
}
if (data_size != 0) {
- dbg_err("non-zero data_size");
+ ubi_err("non-zero data_size");
goto bad;
}
} else {
if (data_size == 0) {
- dbg_err("zero data_size of copy");
+ ubi_err("zero data_size of copy");
goto bad;
}
}
if (used_ebs != 0) {
- dbg_err("bad used_ebs");
+ ubi_err("bad used_ebs");
goto bad;
}
}
@@ -872,8 +968,8 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
bad:
ubi_err("bad VID header");
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
return 1;
}
@@ -887,88 +983,53 @@ bad:
*
* This function reads the volume identifier header from physical eraseblock
* @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
- * volume identifier header. The following codes may be returned:
+ * volume identifier header. The error codes are the same as in
+ * 'ubi_io_read_ec_hdr()'.
*
- * o %0 if the CRC checksum is correct and the header was successfully read;
- * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
- * and corrected by the flash driver; this is harmless but may indicate that
- * this eraseblock may become bad soon;
- * o %UBI_IO_BAD_VID_HRD if the volume identifier header is corrupted (a CRC
- * error detected);
- * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID
- * header there);
- * o a negative error code in case of failure.
+ * Note, the implementation of this function is also very similar to
+ * 'ubi_io_read_ec_hdr()', so refer to the comments in 'ubi_io_read_ec_hdr()'.
*/
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr, int verbose)
{
- int err, read_err = 0;
+ int err, read_err;
uint32_t crc, magic, hdr_crc;
void *p;
dbg_io("read VID header from PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- if (UBI_IO_DEBUG)
- verbose = 1;
p = (char *)vid_hdr - ubi->vid_hdr_shift;
- err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
- if (err) {
- if (err != UBI_IO_BITFLIPS && err != -EBADMSG)
- return err;
-
- /*
- * We read all the data, but either a correctable bit-flip
- * occurred, or MTD reported about some data integrity error,
- * like an ECC error in case of NAND. The former is harmless,
- * the later may mean the read data is corrupted. But we have a
- * CRC check-sum and we will identify this. If the VID header is
- * still OK, we just report this as there was a bit-flip.
- */
- read_err = err;
- }
+ if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
+ return read_err;
magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
- /*
- * If we have read all 0xFF bytes, the VID header probably does
- * not exist and the physical eraseblock is assumed to be free.
- *
- * But if there was a read error, we do not test the data for
- * 0xFFs. Even if it does contain all 0xFFs, this error
- * indicates that something is still wrong with this physical
- * eraseblock and it cannot be regarded as free.
- */
- if (read_err != -EBADMSG &&
- check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
- /* The physical eraseblock is supposedly free */
-
- /*
- * The below is just a paranoid check, it has to be
- * compiled out if paranoid checks are disabled.
- */
- err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start,
- ubi->leb_size);
- if (err)
- return err > 0 ? UBI_IO_BAD_VID_HDR : err;
+ if (mtd_is_eccerr(read_err))
+ return UBI_IO_BAD_HDR_EBADMSG;
+ if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
if (verbose)
- ubi_warn("no VID header found at PEB %d, "
- "only 0xFF bytes", pnum);
- return UBI_IO_PEB_FREE;
+ ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
+ if (!read_err)
+ return UBI_IO_FF;
+ else
+ return UBI_IO_FF_BITFLIPS;
}
- /*
- * This is not a valid VID header, and these are not 0xFF
- * bytes. Report that the header is corrupted.
- */
if (verbose) {
- ubi_warn("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
+ ubi_dump_vid_hdr(vid_hdr);
}
- return UBI_IO_BAD_VID_HDR;
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
+ return UBI_IO_BAD_HDR;
}
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
@@ -976,14 +1037,18 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (hdr_crc != crc) {
if (verbose) {
- ubi_warn("bad CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_vid_hdr(vid_hdr);
}
- return UBI_IO_BAD_VID_HDR;
+ dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ if (!read_err)
+ return UBI_IO_BAD_HDR;
+ else
+ return UBI_IO_BAD_HDR_EBADMSG;
}
- /* Validate the VID header that we have just read */
err = validate_vid_hdr(ubi, vid_hdr);
if (err) {
ubi_err("validation failed for PEB %d", pnum);
@@ -1018,18 +1083,18 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
dbg_io("write VID header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
- return err > 0 ? -EINVAL: err;
+ return err;
vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
vid_hdr->version = UBI_VERSION;
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
vid_hdr->hdr_crc = cpu_to_be32(crc);
- err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
if (err)
- return -EINVAL;
+ return err;
p = (char *)vid_hdr - ubi->vid_hdr_shift;
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
@@ -1037,44 +1102,48 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-
/**
- * paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
+ * self_check_not_bad - ensure that a physical eraseblock is not bad.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to check
*
- * This function returns zero if the physical eraseblock is good, a positive
- * number if it is bad and a negative error code if an error occurred.
+ * This function returns zero if the physical eraseblock is good, %-EINVAL if
+ * it is bad and a negative error code if an error occurred.
*/
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
err = ubi_io_is_bad(ubi, pnum);
if (!err)
return err;
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_stack();
- return err;
+ ubi_err("self-check failed for PEB %d", pnum);
+ dump_stack();
+ return err > 0 ? -EINVAL : err;
}
/**
- * paranoid_check_ec_hdr - check if an erase counter header is all right.
+ * self_check_ec_hdr - check if an erase counter header is all right.
* @ubi: UBI device description object
* @pnum: physical eraseblock number the erase counter header belongs to
* @ec_hdr: the erase counter header to check
*
* This function returns zero if the erase counter header contains valid
- * values, and %1 if not.
+ * values, and %-EINVAL if not.
*/
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_ec_hdr *ec_hdr)
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr)
{
int err;
uint32_t magic;
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
ubi_err("bad magic %#08x, must be %#08x",
@@ -1084,53 +1153,55 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
err = validate_ec_hdr(ubi, ec_hdr);
if (err) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
goto fail;
}
return 0;
fail:
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
- return 1;
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
+ return -EINVAL;
}
/**
- * paranoid_check_peb_ec_hdr - check that the erase counter header of a
- * physical eraseblock is in-place and is all right.
+ * self_check_peb_ec_hdr - check erase counter header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
- * This function returns zero if the erase counter header is all right, %1 if
- * not, and a negative error code if an error occurred.
+ * This function returns zero if the erase counter header is all right, and
+ * a negative error code if not or if an error occurred.
*/
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
- if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto exit;
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
- err = 1;
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
+ err = -EINVAL;
goto exit;
}
- err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
exit:
kfree(ec_hdr);
@@ -1138,20 +1209,23 @@ exit:
}
/**
- * paranoid_check_vid_hdr - check that a volume identifier header is all right.
+ * self_check_vid_hdr - check that a volume identifier header is all right.
* @ubi: UBI device description object
* @pnum: physical eraseblock number the volume identifier header belongs to
* @vid_hdr: the volume identifier header to check
*
* This function returns zero if the volume identifier header is all right, and
- * %1 if not.
+ * %-EINVAL if not.
*/
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_vid_hdr *vid_hdr)
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
{
int err;
uint32_t magic;
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
@@ -1161,36 +1235,38 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
err = validate_vid_hdr(ubi, vid_hdr);
if (err) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
goto fail;
}
return err;
fail:
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
- return 1;
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
+ return -EINVAL;
}
/**
- * paranoid_check_peb_vid_hdr - check that the volume identifier header of a
- * physical eraseblock is in-place and is all right.
+ * self_check_peb_vid_hdr - check volume identifier header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
* This function returns zero if the volume identifier header is all right,
- * %1 if not, and a negative error code if an error occurred.
+ * and a negative error code if not or if an error occurred.
*/
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
struct ubi_vid_hdr *vid_hdr;
void *p;
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
return -ENOMEM;
@@ -1198,22 +1274,22 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
p = (char *)vid_hdr - ubi->vid_hdr_shift;
err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
- if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto exit;
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
- ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
- err = 1;
+ ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
+ err = -EINVAL;
goto exit;
}
- err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
exit:
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -1221,51 +1297,122 @@ exit:
}
/**
- * paranoid_check_all_ff - check that a region of flash is empty.
+ * self_check_write - make sure write succeeded.
+ * @ubi: UBI device description object
+ * @buf: buffer with data which were written
+ * @pnum: physical eraseblock number the data were written to
+ * @offset: offset within the physical eraseblock the data were written to
+ * @len: how many bytes were written
+ *
+ * This function reads back the data which were recently written and compares
+ * them with the original data buffer - the data have to match. Returns zero
+ * if the data match and a negative error code if not or in case of failure.
+ */
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len)
+{
+ int err, i;
+ size_t read;
+ void *buf1;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ buf1 = kmalloc(len, GFP_KERNEL);
+ if (!buf1) {
+ ubi_err("cannot allocate memory to check writes");
+ return 0;
+ }
+
+ err = mtd_read(ubi->mtd, addr, len, &read, buf1);
+ if (err && !mtd_is_bitflip(err))
+ goto out_free;
+
+ for (i = 0; i < len; i++) {
+ uint8_t c = ((uint8_t *)buf)[i];
+ uint8_t c1 = ((uint8_t *)buf1)[i];
+ int dump_len;
+
+ if (c == c1)
+ continue;
+
+ ubi_err("self-check failed for PEB %d:%d, len %d",
+ pnum, offset, len);
+ ubi_msg("data differ at position %d", i);
+ dump_len = max_t(int, 128, len - i);
+ ubi_msg("hex dump of the original buffer from %d to %d",
+ i, i + dump_len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ buf + i, dump_len, 1);
+ ubi_msg("hex dump of the read buffer from %d to %d",
+ i, i + dump_len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ buf1 + i, dump_len, 1);
+ dump_stack();
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ vfree(buf1);
+ return 0;
+
+out_free:
+ vfree(buf1);
+ return err;
+}
+
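+/*
+ * Illustrative sketch, not part of the patch: self_check_write() is meant to
+ * run right after the data hit the flash, e.g. at the end of the write path.
+ * A minimal sketch of such a call site, assuming the mtd_write() based I/O
+ * used elsewhere in this file (the wrapper name is hypothetical):
+ *
+ *	static int write_and_verify(struct ubi_device *ubi, const void *buf,
+ *				    int pnum, int offset, int len)
+ *	{
+ *		loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+ *		size_t written;
+ *		int err;
+ *
+ *		err = mtd_write(ubi->mtd, addr, len, &written, buf);
+ *		if (err)
+ *			return err;
+ *		/\* a no-op unless the I/O self-checks are enabled *\/
+ *		return self_check_write(ubi, buf, pnum, offset, len);
+ *	}
+ */
+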
+/**
+ * ubi_self_check_all_ff - check that a region of flash is empty.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @offset: the starting offset within the physical eraseblock to check
* @len: the length of the region to check
*
* This function returns zero if only 0xFF bytes are present at offset
- * @offset of the physical eraseblock @pnum, %1 if not, and a negative error
- * code if an error occurred.
+ * @offset of the physical eraseblock @pnum, and a negative error code if not
+ * or if an error occurred.
*/
-static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
- int len)
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
size_t read;
int err;
+ void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- mutex_lock(&ubi->dbg_buf_mutex);
- err = mtd_read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf);
- if (err && err != -EUCLEAN) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, len, pnum, offset, read);
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf) {
+ ubi_err("cannot allocate memory to check for 0xFFs");
+ return 0;
+ }
+
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err && !mtd_is_bitflip(err)) {
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
goto error;
}
- err = check_pattern(ubi->dbg_peb_buf, 0xFF, len);
+ err = ubi_check_pattern(buf, 0xFF, len);
if (err == 0) {
- ubi_err("flash region at PEB %d:%d, length %d does not "
- "contain all 0xFF bytes", pnum, offset, len);
+ ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+ pnum, offset, len);
goto fail;
}
- mutex_unlock(&ubi->dbg_buf_mutex);
+ vfree(buf);
return 0;
fail:
- ubi_err("paranoid check failed for PEB %d", pnum);
- dbg_msg("hex dump of the %d-%d region", offset, offset + len);
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
- ubi->dbg_peb_buf, len, 1);
- err = 1;
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_msg("hex dump of the %d-%d region", offset, offset + len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+ err = -EINVAL;
error:
- ubi_dbg_dump_stack();
- mutex_unlock(&ubi->dbg_buf_mutex);
+ dump_stack();
+ vfree(buf);
return err;
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
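
/*
 * Illustrative sketch, not part of the patch: with the compile-time
 * CONFIG_MTD_UBI_DEBUG_PARANOID guard gone, the self-checks are always built
 * and instead bail out at run time via ubi_dbg_chk_io(). A caller that has
 * just erased a PEB could verify it like this (the erase helper name is
 * hypothetical):
 *
 *	static int erase_and_verify(struct ubi_device *ubi, int pnum)
 *	{
 *		int err;
 *
 *		err = do_sync_erase(ubi, pnum);	/\* hypothetical *\/
 *		if (err)
 *			return err;
 *		/\* returns 0 immediately when the I/O self-checks are off *\/
 *		return ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
 *	}
 */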
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 7cf200cfda..fca8cc9e15 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -11,78 +11,20 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/* This file mostly implements UBI kernel API functions */
-#ifdef UBI_LINUX
-#include <linux/module.h>
#include <linux/err.h>
-#include <asm/div64.h>
-#endif
-
-#include "ubi-barebox.h"
+#include <asm-generic/div64.h>
#include "ubi.h"
/**
- * ubi_get_device_info - get information about UBI device.
- * @ubi_num: UBI device number
- * @di: the information is stored here
- *
- * This function returns %0 in case of success, %-EINVAL if the UBI device
- * number is invalid, and %-ENODEV if there is no such UBI device.
- */
-int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
-{
- struct ubi_device *ubi;
-
- if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
- return -EINVAL;
-
- ubi = ubi_get_device(ubi_num);
- if (!ubi)
- return -ENODEV;
-
- di->ubi_num = ubi->ubi_num;
- di->leb_size = ubi->leb_size;
- di->min_io_size = ubi->min_io_size;
- di->ro_mode = ubi->ro_mode;
- di->cdev = &ubi->cdev;
-
- ubi_put_device(ubi);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ubi_get_device_info);
-
-/**
- * ubi_get_volume_info - get information about UBI volume.
- * @desc: volume descriptor
- * @vi: the information is stored here
- */
-void ubi_get_volume_info(struct ubi_volume_desc *desc,
- struct ubi_volume_info *vi)
-{
- struct ubi_volume *vol = desc->vol;
- struct ubi_device *ubi = vol->ubi;
-
- vi->vol_id = vol->vol_id;
- vi->ubi_num = ubi->ubi_num;
- vi->size = vol->reserved_pebs;
- vi->used_bytes = vol->used_bytes;
- vi->vol_type = vol->vol_type;
- vi->corrupted = vol->corrupted;
- vi->upd_marker = vol->upd_marker;
- vi->alignment = vol->alignment;
- vi->usable_leb_size = vol->usable_leb_size;
- vi->name_len = vol->name_len;
- vi->name = vol->name;
- vi->cdev = &vol->cdev;
-}
-EXPORT_SYMBOL_GPL(ubi_get_volume_info);
-
-/**
* ubi_open_volume - open UBI volume.
* @ubi_num: UBI device number
* @vol_id: volume ID
@@ -107,7 +49,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
struct ubi_device *ubi;
struct ubi_volume *vol;
- dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
+ dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode);
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return ERR_PTR(-EINVAL);
@@ -135,10 +77,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
}
err = -ENODEV;
- if (!try_module_get(THIS_MODULE))
- goto out_free;
- spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
if (!vol)
goto out_unlock;
@@ -163,19 +102,15 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
vol->exclusive = 1;
break;
}
- get_device(&vol->dev);
vol->ref_count += 1;
- spin_unlock(&ubi->volumes_lock);
desc->vol = vol;
desc->mode = mode;
- mutex_lock(&ubi->ckvol_mutex);
if (!vol->checked) {
/* This is the first open - check the volume */
err = ubi_check_volume(ubi, vol_id);
if (err < 0) {
- mutex_unlock(&ubi->ckvol_mutex);
ubi_close_volume(desc);
return ERR_PTR(err);
}
@@ -186,17 +121,15 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
}
vol->checked = 1;
}
- mutex_unlock(&ubi->ckvol_mutex);
return desc;
out_unlock:
- spin_unlock(&ubi->volumes_lock);
- module_put(THIS_MODULE);
-out_free:
kfree(desc);
out_put_ubi:
ubi_put_device(ubi);
+ ubi_err("cannot open device %d, volume %d, error %d",
+ ubi_num, vol_id, err);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ubi_open_volume);
@@ -216,7 +149,7 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
struct ubi_device *ubi;
struct ubi_volume_desc *ret;
- dbg_msg("open volume %s, mode %d", name, mode);
+ dbg_gen("open device %d, volume %s, mode %d", ubi_num, name, mode);
if (!name)
return ERR_PTR(-EINVAL);
@@ -232,7 +165,6 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
if (!ubi)
return ERR_PTR(-ENODEV);
- spin_lock(&ubi->volumes_lock);
/* Walk all volumes of this UBI device */
for (i = 0; i < ubi->vtbl_slots; i++) {
struct ubi_volume *vol = ubi->volumes[i];
@@ -242,7 +174,6 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
break;
}
}
- spin_unlock(&ubi->volumes_lock);
if (vol_id >= 0)
ret = ubi_open_volume(ubi_num, vol_id, mode);
@@ -267,9 +198,9 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
+ dbg_gen("close device %d, volume %d, mode %d",
+ ubi->ubi_num, vol->vol_id, desc->mode);
- spin_lock(&ubi->volumes_lock);
switch (desc->mode) {
case UBI_READONLY:
vol->readers -= 1;
@@ -281,12 +212,9 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
vol->exclusive = 0;
}
vol->ref_count -= 1;
- spin_unlock(&ubi->volumes_lock);
kfree(desc);
- put_device(&vol->dev);
ubi_put_device(ubi);
- module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(ubi_close_volume);
@@ -324,7 +252,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
struct ubi_device *ubi = vol->ubi;
int err, vol_id = vol->vol_id;
- dbg_msg("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
+ dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
lnum >= vol->used_ebs || offset < 0 || len < 0 ||
@@ -346,7 +274,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
return 0;
err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
- if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
+ if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
ubi_warn("mark volume %d as corrupted", vol_id);
vol->corrupted = 1;
}
@@ -362,11 +290,9 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
* @buf: data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
- * @dtype: expected data type
*
* This function writes @len bytes of data from @buf to offset @offset of
- * logical eraseblock @lnum. The @dtype argument describes expected lifetime of
- * the data.
+ * logical eraseblock @lnum.
*
* This function takes care of physical eraseblock write failures. If write to
* the physical eraseblock write operation fails, the logical eraseblock is
@@ -383,13 +309,13 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
* returns immediately with %-EBADF code.
*/
int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int offset, int len, int dtype)
+ int offset, int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int vol_id = vol->vol_id;
- dbg_msg("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset);
+ dbg_gen("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset);
if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
return -EINVAL;
@@ -402,17 +328,13 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (len == 0)
return 0;
- return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
}
EXPORT_SYMBOL_GPL(ubi_leb_write);
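
/*
 * Illustrative sketch, not part of the patch: dropping the @dtype argument
 * changes the exported prototype, so callers need a mechanical update:
 *
 *	before:	ubi_leb_write(desc, lnum, buf, offset, len, UBI_UNKNOWN);
 *	after:	ubi_leb_write(desc, lnum, buf, offset, len);
 *
 * The same applies to ubi_leb_change() and ubi_leb_map() below.
 */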
@@ -422,24 +344,23 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
* @lnum: logical eraseblock number to change
* @buf: data to write
* @len: how many bytes to write
- * @dtype: expected data type
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
- * data, which has to be aligned. The length may be shorter then the logical
+ * data, which has to be aligned. The length may be shorter than the logical
 * eraseblock size, and the logical eraseblock may be appended to more times
* later on. This function guarantees that in case of an unclean reboot the old
* contents is preserved. Returns zero in case of success and a negative error
* code in case of failure.
*/
int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int len, int dtype)
+ int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int vol_id = vol->vol_id;
- dbg_msg("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
+ dbg_gen("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
return -EINVAL;
@@ -451,17 +372,13 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (len == 0)
return 0;
- return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
+ return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
}
EXPORT_SYMBOL_GPL(ubi_leb_change);
@@ -483,7 +400,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
struct ubi_device *ubi = vol->ubi;
int err;
- dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
+ dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
@@ -498,7 +415,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
if (err)
return err;
- return ubi_wl_flush(ubi);
+ return ubi_wl_flush(ubi, vol->vol_id, lnum);
}
EXPORT_SYMBOL_GPL(ubi_leb_erase);
@@ -509,7 +426,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_erase);
*
* This function un-maps logical eraseblock @lnum and schedules the
* corresponding physical eraseblock for erasure, so that it will eventually be
- * physically erased in background. This operation is much faster then the
+ * physically erased in background. This operation is much faster than the
* erase operation.
*
* Unlike erase, the un-map operation does not guarantee that the logical
@@ -528,7 +445,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_erase);
*
* The main and obvious use-case of this function is when the contents of a
* logical eraseblock has to be re-written. Then it is much more efficient to
- * first un-map it, then write new data, rather then first erase it, then write
+ * first un-map it, then write new data, rather than first erase it, then write
* new data. Note, once new data has been written to the logical eraseblock,
* UBI guarantees that the old contents has gone forever. In other words, if an
* unclean reboot happens after the logical eraseblock has been un-mapped and
@@ -543,7 +460,7 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
+ dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
@@ -559,13 +476,12 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
EXPORT_SYMBOL_GPL(ubi_leb_unmap);
/**
- * ubi_leb_map - map logical erasblock to a physical eraseblock.
+ * ubi_leb_map - map logical eraseblock to a physical eraseblock.
* @desc: volume descriptor
* @lnum: logical eraseblock number
- * @dtype: expected data type
*
* This function maps an un-mapped logical eraseblock @lnum to a physical
- * eraseblock. This means, that after a successfull invocation of this
+ * eraseblock. This means that after a successful invocation of this
* function the logical eraseblock @lnum will be empty (contain only %0xFF
* bytes) and be mapped to a physical eraseblock, even if an unclean reboot
* happens.
@@ -575,12 +491,12 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
* eraseblock is already mapped, and other negative error codes in case of
* other failures.
*/
-int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
+ dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
@@ -588,17 +504,13 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
if (lnum < 0 || lnum >= vol->reserved_pebs)
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (vol->eba_tbl[lnum] >= 0)
return -EBADMSG;
- return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(ubi_leb_map);
@@ -622,7 +534,7 @@ int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
- dbg_msg("test LEB %d:%d", vol->vol_id, lnum);
+ dbg_gen("test LEB %d:%d", vol->vol_id, lnum);
if (lnum < 0 || lnum >= vol->reserved_pebs)
return -EINVAL;
@@ -633,3 +545,30 @@ int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum)
return vol->eba_tbl[lnum] >= 0;
}
EXPORT_SYMBOL_GPL(ubi_is_mapped);
+
+/**
+ * ubi_flush - flush UBI work queue.
+ * @ubi_num: UBI device to flush work queue
+ * @vol_id: volume id to flush for
+ * @lnum: logical eraseblock number to flush for
+ *
+ * This function executes all pending work for a particular volume ID / logical
+ * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
+ * a wildcard for all of the corresponding volume IDs or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_flush(int ubi_num, int vol_id, int lnum)
+{
+ struct ubi_device *ubi;
+ int err = 0;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ err = ubi_wl_flush(ubi, vol_id, lnum);
+ ubi_put_device(ubi);
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_flush);
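+
+/*
+ * Illustrative sketch, not part of the patch: %UBI_ALL lets ubi_flush() serve
+ * both as a targeted flush and as a full work-queue drain. The device, volume
+ * and LEB numbers below are examples only:
+ *
+ *	static int flush_examples(void)
+ *	{
+ *		int err;
+ *
+ *		/\* flush pending work for LEB 5 of volume 0 on UBI device 0 *\/
+ *		err = ubi_flush(0, 0, 5);
+ *		if (err)
+ *			return err;
+ *
+ *		/\* drain the whole work queue of UBI device 0 *\/
+ *		return ubi_flush(0, UBI_ALL, UBI_ALL);
+ *	}
+ */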
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 1ef0e47407..f913d701a5 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -11,13 +11,15 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/* Here we keep miscellaneous functions which are used all over the UBI code */
-#include "ubi-barebox.h"
#include "ubi.h"
/**
@@ -79,7 +81,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
if (err) {
- if (err == -EBADMSG)
+ if (mtd_is_eccerr(err))
err = 1;
break;
}
@@ -90,14 +92,62 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
}
/**
- * ubi_calculate_rsvd_pool - calculate how many PEBs must be reserved for bad
+ * ubi_update_reserved - update bad eraseblock handling accounting data.
+ * @ubi: UBI device description object
+ *
+ * This function calculates the gap between the current number of PEBs reserved
+ * for bad eraseblock handling and the required level of PEBs that must be
+ * reserved, and if necessary, reserves more PEBs to fill that gap, according
+ * to availability. Should be called with ubi->volumes_lock held.
+ */
+void ubi_update_reserved(struct ubi_device *ubi)
+{
+ int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
+
+ if (need <= 0 || ubi->avail_pebs == 0)
+ return;
+
+ need = min_t(int, need, ubi->avail_pebs);
+ ubi->avail_pebs -= need;
+ ubi->rsvd_pebs += need;
+ ubi->beb_rsvd_pebs += need;
+ ubi_msg("reserved more %d PEBs for bad PEB handling", need);
+}
+
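+/*
+ * Illustrative sketch, not part of the patch: a worked example of the
+ * accounting above. With beb_rsvd_level = 20, beb_rsvd_pebs = 16 and only
+ * avail_pebs = 2, the gap is 20 - 16 = 4, but only min(4, 2) = 2 PEBs can be
+ * moved into the reserve, so afterwards:
+ *
+ *	avail_pebs:	2 -> 0
+ *	rsvd_pebs:	n -> n + 2
+ *	beb_rsvd_pebs:	16 -> 18
+ */
+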
+/**
+ * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
* eraseblock handling.
* @ubi: UBI device description object
*/
void ubi_calculate_reserved(struct ubi_device *ubi)
{
- ubi->beb_rsvd_level = ubi->good_peb_count/100;
- ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE;
- if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS)
- ubi->beb_rsvd_level = MIN_RESEVED_PEBS;
+ /*
+ * Calculate the actual number of PEBs currently needed to be reserved
+ * for future bad eraseblock handling.
+ */
+ ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
+ if (ubi->beb_rsvd_level < 0) {
+ ubi->beb_rsvd_level = 0;
+ ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
+ ubi->bad_peb_count, ubi->bad_peb_limit);
+ }
+}
+
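+/*
+ * Illustrative sketch, not part of the patch: the reserve level is now the
+ * distance to the per-device bad-PEB limit instead of a fixed percentage of
+ * good PEBs. For example:
+ *
+ *	bad_peb_limit = 40, bad_peb_count = 12  ->  beb_rsvd_level = 28
+ *	bad_peb_limit = 40, bad_peb_count = 45  ->  clamped to 0, warning above
+ */
+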
+/**
+ * ubi_check_pattern - check if buffer contains only a certain byte pattern.
+ * @buf: buffer to check
+ * @patt: the pattern to check
+ * @size: buffer size in bytes
+ *
+ * This function returns %1 if there are only @patt bytes in @buf, and %0 if
+ * something else was also found.
+ */
+int ubi_check_pattern(const void *buf, uint8_t patt, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (((const uint8_t *)buf)[i] != patt)
+ return 0;
+ return 1;
}
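
/*
 * Illustrative sketch, not part of the patch: ubi_check_pattern() replaces the
 * old file-local check_pattern() helper and is now shared across the UBI code,
 * e.g. for deciding whether a freshly read header region is erased (the
 * wrapper name is hypothetical):
 *
 *	static int hdr_region_is_erased(const struct ubi_ec_hdr *ec_hdr)
 *	{
 *		/\* returns 1 if the buffer holds nothing but 0xFF bytes *\/
 *		return ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE);
 *	}
 */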
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
deleted file mode 100644
index fedea5bb28..0000000000
--- a/drivers/mtd/ubi/scan.c
+++ /dev/null
@@ -1,1359 +0,0 @@
-/*
- * Copyright (c) International Business Machines Corp., 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
- *
- * Author: Artem Bityutskiy (Битюцкий Артём)
- */
-
-/*
- * UBI scanning unit.
- *
- * This unit is responsible for scanning the flash media, checking UBI
- * headers and providing complete information about the UBI flash image.
- *
- * The scanning information is represented by a &struct ubi_scan_info' object.
- * Information about found volumes is represented by &struct ubi_scan_volume
- * objects which are kept in volume RB-tree with root at the @volumes field.
- * The RB-tree is indexed by the volume ID.
- *
- * Found logical eraseblocks are represented by &struct ubi_scan_leb objects.
- * These objects are kept in per-volume RB-trees with the root at the
- * corresponding &struct ubi_scan_volume object. To put it differently, we keep
- * an RB-tree of per-volume objects and each of these objects is the root of
- * RB-tree of per-eraseblock objects.
- *
- * Corrupted physical eraseblocks are put to the @corr list, free physical
- * eraseblocks are put to the @free list and the physical eraseblock to be
- * erased are put to the @erase list.
- */
-
-#ifdef UBI_LINUX
-#include <linux/err.h>
-#include <linux/crc32.h>
-#include <asm/div64.h>
-#endif
-
-#include "ubi-barebox.h"
-#include "ubi.h"
-
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si);
-#else
-#define paranoid_check_si(ubi, si) 0
-#endif
-
-/* Temporary variables used during scanning */
-static struct ubi_ec_hdr *ech;
-static struct ubi_vid_hdr *vidh;
-
-/**
- * add_to_list - add physical eraseblock to a list.
- * @si: scanning information
- * @pnum: physical eraseblock number to add
- * @ec: erase counter of the physical eraseblock
- * @list: the list to add to
- *
- * This function adds physical eraseblock @pnum to free, erase, corrupted or
- * alien lists. Returns zero in case of success and a negative error code in
- * case of failure.
- */
-static int add_to_list(struct ubi_scan_info *si, int pnum, int ec,
- struct list_head *list)
-{
- struct ubi_scan_leb *seb;
-
- if (list == &si->free)
- dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
- else if (list == &si->erase)
- dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
- else if (list == &si->corr)
- dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
- else if (list == &si->alien)
- dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
- else
- BUG();
-
- seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
- if (!seb)
- return -ENOMEM;
-
- seb->pnum = pnum;
- seb->ec = ec;
- list_add_tail(&seb->u.list, list);
- return 0;
-}
-
-/**
- * validate_vid_hdr - check that volume identifier header is correct and
- * consistent.
- * @vid_hdr: the volume identifier header to check
- * @sv: information about the volume this logical eraseblock belongs to
- * @pnum: physical eraseblock number the VID header came from
- *
- * This function checks that data stored in @vid_hdr is consistent. Returns
- * non-zero if an inconsistency was found and zero if not.
- *
- * Note, UBI does sanity check of everything it reads from the flash media.
- * Most of the checks are done in the I/O unit. Here we check that the
- * information in the VID header is consistent to the information in other VID
- * headers of the same volume.
- */
-static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
- const struct ubi_scan_volume *sv, int pnum)
-{
- int vol_type = vid_hdr->vol_type;
- int vol_id = be32_to_cpu(vid_hdr->vol_id);
- int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
- int data_pad = be32_to_cpu(vid_hdr->data_pad);
-
- if (sv->leb_count != 0) {
- int sv_vol_type;
-
- /*
- * This is not the first logical eraseblock belonging to this
- * volume. Ensure that the data in its VID header is consistent
- * to the data in previous logical eraseblock headers.
- */
-
- if (vol_id != sv->vol_id) {
- dbg_err("inconsistent vol_id");
- goto bad;
- }
-
- if (sv->vol_type == UBI_STATIC_VOLUME)
- sv_vol_type = UBI_VID_STATIC;
- else
- sv_vol_type = UBI_VID_DYNAMIC;
-
- if (vol_type != sv_vol_type) {
- dbg_err("inconsistent vol_type");
- goto bad;
- }
-
- if (used_ebs != sv->used_ebs) {
- dbg_err("inconsistent used_ebs");
- goto bad;
- }
-
- if (data_pad != sv->data_pad) {
- dbg_err("inconsistent data_pad");
- goto bad;
- }
- }
-
- return 0;
-
-bad:
- ubi_err("inconsistent VID header at PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_sv(sv);
- return -EINVAL;
-}
-
-/**
- * add_volume - add volume to the scanning information.
- * @si: scanning information
- * @vol_id: ID of the volume to add
- * @pnum: physical eraseblock number
- * @vid_hdr: volume identifier header
- *
- * If the volume corresponding to the @vid_hdr logical eraseblock is already
- * present in the scanning information, this function does nothing. Otherwise
- * it adds corresponding volume to the scanning information. Returns a pointer
- * to the scanning volume object in case of success and a negative error code
- * in case of failure.
- */
-static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
- int pnum,
- const struct ubi_vid_hdr *vid_hdr)
-{
- struct ubi_scan_volume *sv;
- struct rb_node **p = &si->volumes.rb_node, *parent = NULL;
-
- ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
-
- /* Walk the volume RB-tree to look if this volume is already present */
- while (*p) {
- parent = *p;
- sv = rb_entry(parent, struct ubi_scan_volume, rb);
-
- if (vol_id == sv->vol_id)
- return sv;
-
- if (vol_id > sv->vol_id)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- /* The volume is absent - add it */
- sv = kmalloc(sizeof(struct ubi_scan_volume), GFP_KERNEL);
- if (!sv)
- return ERR_PTR(-ENOMEM);
-
- sv->highest_lnum = sv->leb_count = 0;
- sv->vol_id = vol_id;
- sv->root = RB_ROOT;
- sv->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
- sv->data_pad = be32_to_cpu(vid_hdr->data_pad);
- sv->compat = vid_hdr->compat;
- sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
- : UBI_STATIC_VOLUME;
- if (vol_id > si->highest_vol_id)
- si->highest_vol_id = vol_id;
-
- rb_link_node(&sv->rb, parent, p);
- rb_insert_color(&sv->rb, &si->volumes);
- si->vols_found += 1;
- dbg_bld("added volume %d", vol_id);
- return sv;
-}
-
-/**
- * compare_lebs - find out which logical eraseblock is newer.
- * @ubi: UBI device description object
- * @seb: first logical eraseblock to compare
- * @pnum: physical eraseblock number of the second logical eraseblock to
- * compare
- * @vid_hdr: volume identifier header of the second logical eraseblock
- *
- * This function compares 2 copies of a LEB and informs which one is newer. In
- * case of success this function returns a positive value, in case of failure, a
- * negative error code is returned. The success return codes use the following
- * bits:
- * o bit 0 is cleared: the first PEB (described by @seb) is newer then the
- * second PEB (described by @pnum and @vid_hdr);
- * o bit 0 is set: the second PEB is newer;
- * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
- * o bit 1 is set: bit-flips were detected in the newer LEB;
- * o bit 2 is cleared: the older LEB is not corrupted;
- * o bit 2 is set: the older LEB is corrupted.
- */
-static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
- int pnum, const struct ubi_vid_hdr *vid_hdr)
-{
- void *buf;
- int len, err, second_is_newer, bitflips = 0, corrupted = 0;
- uint32_t data_crc, crc;
- struct ubi_vid_hdr *vh = NULL;
- unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
-
- if (seb->sqnum == 0 && sqnum2 == 0) {
- long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver);
-
- /*
- * UBI constantly increases the logical eraseblock version
- * number and it can overflow. Thus, we have to bear in mind
- * that versions that are close to %0xFFFFFFFF are less then
- * versions that are close to %0.
- *
- * The UBI WL unit guarantees that the number of pending tasks
- * is not greater then %0x7FFFFFFF. So, if the difference
- * between any two versions is greater or equivalent to
- * %0x7FFFFFFF, there was an overflow and the logical
- * eraseblock with lower version is actually newer then the one
- * with higher version.
- *
- * FIXME: but this is anyway obsolete and will be removed at
- * some point.
- */
- dbg_bld("using old crappy leb_ver stuff");
-
- if (v1 == v2) {
- ubi_err("PEB %d and PEB %d have the same version %lld",
- seb->pnum, pnum, v1);
- return -EINVAL;
- }
-
- abs = v1 - v2;
- if (abs < 0)
- abs = -abs;
-
- if (abs < 0x7FFFFFFF)
- /* Non-overflow situation */
- second_is_newer = (v2 > v1);
- else
- second_is_newer = (v2 < v1);
- } else
- /* Obviously the LEB with lower sequence counter is older */
- second_is_newer = sqnum2 > seb->sqnum;
-
- /*
- * Now we know which copy is newer. If the copy flag of the PEB with
- * newer version is not set, then we just return, otherwise we have to
- * check data CRC. For the second PEB we already have the VID header,
- * for the first one - we'll need to re-read it from flash.
- *
- * FIXME: this may be optimized so that we wouldn't read twice.
- */
-
- if (second_is_newer) {
- if (!vid_hdr->copy_flag) {
- /* It is not a copy, so it is newer */
- dbg_bld("second PEB %d is newer, copy_flag is unset",
- pnum);
- return 1;
- }
- } else {
- pnum = seb->pnum;
-
- vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
- if (!vh)
- return -ENOMEM;
-
- err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
- if (err) {
- if (err == UBI_IO_BITFLIPS)
- bitflips = 1;
- else {
- dbg_err("VID of PEB %d header is bad, but it "
- "was OK earlier", pnum);
- if (err > 0)
- err = -EIO;
-
- goto out_free_vidh;
- }
- }
-
- if (!vh->copy_flag) {
- /* It is not a copy, so it is newer */
- dbg_bld("first PEB %d is newer, copy_flag is unset",
- pnum);
- err = bitflips << 1;
- goto out_free_vidh;
- }
-
- vid_hdr = vh;
- }
-
- /* Read the data of the copy and check the CRC */
-
- len = be32_to_cpu(vid_hdr->data_size);
- buf = vmalloc(len);
- if (!buf) {
- err = -ENOMEM;
- goto out_free_vidh;
- }
-
- err = ubi_io_read_data(ubi, buf, pnum, 0, len);
- if (err && err != UBI_IO_BITFLIPS)
- goto out_free_buf;
-
- data_crc = be32_to_cpu(vid_hdr->data_crc);
- crc = crc32(UBI_CRC32_INIT, buf, len);
- if (crc != data_crc) {
- dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
- pnum, crc, data_crc);
- corrupted = 1;
- bitflips = 0;
- second_is_newer = !second_is_newer;
- } else {
- dbg_bld("PEB %d CRC is OK", pnum);
- bitflips = !!err;
- }
-
- vfree(buf);
- ubi_free_vid_hdr(ubi, vh);
-
- if (second_is_newer)
- dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
- else
- dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
-
- return second_is_newer | (bitflips << 1) | (corrupted << 2);
-
-out_free_buf:
- vfree(buf);
-out_free_vidh:
- ubi_free_vid_hdr(ubi, vh);
- return err;
-}
-
-/**
- * ubi_scan_add_used - add information about a physical eraseblock to the
- * scanning information.
- * @ubi: UBI device description object
- * @si: scanning information
- * @pnum: the physical eraseblock number
- * @ec: erase counter
- * @vid_hdr: the volume identifier header
- * @bitflips: if bit-flips were detected when this physical eraseblock was read
- *
- * This function adds information about a used physical eraseblock to the
- * 'used' tree of the corresponding volume. The function is rather complex
- * because it has to handle cases when this is not the first physical
- * eraseblock belonging to the same logical eraseblock, and the newer one has
- * to be picked, while the older one has to be dropped. This function returns
- * zero in case of success and a negative error code in case of failure.
- */
-int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
- int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
- int bitflips)
-{
- int err, vol_id, lnum;
- uint32_t leb_ver;
- unsigned long long sqnum;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb;
- struct rb_node **p, *parent = NULL;
-
- vol_id = be32_to_cpu(vid_hdr->vol_id);
- lnum = be32_to_cpu(vid_hdr->lnum);
- sqnum = be64_to_cpu(vid_hdr->sqnum);
- leb_ver = be32_to_cpu(vid_hdr->leb_ver);
-
- dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d",
- pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips);
-
- sv = add_volume(si, vol_id, pnum, vid_hdr);
- if (IS_ERR(sv) < 0)
- return PTR_ERR(sv);
-
- if (si->max_sqnum < sqnum)
- si->max_sqnum = sqnum;
-
- /*
- * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
- * if this is the first instance of this logical eraseblock or not.
- */
- p = &sv->root.rb_node;
- while (*p) {
- int cmp_res;
-
- parent = *p;
- seb = rb_entry(parent, struct ubi_scan_leb, u.rb);
- if (lnum != seb->lnum) {
- if (lnum < seb->lnum)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- continue;
- }
-
- /*
- * There is already a physical eraseblock describing the same
- * logical eraseblock present.
- */
-
- dbg_bld("this LEB already exists: PEB %d, sqnum %llu, "
- "LEB ver %u, EC %d", seb->pnum, seb->sqnum,
- seb->leb_ver, seb->ec);
-
- /*
- * Make sure that the logical eraseblocks have different
- * versions. Otherwise the image is bad.
- */
- if (seb->leb_ver == leb_ver && leb_ver != 0) {
- ubi_err("two LEBs with same version %u", leb_ver);
- ubi_dbg_dump_seb(seb, 0);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- return -EINVAL;
- }
-
- /*
- * Make sure that the logical eraseblocks have different
- * sequence numbers. Otherwise the image is bad.
- *
- * FIXME: remove 'sqnum != 0' check when leb_ver is removed.
- */
- if (seb->sqnum == sqnum && sqnum != 0) {
- ubi_err("two LEBs with same sequence number %llu",
- sqnum);
- ubi_dbg_dump_seb(seb, 0);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- return -EINVAL;
- }
-
- /*
- * Now we have to drop the older one and preserve the newer
- * one.
- */
- cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr);
- if (cmp_res < 0)
- return cmp_res;
-
- if (cmp_res & 1) {
- /*
-			 * This logical eraseblock is newer than the one
- * found earlier.
- */
- err = validate_vid_hdr(vid_hdr, sv, pnum);
- if (err)
- return err;
-
- if (cmp_res & 4)
- err = add_to_list(si, seb->pnum, seb->ec,
- &si->corr);
- else
- err = add_to_list(si, seb->pnum, seb->ec,
- &si->erase);
- if (err)
- return err;
-
- seb->ec = ec;
- seb->pnum = pnum;
- seb->scrub = ((cmp_res & 2) || bitflips);
- seb->sqnum = sqnum;
- seb->leb_ver = leb_ver;
-
- if (sv->highest_lnum == lnum)
- sv->last_data_size =
- be32_to_cpu(vid_hdr->data_size);
-
- return 0;
- } else {
- /*
-			 * This logical eraseblock is older than the one found
- * previously.
- */
- if (cmp_res & 4)
- return add_to_list(si, pnum, ec, &si->corr);
- else
- return add_to_list(si, pnum, ec, &si->erase);
- }
- }
-
- /*
- * We've met this logical eraseblock for the first time, add it to the
- * scanning information.
- */
-
- err = validate_vid_hdr(vid_hdr, sv, pnum);
- if (err)
- return err;
-
- seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL);
- if (!seb)
- return -ENOMEM;
-
- seb->ec = ec;
- seb->pnum = pnum;
- seb->lnum = lnum;
- seb->sqnum = sqnum;
- seb->scrub = bitflips;
- seb->leb_ver = leb_ver;
-
- if (sv->highest_lnum <= lnum) {
- sv->highest_lnum = lnum;
- sv->last_data_size = be32_to_cpu(vid_hdr->data_size);
- }
-
- sv->leb_count += 1;
- rb_link_node(&seb->u.rb, parent, p);
- rb_insert_color(&seb->u.rb, &sv->root);
- return 0;
-}
-
-/**
- * ubi_scan_find_sv - find information about a particular volume in the
- * scanning information.
- * @si: scanning information
- * @vol_id: the requested volume ID
- *
- * This function returns a pointer to the volume description or %NULL if there
- * are no data about this volume in the scanning information.
- */
-struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
- int vol_id)
-{
- struct ubi_scan_volume *sv;
- struct rb_node *p = si->volumes.rb_node;
-
- while (p) {
- sv = rb_entry(p, struct ubi_scan_volume, rb);
-
- if (vol_id == sv->vol_id)
- return sv;
-
- if (vol_id > sv->vol_id)
- p = p->rb_left;
- else
- p = p->rb_right;
- }
-
- return NULL;
-}
-
-/**
- * ubi_scan_find_seb - find information about a particular logical
- * eraseblock in the volume scanning information.
- * @sv: a pointer to the volume scanning information
- * @lnum: the requested logical eraseblock
- *
- * This function returns a pointer to the scanning logical eraseblock or %NULL
- * if there are no data about it in the scanning volume information.
- */
-struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
- int lnum)
-{
- struct ubi_scan_leb *seb;
- struct rb_node *p = sv->root.rb_node;
-
- while (p) {
- seb = rb_entry(p, struct ubi_scan_leb, u.rb);
-
- if (lnum == seb->lnum)
- return seb;
-
-		if (lnum > seb->lnum)
-			p = p->rb_right;
-		else
-			p = p->rb_left;
- }
-
- return NULL;
-}
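
Together, the two lookups above resolve a (volume ID, LEB number) pair to its scanning record. A minimal usage sketch, assuming an already-populated scanning info object:

static int find_peb_for_leb(const struct ubi_scan_info *si, int vol_id, int lnum)
{
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb;

	sv = ubi_scan_find_sv(si, vol_id);
	if (!sv)
		return -1;	/* volume was not seen during scanning */

	seb = ubi_scan_find_seb(sv, lnum);
	if (!seb)
		return -1;	/* this LEB is not mapped */

	return seb->pnum;	/* the PEB currently holding the LEB */
}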
-
-/**
- * ubi_scan_rm_volume - delete scanning information about a volume.
- * @si: scanning information
- * @sv: the volume scanning information to delete
- */
-void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
-{
- struct rb_node *rb;
- struct ubi_scan_leb *seb;
-
- dbg_bld("remove scanning information about volume %d", sv->vol_id);
-
- while ((rb = rb_first(&sv->root))) {
- seb = rb_entry(rb, struct ubi_scan_leb, u.rb);
- rb_erase(&seb->u.rb, &sv->root);
- list_add_tail(&seb->u.list, &si->erase);
- }
-
- rb_erase(&sv->rb, &si->volumes);
- kfree(sv);
- si->vols_found -= 1;
-}
-
-/**
- * ubi_scan_erase_peb - erase a physical eraseblock.
- * @ubi: UBI device description object
- * @si: scanning information
- * @pnum: physical eraseblock number to erase;
- * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown)
- *
- * This function erases physical eraseblock 'pnum', and writes the erase
- * counter header to it. This function should only be used on UBI device
- * initialization stages, when the EBA unit has not yet been initialized. This
- * function returns zero in case of success and a negative error code in case
- * of failure.
- */
-int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
- int pnum, int ec)
-{
- int err;
- struct ubi_ec_hdr *ec_hdr;
-
- if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
- /*
- * Erase counter overflow. Upgrade UBI and use 64-bit
- * erase counters internally.
- */
- ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
- return -EINVAL;
- }
-
- ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
- if (!ec_hdr)
- return -ENOMEM;
-
- ec_hdr->ec = cpu_to_be64(ec);
-
- err = ubi_io_sync_erase(ubi, pnum, 0);
- if (err < 0)
- goto out_free;
-
- err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
-
-out_free:
- kfree(ec_hdr);
- return err;
-}
-
-/**
- * ubi_scan_get_free_peb - get a free physical eraseblock.
- * @ubi: UBI device description object
- * @si: scanning information
- *
- * This function returns a free physical eraseblock. It is supposed to be
- * called on the UBI initialization stages when the wear-leveling unit is not
- * initialized yet. This function picks a physical eraseblock from one of the
- * lists, writes the EC header if it is needed, and removes it from the list.
- *
- * This function returns scanning physical eraseblock information in case of
- * success and an error code in case of failure.
- */
-struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
- struct ubi_scan_info *si)
-{
- int err = 0, i;
- struct ubi_scan_leb *seb;
-
- if (!list_empty(&si->free)) {
- seb = list_entry(si->free.next, struct ubi_scan_leb, u.list);
- list_del(&seb->u.list);
- dbg_bld("return free PEB %d, EC %d", seb->pnum, seb->ec);
- return seb;
- }
-
- for (i = 0; i < 2; i++) {
- struct list_head *head;
- struct ubi_scan_leb *tmp_seb;
-
- if (i == 0)
- head = &si->erase;
- else
- head = &si->corr;
-
- /*
- * We try to erase the first physical eraseblock from the @head
- * list and pick it if we succeed, or try to erase the
-		 * next one if not. And so forth. We don't want to worry
-		 * about bad eraseblocks here - they'll be handled later.
- */
- list_for_each_entry_safe(seb, tmp_seb, head, u.list) {
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
-
- err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1);
- if (err)
- continue;
-
- seb->ec += 1;
- list_del(&seb->u.list);
- dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec);
- return seb;
- }
- }
-
- ubi_err("no eraseblocks found");
- return ERR_PTR(-ENOSPC);
-}
-
-/**
- * process_eb - read UBI headers, check them and add corresponding data
- * to the scanning information.
- * @ubi: UBI device description object
- * @si: scanning information
- * @pnum: the physical eraseblock number
- *
- * This function returns a zero if the physical eraseblock was successfully
- * handled and a negative error code in case of failure.
- */
-static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
-{
- long long uninitialized_var(ec);
- int err, bitflips = 0, vol_id, ec_corr = 0;
-
- dbg_bld("scan PEB %d", pnum);
-
- /* Skip bad physical eraseblocks */
- err = ubi_io_is_bad(ubi, pnum);
- if (err < 0)
- return err;
- else if (err) {
- /*
-		 * FIXME: it is actually the duty of the I/O unit to initialize
- * this, but MTD does not provide enough information.
- */
- si->bad_peb_count += 1;
- return 0;
- }
-
- err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
- if (err < 0)
- return err;
- else if (err == UBI_IO_BITFLIPS)
- bitflips = 1;
- else if (err == UBI_IO_PEB_EMPTY)
- return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, &si->erase);
- else if (err == UBI_IO_BAD_EC_HDR) {
- /*
- * We have to also look at the VID header, possibly it is not
- * corrupted. Set %bitflips flag in order to make this PEB be
- * moved and EC be re-created.
- */
- ec_corr = 1;
- ec = UBI_SCAN_UNKNOWN_EC;
- bitflips = 1;
- }
-
- si->is_empty = 0;
-
- if (!ec_corr) {
- /* Make sure UBI version is OK */
- if (ech->version != UBI_VERSION) {
- ubi_err("this UBI version is %d, image version is %d",
- UBI_VERSION, (int)ech->version);
- return -EINVAL;
- }
-
- ec = be64_to_cpu(ech->ec);
- if (ec > UBI_MAX_ERASECOUNTER) {
- /*
- * Erase counter overflow. The EC headers have 64 bits
- * reserved, but we anyway make use of only 31 bit
- * values, as this seems to be enough for any existing
- * flash. Upgrade UBI and use 64-bit erase counters
- * internally.
- */
- ubi_err("erase counter overflow, max is %d",
- UBI_MAX_ERASECOUNTER);
- ubi_dbg_dump_ec_hdr(ech);
- return -EINVAL;
- }
- }
-
-	/* OK, we're done with the EC header, let's look at the VID header */
-
- err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
- if (err < 0)
- return err;
- else if (err == UBI_IO_BITFLIPS)
- bitflips = 1;
- else if (err == UBI_IO_BAD_VID_HDR ||
- (err == UBI_IO_PEB_FREE && ec_corr)) {
- /* VID header is corrupted */
- err = add_to_list(si, pnum, ec, &si->corr);
- if (err)
- return err;
- goto adjust_mean_ec;
- } else if (err == UBI_IO_PEB_FREE) {
- /* No VID header - the physical eraseblock is free */
- err = add_to_list(si, pnum, ec, &si->free);
- if (err)
- return err;
- goto adjust_mean_ec;
- }
-
- vol_id = be32_to_cpu(vidh->vol_id);
- if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
- int lnum = be32_to_cpu(vidh->lnum);
-
- /* Unsupported internal volume */
- switch (vidh->compat) {
- case UBI_COMPAT_DELETE:
- ubi_msg("\"delete\" compatible internal volume %d:%d"
- " found, remove it", vol_id, lnum);
- err = add_to_list(si, pnum, ec, &si->corr);
- if (err)
- return err;
- break;
-
- case UBI_COMPAT_RO:
- ubi_msg("read-only compatible internal volume %d:%d"
- " found, switch to read-only mode",
- vol_id, lnum);
- ubi->ro_mode = 1;
- break;
-
- case UBI_COMPAT_PRESERVE:
- ubi_msg("\"preserve\" compatible internal volume %d:%d"
- " found", vol_id, lnum);
- err = add_to_list(si, pnum, ec, &si->alien);
- if (err)
- return err;
- si->alien_peb_count += 1;
- return 0;
-
- case UBI_COMPAT_REJECT:
- ubi_err("incompatible internal volume %d:%d found",
- vol_id, lnum);
- return -EINVAL;
- }
- }
-
- /* Both UBI headers seem to be fine */
- err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips);
- if (err)
- return err;
-
-adjust_mean_ec:
- if (!ec_corr) {
- si->ec_sum += ec;
- si->ec_count += 1;
- if (ec > si->max_ec)
- si->max_ec = ec;
- if (ec < si->min_ec)
- si->min_ec = ec;
- }
-
- return 0;
-}
-
-/**
- * ubi_scan - scan an MTD device.
- * @ubi: UBI device description object
- *
- * This function does full scanning of an MTD device and returns complete
- * information about it. In case of failure, an error code is returned.
- */
-struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
-{
- int err, pnum;
- struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb;
- struct ubi_scan_info *si;
-
- si = kzalloc(sizeof(struct ubi_scan_info), GFP_KERNEL);
- if (!si)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&si->corr);
- INIT_LIST_HEAD(&si->free);
- INIT_LIST_HEAD(&si->erase);
- INIT_LIST_HEAD(&si->alien);
- si->volumes = RB_ROOT;
- si->is_empty = 1;
-
- err = -ENOMEM;
- ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
- if (!ech)
- goto out_si;
-
- vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
- if (!vidh)
- goto out_ech;
-
- for (pnum = 0; pnum < ubi->peb_count; pnum++) {
- cond_resched();
-
-		err = process_eb(ubi, si, pnum);
-		if (err < 0) {
-			printf("err: %d\n", err);
-			goto out_vidh;
-		}
- }
-
- dbg_msg("scanning is finished");
-
- /* Calculate mean erase counter */
- if (si->ec_count) {
- do_div(si->ec_sum, si->ec_count);
- si->mean_ec = si->ec_sum;
- }
-
- if (si->is_empty)
- ubi_msg("empty MTD device detected");
-
- /*
- * In case of unknown erase counter we use the mean erase counter
- * value.
- */
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
- }
-
- list_for_each_entry(seb, &si->free, u.list) {
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
- }
-
- list_for_each_entry(seb, &si->corr, u.list)
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
-
- list_for_each_entry(seb, &si->erase, u.list)
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
-
- err = paranoid_check_si(ubi, si);
- if (err) {
- if (err > 0)
- err = -EINVAL;
- goto out_vidh;
- }
-
- ubi_free_vid_hdr(ubi, vidh);
- kfree(ech);
-
- return si;
-
-out_vidh:
- ubi_free_vid_hdr(ubi, vidh);
-out_ech:
- kfree(ech);
-out_si:
- ubi_scan_destroy_si(si);
- return ERR_PTR(err);
-}
-
-/**
- * destroy_sv - free the scanning volume information
- * @sv: scanning volume information
- *
- * This function destroys the volume RB-tree (@sv->root) and the scanning
- * volume information.
- */
-static void destroy_sv(struct ubi_scan_volume *sv)
-{
- struct ubi_scan_leb *seb;
- struct rb_node *this = sv->root.rb_node;
-
- while (this) {
- if (this->rb_left)
- this = this->rb_left;
- else if (this->rb_right)
- this = this->rb_right;
- else {
- seb = rb_entry(this, struct ubi_scan_leb, u.rb);
- this = rb_parent(this);
- if (this) {
- if (this->rb_left == &seb->u.rb)
- this->rb_left = NULL;
- else
- this->rb_right = NULL;
- }
-
- kfree(seb);
- }
- }
- kfree(sv);
-}
-
-/**
- * ubi_scan_destroy_si - destroy scanning information.
- * @si: scanning information
- */
-void ubi_scan_destroy_si(struct ubi_scan_info *si)
-{
- struct ubi_scan_leb *seb, *seb_tmp;
- struct ubi_scan_volume *sv;
- struct rb_node *rb;
-
- list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
- list_del(&seb->u.list);
- kfree(seb);
- }
- list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
- list_del(&seb->u.list);
- kfree(seb);
- }
- list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
- list_del(&seb->u.list);
- kfree(seb);
- }
- list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
- list_del(&seb->u.list);
- kfree(seb);
- }
-
- /* Destroy the volume RB-tree */
- rb = si->volumes.rb_node;
- while (rb) {
- if (rb->rb_left)
- rb = rb->rb_left;
- else if (rb->rb_right)
- rb = rb->rb_right;
- else {
- sv = rb_entry(rb, struct ubi_scan_volume, rb);
-
- rb = rb_parent(rb);
- if (rb) {
- if (rb->rb_left == &sv->rb)
- rb->rb_left = NULL;
- else
- rb->rb_right = NULL;
- }
-
- destroy_sv(sv);
- }
- }
-
- kfree(si);
-}
-
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-
-/**
- * paranoid_check_si - check if the scanning information is correct and
- * consistent.
- * @ubi: UBI device description object
- * @si: scanning information
- *
- * This function returns zero if the scanning information is all right, %1 if
- * not and a negative error code if an error occurred.
- */
-static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
-{
- int pnum, err, vols_found = 0;
- struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb, *last_seb;
- uint8_t *buf;
-
- /*
- * At first, check that scanning information is OK.
- */
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- int leb_count = 0;
-
- cond_resched();
-
- vols_found += 1;
-
- if (si->is_empty) {
- ubi_err("bad is_empty flag");
- goto bad_sv;
- }
-
- if (sv->vol_id < 0 || sv->highest_lnum < 0 ||
- sv->leb_count < 0 || sv->vol_type < 0 || sv->used_ebs < 0 ||
- sv->data_pad < 0 || sv->last_data_size < 0) {
- ubi_err("negative values");
- goto bad_sv;
- }
-
- if (sv->vol_id >= UBI_MAX_VOLUMES &&
- sv->vol_id < UBI_INTERNAL_VOL_START) {
- ubi_err("bad vol_id");
- goto bad_sv;
- }
-
- if (sv->vol_id > si->highest_vol_id) {
- ubi_err("highest_vol_id is %d, but vol_id %d is there",
- si->highest_vol_id, sv->vol_id);
- goto out;
- }
-
- if (sv->vol_type != UBI_DYNAMIC_VOLUME &&
- sv->vol_type != UBI_STATIC_VOLUME) {
- ubi_err("bad vol_type");
- goto bad_sv;
- }
-
- if (sv->data_pad > ubi->leb_size / 2) {
- ubi_err("bad data_pad");
- goto bad_sv;
- }
-
- last_seb = NULL;
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
- cond_resched();
-
- last_seb = seb;
- leb_count += 1;
-
- if (seb->pnum < 0 || seb->ec < 0) {
- ubi_err("negative values");
- goto bad_seb;
- }
-
- if (seb->ec < si->min_ec) {
- ubi_err("bad si->min_ec (%d), %d found",
- si->min_ec, seb->ec);
- goto bad_seb;
- }
-
- if (seb->ec > si->max_ec) {
- ubi_err("bad si->max_ec (%d), %d found",
- si->max_ec, seb->ec);
- goto bad_seb;
- }
-
- if (seb->pnum >= ubi->peb_count) {
- ubi_err("too high PEB number %d, total PEBs %d",
- seb->pnum, ubi->peb_count);
- goto bad_seb;
- }
-
- if (sv->vol_type == UBI_STATIC_VOLUME) {
- if (seb->lnum >= sv->used_ebs) {
- ubi_err("bad lnum or used_ebs");
- goto bad_seb;
- }
- } else {
- if (sv->used_ebs != 0) {
- ubi_err("non-zero used_ebs");
- goto bad_seb;
- }
- }
-
- if (seb->lnum > sv->highest_lnum) {
- ubi_err("incorrect highest_lnum or lnum");
- goto bad_seb;
- }
- }
-
- if (sv->leb_count != leb_count) {
- ubi_err("bad leb_count, %d objects in the tree",
- leb_count);
- goto bad_sv;
- }
-
- if (!last_seb)
- continue;
-
- seb = last_seb;
-
- if (seb->lnum != sv->highest_lnum) {
- ubi_err("bad highest_lnum");
- goto bad_seb;
- }
- }
-
- if (vols_found != si->vols_found) {
- ubi_err("bad si->vols_found %d, should be %d",
- si->vols_found, vols_found);
- goto out;
- }
-
- /* Check that scanning information is correct */
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- last_seb = NULL;
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
- int vol_type;
-
- cond_resched();
-
- last_seb = seb;
-
- err = ubi_io_read_vid_hdr(ubi, seb->pnum, vidh, 1);
- if (err && err != UBI_IO_BITFLIPS) {
- ubi_err("VID header is not OK (%d)", err);
- if (err > 0)
- err = -EIO;
- return err;
- }
-
- vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
- UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
- if (sv->vol_type != vol_type) {
- ubi_err("bad vol_type");
- goto bad_vid_hdr;
- }
-
- if (seb->sqnum != be64_to_cpu(vidh->sqnum)) {
- ubi_err("bad sqnum %llu", seb->sqnum);
- goto bad_vid_hdr;
- }
-
- if (sv->vol_id != be32_to_cpu(vidh->vol_id)) {
- ubi_err("bad vol_id %d", sv->vol_id);
- goto bad_vid_hdr;
- }
-
- if (sv->compat != vidh->compat) {
- ubi_err("bad compat %d", vidh->compat);
- goto bad_vid_hdr;
- }
-
- if (seb->lnum != be32_to_cpu(vidh->lnum)) {
- ubi_err("bad lnum %d", seb->lnum);
- goto bad_vid_hdr;
- }
-
- if (sv->used_ebs != be32_to_cpu(vidh->used_ebs)) {
- ubi_err("bad used_ebs %d", sv->used_ebs);
- goto bad_vid_hdr;
- }
-
- if (sv->data_pad != be32_to_cpu(vidh->data_pad)) {
- ubi_err("bad data_pad %d", sv->data_pad);
- goto bad_vid_hdr;
- }
-
- if (seb->leb_ver != be32_to_cpu(vidh->leb_ver)) {
- ubi_err("bad leb_ver %u", seb->leb_ver);
- goto bad_vid_hdr;
- }
- }
-
- if (!last_seb)
- continue;
-
- if (sv->highest_lnum != be32_to_cpu(vidh->lnum)) {
- ubi_err("bad highest_lnum %d", sv->highest_lnum);
- goto bad_vid_hdr;
- }
-
- if (sv->last_data_size != be32_to_cpu(vidh->data_size)) {
- ubi_err("bad last_data_size %d", sv->last_data_size);
- goto bad_vid_hdr;
- }
- }
-
- /*
- * Make sure that all the physical eraseblocks are in one of the lists
- * or trees.
- */
- buf = kzalloc(ubi->peb_count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (pnum = 0; pnum < ubi->peb_count; pnum++) {
- err = ubi_io_is_bad(ubi, pnum);
- if (err < 0) {
- kfree(buf);
- return err;
-	} else if (err)
- buf[pnum] = 1;
- }
-
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb)
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
- buf[seb->pnum] = 1;
-
- list_for_each_entry(seb, &si->free, u.list)
- buf[seb->pnum] = 1;
-
- list_for_each_entry(seb, &si->corr, u.list)
- buf[seb->pnum] = 1;
-
- list_for_each_entry(seb, &si->erase, u.list)
- buf[seb->pnum] = 1;
-
- list_for_each_entry(seb, &si->alien, u.list)
- buf[seb->pnum] = 1;
-
- err = 0;
- for (pnum = 0; pnum < ubi->peb_count; pnum++)
- if (!buf[pnum]) {
- ubi_err("PEB %d is not referred", pnum);
- err = 1;
- }
-
- kfree(buf);
- if (err)
- goto out;
- return 0;
-
-bad_seb:
- ubi_err("bad scanning information about LEB %d", seb->lnum);
- ubi_dbg_dump_seb(seb, 0);
- ubi_dbg_dump_sv(sv);
- goto out;
-
-bad_sv:
- ubi_err("bad scanning information about volume %d", sv->vol_id);
- ubi_dbg_dump_sv(sv);
- goto out;
-
-bad_vid_hdr:
- ubi_err("bad scanning information about volume %d", sv->vol_id);
- ubi_dbg_dump_sv(sv);
- ubi_dbg_dump_vid_hdr(vidh);
-
-out:
- ubi_dbg_dump_stack();
- return 1;
-}
-
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
deleted file mode 100644
index 5ea0da4a0d..0000000000
--- a/drivers/mtd/ubi/scan.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (c) International Business Machines Corp., 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
- *
- * Author: Artem Bityutskiy (Битюцкий Артём)
- */
-
-#ifndef __UBI_SCAN_H__
-#define __UBI_SCAN_H__
-
-/* The erase counter value for this physical eraseblock is unknown */
-#define UBI_SCAN_UNKNOWN_EC (-1)
-
-/**
- * struct ubi_scan_leb - scanning information about a physical eraseblock.
- * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown)
- * @pnum: physical eraseblock number
- * @lnum: logical eraseblock number
- * @scrub: if this physical eraseblock needs scrubbing
- * @sqnum: sequence number
- * @u: union of RB-tree and list links
- * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects
- * @u.list: link in one of the eraseblock lists
- * @leb_ver: logical eraseblock version (obsolete)
- *
- * One object of this type is allocated for each physical eraseblock during
- * scanning.
- */
-struct ubi_scan_leb {
- int ec;
- int pnum;
- int lnum;
- int scrub;
- unsigned long long sqnum;
- union {
- struct rb_node rb;
- struct list_head list;
- } u;
- uint32_t leb_ver;
-};
-
-/**
- * struct ubi_scan_volume - scanning information about a volume.
- * @vol_id: volume ID
- * @highest_lnum: highest logical eraseblock number in this volume
- * @leb_count: number of logical eraseblocks in this volume
- * @vol_type: volume type
- * @used_ebs: number of used logical eraseblocks in this volume (only for
- * static volumes)
- * @last_data_size: amount of data in the last logical eraseblock of this
- * volume (always equivalent to the usable logical eraseblock size in case of
- * dynamic volumes)
- * @data_pad: how many bytes at the end of logical eraseblocks of this volume
- * are not used (due to volume alignment)
- * @compat: compatibility flags of this volume
- * @rb: link in the volume RB-tree
- * @root: root of the RB-tree containing all the eraseblocks belonging to this
- * volume (&struct ubi_scan_leb objects)
- *
- * One object of this type is allocated for each volume during scanning.
- */
-struct ubi_scan_volume {
- int vol_id;
- int highest_lnum;
- int leb_count;
- int vol_type;
- int used_ebs;
- int last_data_size;
- int data_pad;
- int compat;
- struct rb_node rb;
- struct rb_root root;
-};
-
-/**
- * struct ubi_scan_info - UBI scanning information.
- * @volumes: root of the volume RB-tree
- * @corr: list of corrupted physical eraseblocks
- * @free: list of free physical eraseblocks
- * @erase: list of physical eraseblocks which have to be erased
- * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
- *         those belonging to "preserve"-compatible internal volumes)
- * @bad_peb_count: count of bad physical eraseblocks
- * @vols_found: number of volumes found during scanning
- * @highest_vol_id: highest volume ID
- * @alien_peb_count: count of physical eraseblocks in the @alien list
- * @is_empty: flag indicating whether the MTD device is empty or not
- * @min_ec: lowest erase counter value
- * @max_ec: highest erase counter value
- * @max_sqnum: highest sequence number value
- * @mean_ec: mean erase counter value
- * @ec_sum: a temporary variable used when calculating @mean_ec
- * @ec_count: a temporary variable used when calculating @mean_ec
- *
- * This data structure contains the result of scanning and may be used by other
- * UBI units to build final UBI data structures, further error-recovery and so
- * on.
- */
-struct ubi_scan_info {
- struct rb_root volumes;
- struct list_head corr;
- struct list_head free;
- struct list_head erase;
- struct list_head alien;
- int bad_peb_count;
- int vols_found;
- int highest_vol_id;
- int alien_peb_count;
- int is_empty;
- int min_ec;
- int max_ec;
- unsigned long long max_sqnum;
- int mean_ec;
- uint64_t ec_sum;
- int ec_count;
-};
-
-struct ubi_device;
-struct ubi_vid_hdr;
-
-/*
- * ubi_scan_move_to_list - move a physical eraseblock from the volume tree to a
- * list.
- *
- * @sv: volume scanning information
- * @seb: scanning eraseblock information
- * @list: the list to move to
- */
-static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
- struct ubi_scan_leb *seb,
- struct list_head *list)
-{
- rb_erase(&seb->u.rb, &sv->root);
- list_add_tail(&seb->u.list, list);
-}
-
-int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
- int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
- int bitflips);
-struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
- int vol_id);
-struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
- int lnum);
-void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv);
-struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
- struct ubi_scan_info *si);
-int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
- int pnum, int ec);
-struct ubi_scan_info *ubi_scan(struct ubi_device *ubi);
-void ubi_scan_destroy_si(struct ubi_scan_info *si);
-
-#endif /* !__UBI_SCAN_H__ */
diff --git a/drivers/mtd/ubi/ubi-barebox.h b/drivers/mtd/ubi/ubi-barebox.h
index 757460796c..58c8a6239c 100644
--- a/drivers/mtd/ubi/ubi-barebox.h
+++ b/drivers/mtd/ubi/ubi-barebox.h
@@ -25,74 +25,20 @@
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/ubi.h>
-#include <linux/log2.h>
#define crc32(seed, data, length) crc32_no_comp(seed, (unsigned char const *)data, length)
-#define DPRINTK(format, args...) \
-do { \
- printf("%s[%d]: " format "\n", __func__, __LINE__, ##args); \
-} while (0)
-
/* configurable */
#define CONFIG_MTD_UBI_WL_THRESHOLD 4096
-#define CONFIG_MTD_UBI_BEB_RESERVE 1
#define UBI_IO_DEBUG 0
-/* debug options (Linux: drivers/mtd/ubi/Kconfig.debug) */
-#undef CONFIG_MTD_UBI_DEBUG
-#undef CONFIG_MTD_UBI_DEBUG_PARANOID
-#undef CONFIG_MTD_UBI_DEBUG_MSG
-#undef CONFIG_MTD_UBI_DEBUG_MSG_EBA
-#undef CONFIG_MTD_UBI_DEBUG_MSG_WL
-#undef CONFIG_MTD_UBI_DEBUG_MSG_IO
-#undef CONFIG_MTD_UBI_DEBUG_MSG_BLD
-#define CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
-
-/* build.c */
-#define get_device(...)
-#define put_device(...)
-#define ubi_sysfs_init(...) 0
-#define ubi_sysfs_close(...) do { } while (0)
-
-/* FIXME */
-#define MKDEV(...) 0
-#define MAJOR(dev) 0
-#define MINOR(dev) 0
-
-#define alloc_chrdev_region(...) 0
-#define unregister_chrdev_region(...)
-
-#define class_create(...) __builtin_return_address(0)
-#define class_create_file(...) 0
-#define class_remove_file(...)
-#define class_destroy(...)
-#define misc_register(...) 0
-#define misc_deregister(...)
-
-/* vmt.c */
-#define device_register(...) 0
-#define volume_sysfs_init(...) 0
-#define volume_sysfs_close(...) do { } while (0)
-
-/* kapi.c */
-
-/* eba.c */
-
-/* io.c */
-#define init_waitqueue_head(...) do { } while (0)
-#define wait_event_interruptible(...) 0
-#define wake_up_interruptible(...) do { } while (0)
-#define print_hex_dump(...) do { } while (0)
-#define dump_stack(...) do { } while (0)
-
-/* wl.c */
-#define task_pid_nr(x) 0
-#define set_freezable(...) do { } while (0)
-#define try_to_freeze(...) 0
-#define set_current_state(...) do { } while (0)
-#define kthread_should_stop(...) 0
-#define schedule() do { } while (0)
+#define DUMP_PREFIX_OFFSET 0
+static inline void print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+ memory_display(buf, 0, len, 4, 0);
+}
/* upd.c */
static inline unsigned long copy_from_user(void *dest, const void *src,
@@ -103,84 +49,23 @@ static inline unsigned long copy_from_user(void *dest, const void *src,
}
/* common */
-typedef int spinlock_t;
-typedef int wait_queue_head_t;
-#define spin_lock_init(...)
-#define spin_lock(...)
-#define spin_unlock(...)
-
-#define mutex_init(...)
-#define mutex_lock(...)
-#define mutex_unlock(...)
-
-#define init_rwsem(...) do { } while (0)
-#define down_read(...) do { } while (0)
-#define down_write(...) do { } while (0)
-#define down_write_trylock(...) 1
-#define up_read(...) do { } while (0)
-#define up_write(...) do { } while (0)
-
-struct kmem_cache { int i; };
-#define kmem_cache_create(...) 1
-#define kmem_cache_alloc(obj, gfp) malloc(sizeof(struct ubi_wl_entry))
-#define kmem_cache_free(obj, size) free(size)
-#define kmem_cache_destroy(...)
-
-#define cond_resched() do { } while (0)
-#define yield() do { } while (0)
-
-#define GFP_KERNEL 0
-#define GFP_NOFS 1
-#define __init
-#define __exit
+#define GFP_NOFS 1
-#define kthread_create(...) __builtin_return_address(0)
-#define kthread_stop(...) do { } while (0)
#define wake_up_process(...) do { } while (0)
#define BUS_ID_SIZE 20
-struct rw_semaphore { int i; };
-struct device {
- struct device *parent;
- struct class *class;
- char bus_id[BUS_ID_SIZE]; /* position on parent bus */
- dev_t devt; /* dev_t, creates the sysfs "dev" */
- void (*release)(struct device *dev);
-};
-struct mutex { int i; };
-struct kernel_param { int i; };
-
-struct cdev_ {
- int owner;
- dev_t dev;
-};
-#define cdev_init(...) do { } while (0)
-#define cdev_add(...) 0
-#define cdev_del(...) do { } while (0)
-
#define MAX_ERRNO 4095
-/* module */
-#define THIS_MODULE 0
-#define try_module_get(...) 1
-#define module_put(...) do { } while (0)
-#define module_init(...)
-#define module_exit(...)
-#define module_param_call(...)
-#define MODULE_PARM_DESC(...)
-#define MODULE_VERSION(...)
-
#ifndef __UBIFS_H__
#include "ubi.h"
#endif
/* functions */
-extern int ubi_mtd_param_parse(const char *val, struct kernel_param *kp);
-extern int ubi_init(void);
-extern void ubi_exit(void);
extern struct ubi_device *ubi_devices[];
+int ubi_cdev_add(struct ubi_device *ubi);
+
#endif
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
new file mode 100644
index 0000000000..ac2b24d178
--- /dev/null
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -0,0 +1,515 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Thomas Gleixner
+ * Frank Haverkamp
+ * Oliver Lohmann
+ * Andreas Arnez
+ */
+
+/*
+ * This file defines the layout of UBI headers and all the other UBI on-flash
+ * data structures.
+ */
+
+#ifndef __UBI_MEDIA_H__
+#define __UBI_MEDIA_H__
+
+#include <asm/byteorder.h>
+
+/* The version of UBI images supported by this implementation */
+#define UBI_VERSION 1
+
+/* The highest erase counter value supported by this implementation */
+#define UBI_MAX_ERASECOUNTER 0x7FFFFFFF
+
+/* The initial CRC32 value used when calculating CRC checksums */
+#define UBI_CRC32_INIT 0xFFFFFFFFU
+
+/* Erase counter header magic number (ASCII "UBI#") */
+#define UBI_EC_HDR_MAGIC 0x55424923
+/* Volume identifier header magic number (ASCII "UBI!") */
+#define UBI_VID_HDR_MAGIC 0x55424921
+
+/*
+ * Volume type constants used in the volume identifier header.
+ *
+ * @UBI_VID_DYNAMIC: dynamic volume
+ * @UBI_VID_STATIC: static volume
+ */
+enum {
+ UBI_VID_DYNAMIC = 1,
+ UBI_VID_STATIC = 2
+};
+
+/*
+ * Volume flags used in the volume table record.
+ *
+ * @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume
+ *
+ * %UBI_VTBL_AUTORESIZE_FLG flag can be set only for one volume in the volume
+ * table. UBI automatically re-sizes the volume which has this flag, growing
+ * it to the largest possible size. This means that if after the
+ * initialization UBI finds out that there are available physical eraseblocks
+ * present on the device, it automatically appends all of them to the volume
+ * (the physical eraseblocks reserved for bad eraseblocks handling and other
+ * reserved physical eraseblocks are not taken). So, if there is a volume with
+ * the %UBI_VTBL_AUTORESIZE_FLG flag set, the amount of available logical
+ * eraseblocks will be zero after UBI is loaded, because all of them will be
+ * reserved for this volume. Note, the %UBI_VTBL_AUTORESIZE_FLG bit is cleared
+ * after the volume has been initialized.
+ *
+ * The auto-resize feature is useful for device production purposes. For
+ * example, different NAND flash chips may have different numbers of initial
+ * bad eraseblocks, depending on the particular chip instance. Manufacturers
+ * of NAND chips usually guarantee that the number of initial bad eraseblocks
+ * does not exceed a certain percentage, e.g. 2%. When one creates a UBI image
+ * which will be flashed to the end devices in production, one does not know
+ * the exact number of good physical eraseblocks the NAND chip on the device
+ * will have, but this number is required to calculate the volume sizes and
+ * put them into the volume table of the UBI image. In this case, one of the
+ * volumes (e.g., the one which will store the root file system) is marked as
+ * "auto-resizable", and UBI will adjust its size on the first boot if needed.
+ *
+ * Note, first UBI reserves some amount of physical eraseblocks for bad
+ * eraseblock handling, and then re-sizes the volume, not vice-versa. This
+ * means that the pool of reserved physical eraseblocks will always be present.
+ */
+enum {
+ UBI_VTBL_AUTORESIZE_FLG = 0x01,
+};
+
+/*
+ * Compatibility constants used by internal volumes.
+ *
+ * @UBI_COMPAT_DELETE: delete this internal volume before anything is written
+ * to the flash
+ * @UBI_COMPAT_RO: attach this device in read-only mode
+ * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its
+ * physical eraseblocks, don't allow the wear-leveling
+ * sub-system to move them
+ * @UBI_COMPAT_REJECT: reject this UBI image
+ */
+enum {
+ UBI_COMPAT_DELETE = 1,
+ UBI_COMPAT_RO = 2,
+ UBI_COMPAT_PRESERVE = 4,
+ UBI_COMPAT_REJECT = 5
+};
+
+/* Sizes of UBI headers */
+#define UBI_EC_HDR_SIZE sizeof(struct ubi_ec_hdr)
+#define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr)
+
+/* Sizes of UBI headers without the ending CRC */
+#define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(__be32))
+#define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(__be32))
+
+/**
+ * struct ubi_ec_hdr - UBI erase counter header.
+ * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC)
+ * @version: version of UBI implementation which is supposed to accept this
+ * UBI image
+ * @padding1: reserved for future, zeroes
+ * @ec: the erase counter
+ * @vid_hdr_offset: where the VID header starts
+ * @data_offset: where the user data start
+ * @image_seq: image sequence number
+ * @padding2: reserved for future, zeroes
+ * @hdr_crc: erase counter header CRC checksum
+ *
+ * The erase counter header takes 64 bytes and has plenty of unused space for
+ * future usage. The unused fields are zeroed. The @version field is used to
+ * indicate the version of UBI implementation which is supposed to be able to
+ * work with this UBI image. If @version is greater than the current UBI
+ * version, the image is rejected. This may be useful in future if something
+ * is changed radically. This field is duplicated in the volume identifier
+ * header.
+ *
+ * The @vid_hdr_offset and @data_offset fields contain the offset of the
+ * volume identifier header and user data, relative to the beginning of the
+ * physical eraseblock. These values have to be the same for all physical
+ * eraseblocks.
+ *
+ * The @image_seq field is used to validate a UBI image that has been prepared
+ * for a UBI device. The @image_seq value can be any value, but it must be the
+ * same on all eraseblocks. UBI will ensure that all new erase counter headers
+ * also contain this value, and will check the value when attaching the flash.
+ * One way to make use of @image_seq is to increase its value by one every time
+ * an image is flashed over an existing image, then, if the flashing does not
+ * complete, UBI will detect the error when attaching the media.
+ */
+struct ubi_ec_hdr {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be64 ec; /* Warning: the current limit is 31-bit anyway! */
+ __be32 vid_hdr_offset;
+ __be32 data_offset;
+ __be32 image_seq;
+ __u8 padding2[32];
+ __be32 hdr_crc;
+} __packed;
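
To make the rules above concrete: checking an EC header reduces to validating the magic, the @version field and a CRC over everything but the trailing @hdr_crc word. A minimal sketch, where crc32() stands for whatever CRC-32 routine the port provides, seeded with %UBI_CRC32_INIT:

static int ec_hdr_looks_valid(const struct ubi_ec_hdr *hdr)
{
	uint32_t crc;

	if (be32_to_cpu(hdr->magic) != UBI_EC_HDR_MAGIC)
		return 0;
	if (hdr->version > UBI_VERSION)
		return 0;	/* image is newer than this implementation */

	/* the CRC covers the header minus its last, __be32-sized field */
	crc = crc32(UBI_CRC32_INIT, hdr, UBI_EC_HDR_SIZE_CRC);
	return crc == be32_to_cpu(hdr->hdr_crc);
}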
+
+/**
+ * struct ubi_vid_hdr - on-flash UBI volume identifier header.
+ * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC)
+ * @version: UBI implementation version which is supposed to accept this UBI
+ * image (%UBI_VERSION)
+ * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC)
+ * @copy_flag: if this logical eraseblock was copied from another physical
+ * eraseblock (for wear-leveling reasons)
+ * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE,
+ * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT)
+ * @vol_id: ID of this volume
+ * @lnum: logical eraseblock number
+ * @padding1: reserved for future, zeroes
+ * @data_size: how many bytes of data this logical eraseblock contains
+ * @used_ebs: total number of used logical eraseblocks in this volume
+ * @data_pad: how many bytes at the end of this physical eraseblock are not
+ * used
+ * @data_crc: CRC checksum of the data stored in this logical eraseblock
+ * @padding2: reserved for future, zeroes
+ * @sqnum: sequence number
+ * @padding3: reserved for future, zeroes
+ * @hdr_crc: volume identifier header CRC checksum
+ *
+ * The @sqnum is the value of the global sequence counter at the time when this
+ * VID header was created. The global sequence counter is incremented each time
+ * UBI writes a new VID header to the flash, i.e. when it maps a logical
+ * eraseblock to a new physical eraseblock. The global sequence counter is an
+ * unsigned 64-bit integer and we assume it never overflows. The @sqnum
+ * (sequence number) is used to distinguish between older and newer versions of
+ * logical eraseblocks.
+ *
+ * There are 2 situations when there may be more than one physical eraseblock
+ * corresponding to the same logical eraseblock, i.e., having the same @vol_id
+ * and @lnum values in the volume identifier header. Suppose we have a logical
+ * eraseblock L and it is mapped to the physical eraseblock P.
+ *
+ * 1. Because UBI may erase physical eraseblocks asynchronously, the following
+ * situation is possible: L is asynchronously erased, so P is scheduled for
+ * erasure, then L is written to, i.e. mapped to another physical eraseblock P1,
+ * so P1 is written to, then an unclean reboot happens. Result - there are 2
+ * physical eraseblocks P and P1 corresponding to the same logical eraseblock
+ * L. But P1 has greater sequence number, so UBI picks P1 when it attaches the
+ * flash.
+ *
+ * 2. From time to time UBI moves logical eraseblocks to other physical
+ * eraseblocks for wear-leveling reasons. If, for example, UBI moves L from P
+ * to P1, and an unclean reboot happens before P is physically erased, there
+ * are two physical eraseblocks P and P1 corresponding to L and UBI has to
+ * select one of them when the flash is attached. The @sqnum field says which
+ * PEB is the original (obviously P will have lower @sqnum) and the copy. But
+ * it is not enough to select the physical eraseblock with the higher sequence
+ * number, because the unclean reboot could have happen in the middle of the
+ * copying process, so the data in P is corrupted. It is also not enough to
+ * just select the physical eraseblock with lower sequence number, because the
+ * data there may be old (consider a case if more data was added to P1 after
+ * the copying). Moreover, the unclean reboot may happen when the erasure of P
+ * was just started, so it results in an unstable P, which is "mostly" OK, but
+ * still has unstable bits.
+ *
+ * UBI uses the @copy_flag field to indicate that this logical eraseblock is a
+ * copy. UBI also calculates data CRC when the data is moved and stores it at
+ * the @data_crc field of the copy (P1). So when UBI needs to pick one physical
+ * eraseblock of two (P or P1), the @copy_flag of the newer one (P1) is
+ * examined. If it is cleared, the situation is simple and the newer one is
+ * picked. If it is set, the data CRC of the copy (P1) is examined. If the CRC
+ * checksum is correct, this physical eraseblock is selected (P1). Otherwise
+ * the older one (P) is selected.
+ *
+ * There are 2 sorts of volumes in UBI: user volumes and internal volumes.
+ * Internal volumes are not seen from outside and are used for various internal
+ * UBI purposes. In this implementation there is only one internal volume - the
+ * layout volume. Internal volumes are the main mechanism of UBI extensions.
+ * For example, in future one may introduce a journal internal volume. Internal
+ * volumes have their own reserved range of IDs.
+ *
+ * The @compat field is only used for internal volumes and contains the "degree
+ * of their compatibility". It is always zero for user volumes. This field
+ * provides a mechanism to introduce UBI extensions and to be still compatible
+ * with older UBI binaries. For example, if someone introduced a journal in
+ * future, he would probably use %UBI_COMPAT_DELETE compatibility for the
+ * journal volume. And in this case, older UBI binaries, which know nothing
+ * about the journal volume, would just delete this volume and work perfectly
+ * fine. This is similar to what Ext2fs does when it is fed by an Ext3fs image
+ * - it just ignores the Ext3fs journal.
+ *
+ * The @data_crc field contains the CRC checksum of the contents of the logical
+ * eraseblock if this is a static volume. In case of dynamic volumes, it does
+ * not contain the CRC checksum as a rule. The only exception is when the
+ * data of the physical eraseblock was moved by the wear-leveling sub-system,
+ * then the wear-leveling sub-system calculates the data CRC and stores it in
+ * the @data_crc field. And of course, the @copy_flag is %1 in this case.
+ *
+ * The @data_size field is used only for static volumes because UBI has to know
+ * how many bytes of data are stored in this eraseblock. For dynamic volumes,
+ * this field usually contains zero. The only exception is when the data of the
+ * physical eraseblock was moved to another physical eraseblock for
+ * wear-leveling reasons. In this case, UBI calculates CRC checksum of the
+ * contents and uses both @data_crc and @data_size fields. In this case, the
+ * @data_size field contains data size.
+ *
+ * The @used_ebs field is used only for static volumes and indicates how many
+ * eraseblocks the data of the volume takes. For dynamic volumes this field is
+ * not used and always contains zero.
+ *
+ * The @data_pad is calculated when volumes are created using the alignment
+ * parameter. So, effectively, the @data_pad field reduces the size of logical
+ * eraseblocks of this volume. This is very handy when one uses block-oriented
+ * software (say, cramfs) on top of the UBI volume.
+ */
+struct ubi_vid_hdr {
+ __be32 magic;
+ __u8 version;
+ __u8 vol_type;
+ __u8 copy_flag;
+ __u8 compat;
+ __be32 vol_id;
+ __be32 lnum;
+ __u8 padding1[4];
+ __be32 data_size;
+ __be32 used_ebs;
+ __be32 data_pad;
+ __be32 data_crc;
+ __u8 padding2[4];
+ __be64 sqnum;
+ __u8 padding3[12];
+ __be32 hdr_crc;
+} __packed;
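
The P/P1 selection policy described above fits in a few lines. A sketch, assuming @newer_vh points at the VID header with the higher @sqnum and @data holds the payload read from that PEB:

	int newer_wins;

	if (!newer_vh->copy_flag) {
		/* not a copy: the newer PEB is simply the valid one */
		newer_wins = 1;
	} else {
		/* a copy: trust it only if its data CRC checks out */
		uint32_t crc = crc32(UBI_CRC32_INIT, data,
				     be32_to_cpu(newer_vh->data_size));
		newer_wins = (crc == be32_to_cpu(newer_vh->data_crc));
	}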
+
+/* Internal UBI volumes count */
+#define UBI_INT_VOL_COUNT 1
+
+/*
+ * Starting ID of internal volumes: 0x7fffefff.
+ * There is reserved room for 4096 internal volumes.
+ */
+#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
+
+/* The layout volume contains the volume table */
+
+#define UBI_LAYOUT_VOLUME_ID UBI_INTERNAL_VOL_START
+#define UBI_LAYOUT_VOLUME_TYPE UBI_VID_DYNAMIC
+#define UBI_LAYOUT_VOLUME_ALIGN 1
+#define UBI_LAYOUT_VOLUME_EBS 2
+#define UBI_LAYOUT_VOLUME_NAME "layout volume"
+#define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT
+
+/* The maximum number of volumes per one UBI device */
+#define UBI_MAX_VOLUMES 128
+
+/* The maximum volume name length */
+#define UBI_VOL_NAME_MAX 127
+
+/* Size of the volume table record */
+#define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record)
+
+/* Size of the volume table record without the ending CRC */
+#define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(__be32))
+
+/**
+ * struct ubi_vtbl_record - a record in the volume table.
+ * @reserved_pebs: how many physical eraseblocks are reserved for this volume
+ * @alignment: volume alignment
+ * @data_pad: how many bytes are unused at the end of the each physical
+ * eraseblock to satisfy the requested alignment
+ * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
+ * @upd_marker: if volume update was started but not finished
+ * @name_len: volume name length
+ * @name: the volume name
+ * @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG)
+ * @padding: reserved, zeroes
+ * @crc: a CRC32 checksum of the record
+ *
+ * The volume table records are stored in the volume table, which is stored in
+ * the layout volume. The layout volume consists of 2 logical eraseblocks, each
+ * of which contains a copy of the volume table (i.e., the volume table is
+ * duplicated). The volume table is an array of &struct ubi_vtbl_record
+ * objects indexed by the volume ID.
+ *
+ * If the size of the logical eraseblock is large enough to fit
+ * %UBI_MAX_VOLUMES records, the volume table contains %UBI_MAX_VOLUMES
+ * records. Otherwise, it contains as many records as it can fit (i.e., size of
+ * logical eraseblock divided by sizeof(struct ubi_vtbl_record)).
+ *
+ * The @upd_marker flag is used to implement volume update. It is set to %1
+ * before update and set to %0 after the update. So if the update operation was
+ * interrupted, UBI knows that the volume is corrupted.
+ *
+ * The @alignment field is specified when the volume is created and cannot be
+ * later changed. It may be useful, for example, when a block-oriented file
+ * system works on top of UBI. The @data_pad field is calculated using the
+ * logical eraseblock size and @alignment. The alignment must be a multiple of the
+ * minimal flash I/O unit. If @alignment is 1, all the available space of
+ * the physical eraseblocks is used.
+ *
+ * Empty records contain all zeroes and the CRC checksum of those zeroes.
+ */
+struct ubi_vtbl_record {
+ __be32 reserved_pebs;
+ __be32 alignment;
+ __be32 data_pad;
+ __u8 vol_type;
+ __u8 upd_marker;
+ __be16 name_len;
+ __u8 name[UBI_VOL_NAME_MAX+1];
+ __u8 flags;
+ __u8 padding[23];
+ __be32 crc;
+} __packed;
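
Two of the rules above are worth spelling out: an empty record is all zeroes followed by the CRC of those zeroes, and at most one record may carry %UBI_VTBL_AUTORESIZE_FLG. A sketch of both checks (the helper names are ours):

static int vtbl_rec_is_empty(const struct ubi_vtbl_record *r)
{
	static const struct ubi_vtbl_record zero_rec;	/* all-zero template */

	return !memcmp(r, &zero_rec, UBI_VTBL_RECORD_SIZE_CRC) &&
	       be32_to_cpu(r->crc) == crc32(UBI_CRC32_INIT, &zero_rec,
					    UBI_VTBL_RECORD_SIZE_CRC);
}

static int find_autoresize_vol(const struct ubi_vtbl_record *recs, int count)
{
	int i, found = -1;

	for (i = 0; i < count; i++) {
		if (!(recs[i].flags & UBI_VTBL_AUTORESIZE_FLG))
			continue;
		if (found >= 0)
			return -1;	/* flag set on more than one record */
		found = i;
	}
	return found;			/* record index, or -1 if none */
}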
+
+/* UBI fastmap on-flash data structures */
+
+#define UBI_FM_SB_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 1)
+#define UBI_FM_DATA_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 2)
+
+/* fastmap on-flash data structure format version */
+#define UBI_FM_FMT_VERSION 1
+
+#define UBI_FM_SB_MAGIC 0x7B11D69F
+#define UBI_FM_HDR_MAGIC 0xD4B82EF7
+#define UBI_FM_VHDR_MAGIC 0xFA370ED1
+#define UBI_FM_POOL_MAGIC 0x67AF4D08
+#define UBI_FM_EBA_MAGIC 0xf0c040a8
+
+/* A fastmap super block can be located between PEB 0 and
+ * UBI_FM_MAX_START */
+#define UBI_FM_MAX_START 64
+
+/* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */
+#define UBI_FM_MAX_BLOCKS 32
+
+/* 5% of the total number of PEBs have to be scanned while attaching
+ * from a fastmap.
+ * But the size of this pool is limited to be between UBI_FM_MIN_POOL_SIZE and
+ * UBI_FM_MAX_POOL_SIZE */
+#define UBI_FM_MIN_POOL_SIZE 8
+#define UBI_FM_MAX_POOL_SIZE 256
+
+#define UBI_FM_WL_POOL_SIZE 25
+
+/**
+ * struct ubi_fm_sb - UBI fastmap super block
+ * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
+ * @version: format version of this fastmap
+ * @data_crc: CRC over the fastmap data
+ * @used_blocks: number of PEBs used by this fastmap
+ * @block_loc: an array containing the location of all PEBs of the fastmap
+ * @block_ec: the erase counter of each used PEB
+ * @sqnum: highest sequence number value at the time the fastmap was taken
+ *
+ */
+struct ubi_fm_sb {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be32 data_crc;
+ __be32 used_blocks;
+ __be32 block_loc[UBI_FM_MAX_BLOCKS];
+ __be32 block_ec[UBI_FM_MAX_BLOCKS];
+ __be64 sqnum;
+ __u8 padding2[32];
+} __packed;
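
Because the super block may sit anywhere in the first %UBI_FM_MAX_START PEBs, attach code has to probe for it. A rough sketch of that probe; read_vid_hdr_vol_id() is a hypothetical helper standing in for the real VID-header read plus its error handling:

static int find_fm_sb(struct ubi_device *ubi)
{
	int pnum;

	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		/* hypothetical helper: read the VID header, return vol_id */
		int vol_id = read_vid_hdr_vol_id(ubi, pnum);

		if (vol_id == UBI_FM_SB_VOLUME_ID)
			return pnum;	/* super block lives here */
	}
	return -1;			/* no fastmap - fall back to scanning */
}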
+
+/**
+ * struct ubi_fm_hdr - header of the fastmap data set
+ * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
+ * @free_peb_count: number of free PEBs known by this fastmap
+ * @used_peb_count: number of used PEBs known by this fastmap
+ * @scrub_peb_count: number of PEBs known by this fastmap that need scrubbing
+ * @bad_peb_count: number of bad PEBs known by this fastmap
+ * @erase_peb_count: number of PEBs which have to be erased
+ * @vol_count: number of UBI volumes known by this fastmap
+ */
+struct ubi_fm_hdr {
+ __be32 magic;
+ __be32 free_peb_count;
+ __be32 used_peb_count;
+ __be32 scrub_peb_count;
+ __be32 bad_peb_count;
+ __be32 erase_peb_count;
+ __be32 vol_count;
+ __u8 padding[4];
+} __packed;
+
+/* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */
+
+/**
+ * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
+ * @magic: pool magic number (%UBI_FM_POOL_MAGIC)
+ * @size: current pool size
+ * @max_size: maximal pool size
+ * @pebs: an array containing the location of all PEBs in this pool
+ */
+struct ubi_fm_scan_pool {
+ __be32 magic;
+ __be16 size;
+ __be16 max_size;
+ __be32 pebs[UBI_FM_MAX_POOL_SIZE];
+ __be32 padding[4];
+} __packed;
+
+/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
+
+/**
+ * struct ubi_fm_ec - stores the erase counter of a PEB
+ * @pnum: PEB number
+ * @ec: ec of this PEB
+ */
+struct ubi_fm_ec {
+ __be32 pnum;
+ __be32 ec;
+} __packed;
+
+/**
+ * struct ubi_fm_volhdr - Fastmap volume header
+ * It identifies the start of an EBA table.
+ * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
+ * @vol_id: volume id of the fastmapped volume
+ * @vol_type: type of the fastmapped volume
+ * @data_pad: data_pad value of the fastmapped volume
+ * @used_ebs: number of used LEBs within this volume
+ * @last_eb_bytes: number of bytes used in the last LEB
+ */
+struct ubi_fm_volhdr {
+ __be32 magic;
+ __be32 vol_id;
+ __u8 vol_type;
+ __u8 padding1[3];
+ __be32 data_pad;
+ __be32 used_ebs;
+ __be32 last_eb_bytes;
+ __u8 padding2[8];
+} __packed;
+
+/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
+
+/**
+ * struct ubi_fm_eba - denotes an association between a PEB and a LEB
+ * @magic: EBA table magic number
+ * @reserved_pebs: number of table entries
+ * @pnum: PEB number of LEB (LEB is the index)
+ */
+struct ubi_fm_eba {
+ __be32 magic;
+ __be32 reserved_pebs;
+ __be32 pnum[0];
+} __packed;
+#endif /* !__UBI_MEDIA_H__ */
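
Pulling the fastmap records together, the data stream assembled from the fastmap PEBs is parsed strictly in the order the comments above describe. A schematic walk under that assumption (magic and CRC validation omitted):

	char *p = fm_raw;	/* contents of the fastmap PEBs, concatenated */

	struct ubi_fm_sb *fmsb         = (void *)p; p += sizeof(*fmsb);
	struct ubi_fm_hdr *fmhdr       = (void *)p; p += sizeof(*fmhdr);
	struct ubi_fm_scan_pool *fmpl  = (void *)p; p += sizeof(*fmpl);
	struct ubi_fm_scan_pool *fmpl2 = (void *)p; p += sizeof(*fmpl2);
	/* ...then (free + used + scrub + erase)_peb_count struct ubi_fm_ec
	 * entries, and for each of vol_count volumes one struct ubi_fm_volhdr
	 * followed by its struct ubi_fm_eba table of reserved_pebs entries. */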
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 964a3c4ee9..b13be53862 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -12,6 +12,9 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
@@ -19,36 +22,16 @@
#ifndef __UBI_UBI_H__
#define __UBI_UBI_H__
-#ifdef UBI_LINUX
-#include <linux/init.h>
+#include <common.h>
+#include <malloc.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/rwsem.h>
-#include <linux/spinlock.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/string.h>
-#include <linux/vmalloc.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/ubi.h>
-#endif
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/rbtree.h>
-#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/ubi.h>
#include <mtd/ubi-media.h>
-
-#include "scan.h"
-#include "debug.h"
+#include "ubi-barebox.h"
/* Maximum number of supported UBI devices */
#define UBI_MAX_DEVICES 32
@@ -57,21 +40,21 @@
#define UBI_NAME_STR "ubi"
/* Normal UBI messages */
-#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
+#define ubi_msg(fmt, ...) pr_info("UBI: " fmt "\n", ##__VA_ARGS__)
/* UBI warning messages */
-#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
- __func__, ##__VA_ARGS__)
+#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n", \
+ __func__, ##__VA_ARGS__)
/* UBI error messages */
-#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
+#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n", \
__func__, ##__VA_ARGS__)
-/* Lowest number PEBs reserved for bad PEB handling */
-#define MIN_RESEVED_PEBS 2
-
/* Background thread name pattern */
#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
-/* This marker in the EBA table means that the LEB is um-mapped */
+/*
+ * This marker in the EBA table means that the LEB is un-mapped.
+ * NOTE! It has to have the same value as %UBI_ALL.
+ */
#define UBI_LEB_UNMAPPED -1
/*
@@ -81,37 +64,98 @@
#define UBI_IO_RETRIES 3
/*
- * Error codes returned by the I/O unit.
- *
- * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only
- * 0xFF bytes
- * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a
- * valid erase counter header, and the rest are %0xFF bytes
- * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC)
- * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or
- * CRC)
+ * Length of the protection queue. The length is effectively equivalent to the
+ * number of (global) erase cycles PEBs are protected from the wear-leveling
+ * worker.
+ */
+#define UBI_PROT_QUEUE_LEN 10
+
+/* The volume ID/LEB number/erase counter is unknown */
+#define UBI_UNKNOWN -1
+
+/*
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number plus 1 for the trailing zero byte).
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+
+/*
+ * Error codes returned by the I/O sub-system.
+ *
+ * UBI_IO_FF: the read region of flash contains only 0xFFs
+ * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data
+ * integrity error reported by the MTD driver
+ * (uncorrectable ECC error in case of NAND)
+ * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC)
+ * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but also there was a
+ * data integrity error reported by the MTD driver
+ * (uncorrectable ECC error in case of NAND)
* UBI_IO_BITFLIPS: bit-flips were detected and corrected
+ *
+ * Note, it is probably better to have bit-flip and ebadmsg as flags which
+ * can be OR'ed with other error codes. But this is a big change because
+ * there are many callers, so it is not worth the risk of introducing a bug.
+ */
+enum {
+ UBI_IO_FF = 1,
+ UBI_IO_FF_BITFLIPS,
+ UBI_IO_BAD_HDR,
+ UBI_IO_BAD_HDR_EBADMSG,
+ UBI_IO_BITFLIPS,
+};
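
A minimal sketch (not part of the patch) of how a caller might dispatch on
these codes; check_peb_sketch() is a hypothetical name, while the enum
constants and ubi_wl_scrub_peb(), declared further down in this header, come
from the source:

static int check_peb_sketch(struct ubi_device *ubi, int pnum, int err)
{
	switch (err) {
	case 0:
		return 0;		/* clean read */
	case UBI_IO_BITFLIPS:
		/* data was corrected - schedule the PEB for scrubbing */
		return ubi_wl_scrub_peb(ubi, pnum);
	case UBI_IO_FF:
	case UBI_IO_FF_BITFLIPS:
		return 0;		/* the region is empty (all 0xFF) */
	case UBI_IO_BAD_HDR:
	case UBI_IO_BAD_HDR_EBADMSG:
		return -EBADMSG;	/* corrupted EC or VID header */
	default:
		return err;		/* genuine I/O error */
	}
}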
+
+/*
+ * Return codes of the 'ubi_eba_copy_leb()' function.
+ *
+ * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source
+ * PEB was put meanwhile, or there is I/O on the source PEB
+ * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source
+ * PEB
+ * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target
+ * PEB
+ * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
+ * PEB
+ * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the
+ * target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
+ */
+enum {
+ MOVE_CANCEL_RACE = 1,
+ MOVE_SOURCE_RD_ERR,
+ MOVE_TARGET_RD_ERR,
+ MOVE_TARGET_WR_ERR,
+ MOVE_TARGET_BITFLIPS,
+ MOVE_RETRY,
+};
+
+/*
+ * Return codes of the fastmap sub-system
+ *
+ * UBI_NO_FASTMAP: No fastmap super block was found
+ * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
*/
enum {
- UBI_IO_PEB_EMPTY = 1,
- UBI_IO_PEB_FREE,
- UBI_IO_BAD_EC_HDR,
- UBI_IO_BAD_VID_HDR,
- UBI_IO_BITFLIPS
+ UBI_NO_FASTMAP = 1,
+ UBI_BAD_FASTMAP,
};
/**
* struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
+ * @u.rb: link in the corresponding (free/used) RB-tree
+ * @u.list: link in the protection queue
* @ec: erase counter
* @pnum: physical eraseblock number
*
- * This data structure is used in the WL unit. Each physical eraseblock has a
- * corresponding &struct wl_entry object which may be kept in different
- * RB-trees. See WL unit for details.
+ * This data structure is used in the WL sub-system. Each physical eraseblock
+ * has a corresponding &struct wl_entry object which may be kept in different
+ * RB-trees. See WL sub-system for details.
*/
struct ubi_wl_entry {
- struct rb_node rb;
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
int ec;
int pnum;
};
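
A minimal sketch (not part of the patch) of how such entries end up in an
erase-counter-ordered RB-tree, loosely following the WL sub-system; the
function name is hypothetical, and rb_entry()/rb_link_node()/rb_insert_color()
are the stock Linux RB-tree primitives. Because @u is a union, an entry linked
into a tree this way must not also sit on a list at the same time:

static void wl_tree_add_sketch(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		/* keep the tree ordered by erase counter, then PEB number */
		if (e->ec < e1->ec)
			p = &parent->rb_left;
		else if (e->ec > e1->ec)
			p = &parent->rb_right;
		else if (e->pnum < e1->pnum)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}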
@@ -125,22 +169,77 @@ struct ubi_wl_entry {
* @mutex: read/write mutex to implement read/write access serialization to
* the (@vol_id, @lnum) logical eraseblock
*
- * This data structure is used in the EBA unit to implement per-LEB locking.
- * When a logical eraseblock is being locked - corresponding
+ * This data structure is used in the EBA sub-system to implement per-LEB
+ * locking. When a logical eraseblock is being locked - corresponding
* &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree).
- * See EBA unit for details.
+ * See EBA sub-system for details.
*/
struct ubi_ltree_entry {
struct rb_node rb;
int vol_id;
int lnum;
int users;
- struct rw_semaphore mutex;
+};
+
+/**
+ * struct ubi_rename_entry - volume re-name description data structure.
+ * @new_name_len: new volume name length
+ * @new_name: new volume name
+ * @remove: if not zero, this volume should be removed, not re-named
+ * @desc: descriptor of the volume
+ * @list: links re-name entries into a list
+ *
+ * This data structure is utilized in the multiple volume re-name code. Namely,
+ * UBI first creates a list of &struct ubi_rename_entry objects from the
+ * &struct ubi_rnvol_req request object, and then utilizes this list to do all
+ * the job.
+ */
+struct ubi_rename_entry {
+ int new_name_len;
+ char new_name[UBI_VOL_NAME_MAX + 1];
+ int remove;
+ struct ubi_volume_desc *desc;
+ struct list_head list;
};
struct ubi_volume_desc;
/**
+ * struct ubi_fastmap_layout - in-memory fastmap data structure.
+ * @e: PEBs used by the current fastmap
+ * @to_be_tortured: if non-zero, the corresponding PEB has to be tortured
+ * @used_blocks: number of used PEBs
+ * @max_pool_size: maximal size of the user pool
+ * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
+ */
+struct ubi_fastmap_layout {
+ struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
+ int to_be_tortured[UBI_FM_MAX_BLOCKS];
+ int used_blocks;
+ int max_pool_size;
+ int max_wl_pool_size;
+};
+
+/**
+ * struct ubi_fm_pool - in-memory fastmap pool
+ * @pebs: PEBs in this pool
+ * @used: number of used PEBs
+ * @size: total number of PEBs in this pool
+ * @max_size: maximal size of the pool
+ *
+ * A pool gets filled with up to @max_size PEBs. If all PEBs within the pool
+ * are used, a new fastmap is written to the flash and the pool is refilled
+ * with empty PEBs.
+ */
+struct ubi_fm_pool {
+ int pebs[UBI_FM_MAX_POOL_SIZE];
+ int used;
+ int size;
+ int max_size;
+};
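
The refill discipline described above can be pictured with a short sketch
(illustrative only; error handling is pared down, the helper name is invented,
and ubi_update_fastmap() is the function declared near the end of this header):

static int pool_get_peb_sketch(struct ubi_device *ubi,
			       struct ubi_fm_pool *pool)
{
	if (pool->used == pool->size) {
		/* pool exhausted - writing a new fastmap refills the pools */
		int err = ubi_update_fastmap(ubi);

		if (err)
			return err;
	}

	return pool->pebs[pool->used++];
}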
+
+/**
* struct ubi_volume - UBI volume description data structure.
* @dev: device object to make use of the Linux device model
* @cdev: character device object to create character device
@@ -166,8 +265,6 @@ struct ubi_volume_desc;
* @upd_ebs: how many eraseblocks are expected to be updated
* @ch_lnum: LEB number which is being changing by the atomic LEB change
* operation
- * @ch_dtype: data persistency type which is being changing by the atomic LEB
- * change operation
* @upd_bytes: how many bytes are expected to be received for volume update or
* atomic LEB change
* @upd_received: how many bytes were already received for volume update or
@@ -181,10 +278,7 @@ struct ubi_volume_desc;
* @upd_marker: %1 if the update marker is set for this volume
* @updating: %1 if the volume is being updated
* @changing_leb: %1 if the atomic LEB change ioctl command is in progress
- *
- * @gluebi_desc: gluebi UBI volume descriptor
- * @gluebi_refcount: reference count of the gluebi MTD device
- * @gluebi_mtd: MTD device description object of the gluebi MTD device
+ * @direct_writes: %1 if direct writes are enabled for this volume
*
* The @corrupted field indicates that the volume's contents is corrupted.
* Since UBI protects only static volumes, this field is not relevant to
@@ -195,7 +289,7 @@ struct ubi_volume_desc;
* the moment or is damaged because of an unclean reboot.
*/
struct ubi_volume {
- struct device dev;
+ struct device_d dev;
struct cdev cdev;
struct ubi_device *ubi;
int vol_id;
@@ -213,11 +307,10 @@ struct ubi_volume {
int alignment;
int data_pad;
int name_len;
- char name[UBI_VOL_NAME_MAX+1];
+ char name[UBI_VOL_NAME_MAX + 1];
int upd_ebs;
int ch_lnum;
- int ch_dtype;
long long upd_bytes;
long long upd_received;
void *upd_buf;
@@ -228,22 +321,11 @@ struct ubi_volume {
unsigned int upd_marker:1;
unsigned int updating:1;
unsigned int changing_leb:1;
-
-#ifdef CONFIG_MTD_UBI_GLUEBI
- /*
- * Gluebi-related stuff may be compiled out.
- * TODO: this should not be built into UBI but should be a separate
- * ubimtd driver which works on top of UBI and emulates MTD devices.
- */
- struct ubi_volume_desc *gluebi_desc;
- int gluebi_refcount;
- struct mtd_info gluebi_mtd;
-#endif
+ unsigned int direct_writes:1;
};
/**
- * struct ubi_volume_desc - descriptor of the UBI volume returned when it is
- * opened.
+ * struct ubi_volume_desc - UBI volume descriptor returned when it is opened.
* @vol: reference to the corresponding volume description object
* @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE)
*/
@@ -255,6 +337,37 @@ struct ubi_volume_desc {
struct ubi_wl_entry;
/**
+ * struct ubi_debug_info - debugging information for an UBI device.
+ *
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ */
+struct ubi_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_io:1;
+ unsigned int disable_bgt:1;
+ unsigned int emulate_bitflips:1;
+ unsigned int emulate_io_failures:1;
+ char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_io;
+ struct dentry *dfs_disable_bgt;
+ struct dentry *dfs_emulate_bitflips;
+ struct dentry *dfs_emulate_io_failures;
+};
+
+/**
* struct ubi_device - UBI device description structure
* @dev: UBI device object to use the Linux device model
* @cdev: character device object to create character device
@@ -267,6 +380,7 @@ struct ubi_wl_entry;
* @vol->readers, @vol->writers, @vol->exclusive,
* @vol->ref_count, @vol->mapping and @vol->eba_tbl.
* @ref_count: count of references on the UBI device
+ * @image_seq: image sequence number recorded on EC headers
*
* @rsvd_pebs: count of reserved physical eraseblocks
* @avail_pebs: count of available physical eraseblocks
@@ -275,12 +389,13 @@ struct ubi_wl_entry;
* @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
*
* @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
- * of UBI ititializetion
+ * of UBI initialization
* @vtbl_slots: how many slots are available in the volume table
* @vtbl_size: size of the volume table in bytes
* @vtbl: in-RAM volume table copy
- * @volumes_mutex: protects on-flash volume table and serializes volume
- * changes, like creation, deletion, update, resize
+ * @device_mutex: protects on-flash volume table and serializes volume
+ * creation, deletion, update, re-size, re-name and set
+ * property
*
* @max_ec: current highest erase counter value
* @mean_ec: current mean erase counter value
@@ -290,20 +405,33 @@ struct ubi_wl_entry;
* @ltree: the lock tree
* @alc_mutex: serializes "atomic LEB change" operations
*
+ * @fm_disabled: non-zero if fastmap is disabled (default)
+ * @fm: in-memory data structure of the currently used fastmap
+ * @fm_pool: in-memory data structure of the fastmap pool
+ * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
+ * sub-system
+ * @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf
+ * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
+ * @fm_size: fastmap size in bytes
+ * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
+ * @fm_work: fastmap work queue
+ *
* @used: RB-tree of used physical eraseblocks
+ * @erroneous: RB-tree of erroneous used physical eraseblocks
* @free: RB-tree of free physical eraseblocks
+ * @free_count: Contains the number of elements in @free
* @scrub: RB-tree of physical eraseblocks which need scrubbing
- * @prot: protection trees
- * @prot.pnum: protection tree indexed by physical eraseblock numbers
- * @prot.aec: protection tree indexed by absolute erase counter value
- * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
- * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
- * fields
+ * @pq: protection queue (contains physical eraseblocks which are temporarily
+ * protected from the wear-leveling worker)
+ * @pq_head: protection queue head
+ * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
+ * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
+ * @erroneous, and @erroneous_peb_count fields
* @move_mutex: serializes eraseblock moves
+ * @work_sem: synchronizes the WL worker with user tasks
* @wl_scheduled: non-zero if the wear-leveling was scheduled
* @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
* physical eraseblock
- * @abs_ec: absolute erase counter
* @move_from: physical eraseblock from where the data is being moved
* @move_to: physical eraseblock where the data is being moved to
* @move_to_put: if the "to" PEB was put
@@ -316,76 +444,86 @@ struct ubi_wl_entry;
* @flash_size: underlying MTD device size (in bytes)
* @peb_count: count of physical eraseblocks on the MTD device
* @peb_size: physical eraseblock size
+ * @bad_peb_limit: top limit of expected bad physical eraseblocks
* @bad_peb_count: count of bad physical eraseblocks
* @good_peb_count: count of good physical eraseblocks
+ * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
+ * used by UBI)
+ * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
+ * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks
* @min_io_size: minimal input/output unit size of the underlying MTD device
* @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
* @ro_mode: if the UBI device is in read-only mode
* @leb_size: logical eraseblock size
* @leb_start: starting offset of logical eraseblocks within physical
- * eraseblocks
+ * eraseblocks
* @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size
* @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size
* @vid_hdr_offset: starting offset of the volume identifier header (might be
- * unaligned)
+ * unaligned)
* @vid_hdr_aloffset: starting offset of the VID header aligned to
* @hdrs_min_io_size
* @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
* @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
* not
+ * @nor_flash: non-zero if working on top of NOR flash
+ * @max_write_size: maximum amount of bytes the underlying flash can write at a
+ * time (MTD write buffer size)
* @mtd: MTD device descriptor
*
- * @peb_buf1: a buffer of PEB size used for different purposes
- * @peb_buf2: another buffer of PEB size used for different purposes
- * @buf_mutex: proptects @peb_buf1 and @peb_buf2
- * @dbg_peb_buf: buffer of PEB size used for debugging
- * @dbg_buf_mutex: proptects @dbg_peb_buf
+ * @peb_buf: a buffer of PEB size used for different purposes
+ * @buf_mutex: protects @peb_buf
+ * @ckvol_mutex: serializes static volume checking when opening
+ *
+ * @dbg: debugging information for this UBI device
*/
struct ubi_device {
struct cdev cdev;
- struct device dev;
+ struct device_d dev;
int ubi_num;
char ubi_name[sizeof(UBI_NAME_STR)+5];
int vol_count;
struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
- spinlock_t volumes_lock;
int ref_count;
+ int image_seq;
int rsvd_pebs;
int avail_pebs;
int beb_rsvd_pebs;
int beb_rsvd_level;
+ int bad_peb_limit;
int autoresize_vol_id;
int vtbl_slots;
int vtbl_size;
struct ubi_vtbl_record *vtbl;
- struct mutex volumes_mutex;
int max_ec;
- /* TODO: mean_ec is not updated run-time, fix */
+ /* Note, mean_ec is not updated run-time - should be fixed */
int mean_ec;
- /* EBA unit's stuff */
+ /* EBA sub-system's stuff */
unsigned long long global_sqnum;
- spinlock_t ltree_lock;
struct rb_root ltree;
- struct mutex alc_mutex;
- /* Wear-leveling unit's stuff */
+ /* Fastmap stuff */
+ int fm_disabled;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_fm_pool fm_pool;
+ struct ubi_fm_pool fm_wl_pool;
+ void *fm_buf;
+ size_t fm_size;
+
+ /* Wear-leveling sub-system's stuff */
struct rb_root used;
+ struct rb_root erroneous;
struct rb_root free;
+ int free_count;
struct rb_root scrub;
- struct {
- struct rb_root pnum;
- struct rb_root aec;
- } prot;
- spinlock_t wl_lock;
- struct mutex move_mutex;
- struct rw_semaphore work_sem;
+ struct list_head pq[UBI_PROT_QUEUE_LEN];
+ int pq_head;
int wl_scheduled;
struct ubi_wl_entry **lookuptbl;
- unsigned long long abs_ec;
struct ubi_wl_entry *move_from;
struct ubi_wl_entry *move_to;
int move_to_put;
@@ -395,12 +533,15 @@ struct ubi_device {
int thread_enabled;
char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
- /* I/O unit's stuff */
+ /* I/O sub-system's stuff */
long long flash_size;
int peb_count;
int peb_size;
int bad_peb_count;
int good_peb_count;
+ int corr_peb_count;
+ int erroneous_peb_count;
+ int max_erroneous;
int min_io_size;
int hdrs_min_io_size;
int ro_mode;
@@ -411,35 +552,192 @@ struct ubi_device {
int vid_hdr_offset;
int vid_hdr_aloffset;
int vid_hdr_shift;
- int bad_allowed;
+ unsigned int bad_allowed:1;
+ unsigned int nor_flash:1;
+ int max_write_size;
struct mtd_info *mtd;
- void *peb_buf1;
- void *peb_buf2;
- struct mutex buf_mutex;
- struct mutex ckvol_mutex;
-#ifdef CONFIG_MTD_UBI_DEBUG
- void *dbg_peb_buf;
- struct mutex dbg_buf_mutex;
-#endif
+ void *peb_buf;
+
+ struct ubi_debug_info dbg;
};
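
To illustrate the @pq/@pq_head pair: the protection queue behaves as a
rotating array of lists, so a PEB queued "behind" the head is served again
only after UBI_PROT_QUEUE_LEN head advances. A sketch loosely adapted from
the WL sub-system (the exact tail arithmetic is an assumption here):

static void prot_queue_add_sketch(struct ubi_device *ubi,
				  struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
}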
+/**
+ * struct ubi_ainf_peb - attach information about a physical eraseblock.
+ * @ec: erase counter (%UBI_UNKNOWN if it is unknown)
+ * @pnum: physical eraseblock number
+ * @vol_id: ID of the volume this LEB belongs to
+ * @lnum: logical eraseblock number
+ * @scrub: if this physical eraseblock needs scrubbing
+ * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
+ * @sqnum: sequence number
+ * @u: unions RB-tree or @list links
+ * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
+ * @u.list: link in one of the eraseblock lists
+ *
+ * One object of this type is allocated for each physical eraseblock when
+ * attaching an MTD device. Note, if this PEB does not belong to any LEB /
+ * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN.
+ */
+struct ubi_ainf_peb {
+ int ec;
+ int pnum;
+ int vol_id;
+ int lnum;
+ unsigned int scrub:1;
+ unsigned int copy_flag:1;
+ unsigned long long sqnum;
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
+};
+
+/**
+ * struct ubi_ainf_volume - attaching information about a volume.
+ * @vol_id: volume ID
+ * @highest_lnum: highest logical eraseblock number in this volume
+ * @leb_count: number of logical eraseblocks in this volume
+ * @vol_type: volume type
+ * @used_ebs: number of used logical eraseblocks in this volume (only for
+ * static volumes)
+ * @last_data_size: amount of data in the last logical eraseblock of this
+ * volume (always equivalent to the usable logical eraseblock
+ * size in case of dynamic volumes)
+ * @data_pad: how many bytes at the end of logical eraseblocks of this volume
+ * are not used (due to volume alignment)
+ * @compat: compatibility flags of this volume
+ * @rb: link in the volume RB-tree
+ * @root: root of the RB-tree containing all the eraseblocks belonging to this
+ * volume (&struct ubi_ainf_peb objects)
+ *
+ * One object of this type is allocated for each volume when attaching an MTD
+ * device.
+ */
+struct ubi_ainf_volume {
+ int vol_id;
+ int highest_lnum;
+ int leb_count;
+ int vol_type;
+ int used_ebs;
+ int last_data_size;
+ int data_pad;
+ int compat;
+ struct rb_node rb;
+ struct rb_root root;
+};
+
+/**
+ * struct ubi_attach_info - MTD device attaching information.
+ * @volumes: root of the volume RB-tree
+ * @corr: list of corrupted physical eraseblocks
+ * @free: list of free physical eraseblocks
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ * those belonging to "preserve"-compatible internal volumes)
+ * @corr_peb_count: count of PEBs in the @corr list
+ * @empty_peb_count: count of PEBs which are presumably empty (contain only
+ * 0xFF bytes)
+ * @alien_peb_count: count of PEBs in the @alien list
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @maybe_bad_peb_count: count of physical eraseblocks which are not marked
+ *                       as bad yet, but which look bad
+ * @vols_found: number of volumes found
+ * @highest_vol_id: highest volume ID
+ * @is_empty: flag indicating whether the MTD device is empty or not
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+ * @mean_ec: mean erase counter value
+ * @ec_sum: a temporary variable used when calculating @mean_ec
+ * @ec_count: a temporary variable used when calculating @mean_ec
+ * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
+ *
+ * This data structure contains the result of attaching an MTD device and may
+ * be used by other UBI sub-systems to build final UBI data structures,
+ * perform further error-recovery, and so on.
+ */
+struct ubi_attach_info {
+ struct rb_root volumes;
+ struct list_head corr;
+ struct list_head free;
+ struct list_head erase;
+ struct list_head alien;
+ int corr_peb_count;
+ int empty_peb_count;
+ int alien_peb_count;
+ int bad_peb_count;
+ int maybe_bad_peb_count;
+ int vols_found;
+ int highest_vol_id;
+ int is_empty;
+ int min_ec;
+ int max_ec;
+ unsigned long long max_sqnum;
+ int mean_ec;
+ uint64_t ec_sum;
+ int ec_count;
+ struct kmem_cache *aeb_slab_cache;
+};
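
As a small illustration of the @ec_sum/@ec_count pair: they are accumulated
while PEBs are scanned and collapsed into @mean_ec. A sketch, assuming
div_u64() from <linux/math64.h>; the function name is made up:

static void account_ec_sketch(struct ubi_attach_info *ai, int ec)
{
	ai->ec_sum += ec;
	ai->ec_count += 1;
	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
}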
+
+/**
+ * struct ubi_work - UBI work description data structure.
+ * @list: a link in the list of pending works
+ * @func: worker function
+ * @e: physical eraseblock to erase
+ * @vol_id: the volume ID on which this erasure is being performed
+ * @lnum: the logical eraseblock number
+ * @torture: if the physical eraseblock has to be tortured
+ * @anchor: produce an anchor PEB to be used by fastmap
+ *
+ * The @func pointer points to the worker function. If the @cancel argument is
+ * not zero, the worker has to free the resources and exit immediately. The
+ * worker has to return zero in case of success and a negative error code in
+ * case of failure.
+ */
+struct ubi_work {
+ struct list_head list;
+ int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
+ /* The below fields are only relevant to erasure works */
+ struct ubi_wl_entry *e;
+ int vol_id;
+ int lnum;
+ int torture;
+ int anchor;
+};
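
A hedged sketch of a worker obeying the @func contract spelled out above
(the actual erase work is elided; only the cancel semantics are shown, and
the function name is invented):

static int erase_worker_sketch(struct ubi_device *ubi, struct ubi_work *wrk,
			       int cancel)
{
	struct ubi_wl_entry *e = wrk->e;

	if (cancel) {
		/* free resources and exit immediately, as required */
		kmem_cache_free(ubi_wl_entry_slab, e);
		kfree(wrk);
		return 0;
	}

	/* ... erase e->pnum, bump the erase counter, re-use or retire e ... */
	kfree(wrk);
	return 0;
}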
+
+#include "debug.h"
+
extern struct kmem_cache *ubi_wl_entry_slab;
-extern struct file_operations ubi_ctrl_cdev_operations;
-extern struct file_operations ubi_cdev_operations;
-extern struct file_operations ubi_vol_cdev_operations;
+extern const struct file_operations ubi_ctrl_cdev_operations;
+extern const struct file_operations ubi_cdev_operations;
+extern const struct file_operations ubi_vol_cdev_operations;
extern struct class *ubi_class;
-extern struct mutex ubi_devices_mutex;
+extern struct blocking_notifier_head ubi_notifiers;
+
+/* attach.c */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id);
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai);
+int ubi_attach(struct ubi_device *ubi, int force_scan);
+void ubi_destroy_ai(struct ubi_attach_info *ai);
/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
struct ubi_vtbl_record *vtbl_rec);
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
+ struct list_head *rename_list);
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai);
/* vmt.c */
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
-int ubi_remove_volume(struct ubi_volume_desc *desc);
+int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl);
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
+int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list);
int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
@@ -455,20 +753,12 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
const void __user *buf, int count);
/* misc.c */
-int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length);
+int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
+ int length);
int ubi_check_volume(struct ubi_device *ubi, int vol_id);
+void ubi_update_reserved(struct ubi_device *ubi);
void ubi_calculate_reserved(struct ubi_device *ubi);
-
-/* gluebi.c */
-#ifdef CONFIG_MTD_UBI_GLUEBI
-int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol);
-int ubi_destroy_gluebi(struct ubi_volume *vol);
-void ubi_gluebi_updated(struct ubi_volume *vol);
-#else
-#define ubi_create_gluebi(ubi, vol) 0
-#define ubi_destroy_gluebi(vol) 0
-#define ubi_gluebi_updated(vol)
-#endif
+int ubi_check_pattern(const void *buf, uint8_t patt, int size);
/* eba.c */
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
@@ -476,25 +766,33 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int offset, int len, int check);
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
- const void *buf, int offset, int len, int dtype);
+ const void *buf, int offset, int len);
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype,
- int used_ebs);
+ int lnum, const void *buf, int len, int used_ebs);
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype);
+ int lnum, const void *buf, int len);
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr);
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
-void ubi_eba_close(const struct ubi_device *ubi);
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan);
/* wl.c */
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture);
-int ubi_wl_flush(struct ubi_device *ubi);
+int ubi_wl_get_peb(struct ubi_device *ubi);
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture);
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum);
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
void ubi_wl_close(struct ubi_device *ubi);
int ubi_thread(void *u);
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
+ int lnum, int torture);
+int ubi_is_erase_work(struct ubi_work *wrk);
+void ubi_refill_pools(struct ubi_device *ubi);
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -514,12 +812,11 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr);
/* build.c */
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
-int ubi_detach_mtd_dev(struct mtd_info *mtd, int anyway);
struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
struct ubi_device *ubi_get_by_major(int major);
int ubi_major2num(int major);
+void ubi_free_internal_volumes(struct ubi_device *ubi);
/* cdev.c */
int ubi_cdev_add(struct ubi_device *ubi);
@@ -527,9 +824,23 @@ void ubi_cdev_remove(struct ubi_device *ubi);
int ubi_volume_cdev_add(struct ubi_device *ubi, struct ubi_volume *vol);
void ubi_volume_cdev_remove(struct ubi_volume *vol);
+/* kapi.c */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_volume_info *vi);
+/* scan.c */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr);
+
+/* fastmap.c */
+size_t ubi_calc_fm_size(struct ubi_device *ubi);
+int ubi_update_fastmap(struct ubi_device *ubi);
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor);
+void ubi_free_fastmap(struct ubi_device *ubi);
/*
* ubi_rb_for_each_entry - walk an RB-tree.
- * @rb: a pointer to type 'struct rb_node' to to use as a loop counter
+ * @rb: a pointer to type 'struct rb_node' to use as a loop counter
* @pos: a pointer to RB-tree entry type to use as a loop counter
* @root: RB-tree's root
* @member: the name of the 'struct rb_node' within the RB-tree entry
@@ -538,7 +849,23 @@ void ubi_volume_cdev_remove(struct ubi_volume *vol);
for (rb = rb_first(root), \
pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \
rb; \
- rb = rb_next(rb), pos = container_of(rb, typeof(*pos), member))
+ rb = rb_next(rb), \
+ pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
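
The fix matters because @pos is now re-computed only while @rb is non-NULL,
so the loop terminates without evaluating container_of() on a NULL node. A
usage sketch (illustrative) iterating a per-volume tree of
&struct ubi_ainf_peb objects linked via @u.rb:

static void walk_volume_sketch(struct ubi_ainf_volume *av)
{
	struct rb_node *rb;
	struct ubi_ainf_peb *aeb;

	ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
		pr_info("PEB %d carries LEB %d\n", aeb->pnum, aeb->lnum);
}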
+
+/*
+ * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
+ *
+ * @av: volume attaching information
+ * @aeb: attaching eraseblock information
+ * @list: the list to move to
+ */
+static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
+ struct ubi_ainf_peb *aeb,
+ struct list_head *list)
+{
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, list);
+}
/**
* ubi_zalloc_vid_hdr - allocate a volume identifier header object.
@@ -550,7 +877,7 @@ void ubi_volume_cdev_remove(struct ubi_volume *vol);
* failure.
*/
static inline struct ubi_vid_hdr *
-ubi_zalloc_vid_hdr(const struct ubi_device *ubi, unsigned int gfp_flags)
+ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags)
{
void *vid_hdr;
@@ -614,6 +941,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi)
if (!ubi->ro_mode) {
ubi->ro_mode = 1;
ubi_warn("switch to read-only mode");
+ dump_stack();
}
}
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 9f5e06fe11..fae201463c 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -12,6 +12,9 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*
@@ -35,13 +38,8 @@
* transaction with a roll-back capability.
*/
-#ifdef UBI_LINUX
#include <linux/err.h>
-#include <asm/uaccess.h>
-#include <asm/div64.h>
-#endif
-
-#include "ubi-barebox.h"
+#include <linux/math64.h>
#include "ubi.h"
/**
@@ -57,21 +55,18 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
int err;
struct ubi_vtbl_record vtbl_rec;
- dbg_msg("set update marker for volume %d", vol->vol_id);
+ dbg_gen("set update marker for volume %d", vol->vol_id);
if (vol->upd_marker) {
ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
- dbg_msg("already set");
+ dbg_gen("already set");
return 0;
}
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
vtbl_rec.upd_marker = 1;
- mutex_lock(&ubi->volumes_mutex);
err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
- mutex_unlock(&ubi->volumes_mutex);
vol->upd_marker = 1;
return err;
}
@@ -90,30 +85,26 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
long long bytes)
{
int err;
- uint64_t tmp;
struct ubi_vtbl_record vtbl_rec;
- dbg_msg("clear update marker for volume %d", vol->vol_id);
+ dbg_gen("clear update marker for volume %d", vol->vol_id);
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
vtbl_rec.upd_marker = 0;
if (vol->vol_type == UBI_STATIC_VOLUME) {
vol->corrupted = 0;
- vol->used_bytes = tmp = bytes;
- vol->last_eb_bytes = do_div(tmp, vol->usable_leb_size);
- vol->used_ebs = tmp;
+ vol->used_bytes = bytes;
+ vol->used_ebs = div_u64_rem(bytes, vol->usable_leb_size,
+ &vol->last_eb_bytes);
if (vol->last_eb_bytes)
vol->used_ebs += 1;
else
vol->last_eb_bytes = vol->usable_leb_size;
}
- mutex_lock(&ubi->volumes_mutex);
err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
- mutex_unlock(&ubi->volumes_mutex);
vol->upd_marker = 0;
return err;
}
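
For reference, div_u64_rem() (from <linux/math64.h>, now included above)
returns the 64-bit quotient and stores the 32-bit remainder through its
third argument, which is what lets the open-coded do_div() sequence collapse
into a single call. A worked sketch with an invented helper name:

static void split_used_bytes_sketch(u64 bytes, u32 leb_size)
{
	u32 last_eb_bytes;
	u64 used_ebs = div_u64_rem(bytes, leb_size, &last_eb_bytes);

	/* e.g. bytes == 1000, leb_size == 300 -> used_ebs == 3 and
	 * last_eb_bytes == 100; a non-zero remainder means one extra,
	 * partially filled eraseblock, as handled in the code above */
	pr_debug("%llu full LEBs + %u bytes\n", used_ebs, last_eb_bytes);
}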
@@ -132,9 +123,8 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
long long bytes)
{
int i, err;
- uint64_t tmp;
- dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes);
+ dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes);
ubi_assert(!vol->updating && !vol->changing_leb);
vol->updating = 1;
@@ -150,21 +140,23 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
}
if (bytes == 0) {
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
- err = ubi_wl_flush(ubi);
- if (!err)
- vol->updating = 0;
+ vol->updating = 0;
+ return 0;
}
vol->upd_buf = vmalloc(ubi->leb_size);
if (!vol->upd_buf)
return -ENOMEM;
- tmp = bytes;
- vol->upd_ebs = !!do_div(tmp, vol->usable_leb_size);
- vol->upd_ebs += tmp;
+ vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
+ vol->usable_leb_size);
vol->upd_bytes = bytes;
vol->upd_received = 0;
return 0;
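
The new @upd_ebs expression is a plain 64-bit ceiling division, i.e.
DIV_ROUND_UP(bytes, usable_leb_size) done with div_u64(). The idiom as a
stand-alone sketch:

static u64 ceil_div_sketch(u64 bytes, u32 leb_size)
{
	/* e.g. bytes == 600, leb_size == 300 -> 2; bytes == 601 -> 3 */
	return div_u64(bytes + leb_size - 1, leb_size);
}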
@@ -178,7 +170,7 @@ int ubi_finish_update(struct ubi_device *ubi, struct ubi_volume *vol)
err = clear_update_marker(ubi, vol, vol->upd_bytes);
if (err)
return err;
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
if (err == 0) {
vol->updating = 0;
vfree(vol->upd_buf);
@@ -201,17 +193,15 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
{
ubi_assert(!vol->updating && !vol->changing_leb);
- dbg_msg("start changing LEB %d:%d, %u bytes",
+ dbg_gen("start changing LEB %d:%d, %u bytes",
vol->vol_id, req->lnum, req->bytes);
if (req->bytes == 0)
- return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
- req->dtype);
+ return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
vol->upd_bytes = req->bytes;
vol->upd_received = 0;
vol->changing_leb = 1;
vol->ch_lnum = req->lnum;
- vol->ch_dtype = req->dtype;
vol->upd_buf = vmalloc(req->bytes);
if (!vol->upd_buf)
@@ -260,11 +250,11 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
memset(buf + len, 0xFF, l - len);
len = ubi_calc_data_len(ubi, buf, l);
if (len == 0) {
- dbg_msg("all %d bytes contain 0xFF - skip", len);
+ dbg_gen("all %d bytes contain 0xFF - skip", len);
return 0;
}
- err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
} else {
/*
* When writing static volume, and this is the last logical
@@ -276,8 +266,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
* contain zeros, not random trash.
*/
memset(buf + len, 0, vol->usable_leb_size - len);
- err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
- UBI_UNKNOWN, used_ebs);
+ err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
}
return err;
@@ -285,6 +274,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
/**
* ubi_more_update_data - write more update data.
+ * @ubi: UBI device description object
* @vol: volume description object
* @buf: write data (user-space memory buffer)
* @count: how many bytes to write
@@ -298,19 +288,15 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
const void __user *buf, int count)
{
- uint64_t tmp;
int lnum, offs, err = 0, len, to_write = count;
- dbg_msg("write %d of %lld bytes, %lld already passed",
+ dbg_gen("write %d of %lld bytes, %lld already passed",
count, vol->upd_bytes, vol->upd_received);
if (ubi->ro_mode)
return -EROFS;
- tmp = vol->upd_received;
- offs = do_div(tmp, vol->usable_leb_size);
- lnum = tmp;
-
+ lnum = div_u64_rem(vol->upd_received, vol->usable_leb_size, &offs);
if (vol->upd_received + count > vol->upd_bytes)
to_write = count = vol->upd_bytes - vol->upd_received;
@@ -334,12 +320,13 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
if (err)
return -EFAULT;
- if (offs + len == vol->usable_leb_size) {
+ if (offs + len == vol->usable_leb_size ||
+ vol->upd_received + len == vol->upd_bytes) {
int flush_len = offs + len;
/*
- * OK, we gathered the whole eraseblock, it's time to flush
- * the buffer.
+ * OK, we gathered either the whole eraseblock or this
+ * is the last chunk, it's time to flush the buffer.
*/
ubi_assert(flush_len <= vol->usable_leb_size);
err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
@@ -383,12 +370,25 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
}
ubi_assert(vol->upd_received <= vol->upd_bytes);
+ if (vol->upd_received == vol->upd_bytes) {
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+ /* The update is finished, clear the update marker */
+ err = clear_update_marker(ubi, vol, vol->upd_bytes);
+ if (err)
+ return err;
+ vol->updating = 0;
+ err = to_write;
+ vfree(vol->upd_buf);
+ }
return err;
}
/**
* ubi_more_leb_change_data - accept more data for atomic LEB change.
+ * @ubi: UBI device description object
* @vol: volume description object
* @buf: write data (user-space memory buffer)
* @count: how many bytes to write
@@ -405,7 +405,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
{
int err;
- dbg_msg("write %d of %lld bytes, %lld already passed",
+ dbg_gen("write %d of %lld bytes, %lld already passed",
count, vol->upd_bytes, vol->upd_received);
if (ubi->ro_mode)
@@ -423,10 +423,11 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
if (vol->upd_received == vol->upd_bytes) {
int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
- memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes);
+ memset(vol->upd_buf + vol->upd_bytes, 0xFF,
+ len - vol->upd_bytes);
len = ubi_calc_data_len(ubi, vol->upd_buf, len);
err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
- vol->upd_buf, len, UBI_UNKNOWN);
+ vol->upd_buf, len);
if (err)
return err;
}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 0d6995eb6a..65339efb93 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -11,6 +11,9 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
@@ -20,178 +23,11 @@
* resizing.
*/
-#ifdef UBI_LINUX
#include <linux/err.h>
-#include <asm/div64.h>
-#endif
-
-#include "ubi-barebox.h"
+#include <linux/math64.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static void paranoid_check_volumes(struct ubi_device *ubi);
-#else
-#define paranoid_check_volumes(ubi)
-#endif
-
-#ifdef UBI_LINUX
-static ssize_t vol_attribute_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-/* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */
-static struct device_attribute attr_vol_reserved_ebs =
- __ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL);
-static struct device_attribute attr_vol_type =
- __ATTR(type, S_IRUGO, vol_attribute_show, NULL);
-static struct device_attribute attr_vol_name =
- __ATTR(name, S_IRUGO, vol_attribute_show, NULL);
-static struct device_attribute attr_vol_corrupted =
- __ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL);
-static struct device_attribute attr_vol_alignment =
- __ATTR(alignment, S_IRUGO, vol_attribute_show, NULL);
-static struct device_attribute attr_vol_usable_eb_size =
- __ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL);
-static struct device_attribute attr_vol_data_bytes =
- __ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL);
-static struct device_attribute attr_vol_upd_marker =
- __ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL);
-
-/*
- * "Show" method for files in '/<sysfs>/class/ubi/ubiX_Y/'.
- *
- * Consider a situation:
- * A. process 1 opens a sysfs file related to volume Y, say
- * /<sysfs>/class/ubi/ubiX_Y/reserved_ebs;
- * B. process 2 removes volume Y;
- * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
- *
- * In this situation, this function will return %-ENODEV because it will find
- * out that the volume was removed from the @ubi->volumes array.
- */
-static ssize_t vol_attribute_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int ret;
- struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
- struct ubi_device *ubi;
-
- ubi = ubi_get_device(vol->ubi->ubi_num);
- if (!ubi)
- return -ENODEV;
-
- spin_lock(&ubi->volumes_lock);
- if (!ubi->volumes[vol->vol_id]) {
- spin_unlock(&ubi->volumes_lock);
- ubi_put_device(ubi);
- return -ENODEV;
- }
- /* Take a reference to prevent volume removal */
- vol->ref_count += 1;
- spin_unlock(&ubi->volumes_lock);
-
- if (attr == &attr_vol_reserved_ebs)
- ret = sprintf(buf, "%d\n", vol->reserved_pebs);
- else if (attr == &attr_vol_type) {
- const char *tp;
-
- if (vol->vol_type == UBI_DYNAMIC_VOLUME)
- tp = "dynamic";
- else
- tp = "static";
- ret = sprintf(buf, "%s\n", tp);
- } else if (attr == &attr_vol_name)
- ret = sprintf(buf, "%s\n", vol->name);
- else if (attr == &attr_vol_corrupted)
- ret = sprintf(buf, "%d\n", vol->corrupted);
- else if (attr == &attr_vol_alignment)
- ret = sprintf(buf, "%d\n", vol->alignment);
- else if (attr == &attr_vol_usable_eb_size)
- ret = sprintf(buf, "%d\n", vol->usable_leb_size);
- else if (attr == &attr_vol_data_bytes)
- ret = sprintf(buf, "%lld\n", vol->used_bytes);
- else if (attr == &attr_vol_upd_marker)
- ret = sprintf(buf, "%d\n", vol->upd_marker);
- else
- /* This must be a bug */
- ret = -EINVAL;
-
- /* We've done the operation, drop volume and UBI device references */
- spin_lock(&ubi->volumes_lock);
- vol->ref_count -= 1;
- ubi_assert(vol->ref_count >= 0);
- spin_unlock(&ubi->volumes_lock);
- ubi_put_device(ubi);
- return ret;
-}
-#endif
-
-/* Release method for volume devices */
-static void vol_release(struct device *dev)
-{
- struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
-
- kfree(vol);
-}
-
-#ifdef UBI_LINUX
-/**
- * volume_sysfs_init - initialize sysfs for new volume.
- * @ubi: UBI device description object
- * @vol: volume description object
- *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
- *
- * Note, this function does not free allocated resources in case of failure -
- * the caller does it. This is because this would cause release() here and the
- * caller would oops.
- */
-static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
-{
- int err;
-
- err = device_create_file(&vol->dev, &attr_vol_reserved_ebs);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_type);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_name);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_corrupted);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_alignment);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_usable_eb_size);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_data_bytes);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_upd_marker);
- return err;
-}
-
-/**
- * volume_sysfs_close - close sysfs for a volume.
- * @vol: volume description object
- */
-static void volume_sysfs_close(struct ubi_volume *vol)
-{
- device_remove_file(&vol->dev, &attr_vol_upd_marker);
- device_remove_file(&vol->dev, &attr_vol_data_bytes);
- device_remove_file(&vol->dev, &attr_vol_usable_eb_size);
- device_remove_file(&vol->dev, &attr_vol_alignment);
- device_remove_file(&vol->dev, &attr_vol_corrupted);
- device_remove_file(&vol->dev, &attr_vol_name);
- device_remove_file(&vol->dev, &attr_vol_type);
- device_remove_file(&vol->dev, &attr_vol_reserved_ebs);
- device_unregister(&vol->dev);
-}
-#endif
+static int self_check_volumes(struct ubi_device *ubi);
/**
* ubi_create_volume - create volume.
@@ -202,15 +38,13 @@ static void volume_sysfs_close(struct ubi_volume *vol)
* %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume
* and saves it in @req->vol_id. Returns zero in case of success and a negative
* error code in case of failure. Note, the caller has to have the
- * @ubi->volumes_mutex locked.
+ * @ubi->device_mutex locked.
*/
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
{
- int i, err, vol_id = req->vol_id, dont_free = 0;
+ int i, err, vol_id = req->vol_id, do_free = 1;
struct ubi_volume *vol;
struct ubi_vtbl_record vtbl_rec;
- uint64_t bytes;
- dev_t dev;
if (ubi->ro_mode)
return -EROFS;
@@ -219,10 +53,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (!vol)
return -ENOMEM;
- spin_lock(&ubi->volumes_lock);
if (vol_id == UBI_VOL_NUM_AUTO) {
/* Find unused volume ID */
- dbg_msg("search for vacant volume ID");
+ dbg_gen("search for vacant volume ID");
for (i = 0; i < ubi->vtbl_slots; i++)
if (!ubi->volumes[i]) {
vol_id = i;
@@ -230,21 +63,21 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
}
if (vol_id == UBI_VOL_NUM_AUTO) {
- dbg_err("out of volume IDs");
+ ubi_err("out of volume IDs");
err = -ENFILE;
goto out_unlock;
}
req->vol_id = vol_id;
}
- dbg_msg("volume ID %d, %llu bytes, type %d, name %s",
- vol_id, (unsigned long long)req->bytes,
+ dbg_gen("create device %d, volume %d, %llu bytes, type %d, name %s",
+ ubi->ubi_num, vol_id, (unsigned long long)req->bytes,
(int)req->vol_type, req->name);
/* Ensure that this volume does not exist */
err = -EEXIST;
if (ubi->volumes[vol_id]) {
- dbg_err("volume %d already exists", vol_id);
+ ubi_err("volume %d already exists", vol_id);
goto out_unlock;
}
@@ -253,40 +86,40 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (ubi->volumes[i] &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
- dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
+ ubi_err("volume \"%s\" exists (ID %d)", req->name, i);
goto out_unlock;
}
/* Calculate how many eraseblocks are requested */
vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
- bytes = req->bytes;
- if (do_div(bytes, vol->usable_leb_size))
- vol->reserved_pebs = 1;
- vol->reserved_pebs += bytes;
+ vol->reserved_pebs += div_u64(req->bytes + vol->usable_leb_size - 1,
+ vol->usable_leb_size);
/* Reserve physical eraseblocks */
if (vol->reserved_pebs > ubi->avail_pebs) {
- dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
+ ubi_err("not enough PEBs, only %d available", ubi->avail_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err("%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
err = -ENOSPC;
goto out_unlock;
}
ubi->avail_pebs -= vol->reserved_pebs;
ubi->rsvd_pebs += vol->reserved_pebs;
- spin_unlock(&ubi->volumes_lock);
vol->vol_id = vol_id;
vol->alignment = req->alignment;
vol->data_pad = ubi->leb_size % vol->alignment;
vol->vol_type = req->vol_type;
vol->name_len = req->name_len;
- memcpy(vol->name, req->name, vol->name_len + 1);
+ memcpy(vol->name, req->name, vol->name_len);
vol->ubi = ubi;
/*
* Finish all pending erases because there may be some LEBs belonging
* to the same volume ID.
*/
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
if (err)
goto out_acc;
@@ -305,47 +138,22 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->used_bytes =
(long long)vol->used_ebs * vol->usable_leb_size;
} else {
- bytes = vol->used_bytes;
- vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size);
- vol->used_ebs = bytes;
- if (vol->last_eb_bytes)
+ vol->used_ebs = div_u64_rem(vol->used_bytes,
+ vol->usable_leb_size,
+ &vol->last_eb_bytes);
+ if (vol->last_eb_bytes != 0)
vol->used_ebs += 1;
else
vol->last_eb_bytes = vol->usable_leb_size;
}
/* Register character device for the volume */
- cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
-#ifdef UBI_LINUX
- vol->cdev.owner = THIS_MODULE;
-#endif
- dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
err = ubi_volume_cdev_add(ubi, vol);
if (err) {
ubi_err("cannot add character device");
goto out_mapping;
}
- err = ubi_create_gluebi(ubi, vol);
- if (err)
- goto out_cdev;
-
- vol->dev.release = vol_release;
- vol->dev.parent = &ubi->dev;
- vol->dev.devt = dev;
- vol->dev.class = ubi_class;
-
- sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
- err = device_register(&vol->dev);
- if (err) {
- ubi_err("cannot register device");
- goto out_gluebi;
- }
-
- err = volume_sysfs_init(ubi, vol);
- if (err)
- goto out_sysfs;
-
/* Fill volume table record */
memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
@@ -356,49 +164,37 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vtbl_rec.vol_type = UBI_VID_DYNAMIC;
else
vtbl_rec.vol_type = UBI_VID_STATIC;
- memcpy(vtbl_rec.name, vol->name, vol->name_len + 1);
+ memcpy(vtbl_rec.name, vol->name, vol->name_len);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
goto out_sysfs;
- spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = vol;
ubi->vol_count += 1;
- spin_unlock(&ubi->volumes_lock);
- paranoid_check_volumes(ubi);
- return 0;
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
+ self_check_volumes(ubi);
+ return err;
out_sysfs:
/*
- * We have registered our device, we should not free the volume*
+ * We have registered our device, we should not free the volume
* description object in this function in case of an error - it is
* freed by the release function.
*
* Get device reference to prevent the release function from being
* called just after sysfs has been closed.
*/
- dont_free = 1;
- get_device(&vol->dev);
- volume_sysfs_close(vol);
-out_gluebi:
- if (ubi_destroy_gluebi(vol))
- dbg_err("cannot destroy gluebi for volume %d:%d",
- ubi->ubi_num, vol_id);
-out_cdev:
- ubi_volume_cdev_remove(vol);
+ do_free = 0;
out_mapping:
- kfree(vol->eba_tbl);
+ if (do_free)
+ kfree(vol->eba_tbl);
out_acc:
- spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
ubi->avail_pebs += vol->reserved_pebs;
out_unlock:
- spin_unlock(&ubi->volumes_lock);
- if (dont_free)
- put_device(&vol->dev);
- else
+ if (do_free)
kfree(vol);
ubi_err("cannot create volume %d, error %d", vol_id, err);
return err;
@@ -407,26 +203,26 @@ out_unlock:
/**
* ubi_remove_volume - remove volume.
* @desc: volume descriptor
+ * @no_vtbl: do not change volume table if not zero
*
* This function removes volume described by @desc. The volume has to be opened
* in "exclusive" mode. Returns zero in case of success and a negative error
- * code in case of failure. The caller has to have the @ubi->volumes_mutex
+ * code in case of failure. The caller has to have the @ubi->device_mutex
* locked.
*/
-int ubi_remove_volume(struct ubi_volume_desc *desc)
+int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
- dbg_msg("remove UBI volume %d", vol_id);
+ dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id);
ubi_assert(desc->mode == UBI_EXCLUSIVE);
ubi_assert(vol == ubi->volumes[vol_id]);
if (ubi->ro_mode)
return -EROFS;
- spin_lock(&ubi->volumes_lock);
if (vol->ref_count > 1) {
/*
* The volume is busy, probably someone is reading one of its
@@ -436,15 +232,12 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
goto out_unlock;
}
ubi->volumes[vol_id] = NULL;
- spin_unlock(&ubi->volumes_lock);
- err = ubi_destroy_gluebi(vol);
- if (err)
- goto out_err;
-
- err = ubi_change_vtbl_record(ubi, vol_id, NULL);
- if (err)
- goto out_err;
+ if (!no_vtbl) {
+ err = ubi_change_vtbl_record(ubi, vol_id, NULL);
+ if (err)
+ goto out_err;
+ }
for (i = 0; i < vol->reserved_pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, i);
@@ -452,35 +245,23 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
goto out_err;
}
- kfree(vol->eba_tbl);
- vol->eba_tbl = NULL;
- ubi_volume_cdev_remove(vol);
- volume_sysfs_close(vol);
+ devfs_remove(&vol->cdev);
- spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
ubi->avail_pebs += reserved_pebs;
- i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
- if (i > 0) {
- i = ubi->avail_pebs >= i ? i : ubi->avail_pebs;
- ubi->avail_pebs -= i;
- ubi->rsvd_pebs += i;
- ubi->beb_rsvd_pebs += i;
- if (i > 0)
- ubi_msg("reserve more %d PEBs", i);
- }
+ ubi_update_reserved(ubi);
ubi->vol_count -= 1;
- spin_unlock(&ubi->volumes_lock);
- paranoid_check_volumes(ubi);
- return 0;
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
+ if (!no_vtbl)
+ self_check_volumes(ubi);
+
+ return err;
out_err:
ubi_err("cannot remove volume %d, error %d", vol_id, err);
- spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = vol;
out_unlock:
- spin_unlock(&ubi->volumes_lock);
return err;
}
@@ -491,7 +272,7 @@ out_unlock:
*
* This function re-sizes the volume and returns zero in case of success, and a
* negative error code in case of failure. The caller has to have the
- * @ubi->volumes_mutex locked.
+ * @ubi->device_mutex locked.
*/
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
{
@@ -504,12 +285,12 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (ubi->ro_mode)
return -EROFS;
- dbg_msg("re-size volume %d to from %d to %d PEBs",
- vol_id, vol->reserved_pebs, reserved_pebs);
+ dbg_gen("re-size device %d, volume %d to from %d to %d PEBs",
+ ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs);
if (vol->vol_type == UBI_STATIC_VOLUME &&
reserved_pebs < vol->used_ebs) {
- dbg_err("too small size %d, %d LEBs contain data",
+ ubi_err("too small size %d, %d LEBs contain data",
reserved_pebs, vol->used_ebs);
return -EINVAL;
}
@@ -525,22 +306,20 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = UBI_LEB_UNMAPPED;
- spin_lock(&ubi->volumes_lock);
if (vol->ref_count > 1) {
- spin_unlock(&ubi->volumes_lock);
err = -EBUSY;
goto out_free;
}
- spin_unlock(&ubi->volumes_lock);
/* Reserve physical eraseblocks */
pebs = reserved_pebs - vol->reserved_pebs;
if (pebs > 0) {
- spin_lock(&ubi->volumes_lock);
if (pebs > ubi->avail_pebs) {
- dbg_err("not enough PEBs: requested %d, available %d",
+ ubi_err("not enough PEBs: requested %d, available %d",
pebs, ubi->avail_pebs);
- spin_unlock(&ubi->volumes_lock);
+ if (ubi->corr_peb_count)
+ ubi_err("%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
err = -ENOSPC;
goto out_free;
}
@@ -550,11 +329,10 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
new_mapping[i] = vol->eba_tbl[i];
kfree(vol->eba_tbl);
vol->eba_tbl = new_mapping;
- spin_unlock(&ubi->volumes_lock);
}
/* Change volume table record */
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
@@ -566,23 +344,13 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (err)
goto out_acc;
}
- spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
ubi->avail_pebs -= pebs;
- pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
- if (pebs > 0) {
- pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs;
- ubi->avail_pebs -= pebs;
- ubi->rsvd_pebs += pebs;
- ubi->beb_rsvd_pebs += pebs;
- if (pebs > 0)
- ubi_msg("reserve more %d PEBs", pebs);
- }
+ ubi_update_reserved(ubi);
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = vol->eba_tbl[i];
kfree(vol->eba_tbl);
vol->eba_tbl = new_mapping;
- spin_unlock(&ubi->volumes_lock);
}
vol->reserved_pebs = reserved_pebs;
@@ -593,15 +361,14 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
(long long)vol->used_ebs * vol->usable_leb_size;
}
- paranoid_check_volumes(ubi);
- return 0;
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
+ self_check_volumes(ubi);
+ return err;
out_acc:
if (pebs > 0) {
- spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= pebs;
ubi->avail_pebs += pebs;
- spin_unlock(&ubi->volumes_lock);
}
out_free:
kfree(new_mapping);
@@ -609,6 +376,43 @@ out_free:
}
/**
+ * ubi_rename_volumes - re-name UBI volumes.
+ * @ubi: UBI device description object
+ * @rename_list: list of &struct ubi_rename_entry objects
+ *
+ * This function re-names or removes volumes specified in the re-name list.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
+{
+ int err;
+ struct ubi_rename_entry *re;
+
+ err = ubi_vtbl_rename_volumes(ubi, rename_list);
+ if (err)
+ return err;
+
+ list_for_each_entry(re, rename_list, list) {
+ if (re->remove) {
+ err = ubi_remove_volume(re->desc, 1);
+ if (err)
+ break;
+ } else {
+ struct ubi_volume *vol = re->desc->vol;
+
+ vol->name_len = re->new_name_len;
+ memcpy(vol->name, re->new_name, re->new_name_len + 1);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED);
+ }
+ }
+
+ if (!err)
+ self_check_volumes(ubi);
+ return err;
+}
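A sketch of how a caller could populate @rename_list (assumptions: the &struct ubi_rename_entry layout with @new_name, @new_name_len, @remove, @desc and @list members as declared in ubi.h; the hypothetical descriptors desc_a/desc_b and the new name are placeholders, and obtaining the descriptors is elided):

	LIST_HEAD(rename_list);
	struct ubi_rename_entry re_ren = { .desc = desc_a };	/* rename */
	struct ubi_rename_entry re_del = { .desc = desc_b, .remove = 1 };
	int err;

	re_ren.new_name_len = strlen("rootfs");
	memcpy(re_ren.new_name, "rootfs", re_ren.new_name_len + 1);

	list_add_tail(&re_ren.list, &rename_list);
	list_add_tail(&re_del.list, &rename_list);

	err = ubi_rename_volumes(ubi, &rename_list);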
+
+/**
* ubi_add_volume - add volume.
* @ubi: UBI device description object
* @vol: volume description object
@@ -619,53 +423,21 @@ out_free:
*/
int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
- int err, vol_id = vol->vol_id;
- dev_t dev;
+ int err = 0;
- dbg_msg("add volume %d", vol_id);
- ubi_dbg_dump_vol_info(vol);
+ dbg_gen("add volume");
/* Register character device for the volume */
- cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
-#ifdef UBI_LINUX
- vol->cdev.owner = THIS_MODULE;
-#endif
- dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
err = ubi_volume_cdev_add(ubi, vol);
if (err) {
- ubi_err("cannot add character device for volume %d, error %d",
- vol_id, err);
- return err;
- }
-
- err = ubi_create_gluebi(ubi, vol);
- if (err)
- goto out_cdev;
-
- vol->dev.release = vol_release;
- vol->dev.parent = &ubi->dev;
- vol->dev.devt = dev;
- vol->dev.class = ubi_class;
- sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
- err = device_register(&vol->dev);
- if (err)
- goto out_gluebi;
-
- err = volume_sysfs_init(ubi, vol);
- if (err) {
- ubi_volume_cdev_remove(vol);
- err = ubi_destroy_gluebi(vol);
- volume_sysfs_close(vol);
+ ubi_err("cannot add character device for volume, error %d",
+ err);
return err;
}
- paranoid_check_volumes(ubi);
- return 0;
+ self_check_volumes(ubi);
+ return err;
-out_gluebi:
- err = ubi_destroy_gluebi(vol);
-out_cdev:
- ubi_volume_cdev_remove(vol);
return err;
}
@@ -679,24 +451,20 @@ out_cdev:
*/
void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
- int err;
-
- dbg_msg("free volume %d", vol->vol_id);
+ dbg_gen("free volume %d", vol->vol_id);
ubi->volumes[vol->vol_id] = NULL;
- err = ubi_destroy_gluebi(vol);
- ubi_volume_cdev_remove(vol);
- volume_sysfs_close(vol);
+ devfs_remove(&vol->cdev);
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-
/**
- * paranoid_check_volume - check volume information.
+ * self_check_volume - check volume information.
* @ubi: UBI device description object
* @vol_id: volume ID
+ *
+ * Returns zero if the volume is all right and a negative error code if not.
*/
-static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
+static int self_check_volume(struct ubi_device *ubi, int vol_id)
{
int idx = vol_id2idx(ubi, vol_id);
int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
@@ -704,7 +472,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
long long n;
const char *name;
- spin_lock(&ubi->volumes_lock);
reserved_pebs = be32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
vol = ubi->volumes[idx];
@@ -713,17 +480,7 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
ubi_err("no volume info, but volume exists");
goto fail;
}
- spin_unlock(&ubi->volumes_lock);
- return;
- }
-
- if (vol->exclusive) {
- /*
- * The volume may be being created at the moment, do not check
- * it (e.g., it may be in the middle of ubi_create_volume().
- */
- spin_unlock(&ubi->volumes_lock);
- return;
+ return 0;
}
if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
@@ -755,7 +512,7 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
}
if (vol->upd_marker && vol->corrupted) {
- dbg_err("update marker and corrupted simultaneously");
+ ubi_err("update marker and corrupted simultaneously");
goto fail;
}
@@ -775,11 +532,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
goto fail;
}
- if (!vol->name) {
- ubi_err("NULL volume name");
- goto fail;
- }
-
n = strnlen(vol->name, vol->name_len + 1);
if (n != vol->name_len) {
ubi_err("bad name_len %lld", n);
@@ -833,31 +585,40 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
if (alignment != vol->alignment || data_pad != vol->data_pad ||
upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
- name_len!= vol->name_len || strncmp(name, vol->name, name_len)) {
+ name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
ubi_err("volume info is different");
goto fail;
}
- spin_unlock(&ubi->volumes_lock);
- return;
+ return 0;
fail:
- ubi_err("paranoid check failed for volume %d", vol_id);
- ubi_dbg_dump_vol_info(vol);
- ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
- spin_unlock(&ubi->volumes_lock);
- BUG();
+ ubi_err("self-check failed for volume %d", vol_id);
+ if (vol)
+ ubi_dump_vol_info(vol);
+ ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ dump_stack();
+ return -EINVAL;
}
/**
- * paranoid_check_volumes - check information about all volumes.
+ * self_check_volumes - check information about all volumes.
* @ubi: UBI device description object
+ *
+ * Returns zero if the volumes are all right and a negative error code if not.
*/
-static void paranoid_check_volumes(struct ubi_device *ubi)
+static int self_check_volumes(struct ubi_device *ubi)
{
- int i;
+ int i, err = 0;
- for (i = 0; i < ubi->vtbl_slots; i++)
- paranoid_check_volume(ubi, i);
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ err = self_check_volume(ubi, i);
+ if (err)
+ break;
+ }
+
+ return err;
}
-#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 0468f59d15..596a19545b 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -12,6 +12,9 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
@@ -34,16 +37,15 @@
* LEB 1. This scheme guarantees recoverability from unclean reboots.
*
* In this UBI implementation the on-flash volume table does not contain any
- * information about how many data static volumes contain. This information may
- * be found from the scanning data.
+ * information about how much data static volumes contain.
*
* But it would still be beneficial to store this information in the volume
* table. For example, suppose we have a static volume X, and all its physical
* eraseblocks became bad for some reasons. Suppose we are attaching the
- * corresponding MTD device, the scanning has found no logical eraseblocks
+ * corresponding MTD device, and for some reason we find no logical eraseblocks
* corresponding to the volume X. According to the volume table volume X does
* exist. So we don't know whether it is just empty or all its physical
- * eraseblocks went bad. So we cannot alarm the user about this corruption.
+ * eraseblocks went bad. So we cannot alarm the user properly.
*
* The volume table also stores so-called "update marker", which is used for
* volume updates. Before updating the volume, the update marker is set, and
@@ -53,20 +55,10 @@
* damaged.
*/
-#ifdef UBI_LINUX
-#include <linux/crc32.h>
#include <linux/err.h>
-#include <asm/div64.h>
-#endif
-
-#include "ubi-barebox.h"
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static void paranoid_vtbl_check(const struct ubi_device *ubi);
-#else
-#define paranoid_vtbl_check(ubi)
-#endif
+static void self_vtbl_check(const struct ubi_device *ubi);
/* Empty volume table record */
static struct ubi_vtbl_record empty_vtbl_record;
@@ -106,18 +98,68 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
return err;
err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
- ubi->vtbl_size, UBI_LONGTERM);
+ ubi->vtbl_size);
+ if (err)
+ return err;
+ }
+
+ self_vtbl_check(ubi);
+ return 0;
+}
+
+/**
+ * ubi_vtbl_rename_volumes - rename UBI volumes in the volume table.
+ * @ubi: UBI device description object
+ * @rename_list: list of &struct ubi_rename_entry objects
+ *
+ * This function re-names multiple volumes specified in @rename_list in the
+ * volume
+ * table. Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
+ struct list_head *rename_list)
+{
+ int i, err;
+ struct ubi_rename_entry *re;
+ struct ubi_volume *layout_vol;
+
+ list_for_each_entry(re, rename_list, list) {
+ uint32_t crc;
+ struct ubi_volume *vol = re->desc->vol;
+ struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id];
+
+ if (re->remove) {
+ memcpy(vtbl_rec, &empty_vtbl_record,
+ sizeof(struct ubi_vtbl_record));
+ continue;
+ }
+
+ vtbl_rec->name_len = cpu_to_be16(re->new_name_len);
+ memcpy(vtbl_rec->name, re->new_name, re->new_name_len);
+ memset(vtbl_rec->name + re->new_name_len, 0,
+ UBI_VOL_NAME_MAX + 1 - re->new_name_len);
+ crc = crc32(UBI_CRC32_INIT, vtbl_rec,
+ UBI_VTBL_RECORD_SIZE_CRC);
+ vtbl_rec->crc = cpu_to_be32(crc);
+ }
+
+ layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
+ for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+ err = ubi_eba_unmap_leb(ubi, layout_vol, i);
+ if (err)
+ return err;
+
+ err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
+ ubi->vtbl_size);
if (err)
return err;
}
- paranoid_vtbl_check(ubi);
return 0;
}
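The caller-side pattern for ubi_change_vtbl_record() can be seen in ubi_resize_volume() earlier in this diff: take a plain copy of the record, patch the field of interest in big-endian form, and pass it back. A condensed sketch (the record CRC is re-stamped inside ubi_change_vtbl_record(), in code above this hunk; new_pebs is a placeholder):

	struct ubi_vtbl_record vtbl_rec;
	int err;

	vtbl_rec = ubi->vtbl[vol_id];			/* struct copy */
	vtbl_rec.reserved_pebs = cpu_to_be32(new_pebs);	/* patch one field */
	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);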
/**
- * vtbl_check - check if volume table is not corrupted and contains sensible
- * data.
+ * vtbl_check - check if volume table is not corrupted and sensible.
* @ubi: UBI device description object
* @vtbl: volume table
*
@@ -133,21 +175,19 @@ static int vtbl_check(const struct ubi_device *ubi,
const char *name;
for (i = 0; i < ubi->vtbl_slots; i++) {
- cond_resched();
-
reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
alignment = be32_to_cpu(vtbl[i].alignment);
data_pad = be32_to_cpu(vtbl[i].data_pad);
upd_marker = vtbl[i].upd_marker;
vol_type = vtbl[i].vol_type;
name_len = be16_to_cpu(vtbl[i].name_len);
- name = (const char *) &vtbl[i].name[0];
+ name = &vtbl[i].name[0];
crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
if (be32_to_cpu(vtbl[i].crc) != crc) {
ubi_err("bad CRC at record %u: %#08x, not %#08x",
i, crc, be32_to_cpu(vtbl[i].crc));
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[i], i);
return 1;
}
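Each record's CRC covers only its first UBI_VTBL_RECORD_SIZE_CRC bytes, i.e. everything except the trailing big-endian @crc field itself (in ubi-media.h, UBI_VTBL_RECORD_SIZE_CRC is the record size minus the CRC word). A single-record check therefore reduces to this sketch of the loop body above:

	uint32_t crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);

	if (be32_to_cpu(vtbl[i].crc) != crc)
		return 1;	/* this table copy is treated as corrupted */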
@@ -179,7 +219,7 @@ static int vtbl_check(const struct ubi_device *ubi,
n = ubi->leb_size % alignment;
if (data_pad != n) {
- dbg_err("bad data_pad, has to be %d", n);
+ ubi_err("bad data_pad, has to be %d", n);
err = 6;
goto bad;
}
@@ -195,8 +235,8 @@ static int vtbl_check(const struct ubi_device *ubi,
}
if (reserved_pebs > ubi->good_peb_count) {
- dbg_err("too large reserved_pebs, good PEBs %d",
- ubi->good_peb_count);
+ ubi_err("too large reserved_pebs %d, good PEBs %d",
+ reserved_pebs, ubi->good_peb_count);
err = 9;
goto bad;
}
@@ -224,11 +264,11 @@ static int vtbl_check(const struct ubi_device *ubi,
int len2 = be16_to_cpu(vtbl[n].name_len);
if (len1 > 0 && len1 == len2 &&
- !strncmp((char *)vtbl[i].name, (char *)vtbl[n].name, len1)) {
- ubi_err("volumes %d and %d have the same name"
- " \"%s\"", i, n, vtbl[i].name);
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
- ubi_dbg_dump_vtbl_record(&vtbl[n], n);
+ !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
+ ubi_err("volumes %d and %d have the same name \"%s\"",
+ i, n, vtbl[i].name);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[n], n);
return -EINVAL;
}
}
@@ -238,76 +278,64 @@ static int vtbl_check(const struct ubi_device *ubi,
bad:
ubi_err("volume table check failed: record %d, error %d", i, err);
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[i], i);
return -EINVAL;
}
/**
* create_vtbl - create a copy of volume table.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @copy: number of the volume table copy
* @vtbl: contents of the volume table
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
+static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
int copy, void *vtbl)
{
int err, tries = 0;
- static struct ubi_vid_hdr *vid_hdr;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *new_seb, *old_seb = NULL;
+ struct ubi_vid_hdr *vid_hdr;
+ struct ubi_ainf_peb *new_aeb;
- ubi_msg("create volume table (copy #%d)", copy + 1);
+ dbg_gen("create volume table (copy #%d)", copy + 1);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vid_hdr)
return -ENOMEM;
- /*
- * Check if there is a logical eraseblock which would have to contain
- * this volume table copy was found during scanning. It has to be wiped
- * out.
- */
- sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
- if (sv)
- old_seb = ubi_scan_find_seb(sv, copy);
-
retry:
- new_seb = ubi_scan_get_free_peb(ubi, si);
- if (IS_ERR(new_seb)) {
- err = PTR_ERR(new_seb);
+ new_aeb = ubi_early_get_peb(ubi, ai);
+ if (IS_ERR(new_aeb)) {
+ err = PTR_ERR(new_aeb);
goto out_free;
}
- vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE;
vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
vid_hdr->data_size = vid_hdr->used_ebs =
vid_hdr->data_pad = cpu_to_be32(0);
vid_hdr->lnum = cpu_to_be32(copy);
- vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
- vid_hdr->leb_ver = cpu_to_be32(old_seb ? old_seb->leb_ver + 1: 0);
+ vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
/* The EC header is already there, write the VID header */
- err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
+ err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr);
if (err)
goto write_error;
/* Write the layout volume contents */
- err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size);
+ err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
if (err)
goto write_error;
/*
- * And add it to the scanning information. Don't delete the old
- * @old_seb as it will be deleted and freed in 'ubi_scan_add_used()'.
+ * And add it to the attaching information. Don't delete the old version
+ * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
*/
- err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
- vid_hdr, 0);
- kfree(new_seb);
+ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
+ kfree(new_aeb);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -317,10 +345,10 @@ write_error:
* Probably this physical eraseblock went bad, try to pick
* another one.
*/
- list_add_tail(&new_seb->u.list, &si->corr);
+ list_add(&new_aeb->u.list, &ai->erase);
goto retry;
}
- kfree(new_seb);
+ kfree(new_aeb);
out_free:
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -330,20 +358,20 @@ out_free:
/**
* process_lvol - process the layout volume.
* @ubi: UBI device description object
- * @si: scanning information
- * @sv: layout volume scanning information
+ * @ai: attaching information
+ * @av: layout volume attaching information
*
* This function is responsible for reading the layout volume, ensuring it is
* not corrupted, and recovering from corruptions if needed. Returns volume
* table in case of success and a negative error code in case of failure.
*/
static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
- struct ubi_scan_info *si,
- struct ubi_scan_volume *sv)
+ struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av)
{
int err;
struct rb_node *rb;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
@@ -365,38 +393,37 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
* 0 contains more recent information.
*
* So the plan is to first check LEB 0. Then
- * a. if LEB 0 is OK, it must be containing the most resent data; then
+ * a. if LEB 0 is OK, it must be containing the most recent data; then
* we compare it with LEB 1, and if they are different, we copy LEB
* 0 to LEB 1;
* b. if LEB 0 is corrupted, but LEB 1 has to be OK, and we copy LEB 1
* to LEB 0.
*/
- dbg_msg("check layout volume");
+ dbg_gen("check layout volume");
/* Read both LEB 0 and LEB 1 into memory */
- ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- leb[seb->lnum] = vmalloc(ubi->vtbl_size);
- if (!leb[seb->lnum]) {
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
+ if (!leb[aeb->lnum]) {
err = -ENOMEM;
goto out_free;
}
- memset(leb[seb->lnum], 0, ubi->vtbl_size);
- err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
+ err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
ubi->vtbl_size);
- if (err == UBI_IO_BITFLIPS || err == -EBADMSG)
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
/*
* Scrub the PEB later. Note, -EBADMSG indicates an
* uncorrectable ECC error, but we have our own CRC and
* the data will be checked later. If the data is OK,
* the PEB will be scrubbed (because we set
- * seb->scrub). If the data is not OK, the contents of
+ * aeb->scrub). If the data is not OK, the contents of
* the PEB will be recovered from the second copy, and
- * seb->scrub will be cleared in
- * 'ubi_scan_add_used()'.
+ * aeb->scrub will be cleared in
+ * 'ubi_add_to_av()'.
*/
- seb->scrub = 1;
+ aeb->scrub = 1;
else if (err)
goto out_free;
}
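In brief, the recovery matrix implemented below: both copies good and identical, use LEB 0; LEB 0 good but LEB 1 different or bad, rewrite LEB 1 from LEB 0; LEB 0 bad but LEB 1 good, rewrite LEB 0 from LEB 1; both bad, the attach fails (the last case is implied by the error paths, which are not fully visible in this hunk).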
@@ -411,10 +438,11 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
if (!leb_corrupted[0]) {
/* LEB 0 is OK */
if (leb[1])
- leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size);
+ leb_corrupted[1] = memcmp(leb[0], leb[1],
+ ubi->vtbl_size);
if (leb_corrupted[1]) {
ubi_warn("volume table copy #2 is corrupted");
- err = create_vtbl(ubi, si, 1, leb[0]);
+ err = create_vtbl(ubi, ai, 1, leb[0]);
if (err)
goto out_free;
ubi_msg("volume table was restored");
@@ -437,7 +465,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
}
ubi_warn("volume table copy #1 is corrupted");
- err = create_vtbl(ubi, si, 0, leb[1]);
+ err = create_vtbl(ubi, ai, 0, leb[1]);
if (err)
goto out_free;
ubi_msg("volume table was restored");
@@ -455,21 +483,20 @@ out_free:
/**
* create_empty_lvol - create empty layout volume.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns volume table contents in case of success and a
* negative error code in case of failure.
*/
static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
- struct ubi_scan_info *si)
+ struct ubi_attach_info *ai)
{
int i;
struct ubi_vtbl_record *vtbl;
- vtbl = vmalloc(ubi->vtbl_size);
+ vtbl = vzalloc(ubi->vtbl_size);
if (!vtbl)
return ERR_PTR(-ENOMEM);
- memset(vtbl, 0, ubi->vtbl_size);
for (i = 0; i < ubi->vtbl_slots; i++)
memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
@@ -477,7 +504,7 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
int err;
- err = create_vtbl(ubi, si, i, vtbl);
+ err = create_vtbl(ubi, ai, i, vtbl);
if (err) {
vfree(vtbl);
return ERR_PTR(err);
@@ -490,23 +517,22 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
/**
* init_volumes - initialize volume information for existing volumes.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @vtbl: volume table
*
* This function allocates volume description objects for existing volumes.
* Returns zero in case of success and a negative error code in case of
* failure.
*/
-static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
+static int init_volumes(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai,
const struct ubi_vtbl_record *vtbl)
{
int i, reserved_pebs = 0;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
for (i = 0; i < ubi->vtbl_slots; i++) {
- cond_resched();
-
if (be32_to_cpu(vtbl[i].reserved_pebs) == 0)
continue; /* Empty record */
@@ -517,6 +543,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
vol->alignment = be32_to_cpu(vtbl[i].alignment);
vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
+ vol->upd_marker = vtbl[i].upd_marker;
vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
vol->name_len = be16_to_cpu(vtbl[i].name_len);
@@ -528,8 +555,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
/* Auto re-size flag may be set only for one volume */
if (ubi->autoresize_vol_id != -1) {
- ubi_err("more then one auto-resize volume (%d "
- "and %d)", ubi->autoresize_vol_id, i);
+ ubi_err("more than one auto-resize volume (%d and %d)",
+ ubi->autoresize_vol_id, i);
kfree(vol);
return -EINVAL;
}
@@ -556,8 +583,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
}
/* Static volumes only */
- sv = ubi_scan_find_sv(si, i);
- if (!sv) {
+ av = ubi_find_av(ai, i);
+ if (!av) {
/*
* No eraseblocks belonging to this volume found. We
* don't actually know whether this static volume is
@@ -569,22 +596,22 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
continue;
}
- if (sv->leb_count != sv->used_ebs) {
+ if (av->leb_count != av->used_ebs) {
/*
* We found a static volume which misses several
* eraseblocks. Treat it as corrupted.
*/
ubi_warn("static volume %d misses %d LEBs - corrupted",
- sv->vol_id, sv->used_ebs - sv->leb_count);
+ av->vol_id, av->used_ebs - av->leb_count);
vol->corrupted = 1;
continue;
}
- vol->used_ebs = sv->used_ebs;
+ vol->used_ebs = av->used_ebs;
vol->used_bytes =
(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
- vol->used_bytes += sv->last_data_size;
- vol->last_eb_bytes = sv->last_data_size;
+ vol->used_bytes += av->last_data_size;
+ vol->last_eb_bytes = av->last_data_size;
}
/* And add the layout volume */
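As a worked example of the used_bytes arithmetic above (numbers are illustrative): with usable_leb_size = 126976 bytes, used_ebs = 3 and last_data_size = 1000, the volume holds two full LEBs plus the tail, so used_bytes = 2 * 126976 + 1000 = 254952 bytes.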
@@ -593,7 +620,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
return -ENOMEM;
vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
- vol->alignment = 1;
+ vol->alignment = UBI_LAYOUT_VOLUME_ALIGN;
vol->vol_type = UBI_DYNAMIC_VOLUME;
vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
@@ -611,9 +638,13 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
ubi->vol_count += 1;
vol->ubi = ubi;
- if (reserved_pebs > ubi->avail_pebs)
+ if (reserved_pebs > ubi->avail_pebs) {
ubi_err("not enough PEBs, required %d, available %d",
reserved_pebs, ubi->avail_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err("%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ }
ubi->rsvd_pebs += reserved_pebs;
ubi->avail_pebs -= reserved_pebs;
@@ -621,105 +652,102 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
}
/**
- * check_sv - check volume scanning information.
+ * check_av - check volume attaching information.
* @vol: UBI volume description object
- * @sv: volume scanning information
+ * @av: volume attaching information
*
- * This function returns zero if the volume scanning information is consistent
+ * This function returns zero if the volume attaching information is consistent
 * with the data read from the volume table, and %-EINVAL if not.
*/
-static int check_sv(const struct ubi_volume *vol,
- const struct ubi_scan_volume *sv)
+static int check_av(const struct ubi_volume *vol,
+ const struct ubi_ainf_volume *av)
{
int err;
- if (sv->highest_lnum >= vol->reserved_pebs) {
+ if (av->highest_lnum >= vol->reserved_pebs) {
err = 1;
goto bad;
}
- if (sv->leb_count > vol->reserved_pebs) {
+ if (av->leb_count > vol->reserved_pebs) {
err = 2;
goto bad;
}
- if (sv->vol_type != vol->vol_type) {
+ if (av->vol_type != vol->vol_type) {
err = 3;
goto bad;
}
- if (sv->used_ebs > vol->reserved_pebs) {
+ if (av->used_ebs > vol->reserved_pebs) {
err = 4;
goto bad;
}
- if (sv->data_pad != vol->data_pad) {
+ if (av->data_pad != vol->data_pad) {
err = 5;
goto bad;
}
return 0;
bad:
- ubi_err("bad scanning information, error %d", err);
- ubi_dbg_dump_sv(sv);
- ubi_dbg_dump_vol_info(vol);
+ ubi_err("bad attaching information, error %d", err);
+ ubi_dump_av(av);
+ ubi_dump_vol_info(vol);
return -EINVAL;
}
/**
- * check_scanning_info - check that scanning information.
+ * check_attaching_info - check the attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* Even though we protect on-flash data by CRC checksums, we still don't trust
- * the media. This function ensures that scanning information is consistent to
- * the information read from the volume table. Returns zero if the scanning
+ * the media. This function ensures that attaching information is consistent to
+ * the information read from the volume table. Returns zero if the attaching
* information is OK and %-EINVAL if it is not.
*/
-static int check_scanning_info(const struct ubi_device *ubi,
- struct ubi_scan_info *si)
+static int check_attaching_info(const struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
{
int err, i;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
- if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
- ubi_err("scanning found %d volumes, maximum is %d + %d",
- si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
+ if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
+ ubi_err("found %d volumes while attaching, maximum is %d + %d",
+ ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
return -EINVAL;
}
- if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
- si->highest_vol_id < UBI_INTERNAL_VOL_START) {
- ubi_err("too large volume ID %d found by scanning",
- si->highest_vol_id);
+ if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
+ ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err("too large volume ID %d found", ai->highest_vol_id);
return -EINVAL;
}
for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
- cond_resched();
-
- sv = ubi_scan_find_sv(si, i);
+ av = ubi_find_av(ai, i);
vol = ubi->volumes[i];
if (!vol) {
- if (sv)
- ubi_scan_rm_volume(si, sv);
+ if (av)
+ ubi_remove_av(ai, av);
continue;
}
if (vol->reserved_pebs == 0) {
ubi_assert(i < ubi->vtbl_slots);
- if (!sv)
+ if (!av)
continue;
/*
- * During scanning we found a volume which does not
+ * During attaching we found a volume which does not
* exist according to the information in the volume
* table. This must have happened due to an unclean
* reboot while the volume was being removed. Discard
* these eraseblocks.
*/
- ubi_msg("finish volume %d removal", sv->vol_id);
- ubi_scan_rm_volume(si, sv);
- } else if (sv) {
- err = check_sv(vol, sv);
+ ubi_msg("finish volume %d removal", av->vol_id);
+ ubi_remove_av(ai, av);
+ } else if (av) {
+ err = check_av(vol, av);
if (err)
return err;
}
@@ -729,19 +757,18 @@ static int check_scanning_info(const struct ubi_device *ubi,
}
/**
- * ubi_read_volume_table - read volume table.
- * information.
+ * ubi_read_volume_table - read the volume table.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function reads volume table, checks it, recover from errors if needed,
* or creates it if needed. Returns zero in case of success and a negative
* error code in case of failure.
*/
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int i, err;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
@@ -756,8 +783,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
- sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
- if (!sv) {
+ av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID);
+ if (!av) {
/*
* No logical eraseblocks belonging to the layout volume were
* found. This could mean that the flash is just empty. In
@@ -766,8 +793,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
* But if flash is not empty this must be a corruption or the
* MTD device just contains garbage.
*/
- if (si->is_empty) {
- ubi->vtbl = create_empty_lvol(ubi, si);
+ if (ai->is_empty) {
+ ubi->vtbl = create_empty_lvol(ubi, ai);
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
} else {
@@ -775,33 +802,33 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
return -EINVAL;
}
} else {
- if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) {
+ if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
/* This must not happen with proper UBI images */
- dbg_err("too many LEBs (%d) in layout volume",
- sv->leb_count);
+ ubi_err("too many LEBs (%d) in layout volume",
+ av->leb_count);
return -EINVAL;
}
- ubi->vtbl = process_lvol(ubi, si, sv);
+ ubi->vtbl = process_lvol(ubi, ai, av);
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
}
- ubi->avail_pebs = ubi->good_peb_count;
+ ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count;
/*
* The layout volume is OK, initialize the corresponding in-RAM data
* structures.
*/
- err = init_volumes(ubi, si, ubi->vtbl);
+ err = init_volumes(ubi, ai, ubi->vtbl);
if (err)
goto out_free;
/*
- * Get sure that the scanning information is consistent to the
+ * Make sure that the attaching information is consistent to the
* information stored in the volume table.
*/
- err = check_scanning_info(ubi, si);
+ err = check_attaching_info(ubi, ai);
if (err)
goto out_free;
@@ -809,26 +836,24 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
out_free:
vfree(ubi->vtbl);
- for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++)
- if (ubi->volumes[i]) {
- kfree(ubi->volumes[i]);
- ubi->volumes[i] = NULL;
- }
+ for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ kfree(ubi->volumes[i]);
+ ubi->volumes[i] = NULL;
+ }
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-
/**
- * paranoid_vtbl_check - check volume table.
+ * self_vtbl_check - check volume table.
* @ubi: UBI device description object
*/
-static void paranoid_vtbl_check(const struct ubi_device *ubi)
+static void self_vtbl_check(const struct ubi_device *ubi)
{
+ if (!ubi_dbg_chk_gen(ubi))
+ return;
+
if (vtbl_check(ubi, ubi->vtbl)) {
- ubi_err("paranoid check failed");
+ ubi_err("self-check failed");
BUG();
}
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 7f99ac26a6..d0d3c97ecf 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -11,102 +11,115 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
*/
/*
- * UBI wear-leveling unit.
+ * UBI wear-leveling sub-system.
*
- * This unit is responsible for wear-leveling. It works in terms of physical
- * eraseblocks and erase counters and knows nothing about logical eraseblocks,
- * volumes, etc. From this unit's perspective all physical eraseblocks are of
- * two types - used and free. Used physical eraseblocks are those that were
- * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are
- * those that were put by the 'ubi_wl_put_peb()' function.
+ * This sub-system is responsible for wear-leveling. It works in terms of
+ * physical eraseblocks and erase counters and knows nothing about logical
+ * eraseblocks, volumes, etc. From this sub-system's perspective all physical
+ * eraseblocks are of two types - used and free. Used physical eraseblocks are
+ * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
+ * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
*
* Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter
- * header. The rest of the physical eraseblock contains only 0xFF bytes.
+ * header. The rest of the physical eraseblock contains only %0xFF bytes.
*
- * When physical eraseblocks are returned to the WL unit by means of the
+ * When physical eraseblocks are returned to the WL sub-system by means of the
* 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
* done asynchronously in context of the per-UBI device background thread,
- * which is also managed by the WL unit.
+ * which is also managed by the WL sub-system.
*
* The wear-leveling is ensured by means of moving the contents of used
* physical eraseblocks with low erase counter to free physical eraseblocks
* with high erase counter.
*
- * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
- * an "optimal" physical eraseblock. For example, when it is known that the
- * physical eraseblock will be "put" soon because it contains short-term data,
- * the WL unit may pick a free physical eraseblock with low erase counter, and
- * so forth.
+ * If the WL sub-system fails to erase a physical eraseblock, it marks it as
+ * bad.
*
- * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
+ * This sub-system is also responsible for scrubbing. If a bit-flip is detected
+ * in a physical eraseblock, it has to be moved. Technically this is the same
+ * as moving it for wear-leveling reasons.
*
- * This unit is also responsible for scrubbing. If a bit-flip is detected in a
- * physical eraseblock, it has to be moved. Technically this is the same as
- * moving it for wear-leveling reasons.
+ * As it was said, for the UBI sub-system all physical eraseblocks are either
+ * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
+ * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
+ * RB-trees, as well as (temporarily) in the @wl->pq queue.
*
- * As it was said, for the UBI unit all physical eraseblocks are either "free"
- * or "used". Free eraseblock are kept in the @wl->free RB-tree, while used
- * eraseblocks are kept in a set of different RB-trees: @wl->used,
- * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
+ * When the WL sub-system returns a physical eraseblock, the physical
+ * eraseblock is protected from being moved for some "time". For this reason,
+ * the physical eraseblock is not directly moved from the @wl->free tree to the
+ * @wl->used tree. There is a protection queue in between where this
+ * physical eraseblock is temporarily stored (@wl->pq).
+ *
+ * All this protection stuff is needed because:
+ * o we don't want to move physical eraseblocks just after we have given them
+ * to the user; instead, we first want to let users fill them up with data;
+ *
+ * o there is a chance that the user will put the physical eraseblock very
+ * soon, so it makes sense not to move it for some time, but wait.
+ *
+ * Physical eraseblocks stay protected only for a limited time. But the "time" is
+ * measured in erase cycles in this case. This is implemented with help of the
+ * protection queue. Eraseblocks are put to the tail of this queue when they
+ * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
+ * head of the queue on each erase operation (for any eraseblock). So the
+ * length of the queue defines how many (global) erase cycles PEBs are protected.
+ *
+ * To put it differently, each physical eraseblock has 2 main states: free and
+ * used. The former state corresponds to the @wl->free tree. The latter state
+ * is split up on several sub-states:
+ * o the WL movement is allowed (@wl->used tree);
+ * o the WL movement is disallowed (@wl->erroneous) because the PEB is
+ * erroneous - e.g., there was a read error;
+ * o the WL movement is temporarily prohibited (@wl->pq queue);
+ * o scrubbing is needed (@wl->scrub tree).
+ *
+ * Depending on the sub-state, wear-leveling entries of the used physical
+ * eraseblocks may be kept in one of those structures.
*
* Note, in this implementation, we keep a small in-RAM object for each physical
* eraseblock. This is surely not a scalable solution. But it appears to be good
* enough for moderately large flashes and it is simple. In future, one may
- * re-work this unit and make it more scalable.
+ * re-work this sub-system and make it more scalable.
*
- * At the moment this unit does not utilize the sequence number, which was
- * introduced relatively recently. But it would be wise to do this because the
- * sequence number of a logical eraseblock characterizes how old is it. For
+ * At the moment this sub-system does not utilize the sequence number, which
+ * was introduced relatively recently. But it would be wise to do this because
+ * the sequence number of a logical eraseblock characterizes how old it is. For
* example, when we move a PEB with low erase counter, and we need to pick the
* target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
* pick target PEB with an average EC if our PEB is not very "old". This is a
- * room for future re-works of the WL unit.
- *
- * FIXME: looks too complex, should be simplified (later).
+ * room for future re-works of the WL sub-system.
*/
-#ifdef UBI_LINUX
-#include <linux/slab.h>
-#include <linux/crc32.h>
-#include <linux/freezer.h>
-#include <linux/kthread.h>
-#endif
-
-#include "ubi-barebox.h"
#include "ubi.h"
/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1
/*
- * How many erase cycles are short term, unknown, and long term physical
- * eraseblocks protected.
- */
-#define ST_PROTECTION 16
-#define U_PROTECTION 10
-#define LT_PROTECTION 4
-
-/*
* Maximum difference between two erase counters. If this threshold is
- * exceeded, the WL unit starts moving data from used physical eraseblocks with
- * low erase counter to free physical eraseblocks with high erase counter.
+ * exceeded, the WL sub-system starts moving data from used physical
+ * eraseblocks with low erase counter to free physical eraseblocks with high
+ * erase counter.
*/
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
/*
- * When a physical eraseblock is moved, the WL unit has to pick the target
+ * When a physical eraseblock is moved, the WL sub-system has to pick the target
* physical eraseblock to move to. The simplest way would be just to pick the
* one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
* situation when the picked physical eraseblock is constantly erased after the
* data is written to it. So, we have a constant which limits the highest erase
- * counter of the free physical eraseblock to pick. Namely, the WL unit does
- * not pick eraseblocks with erase counter greater then the lowest erase
+ * counter of the free physical eraseblock to pick. Namely, the WL sub-system
+ * does not pick eraseblocks with erase counter greater than the lowest erase
* counter plus %WL_FREE_MAX_DIFF.
*/
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
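A compact model of the protection queue described in the comment block above (a sketch, not code from this patch; in Linux UBI the ageing side is done by serve_prot_queue(), which does not appear in this diff):

	/*
	 * One step of queue ageing: each erase operation advances the
	 * head by one slot, and every entry found in that slot loses
	 * its protection and becomes eligible for wear-leveling again.
	 */
	static void protection_age_step(struct ubi_device *ubi)
	{
		struct ubi_wl_entry *e, *tmp;

		list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
			list_del(&e->u.list);
			wl_tree_add(e, &ubi->used);	/* protection expired */
		}
		if (++ubi->pq_head == UBI_PROT_QUEUE_LEN)
			ubi->pq_head = 0;
	}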
@@ -117,89 +130,36 @@
*/
#define WL_MAX_FAILURES 32
-/**
- * struct ubi_wl_prot_entry - PEB protection entry.
- * @rb_pnum: link in the @wl->prot.pnum RB-tree
- * @rb_aec: link in the @wl->prot.aec RB-tree
- * @abs_ec: the absolute erase counter value when the protection ends
- * @e: the wear-leveling entry of the physical eraseblock under protection
- *
- * When the WL unit returns a physical eraseblock, the physical eraseblock is
- * protected from being moved for some "time". For this reason, the physical
- * eraseblock is not directly moved from the @wl->free tree to the @wl->used
- * tree. There is one more tree in between where this physical eraseblock is
- * temporarily stored (@wl->prot).
- *
- * All this protection stuff is needed because:
- * o we don't want to move physical eraseblocks just after we have given them
- * to the user; instead, we first want to let users fill them up with data;
- *
- * o there is a chance that the user will put the physical eraseblock very
- * soon, so it makes sense not to move it for some time, but wait; this is
- * especially important in case of "short term" physical eraseblocks.
- *
- * Physical eraseblocks stay protected only for limited time. But the "time" is
- * measured in erase cycles in this case. This is implemented with help of the
- * absolute erase counter (@wl->abs_ec). When it reaches certain value, the
- * physical eraseblocks are moved from the protection trees (@wl->prot.*) to
- * the @wl->used tree.
- *
- * Protected physical eraseblocks are searched by physical eraseblock number
- * (when they are put) and by the absolute erase counter (to check if it is
- * time to move them to the @wl->used tree). So there are actually 2 RB-trees
- * storing the protected physical eraseblocks: @wl->prot.pnum and
- * @wl->prot.aec. They are referred to as the "protection" trees. The
- * first one is indexed by the physical eraseblock number. The second one is
- * indexed by the absolute erase counter. Both trees store
- * &struct ubi_wl_prot_entry objects.
- *
- * Each physical eraseblock has 2 main states: free and used. The former state
- * corresponds to the @wl->free tree. The latter state is split up on several
- * sub-states:
- * o the WL movement is allowed (@wl->used tree);
- * o the WL movement is temporarily prohibited (@wl->prot.pnum and
- * @wl->prot.aec trees);
- * o scrubbing is needed (@wl->scrub tree).
- *
- * Depending on the sub-state, wear-leveling entries of the used physical
- * eraseblocks may be kept in one of those trees.
- */
-struct ubi_wl_prot_entry {
- struct rb_node rb_pnum;
- struct rb_node rb_aec;
- unsigned long long abs_ec;
- struct ubi_wl_entry *e;
-};
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root);
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e);
+#ifdef CONFIG_MTD_UBI_FASTMAP
/**
- * struct ubi_work - UBI work description data structure.
- * @list: a link in the list of pending works
- * @func: worker function
- * @priv: private data of the worker function
- *
- * @e: physical eraseblock to erase
- * @torture: if the physical eraseblock has to be tortured
- *
- * The @func pointer points to the worker function. If the @cancel argument is
- * not zero, the worker has to free the resources and exit immediately. The
- * worker has to return zero in case of success and a negative error code in
- * case of failure.
+ * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
+ * @ubi: UBI device description object
+ * @pnum: the PEB to check
*/
-struct ubi_work {
- struct list_head list;
- int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
- /* The below fields are only relevant to erasure works */
- struct ubi_wl_entry *e;
- int torture;
-};
+static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+ int i;
+
+ if (!ubi->fm)
+ return 0;
+
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ if (ubi->fm->e[i]->pnum == pnum)
+ return 1;
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
-static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
- struct rb_root *root);
+ return 0;
+}
#else
-#define paranoid_check_ec(ubi, pnum, ec) 0
-#define paranoid_check_in_wl_tree(e, root)
+static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+ return 0;
+}
#endif
/**
@@ -219,7 +179,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
struct ubi_wl_entry *e1;
parent = *p;
- e1 = rb_entry(parent, struct ubi_wl_entry, rb);
+ e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
if (e->ec < e1->ec)
p = &(*p)->rb_left;
@@ -234,8 +194,8 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
}
}
- rb_link_node(&e->rb, parent, p);
- rb_insert_color(&e->rb, root);
+ rb_link_node(&e->u.rb, parent, p);
+ rb_insert_color(&e->u.rb, root);
}
/**
@@ -250,27 +210,19 @@ static int do_work(struct ubi_device *ubi)
int err;
struct ubi_work *wrk;
- cond_resched();
-
/*
* @ubi->work_sem is used to synchronize with the workers. Workers take
* it in read mode, so many of them may be doing works at a time. But
* the queue flush code has to be sure the whole queue of works is
* done, and it takes the mutex in write mode.
*/
- down_read(&ubi->work_sem);
- spin_lock(&ubi->wl_lock);
- if (list_empty(&ubi->works)) {
- spin_unlock(&ubi->wl_lock);
- up_read(&ubi->work_sem);
+ if (list_empty(&ubi->works))
return 0;
- }
wrk = list_entry(ubi->works.next, struct ubi_work, list);
list_del(&wrk->list);
ubi->works_count -= 1;
ubi_assert(ubi->works_count >= 0);
- spin_unlock(&ubi->wl_lock);
/*
* Call the worker function. Do not touch the work structure
@@ -280,7 +232,6 @@ static int do_work(struct ubi_device *ubi)
err = wrk->func(ubi, wrk, 0);
if (err)
ubi_err("work failed with error code %d", err);
- up_read(&ubi->work_sem);
return err;
}
@@ -298,18 +249,12 @@ static int produce_free_peb(struct ubi_device *ubi)
{
int err;
- spin_lock(&ubi->wl_lock);
while (!ubi->free.rb_node) {
- spin_unlock(&ubi->wl_lock);
-
dbg_wl("do one work synchronously");
err = do_work(ubi);
if (err)
return err;
-
- spin_lock(&ubi->wl_lock);
}
- spin_unlock(&ubi->wl_lock);
return 0;
}
@@ -330,7 +275,7 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
while (p) {
struct ubi_wl_entry *e1;
- e1 = rb_entry(p, struct ubi_wl_entry, rb);
+ e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
if (e->pnum == e1->pnum) {
ubi_assert(e == e1);
@@ -354,223 +299,393 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
}
/**
- * prot_tree_add - add physical eraseblock to protection trees.
+ * prot_queue_add - add physical eraseblock to the protection queue.
* @ubi: UBI device description object
* @e: the physical eraseblock to add
- * @pe: protection entry object to use
- * @abs_ec: absolute erase counter value when this physical eraseblock has
- * to be removed from the protection trees.
*
- * @wl->lock has to be locked.
+ * This function adds @e to the tail of the protection queue @ubi->pq, where
+ * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
+ * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
+ * be locked.
*/
-static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
- struct ubi_wl_prot_entry *pe, int abs_ec)
+static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
- struct rb_node **p, *parent = NULL;
- struct ubi_wl_prot_entry *pe1;
-
- pe->e = e;
- pe->abs_ec = ubi->abs_ec + abs_ec;
-
- p = &ubi->prot.pnum.rb_node;
- while (*p) {
- parent = *p;
- pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
-
- if (e->pnum < pe1->e->pnum)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&pe->rb_pnum, parent, p);
- rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
-
- p = &ubi->prot.aec.rb_node;
- parent = NULL;
- while (*p) {
- parent = *p;
- pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
+ int pq_tail = ubi->pq_head - 1;
- if (pe->abs_ec < pe1->abs_ec)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&pe->rb_aec, parent, p);
- rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
+ if (pq_tail < 0)
+ pq_tail = UBI_PROT_QUEUE_LEN - 1;
+ ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
+ list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
+ dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
/**
* find_wl_entry - find wear-leveling entry closest to certain erase counter.
+ * @ubi: UBI device description object
* @root: the RB-tree where to look for
- * @max: highest possible erase counter
+ * @diff: maximum possible difference from the smallest erase counter
*
* This function looks for a wear leveling entry with erase counter closest to
- * @max and less then @max.
+ * min + @diff, where min is the smallest erase counter.
*/
-static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
+static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root, int diff)
{
struct rb_node *p;
- struct ubi_wl_entry *e;
+ struct ubi_wl_entry *e, *prev_e = NULL;
+ int max;
- e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
- max += e->ec;
+ e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+ max = e->ec + diff;
p = root->rb_node;
while (p) {
struct ubi_wl_entry *e1;
- e1 = rb_entry(p, struct ubi_wl_entry, rb);
+ e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
if (e1->ec >= max)
p = p->rb_left;
else {
p = p->rb_right;
+ prev_e = e;
e = e1;
}
}
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best WL entry
+ * such that fastmap can use the anchor PEB later. */
+ if (prev_e && !ubi->fm_disabled &&
+ !ubi->fm && e->pnum < UBI_FM_MAX_START)
+ return prev_e;
+
return e;
}
/**
- * ubi_wl_get_peb - get a physical eraseblock.
+ * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
* @ubi: UBI device description object
- * @dtype: type of data which will be stored in this physical eraseblock
+ * @root: the RB-tree where to look for
*
- * This function returns a physical eraseblock in case of success and a
- * negative error code in case of failure. Might sleep.
+ * This function looks for a wear leveling entry with medium erase counter,
+ * but not greater than or equal to the lowest erase counter plus
+ * %WL_FREE_MAX_DIFF/2.
*/
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
+static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root)
{
- int err, protect, medium_ec;
struct ubi_wl_entry *e, *first, *last;
- struct ubi_wl_prot_entry *pe;
- ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
- dtype == UBI_UNKNOWN);
+ first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+ last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
- pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
- if (!pe)
- return -ENOMEM;
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
+ e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best
+ * WL entry such that fastmap can use the anchor PEB later. */
+ if (e && !ubi->fm_disabled && !ubi->fm &&
+ e->pnum < UBI_FM_MAX_START)
+ e = rb_entry(rb_next(root->rb_node),
+ struct ubi_wl_entry, u.rb);
+#endif
+ } else
+ e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+
+ return e;
+}
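Concretely (illustrative numbers): with CONFIG_MTD_UBI_WL_THRESHOLD set to 4096, WL_FREE_MAX_DIFF is 8192. If the free tree spans erase counters 100..5000, the spread is below the limit and the tree's root entry is taken as a cheap "medium" pick; if it spans 100..20000, find_wl_entry() instead walks the tree for an entry close to 100 + 4096.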
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * find_anchor_wl_entry - find wear-leveling entry to be used as anchor PEB.
+ * @root: the RB-tree where to look for
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e, *victim = NULL;
+ int max_ec = UBI_MAX_ERASECOUNTER;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb) {
+ if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+ victim = e;
+ max_ec = e->ec;
+ }
+ }
+
+ return victim;
+}
+
+static int anchor_pebs_avalible(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb)
+ if (e->pnum < UBI_FM_MAX_START)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a free physical eraseblock (when @anchor is set, one
+ * with a PEB number below UBI_FM_MAX_START, usable as fastmap anchor) and
+ * removes it from the WL sub-system.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+ struct ubi_wl_entry *e = NULL;
+
+ if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ goto out;
+
+ if (anchor)
+ e = find_anchor_wl_entry(&ubi->free);
+ else
+ e = find_mean_wl_entry(ubi, &ubi->free);
+
+ if (!e)
+ goto out;
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+ /* remove it from the free list; the WL sub-system no longer
+ * knows about this eraseblock */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+out:
+ return e;
+}
+#endif
+
+/**
+ * __wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ */
+static int __wl_get_peb(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_wl_entry *e;
retry:
- spin_lock(&ubi->wl_lock);
if (!ubi->free.rb_node) {
if (ubi->works_count == 0) {
- ubi_assert(list_empty(&ubi->works));
ubi_err("no free eraseblocks");
- spin_unlock(&ubi->wl_lock);
- kfree(pe);
+ ubi_assert(list_empty(&ubi->works));
return -ENOSPC;
}
- spin_unlock(&ubi->wl_lock);
err = produce_free_peb(ubi);
- if (err < 0) {
- kfree(pe);
+ if (err < 0)
return err;
- }
goto retry;
}
- switch (dtype) {
- case UBI_LONGTERM:
- /*
- * For long term data we pick a physical eraseblock
- * with high erase counter. But the highest erase
- * counter we can pick is bounded by the the lowest
- * erase counter plus %WL_FREE_MAX_DIFF.
- */
- e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- protect = LT_PROTECTION;
- break;
- case UBI_UNKNOWN:
- /*
- * For unknown data we pick a physical eraseblock with
- * medium erase counter. But we by no means can pick a
- * physical eraseblock with erase counter greater or
- * equivalent than the lowest erase counter plus
- * %WL_FREE_MAX_DIFF.
- */
- first = rb_entry(rb_first(&ubi->free),
- struct ubi_wl_entry, rb);
- last = rb_entry(rb_last(&ubi->free),
- struct ubi_wl_entry, rb);
-
- if (last->ec - first->ec < WL_FREE_MAX_DIFF)
- e = rb_entry(ubi->free.rb_node,
- struct ubi_wl_entry, rb);
- else {
- medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
- e = find_wl_entry(&ubi->free, medium_ec);
- }
- protect = U_PROTECTION;
- break;
- case UBI_SHORTTERM:
- /*
- * For short term data we pick a physical eraseblock
- * with the lowest erase counter as we expect it will
- * be erased soon.
- */
- e = rb_entry(rb_first(&ubi->free),
- struct ubi_wl_entry, rb);
- protect = ST_PROTECTION;
- break;
- default:
- protect = 0;
- e = NULL;
- BUG();
+ e = find_mean_wl_entry(ubi, &ubi->free);
+ if (!e) {
+ ubi_err("no free eraseblocks");
+ return -ENOSPC;
}
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
/*
- * Move the physical eraseblock to the protection trees where it will
+ * Move the physical eraseblock to the protection queue where it will
* be protected from being moved for some time.
*/
- paranoid_check_in_wl_tree(e, &ubi->free);
- rb_erase(&e->rb, &ubi->free);
- prot_tree_add(ubi, e, pe, protect);
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+ dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+#ifndef CONFIG_MTD_UBI_FASTMAP
+ /* We have to enqueue e only if fastmap is disabled;
+ * if fastmap is enabled, prot_queue_add() will be called by
+ * ubi_wl_get_peb() after removing e from the pool. */
+ prot_queue_add(ubi, e);
+#endif
+ return e->pnum;
+}
- dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
- spin_unlock(&ubi->wl_lock);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * return_unused_pool_pebs - returns unused PEBs to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+ struct ubi_fm_pool *pool)
+{
+ int i;
+ struct ubi_wl_entry *e;
- return e->pnum;
+ for (i = pool->used; i < pool->size; i++) {
+ e = ubi->lookuptbl[pool->pebs[i]];
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
}
/**
- * prot_tree_del - remove a physical eraseblock from the protection trees
+ * refill_wl_pool - refills the fastmap pool used by the WL sub-system.
* @ubi: UBI device description object
- * @pnum: the physical eraseblock to remove
+ */
+static void refill_wl_pool(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+
+ return_unused_pool_pebs(ubi, pool);
+
+ for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ break;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+
+ pool->pebs[pool->size] = e->pnum;
+ }
+ pool->used = 0;
+}
+
+/**
+ * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb.
+ * @ubi: UBI device description object
+ */
+static void refill_wl_user_pool(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+
+ return_unused_pool_pebs(ubi, pool);
+
+ for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ break;
+
+ pool->pebs[pool->size] = __wl_get_peb(ubi);
+ if (pool->pebs[pool->size] < 0)
+ break;
+ }
+ pool->used = 0;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+ refill_wl_pool(ubi);
+ refill_wl_user_pool(ubi);
+}
+
+/* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
+ * the fastmap pool.
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int ret;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+ if (!pool->size || !wl_pool->size || pool->used == pool->size ||
+ wl_pool->used == wl_pool->size)
+ ubi_update_fastmap(ubi);
+
+	/* we did not get a single free PEB */
+ if (!pool->size)
+ ret = -ENOSPC;
+ else {
+ ret = pool->pebs[pool->used++];
+ prot_queue_add(ubi, ubi->lookuptbl[ret]);
+ }
+
+ return ret;
+}
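Both fastmap pools follow the same life cycle: ubi_update_fastmap() refills them, allocations drain them, and an empty pool triggers the next refill. A self-contained sketch of that cycle under simplified types (struct peb_pool and both function names are illustrative; the margin parameter stands in for the hard-coded guards of 5 and 1 PEBs above the bad-block reserve seen in the two refill functions):

#define POOL_MAX	64	/* assumption: the real max_size is set at attach time */

struct peb_pool {
	int pebs[POOL_MAX];
	int size;	/* number of valid entries */
	int used;	/* index of the next entry to hand out */
};

/* Refill from a stack of free PEBs, but never eat into the PEBs
 * reserved for bad block handling. */
static void pool_refill(struct peb_pool *pool, int *free_stack,
			int *free_count, int beb_rsvd_pebs, int margin)
{
	pool->size = 0;
	while (pool->size < POOL_MAX &&
	       *free_count - beb_rsvd_pebs >= margin)
		pool->pebs[pool->size++] = free_stack[--*free_count];
	pool->used = 0;
}

/* Hand out the next pooled PEB; an empty pool triggers a fastmap
 * update, which refills the pools as a side effect. */
static int pool_get(struct peb_pool *pool,
		    void (*update_fastmap)(struct peb_pool *))
{
	if (pool->used == pool->size) {
		update_fastmap(pool);	/* expected to refill the pool */
		if (pool->used == pool->size)
			return -1;	/* -ENOSPC in ubi_wl_get_peb() */
	}
	return pool->pebs[pool->used++];
}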
+
+/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
*
- * This function returns PEB @pnum from the protection trees and returns zero
- * in case of success and %-ENODEV if the PEB was not found in the protection
- * trees.
+ * @ubi: UBI device description object
*/
-static int prot_tree_del(struct ubi_device *ubi, int pnum)
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
- struct rb_node *p;
- struct ubi_wl_prot_entry *pe = NULL;
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
- p = ubi->prot.pnum.rb_node;
- while (p) {
+ if (pool->used == pool->size || !pool->size) {
+ ubi_update_fastmap(ubi);
+ if (pool->used == pool->size || !pool->size)
+ BUG();
+ }
- pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
+ pnum = pool->pebs[pool->used++];
- if (pnum == pe->e->pnum)
- goto found;
+ return ubi->lookuptbl[pnum];
+}
+#else
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
- if (pnum < pe->e->pnum)
- p = p->rb_left;
- else
- p = p->rb_right;
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+
+ return e;
+}
+
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int peb, err;
+
+ peb = __wl_get_peb(ubi);
+
+ err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
+ ubi->peb_size - ubi->vid_hdr_aloffset);
+ if (err) {
+ ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
+ return err;
}
- return -ENODEV;
+ return peb;
+}
+#endif
+
+/**
+ * prot_queue_del - remove a physical eraseblock from the protection queue.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to remove
+ *
+ * This function deletes PEB @pnum from the protection queue and returns zero
+ * in case of success and %-ENODEV if the PEB was not found.
+ */
+static int prot_queue_del(struct ubi_device *ubi, int pnum)
+{
+ struct ubi_wl_entry *e;
+
+ e = ubi->lookuptbl[pnum];
+ if (!e)
+ return -ENODEV;
-found:
- ubi_assert(pe->e->pnum == pnum);
- rb_erase(&pe->rb_aec, &ubi->prot.aec);
- rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
- kfree(pe);
+ if (self_check_in_pq(ubi, e))
+ return -ENODEV;
+
+ list_del(&e->u.list);
+ dbg_wl("deleted PEB %d from the protection queue", e->pnum);
return 0;
}
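prot_queue_del() shows what ubi->lookuptbl, a flat array indexed by PEB number, buys: deleting from the protection queue is a table lookup plus an O(1) list unlink, with no tree search. The same pattern on a hand-rolled circular list (pq_entry and pq_del are illustrative; like the kernel's list_head, the buckets are assumed to have sentinel heads so prev/next are never NULL):

#include <stddef.h>

struct pq_entry {
	int pnum;
	struct pq_entry *next, *prev;	/* links within a queue bucket */
};

/* O(1) delete: find the entry by physical eraseblock number in the
 * lookup table, then unlink it from whatever bucket holds it. */
static int pq_del(struct pq_entry **lookuptbl, int pnum)
{
	struct pq_entry *e = lookuptbl[pnum];

	if (!e)
		return -1;	/* -ENODEV in the real code */

	e->prev->next = e->next;
	e->next->prev = e->prev;
	return 0;
}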
@@ -583,7 +698,8 @@ found:
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
+static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int torture)
{
int err;
struct ubi_ec_hdr *ec_hdr;
@@ -591,8 +707,8 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int tortur
dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
- err = paranoid_check_ec(ubi, e->pnum, e->ec);
- if (err > 0)
+ err = self_check_ec(ubi, e->pnum, e->ec);
+ if (err)
return -EINVAL;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -624,10 +740,8 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int tortur
goto out_free;
e->ec = ec;
- spin_lock(&ubi->wl_lock);
if (e->ec > ubi->max_ec)
ubi->max_ec = e->ec;
- spin_unlock(&ubi->wl_lock);
out_free:
kfree(ec_hdr);
@@ -635,91 +749,108 @@ out_free:
}
/**
- * check_protection_over - check if it is time to stop protecting some
- * physical eraseblocks.
+ * serve_prot_queue - check if it is time to stop protecting PEBs.
* @ubi: UBI device description object
*
- * This function is called after each erase operation, when the absolute erase
- * counter is incremented, to check if some physical eraseblock have not to be
- * protected any longer. These physical eraseblocks are moved from the
- * protection trees to the used tree.
+ * This function is called after each erase operation and removes PEBs from the
+ * tail of the protection queue. These PEBs have been protected for long enough
+ * and should be moved to the used tree.
*/
-static void check_protection_over(struct ubi_device *ubi)
+static void serve_prot_queue(struct ubi_device *ubi)
{
- struct ubi_wl_prot_entry *pe;
+ struct ubi_wl_entry *e, *tmp;
+ int count;
/*
	 * There may be several protected physical eraseblocks to remove,
* process them all.
*/
- while (1) {
- spin_lock(&ubi->wl_lock);
- if (!ubi->prot.aec.rb_node) {
- spin_unlock(&ubi->wl_lock);
- break;
- }
+repeat:
+ count = 0;
+ list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
+ dbg_wl("PEB %d EC %d protection over, move to used tree",
+ e->pnum, e->ec);
- pe = rb_entry(rb_first(&ubi->prot.aec),
- struct ubi_wl_prot_entry, rb_aec);
-
- if (pe->abs_ec > ubi->abs_ec) {
- spin_unlock(&ubi->wl_lock);
- break;
+ list_del(&e->u.list);
+ wl_tree_add(e, &ubi->used);
+ if (count++ > 32) {
+ /*
+ * Let's be nice and avoid holding the spinlock for
+ * too long.
+ */
+ goto repeat;
}
-
- dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
- pe->e->pnum, ubi->abs_ec, pe->abs_ec);
- rb_erase(&pe->rb_aec, &ubi->prot.aec);
- rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
- wl_tree_add(pe->e, &ubi->used);
- spin_unlock(&ubi->wl_lock);
-
- kfree(pe);
- cond_resched();
}
+
+ ubi->pq_head += 1;
+ if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
+ ubi->pq_head = 0;
+ ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
}
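The protection queue replacing the old protection RB-trees is a fixed ring of UBI_PROT_QUEUE_LEN list heads: prot_queue_add() parks a PEB one slot behind pq_head, and every erase advances the head by one, so a parked PEB stays protected for roughly UBI_PROT_QUEUE_LEN erases. The ring mechanics in isolation (ring_pq and the simple per-slot counters stand in for the real list buckets):

#define PROT_QUEUE_LEN	10	/* UBI_PROT_QUEUE_LEN in the UBI headers */

struct ring_pq {
	int head;			/* slot released on the next erase */
	int count[PROT_QUEUE_LEN];	/* PEBs parked per slot */
};

/* Park a PEB in the slot just behind the head, exactly like
 * prot_queue_add(): it stays protected for PROT_QUEUE_LEN - 1 erases. */
static void pq_park(struct ring_pq *pq)
{
	int tail = (pq->head + PROT_QUEUE_LEN - 1) % PROT_QUEUE_LEN;

	pq->count[tail]++;
}

/* One erase completed: release the oldest slot (its PEBs move to the
 * used tree in serve_prot_queue()) and advance the head. */
static int pq_tick(struct ring_pq *pq)
{
	int released = pq->count[pq->head];

	pq->count[pq->head] = 0;
	pq->head = (pq->head + 1) % PROT_QUEUE_LEN;
	return released;
}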
/**
- * schedule_ubi_work - schedule a work.
+ * __schedule_ubi_work - schedule a work.
* @ubi: UBI device description object
* @wrk: the work to schedule
*
- * This function enqueues a work defined by @wrk to the tail of the pending
- * works list.
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list. Can only be used if ubi->work_sem is already held in read mode!
*/
-static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
- spin_lock(&ubi->wl_lock);
list_add_tail(&wrk->list, &ubi->works);
ubi_assert(ubi->works_count >= 0);
ubi->works_count += 1;
+ if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
+ wake_up_process(ubi->bgt_thread);
+}
- /*
- * U-Boot special: We have no bgt_thread in U-Boot!
- * So just call do_work() here directly.
- */
- do_work(ubi);
-
- spin_unlock(&ubi->wl_lock);
+/**
+ * schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list.
+ */
+static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+ __schedule_ubi_work(ubi, wrk);
}
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+ return wrk->func == erase_worker;
+}
+#endif
+
/**
* schedule_erase - schedule an erase work.
* @ubi: UBI device description object
* @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
*/
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
- int torture)
+ int vol_id, int lnum, int torture)
{
struct ubi_work *wl_wrk;
+ ubi_assert(e);
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
e->pnum, e->ec, torture);
@@ -729,6 +860,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
wl_wrk->func = &erase_worker;
wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
wl_wrk->torture = torture;
schedule_ubi_work(ubi, wl_wrk);
@@ -736,6 +869,76 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
}
/**
+ * do_sync_erase - run the erase worker synchronously.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ */
+static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int vol_id, int lnum, int torture)
+{
+ struct ubi_work *wl_wrk;
+
+ dbg_wl("sync erase of PEB %i", e->pnum);
+
+ wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wl_wrk)
+ return -ENOMEM;
+
+ wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
+ wl_wrk->torture = torture;
+
+ return erase_worker(ubi, wl_wrk, 0);
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+ int lnum, int torture)
+{
+ struct ubi_wl_entry *e;
+ int vol_id, pnum = fm_e->pnum;
+
+ dbg_wl("PEB %d", pnum);
+
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ e = ubi->lookuptbl[pnum];
+
+	/* This can happen if we recovered from a fastmap the very
+	 * first time and are now writing a new one. In this case the WL
+	 * sub-system has never seen any PEB used by the original fastmap.
+ */
+ if (!e) {
+ e = fm_e;
+ ubi_assert(e->ec >= 0);
+ ubi->lookuptbl[pnum] = e;
+ } else {
+ e->ec = fm_e->ec;
+ kfree(fm_e);
+ }
+
+ vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+ return schedule_erase(ubi, e, vol_id, lnum, torture);
+}
+#endif
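The lookup-table reconciliation in ubi_wl_put_fm_peb(), isolated: after the very first attach from a fastmap the WL sub-system may have no entry for a fastmap PEB at all, in which case the caller's entry is adopted; otherwise only the fresh erase counter is merged. A sketch with reduced types (wl_entry and adopt_or_merge are illustrative):

#include <stdlib.h>

struct wl_entry {
	int pnum;
	int ec;		/* erase counter */
};

static struct wl_entry *adopt_or_merge(struct wl_entry **lookuptbl,
				       struct wl_entry *fm_e, int pnum)
{
	struct wl_entry *e = lookuptbl[pnum];

	if (!e) {
		lookuptbl[pnum] = fm_e;	/* WL never saw this PEB: adopt */
		return fm_e;
	}

	e->ec = fm_e->ec;	/* keep our entry, take the fresh counter */
	free(fm_e);
	return e;
}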
+
+/**
* wear_leveling_worker - wear-leveling worker function.
* @ubi: UBI device description object
* @wrk: the work object
@@ -748,13 +951,15 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int cancel)
{
- int err, put = 0, scrubbing = 0, protect = 0;
- struct ubi_wl_prot_entry *uninitialized_var(pe);
+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+ int vol_id = -1, uninitialized_var(lnum);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ int anchor = wrk->anchor;
+#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
kfree(wrk);
-
if (cancel)
return 0;
@@ -762,8 +967,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (!vid_hdr)
return -ENOMEM;
- mutex_lock(&ubi->move_mutex);
- spin_lock(&ubi->wl_lock);
ubi_assert(!ubi->move_from && !ubi->move_to);
ubi_assert(!ubi->move_to_put);
@@ -784,39 +987,60 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
goto out_cancel;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* Check whether we need to produce an anchor PEB */
+ if (!anchor)
+ anchor = !anchor_pebs_avalible(&ubi->free);
+
+ if (anchor) {
+ e1 = find_anchor_wl_entry(&ubi->used);
+ if (!e1)
+ goto out_cancel;
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
+ dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+ } else if (!ubi->scrub.rb_node) {
+#else
if (!ubi->scrub.rb_node) {
+#endif
/*
* Now pick the least worn-out used physical eraseblock and a
* highly worn-out free physical eraseblock. If the erase
* counters differ much enough, start wear-leveling.
*/
- e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
e1->ec, e2->ec);
goto out_cancel;
}
- paranoid_check_in_wl_tree(e1, &ubi->used);
- rb_erase(&e1->rb, &ubi->used);
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
} else {
/* Perform scrubbing */
scrubbing = 1;
- e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- paranoid_check_in_wl_tree(e1, &ubi->scrub);
- rb_erase(&e1->rb, &ubi->scrub);
+ e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->scrub);
+ rb_erase(&e1->u.rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
- paranoid_check_in_wl_tree(e2, &ubi->free);
- rb_erase(&e2->rb, &ubi->free);
ubi->move_from = e1;
ubi->move_to = e2;
- spin_unlock(&ubi->wl_lock);
/*
* Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
@@ -831,129 +1055,182 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
if (err && err != UBI_IO_BITFLIPS) {
- if (err == UBI_IO_PEB_FREE) {
+ if (err == UBI_IO_FF) {
/*
* We are trying to move PEB without a VID header. UBI
	 * always writes VID headers shortly after the PEB was
- * given, so we have a situation when it did not have
- * chance to write it down because it was preempted.
- * Just re-schedule the work, so that next time it will
- * likely have the VID header in place.
+ * given, so we have a situation when it has not yet
+ * had a chance to write it, because it was preempted.
+	 * So add this PEB to the protection queue for now,
+ * because presumably more data will be written there
+ * (including the missing VID header), and then we'll
+ * move it.
*/
dbg_wl("PEB %d has no VID header", e1->pnum);
+ protect = 1;
+ goto out_not_moved;
+ } else if (err == UBI_IO_FF_BITFLIPS) {
+ /*
+ * The same situation as %UBI_IO_FF, but bit-flips were
+ * detected. It is better to schedule this PEB for
+ * scrubbing.
+ */
+ dbg_wl("PEB %d has no VID header but has bit-flips",
+ e1->pnum);
+ scrubbing = 1;
goto out_not_moved;
}
ubi_err("error %d while reading VID header from PEB %d",
err, e1->pnum);
- if (err > 0)
- err = -EIO;
goto out_error;
}
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+
err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
if (err) {
-
- if (err < 0)
- goto out_error;
- if (err == 1)
+ if (err == MOVE_CANCEL_RACE) {
+ /*
+ * The LEB has not been moved because the volume is
+ * being deleted or the PEB has been put meanwhile. We
+ * should prevent this PEB from being selected for
+ * wear-leveling movement again, so put it to the
+ * protection queue.
+ */
+ protect = 1;
+ goto out_not_moved;
+ }
+ if (err == MOVE_RETRY) {
+ scrubbing = 1;
+ goto out_not_moved;
+ }
+ if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+ err == MOVE_TARGET_RD_ERR) {
+ /*
+ * Target PEB had bit-flips or write error - torture it.
+ */
+ torture = 1;
goto out_not_moved;
+ }
- /*
- * For some reason the LEB was not moved - it might be because
- * the volume is being deleted. We should prevent this PEB from
- * being selected for wear-levelling movement for some "time",
- * so put it to the protection tree.
- */
+ if (err == MOVE_SOURCE_RD_ERR) {
+ /*
+ * An error happened while reading the source PEB. Do
+ * not switch to R/O mode in this case, and give the
+ * upper layers a possibility to recover from this,
+ * e.g. by unmapping corresponding LEB. Instead, just
+ * put this PEB to the @ubi->erroneous list to prevent
+ * UBI from trying to move it over and over again.
+ */
+ if (ubi->erroneous_peb_count > ubi->max_erroneous) {
+ ubi_err("too many erroneous eraseblocks (%d)",
+ ubi->erroneous_peb_count);
+ goto out_error;
+ }
+ erroneous = 1;
+ goto out_not_moved;
+ }
- dbg_wl("cancelled moving PEB %d", e1->pnum);
- pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
- if (!pe) {
- err = -ENOMEM;
+ if (err < 0)
goto out_error;
- }
- protect = 1;
+ ubi_assert(0);
}
+ /* The PEB has been successfully moved */
+ if (scrubbing)
+ ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
+ e1->pnum, vol_id, lnum, e2->pnum);
ubi_free_vid_hdr(ubi, vid_hdr);
- spin_lock(&ubi->wl_lock);
- if (protect)
- prot_tree_add(ubi, e1, pe, protect);
- if (!ubi->move_to_put)
+
+ if (!ubi->move_to_put) {
wl_tree_add(e2, &ubi->used);
- else
- put = 1;
+ e2 = NULL;
+ }
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
- if (put) {
+ err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
+ if (err) {
+ kfree(e1);
+ if (e2)
+ kfree(e2);
+ goto out_ro;
+ }
+
+ if (e2) {
/*
* Well, the target PEB was put meanwhile, schedule it for
* erasure.
*/
- dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
- err = schedule_erase(ubi, e2, 0);
- if (err)
- goto out_error;
- }
-
- if (!protect) {
- err = schedule_erase(ubi, e1, 0);
- if (err)
- goto out_error;
+ dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
+ e2->pnum, vol_id, lnum);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
+ if (err) {
+ kfree(e2);
+ goto out_ro;
+ }
}
-
dbg_wl("done");
- mutex_unlock(&ubi->move_mutex);
return 0;
/*
* For some reasons the LEB was not moved, might be an error, might be
* something else. @e1 was not changed, so return it back. @e2 might
- * be changed, schedule it for erasure.
+ * have been changed, schedule it for erasure.
*/
out_not_moved:
- ubi_free_vid_hdr(ubi, vid_hdr);
- spin_lock(&ubi->wl_lock);
- if (scrubbing)
+ if (vol_id != -1)
+ dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
+ e1->pnum, vol_id, lnum, e2->pnum, err);
+ else
+ dbg_wl("cancel moving PEB %d to PEB %d (%d)",
+ e1->pnum, e2->pnum, err);
+ if (protect)
+ prot_queue_add(ubi, e1);
+ else if (erroneous) {
+ wl_tree_add(e1, &ubi->erroneous);
+ ubi->erroneous_peb_count += 1;
+ } else if (scrubbing)
wl_tree_add(e1, &ubi->scrub);
else
wl_tree_add(e1, &ubi->used);
+ ubi_assert(!ubi->move_to_put);
ubi->move_from = ubi->move_to = NULL;
- ubi->move_to_put = ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
-
- err = schedule_erase(ubi, e2, 0);
- if (err)
- goto out_error;
+ ubi->wl_scheduled = 0;
- mutex_unlock(&ubi->move_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
+ if (err) {
+ kfree(e2);
+ goto out_ro;
+ }
return 0;
out_error:
- ubi_err("error %d while moving PEB %d to PEB %d",
- err, e1->pnum, e2->pnum);
-
- ubi_free_vid_hdr(ubi, vid_hdr);
- spin_lock(&ubi->wl_lock);
+ if (vol_id != -1)
+ ubi_err("error %d while moving PEB %d to PEB %d",
+ err, e1->pnum, e2->pnum);
+ else
+ ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
+ err, e1->pnum, vol_id, lnum, e2->pnum);
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
- kmem_cache_free(ubi_wl_entry_slab, e1);
- kmem_cache_free(ubi_wl_entry_slab, e2);
- ubi_ro_mode(ubi);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ kfree(e1);
+ kfree(e2);
- mutex_unlock(&ubi->move_mutex);
- return err;
+out_ro:
+ ubi_ro_mode(ubi);
+ ubi_assert(err != 0);
+ return err < 0 ? err : -EIO;
out_cancel:
ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
- mutex_unlock(&ubi->move_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
return 0;
}
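The reworked error handling above maps each ubi_eba_copy_leb() outcome onto exactly one recovery flag, which then decides where e1 lands under out_not_moved: the protection queue, the scrub tree, the erroneous tree, or back in the used tree with the target tortured. The dispatch, condensed (only the MOVE_* names match the diff; the enum values here are assumptions for the sketch):

enum move_err {
	MOVE_CANCEL_RACE = 1, MOVE_RETRY, MOVE_TARGET_BITFLIPS,
	MOVE_TARGET_WR_ERR, MOVE_TARGET_RD_ERR, MOVE_SOURCE_RD_ERR,
};

struct wl_recovery {
	int protect, scrubbing, torture, erroneous;
};

static int classify_move_error(int err, struct wl_recovery *r)
{
	switch (err) {
	case MOVE_CANCEL_RACE:
		r->protect = 1;		/* volume deleted or PEB put: park it */
		return 0;
	case MOVE_RETRY:
		r->scrubbing = 1;	/* try again via the scrub tree */
		return 0;
	case MOVE_TARGET_BITFLIPS:
	case MOVE_TARGET_WR_ERR:
	case MOVE_TARGET_RD_ERR:
		r->torture = 1;		/* target PEB is suspect: torture it */
		return 0;
	case MOVE_SOURCE_RD_ERR:
		r->erroneous = 1;	/* quarantine the source PEB */
		return 0;
	default:
		return err < 0 ? err : -1;	/* hard error: out_error path */
	}
}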
@@ -961,19 +1238,19 @@ out_cancel:
/**
* ensure_wear_leveling - schedule wear-leveling if it is needed.
* @ubi: UBI device description object
+ * @nested: set to non-zero if this function is called from UBI worker
*
* This function checks if it is time to start wear-leveling and schedules it
* if yes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
-static int ensure_wear_leveling(struct ubi_device *ubi)
+static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
int err = 0;
struct ubi_wl_entry *e1;
struct ubi_wl_entry *e2;
struct ubi_work *wrk;
- spin_lock(&ubi->wl_lock);
if (ubi->wl_scheduled)
/* Wear-leveling is already in the work queue */
goto out_unlock;
@@ -990,11 +1267,11 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
/*
* We schedule wear-leveling only if the difference between the
* lowest erase counter of used physical eraseblocks and a high
- * erase counter of free physical eraseblocks is greater then
+ * erase counter of free physical eraseblocks is greater than
* %UBI_WL_THRESHOLD.
*/
- e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+ e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock;
@@ -1003,7 +1280,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
dbg_wl("schedule scrubbing");
ubi->wl_scheduled = 1;
- spin_unlock(&ubi->wl_lock);
wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
if (!wrk) {
@@ -1011,18 +1287,47 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
goto out_cancel;
}
+ wrk->anchor = 0;
wrk->func = &wear_leveling_worker;
- schedule_ubi_work(ubi, wrk);
+ if (nested)
+ __schedule_ubi_work(ubi, wrk);
+ else
+ schedule_ubi_work(ubi, wrk);
return err;
out_cancel:
- spin_lock(&ubi->wl_lock);
ubi->wl_scheduled = 0;
out_unlock:
- spin_unlock(&ubi->wl_lock);
return err;
}
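The scheduling predicate itself is small: wear-leveling is queued only when both trees are populated and the erase-counter gap is large enough. Condensed (the threshold value is an assumption; mainline makes it configurable):

#define UBI_WL_THRESHOLD	4096	/* assumption: default config value */

/* The decision ensure_wear_leveling() makes before queueing a
 * wear_leveling_worker(). */
static int wl_needed(int have_used, int have_free,
		     int min_used_ec, int max_free_ec)
{
	if (!have_used || !have_free)
		return 0;
	return max_free_ec - min_used_ec >= UBI_WL_THRESHOLD;
}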
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+ struct ubi_work *wrk;
+
+ if (ubi->wl_scheduled)
+ return 0;
+
+ ubi->wl_scheduled = 1;
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ ubi->wl_scheduled = 0;
+ return -ENOMEM;
+ }
+
+ wrk->anchor = 1;
+ wrk->func = &wear_leveling_worker;
+ schedule_ubi_work(ubi, wrk);
+ return 0;
+}
+#endif
+
/**
* erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
@@ -1038,61 +1343,66 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel)
{
struct ubi_wl_entry *e = wl_wrk->e;
- int pnum = e->pnum, err, need;
+ int pnum = e->pnum;
+ int vol_id = wl_wrk->vol_id;
+ int lnum = wl_wrk->lnum;
+ int err, available_consumed = 0;
if (cancel) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
kfree(wl_wrk);
- kmem_cache_free(ubi_wl_entry_slab, e);
+ kfree(e);
return 0;
}
- dbg_wl("erase PEB %d EC %d", pnum, e->ec);
+ dbg_wl("erase PEB %d EC %d LEB %d:%d",
+ pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
+
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
/* Fine, we've erased it successfully */
kfree(wl_wrk);
- spin_lock(&ubi->wl_lock);
- ubi->abs_ec += 1;
wl_tree_add(e, &ubi->free);
- spin_unlock(&ubi->wl_lock);
+ ubi->free_count++;
/*
- * One more erase operation has happened, take care about protected
- * physical eraseblocks.
+ * One more erase operation has happened, take care about
+ * protected physical eraseblocks.
*/
- check_protection_over(ubi);
+ serve_prot_queue(ubi);
/* And take care about wear-leveling */
- err = ensure_wear_leveling(ubi);
+ err = ensure_wear_leveling(ubi, 1);
return err;
}
ubi_err("failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
- kmem_cache_free(ubi_wl_entry_slab, e);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
int err1;
/* Re-schedule the LEB for erasure */
- err1 = schedule_erase(ubi, e, 0);
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
if (err1) {
err = err1;
goto out_ro;
}
return err;
- } else if (err != -EIO) {
+ }
+
+ kfree(e);
+ if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
* this physical eraseblock for erasure again would cause
- * errors again and again. Well, lets switch to RO mode.
+	 * errors again and again. Well, let's switch to R/O mode.
*/
goto out_ro;
- }
/* It is %-EIO, the PEB went bad */
@@ -1101,49 +1411,55 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
goto out_ro;
}
- spin_lock(&ubi->volumes_lock);
- need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
- if (need > 0) {
- need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
- ubi->avail_pebs -= need;
- ubi->rsvd_pebs += need;
- ubi->beb_rsvd_pebs += need;
- if (need > 0)
- ubi_msg("reserve more %d PEBs", need);
- }
-
if (ubi->beb_rsvd_pebs == 0) {
- spin_unlock(&ubi->volumes_lock);
- ubi_err("no reserved physical eraseblocks");
- goto out_ro;
+ if (ubi->avail_pebs == 0) {
+ ubi_err("no reserved/available physical eraseblocks");
+ goto out_ro;
+ }
+ ubi->avail_pebs -= 1;
+ available_consumed = 1;
}
- spin_unlock(&ubi->volumes_lock);
ubi_msg("mark PEB %d as bad", pnum);
-
err = ubi_io_mark_bad(ubi, pnum);
if (err)
goto out_ro;
- spin_lock(&ubi->volumes_lock);
- ubi->beb_rsvd_pebs -= 1;
+ if (ubi->beb_rsvd_pebs > 0) {
+ if (available_consumed) {
+ /*
+ * The amount of reserved PEBs increased since we last
+ * checked.
+ */
+ ubi->avail_pebs += 1;
+ available_consumed = 0;
+ }
+ ubi->beb_rsvd_pebs -= 1;
+ }
ubi->bad_peb_count += 1;
ubi->good_peb_count -= 1;
ubi_calculate_reserved(ubi);
- if (ubi->beb_rsvd_pebs == 0)
- ubi_warn("last PEB from the reserved pool was used");
- spin_unlock(&ubi->volumes_lock);
+ if (available_consumed)
+ ubi_warn("no PEBs in the reserved pool, used an available PEB");
+ else if (ubi->beb_rsvd_pebs)
+ ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
+ else
+ ubi_warn("last PEB from the reserve was used");
return err;
out_ro:
+ if (available_consumed)
+ ubi->avail_pebs += 1;
ubi_ro_mode(ubi);
return err;
}
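The erase failure path above replaces the old up-front reserve top-up with lazy accounting: prefer a PEB from the bad-block reserve, fall back to a PEB still available to volumes, and roll that fallback back if the reserve grew again before the bad mark landed. The bookkeeping on its own (peb_budget and consume_for_bad_peb are illustrative; the field names mirror the diff):

struct peb_budget {
	int beb_rsvd_pebs;	/* reserve kept for bad block handling */
	int avail_pebs;		/* PEBs still available to volumes */
};

static int consume_for_bad_peb(struct peb_budget *b)
{
	int available_consumed = 0;

	if (b->beb_rsvd_pebs == 0) {
		if (b->avail_pebs == 0)
			return -1;	/* real code switches to R/O mode */
		b->avail_pebs -= 1;
		available_consumed = 1;
	}

	/* ...ubi_io_mark_bad() would run here... */

	if (b->beb_rsvd_pebs > 0) {
		if (available_consumed)
			b->avail_pebs += 1;	/* reserve refilled meanwhile */
		b->beb_rsvd_pebs -= 1;
	}
	return 0;
}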
/**
- * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
+ * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
* @ubi: UBI device description object
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
* @pnum: physical eraseblock to return
* @torture: if this physical eraseblock has to be tortured
*
@@ -1152,7 +1468,8 @@ out_ro:
* occurred to this @pnum and it has to be tested. This function returns zero
* in case of success, and a negative error code in case of failure.
*/
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture)
{
int err;
struct ubi_wl_entry *e;
@@ -1162,7 +1479,6 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
ubi_assert(pnum < ubi->peb_count);
retry:
- spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
if (e == ubi->move_from) {
/*
@@ -1171,52 +1487,49 @@ retry:
* wear-leveling worker.
*/
dbg_wl("PEB %d is being moved, wait", pnum);
- spin_unlock(&ubi->wl_lock);
- /* Wait for the WL worker by taking the @ubi->move_mutex */
- mutex_lock(&ubi->move_mutex);
- mutex_unlock(&ubi->move_mutex);
goto retry;
} else if (e == ubi->move_to) {
/*
* User is putting the physical eraseblock which was selected
* as the target the data is moved to. It may happen if the EBA
- * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
- * the WL unit has not put the PEB to the "used" tree yet, but
- * it is about to do this. So we just set a flag which will
- * tell the WL worker that the PEB is not needed anymore and
- * should be scheduled for erasure.
+ * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
+ * but the WL sub-system has not put the PEB to the "used" tree
+ * yet, but it is about to do this. So we just set a flag which
+ * will tell the WL worker that the PEB is not needed anymore
+ * and should be scheduled for erasure.
*/
dbg_wl("PEB %d is the target of data moving", pnum);
ubi_assert(!ubi->move_to_put);
ubi->move_to_put = 1;
- spin_unlock(&ubi->wl_lock);
return 0;
} else {
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(e, &ubi->used);
- rb_erase(&e->rb, &ubi->used);
+ self_check_in_wl_tree(ubi, e, &ubi->used);
+ rb_erase(&e->u.rb, &ubi->used);
} else if (in_wl_tree(e, &ubi->scrub)) {
- paranoid_check_in_wl_tree(e, &ubi->scrub);
- rb_erase(&e->rb, &ubi->scrub);
+ self_check_in_wl_tree(ubi, e, &ubi->scrub);
+ rb_erase(&e->u.rb, &ubi->scrub);
+ } else if (in_wl_tree(e, &ubi->erroneous)) {
+ self_check_in_wl_tree(ubi, e, &ubi->erroneous);
+ rb_erase(&e->u.rb, &ubi->erroneous);
+ ubi->erroneous_peb_count -= 1;
+ ubi_assert(ubi->erroneous_peb_count >= 0);
+ /* Erroneous PEBs should be tortured */
+ torture = 1;
} else {
- err = prot_tree_del(ubi, e->pnum);
+ err = prot_queue_del(ubi, e->pnum);
if (err) {
ubi_err("PEB %d not found", pnum);
ubi_ro_mode(ubi);
- spin_unlock(&ubi->wl_lock);
return err;
}
}
}
- spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e, torture);
- if (err) {
- spin_lock(&ubi->wl_lock);
+ err = schedule_erase(ubi, e, vol_id, lnum, torture);
+ if (err)
wl_tree_add(e, &ubi->used);
- spin_unlock(&ubi->wl_lock);
- }
return err;
}
@@ -1238,10 +1551,9 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
ubi_msg("schedule PEB %d for scrubbing", pnum);
retry:
- spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
- if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
- spin_unlock(&ubi->wl_lock);
+ if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
+ in_wl_tree(e, &ubi->erroneous)) {
return 0;
}
@@ -1252,78 +1564,79 @@ retry:
* tree. We should just wait a little and let the WL worker
* proceed.
*/
- spin_unlock(&ubi->wl_lock);
dbg_wl("the PEB %d is not in proper tree, retry", pnum);
- yield();
goto retry;
}
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(e, &ubi->used);
- rb_erase(&e->rb, &ubi->used);
+ self_check_in_wl_tree(ubi, e, &ubi->used);
+ rb_erase(&e->u.rb, &ubi->used);
} else {
int err;
- err = prot_tree_del(ubi, e->pnum);
+ err = prot_queue_del(ubi, e->pnum);
if (err) {
ubi_err("PEB %d not found", pnum);
ubi_ro_mode(ubi);
- spin_unlock(&ubi->wl_lock);
return err;
}
}
wl_tree_add(e, &ubi->scrub);
- spin_unlock(&ubi->wl_lock);
/*
* Technically scrubbing is the same as wear-leveling, so it is done
* by the WL worker.
*/
- return ensure_wear_leveling(ubi);
+ return ensure_wear_leveling(ubi, 0);
}
/**
* ubi_wl_flush - flush all pending works.
* @ubi: UBI device description object
+ * @vol_id: the volume id to flush for
+ * @lnum: the logical eraseblock number to flush for
*
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function executes all pending works for a particular volume id /
+ * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
+ * acts as a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
*/
-int ubi_wl_flush(struct ubi_device *ubi)
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
- int err;
+ int err = 0;
+ int found = 1;
/*
- * Erase while the pending works queue is not empty, but not more then
+ * Erase while the pending works queue is not empty, but not more than
* the number of currently pending works.
*/
- dbg_wl("flush (%d pending works)", ubi->works_count);
- while (ubi->works_count) {
- err = do_work(ubi);
- if (err)
- return err;
- }
+ dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
+ vol_id, lnum, ubi->works_count);
- /*
- * Make sure all the works which have been done in parallel are
- * finished.
- */
- down_write(&ubi->work_sem);
- up_write(&ubi->work_sem);
+ while (found) {
+ struct ubi_work *wrk;
+ found = 0;
- /*
- * And in case last was the WL worker and it cancelled the LEB
- * movement, flush again.
- */
- while (ubi->works_count) {
- dbg_wl("flush more (%d pending works)", ubi->works_count);
- err = do_work(ubi);
- if (err)
- return err;
+ list_for_each_entry(wrk, &ubi->works, list) {
+ if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
+ (lnum == UBI_ALL || wrk->lnum == lnum)) {
+ list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+
+ err = wrk->func(ubi, wrk, 0);
+ if (err)
+ return err;
+
+ found = 1;
+ break;
+ }
+ }
}
- return 0;
+ return err;
}
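ubi_wl_flush() now executes only the queued works matching the requested volume/LEB pair, restarting the list walk after each executed item because wrk->func() may mutate the list. The match rule on its own (work_matches is an illustrative name; UBI_ALL is the wildcard from the UBI headers):

#define UBI_ALL	-1	/* wildcard volume ID / LEB number */

static int work_matches(int wrk_vol_id, int wrk_lnum, int vol_id, int lnum)
{
	return (vol_id == UBI_ALL || wrk_vol_id == vol_id) &&
	       (lnum == UBI_ALL || wrk_lnum == lnum);
}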
/**
@@ -1342,75 +1655,19 @@ static void tree_destroy(struct rb_root *root)
else if (rb->rb_right)
rb = rb->rb_right;
else {
- e = rb_entry(rb, struct ubi_wl_entry, rb);
+ e = rb_entry(rb, struct ubi_wl_entry, u.rb);
rb = rb_parent(rb);
if (rb) {
- if (rb->rb_left == &e->rb)
+ if (rb->rb_left == &e->u.rb)
rb->rb_left = NULL;
else
rb->rb_right = NULL;
}
- kmem_cache_free(ubi_wl_entry_slab, e);
- }
- }
-}
-
-/**
- * ubi_thread - UBI background thread.
- * @u: the UBI device description object pointer
- */
-int ubi_thread(void *u)
-{
- int failures = 0;
- struct ubi_device *ubi = u;
-
- ubi_msg("background thread \"%s\" started, PID %d",
- ubi->bgt_name, task_pid_nr(current));
-
- set_freezable();
- for (;;) {
- int err;
-
- if (kthread_should_stop())
- break;
-
- if (try_to_freeze())
- continue;
-
- spin_lock(&ubi->wl_lock);
- if (list_empty(&ubi->works) || ubi->ro_mode ||
- !ubi->thread_enabled) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock(&ubi->wl_lock);
- schedule();
- continue;
+ kfree(e);
}
- spin_unlock(&ubi->wl_lock);
-
- err = do_work(ubi);
- if (err) {
- ubi_err("%s: work failed with error code %d",
- ubi->bgt_name, err);
- if (failures++ > WL_MAX_FAILURES) {
- /*
- * Too many failures, disable the thread and
- * switch to read-only mode.
- */
- ubi_msg("%s: %d consecutive failures",
- ubi->bgt_name, WL_MAX_FAILURES);
- ubi_ro_mode(ubi);
- break;
- }
- } else
- failures = 0;
-
- cond_resched();
}
-
- dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
- return 0;
}
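tree_destroy() frees the whole RB-tree without recursion: always descend to a leaf, free it, and null the parent's child link so the parent eventually becomes a leaf itself. The same idea on a plain binary tree with parent pointers (node and destroy_tree are illustrative):

#include <stdlib.h>

struct node {
	struct node *left, *right, *parent;
};

static void destroy_tree(struct node *n)
{
	while (n) {
		if (n->left)
			n = n->left;
		else if (n->right)
			n = n->right;
		else {
			struct node *parent = n->parent;

			/* Detach the leaf so the parent can become one. */
			if (parent) {
				if (parent->left == n)
					parent->left = NULL;
				else
					parent->right = NULL;
			}
			free(n);
			n = parent;
		}
	}
}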
/**
@@ -1431,29 +1688,23 @@ static void cancel_pending(struct ubi_device *ubi)
}
/**
- * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
- * information.
+ * ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns zero in case of success, and a negative error code in
* case of failure.
*/
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- int err;
+ int err, i, reserved_pebs, found_pebs = 0;
struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb, *tmp;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp;
struct ubi_wl_entry *e;
-
- ubi->used = ubi->free = ubi->scrub = RB_ROOT;
- ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
- spin_lock_init(&ubi->wl_lock);
- mutex_init(&ubi->move_mutex);
- init_rwsem(&ubi->work_sem);
- ubi->max_ec = si->max_ec;
+ ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
+ ubi->max_ec = ai->max_ec;
INIT_LIST_HEAD(&ubi->works);
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
@@ -1463,64 +1714,57 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
if (!ubi->lookuptbl)
return err;
- list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
- cond_resched();
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
+ INIT_LIST_HEAD(&ubi->pq[i]);
+ ubi->pq_head = 0;
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
+ e = kmalloc(sizeof(*e), GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, 0)) {
- kmem_cache_free(ubi_wl_entry_slab, e);
+ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+ kfree(e);
goto out_free;
}
- }
- list_for_each_entry(seb, &si->free, u.list) {
- cond_resched();
+ found_pebs++;
+ }
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ ubi->free_count = 0;
+ list_for_each_entry(aeb, &ai->free, u.list) {
+ e = kmalloc(sizeof(*e), GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
ubi_assert(e->ec >= 0);
- wl_tree_add(e, &ubi->free);
- ubi->lookuptbl[e->pnum] = e;
- }
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
- list_for_each_entry(seb, &si->corr, u.list) {
- cond_resched();
-
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
- goto out_free;
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, 0)) {
- kmem_cache_free(ubi_wl_entry_slab, e);
- goto out_free;
- }
- }
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
- cond_resched();
+ found_pebs++;
+ }
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+ e = kmalloc(sizeof(*e), GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
- if (!seb->scrub) {
+
+ if (!aeb->scrub) {
dbg_wl("add PEB %d EC %d to the used tree",
e->pnum, e->ec);
wl_tree_add(e, &ubi->used);
@@ -1529,19 +1773,38 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
e->pnum, e->ec);
wl_tree_add(e, &ubi->scrub);
}
+
+ found_pebs++;
}
}
- if (ubi->avail_pebs < WL_RESERVED_PEBS) {
+ dbg_wl("found %i PEBs", found_pebs);
+
+ if (ubi->fm)
+		ubi_assert(ubi->good_peb_count ==
+			   found_pebs + ubi->fm->used_blocks);
+ else
+ ubi_assert(ubi->good_peb_count == found_pebs);
+
+ reserved_pebs = WL_RESERVED_PEBS;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* Reserve enough LEBs to store two fastmaps. */
+ reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
+#endif
+
+ if (ubi->avail_pebs < reserved_pebs) {
ubi_err("no enough physical eraseblocks (%d, need %d)",
- ubi->avail_pebs, WL_RESERVED_PEBS);
+ ubi->avail_pebs, reserved_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err("%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
goto out_free;
}
- ubi->avail_pebs -= WL_RESERVED_PEBS;
- ubi->rsvd_pebs += WL_RESERVED_PEBS;
+ ubi->avail_pebs -= reserved_pebs;
+ ubi->rsvd_pebs += reserved_pebs;
/* Schedule wear-leveling if needed */
- err = ensure_wear_leveling(ubi);
+ err = ensure_wear_leveling(ubi, 0);
if (err)
goto out_free;
@@ -1557,72 +1820,57 @@ out_free:
}
/**
- * protection_trees_destroy - destroy the protection RB-trees.
+ * protection_queue_destroy - destroy the protection queue.
* @ubi: UBI device description object
*/
-static void protection_trees_destroy(struct ubi_device *ubi)
+static void protection_queue_destroy(struct ubi_device *ubi)
{
- struct rb_node *rb;
- struct ubi_wl_prot_entry *pe;
+ int i;
+ struct ubi_wl_entry *e, *tmp;
- rb = ubi->prot.aec.rb_node;
- while (rb) {
- if (rb->rb_left)
- rb = rb->rb_left;
- else if (rb->rb_right)
- rb = rb->rb_right;
- else {
- pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
-
- rb = rb_parent(rb);
- if (rb) {
- if (rb->rb_left == &pe->rb_aec)
- rb->rb_left = NULL;
- else
- rb->rb_right = NULL;
- }
-
- kmem_cache_free(ubi_wl_entry_slab, pe->e);
- kfree(pe);
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+ list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+ list_del(&e->u.list);
+ kfree(e);
}
}
}
/**
- * ubi_wl_close - close the wear-leveling unit.
+ * ubi_wl_close - close the wear-leveling sub-system.
* @ubi: UBI device description object
*/
void ubi_wl_close(struct ubi_device *ubi)
{
- dbg_wl("close the UBI wear-leveling unit");
-
+ dbg_wl("close the WL sub-system");
cancel_pending(ubi);
- protection_trees_destroy(ubi);
+ protection_queue_destroy(ubi);
tree_destroy(&ubi->used);
+ tree_destroy(&ubi->erroneous);
tree_destroy(&ubi->free);
tree_destroy(&ubi->scrub);
kfree(ubi->lookuptbl);
}
-#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-
/**
- * paranoid_check_ec - make sure that the erase counter of a physical eraseblock
- * is correct.
+ * self_check_ec - make sure that the erase counter of a PEB is correct.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @ec: the erase counter to check
*
* This function returns zero if the erase counter of physical eraseblock @pnum
- * is equivalent to @ec, %1 if not, and a negative error code if an error
+ * is equivalent to @ec, and a negative error code if not or if an error
* occurred.
*/
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
int err;
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
@@ -1635,10 +1883,10 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
}
read_ec = be64_to_cpu(ec_hdr->ec);
- if (ec != read_ec) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ if (ec != read_ec && read_ec - ec > 1) {
+ ubi_err("self-check failed for PEB %d", pnum);
ubi_err("read EC is %lld, should be %d", read_ec, ec);
- ubi_dbg_dump_stack();
+ dump_stack();
err = 1;
} else
err = 0;
@@ -1649,24 +1897,53 @@ out_free:
}
/**
- * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present
- * in a WL RB-tree.
+ * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * @ubi: UBI device description object
* @e: the wear-leveling entry to check
* @root: the root of the tree
*
- * This function returns zero if @e is in the @root RB-tree and %1 if it
+ * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
* is not.
*/
-static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
- struct rb_root *root)
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root)
{
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
if (in_wl_tree(e, root))
return 0;
- ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
+ ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
e->pnum, e->ec, root);
- ubi_dbg_dump_stack();
- return 1;
+ dump_stack();
+ return -EINVAL;
}
-#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
+/**
+ * self_check_in_pq - check if wear-leveling entry is in the protection
+ * queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
+ */
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
+{
+ struct ubi_wl_entry *p;
+ int i;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+ list_for_each_entry(p, &ubi->pq[i], u.list)
+ if (p == e)
+ return 0;
+
+ ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
+ e->pnum, e->ec);
+ dump_stack();
+ return -EINVAL;
+}
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index e7d07334b0..058afc2788 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_OFTREE_MEM_GENERIC) += mem_generic.o
obj-$(CONFIG_GPIOLIB) += of_gpio.o
obj-y += partition.o
obj-y += of_net.o
+obj-$(CONFIG_MTD) += of_mtd.o
diff --git a/drivers/of/of_mtd.c b/drivers/of/of_mtd.c
new file mode 100644
index 0000000000..239f1f9892
--- /dev/null
+++ b/drivers/of/of_mtd.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ *
+ * OF helpers for mtd.
+ *
+ * This file is released under the GPLv2
+ *
+ */
+#include <common.h>
+#include <of_mtd.h>
+#include <linux/mtd/nand.h>
+
+/**
+ * This table maps 'enum nand_ecc_modes_t' found in include/linux/mtd/nand.h
+ * onto the 'nand-ecc-mode' device tree binding, so that an MTD
+ * device driver can get the NAND ECC mode from the device tree.
+ */
+static const char *nand_ecc_modes[] = {
+ [NAND_ECC_NONE] = "none",
+ [NAND_ECC_SOFT] = "soft",
+ [NAND_ECC_HW] = "hw",
+ [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
+};
+
+/**
+ * of_get_nand_ecc_mode - Get nand ecc mode for given device_node
+ * @np: Pointer to the given device_node
+ *
+ * The function gets the ECC mode string from the property 'nand-ecc-mode'
+ * and returns its index in the nand_ecc_modes table, or a negative errno on error.
+ */
+int of_get_nand_ecc_mode(struct device_node *np)
+{
+ const char *pm;
+ int err, i;
+
+ err = of_property_read_string(np, "nand-ecc-mode", &pm);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
+ if (!strcasecmp(pm, nand_ecc_modes[i]))
+ return i;
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(of_get_nand_ecc_mode);
+
+/**
+ * of_get_nand_bus_width - Get NAND bus width for the given device_node
+ * @np: Pointer to the given device_node
+ *
+ * Return the bus width option, or a negative errno on error.
+ */
+int of_get_nand_bus_width(struct device_node *np)
+{
+ u32 val;
+
+ if (of_property_read_u32(np, "nand-bus-width", &val))
+ return 8;
+
+	switch (val) {
+ case 8:
+ case 16:
+ return val;
+ default:
+ return -EIO;
+ }
+}
+EXPORT_SYMBOL_GPL(of_get_nand_bus_width);
+
+/**
+ * of_get_nand_on_flash_bbt - Get NAND on-flash BBT property for the given device_node
+ * @np: Pointer to the given device_node
+ *
+ * Return true if the property is present, false otherwise.
+ */
+bool of_get_nand_on_flash_bbt(struct device_node *np)
+{
+ return of_property_read_bool(np, "nand-on-flash-bbt");
+}
+EXPORT_SYMBOL_GPL(of_get_nand_on_flash_bbt);
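Taken together, the three helpers cover the common NAND device-tree properties. A hypothetical barebox NAND probe consuming them might look like this (foo_nand_probe is invented for the sketch, and it assumes the usual dev->device_node convention):

#include <common.h>
#include <driver.h>
#include <of_mtd.h>
#include <linux/mtd/nand.h>

/* Hypothetical probe: query the three properties and fall back to
 * soft ECC when the node does not specify a mode. */
static int foo_nand_probe(struct device_d *dev)
{
	struct device_node *np = dev->device_node;
	int ecc_mode, bus_width;

	ecc_mode = of_get_nand_ecc_mode(np);
	if (ecc_mode < 0)
		ecc_mode = NAND_ECC_SOFT;

	bus_width = of_get_nand_bus_width(np);	/* 8 (default) or 16 */
	if (bus_width < 0)
		return bus_width;

	if (of_get_nand_on_flash_bbt(np)) {
		/* set NAND_BBT_USE_FLASH in the chip's bbt_options */
	}

	return 0;
}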
diff --git a/drivers/serial/serial_auart.c b/drivers/serial/serial_auart.c
index 98f7c75de5..6518dbb77d 100644
--- a/drivers/serial/serial_auart.c
+++ b/drivers/serial/serial_auart.c
@@ -171,7 +171,7 @@ static int auart_clocksource_clock_change(struct notifier_block *nb, unsigned lo
static void auart_serial_init_port(struct auart_priv *priv)
{
- mxs_reset_block(priv->base + HW_UARTAPP_CTRL0, 0);
+ stmp_reset_block(priv->base + HW_UARTAPP_CTRL0, 0);
/* Disable UART */
writel(0x0, priv->base + HW_UARTAPP_CTRL2);
diff --git a/drivers/spi/mxs_spi.c b/drivers/spi/mxs_spi.c
index 8dfd6d54e5..4e539bfbca 100644
--- a/drivers/spi/mxs_spi.c
+++ b/drivers/spi/mxs_spi.c
@@ -20,6 +20,7 @@
#include <clock.h>
#include <errno.h>
#include <io.h>
+#include <stmp-device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <asm/mmu.h>
@@ -99,11 +100,11 @@ static int mxs_spi_setup(struct spi_device *spi)
return -EINVAL;
}
- mxs_reset_block(mxs->regs + HW_SSP_CTRL0, 0);
+ stmp_reset_block(mxs->regs + HW_SSP_CTRL0);
val |= SSP_CTRL0_SSP_ASSERT_OUT(spi->chip_select);
val |= SSP_CTRL0_BUS_WIDTH(0);
- writel(val, mxs->regs + HW_SSP_CTRL0 + BIT_SET);
+ writel(val, mxs->regs + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
val = SSP_CTRL1_SSP_MODE(0) | SSP_CTRL1_WORD_LENGTH(7);
val |= (mxs->mode & SPI_CPOL) ? SSP_CTRL1_POLARITY : 0;
@@ -120,14 +121,14 @@ static int mxs_spi_setup(struct spi_device *spi)
static void mxs_spi_start_xfer(struct mxs_spi *mxs)
{
- writel(SSP_CTRL0_LOCK_CS, mxs->regs + HW_SSP_CTRL0 + BIT_SET);
- writel(SSP_CTRL0_IGNORE_CRC, mxs->regs + HW_SSP_CTRL0 + BIT_CLR);
+ writel(SSP_CTRL0_LOCK_CS, mxs->regs + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+ writel(SSP_CTRL0_IGNORE_CRC, mxs->regs + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
}
static void mxs_spi_end_xfer(struct mxs_spi *mxs)
{
- writel(SSP_CTRL0_LOCK_CS, mxs->regs + HW_SSP_CTRL0 + BIT_CLR);
- writel(SSP_CTRL0_IGNORE_CRC, mxs->regs + HW_SSP_CTRL0 + BIT_SET);
+ writel(SSP_CTRL0_LOCK_CS, mxs->regs + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+ writel(SSP_CTRL0_IGNORE_CRC, mxs->regs + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}
static void mxs_spi_set_cs(struct spi_device *spi)
@@ -136,8 +137,8 @@ static void mxs_spi_set_cs(struct spi_device *spi)
const uint32_t mask = SSP_CTRL0_WAIT_FOR_CMD | SSP_CTRL0_WAIT_FOR_IRQ;
uint32_t select = SSP_CTRL0_SSP_ASSERT_OUT(spi->chip_select);
- writel(mask, mxs->regs + HW_SSP_CTRL0 + BIT_CLR);
- writel(select, mxs->regs + HW_SSP_CTRL0 + BIT_SET);
+ writel(mask, mxs->regs + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+ writel(select, mxs->regs + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}
static int mxs_spi_xfer_pio(struct spi_device *spi,
@@ -159,11 +160,11 @@ static int mxs_spi_xfer_pio(struct spi_device *spi,
writel(1, mxs->regs + HW_SSP_XFER_COUNT);
if (write)
- writel(SSP_CTRL0_READ, mxs->regs + HW_SSP_CTRL0 + BIT_CLR);
+ writel(SSP_CTRL0_READ, mxs->regs + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
else
- writel(SSP_CTRL0_READ, mxs->regs + HW_SSP_CTRL0 + BIT_SET);
+ writel(SSP_CTRL0_READ, mxs->regs + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
- writel(SSP_CTRL0_RUN, mxs->regs + HW_SSP_CTRL0 + BIT_SET);
+ writel(SSP_CTRL0_RUN, mxs->regs + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
if (wait_on_timeout(MXS_SPI_MAX_TIMEOUT,
(readl(mxs->regs + HW_SSP_CTRL0) & SSP_CTRL0_RUN) == SSP_CTRL0_RUN)) {
@@ -174,7 +175,7 @@ static int mxs_spi_xfer_pio(struct spi_device *spi,
if (write)
writel(*data++, mxs->regs + HW_SSP_DATA);
- writel(SSP_CTRL0_DATA_XFER, mxs->regs + HW_SSP_CTRL0 + BIT_SET);
+ writel(SSP_CTRL0_DATA_XFER, mxs->regs + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
if (!write) {
if (wait_on_timeout(MXS_SPI_MAX_TIMEOUT,
@@ -240,7 +241,7 @@ static int mxs_spi_transfer(struct spi_device *spi, struct spi_message *mesg)
}
}
- writel(SSP_CTRL1_DMA_ENABLE, mxs->regs + HW_SSP_CTRL1 + BIT_CLR);
+ writel(SSP_CTRL1_DMA_ENABLE, mxs->regs + HW_SSP_CTRL1 + STMP_OFFSET_REG_CLR);
ret = mxs_spi_xfer_pio(spi, data, t->len, write, flags);
if (ret < 0)
return ret;
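The BIT_SET/BIT_CLR to STMP_OFFSET_REG_SET/STMP_OFFSET_REG_CLR conversion in this file relies on the register layout shared by the i.MX23/28 (STMP) peripherals: every control register is mirrored at +0x4 (set), +0x8 (clear) and +0xc (toggle), so individual bits can be flipped with one write and no read-modify-write. The idiom as helpers (the names are illustrative; the offsets come from the new <stmp-device.h>):

#include <io.h>
#include <stmp-device.h>

/* Illustrative helpers; the drivers in this diff open-code the same writes. */
static inline void stmp_set_bits(void __iomem *reg, u32 bits)
{
	writel(bits, reg + STMP_OFFSET_REG_SET);	/* reg |= bits */
}

static inline void stmp_clear_bits(void __iomem *reg, u32 bits)
{
	writel(bits, reg + STMP_OFFSET_REG_CLR);	/* reg &= ~bits */
}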
diff --git a/drivers/video/stm.c b/drivers/video/stm.c
index d5212f8a12..0875c9b0f7 100644
--- a/drivers/video/stm.c
+++ b/drivers/video/stm.c
@@ -24,6 +24,7 @@
#include <errno.h>
#include <xfuncs.h>
#include <io.h>
+#include <stmp-device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <mach/imx-regs.h>
@@ -222,7 +223,7 @@ static void stmfb_enable_controller(struct fb_info *fb_info)
* Sometimes some data is still present in the FIFO. This leads into
* a correct but shifted picture. Clearing the FIFO helps
*/
- writel(CTRL1_FIFO_CLEAR, fbi->base + HW_LCDIF_CTRL1 + BIT_SET);
+ writel(CTRL1_FIFO_CLEAR, fbi->base + HW_LCDIF_CTRL1 + STMP_OFFSET_REG_SET);
/* if it was disabled, re-enable the mode again */
reg = readl(fbi->base + HW_LCDIF_CTRL);
@@ -255,14 +256,14 @@ static void stmfb_enable_controller(struct fb_info *fb_info)
}
/* stop FIFO reset */
- writel(CTRL1_FIFO_CLEAR, fbi->base + HW_LCDIF_CTRL1 + BIT_CLR);
+ writel(CTRL1_FIFO_CLEAR, fbi->base + HW_LCDIF_CTRL1 + STMP_OFFSET_REG_CLR);
/* enable LCD using LCD_RESET signal*/
if (fbi->pdata->flags & USE_LCD_RESET)
- writel(CTRL1_RESET, fbi->base + HW_LCDIF_CTRL1 + BIT_SET);
+ writel(CTRL1_RESET, fbi->base + HW_LCDIF_CTRL1 + STMP_OFFSET_REG_SET);
/* start the engine right now */
- writel(CTRL_RUN, fbi->base + HW_LCDIF_CTRL + BIT_SET);
+ writel(CTRL_RUN, fbi->base + HW_LCDIF_CTRL + STMP_OFFSET_REG_SET);
if (fbi->pdata->enable)
fbi->pdata->enable(1);
@@ -277,7 +278,7 @@ static void stmfb_disable_controller(struct fb_info *fb_info)
/* disable LCD using LCD_RESET signal*/
if (fbi->pdata->flags & USE_LCD_RESET)
- writel(CTRL1_RESET, fbi->base + HW_LCDIF_CTRL1 + BIT_CLR);
+ writel(CTRL1_RESET, fbi->base + HW_LCDIF_CTRL1 + STMP_OFFSET_REG_CLR);
if (fbi->pdata->enable)
fbi->pdata->enable(0);