author     Richard Weinberger <richard@nod.at>      2018-08-29 14:19:52 +0200
committer  Sascha Hauer <s.hauer@pengutronix.de>    2018-08-31 08:23:30 +0200
commit     4bb3d5b5c29b48a0e63c96b66c4b5d487b9ef21f (patch)
tree       b464b6f559894b196e851bdfccfe7ef7a3f9656c
parent     8c7ef1e98537e90712cd4fdc0972f737e03680b5 (diff)
download   barebox-4bb3d5b5c29b48a0e63c96b66c4b5d487b9ef21f.tar.gz
           barebox-4bb3d5b5c29b48a0e63c96b66c4b5d487b9ef21f.tar.xz
ubi: Fix races around ubi_refill_pools()
When writing a new Fastmap, the first thing that happens is refilling the pools in memory. At this stage it is possible that new PEBs from the new pools are already claimed and written with data. If this happens before the new Fastmap data structure hits the flash and we face a power cut, the freshly written PEB will not be scanned and will go unnoticed.

Solve the issue by locking the pools until the Fastmap is written.

Cc: <stable@vger.kernel.org>
Fixes: dbb7d2a88d ("UBI: Add fastmap core")
Signed-off-by: Richard Weinberger <richard@nod.at>
[Fix conflicts]
Signed-off-by: Teresa Remmet <t.remmet@phytec.de>
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
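The mechanical part visible in the backported diff below is that schedule_erase() gains a nested flag: callers that already run on the Fastmap path, where the pool/work state is held locked, queue their erase work via __schedule_ubi_work(), while everyone else keeps using schedule_ubi_work(). The standalone C sketch below only models that split; the fake_* names and the simplified lock flag are illustrative stand-ins, not barebox API, and the assumption that __schedule_ubi_work() skips re-taking the work lock is carried over from the upstream Linux implementation.

/*
 * Standalone sketch, not barebox code: models why erase work queued from
 * inside the Fastmap path (nested == true) must use the variant that does
 * not take the work lock again, while all other callers keep the locking
 * variant. All fake_* names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_ubi {
	bool work_lock_held;	/* stands in for the UBI work lock  */
	int pending_works;	/* stands in for the UBI work queue */
};

/* Assumed semantics of __schedule_ubi_work(): caller already holds the lock. */
static void fake_schedule_nested(struct fake_ubi *ubi)
{
	ubi->pending_works++;
}

/* Assumed semantics of schedule_ubi_work(): take the lock, queue, release. */
static int fake_schedule(struct fake_ubi *ubi)
{
	if (ubi->work_lock_held)
		return -1;	/* re-taking the held lock is the race/deadlock */
	ubi->work_lock_held = true;
	ubi->pending_works++;
	ubi->work_lock_held = false;
	return 0;
}

/* Mirrors the patched schedule_erase(): 'nested' selects the right variant. */
static int fake_schedule_erase(struct fake_ubi *ubi, bool nested)
{
	if (nested) {
		fake_schedule_nested(ubi);
		return 0;
	}
	return fake_schedule(ubi);
}

int main(void)
{
	struct fake_ubi ubi = { false, 0 };

	/* Fastmap path: pools are refilled while the lock is already held. */
	ubi.work_lock_held = true;
	fake_schedule_erase(&ubi, true);
	ubi.work_lock_held = false;

	/* Ordinary wear-leveling path: takes the lock itself. */
	fake_schedule_erase(&ubi, false);

	printf("pending works: %d\n", ubi.pending_works);
	return 0;
}

The split matters because the Fastmap writer keeps the pools locked from ubi_refill_pools() until the new Fastmap is on flash, so any work it schedules from within that window must not try to acquire that lock a second time.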
-rw-r--r--  drivers/mtd/ubi/fastmap-wl.c |  4
-rw-r--r--  drivers/mtd/ubi/wl.c         | 15
2 files changed, 11 insertions, 8 deletions
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 1885b155dd..07982aab16 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -230,7 +230,7 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
 
 	wrk->anchor = 1;
 	wrk->func = &wear_leveling_worker;
-	schedule_ubi_work(ubi, wrk);
+	__schedule_ubi_work(ubi, wrk);
 	return 0;
 }
 
@@ -268,7 +268,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
 	}
 
 	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
-	return schedule_erase(ubi, e, vol_id, lnum, torture);
+	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
 }
 
 /**
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 764e8dddc2..54c119d20b 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -554,7 +554,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
  * failure.
  */
 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
-			  int vol_id, int lnum, int torture)
+			  int vol_id, int lnum, int torture, bool nested)
 {
 	struct ubi_work *wl_wrk;
 
@@ -573,7 +573,10 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 	wl_wrk->lnum = lnum;
 	wl_wrk->torture = torture;
 
-	schedule_ubi_work(ubi, wl_wrk);
+	if (nested)
+		__schedule_ubi_work(ubi, wl_wrk);
+	else
+		schedule_ubi_work(ubi, wl_wrk);
 	return 0;
 }
 
@@ -1047,7 +1050,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
 		int err1;
 
 		/* Re-schedule the LEB for erasure */
-		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
+		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
 		if (err1) {
 			wl_entry_destroy(ubi, e);
 			err = err1;
@@ -1204,7 +1207,7 @@ retry:
 		}
 	}
 
-	err = schedule_erase(ubi, e, vol_id, lnum, torture);
+	err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
 	if (err)
 		wl_tree_add(e, &ubi->used);
 
@@ -1428,7 +1431,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 		e->pnum = aeb->pnum;
 		e->ec = aeb->ec;
 		ubi->lookuptbl[e->pnum] = e;
-		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
 			wl_entry_destroy(ubi, e);
 			goto out_free;
 		}
@@ -1501,7 +1504,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 			e->ec = aeb->ec;
 			ubi_assert(!ubi->lookuptbl[e->pnum]);
 			ubi->lookuptbl[e->pnum] = e;
-			if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+			if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
 				wl_entry_destroy(ubi, e);
 				goto out_free;
 			}