author    Sascha Hauer <s.hauer@pengutronix.de>  2016-10-19 14:42:55 +0200
committer Sascha Hauer <s.hauer@pengutronix.de>  2016-10-19 14:49:10 +0200
commit    75ee6ab0765d67f84d8152d3d18065c17ea9b50c (patch)
tree      8dfd6877e0fb49441c110266812b21c4f06b8dbc
parent    834f6bf5e5f1169065376ad1aeb6a6266e66ce5c (diff)
mtd: ubi: enable thread earlier
Since "57cebc4 mtd: ubi: Fix scrubbing during attach" we make sure that the wear level worker does not start too early. However, now there are cases when the worker starts too late. When a ubi image is freshly written a volume may be autoresized. This has to be done after the wear level worker is started because otherwise the initial fastmap update will not be able to find any anchor PEBs. Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
-rw-r--r--  drivers/mtd/ubi/build.c | 14
1 file changed, 5 insertions(+), 9 deletions(-)
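To make the ordering concrete, here is a minimal, self-contained C sketch of the
flow this patch establishes in ubi_attach_mtd_dev(). It is not barebox code:
struct ubi_device, ubi_thread() and autoresize() below are simplified stand-ins
that only model the dependency described above, namely that the autoresize (and
the fastmap update it triggers) needs anchor PEBs which only become available
once the wear-leveling work has run.

#include <stdio.h>

/* Simplified stand-in for the real struct ubi_device. */
struct ubi_device {
        int thread_enabled;
        int autoresize_vol_id;      /* -1 means "no volume to autoresize" */
        int anchor_pebs_available;  /* set once the wear-leveling work ran */
};

/* Stand-in for ubi_thread(): without threading it is called directly and
 * performs the pending wear-leveling work synchronously. */
static void ubi_thread(struct ubi_device *ubi)
{
        if (!ubi->thread_enabled)
                return;
        ubi->anchor_pebs_available = 1;
        printf("wear-leveling work done, anchor PEBs reserved\n");
}

/* Stand-in for autoresize(): the fastmap update it triggers only succeeds
 * if anchor PEBs are already available. */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
        if (!ubi->anchor_pebs_available) {
                printf("fastmap update failed: no anchor PEBs\n");
                return -1;
        }
        printf("volume %d autoresized, fastmap updated\n", vol_id);
        return 0;
}

int main(void)
{
        struct ubi_device ubi = { .autoresize_vol_id = 0 };

        /* New order after this patch: run the worker first ... */
        ubi.thread_enabled = 1;
        ubi_thread(&ubi);

        /* ... then autoresize, so the initial fastmap update can succeed. */
        if (ubi.autoresize_vol_id != -1 && autoresize(&ubi, ubi.autoresize_vol_id))
                return 1;

        return 0;
}

With the pre-patch order (autoresize before ubi_thread), the sketch prints the
failure path; with the order shown above, the fastmap update succeeds.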
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 617c63e5ac..dd90e17cb7 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -622,6 +622,11 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
                 goto out_free;
         }
 
+        ubi->thread_enabled = 1;
+
+        /* No threading, call ubi_thread directly */
+        ubi_thread(ubi);
+
         if (ubi->autoresize_vol_id != -1) {
                 err = autoresize(ubi, ubi->autoresize_vol_id);
                 if (err)
@@ -663,15 +668,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
         dev_add_param_int_ro(&ubi->dev, "available_pebs", ubi->avail_pebs, "%d");
         dev_add_param_int_ro(&ubi->dev, "reserved_pebs", ubi->rsvd_pebs, "%d");
 
-        /*
-         * The below lock makes sure we do not race with 'ubi_thread()' which
-         * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
-         */
-        ubi->thread_enabled = 1;
-
-        /* No threading, call ubi_thread directly */
-        ubi_thread(ubi);
-
         ubi_devices[ubi_num] = ubi;
 
         return ubi_num;