ubi: fastmap: Check wl_pool for free peb before wear leveling
UBI fetches a free PEB from wl_pool during wear leveling, so UBI should check whether wl_pool is empty before starting wear leveling. Otherwise, UBI will miss wear-leveling chances when free PEBs run out. Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com> Signed-off-by: Richard Weinberger <richard@nod.at>
This commit is contained in:
parent
d09e9a2bdd
commit
14072ee33d
@ -275,6 +275,58 @@ out:
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* next_peb_for_wl - returns next PEB to be used internally by the
|
||||||
|
* WL sub-system.
|
||||||
|
*
|
||||||
|
* @ubi: UBI device description object
|
||||||
|
*/
|
||||||
|
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
|
||||||
|
{
|
||||||
|
struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
|
||||||
|
int pnum;
|
||||||
|
|
||||||
|
if (pool->used == pool->size)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
pnum = pool->pebs[pool->used];
|
||||||
|
return ubi->lookuptbl[pnum];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* need_wear_leveling - checks whether to trigger a wear leveling work.
|
||||||
|
* UBI fetches free PEB from wl_pool, we check free PEBs from both 'wl_pool'
|
||||||
|
* and 'ubi->free', because free PEB in 'ubi->free' tree maybe moved into
|
||||||
|
* 'wl_pool' by ubi_refill_pools().
|
||||||
|
*
|
||||||
|
* @ubi: UBI device description object
|
||||||
|
*/
|
||||||
|
static bool need_wear_leveling(struct ubi_device *ubi)
|
||||||
|
{
|
||||||
|
int ec;
|
||||||
|
struct ubi_wl_entry *e;
|
||||||
|
|
||||||
|
if (!ubi->used.rb_node)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
e = next_peb_for_wl(ubi);
|
||||||
|
if (!e) {
|
||||||
|
if (!ubi->free.rb_node)
|
||||||
|
return false;
|
||||||
|
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
|
||||||
|
ec = e->ec;
|
||||||
|
} else {
|
||||||
|
ec = e->ec;
|
||||||
|
if (ubi->free.rb_node) {
|
||||||
|
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
|
||||||
|
ec = max(ec, e->ec);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
|
||||||
|
|
||||||
|
return ec - e->ec >= UBI_WL_THRESHOLD;
|
||||||
|
}
|
||||||
|
|
||||||
/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
|
/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
|
||||||
*
|
*
|
||||||
* @ubi: UBI device description object
|
* @ubi: UBI device description object
|
||||||
|
@ -670,7 +670,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
|
|||||||
ubi_assert(!ubi->move_from && !ubi->move_to);
|
ubi_assert(!ubi->move_from && !ubi->move_to);
|
||||||
ubi_assert(!ubi->move_to_put);
|
ubi_assert(!ubi->move_to_put);
|
||||||
|
|
||||||
|
#ifdef CONFIG_MTD_UBI_FASTMAP
|
||||||
|
if (!next_peb_for_wl(ubi) ||
|
||||||
|
#else
|
||||||
if (!ubi->free.rb_node ||
|
if (!ubi->free.rb_node ||
|
||||||
|
#endif
|
||||||
(!ubi->used.rb_node && !ubi->scrub.rb_node)) {
|
(!ubi->used.rb_node && !ubi->scrub.rb_node)) {
|
||||||
/*
|
/*
|
||||||
* No free physical eraseblocks? Well, they must be waiting in
|
* No free physical eraseblocks? Well, they must be waiting in
|
||||||
@ -1003,8 +1007,6 @@ out_cancel:
|
|||||||
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
|
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
|
||||||
{
|
{
|
||||||
int err = 0;
|
int err = 0;
|
||||||
struct ubi_wl_entry *e1;
|
|
||||||
struct ubi_wl_entry *e2;
|
|
||||||
struct ubi_work *wrk;
|
struct ubi_work *wrk;
|
||||||
|
|
||||||
spin_lock(&ubi->wl_lock);
|
spin_lock(&ubi->wl_lock);
|
||||||
@ -1017,6 +1019,13 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
|
|||||||
* the WL worker has to be scheduled anyway.
|
* the WL worker has to be scheduled anyway.
|
||||||
*/
|
*/
|
||||||
if (!ubi->scrub.rb_node) {
|
if (!ubi->scrub.rb_node) {
|
||||||
|
#ifdef CONFIG_MTD_UBI_FASTMAP
|
||||||
|
if (!need_wear_leveling(ubi))
|
||||||
|
goto out_unlock;
|
||||||
|
#else
|
||||||
|
struct ubi_wl_entry *e1;
|
||||||
|
struct ubi_wl_entry *e2;
|
||||||
|
|
||||||
if (!ubi->used.rb_node || !ubi->free.rb_node)
|
if (!ubi->used.rb_node || !ubi->free.rb_node)
|
||||||
/* No physical eraseblocks - no deal */
|
/* No physical eraseblocks - no deal */
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
@ -1032,6 +1041,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
|
|||||||
|
|
||||||
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
|
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
|
#endif
|
||||||
dbg_wl("schedule wear-leveling");
|
dbg_wl("schedule wear-leveling");
|
||||||
} else
|
} else
|
||||||
dbg_wl("schedule scrubbing");
|
dbg_wl("schedule scrubbing");
|
||||||
|
@ -5,6 +5,8 @@
|
|||||||
static void update_fastmap_work_fn(struct work_struct *wrk);
|
static void update_fastmap_work_fn(struct work_struct *wrk);
|
||||||
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
|
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
|
||||||
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
|
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
|
||||||
|
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi);
|
||||||
|
static bool need_wear_leveling(struct ubi_device *ubi);
|
||||||
static void ubi_fastmap_close(struct ubi_device *ubi);
|
static void ubi_fastmap_close(struct ubi_device *ubi);
|
||||||
static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
|
static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
|
||||||
{
|
{
|
||||||
|
Loading…
x
Reference in New Issue
Block a user