root/drivers/mtd/ubi/fastmap-wl.c


DEFINITIONS

This source file includes the following definitions:
  1. update_fastmap_work_fn
  2. find_anchor_wl_entry
  3. return_unused_pool_pebs
  4. anchor_pebs_available
  5. ubi_wl_get_fm_peb
  6. ubi_refill_pools
  7. produce_free_peb
  8. ubi_wl_get_peb
  9. get_peb_for_wl
  10. ubi_ensure_anchor_pebs
  11. ubi_wl_put_fm_peb
  12. ubi_is_erase_work
  13. ubi_fastmap_close
  14. may_reserve_for_fm

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
        struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

        ubi_update_fastmap(ubi);
        spin_lock(&ubi->wl_lock);
        ubi->fm_work_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as the anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
        struct rb_node *p;
        struct ubi_wl_entry *e, *victim = NULL;
        int max_ec = UBI_MAX_ERASECOUNTER;

        ubi_rb_for_each_entry(p, e, root, u.rb) {
                if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
                        victim = e;
                        max_ec = e->ec;
                }
        }

        return victim;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
                                    struct ubi_fm_pool *pool)
{
        int i;
        struct ubi_wl_entry *e;

        for (i = pool->used; i < pool->size; i++) {
                e = ubi->lookuptbl[pool->pebs[i]];
                wl_tree_add(e, &ubi->free);
                ubi->free_count++;
        }
}

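/**
 * anchor_pebs_available - check whether the free tree contains a PEB that
 * could serve as a fastmap anchor (pnum < UBI_FM_MAX_START).
 * @root: the RB-tree to search
 */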
static int anchor_pebs_available(struct rb_root *root)
{
        struct rb_node *p;
        struct ubi_wl_entry *e;

        ubi_rb_for_each_entry(p, e, root, u.rb)
                if (e->pnum < UBI_FM_MAX_START)
                        return 1;

        return 0;
}

/**
 * ubi_wl_get_fm_peb - find a free physical eraseblock for fastmap use.
 * @ubi: UBI device description object
 * @anchor: if non-zero, the returned PEB will be used as the fastmap anchor
 *
 * The function returns a free physical eraseblock (an anchor candidate if
 * @anchor is set, otherwise one with a mean erase counter) and removes it
 * from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
        struct ubi_wl_entry *e = NULL;

        if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
                goto out;

        if (anchor)
                e = find_anchor_wl_entry(&ubi->free);
        else
                e = find_mean_wl_entry(ubi, &ubi->free);

        if (!e)
                goto out;

        self_check_in_wl_tree(ubi, e, &ubi->free);

        /*
         * Remove it from the free list; the wl subsystem no longer knows
         * this erase block.
         */
        rb_erase(&e->u.rb, &ubi->free);
        ubi->free_count--;
out:
        return e;
}

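/*
 * Illustrative caller sketch (hypothetical, not a call site in this file):
 * the wl_lock contract above means a fastmap writer would do something like
 *
 *      spin_lock(&ubi->wl_lock);
 *      e = ubi_wl_get_fm_peb(ubi, 1);
 *      spin_unlock(&ubi->wl_lock);
 *      if (e)
 *              ... use e->pnum as the new fastmap anchor ...
 */
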
/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        struct ubi_wl_entry *e;
        int enough;

        spin_lock(&ubi->wl_lock);

        return_unused_pool_pebs(ubi, wl_pool);
        return_unused_pool_pebs(ubi, pool);

        wl_pool->size = 0;
        pool->size = 0;

        for (;;) {
                enough = 0;
                if (pool->size < pool->max_size) {
                        if (!ubi->free.rb_node)
                                break;

                        e = wl_get_wle(ubi);
                        if (!e)
                                break;

                        pool->pebs[pool->size] = e->pnum;
                        pool->size++;
                } else {
                        enough++;
                }

                if (wl_pool->size < wl_pool->max_size) {
                        /*
                         * Stop filling the WL pool when doing so would cut
                         * into the bad-PEB reserve plus a small safety margin.
                         */
                        if (!ubi->free.rb_node ||
                           (ubi->free_count - ubi->beb_rsvd_pebs < 5))
                                break;

                        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
                        self_check_in_wl_tree(ubi, e, &ubi->free);
                        rb_erase(&e->u.rb, &ubi->free);
                        ubi->free_count--;

                        wl_pool->pebs[wl_pool->size] = e->pnum;
                        wl_pool->size++;
                } else {
                        enough++;
                }

                if (enough == 2)
                        break;
        }

        wl_pool->used = 0;
        pool->used = 0;

        spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
        int err;

        while (!ubi->free.rb_node && ubi->works_count) {
                dbg_wl("do one work synchronously");
                err = do_work(ubi);

                if (err)
                        return err;
        }

        return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns the number of a physical eraseblock in case of
 * success and a negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
        int ret, attempts = 0;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
        down_read(&ubi->fm_eba_sem);
        spin_lock(&ubi->wl_lock);

        /*
         * We also check the WL pool here because at this point we can
         * refill the WL pool synchronously.
         */
        if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->fm_eba_sem);
                ret = ubi_update_fastmap(ubi);
                if (ret) {
                        ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
                        down_read(&ubi->fm_eba_sem);
                        return -ENOSPC;
                }
                down_read(&ubi->fm_eba_sem);
                spin_lock(&ubi->wl_lock);
        }

        if (pool->used == pool->size) {
                spin_unlock(&ubi->wl_lock);
                attempts++;
                if (attempts == 10) {
                        ubi_err(ubi, "Unable to get a free PEB from user WL pool");
                        ret = -ENOSPC;
                        goto out;
                }
                up_read(&ubi->fm_eba_sem);
                ret = produce_free_peb(ubi);
                if (ret < 0) {
                        down_read(&ubi->fm_eba_sem);
                        goto out;
                }
                goto again;
        }

        ubi_assert(pool->used < pool->size);
        ret = pool->pebs[pool->used++];
        prot_queue_add(ubi, ubi->lookuptbl[ret]);
        spin_unlock(&ubi->wl_lock);
out:
        return ret;
}

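/*
 * Illustrative caller sketch (hypothetical): ubi_wl_get_peb() returns with
 * ubi->fm_eba_sem held for reading, so the caller must drop it when done:
 *
 *      pnum = ubi_wl_get_peb(ubi);
 *      if (pnum >= 0)
 *              ... program data into PEB pnum ...
 *      up_read(&ubi->fm_eba_sem);
 */
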
/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
        struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
        int pnum;

        ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

        if (pool->used == pool->size) {
                /*
                 * We cannot update the fastmap here because this
                 * function is called in atomic context.
                 * Let's fail here and refill/update it as soon as possible.
                 */
                if (!ubi->fm_work_scheduled) {
                        ubi->fm_work_scheduled = 1;
                        schedule_work(&ubi->fm_work);
                }
                return NULL;
        }

        pnum = pool->pebs[pool->used++];
        return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
        struct ubi_work *wrk;

        spin_lock(&ubi->wl_lock);
        if (ubi->wl_scheduled) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }
        ubi->wl_scheduled = 1;
        spin_unlock(&ubi->wl_lock);

        wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wrk) {
                spin_lock(&ubi->wl_lock);
                ubi->wl_scheduled = 0;
                spin_unlock(&ubi->wl_lock);
                return -ENOMEM;
        }

        wrk->anchor = 1;
        wrk->func = &wear_leveling_worker;
        __schedule_ubi_work(ubi, wrk);
        return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 *
 * See also: ubi_wl_put_peb()
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
                      int lnum, int torture)
{
        struct ubi_wl_entry *e;
        int vol_id, pnum = fm_e->pnum;

        dbg_wl("PEB %d", pnum);

        ubi_assert(pnum >= 0);
        ubi_assert(pnum < ubi->peb_count);

        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];

        /*
         * This can happen if we recovered from a fastmap the very first
         * time and are now writing a new one. In this case the wl subsystem
         * has never seen any PEB used by the original fastmap.
         */
        if (!e) {
                e = fm_e;
                ubi_assert(e->ec >= 0);
                ubi->lookuptbl[pnum] = e;
        }

        spin_unlock(&ubi->wl_lock);

        vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
        return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

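/*
 * Illustrative pairing (hypothetical): a PEB obtained via ubi_wl_get_fm_peb()
 * is handed back for erasure once the fastmap no longer needs it, e.g.
 *
 *      ubi_wl_put_fm_peb(ubi, e, 0, 0);
 *
 * A zero @lnum marks the PEB as having held the fastmap super block
 * (UBI_FM_SB_VOLUME_ID), a non-zero @lnum as fastmap data.
 */
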
/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
        return wrk->func == erase_worker;
}

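/**
 * ubi_fastmap_close - return pooled PEBs to the free tree and free the
 * in-memory fastmap structure.
 * @ubi: UBI device description object
 */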
static void ubi_fastmap_close(struct ubi_device *ubi)
{
        int i;

        return_unused_pool_pebs(ubi, &ubi->fm_pool);
        return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

        if (ubi->fm) {
                for (i = 0; i < ubi->fm->used_blocks; i++)
                        kfree(ubi->fm->e[i]);
        }
        kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * @ubi: UBI device description object
 * @e: physical eraseblock to check
 * @root: RB tree to test against.
 *
 * See find_mean_wl_entry()
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
                                               struct ubi_wl_entry *e,
                                               struct rb_root *root)
{
        /*
         * If no fastmap has been written yet and @e could serve as its
         * anchor, keep @e for fastmap and hand out the next entry instead.
         */
        if (e && !ubi->fm_disabled && !ubi->fm &&
            e->pnum < UBI_FM_MAX_START)
                e = rb_entry(rb_next(root->rb_node),
                             struct ubi_wl_entry, u.rb);

        return e;
}
