Lines Matching refs:reg
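
All of these matches come from drivers/md/dm-region-hash.c, the device-mapper region hash that mirror targets use to track per-region write and resync state. The leading numbers are line numbers in that source file as reported by the cross-referencer. The sketches interleaved below reconstruct the context around each group of hits from a contemporary kernel tree; treat them as approximations, not verbatim copies. They assume the file's usual headers (linux/list.h, linux/bio.h, linux/dm-dirty-log.h, linux/dm-region-hash.h) and the two private structs, whose relevant fields look roughly like this (field order and several members are assumptions):

	struct dm_region {
		struct dm_region_hash *rh;	/* back-pointer to the owning hash */
		region_t key;			/* region number within the device */
		int state;			/* DM_RH_CLEAN/DIRTY/NOSYNC/RECOVERING */

		struct list_head hash_list;	/* bucket chain in the hash table */
		struct list_head list;		/* clean/quiesced/recovered lists */

		atomic_t pending;		/* writes in flight to this region */
		struct bio_list delayed_bios;	/* bios held back during recovery */
	};

	struct dm_region_hash {
		rwlock_t hash_lock;		/* protects the bucket chains */
		mempool_t *region_pool;		/* allocator for struct dm_region */
		unsigned nr_buckets, prime, shift, mask;
		struct list_head *buckets;

		struct dm_dirty_log *log;	/* persistent region state */

		spinlock_t region_lock;		/* protects the state lists below */
		struct list_head clean_regions;
		struct list_head quiesced_regions;
		struct list_head recovered_regions;
		struct list_head failed_recovered_regions;

		void *context;			/* the owning target's private data */
		void (*dispatch_bios)(void *context, struct bio_list *bios);
		void (*wakeup_workers)(void *context);
		void (*wakeup_all_recovery_waiters)(void *context);
		/* plus recovery accounting (recovery_in_flight, recovery_count)
		   and a flush_failure flag, referenced by sketches below */
	};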

134 void *dm_rh_region_context(struct dm_region *reg)  in dm_rh_region_context()  argument
136 return reg->rh->context; in dm_rh_region_context()
140 region_t dm_rh_get_region_key(struct dm_region *reg) in dm_rh_get_region_key() argument
142 return reg->key; in dm_rh_get_region_key()
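
The first two hits are trivial accessors: a region carries a back-pointer to the hash it belongs to (reg->rh), so callers can recover the owner's private context and the region key without knowing the struct layout. A sketch of both:

	void *dm_rh_region_context(struct dm_region *reg)
	{
		return reg->rh->context;	/* opaque pointer owned by the target */
	}

	region_t dm_rh_get_region_key(struct dm_region *reg)
	{
		return reg->key;		/* region number within the device */
	}
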
238 struct dm_region *reg, *nreg; in dm_region_hash_destroy() local
242 list_for_each_entry_safe(reg, nreg, rh->buckets + h, in dm_region_hash_destroy()
244 BUG_ON(atomic_read(&reg->pending)); in dm_region_hash_destroy()
245 mempool_free(reg, rh->region_pool); in dm_region_hash_destroy()
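
dm_region_hash_destroy() walks every hash bucket and frees the remaining struct dm_region entries; the BUG_ON asserts that no region still has I/O pending at teardown. The loop bounds and the trailing cleanup are reconstructed here as an approximation:

	void dm_region_hash_destroy(struct dm_region_hash *rh)
	{
		unsigned h;
		struct dm_region *reg, *nreg;

		BUG_ON(!list_empty(&rh->quiesced_regions));
		for (h = 0; h < rh->nr_buckets; h++) {
			list_for_each_entry_safe(reg, nreg, rh->buckets + h,
						 hash_list) {
				BUG_ON(atomic_read(&reg->pending));	/* no in-flight I/O */
				mempool_free(reg, rh->region_pool);
			}
		}

		if (rh->log)
			dm_dirty_log_destroy(rh->log);

		mempool_destroy(rh->region_pool);
		vfree(rh->buckets);
		kfree(rh);
	}
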
273 struct dm_region *reg; in __rh_lookup() local
276 list_for_each_entry(reg, bucket, hash_list) in __rh_lookup()
277 if (reg->key == region) in __rh_lookup()
278 return reg; in __rh_lookup()
283 static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg) in __rh_insert() argument
285 list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key)); in __rh_insert()
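
__rh_lookup() and __rh_insert() are the open-hashing primitives: each bucket is a list_head chain of regions, indexed by a multiplicative hash of the region key. The rh_hash() helper shown first is reconstructed from the same file and is an assumption as written:

	static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
	{
		/* multiplicative hash, masked down to the bucket count */
		return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
	}

	static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
	{
		struct dm_region *reg;
		struct list_head *bucket = rh->buckets + rh_hash(rh, region);

		list_for_each_entry(reg, bucket, hash_list)
			if (reg->key == region)
				return reg;

		return NULL;
	}

	static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
	{
		list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
	}
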
290 struct dm_region *reg, *nreg; in __rh_alloc() local
305 reg = __rh_lookup(rh, region); in __rh_alloc()
306 if (reg) in __rh_alloc()
317 reg = nreg; in __rh_alloc()
321 return reg; in __rh_alloc()
326 struct dm_region *reg; in __rh_find() local
328 reg = __rh_lookup(rh, region); in __rh_find()
329 if (!reg) { in __rh_find()
331 reg = __rh_alloc(rh, region); in __rh_find()
335 return reg; in __rh_find()
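
__rh_find() is the lookup-or-create path. Callers hold the hash read lock; if the region is missing, the lock is dropped, __rh_alloc() builds a fresh entry and retakes the lock as a writer, and a second __rh_lookup() under the write lock catches the race where another CPU inserted the same region in the window. A sketch, with the allocation and initialization details filled in as assumptions:

	static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
	{
		struct dm_region *reg, *nreg;

		nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
		if (unlikely(!nreg))
			nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

		/* initial state comes from the dirty log */
		nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
			      DM_RH_CLEAN : DM_RH_NOSYNC;
		nreg->rh = rh;
		nreg->key = region;
		INIT_LIST_HEAD(&nreg->list);
		atomic_set(&nreg->pending, 0);
		bio_list_init(&nreg->delayed_bios);

		write_lock_irq(&rh->hash_lock);
		reg = __rh_lookup(rh, region);
		if (reg)
			/* lost the race: someone else inserted it first */
			mempool_free(nreg, rh->region_pool);
		else {
			__rh_insert(rh, nreg);
			if (nreg->state == DM_RH_CLEAN) {
				spin_lock(&rh->region_lock);
				list_add(&nreg->list, &rh->clean_regions);
				spin_unlock(&rh->region_lock);
			}
			reg = nreg;
		}
		write_unlock_irq(&rh->hash_lock);

		return reg;
	}

	static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
	{
		struct dm_region *reg;

		reg = __rh_lookup(rh, region);
		if (!reg) {
			read_unlock(&rh->hash_lock);	/* caller holds the read lock */
			reg = __rh_alloc(rh, region);
			read_lock(&rh->hash_lock);
		}

		return reg;
	}
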
341 struct dm_region *reg; in dm_rh_get_state() local
344 reg = __rh_lookup(rh, region); in dm_rh_get_state()
347 if (reg) in dm_rh_get_state()
348 return reg->state; in dm_rh_get_state()
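
dm_rh_get_state() prefers the in-memory entry and only consults the dirty log when the region has no hash entry; any log error is conservatively treated as out of sync. The locking and the fallback are reconstructed as an approximation:

	int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
	{
		int r;
		struct dm_region *reg;

		read_lock(&rh->hash_lock);
		reg = __rh_lookup(rh, region);
		read_unlock(&rh->hash_lock);

		if (reg)
			return reg->state;

		/*
		 * Not in the hash: ask the dirty log. Errors (e.g.
		 * -EWOULDBLOCK) are treated as DM_RH_NOSYNC.
		 */
		r = rh->log->type->in_sync(rh->log, region, may_block);
		return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
	}
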
364 static void complete_resync_work(struct dm_region *reg, int success) in complete_resync_work() argument
366 struct dm_region_hash *rh = reg->rh; in complete_resync_work()
368 rh->log->type->set_region_sync(rh->log, reg->key, success); in complete_resync_work()
379 rh->dispatch_bios(rh->context, &reg->delayed_bios); in complete_resync_work()
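
complete_resync_work() records the final sync state in the dirty log and then releases any bios that were parked on the region while it recovered. The bookkeeping between the two matched lines (the in-flight counter and the recovery semaphore) is filled in here as an assumption:

	static void complete_resync_work(struct dm_region *reg, int success)
	{
		struct dm_region_hash *rh = reg->rh;

		rh->log->type->set_region_sync(rh->log, reg->key, success);

		/*
		 * Dispatch the delayed bios before waking recovery waiters,
		 * so a suspend that waits on recovery also sees the queued
		 * writes flushed through the work queue.
		 */
		rh->dispatch_bios(rh->context, &reg->delayed_bios);
		if (atomic_dec_and_test(&rh->recovery_in_flight))
			rh->wakeup_all_recovery_waiters(rh->context);
		up(&rh->recovery_count);
	}
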
399 struct dm_region *reg; in dm_rh_mark_nosync() local
415 reg = __rh_find(rh, region); in dm_rh_mark_nosync()
419 BUG_ON(!reg); in dm_rh_mark_nosync()
420 BUG_ON(!list_empty(&reg->list)); in dm_rh_mark_nosync()
430 recovering = (reg->state == DM_RH_RECOVERING); in dm_rh_mark_nosync()
431 reg->state = DM_RH_NOSYNC; in dm_rh_mark_nosync()
432 BUG_ON(!list_empty(&reg->list)); in dm_rh_mark_nosync()
436 complete_resync_work(reg, 0); in dm_rh_mark_nosync()
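
dm_rh_mark_nosync() handles a failed write: the region is forced to DM_RH_NOSYNC, and if it was mid-recovery the recovery is completed as a failure (the complete_resync_work(reg, 0) hit). The BUG_ON checks encode the invariant that a region with in-flight writes has a hash entry and sits on no state list. A sketch, with the surrounding locking reconstructed as an assumption:

	void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
	{
		unsigned long flags;
		struct dm_region *reg;
		region_t region = dm_rh_bio_to_region(rh, bio);
		int recovering = 0;

		/* tell the log first that the region lost sync */
		rh->log->type->set_region_sync(rh->log, region, 0);

		read_lock(&rh->hash_lock);
		reg = __rh_find(rh, region);
		read_unlock(&rh->hash_lock);

		/* the write was in flight, so the entry must exist ... */
		BUG_ON(!reg);
		/* ... and must not be linked on any clean/quiesced list */
		BUG_ON(!list_empty(&reg->list));

		spin_lock_irqsave(&rh->region_lock, flags);
		recovering = (reg->state == DM_RH_RECOVERING);
		reg->state = DM_RH_NOSYNC;
		BUG_ON(!list_empty(&reg->list));
		spin_unlock_irqrestore(&rh->region_lock, flags);

		if (recovering)
			complete_resync_work(reg, 0);	/* recovery failed */
	}
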
442 struct dm_region *reg, *next; in dm_rh_update_states() local
456 list_for_each_entry(reg, &clean, list) in dm_rh_update_states()
457 list_del(&reg->hash_list); in dm_rh_update_states()
463 list_for_each_entry(reg, &recovered, list) in dm_rh_update_states()
464 list_del(&reg->hash_list); in dm_rh_update_states()
471 list_for_each_entry(reg, &failed_recovered, list) in dm_rh_update_states()
472 list_del(&reg->hash_list); in dm_rh_update_states()
483 list_for_each_entry_safe(reg, next, &recovered, list) { in dm_rh_update_states()
484 rh->log->type->clear_region(rh->log, reg->key); in dm_rh_update_states()
485 complete_resync_work(reg, 1); in dm_rh_update_states()
486 mempool_free(reg, rh->region_pool); in dm_rh_update_states()
489 list_for_each_entry_safe(reg, next, &failed_recovered, list) { in dm_rh_update_states()
490 complete_resync_work(reg, errors_handled ? 0 : 1); in dm_rh_update_states()
491 mempool_free(reg, rh->region_pool); in dm_rh_update_states()
494 list_for_each_entry_safe(reg, next, &clean, list) { in dm_rh_update_states()
495 rh->log->type->clear_region(rh->log, reg->key); in dm_rh_update_states()
496 mempool_free(reg, rh->region_pool); in dm_rh_update_states()
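
dm_rh_update_states() is the batch state machine behind most of the hits above: under the locks it splices the clean, recovered and failed-recovered lists onto local heads and unhooks each region from its hash bucket, then processes the three lists with no locking, since the regions are now private. Recovered regions are cleared in the log and completed as successes; failed recoveries complete as failures only when the caller handles errors; clean regions are simply cleared and freed. A reconstruction, with the splicing and the final log flush shown as assumptions:

	void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
	{
		struct dm_region *reg, *next;
		LIST_HEAD(clean);
		LIST_HEAD(recovered);
		LIST_HEAD(failed_recovered);

		/* grab all three lists quickly under the locks */
		write_lock_irq(&rh->hash_lock);
		spin_lock(&rh->region_lock);
		if (!list_empty(&rh->clean_regions)) {
			list_splice_init(&rh->clean_regions, &clean);
			list_for_each_entry(reg, &clean, list)
				list_del(&reg->hash_list);
		}
		if (!list_empty(&rh->recovered_regions)) {
			list_splice_init(&rh->recovered_regions, &recovered);
			list_for_each_entry(reg, &recovered, list)
				list_del(&reg->hash_list);
		}
		if (!list_empty(&rh->failed_recovered_regions)) {
			list_splice_init(&rh->failed_recovered_regions,
					 &failed_recovered);
			list_for_each_entry(reg, &failed_recovered, list)
				list_del(&reg->hash_list);
		}
		spin_unlock(&rh->region_lock);
		write_unlock_irq(&rh->hash_lock);

		/* the regions are now private to us: no more locking needed */
		list_for_each_entry_safe(reg, next, &recovered, list) {
			rh->log->type->clear_region(rh->log, reg->key);
			complete_resync_work(reg, 1);
			mempool_free(reg, rh->region_pool);
		}
		list_for_each_entry_safe(reg, next, &failed_recovered, list) {
			complete_resync_work(reg, errors_handled ? 0 : 1);
			mempool_free(reg, rh->region_pool);
		}
		list_for_each_entry_safe(reg, next, &clean, list) {
			rh->log->type->clear_region(rh->log, reg->key);
			mempool_free(reg, rh->region_pool);
		}

		rh->log->type->flush(rh->log);
	}
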
505 struct dm_region *reg; in rh_inc() local
508 reg = __rh_find(rh, region); in rh_inc()
511 atomic_inc(&reg->pending); in rh_inc()
513 if (reg->state == DM_RH_CLEAN) { in rh_inc()
514 reg->state = DM_RH_DIRTY; in rh_inc()
515 list_del_init(&reg->list); /* take off the clean list */ in rh_inc()
518 rh->log->type->mark_region(rh->log, reg->key); in rh_inc()
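
rh_inc() accounts one write into a region: the pending counter goes up, and on the first write to a clean region the region is pulled off the clean list, flipped to DM_RH_DIRTY and marked in the log. The lock choreography (mark_region is called after dropping the spinlock) is reconstructed as an assumption:

	static void rh_inc(struct dm_region_hash *rh, region_t region)
	{
		struct dm_region *reg;

		read_lock(&rh->hash_lock);
		reg = __rh_find(rh, region);

		spin_lock_irq(&rh->region_lock);
		atomic_inc(&reg->pending);

		if (reg->state == DM_RH_CLEAN) {
			reg->state = DM_RH_DIRTY;
			list_del_init(&reg->list);	/* take off the clean list */
			spin_unlock_irq(&rh->region_lock);

			rh->log->type->mark_region(rh->log, reg->key);
		} else
			spin_unlock_irq(&rh->region_lock);

		read_unlock(&rh->hash_lock);
	}
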
541 struct dm_region *reg; in dm_rh_dec() local
545 reg = __rh_lookup(rh, region); in dm_rh_dec()
549 if (atomic_dec_and_test(&reg->pending)) { in dm_rh_dec()
568 reg->state = DM_RH_NOSYNC; in dm_rh_dec()
569 } else if (reg->state == DM_RH_RECOVERING) { in dm_rh_dec()
570 list_add_tail(&reg->list, &rh->quiesced_regions); in dm_rh_dec()
571 } else if (reg->state == DM_RH_DIRTY) { in dm_rh_dec()
572 reg->state = DM_RH_CLEAN; in dm_rh_dec()
573 list_add(&reg->list, &rh->clean_regions); in dm_rh_dec()
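
dm_rh_dec() is the counterpart: when the last pending write completes, the region moves to whichever list its state demands. A region being recovered becomes quiesced (recovery can now copy it safely), and a dirty region becomes clean again; the NOSYNC assignment at line 568 is the error path where the region must be resynced. The flush-failure guard and the worker wakeup are reconstructed from a contemporary tree and may differ across versions:

	void dm_rh_dec(struct dm_region_hash *rh, region_t region)
	{
		unsigned long flags;
		struct dm_region *reg;
		int should_wake = 0;

		read_lock(&rh->hash_lock);
		reg = __rh_lookup(rh, region);
		read_unlock(&rh->hash_lock);

		spin_lock_irqsave(&rh->region_lock, flags);
		if (atomic_dec_and_test(&reg->pending)) {
			if (unlikely(rh->flush_failure)) {
				/* a failed flush means this write may be lost */
				reg->state = DM_RH_NOSYNC;
			} else if (reg->state == DM_RH_RECOVERING) {
				/* recovery was waiting for this region to drain */
				list_add_tail(&reg->list, &rh->quiesced_regions);
			} else if (reg->state == DM_RH_DIRTY) {
				reg->state = DM_RH_CLEAN;
				list_add(&reg->list, &rh->clean_regions);
			}
			should_wake = 1;
		}
		spin_unlock_irqrestore(&rh->region_lock, flags);

		if (should_wake)
			rh->wakeup_workers(rh->context);
	}
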
591 struct dm_region *reg; in __rh_recovery_prepare() local
605 reg = __rh_find(rh, region); in __rh_recovery_prepare()
609 reg->state = DM_RH_RECOVERING; in __rh_recovery_prepare()
612 if (atomic_read(&reg->pending)) in __rh_recovery_prepare()
613 list_del_init(&reg->list); in __rh_recovery_prepare()
615 list_move(&reg->list, &rh->quiesced_regions); in __rh_recovery_prepare()
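
__rh_recovery_prepare() asks the dirty log for the next region needing resync, flags it DM_RH_RECOVERING, and either parks it on the quiesced list immediately (no pending I/O) or unhooks it so dm_rh_dec() will quiesce it when the last write drains. The log call and the return convention are reconstructed as assumptions:

	static int __rh_recovery_prepare(struct dm_region_hash *rh)
	{
		int r;
		region_t region;
		struct dm_region *reg;

		/* ask the dirty log what to resync next */
		r = rh->log->type->get_resync_work(rh->log, &region);
		if (r <= 0)
			return r;

		read_lock(&rh->hash_lock);
		reg = __rh_find(rh, region);
		read_unlock(&rh->hash_lock);

		spin_lock_irq(&rh->region_lock);
		reg->state = DM_RH_RECOVERING;

		/* writes still in flight? let dm_rh_dec() quiesce it later */
		if (atomic_read(&reg->pending))
			list_del_init(&reg->list);
		else
			list_move(&reg->list, &rh->quiesced_regions);
		spin_unlock_irq(&rh->region_lock);

		return 1;
	}
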
647 struct dm_region *reg = NULL; in dm_rh_recovery_start() local
651 reg = list_entry(rh->quiesced_regions.next, in dm_rh_recovery_start()
653 list_del_init(&reg->list); /* remove from the quiesced list */ in dm_rh_recovery_start()
657 return reg; in dm_rh_recovery_start()
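
dm_rh_recovery_start() pops the first fully quiesced region and hands it to the caller for the actual copy; NULL means nothing is ready yet. A sketch with the emptiness check filled in as an assumption:

	struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
	{
		struct dm_region *reg = NULL;

		spin_lock_irq(&rh->region_lock);
		if (!list_empty(&rh->quiesced_regions)) {
			reg = list_entry(rh->quiesced_regions.next,
					 struct dm_region, list);
			list_del_init(&reg->list);	/* remove from the quiesced list */
		}
		spin_unlock_irq(&rh->region_lock);

		return reg;
	}
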
661 void dm_rh_recovery_end(struct dm_region *reg, int success) in dm_rh_recovery_end() argument
663 struct dm_region_hash *rh = reg->rh; in dm_rh_recovery_end()
667 list_add(&reg->list, &reg->rh->recovered_regions); in dm_rh_recovery_end()
669 list_add(&reg->list, &reg->rh->failed_recovered_regions); in dm_rh_recovery_end()
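
dm_rh_recovery_end() files the finished region on the recovered or failed-recovered list according to success; dm_rh_update_states() later reaps both lists. The locking and the worker wakeup are reconstructed as assumptions:

	void dm_rh_recovery_end(struct dm_region *reg, int success)
	{
		struct dm_region_hash *rh = reg->rh;

		spin_lock_irq(&rh->region_lock);
		if (success)
			list_add(&reg->list, &reg->rh->recovered_regions);
		else
			list_add(&reg->list, &reg->rh->failed_recovered_regions);
		spin_unlock_irq(&rh->region_lock);

		rh->wakeup_workers(rh->context);
	}
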
692 struct dm_region *reg; in dm_rh_delay() local
695 reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio)); in dm_rh_delay()
696 bio_list_add(&reg->delayed_bios, bio); in dm_rh_delay()
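
Finally, dm_rh_delay() parks a bio on its region's delayed_bios list, typically because the region is recovering; complete_resync_work() redispatches the list when recovery finishes. The surrounding read lock is reconstructed as an assumption:

	void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
	{
		struct dm_region *reg;

		read_lock(&rh->hash_lock);
		reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
		bio_list_add(&reg->delayed_bios, bio);	/* held until resync completes */
		read_unlock(&rh->hash_lock);
	}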