Lines matching refs: lc

84 static int userspace_do_request(struct log_c *lc, const char *uuid,  in userspace_do_request()  argument
96 r = dm_consult_userspace(uuid, lc->luid, request_type, data, in userspace_do_request()
107 r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR, in userspace_do_request()
108 lc->usr_argv_str, in userspace_do_request()
109 strlen(lc->usr_argv_str) + 1, in userspace_do_request()
115 r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL, in userspace_do_request()
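The lines above are the request wrapper's reconnect path: when the userspace log server has died, dm_consult_userspace() fails, and the wrapper re-sends DM_ULOG_CTR with the saved constructor string and then DM_ULOG_RESUME before retrying the original request. Below is a minimal sketch of that pattern, preceded by the struct log_c fields the rest of this listing implies; it is reconstructed around the matching lines, not verbatim source (field order, the -ESRCH test, and the back-off interval are assumptions):

/* struct log_c as implied by the references in this listing; member
 * names come from the lines shown, order and completeness are assumed. */
struct log_c {
    struct dm_target *ti;
    struct dm_dev *log_dev;             /* optional device the server reports back */
    char uuid[DM_UUID_LEN];
    uint64_t luid;                      /* local unique id (the pointer value) */
    char *usr_argv_str;                 /* saved ctr string, replayed on reconnect */
    unsigned usr_argc;
    uint32_t region_size;
    region_t region_count;
    uint64_t in_sync_hint;
    int integrated_flush;
    spinlock_t flush_lock;              /* protects mark_list and clear_list */
    struct list_head mark_list;
    struct list_head clear_list;
    mempool_t *flush_entry_pool;
    struct workqueue_struct *dmlog_wq;
    struct delayed_work flush_log_work;
    atomic_t sched_flush;               /* a deferred flush is queued */
};

static int userspace_do_request(struct log_c *lc, const char *uuid,
                                int request_type, char *data, size_t data_size,
                                char *rdata, size_t *rdata_size)
{
    int r;

retry:
    r = dm_consult_userspace(uuid, lc->luid, request_type, data,
                             data_size, rdata, rdata_size);
    if (r != -ESRCH)        /* assumed: -ESRCH means "server gone" */
        return r;

    /* Poll until a restarted server accepts our constructor string. */
    while (1) {
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(2 * HZ);       /* assumed back-off interval */
        r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
                                 lc->usr_argv_str,
                                 strlen(lc->usr_argv_str) + 1,
                                 NULL, NULL);
        if (!r)
            break;
    }

    /* Bring the recreated log into service, then retry the caller's request. */
    r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
                             0, NULL, NULL);
    if (!r)
        goto retry;

    return -ESRCH;
}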
159 struct log_c *lc = container_of(work, struct log_c, flush_log_work.work); in do_flush() local
161 atomic_set(&lc->sched_flush, 0); in do_flush()
163 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL); in do_flush()
166 dm_table_event(lc->ti->table); in do_flush()
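do_flush() is the delayed-work handler behind the integrated-flush optimization: it recovers the log context from the work item with container_of(), clears the sched_flush flag, and issues a standalone DM_ULOG_FLUSH, raising a table event on failure so the consuming mirror target notices. A sketch consistent with the lines above:

static void do_flush(struct work_struct *work)
{
    int r;
    struct log_c *lc = container_of(work, struct log_c, flush_log_work.work);

    /* The scheduled flush is now running; allow a new one to be queued. */
    atomic_set(&lc->sched_flush, 0);

    r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL);
    if (r)
        dm_table_event(lc->ti->table);  /* flush failed: signal the table */
}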
196 struct log_c *lc = NULL; in userspace_ctr() local
207 lc = kzalloc(sizeof(*lc), GFP_KERNEL); in userspace_ctr()
208 if (!lc) { in userspace_ctr()
214 lc->luid = (unsigned long)lc; in userspace_ctr()
216 lc->ti = ti; in userspace_ctr()
220 kfree(lc); in userspace_ctr()
224 lc->usr_argc = argc; in userspace_ctr()
226 strncpy(lc->uuid, argv[0], DM_UUID_LEN); in userspace_ctr()
229 spin_lock_init(&lc->flush_lock); in userspace_ctr()
230 INIT_LIST_HEAD(&lc->mark_list); in userspace_ctr()
231 INIT_LIST_HEAD(&lc->clear_list); in userspace_ctr()
234 lc->integrated_flush = 1; in userspace_ctr()
241 kfree(lc); in userspace_ctr()
252 lc->flush_entry_pool = mempool_create_slab_pool(FLUSH_ENTRY_POOL_SIZE, in userspace_ctr()
254 if (!lc->flush_entry_pool) { in userspace_ctr()
263 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR, in userspace_ctr()
277 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE, in userspace_ctr()
285 lc->region_size = (uint32_t)rdata; in userspace_ctr()
286 lc->region_count = dm_sector_div_up(ti->len, lc->region_size); in userspace_ctr()
295 dm_table_get_mode(ti->table), &lc->log_dev); in userspace_ctr()
301 if (lc->integrated_flush) { in userspace_ctr()
302 lc->dmlog_wq = alloc_workqueue("dmlogd", WQ_MEM_RECLAIM, 0); in userspace_ctr()
303 if (!lc->dmlog_wq) { in userspace_ctr()
309 INIT_DELAYED_WORK(&lc->flush_log_work, do_flush); in userspace_ctr()
310 atomic_set(&lc->sched_flush, 0); in userspace_ctr()
316 if (lc->flush_entry_pool) in userspace_ctr()
317 mempool_destroy(lc->flush_entry_pool); in userspace_ctr()
318 kfree(lc); in userspace_ctr()
321 lc->usr_argv_str = ctr_str; in userspace_ctr()
322 log->context = lc; in userspace_ctr()
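The constructor references walk a clear sequence: allocate the context (whose pointer value becomes the local unique id luid), copy the uuid, detect the optional integrated_flush argument, initialize the flush lock and lists, create the flush-entry mempool, hand the constructor string to the server with DM_ULOG_CTR, cache the region geometry, optionally register a log device the server names in its reply (line 295), and set up the deferred-flush machinery. A condensed sketch follows; the exact argument parsing is inferred, the device-name handshake is elided, and build_constructor_string() and _flush_entry_cache are helpers assumed from the surrounding file:

static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
                         unsigned argc, char **argv)
{
    int r = 0;
    int str_size;
    char *ctr_str = NULL;
    struct log_c *lc;
    uint64_t rdata;
    size_t rdata_size = sizeof(rdata);

    lc = kzalloc(sizeof(*lc), GFP_KERNEL);
    if (!lc)
        return -ENOMEM;

    /* The pointer value is sufficient as a node-local unique id. */
    lc->luid = (unsigned long)lc;
    lc->ti = ti;

    if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
        kfree(lc);
        return -EINVAL;
    }
    lc->usr_argc = argc;
    strncpy(lc->uuid, argv[0], DM_UUID_LEN);
    argc--;
    argv++;

    spin_lock_init(&lc->flush_lock);
    INIT_LIST_HEAD(&lc->mark_list);
    INIT_LIST_HEAD(&lc->clear_list);

    /* Optional flag: merge the log flush into the mark-region payload. */
    if (!strcasecmp(argv[0], "integrated_flush")) {
        lc->integrated_flush = 1;
        argc--;
        argv++;
    }

    str_size = build_constructor_string(ti, argc, argv, &ctr_str);
    if (str_size < 0) {
        kfree(lc);
        return str_size;
    }

    lc->flush_entry_pool = mempool_create_slab_pool(FLUSH_ENTRY_POOL_SIZE,
                                                    _flush_entry_cache);
    if (!lc->flush_entry_pool) {
        r = -ENOMEM;
        goto out;
    }

    /* Hand the table string to the server ... */
    r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
                             ctr_str, str_size, NULL, NULL);
    if (r < 0)
        goto out;

    /* ... and cache the region geometry, which never changes. */
    r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
                             NULL, 0, (char *)&rdata, &rdata_size);
    if (r)
        goto out;
    lc->region_size = (uint32_t)rdata;
    lc->region_count = dm_sector_div_up(ti->len, lc->region_size);

    if (lc->integrated_flush) {
        lc->dmlog_wq = alloc_workqueue("dmlogd", WQ_MEM_RECLAIM, 0);
        if (!lc->dmlog_wq) {
            r = -ENOMEM;
            goto out;
        }
        INIT_DELAYED_WORK(&lc->flush_log_work, do_flush);
        atomic_set(&lc->sched_flush, 0);
    }

out:
    if (r) {
        if (lc->flush_entry_pool)
            mempool_destroy(lc->flush_entry_pool);
        kfree(lc);
        kfree(ctr_str);
    } else {
        /* Keep the constructor string for server-restart recovery. */
        lc->usr_argv_str = ctr_str;
        log->context = lc;
    }
    return r;
}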
330 struct log_c *lc = log->context; in userspace_dtr() local
332 if (lc->integrated_flush) { in userspace_dtr()
334 if (atomic_read(&lc->sched_flush)) in userspace_dtr()
335 flush_delayed_work(&lc->flush_log_work); in userspace_dtr()
337 destroy_workqueue(lc->dmlog_wq); in userspace_dtr()
340 (void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR, in userspace_dtr()
343 if (lc->log_dev) in userspace_dtr()
344 dm_put_device(lc->ti, lc->log_dev); in userspace_dtr()
346 mempool_destroy(lc->flush_entry_pool); in userspace_dtr()
348 kfree(lc->usr_argv_str); in userspace_dtr()
349 kfree(lc); in userspace_dtr()
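Teardown runs in the reverse order of construction: complete any scheduled deferred flush before destroying the workqueue, tell the server to drop its log with DM_ULOG_DTR (the return value is deliberately ignored), release the optional log device, then destroy the mempool and free the saved constructor string and the context. A sketch consistent with the references above:

static void userspace_dtr(struct dm_dirty_log *log)
{
    struct log_c *lc = log->context;

    if (lc->integrated_flush) {
        /* Run, not discard, a pending deferred flush before the wq dies. */
        if (atomic_read(&lc->sched_flush))
            flush_delayed_work(&lc->flush_log_work);
        destroy_workqueue(lc->dmlog_wq);
    }

    (void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
                                NULL, 0, NULL, NULL);

    if (lc->log_dev)
        dm_put_device(lc->ti, lc->log_dev);

    mempool_destroy(lc->flush_entry_pool);
    kfree(lc->usr_argv_str);
    kfree(lc);
}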
357 struct log_c *lc = log->context; in userspace_presuspend() local
359 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND, in userspace_presuspend()
368 struct log_c *lc = log->context; in userspace_postsuspend() local
373 if (lc->integrated_flush && atomic_read(&lc->sched_flush)) in userspace_postsuspend()
374 flush_delayed_work(&lc->flush_log_work); in userspace_postsuspend()
376 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND, in userspace_postsuspend()
385 struct log_c *lc = log->context; in userspace_resume() local
387 lc->in_sync_hint = 0; in userspace_resume()
388 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME, in userspace_resume()
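The suspend/resume hooks are thin pass-throughs to the server. The only local work: postsuspend runs any pending deferred flush early so nothing is in flight while suspended, and resume zeroes in_sync_hint so stale sync state is not trusted across a suspend cycle. Sketched from the lines above (the originals assign to a temporary r and return it; that is folded into direct returns here):

static int userspace_presuspend(struct dm_dirty_log *log)
{
    struct log_c *lc = log->context;

    return dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
                                NULL, 0, NULL, NULL);
}

static int userspace_postsuspend(struct dm_dirty_log *log)
{
    struct log_c *lc = log->context;

    /* Run a scheduled deferred flush now rather than while suspended. */
    if (lc->integrated_flush && atomic_read(&lc->sched_flush))
        flush_delayed_work(&lc->flush_log_work);

    return dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
                                NULL, 0, NULL, NULL);
}

static int userspace_resume(struct dm_dirty_log *log)
{
    struct log_c *lc = log->context;

    lc->in_sync_hint = 0;   /* do not trust sync state across suspend */
    return dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
                                NULL, 0, NULL, NULL);
}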
396 struct log_c *lc = log->context; in userspace_get_region_size() local
398 return lc->region_size; in userspace_get_region_size()
415 struct log_c *lc = log->context; in userspace_is_clean() local
418 r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN, in userspace_is_clean()
443 struct log_c *lc = log->context; in userspace_in_sync() local
462 r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC, in userspace_in_sync()
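userspace_get_region_size() simply returns the value cached at construction, with no round trip. The is-clean and in-sync queries share one request/response shape: ship the region number as a fixed-width payload, read back a 64-bit verdict, and fail safe (report not clean, not in sync) if the request errors. A sketch of that shape for DM_ULOG_IS_CLEAN; the in-sync query at line 462 follows the same pattern:

static int userspace_is_clean(struct dm_dirty_log *log, region_t region)
{
    int r;
    uint64_t region64 = (uint64_t)region;   /* fixed width across the kernel/user ABI */
    int64_t is_clean;
    size_t rdata_size = sizeof(is_clean);
    struct log_c *lc = log->context;

    r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
                             (char *)&region64, sizeof(region64),
                             (char *)&is_clean, &rdata_size);

    /* On communication failure, "not clean" is the safe answer. */
    return (r) ? 0 : (int)is_clean;
}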
468 static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list) in flush_one_by_one() argument
474 r = userspace_do_request(lc, lc->uuid, fe->type, in flush_one_by_one()
485 static int flush_by_group(struct log_c *lc, struct list_head *flush_list, in flush_by_group() argument
513 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, in flush_by_group()
523 r = userspace_do_request(lc, lc->uuid, type, in flush_by_group()
532 r = flush_one_by_one(lc, flush_list); in flush_by_group()
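flush_one_by_one() is the fallback path, one request per queued entry. flush_by_group() is the normal path: it packs up to a fixed number of region numbers into one payload per batch, and when flush_with_payload is set (the integrated-flush case) the batch rides inside DM_ULOG_FLUSH itself, so marks and the log flush cost a single round trip. A condensed sketch; struct dm_dirty_log_flush_entry and MAX_FLUSH_GROUP_COUNT are a layout and a constant implied by, but not spelled out in, the lines above:

struct dm_dirty_log_flush_entry {
    int type;               /* DM_ULOG_MARK_REGION or DM_ULOG_CLEAR_REGION */
    region_t region;
    struct list_head list;
};

static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
{
    int r = 0;
    struct dm_dirty_log_flush_entry *fe;

    list_for_each_entry(fe, flush_list, list) {
        r = userspace_do_request(lc, lc->uuid, fe->type,
                                 (char *)&fe->region, sizeof(fe->region),
                                 NULL, NULL);
        if (r)
            break;
    }
    return r;
}

static int flush_by_group(struct log_c *lc, struct list_head *flush_list,
                          int flush_with_payload)
{
    int r = 0;
    int count;
    uint32_t type = 0;
    struct dm_dirty_log_flush_entry *fe, *tmp_fe;
    LIST_HEAD(tmp_list);
    uint64_t group[MAX_FLUSH_GROUP_COUNT];

    while (!list_empty(flush_list)) {
        count = 0;
        list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
            group[count++] = fe->region;
            list_move(&fe->list, &tmp_list);
            type = fe->type;
            if (count >= MAX_FLUSH_GROUP_COUNT)
                break;
        }

        if (flush_with_payload) {
            /* Integrated flush: the batch rides inside DM_ULOG_FLUSH. */
            r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
                                     (char *)group, count * sizeof(uint64_t),
                                     NULL, NULL);
            if (r)
                break;
        } else {
            r = userspace_do_request(lc, lc->uuid, type,
                                     (char *)group, count * sizeof(uint64_t),
                                     NULL, NULL);
            if (r) {
                /* Grouped send failed; retry the batch one entry at a time. */
                list_splice_init(&tmp_list, flush_list);
                r = flush_one_by_one(lc, flush_list);
                break;
            }
        }
    }

    /* Return processed entries so the caller can free them to the mempool. */
    list_splice_init(&tmp_list, flush_list);
    return r;
}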
568 struct log_c *lc = log->context; in userspace_flush() local
574 mempool_t *flush_entry_pool = lc->flush_entry_pool; in userspace_flush()
576 spin_lock_irqsave(&lc->flush_lock, flags); in userspace_flush()
577 list_splice_init(&lc->mark_list, &mark_list); in userspace_flush()
578 list_splice_init(&lc->clear_list, &clear_list); in userspace_flush()
579 spin_unlock_irqrestore(&lc->flush_lock, flags); in userspace_flush()
587 r = flush_by_group(lc, &clear_list, 0); in userspace_flush()
591 if (!lc->integrated_flush) { in userspace_flush()
592 r = flush_by_group(lc, &mark_list, 0); in userspace_flush()
595 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, in userspace_flush()
603 r = flush_by_group(lc, &mark_list, 1); in userspace_flush()
607 if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) { in userspace_flush()
612 queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ); in userspace_flush()
613 atomic_set(&lc->sched_flush, 1); in userspace_flush()
619 cancel_delayed_work(&lc->flush_log_work); in userspace_flush()
620 atomic_set(&lc->sched_flush, 0); in userspace_flush()
639 dm_table_event(lc->ti->table); in userspace_flush()
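userspace_flush() is where the queues meet the wire. Both lists are spliced out under flush_lock, so marks and clears can keep arriving concurrently; clears go first (losing one costs only extra resync work), then marks: without integrated_flush the marks are followed by an explicit DM_ULOG_FLUSH, while with it the marks carry the flush in their payload, and a clear-only batch merely schedules a deferred standalone flush (about three seconds out) or cancels a scheduled one if this call already flushed. Entries always return to the mempool, and any failure raises a table event. A condensed sketch:

static int userspace_flush(struct dm_dirty_log *log)
{
    int r = 0;
    unsigned long flags;
    struct log_c *lc = log->context;
    LIST_HEAD(mark_list);
    LIST_HEAD(clear_list);
    int mark_list_is_empty;
    struct dm_dirty_log_flush_entry *fe, *tmp_fe;
    mempool_t *flush_entry_pool = lc->flush_entry_pool;

    /* Take ownership of the queued work; new entries can keep arriving. */
    spin_lock_irqsave(&lc->flush_lock, flags);
    list_splice_init(&lc->mark_list, &mark_list);
    list_splice_init(&lc->clear_list, &clear_list);
    spin_unlock_irqrestore(&lc->flush_lock, flags);

    mark_list_is_empty = list_empty(&mark_list);
    if (mark_list_is_empty && list_empty(&clear_list))
        return 0;

    r = flush_by_group(lc, &clear_list, 0);
    if (r)
        goto out;

    if (!lc->integrated_flush) {
        /* Classic path: send marks, then an explicit flush. */
        r = flush_by_group(lc, &mark_list, 0);
        if (r)
            goto out;
        r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
                                 NULL, 0, NULL, NULL);
        goto out;
    }

    /* Integrated path: marks carry the flush in their payload. */
    r = flush_by_group(lc, &mark_list, 1);
    if (r)
        goto out;

    if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) {
        /* Only clears were sent; defer the standalone log flush. */
        queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ);
        atomic_set(&lc->sched_flush, 1);
    } else {
        /* The marks above already flushed; drop any scheduled one. */
        cancel_delayed_work(&lc->flush_log_work);
        atomic_set(&lc->sched_flush, 0);
    }

out:
    list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
        list_del(&fe->list);
        mempool_free(fe, flush_entry_pool);
    }
    list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
        list_del(&fe->list);
        mempool_free(fe, flush_entry_pool);
    }

    if (r)
        dm_table_event(lc->ti->table);
    return r;
}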
653 struct log_c *lc = log->context; in userspace_mark_region() local
657 fe = mempool_alloc(lc->flush_entry_pool, GFP_NOIO); in userspace_mark_region()
660 spin_lock_irqsave(&lc->flush_lock, flags); in userspace_mark_region()
663 list_add(&fe->list, &lc->mark_list); in userspace_mark_region()
664 spin_unlock_irqrestore(&lc->flush_lock, flags); in userspace_mark_region()
682 struct log_c *lc = log->context; in userspace_clear_region() local
691 fe = mempool_alloc(lc->flush_entry_pool, GFP_ATOMIC); in userspace_clear_region()
697 spin_lock_irqsave(&lc->flush_lock, flags); in userspace_clear_region()
700 list_add(&fe->list, &lc->clear_list); in userspace_clear_region()
701 spin_unlock_irqrestore(&lc->flush_lock, flags); in userspace_clear_region()
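The mark/clear hooks never talk to the server directly; they only queue entries for the next flush, keeping the hot I/O path cheap. Their allocation policies differ on purpose: a mark must never be lost, so it waits on the mempool with GFP_NOIO, while a clear is only an optimization, may run from interrupt context, and is silently dropped if GFP_ATOMIC allocation fails. Sketched from the lines above; the fe->type values are inferred from the request names used at flush time:

static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
{
    unsigned long flags;
    struct log_c *lc = log->context;
    struct dm_dirty_log_flush_entry *fe;

    /* Wait if necessary, but never fail: the mempool guarantees progress. */
    fe = mempool_alloc(lc->flush_entry_pool, GFP_NOIO);
    BUG_ON(!fe);

    spin_lock_irqsave(&lc->flush_lock, flags);
    fe->type = DM_ULOG_MARK_REGION;
    fe->region = region;
    list_add(&fe->list, &lc->mark_list);
    spin_unlock_irqrestore(&lc->flush_lock, flags);
}

static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
{
    unsigned long flags;
    struct log_c *lc = log->context;
    struct dm_dirty_log_flush_entry *fe;

    /* May be called in interrupt context; dropping a clear is harmless. */
    fe = mempool_alloc(lc->flush_entry_pool, GFP_ATOMIC);
    if (!fe)
        return;

    spin_lock_irqsave(&lc->flush_lock, flags);
    fe->type = DM_ULOG_CLEAR_REGION;
    fe->region = region;
    list_add(&fe->list, &lc->clear_list);
    spin_unlock_irqrestore(&lc->flush_lock, flags);
}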
718 struct log_c *lc = log->context; in userspace_get_resync_work() local
724 if (lc->in_sync_hint >= lc->region_count) in userspace_get_resync_work()
728 r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK, in userspace_get_resync_work()
744 struct log_c *lc = log->context; in userspace_set_region_sync() local
753 (void) userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC, in userspace_set_region_sync()
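get_resync_work answers locally once in_sync_hint covers every region, saving a round trip per call; otherwise the server returns a pair: whether there is work, and which region. set_region_sync is fire-and-forget, since a lost notification is simply detected and redone on a later resync pass. A sketch; the packed structs mirror the fixed-width 64-bit convention used elsewhere in the file so 32- and 64-bit kernels and daemons agree on layout:

static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
    int r;
    size_t rdata_size;
    struct log_c *lc = log->context;
    struct {
        int64_t i;   /* 64-bit for mixed-arch compatibility */
        region_t r;
    } pkg;

    /* Everything already in sync: no need to ask the server. */
    if (lc->in_sync_hint >= lc->region_count)
        return 0;

    rdata_size = sizeof(pkg);
    r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
                             NULL, 0, (char *)&pkg, &rdata_size);

    *region = pkg.r;
    return (r) ? r : (int)pkg.i;
}

static void userspace_set_region_sync(struct dm_dirty_log *log,
                                      region_t region, int in_sync)
{
    struct log_c *lc = log->context;
    struct {
        region_t r;
        int64_t i;
    } pkg;

    pkg.r = region;
    pkg.i = (int64_t)in_sync;

    /* Failures are ignored; a missed notification is redone on resync. */
    (void) userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
                                (char *)&pkg, sizeof(pkg), NULL, NULL);
}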
776 struct log_c *lc = log->context; in userspace_get_sync_count() local
779 r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT, in userspace_get_sync_count()
785 if (sync_count >= lc->region_count) in userspace_get_sync_count()
786 lc->in_sync_hint = lc->region_count; in userspace_get_sync_count()
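get_sync_count also maintains the hint: once the reported count reaches region_count, in_sync_hint is pinned there, which is what later lets get_resync_work and is_remote_recovering answer without a round trip. A sketch:

static region_t userspace_get_sync_count(struct dm_dirty_log *log)
{
    int r;
    size_t rdata_size;
    uint64_t sync_count;
    struct log_c *lc = log->context;

    rdata_size = sizeof(sync_count);
    r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
                             NULL, 0, (char *)&sync_count, &rdata_size);
    if (r)
        return 0;

    if (sync_count >= lc->region_count)
        lc->in_sync_hint = lc->region_count;   /* fully in sync from now on */

    return (region_t)sync_count;
}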
802 struct log_c *lc = log->context; in userspace_status() local
806 r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO, in userspace_status()
816 table_args = strchr(lc->usr_argv_str, ' '); in userspace_status()
820 DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid); in userspace_status()
821 if (lc->integrated_flush) in userspace_status()
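Status is split by type: STATUSTYPE_INFO is delegated to the server, which writes its answer straight into the result buffer, while STATUSTYPE_TABLE is rebuilt locally from the saved constructor string, emitting the log type, argument count, and uuid, re-inserting the integrated_flush flag that was consumed during parsing, and appending everything after the first space (the uuid was argv[0]). A sketch; the COM_FAILURE fallback text is an assumption, not visible in the lines above:

static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
                            char *result, unsigned maxlen)
{
    int r = 0;
    char *table_args;
    size_t sz = (size_t)maxlen;
    struct log_c *lc = log->context;

    switch (status_type) {
    case STATUSTYPE_INFO:
        r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
                                 NULL, 0, result, &sz);
        if (r) {
            sz = 0;
            DMEMIT("%s 1 COM_FAILURE", log->type->name);
        }
        break;
    case STATUSTYPE_TABLE:
        sz = 0;
        table_args = strchr(lc->usr_argv_str, ' ');
        BUG_ON(!table_args);   /* there is always a space after the uuid */
        table_args++;

        DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid);
        if (lc->integrated_flush)
            DMEMIT("integrated_flush ");
        DMEMIT("%s ", table_args);
        break;
    }
    return (r) ? 0 : (int)sz;
}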
839 struct log_c *lc = log->context; in userspace_is_remote_recovering() local
854 if (region < lc->in_sync_hint) in userspace_is_remote_recovering()
860 r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING, in userspace_is_remote_recovering()
866 lc->in_sync_hint = pkg.in_sync_hint; in userspace_is_remote_recovering()
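is_remote_recovering sits on the write path, so it avoids a round trip per write in two ways: any region below in_sync_hint is answered 0 immediately, and between polls the conservative answer 1 is returned without asking. Each real query refreshes in_sync_hint from the reply. A sketch; the static jiffies-based rate limit and its quarter-second interval are assumptions, since only the lc-referencing lines appear in this listing:

static int userspace_is_remote_recovering(struct dm_dirty_log *log,
                                          region_t region)
{
    int r;
    uint64_t region64 = region;
    struct log_c *lc = log->context;
    static unsigned long limit;   /* assumed rate limiter */
    struct {
        int64_t is_recovering;
        uint64_t in_sync_hint;
    } pkg;
    size_t rdata_size = sizeof(pkg);

    /* An in-sync region is never being recovered remotely. */
    if (region < lc->in_sync_hint)
        return 0;
    else if (time_after(limit, jiffies))
        return 1;   /* too soon to ask again; assume the worst */

    limit = jiffies + (HZ / 4);
    r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
                             (char *)&region64, sizeof(region64),
                             (char *)&pkg, &rdata_size);
    if (r)
        return 1;

    lc->in_sync_hint = pkg.in_sync_hint;
    return (int)pkg.is_recovering;
}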