Lines Matching refs:dm_device

552 static struct hv_dynmem_device dm_device;  variable
564 mutex_lock(&dm_device.ha_region_mutex); in hv_memory_notifier()
568 dm_device.num_pages_onlined += mem->nr_pages; in hv_memory_notifier()
570 mutex_unlock(&dm_device.ha_region_mutex); in hv_memory_notifier()
571 if (dm_device.ha_waiting) { in hv_memory_notifier()
572 dm_device.ha_waiting = false; in hv_memory_notifier()
573 complete(&dm_device.ol_waitevent); in hv_memory_notifier()
578 mutex_lock(&dm_device.ha_region_mutex); in hv_memory_notifier()
579 dm_device.num_pages_onlined -= mem->nr_pages; in hv_memory_notifier()
580 mutex_unlock(&dm_device.ha_region_mutex); in hv_memory_notifier()
632 init_completion(&dm_device.ol_waitevent); in hv_mem_hot_add()
633 dm_device.ha_waiting = true; in hv_mem_hot_add()
635 mutex_unlock(&dm_device.ha_region_mutex); in hv_mem_hot_add()
654 mutex_lock(&dm_device.ha_region_mutex); in hv_mem_hot_add()
664 wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ); in hv_mem_hot_add()
665 mutex_lock(&dm_device.ha_region_mutex); in hv_mem_hot_add()
666 post_status(&dm_device); in hv_mem_hot_add()
679 list_for_each(cur, &dm_device.ha_region_list) { in hv_online_page()
703 if (list_empty(&dm_device.ha_region_list)) in pfn_covered()
706 list_for_each(cur, &dm_device.ha_region_list) { in pfn_covered()
757 if (list_empty(&dm_device.ha_region_list)) in handle_pg_range()
760 list_for_each(cur, &dm_device.ha_region_list) { in handle_pg_range()
835 if (!dm_device.host_specified_ha_region) in process_hot_add()
850 list_add_tail(&ha_region->list, &dm_device.ha_region_list); in process_hot_add()
874 struct hv_dynmem_device *dm = &dm_device; in hot_add_req()
881 mutex_lock(&dm_device.ha_region_mutex); in hot_add_req()
915 mutex_unlock(&dm_device.ha_region_mutex); in hot_add_req()
1136 unsigned int num_pages = dm_device.balloon_wrk.num_pages; in balloon_up()
1173 num_ballooned = alloc_balloon_pages(&dm_device, num_pages, in balloon_up()
1184 dm_device.state = DM_INITIALIZED; in balloon_up()
1195 ret = vmbus_sendpacket(dm_device.dev->channel, in balloon_up()
1203 post_status(&dm_device); in balloon_up()
1213 free_balloon_pages(&dm_device, in balloon_up()
1232 complete(&dm_device.config_event); in balloon_down()
1243 vmbus_sendpacket(dm_device.dev->channel, &resp, in balloon_down()
1259 &dm_device.config_event, 1*HZ); in dm_thread_func()
1264 reinit_completion(&dm_device.config_event); in dm_thread_func()
1367 dm_device.balloon_wrk.num_pages = bal_msg->num_pages; in balloon_onchannelcallback()
1368 schedule_work(&dm_device.balloon_wrk.wrk); in balloon_onchannelcallback()
1402 schedule_work(&dm_device.ha_wrk.wrk); in balloon_onchannelcallback()
1441 dm_device.dev = dev; in balloon_probe()
1442 dm_device.state = DM_INITIALIZING; in balloon_probe()
1443 dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7; in balloon_probe()
1444 init_completion(&dm_device.host_event); in balloon_probe()
1445 init_completion(&dm_device.config_event); in balloon_probe()
1446 INIT_LIST_HEAD(&dm_device.ha_region_list); in balloon_probe()
1447 mutex_init(&dm_device.ha_region_mutex); in balloon_probe()
1448 INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up); in balloon_probe()
1449 INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req); in balloon_probe()
1450 dm_device.host_specified_ha_region = false; in balloon_probe()
1452 dm_device.thread = in balloon_probe()
1453 kthread_run(dm_thread_func, &dm_device, "hv_balloon"); in balloon_probe()
1454 if (IS_ERR(dm_device.thread)) { in balloon_probe()
1455 ret = PTR_ERR(dm_device.thread); in balloon_probe()
1464 hv_set_drvdata(dev, &dm_device); in balloon_probe()
1485 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); in balloon_probe()
1495 if (dm_device.state == DM_INIT_ERROR) { in balloon_probe()
1531 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); in balloon_probe()
1541 if (dm_device.state == DM_INIT_ERROR) { in balloon_probe()
1546 dm_device.state = DM_INITIALIZED; in balloon_probe()
1554 kthread_stop(dm_device.thread); in balloon_probe()
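
The balloon_probe() lines above show how the single, statically allocated dm_device is brought up: completions, the hot-add region list and its mutex, the two work items, and the "hv_balloon" kthread are all initialized before the device is attached via hv_set_drvdata(). The sketch below condenses just that sequence from the listed references; the balloon_probe_init_sketch() wrapper name is illustrative, and the version negotiation and error-path cleanup that the real probe performs are omitted.

```c
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/hyperv.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Condensed from the balloon_probe() references in the listing above. */
static int balloon_probe_init_sketch(struct hv_device *dev)
{
	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);
	INIT_LIST_HEAD(&dm_device.ha_region_list);
	mutex_init(&dm_device.ha_region_mutex);
	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
	dm_device.host_specified_ha_region = false;

	/* One worker thread for the single, statically allocated device. */
	dm_device.thread = kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread))
		return PTR_ERR(dm_device.thread);

	hv_set_drvdata(dev, &dm_device);
	return 0;
}
```

Because dm_device is a single static variable (the definition at the top of the listing), the work items, the memory notifier, the channel callback, and the kthread all reach it directly rather than through per-device context pointers, which is why the references span nearly every function in the driver.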
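Read together, the hv_memory_notifier() and hv_mem_hot_add() lines trace a simple handshake: the hot-add path arms ol_waitevent and sets ha_waiting before it adds memory, then waits (bounded at 5 seconds) for the memory notifier to report MEM_ONLINE and complete the event. The following sketch reassembles that handshake from the listed lines; the wait_for_online_sketch() and hv_memory_notifier_sketch() wrappers and the exact switch layout are assumptions for illustration, not the upstream code verbatim.

```c
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

/*
 * Waiter side, as in hv_mem_hot_add(): arm the event, drop the region
 * mutex so the notifier can run, let the memory get added, then sleep
 * until the range is onlined (or 5 s pass) before reporting status.
 */
static void wait_for_online_sketch(void)
{
	init_completion(&dm_device.ol_waitevent);
	dm_device.ha_waiting = true;
	mutex_unlock(&dm_device.ha_region_mutex);

	/* add_memory() for the pending range happens here */

	wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
	mutex_lock(&dm_device.ha_region_mutex);
	post_status(&dm_device);	/* driver's status report to the host */
}

/*
 * Notifier side, as in hv_memory_notifier(): account for the pages that
 * just came online (or went offline) and wake the waiter above.
 */
static int hv_memory_notifier_sketch(struct notifier_block *nb,
				     unsigned long val, void *v)
{
	struct memory_notify *mem = v;

	switch (val) {
	case MEM_ONLINE:
		mutex_lock(&dm_device.ha_region_mutex);
		dm_device.num_pages_onlined += mem->nr_pages;
		mutex_unlock(&dm_device.ha_region_mutex);

		if (dm_device.ha_waiting) {
			dm_device.ha_waiting = false;
			complete(&dm_device.ol_waitevent);
		}
		break;
	case MEM_OFFLINE:
		mutex_lock(&dm_device.ha_region_mutex);
		dm_device.num_pages_onlined -= mem->nr_pages;
		mutex_unlock(&dm_device.ha_region_mutex);
		break;
	}
	return NOTIFY_OK;
}
```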