Lines Matching refs:rmapp
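
Context for the hits below: this is the x86 KVM shadow-MMU reverse map; the function set and line numbers are consistent with arch/x86/kvm/mmu.c of the v4.0/v4.1 era (the exact tree is an assumption). rmapp always points at a single unsigned long "rmap head", kept per gfn and per mapping level in memslot->arch.rmap. pte_list_add() and pte_list_remove() (lines 1046 and 1058) encode that head in three states: 0 for empty, the spte pointer itself for one entry, and a bit-0-tagged pointer to a pte_list_desc chain for more. A minimal user-space model of the add path; the model_* names are illustrative, the kernel allocates descriptors from a per-vCPU cache, and PTE_LIST_EXT was 3 in this era:

#include <stdint.h>
#include <stdlib.h>

#define PTE_LIST_EXT 3                        /* sptes per descriptor */

struct pte_list_desc {
    uint64_t *sptes[PTE_LIST_EXT];
    struct pte_list_desc *more;               /* next descriptor in chain */
};

/* Model of pte_list_add(): link one spte into the head *rmapp. */
static int model_pte_list_add(uint64_t *spte, unsigned long *rmapp)
{
    struct pte_list_desc *desc;
    int i;

    if (!*rmapp) {                            /* empty: store spte directly */
        *rmapp = (unsigned long)spte;
        return 0;
    }
    if (!(*rmapp & 1)) {                      /* one entry: grow to a desc */
        desc = calloc(1, sizeof(*desc));
        desc->sptes[0] = (uint64_t *)*rmapp;
        desc->sptes[1] = spte;
        *rmapp = (unsigned long)desc | 1;     /* tag bit 0: "chained" */
        return 1;
    }
    desc = (struct pte_list_desc *)(*rmapp & ~1ul);
    while (desc->more)                        /* walk to the tail desc */
        desc = desc->more;
    for (i = 0; i < PTE_LIST_EXT; i++)
        if (!desc->sptes[i]) {
            desc->sptes[i] = spte;
            return i;
        }
    desc->more = calloc(1, sizeof(*desc));    /* tail full: extend chain */
    desc->more->sptes[0] = spte;
    return PTE_LIST_EXT;
}
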

1041 unsigned long *rmapp; in rmap_add() local
1045 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); in rmap_add()
1046 return pte_list_add(vcpu, spte, rmapp); in rmap_add()
1053 unsigned long *rmapp; in rmap_remove() local
1057 rmapp = gfn_to_rmap(kvm, gfn, sp->role.level); in rmap_remove()
1058 pte_list_remove(spte, rmapp); in rmap_remove()
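
rmap_add() and rmap_remove() resolve rmapp through gfn_to_rmap(), which indexes the memslot's per-level head arrays. A sketch of the index math behind __gfn_to_rmap()/gfn_to_index() in this source; the 9-bits-per-level shift is x86's 4K/2M/1G geometry, and the model_* names are illustrative:

#define PT_PAGE_TABLE_LEVEL 1                         /* 4K level */
#define KVM_HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)

struct model_memslot {
    unsigned long base_gfn;                   /* first gfn in the slot */
    unsigned long npages;
    unsigned long *rmap[3];                   /* one head array per level */
};

/* Sketch of __gfn_to_rmap(): head slot for (gfn, level). */
static unsigned long *model_gfn_to_rmap(struct model_memslot *slot,
                                        unsigned long gfn, int level)
{
    unsigned long idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                        (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));

    return &slot->rmap[level - PT_PAGE_TABLE_LEVEL][idx];
}
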
1178 static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, in __rmap_write_protect() argument
1185 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { in __rmap_write_protect()
1206 static bool __rmap_clear_dirty(struct kvm *kvm, unsigned long *rmapp) in __rmap_clear_dirty() argument
1212 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { in __rmap_clear_dirty()
1233 static bool __rmap_set_dirty(struct kvm *kvm, unsigned long *rmapp) in __rmap_set_dirty() argument
1239 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { in __rmap_set_dirty()
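
All three walkers above share one idiom: rmap_get_first() decodes the head word, rmap_get_next() advances through the descriptor chain, and the per-spte hook in the loop body is the only difference. A model of the iterator pair, reusing struct pte_list_desc and PTE_LIST_EXT from the first sketch:

#include <stdint.h>
#include <stddef.h>

struct model_rmap_iterator {
    struct pte_list_desc *desc;   /* NULL when the head holds one spte */
    int pos;                      /* slot within the current desc */
};

static uint64_t *model_rmap_get_first(unsigned long rmap,
                                      struct model_rmap_iterator *iter)
{
    if (!rmap)
        return NULL;                          /* empty chain */
    if (!(rmap & 1)) {
        iter->desc = NULL;                    /* single-spte head */
        return (uint64_t *)rmap;
    }
    iter->desc = (struct pte_list_desc *)(rmap & ~1ul);
    iter->pos = 0;
    return iter->desc->sptes[0];
}

static uint64_t *model_rmap_get_next(struct model_rmap_iterator *iter)
{
    if (!iter->desc)
        return NULL;                          /* single entry: done */
    if (iter->pos < PTE_LIST_EXT - 1 && iter->desc->sptes[iter->pos + 1])
        return iter->desc->sptes[++iter->pos];
    iter->desc = iter->desc->more;            /* next desc, if any */
    iter->pos = 0;
    return iter->desc ? iter->desc->sptes[0] : NULL;
}
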
1263 unsigned long *rmapp; in kvm_mmu_write_protect_pt_masked() local
1266 rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), in kvm_mmu_write_protect_pt_masked()
1268 __rmap_write_protect(kvm, rmapp, false); in kvm_mmu_write_protect_pt_masked()
1288 unsigned long *rmapp; in kvm_mmu_clear_dirty_pt_masked() local
1291 rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), in kvm_mmu_clear_dirty_pt_masked()
1293 __rmap_clear_dirty(kvm, rmapp); in kvm_mmu_clear_dirty_pt_masked()
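
Lines 1266 and 1291 show only the first probe: in the kernel both *_pt_masked() functions loop until the dirty-bitmap word is exhausted, taking the lowest set bit with __ffs(mask) and clearing it with mask &= mask - 1. A standalone model of that walk; the op callback is hypothetical and __ffs is modeled with the GCC/Clang builtin:

/* Visit base_gfn + gfn_offset + (each set bit of mask), lowest first. */
static void model_masked_walk(unsigned long base_gfn,
                              unsigned long gfn_offset, unsigned long mask,
                              void (*op)(unsigned long gfn))
{
    while (mask) {
        op(base_gfn + gfn_offset + __builtin_ctzl(mask));
        mask &= mask - 1;                     /* clear the lowest set bit */
    }
}
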
1325 unsigned long *rmapp; in rmap_write_protect() local
1333 rmapp = __gfn_to_rmap(gfn, i, slot); in rmap_write_protect()
1334 write_protected |= __rmap_write_protect(kvm, rmapp, true); in rmap_write_protect()
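
rmap_write_protect() differs from the masked variant in scope: one gfn, every mapping level, with results OR-ed so the caller knows whether a TLB flush is needed. A sketch of that shape, reusing PT_PAGE_TABLE_LEVEL from the lookup sketch; the stub stands in for the __gfn_to_rmap()/__rmap_write_protect() pair at lines 1333-1334:

#include <stdbool.h>

#define KVM_NR_PAGE_SIZES 3                   /* 4K, 2M, 1G on x86 */

static bool protect_gfn_at_level(unsigned long gfn, int level)
{
    /* kernel: __rmap_write_protect(kvm, __gfn_to_rmap(gfn, level, slot), true) */
    return false;
}

static bool model_rmap_write_protect(unsigned long gfn)
{
    bool write_protected = false;
    int i;

    for (i = PT_PAGE_TABLE_LEVEL;
         i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; i++)
        write_protected |= protect_gfn_at_level(gfn, i);

    return write_protected;
}
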
1340 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_unmap_rmapp() argument
1348 while ((sptep = rmap_get_first(*rmapp, &iter))) { in kvm_unmap_rmapp()
1360 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_set_pte_rmapp() argument
1374 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { in kvm_set_pte_rmapp()
1383 sptep = rmap_get_first(*rmapp, &iter); in kvm_set_pte_rmapp()
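
Note the two iteration styles here: kvm_unmap_rmapp() (line 1348) restarts with rmap_get_first() on every pass because each drop_spte() unlinks the entry and may repack the descriptor chain, while kvm_set_pte_rmapp() advances normally and only re-fetches the head after it zaps (line 1383). The destructive loop, built on the iterator model above; model_drop_spte() is a hypothetical helper that must unlink sptep from *rmapp, as drop_spte() does via pte_list_remove():

static void model_drop_spte(unsigned long *rmapp, uint64_t *sptep);

/* Destructive walk: drain the chain head-first until it is empty. */
static int model_unmap_rmapp(unsigned long *rmapp)
{
    struct model_rmap_iterator iter;
    uint64_t *sptep;
    int need_tlb_flush = 0;

    while ((sptep = model_rmap_get_first(*rmapp, &iter))) {
        model_drop_spte(rmapp, sptep);        /* shrinks the chain */
        need_tlb_flush = 1;
    }
    return need_tlb_flush;
}
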
1409 unsigned long *rmapp, in kvm_handle_hva_range() argument
1441 unsigned long *rmapp; in kvm_handle_hva_range() local
1451 rmapp = __gfn_to_rmap(gfn_start, j, memslot); in kvm_handle_hva_range()
1455 ret |= handler(kvm, rmapp++, memslot, in kvm_handle_hva_range()
1465 int (*handler)(struct kvm *kvm, unsigned long *rmapp, in kvm_handle_hva() argument
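
kvm_handle_hva_range() and kvm_handle_hva() take the per-rmap operation as a function pointer and dispatch it for every rmap head overlapping the hva range; kvm_unmap_rmapp(), kvm_set_pte_rmapp(), kvm_age_rmapp() and kvm_test_age_rmapp() all match the same shape, and line 1552 below shows one being called directly. The signature, modeled standalone (gfn_t is u64 in the kernel):

#include <stdint.h>

struct kvm;                                   /* opaque for this sketch */
struct kvm_memory_slot;
typedef uint64_t gfn_t;                       /* matches the kernel typedef */

typedef int (*rmap_handler_t)(struct kvm *kvm, unsigned long *rmapp,
                              struct kvm_memory_slot *slot,
                              gfn_t gfn, int level, unsigned long data);
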
1488 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_age_rmapp() argument
1498 for (sptep = rmap_get_first(*rmapp, &iter); sptep; in kvm_age_rmapp()
1512 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_test_age_rmapp() argument
1528 for (sptep = rmap_get_first(*rmapp, &iter); sptep; in kvm_test_age_rmapp()
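
The two aging handlers differ only in side effect: kvm_age_rmapp() clears the accessed bit it finds and kvm_test_age_rmapp() merely reports it, backing the clear_flush_young and test_young MMU-notifier paths. A per-spte model; the real mask is shadow_accessed_mask, which depends on EPT versus shadow paging, so the bit used here is only the classic page-table value:

#include <stdint.h>

#define MODEL_ACCESSED_MASK (1ull << 5)       /* illustrative A bit */

/* Returns 1 if the spte was young; clears the bit when clear != 0. */
static int model_age_spte(uint64_t *sptep, int clear)
{
    if (!(*sptep & MODEL_ACCESSED_MASK))
        return 0;
    if (clear)
        *sptep &= ~MODEL_ACCESSED_MASK;       /* kernel: clear_bit() */
    return 1;
}
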
1545 unsigned long *rmapp; in rmap_recycle() local
1550 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); in rmap_recycle()
1552 kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0); in rmap_recycle()
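
rmap_recycle() shows why these handlers are plain functions rather than notifier-only callbacks: when a gfn's chain grows past RMAP_RECYCLE_THRESHOLD it calls kvm_unmap_rmapp() directly (line 1552) and then flushes guest TLBs. In outline, using the kernel's names (the flush call is recalled from this source, so treat it as an assumption):

/* Shape of rmap_recycle() after the threshold check: */
kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0);
kvm_flush_remote_tlbs(vcpu->kvm);
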
4391 unsigned long *rmapp; in kvm_mmu_slot_remove_write_access() local
4394 rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL]; in kvm_mmu_slot_remove_write_access()
4397 for (index = 0; index <= last_index; ++index, ++rmapp) { in kvm_mmu_slot_remove_write_access()
4398 if (*rmapp) in kvm_mmu_slot_remove_write_access()
4399 flush |= __rmap_write_protect(kvm, rmapp, in kvm_mmu_slot_remove_write_access()
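
Lines 4394-4399 establish the sweep that every slot-wide function below repeats: point rmapp at the level's head array, compute last_index from the slot's final gfn, and apply the op to each non-empty head. A model reusing struct model_memslot and KVM_HPAGE_GFN_SHIFT from the lookup sketch; the kernel additionally calls cond_resched_lock(&kvm->mmu_lock) inside the loop:

/* gfn_to_index(): position of gfn within the slot at this level. */
static unsigned long model_gfn_to_index(unsigned long gfn,
                                        unsigned long base_gfn, int level)
{
    return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
           (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static int model_slot_sweep(struct model_memslot *slot, int level,
                            int (*op)(unsigned long *rmapp))
{
    unsigned long *rmapp = slot->rmap[level - PT_PAGE_TABLE_LEVEL];
    unsigned long last_index = model_gfn_to_index(
            slot->base_gfn + slot->npages - 1, slot->base_gfn, level);
    unsigned long index;
    int flush = 0;

    for (index = 0; index <= last_index; ++index, ++rmapp)
        if (*rmapp)                           /* skip empty heads cheaply */
            flush |= op(rmapp);

    return flush;
}
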
4432 unsigned long *rmapp) in kvm_mmu_zap_collapsible_spte() argument
4440 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { in kvm_mmu_zap_collapsible_spte()
4457 sptep = rmap_get_first(*rmapp, &iter); in kvm_mmu_zap_collapsible_spte()
4470 unsigned long *rmapp; in kvm_mmu_zap_collapsible_sptes() local
4475 rmapp = memslot->arch.rmap[0]; in kvm_mmu_zap_collapsible_sptes()
4479 for (index = 0; index <= last_index; ++index, ++rmapp) { in kvm_mmu_zap_collapsible_sptes()
4480 if (*rmapp) in kvm_mmu_zap_collapsible_sptes()
4481 flush |= kvm_mmu_zap_collapsible_spte(kvm, rmapp); in kvm_mmu_zap_collapsible_sptes()
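
kvm_mmu_zap_collapsible_sptes() walks only rmap[0] (line 4475), the 4K level: after dirty logging ends it drops small sptes whose backing page is part of a transparent huge page, so the next fault can rebuild a large mapping. The per-spte test, paraphrased from this kernel era with the kernel's own names; treat the exact condition as an approximation:

/* Inside kvm_mmu_zap_collapsible_spte()'s walk, roughly: */
pfn = spte_to_pfn(*sptep);
if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
    PageTransCompound(pfn_to_page(pfn))) {
    drop_spte(kvm, sptep);                    /* zap the 4K mapping */
    sptep = rmap_get_first(*rmapp, &iter);    /* restart (line 4457) */
    need_tlb_flush = 1;
} else
    sptep = rmap_get_next(&iter);
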
4502 unsigned long *rmapp; in kvm_mmu_slot_leaf_clear_dirty() local
4510 rmapp = memslot->arch.rmap[PT_PAGE_TABLE_LEVEL - 1]; in kvm_mmu_slot_leaf_clear_dirty()
4514 for (index = 0; index <= last_index; ++index, ++rmapp) { in kvm_mmu_slot_leaf_clear_dirty()
4515 if (*rmapp) in kvm_mmu_slot_leaf_clear_dirty()
4516 flush |= __rmap_clear_dirty(kvm, rmapp); in kvm_mmu_slot_leaf_clear_dirty()
4550 unsigned long *rmapp; in kvm_mmu_slot_largepage_remove_write_access() local
4553 rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL]; in kvm_mmu_slot_largepage_remove_write_access()
4556 for (index = 0; index <= last_index; ++index, ++rmapp) { in kvm_mmu_slot_largepage_remove_write_access()
4557 if (*rmapp) in kvm_mmu_slot_largepage_remove_write_access()
4558 flush |= __rmap_write_protect(kvm, rmapp, in kvm_mmu_slot_largepage_remove_write_access()
4588 unsigned long *rmapp; in kvm_mmu_slot_set_dirty() local
4591 rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL]; in kvm_mmu_slot_set_dirty()
4594 for (index = 0; index <= last_index; ++index, ++rmapp) { in kvm_mmu_slot_set_dirty()
4595 if (*rmapp) in kvm_mmu_slot_set_dirty()
4596 flush |= __rmap_set_dirty(kvm, rmapp); in kvm_mmu_slot_set_dirty()
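
The three dirty-logging functions that close the listing reuse the same sweep with different ops and level ranges: kvm_mmu_slot_leaf_clear_dirty() touches only the 4K array (line 4510) with __rmap_clear_dirty(), kvm_mmu_slot_largepage_remove_write_access() write-protects the huge-page levels, and kvm_mmu_slot_set_dirty() applies __rmap_set_dirty() at every level. Expressed with the sweep model above; the *_op wrappers are hypothetical stand-ins for the __rmap_* helpers, and the exact level ranges are inferred from this era's source:

int flush = 0;
int i;

/* kvm_mmu_slot_leaf_clear_dirty: 4K level only */
flush |= model_slot_sweep(slot, PT_PAGE_TABLE_LEVEL, clear_dirty_op);

/* kvm_mmu_slot_largepage_remove_write_access: huge levels only */
for (i = PT_PAGE_TABLE_LEVEL + 1;
     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; i++)
    flush |= model_slot_sweep(slot, i, write_protect_op);

/* kvm_mmu_slot_set_dirty: every level */
for (i = PT_PAGE_TABLE_LEVEL;
     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; i++)
    flush |= model_slot_sweep(slot, i, set_dirty_op);

In each case the kernel follows a sweep that reported flush != 0 with kvm_flush_remote_tlbs().
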