Lines Matching refs:hvc — every reference to the struct host_vm_change pointer hvc, listed with its source line, enclosing function, and usage kind (argument/local). Short illustrative sketches of the pattern these fragments show follow the add_*() group and the end of the listing.

53 static int do_ops(struct host_vm_change *hvc, int end,  in do_ops()  argument
60 op = &hvc->ops[i]; in do_ops()
63 ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len, in do_ops()
65 op->u.mmap.offset, finished, &hvc->data); in do_ops()
68 ret = unmap(hvc->id, op->u.munmap.addr, in do_ops()
69 op->u.munmap.len, finished, &hvc->data); in do_ops()
72 ret = protect(hvc->id, op->u.mprotect.addr, in do_ops()
74 finished, &hvc->data); in do_ops()
88 unsigned int prot, struct host_vm_change *hvc) in add_mmap() argument
95 if (hvc->index != 0) { in add_mmap()
96 last = &hvc->ops[hvc->index - 1]; in add_mmap()
106 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_mmap()
107 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_mmap()
108 hvc->index = 0; in add_mmap()
111 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_mmap()
123 struct host_vm_change *hvc) in add_munmap() argument
131 if (hvc->index != 0) { in add_munmap()
132 last = &hvc->ops[hvc->index - 1]; in add_munmap()
140 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_munmap()
141 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_munmap()
142 hvc->index = 0; in add_munmap()
145 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_munmap()
153 unsigned int prot, struct host_vm_change *hvc) in add_mprotect() argument
158 if (hvc->index != 0) { in add_mprotect()
159 last = &hvc->ops[hvc->index - 1]; in add_mprotect()
168 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_mprotect()
169 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_mprotect()
170 hvc->index = 0; in add_mprotect()
173 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_mprotect()
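The do_ops() and add_*() fragments above all revolve around one small state machine inside struct host_vm_change: each add_mmap()/add_munmap()/add_mprotect() call peeks at the previously queued entry (hvc->ops[hvc->index - 1], presumably to merge adjacent operations of the same kind), flushes a full queue with do_ops(hvc, ARRAY_SIZE(hvc->ops), 0), and then appends a new struct host_vm_op, while do_ops() replays the queued entries through map()/unmap()/protect(), forwarding the finished flag and &hvc->data. Below is a minimal userspace sketch of that pattern, not the kernel code itself; the names hvc_batch, hvc_op, HVC_MAX_OPS and host_op(), the enum values, and the flattened field layout are all invented for illustration, and error handling is reduced to a return code.

/*
 * Minimal userspace model of the queueing pattern above.  Everything here
 * (hvc_batch, hvc_op, HVC_MAX_OPS, host_op, the enum values) is an
 * illustrative stand-in, not the kernel's own definitions.
 */
#include <stdio.h>

enum hvc_op_type { HVC_NONE, HVC_MMAP, HVC_MUNMAP, HVC_MPROTECT };

/* One queued operation.  The kernel keeps a per-type union
 * (op->u.mmap / op->u.munmap / op->u.mprotect); it is flattened here for
 * brevity, and the mmap fd/offset arguments are omitted. */
struct hvc_op {
	enum hvc_op_type type;
	unsigned long addr, len;
	unsigned int prot;
};

#define HVC_MAX_OPS 8

struct hvc_batch {
	struct hvc_op ops[HVC_MAX_OPS];	/* fixed-size queue, like hvc->ops[] */
	int index;			/* next free slot, like hvc->index */
};

/* Stand-in for the host-side map()/unmap()/protect() calls. */
static int host_op(const char *what, const struct hvc_op *op, int finished)
{
	printf("%-8s %#lx+%#lx prot=%u finished=%d\n",
	       what, op->addr, op->len, op->prot, finished);
	return 0;
}

/* do_ops() analogue: replay everything queued so far; the caller sets
 * `finished` only on the very last flush. */
static int do_ops(struct hvc_batch *hvc, int end, int finished)
{
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		struct hvc_op *op = &hvc->ops[i];

		switch (op->type) {
		case HVC_MMAP:     ret = host_op("map", op, finished); break;
		case HVC_MUNMAP:   ret = host_op("unmap", op, finished); break;
		case HVC_MPROTECT: ret = host_op("protect", op, finished); break;
		default:           ret = -1; break;
		}
	}
	return ret;
}

/* add_munmap() analogue: merge with the previous queued op when the ranges
 * are adjacent, otherwise append a new one, flushing the whole batch first
 * if ops[] is already full. */
static int add_munmap(unsigned long addr, unsigned long len,
		      struct hvc_batch *hvc)
{
	int ret = 0;

	if (hvc->index != 0) {
		struct hvc_op *last = &hvc->ops[hvc->index - 1];

		if (last->type == HVC_MUNMAP && last->addr + last->len == addr) {
			last->len += len;	/* adjacent: one host call later */
			return 0;
		}
	}

	if (hvc->index == HVC_MAX_OPS) {
		ret = do_ops(hvc, HVC_MAX_OPS, 0);	/* mid-walk flush */
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = (struct hvc_op) {
		.type = HVC_MUNMAP, .addr = addr, .len = len,
	};
	return ret;
}

Batching like this lets a long page-table walk collapse into a handful of host calls instead of one per page, which is presumably why the helpers bother looking at the previous entry before queueing a new one.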
185 struct host_vm_change *hvc) in update_pte_range() argument
206 if (hvc->force || pte_newpage(*pte)) { in update_pte_range()
209 PAGE_SIZE, prot, hvc); in update_pte_range()
211 ret = add_munmap(addr, PAGE_SIZE, hvc); in update_pte_range()
213 ret = add_mprotect(addr, PAGE_SIZE, prot, hvc); in update_pte_range()
221 struct host_vm_change *hvc) in update_pmd_range() argument
231 if (hvc->force || pmd_newpage(*pmd)) { in update_pmd_range()
232 ret = add_munmap(addr, next - addr, hvc); in update_pmd_range()
236 else ret = update_pte_range(pmd, addr, next, hvc); in update_pmd_range()
243 struct host_vm_change *hvc) in update_pud_range() argument
253 if (hvc->force || pud_newpage(*pud)) { in update_pud_range()
254 ret = add_munmap(addr, next - addr, hvc); in update_pud_range()
258 else ret = update_pmd_range(pud, addr, next, hvc); in update_pud_range()
267 struct host_vm_change hvc; in fix_range_common() local
271 hvc = INIT_HVC(mm, force); in fix_range_common()
277 ret = add_munmap(addr, next - addr, &hvc); in fix_range_common()
281 else ret = update_pud_range(pgd, addr, next, &hvc); in fix_range_common()
285 ret = do_ops(&hvc, hvc.index, 1); in fix_range_common()
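The remaining matches show how that queue gets driven: fix_range_common() puts a struct host_vm_change on the stack, initializes it with INIT_HVC(mm, force), and walks the range through update_pud_range()/update_pmd_range()/update_pte_range(). A level marked new (or walked with hvc->force set) queues an unmap for its whole range; at the PTE level the walk queues per-page mmap, munmap, or mprotect operations. The final do_ops(&hvc, hvc.index, 1) flushes whatever is still queued and is the only call in this listing that passes finished as 1. Below is a driver in the same spirit, reusing hvc_batch, add_munmap() and do_ops() from the previous sketch; fix_range() and its page_size parameter are invented names.

/* fix_range_common() analogue, reusing hvc_batch, add_munmap() and do_ops()
 * from the sketch above. */
static int fix_range(unsigned long start, unsigned long end,
		     unsigned long page_size)
{
	struct hvc_batch hvc = { .index = 0 };	/* INIT_HVC() stand-in */
	unsigned long addr;
	int ret = 0;

	/* Walk the range a page at a time, queueing (and merging) unmaps. */
	for (addr = start; addr < end && !ret; addr += page_size)
		ret = add_munmap(addr, page_size, &hvc);

	/* Flush whatever is still queued; only this call passes finished=1. */
	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);
	return ret;
}

int main(void)
{
	/* Four adjacent page unmaps collapse into a single host call. */
	return fix_range(0x100000, 0x104000, 0x1000);
}

With the merging in add_munmap(), the four per-page calls here reach the stand-in host layer as one unmap covering the whole range, flagged as the final flush.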