Lines Matching refs:pgd

418 __visible pgdval_t xen_pgd_val(pgd_t pgd) in xen_pgd_val() argument
420 return pte_mfn_to_pfn(pgd.pgd); in xen_pgd_val()
432 __visible pgd_t xen_make_pgd(pgdval_t pgd) in xen_make_pgd() argument
434 pgd = pte_pfn_to_mfn(pgd); in xen_make_pgd()
435 return native_make_pgd(pgd); in xen_make_pgd()
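
The xen_pgd_val()/xen_make_pgd() pair above converts the frame number embedded in a PGD value between the guest's pseudo-physical frame numbers (PFNs) and Xen's machine frame numbers (MFNs), leaving the flag bits untouched. A minimal user-space model of that idea follows; the toy translation tables and helper names are illustrative stand-ins, not the kernel's p2m/m2p machinery:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define FLAGS_MASK ((1UL << PAGE_SHIFT) - 1)    /* low bits hold the flags */

    /* Toy translation tables: index = pfn, value = mfn, and the inverse. */
    static const uint64_t toy_p2m[4]  = { 7, 3, 9, 1 };
    static const uint64_t toy_m2p[16] = { [7] = 0, [3] = 1, [9] = 2, [1] = 3 };

    /* Swap the frame number inside an entry value, keep the flag bits. */
    static uint64_t entry_pfn_to_mfn(uint64_t val)
    {
        uint64_t pfn = val >> PAGE_SHIFT;
        return (toy_p2m[pfn] << PAGE_SHIFT) | (val & FLAGS_MASK);
    }

    static uint64_t entry_mfn_to_pfn(uint64_t val)
    {
        uint64_t mfn = val >> PAGE_SHIFT;
        return (toy_m2p[mfn] << PAGE_SHIFT) | (val & FLAGS_MASK);
    }

    int main(void)
    {
        uint64_t pgdval = (2UL << PAGE_SHIFT) | 0x67;   /* pfn 2, some flags */
        uint64_t native = entry_pfn_to_mfn(pgdval);     /* what Xen sees */

        printf("guest view: %#lx, machine view: %#lx, back: %#lx\n",
               (unsigned long)pgdval, (unsigned long)native,
               (unsigned long)entry_mfn_to_pfn(native));
        return 0;
    }

The kernel helpers also handle non-present entries and other special cases that this toy omits.
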
520 static pgd_t *xen_get_user_pgd(pgd_t *pgd) in xen_get_user_pgd() argument
522 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); in xen_get_user_pgd()
523 unsigned offset = pgd - pgd_page; in xen_get_user_pgd()
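
Lines 522-523 recover which slot of the PGD page the incoming pointer refers to: mask the pointer down to its page boundary to find the start of the table, then take the pointer difference to get the entry index. A small self-contained illustration of that arithmetic (the allocation and the index 100 below are stand-ins, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    typedef struct { uint64_t pgd; } pgd_t;

    int main(void)
    {
        /* Stand-in for a page-aligned PGD page (512 eight-byte entries). */
        pgd_t *pgd_base = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        pgd_t *entry, *pgd_page;
        unsigned offset;

        if (!pgd_base)
            return 1;

        entry = &pgd_base[100];                 /* pointer to one slot */

        /* Same arithmetic as lines 522-523: page base, then slot index. */
        pgd_page = (pgd_t *)(((unsigned long)entry) & PAGE_MASK);
        offset = entry - pgd_page;

        printf("offset = %u\n", offset);        /* prints 100 */
        free(pgd_base);
        return 0;
    }

In the kernel, that offset is then used to index the separate user-mode PGD that 64-bit PV guests maintain alongside the kernel one, which is what xen_get_user_pgd() ultimately returns.
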
609 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, in __xen_pgd_walk() argument
652 if (!pgd_val(pgd[pgdidx])) in __xen_pgd_walk()
655 pud = pud_offset(&pgd[pgdidx], 0); in __xen_pgd_walk()
695 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD); in __xen_pgd_walk()
705 return __xen_pgd_walk(mm, mm->pgd, func, limit); in xen_pgd_walk()
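
__xen_pgd_walk() (lines 609-705) is a callback-driven traversal: it skips empty top-level entries (line 652), descends through the lower levels, calls *func on every page-table page it visits, finishes with the PGD page itself (line 695), and ORs the callbacks' return values into a flush indication for the caller. A compressed, single-level model of that control flow, with a toy entry array and callback standing in for the real page-table types:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_ENTRIES 8

    struct toy_page { int id; };

    /* Callback invoked for every populated entry; returns "needs flush". */
    typedef bool (*walk_fn)(struct toy_page *pg);

    static bool toy_walk(unsigned long *entries, struct toy_page *pages,
                         walk_fn func, unsigned limit)
    {
        bool flush = false;
        unsigned i;

        for (i = 0; i < limit; i++) {
            if (!entries[i])            /* skip empty slots, as line 652 does */
                continue;
            flush |= func(&pages[i]);   /* accumulate the flush request */
        }
        return flush;
    }

    static bool print_page(struct toy_page *pg)
    {
        printf("visiting page %d\n", pg->id);
        return pg->id == 3;             /* pretend one page forces a flush */
    }

    int main(void)
    {
        unsigned long entries[NR_ENTRIES] = { 1, 0, 1, 1, 0, 0, 1, 0 };
        struct toy_page pages[NR_ENTRIES] = {
            {0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}
        };

        printf("flush needed: %d\n",
               toy_walk(entries, pages, print_page, NR_ENTRIES));
        return 0;
    }
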
801 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) in __xen_pgd_pin() argument
803 trace_xen_mmu_pgd_pin(mm, pgd); in __xen_pgd_pin()
807 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) { in __xen_pgd_pin()
818 pgd_t *user_pgd = xen_get_user_pgd(pgd); in __xen_pgd_pin()
820 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); in __xen_pgd_pin()
831 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), in __xen_pgd_pin()
834 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd))); in __xen_pgd_pin()
841 __xen_pgd_pin(mm, mm->pgd); in xen_pgd_pin()
928 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) in __xen_pgd_unpin() argument
930 trace_xen_mmu_pgd_unpin(mm, pgd); in __xen_pgd_unpin()
934 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); in __xen_pgd_unpin()
938 pgd_t *user_pgd = xen_get_user_pgd(pgd); in __xen_pgd_unpin()
950 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), in __xen_pgd_unpin()
954 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT); in __xen_pgd_unpin()
961 __xen_pgd_unpin(mm, mm->pgd); in xen_pgd_unpin()
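
Taken together, the pin and unpin paths are mirror images. __xen_pgd_pin() walks the tree with xen_pin_page() first and only then issues the MMUEXT_PIN_L4_TABLE (or L3) operation on the root (lines 807, 820, 834); __xen_pgd_unpin() releases the root with MMUEXT_UNPIN_TABLE first (line 934) and walks with xen_unpin_page() afterwards (line 954). A toy model of that ordering, with stub helpers that only log what the real walk and hypercall would do:

    #include <stdio.h>

    /* Stand-ins for the real walk and hypercall; they only record ordering. */
    static void walk(const char *cb)   { printf("  walk tree with %s\n", cb); }
    static void do_pin(const char *op) { printf("  hypercall %s on root\n", op); }

    static void toy_pgd_pin(void)
    {
        printf("pin:\n");
        walk("pin_page");               /* make every table page read-only first */
        do_pin("MMUEXT_PIN_L4_TABLE");  /* then tell Xen the root is a pagetable */
    }

    static void toy_pgd_unpin(void)
    {
        printf("unpin:\n");
        do_pin("MMUEXT_UNPIN_TABLE");   /* release the root first */
        walk("unpin_page");             /* then make the pages writable again */
    }

    int main(void)
    {
        toy_pgd_pin();
        toy_pgd_unpin();
        return 0;
    }

The ordering reflects that Xen validates the whole tree when the root is pinned and keeps it read-only until it is unpinned.
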
1015 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd)) in drop_other_mm_ref()
1035 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) in xen_drop_mm_ref()
1049 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) in xen_drop_mm_ref()
1088 if (xen_page_pinned(mm->pgd)) in xen_exit_mmap()
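
drop_other_mm_ref() and xen_drop_mm_ref() (lines 1015, 1035, 1049) decide whether a CPU still references the mm by comparing its cached CR3 value against the physical address of the mm's PGD. The __pa()/PFN_DOWN() pattern seen here and in the pin/unpin hypercalls above is plain address arithmetic; a toy version, with an invented direct-map base (the kernel's PAGE_OFFSET differs per configuration):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define TOY_PAGE_OFFSET 0xffff888000000000ULL   /* toy direct-map base */

    /* Toy __pa(): direct-mapped virtual address -> physical address. */
    static uint64_t toy_pa(uint64_t vaddr)    { return vaddr - TOY_PAGE_OFFSET; }

    /* Toy PFN_DOWN(): physical address -> number of the frame containing it. */
    static uint64_t toy_pfn_down(uint64_t pa) { return pa >> PAGE_SHIFT; }

    int main(void)
    {
        uint64_t pgd_virt = TOY_PAGE_OFFSET + 0x1234000;

        /* __pa(mm->pgd): the value compared against the cached CR3. */
        printf("pa  = %#llx\n", (unsigned long long)toy_pa(pgd_virt));
        /* PFN_DOWN(__pa(pgd)): the frame number passed to pin/unpin ops. */
        printf("pfn = %#llx\n",
               (unsigned long long)toy_pfn_down(toy_pa(pgd_virt)));
        return 0;
    }
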
1409 pgd_t *pgd = mm->pgd; in xen_pgd_alloc() local
1412 BUG_ON(PagePinned(virt_to_page(pgd))); in xen_pgd_alloc()
1416 struct page *page = virt_to_page(pgd); in xen_pgd_alloc()
1434 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); in xen_pgd_alloc()
1441 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) in xen_pgd_free() argument
1444 pgd_t *user_pgd = xen_get_user_pgd(pgd); in xen_pgd_free()
1562 bool pinned = PagePinned(virt_to_page(mm->pgd)); in xen_alloc_ptpage()
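
xen_pgd_alloc() insists that a freshly allocated PGD is not already pinned (lines 1412, 1434), while xen_alloc_ptpage() looks at whether the owning mm's PGD is pinned (line 1562): a page-table page that joins an already-pinned tree must be brought into the pinned, read-only state right away, whereas pages added to an unpinned tree can stay ordinary until the whole tree is pinned. A toy model of that decision, using an invented flag in place of the real PagePinned() page flag:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_mm { bool pgd_pinned; };

    /* Allocating a new top-level table: it must not be pinned yet. */
    static void toy_pgd_alloc(struct toy_mm *mm)
    {
        assert(!mm->pgd_pinned);        /* mirrors BUG_ON(PagePinned(...)) */
        printf("allocated fresh pgd (unpinned)\n");
    }

    /* Allocating a lower-level table: inherit the tree's pinned state. */
    static void toy_alloc_ptpage(struct toy_mm *mm)
    {
        if (mm->pgd_pinned)
            printf("new pt page: pin immediately (tree already pinned)\n");
        else
            printf("new pt page: leave writable (tree not pinned yet)\n");
    }

    int main(void)
    {
        struct toy_mm mm = { .pgd_pinned = false };

        toy_pgd_alloc(&mm);
        toy_alloc_ptpage(&mm);

        mm.pgd_pinned = true;           /* pretend the pagetable got pinned */
        toy_alloc_ptpage(&mm);
        return 0;
    }
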
1806 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) in xen_setup_kernel_pagetable() argument
1842 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); in xen_setup_kernel_pagetable()
1845 addr[0] = (unsigned long)pgd; in xen_setup_kernel_pagetable()
1874 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); in xen_setup_kernel_pagetable()
1941 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) in xen_setup_kernel_pagetable() argument
1952 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); in xen_setup_kernel_pagetable()
1957 copy_page(initial_page_table, pgd); in xen_setup_kernel_pagetable()
1965 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); in xen_setup_kernel_pagetable()
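
Both flavours of xen_setup_kernel_pagetable() finish by unpinning the pagetable that Xen handed the kernel at boot (lines 1874 and 1965) so it can be reclaimed as an ordinary page; the 32-bit variant first duplicates the Xen-provided PGD into the kernel's own initial_page_table (line 1957). copy_page() there amounts to duplicating one page of entries; a minimal stand-alone sketch (entry size and names are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE    4096
    #define PTRS_PER_PGD (PAGE_SIZE / sizeof(uint32_t))  /* toy entry size */

    static uint32_t xen_provided_pgd[PTRS_PER_PGD];   /* what the hypervisor built */
    static uint32_t initial_page_table[PTRS_PER_PGD]; /* the kernel's own copy */

    int main(void)
    {
        xen_provided_pgd[0] = 0x1067;        /* pretend entry 0 is populated */

        /* copy_page(initial_page_table, pgd): one page-sized copy. */
        memcpy(initial_page_table, xen_provided_pgd, PAGE_SIZE);

        /* The kernel then edits its copy and later unpins the original. */
        printf("entry 0: %#x\n", initial_page_table[0]);
        return 0;
    }
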
2409 a.gpa = __pa(mm->pgd); in xen_hvm_exit_mmap()