root_level        399 arch/x86/include/asm/kvm_host.h 	u8 root_level;
root_level       2572 arch/x86/kvm/mmu.c 	    && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
root_level       2649 arch/x86/kvm/mmu.c 	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
root_level       3746 arch/x86/kvm/mmu.c 		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
root_level       3835 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
root_level       3867 arch/x86/kvm/mmu.c 		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
root_level       3944 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
root_level       4348 arch/x86/kvm/mmu.c 	context->root_level = 0;
root_level       4399 arch/x86/kvm/mmu.c 	    mmu->root_level >= PT64_ROOT_4LEVEL) {
root_level       4607 arch/x86/kvm/mmu.c 				cpuid_maxphyaddr(vcpu), context->root_level,
root_level       4902 arch/x86/kvm/mmu.c 	unsigned root_level = mmu->root_level;
root_level       4904 arch/x86/kvm/mmu.c 	mmu->last_nonleaf_level = root_level;
root_level       4905 arch/x86/kvm/mmu.c 	if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
root_level       4914 arch/x86/kvm/mmu.c 	context->root_level = level;
root_level       4934 arch/x86/kvm/mmu.c 	int root_level = is_la57_mode(vcpu) ?
root_level       4937 arch/x86/kvm/mmu.c 	paging64_init_context_common(vcpu, context, root_level);
root_level       4944 arch/x86/kvm/mmu.c 	context->root_level = PT32_ROOT_LEVEL;
root_level       5041 arch/x86/kvm/mmu.c 		context->root_level = 0;
root_level       5044 arch/x86/kvm/mmu.c 		context->root_level = is_la57_mode(vcpu) ?
root_level       5050 arch/x86/kvm/mmu.c 		context->root_level = PT32E_ROOT_LEVEL;
root_level       5055 arch/x86/kvm/mmu.c 		context->root_level = PT32_ROOT_LEVEL;
root_level       5164 arch/x86/kvm/mmu.c 	context->root_level = PT64_ROOT_4LEVEL;
root_level       5211 arch/x86/kvm/mmu.c 		g_context->root_level = 0;
root_level       5215 arch/x86/kvm/mmu.c 		g_context->root_level = is_la57_mode(vcpu) ?
root_level       5221 arch/x86/kvm/mmu.c 		g_context->root_level = PT32E_ROOT_LEVEL;
root_level       5226 arch/x86/kvm/mmu.c 		g_context->root_level = PT32_ROOT_LEVEL;
root_level         62 arch/x86/kvm/mmu_audit.c 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
root_level         66 arch/x86/kvm/mmu_audit.c 		__mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level);
root_level        122 arch/x86/kvm/mmu_audit.c 			     "ent %llxn", vcpu->arch.mmu->root_level, pfn,
root_level        320 arch/x86/kvm/paging_tmpl.h 	walker->level = mmu->root_level;
root_level        628 arch/x86/kvm/paging_tmpl.h 	top_level = vcpu->arch.mmu->root_level;
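
The KVM usages above treat root_level both as the paging depth of the active MMU context (the PT32/PT32E/PT64 root-level checks in mmu.c) and as the starting level of the guest page-table walk (walker->level = mmu->root_level in paging_tmpl.h). Below is a minimal standalone sketch of that "start the walk at root_level" pattern; the SKETCH_* constants and sketch_read_pte() are hypothetical stand-ins for illustration, not KVM definitions.

/*
 * Illustrative sketch only (not KVM code): a page-table walk seeded
 * from the paging mode's root level, mirroring the pattern in the
 * paging_tmpl.h lines above. All names here are stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PT32_ROOT_LEVEL   2  /* legacy 32-bit, 2-level paging */
#define SKETCH_PT32E_ROOT_LEVEL  3  /* PAE, 3-level paging           */
#define SKETCH_PT64_ROOT_4LEVEL  4  /* long mode, 4-level paging     */
#define SKETCH_PT64_ROOT_5LEVEL  5  /* LA57, 5-level paging          */

/* Hypothetical stand-in for fetching one page-table entry. */
static uint64_t sketch_read_pte(int level, uint64_t gva)
{
	(void)gva;
	return 0x1ULL | ((uint64_t)level << 12); /* fake "present" entry */
}

/* Descend from the root level toward level 1, one entry per level. */
static void sketch_walk(int root_level, uint64_t gva)
{
	for (int level = root_level; level >= 1; level--) {
		uint64_t pte = sketch_read_pte(level, gva);

		printf("level %d: pte %#llx\n", level,
		       (unsigned long long)pte);
		if (!(pte & 0x1))	/* not-present entry ends the walk */
			return;
	}
}

int main(void)
{
	/* A 4-level walk, as with a 4-level root_level. */
	sketch_walk(SKETCH_PT64_ROOT_4LEVEL, 0x00007f0000001000ULL);
	return 0;
}
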
root_level        129 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 					       adev->vm_manager.root_level);
root_level        131 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	if (level == adev->vm_manager.root_level)
root_level        154 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
root_level        170 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	if (level <= adev->vm_manager.root_level)
root_level        371 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	cursor->level = adev->vm_manager.root_level;
root_level        707 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	unsigned level = adev->vm_manager.root_level;
root_level       1202 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	level += params->adev->vm_manager.root_level;
root_level       1407 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		if (cursor.level == adev->vm_manager.root_level) {
root_level       1432 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 			   cursor.level - 1 != adev->vm_manager.root_level) {
root_level       2615 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
root_level       2618 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
root_level       2621 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
root_level       2720 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
root_level       2790 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	enum amdgpu_vm_level root = adev->vm_manager.root_level;
root_level        306 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 	enum amdgpu_vm_level			root_level;
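
In the amdgpu lines above, root_level is chosen once per device (AMDGPU_VM_PDB2/PDB1/PDB0) and then used as the level where every page-directory walk begins (cursor->level, amdgpu_vm_level_shift, the per-VM root BO). Below is a minimal standalone sketch of that shape; the enum, the num_levels mapping, and the walk are assumptions made for the sketch, not the driver's code.

/*
 * Illustrative sketch only (not driver code): pick a VM root level from
 * the number of configured page-directory levels and walk down from it.
 */
#include <stdio.h>

enum sketch_vm_level {
	SKETCH_VM_PDB2,	/* deepest hierarchy: starts highest up */
	SKETCH_VM_PDB1,
	SKETCH_VM_PDB0,
	SKETCH_VM_PTB,	/* page-table block, the leaf level */
};

/* More configured directory levels means the walk starts higher up. */
static enum sketch_vm_level sketch_pick_root_level(unsigned int num_levels)
{
	switch (num_levels) {
	case 3:
		return SKETCH_VM_PDB2;
	case 2:
		return SKETCH_VM_PDB1;
	default:
		return SKETCH_VM_PDB0;
	}
}

int main(void)
{
	enum sketch_vm_level root = sketch_pick_root_level(2);

	/* Walk from the root level down to the leaf (PTB) level. */
	for (int level = root; level <= SKETCH_VM_PTB; level++)
		printf("visiting level %d\n", level);

	return 0;
}
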
root_level        513 fs/btrfs/backref.c 	int root_level;
root_level        537 fs/btrfs/backref.c 		root_level = btrfs_header_level(root->commit_root);
root_level        539 fs/btrfs/backref.c 		root_level = btrfs_header_level(root->node);
root_level        541 fs/btrfs/backref.c 		root_level = btrfs_old_root_level(root, time_seq);
root_level        543 fs/btrfs/backref.c 	if (root_level + 1 == level) {
root_level        249 fs/btrfs/ctree.h 	u8 root_level;
root_level       1949 fs/btrfs/ctree.h BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8);
root_level       2123 fs/btrfs/ctree.h 			 root_level, 8);
root_level       1692 fs/btrfs/qgroup.c static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
root_level       1698 fs/btrfs/qgroup.c 	if (root_level == 0)
root_level       1701 fs/btrfs/qgroup.c 	while (level <= root_level) {
root_level       1712 fs/btrfs/qgroup.c 			if (level != root_level) {
root_level       1732 fs/btrfs/qgroup.c 	eb = path->nodes[root_level];
root_level       1733 fs/btrfs/qgroup.c 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
root_level       1788 fs/btrfs/qgroup.c 				    int dst_level, int root_level,
root_level       1795 fs/btrfs/qgroup.c 	int cur_level = root_level;
root_level       1798 fs/btrfs/qgroup.c 	BUG_ON(dst_level > root_level);
root_level       1800 fs/btrfs/qgroup.c 	if (btrfs_header_level(src_eb) != root_level)
root_level       1816 fs/btrfs/qgroup.c 	src_path->nodes[root_level] = src_eb;
root_level       1817 fs/btrfs/qgroup.c 	src_path->slots[root_level] = dst_path->slots[root_level];
root_level       1818 fs/btrfs/qgroup.c 	src_path->locks[root_level] = 0;
root_level       1927 fs/btrfs/qgroup.c 					   int cur_level, int root_level,
root_level       1938 fs/btrfs/qgroup.c 	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
root_level       1939 fs/btrfs/qgroup.c 	    root_level < cur_level) {
root_level       1942 fs/btrfs/qgroup.c 			__func__, cur_level, root_level);
root_level       1957 fs/btrfs/qgroup.c 		if (cur_level == root_level) {
root_level       1960 fs/btrfs/qgroup.c 				__func__, root_level, root_level, cur_level);
root_level       2000 fs/btrfs/qgroup.c 				       root_level, trace_leaf);
root_level       2016 fs/btrfs/qgroup.c 					dst_path, cur_level - 1, root_level,
root_level       2092 fs/btrfs/qgroup.c 			       u64 root_gen, int root_level)
root_level       2100 fs/btrfs/qgroup.c 	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
root_level       2107 fs/btrfs/qgroup.c 		ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
root_level       2112 fs/btrfs/qgroup.c 	if (root_level == 0) {
root_level       2131 fs/btrfs/qgroup.c 	path->nodes[root_level] = root_eb;
root_level       2132 fs/btrfs/qgroup.c 	path->slots[root_level] = 0;
root_level       2133 fs/btrfs/qgroup.c 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
root_level       2135 fs/btrfs/qgroup.c 	level = root_level;
root_level       2185 fs/btrfs/qgroup.c 			ret = adjust_slots_upwards(path, root_level);
root_level        329 fs/btrfs/qgroup.h 			       u64 root_gen, int root_level);
root_level       6571 fs/btrfs/send.c 				    int *level, int root_level)
root_level       6580 fs/btrfs/send.c 		if (*level == root_level)
root_level       6601 fs/btrfs/send.c 			int *level, int root_level,
root_level       6608 fs/btrfs/send.c 		ret = tree_move_next_or_upnext(path, level, root_level);
root_level       1703 fs/btrfs/transaction.c 	super->root_level = root_item->level;
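
The btrfs usages above all follow one shape: record the root level of an extent-buffer tree (btrfs_header_level, btrfs_old_root_level, the root_item and super_block fields), seed path->nodes[root_level] and path->slots[root_level], then walk downward, bumping slots at higher levels when a lower level runs out (adjust_slots_upwards in qgroup.c, tree_move_next_or_upnext in send.c). Below is a minimal standalone sketch of that walk over a 2-level tree; the sketch_* structures and helper are hypothetical stand-ins, not btrfs types.

/*
 * Illustrative sketch only (not btrfs code): seed a path at root_level
 * and iterate leaves, moving the root-level slot upward when a leaf is
 * exhausted, loosely in the spirit of the qgroup.c lines above.
 */
#include <stdio.h>

#define SKETCH_MAX_LEVEL 8

struct sketch_node {
	int nritems;			/* number of occupied slots     */
	struct sketch_node *child[4];	/* non-NULL only above level 0  */
};

struct sketch_path {
	struct sketch_node *nodes[SKETCH_MAX_LEVEL];
	int slots[SKETCH_MAX_LEVEL];
};

/*
 * Advance the slot one level above the leaves; returns 1 while there is
 * more of the tree to visit, 0 once the root itself is exhausted.
 */
static int sketch_adjust_slots_upwards(struct sketch_path *path,
				       int root_level)
{
	path->slots[root_level]++;
	return path->slots[root_level] < path->nodes[root_level]->nritems;
}

int main(void)
{
	/* Two leaves under one root: a 2-level tree, so root_level == 1. */
	struct sketch_node leaf_a = { .nritems = 2 };
	struct sketch_node leaf_b = { .nritems = 1 };
	struct sketch_node root = { .nritems = 2,
				    .child = { &leaf_a, &leaf_b } };
	struct sketch_path path = { 0 };
	int root_level = 1;

	/* Seed the walk at the root, as qgroup.c does for root_eb. */
	path.nodes[root_level] = &root;
	path.slots[root_level] = 0;

	do {
		struct sketch_node *leaf =
			path.nodes[root_level]->child[path.slots[root_level]];

		path.nodes[0] = leaf;
		for (path.slots[0] = 0; path.slots[0] < leaf->nritems;
		     path.slots[0]++)
			printf("leaf %d item %d\n", path.slots[root_level],
			       path.slots[0]);
	} while (sketch_adjust_slots_upwards(&path, root_level));

	return 0;
}
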