pmc_idx           235 arch/x86/kvm/pmu.c void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
pmc_idx           237 arch/x86/kvm/pmu.c 	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
pmc_idx           245 arch/x86/kvm/pmu.c 		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
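The three pmu.c hits above all fall inside reprogram_counter(): it resolves a global counter index to a kvm_pmc through the vendor pmc_idx_to_pmc() hook, then reprograms the counter as either general-purpose or fixed. A plausible reconstruction of the whole function from this kernel era follows; the helpers not shown in the hits (pmc_is_gp(), reprogram_gp_counter(), reprogram_fixed_counter(), fixed_ctrl_field()) are recalled from the same file and header, so treat this as a sketch rather than verbatim source:

void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		/* General-purpose counter: the event select is cached in pmc->eventsel. */
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		/* Fixed counter: global indices start at INTEL_PMC_IDX_FIXED (32). */
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}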
pmc_idx           279 arch/x86/kvm/pmu.c bool is_vmware_backdoor_pmc(u32 pmc_idx)
pmc_idx           281 arch/x86/kvm/pmu.c 	switch (pmc_idx) {
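Note that is_vmware_backdoor_pmc() takes pmc_idx as u32 rather than int: the VMware backdoor pseudo-counters sit at magic indices well above the architectural range. Assuming the usual constants from this era's pmu.h (VMWARE_BACKDOOR_PMC_HOST_TSC 0x10000, ..._REAL_TIME 0x10001, ..._APPARENT_TIME 0x10002), the body is likely just:

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}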
pmc_idx            29 arch/x86/kvm/pmu.h 	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
pmc_idx           118 arch/x86/kvm/pmu.h void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
pmc_idx           133 arch/x86/kvm/pmu.h bool is_vmware_backdoor_pmc(u32 pmc_idx);
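The pmu.h hits show the vendor hook (struct kvm_pmu_ops::pmc_idx_to_pmc) plus the two prototypes. The main consumer of the global index space is the PMI handling path: pmu->reprogram_pmi keeps one pending bit per pmc_idx, and each set bit is resolved through the hook before reprogram_counter() runs. A sketch of that loop, reconstructed from memory of kvm_pmu_handle_event() in this era's pmu.c (not among the hits above, since it passes the bit number rather than a variable named pmc_idx):

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 bitmask = pmu->reprogram_pmi;
	int bit;

	/* Each set bit is a global pmc_idx with reprogramming pending. */
	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}
}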
pmc_idx           160 arch/x86/kvm/pmu_amd.c static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
pmc_idx           170 arch/x86/kvm/pmu_amd.c 		pmc_idx *= 2;
pmc_idx           173 arch/x86/kvm/pmu_amd.c 	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
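On AMD, pmc_idx is contiguous but the core-extension MSRs are not: with PERFCTR_CORE, counter and event-select MSRs are interleaved (MSR_F15H_PERF_CTL0/CTR0 pairs), so a contiguous index must be doubled before being added to the counter MSR base, which is what the pmc_idx *= 2 hit is doing. A plausible reconstruction of the full function, where get_msr_base() and pmu_to_vcpu() are local helpers from the same file and header of this era:

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The index is contiguous, the MSRs are not: counter MSRs
		 * are interleaved with the event-select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}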
pmc_idx           105 arch/x86/kvm/vmx/pmu_intel.c static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
pmc_idx           107 arch/x86/kvm/vmx/pmu_intel.c 	if (pmc_idx < INTEL_PMC_IDX_FIXED)
pmc_idx           108 arch/x86/kvm/vmx/pmu_intel.c 		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
pmc_idx           111 arch/x86/kvm/vmx/pmu_intel.c 		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
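The Intel side splits the same global index space in two: indices below INTEL_PMC_IDX_FIXED (32) address general-purpose counters relative to MSR_P6_EVNTSEL0, and everything at or above it is a fixed counter after subtracting the offset. Completing the truncated hits, the function is most likely the following; get_fixed_pmc() taking an MSR address (hence the idx + MSR_CORE_PERF_FIXED_CTR0 conversion) is recalled from the same era's pmu_intel.c:

static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		/* Fixed counters are looked up by their MSR address. */
		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}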