Lines matching refs: u64
(Identifier-search output; the declarations and line numbers below appear to come from arch/x86/include/asm/kvm_host.h. Short, hedged sketches of selected entries follow the listing.)
225 u64 *spt;
265 u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
278 u64 *spte, const void *pte);
292 u64 *pae_root;
293 u64 *lm_root;
294 u64 rsvd_bits_mask[2][4];
295 u64 bad_mt_xwr;
306 u64 pdptrs[4]; /* pae */
317 u64 counter;
318 u64 eventsel;
327 u64 fixed_ctr_ctrl;
328 u64 global_ctrl;
329 u64 global_status;
330 u64 global_ovf_ctrl;
331 u64 counter_bitmask[2];
332 u64 global_ctrl_mask;
333 u64 reserved_bits;
338 u64 reprogram_pmi;
364 u64 efer;
365 u64 apic_base;
370 u64 ia32_misc_enable_msr;
372 u64 ia32_xss;
405 u64 xcr0;
406 u64 guest_supported_xcr0;
451 u64 msr_val;
452 u64 last_steal;
453 u64 accum_steal;
458 u64 last_guest_tsc;
459 u64 last_host_tsc;
460 u64 tsc_offset_adjustment;
461 u64 this_tsc_nsec;
462 u64 this_tsc_write;
463 u64 this_tsc_generation;
476 u64 pat;
485 u64 mcg_cap;
486 u64 mcg_status;
487 u64 mcg_ctl;
488 u64 *mce_banks;
491 u64 mmio_gva;
494 u64 mmio_gen;
502 u64 hv_vapic;
513 u64 msr_val;
520 u64 length;
521 u64 status;
525 u64 msr_val;
609 u64 last_tsc_nsec;
610 u64 last_tsc_write;
612 u64 cur_tsc_nsec;
613 u64 cur_tsc_write;
614 u64 cur_tsc_offset;
615 u64 cur_tsc_generation;
620 u64 master_kernel_ns;
631 u64 hv_guest_os_id;
632 u64 hv_hypercall;
633 u64 hv_tsc_page;
687 u64 data;
721 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
723 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
736 void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
741 u64 (*get_dr6)(struct kvm_vcpu *vcpu);
776 void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
783 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
796 u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
797 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
799 u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
800 u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
802 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
867 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
868 u64 dirty_mask, u64 nx_mask, u64 x_mask);
910 u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
939 void kvm_enable_efer_bits(u64);
940 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
941 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
968 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
970 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
1064 u64 value; in read_msr()
1081 static inline u64 get_canonical(u64 la) in get_canonical()
1086 static inline bool is_noncanonical_address(u64 la) in is_noncanonical_address()
1155 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
1178 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
1181 int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
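
The rsvd_bits_mask[2][4] table at line 294 drives reserved-bit checking of guest page-table entries. A minimal sketch of how such a table is typically consulted follows; the struct and helper names are illustrative, not the kernel's exact code, and the indexing convention (bit 7 of the PTE selects the row, the paging level selects the column) is an assumption based on how KVM's MMU uses this table.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;

/* Illustrative container; in the kernel this lives in the MMU context. */
struct mmu_ctx {
	u64 rsvd_bits_mask[2][4];	/* [PTE bit 7][paging level - 1] */
};

/* Return true if the guest PTE sets any bit reserved at this level. */
static bool gpte_has_rsvd_bits(const struct mmu_ctx *mmu, u64 gpte, int level)
{
	int bit7 = (gpte >> 7) & 1;	/* large-page (PS) bit picks the mask row */

	return (gpte & mmu->rsvd_bits_mask[bit7][level - 1]) != 0;
}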
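
counter_bitmask[2] at line 331 holds the width masks for the two PMU counter classes, and kvm_pmu_read_pmc() at line 1181 returns counter values truncated to the architectural width. A sketch of that masking; pmu_ctx and pmc_read_masked are stand-in names, and the [0] = general-purpose / [1] = fixed assignment is an assumption:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;

struct pmu_ctx {
	u64 counter_bitmask[2];		/* assumed: [0] = general-purpose, [1] = fixed */
};

/* Truncate a raw counter value to the width the guest is allowed to see. */
static u64 pmc_read_masked(const struct pmu_ctx *pmu, u64 raw, bool fixed)
{
	return raw & pmu->counter_bitmask[fixed];
}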
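
The callbacks at lines 796-800 manage the guest's view of the TSC. Absent TSC scaling, the guest-visible TSC is simply the host TSC plus a per-vCPU offset, so compute_tsc_offset() and read_l1_tsc() reduce to the arithmetic below. A sketch under that assumption; tsc_ctx is a stand-in for the vendor-specific vCPU state, not the actual VMX/SVM code:

#include <stdint.h>

typedef uint64_t u64;

struct tsc_ctx {
	u64 tsc_offset;		/* offset programmed into the VMCS/VMCB */
};

/* Offset that makes the guest read target_tsc at this instant. */
static u64 compute_tsc_offset(u64 host_tsc, u64 target_tsc)
{
	return target_tsc - host_tsc;
}

/* L1 guest TSC = host TSC plus the programmed offset. */
static u64 read_l1_tsc(const struct tsc_ctx *ctx, u64 host_tsc)
{
	return host_tsc + ctx->tsc_offset;
}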
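
get_canonical() and is_noncanonical_address() at lines 1081 and 1086 appear above only as truncated signatures. With 48-bit virtual addresses, a canonical address has bits 63:48 equal to bit 47, so sign-extending from bit 47 recovers the canonical form and any address that changes under that operation is non-canonical. The sketch below mirrors how such helpers are commonly written; the 48-bit width is an assumption:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;

static inline u64 get_canonical(u64 la)
{
	/* Shift up 16 bits, arithmetic-shift back: sign-extend from bit 47. */
	return (u64)((int64_t)(la << 16) >> 16);
}

static inline bool is_noncanonical_address(u64 la)
{
	return get_canonical(la) != la;	/* changed by sign extension => non-canonical */
}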