root/virt/kvm/arm/arch_timer.c


DEFINITIONS

This source file includes the following definitions.
  1. kvm_phys_timer_read
  2. get_timer_map
  3. userspace_irqchip
  4. soft_timer_start
  5. soft_timer_cancel
  6. kvm_arch_timer_handler
  7. kvm_timer_compute_delta
  8. kvm_timer_irq_can_fire
  9. kvm_timer_earliest_exp
  10. kvm_bg_timer_expire
  11. kvm_hrtimer_expire
  12. kvm_timer_should_fire
  13. kvm_timer_is_pending
  14. kvm_timer_update_run
  15. kvm_timer_update_irq
  16. timer_emulate
  17. timer_save_state
  18. kvm_timer_blocking
  19. kvm_timer_unblocking
  20. timer_restore_state
  21. set_cntvoff
  22. set_timer_irq_phys_active
  23. kvm_timer_vcpu_load_gic
  24. kvm_timer_vcpu_load_nogic
  25. kvm_timer_vcpu_load
  26. kvm_timer_should_notify_user
  27. kvm_timer_vcpu_put
  28. unmask_vtimer_irq_user
  29. kvm_timer_sync_hwstate
  30. kvm_timer_vcpu_reset
  31. update_vtimer_cntvoff
  32. kvm_timer_vcpu_init
  33. kvm_timer_init_interrupt
  34. kvm_arm_timer_set_reg
  35. read_timer_ctl
  36. kvm_arm_timer_get_reg
  37. kvm_arm_timer_read
  38. kvm_arm_timer_read_sysreg
  39. kvm_arm_timer_write
  40. kvm_arm_timer_write_sysreg
  41. kvm_timer_starting_cpu
  42. kvm_timer_dying_cpu
  43. kvm_timer_hyp_init
  44. kvm_timer_vcpu_terminate
  45. timer_irqs_are_valid
  46. kvm_arch_timer_get_input_level
  47. kvm_timer_enable
  48. kvm_timer_init_vhe
  49. set_timer_irqs
  50. kvm_arm_timer_set_attr
  51. kvm_arm_timer_get_attr
  52. kvm_arm_timer_has_attr

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (C) 2012 ARM Ltd.
   4  * Author: Marc Zyngier <marc.zyngier@arm.com>
   5  */
   6 
   7 #include <linux/cpu.h>
   8 #include <linux/kvm.h>
   9 #include <linux/kvm_host.h>
  10 #include <linux/interrupt.h>
  11 #include <linux/irq.h>
  12 #include <linux/uaccess.h>
  13 
  14 #include <clocksource/arm_arch_timer.h>
  15 #include <asm/arch_timer.h>
  16 #include <asm/kvm_emulate.h>
  17 #include <asm/kvm_hyp.h>
  18 
  19 #include <kvm/arm_vgic.h>
  20 #include <kvm/arm_arch_timer.h>
  21 
  22 #include "trace.h"
  23 
  24 static struct timecounter *timecounter;
  25 static unsigned int host_vtimer_irq;
  26 static unsigned int host_ptimer_irq;
  27 static u32 host_vtimer_irq_flags;
  28 static u32 host_ptimer_irq_flags;
  29 
  30 static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
  31 
  32 static const struct kvm_irq_level default_ptimer_irq = {
  33         .irq    = 30,
  34         .level  = 1,
  35 };
  36 
  37 static const struct kvm_irq_level default_vtimer_irq = {
  38         .irq    = 27,
  39         .level  = 1,
  40 };
  41 
  42 static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
  43 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
  44                                  struct arch_timer_context *timer_ctx);
  45 static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
  46 static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
  47                                 struct arch_timer_context *timer,
  48                                 enum kvm_arch_timer_regs treg,
  49                                 u64 val);
  50 static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
  51                               struct arch_timer_context *timer,
  52                               enum kvm_arch_timer_regs treg);
  53 
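     /*
      * Read the raw physical counter through the timecounter picked up
      * from the arch timer driver in kvm_timer_hyp_init(). All guest
      * timer arithmetic below works on this physical count, adjusted by
      * the per-context cntvoff.
      */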
  54 u64 kvm_phys_timer_read(void)
  55 {
  56         return timecounter->cc->read(timecounter->cc);
  57 }
  58 
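     /*
      * Describe how this host handles each guest timer: on VHE both EL1
      * timers are given directly to the guest, while on non-VHE only the
      * virtual timer is direct and the physical timer is emulated with a
      * hrtimer.
      */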
  59 static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
  60 {
  61         if (has_vhe()) {
  62                 map->direct_vtimer = vcpu_vtimer(vcpu);
  63                 map->direct_ptimer = vcpu_ptimer(vcpu);
  64                 map->emul_ptimer = NULL;
  65         } else {
  66                 map->direct_vtimer = vcpu_vtimer(vcpu);
  67                 map->direct_ptimer = NULL;
  68                 map->emul_ptimer = vcpu_ptimer(vcpu);
  69         }
  70 
  71         trace_kvm_get_timer_map(vcpu->vcpu_id, map);
  72 }
  73 
  74 static inline bool userspace_irqchip(struct kvm *kvm)
  75 {
  76         return static_branch_unlikely(&userspace_irqchip_in_use) &&
  77                 unlikely(!irqchip_in_kernel(kvm));
  78 }
  79 
  80 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
  81 {
  82         hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
  83                       HRTIMER_MODE_ABS);
  84 }
  85 
  86 static void soft_timer_cancel(struct hrtimer *hrt)
  87 {
  88         hrtimer_cancel(hrt);
  89 }
  90 
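     /*
      * Host handler for the per-cpu vtimer/ptimer interrupts: raise the
      * corresponding guest timer's interrupt line if that timer should
      * fire. With a userspace irqchip and no GIC active state, the host
      * line is masked here so it does not immediately fire again.
      */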
  91 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
  92 {
  93         struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
  94         struct arch_timer_context *ctx;
  95         struct timer_map map;
  96 
  97         /*
  98          * We may see a timer interrupt after vcpu_put() has been called which
  99          * sets the CPU's vcpu pointer to NULL, because even though the timer
 100          * has been disabled in timer_save_state(), the hardware interrupt
 101          * signal may not have been retired from the interrupt controller yet.
 102          */
 103         if (!vcpu)
 104                 return IRQ_HANDLED;
 105 
 106         get_timer_map(vcpu, &map);
 107 
 108         if (irq == host_vtimer_irq)
 109                 ctx = map.direct_vtimer;
 110         else
 111                 ctx = map.direct_ptimer;
 112 
 113         if (kvm_timer_should_fire(ctx))
 114                 kvm_timer_update_irq(vcpu, true, ctx);
 115 
 116         if (userspace_irqchip(vcpu->kvm) &&
 117             !static_branch_unlikely(&has_gic_active_state))
 118                 disable_percpu_irq(host_vtimer_irq);
 119 
 120         return IRQ_HANDLED;
 121 }
 122 
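     /*
      * Return the number of ns until the timer's compare value is reached
      * from the guest's point of view, or 0 if it has already expired.
      */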
 123 static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
 124 {
 125         u64 cval, now;
 126 
 127         cval = timer_ctx->cnt_cval;
 128         now = kvm_phys_timer_read() - timer_ctx->cntvoff;
 129 
 130         if (now < cval) {
 131                 u64 ns;
 132 
 133                 ns = cyclecounter_cyc2ns(timecounter->cc,
 134                                          cval - now,
 135                                          timecounter->mask,
 136                                          &timecounter->frac);
 137                 return ns;
 138         }
 139 
 140         return 0;
 141 }
 142 
 143 static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
 144 {
 145         WARN_ON(timer_ctx && timer_ctx->loaded);
 146         return timer_ctx &&
 147                !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
 148                 (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
 149 }
 150 
 151 /*
 152  * Returns the earliest expiration time in ns among guest timers.
 153  * Note that it will return 0 if none of the timers can fire.
 154  */
 155 static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
 156 {
 157         u64 min_delta = ULLONG_MAX;
 158         int i;
 159 
 160         for (i = 0; i < NR_KVM_TIMERS; i++) {
 161                 struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];
 162 
 163                 WARN(ctx->loaded, "timer %d loaded\n", i);
 164                 if (kvm_timer_irq_can_fire(ctx))
 165                         min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
 166         }
 167 
 168         /* If none of the timers can fire, return 0 */
 169         if (min_delta == ULLONG_MAX)
 170                 return 0;
 171 
 172         return min_delta;
 173 }
 174 
 175 static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
 176 {
 177         struct arch_timer_cpu *timer;
 178         struct kvm_vcpu *vcpu;
 179         u64 ns;
 180 
 181         timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
 182         vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
 183 
 184         /*
 185          * Check that the timer has really expired from the guest's
 186          * PoV (NTP on the host may have forced it to expire
 187          * early). If we should have slept longer, restart it.
 188          */
 189         ns = kvm_timer_earliest_exp(vcpu);
 190         if (unlikely(ns)) {
 191                 hrtimer_forward_now(hrt, ns_to_ktime(ns));
 192                 return HRTIMER_RESTART;
 193         }
 194 
 195         kvm_vcpu_wake_up(vcpu);
 196         return HRTIMER_NORESTART;
 197 }
 198 
 199 static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
 200 {
 201         struct arch_timer_context *ctx;
 202         struct kvm_vcpu *vcpu;
 203         u64 ns;
 204 
 205         ctx = container_of(hrt, struct arch_timer_context, hrtimer);
 206         vcpu = ctx->vcpu;
 207 
 208         trace_kvm_timer_hrtimer_expire(ctx);
 209 
 210         /*
 211          * Check that the timer has really expired from the guest's
 212          * PoV (NTP on the host may have forced it to expire
 213          * early). If not ready, schedule for a later time.
 214          */
 215         ns = kvm_timer_compute_delta(ctx);
 216         if (unlikely(ns)) {
 217                 hrtimer_forward_now(hrt, ns_to_ktime(ns));
 218                 return HRTIMER_RESTART;
 219         }
 220 
 221         kvm_timer_update_irq(vcpu, true, ctx);
 222         return HRTIMER_NORESTART;
 223 }
 224 
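     /*
      * Compute the timer's output line level: read the condition straight
      * from the hardware if the context is loaded on the CPU, otherwise
      * derive it from the saved CTL/CVAL snapshot.
      */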
 225 static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
 226 {
 227         enum kvm_arch_timers index;
 228         u64 cval, now;
 229 
 230         if (!timer_ctx)
 231                 return false;
 232 
 233         index = arch_timer_ctx_index(timer_ctx);
 234 
 235         if (timer_ctx->loaded) {
 236                 u32 cnt_ctl = 0;
 237 
 238                 switch (index) {
 239                 case TIMER_VTIMER:
 240                         cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
 241                         break;
 242                 case TIMER_PTIMER:
 243                         cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
 244                         break;
 245                 case NR_KVM_TIMERS:
 246                         /* GCC is braindead */
 247                         cnt_ctl = 0;
 248                         break;
 249                 }
 250 
 251                 return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
 252                         (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
 253                        !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
 254         }
 255 
 256         if (!kvm_timer_irq_can_fire(timer_ctx))
 257                 return false;
 258 
 259         cval = timer_ctx->cnt_cval;
 260         now = kvm_phys_timer_read() - timer_ctx->cntvoff;
 261 
 262         return cval <= now;
 263 }
 264 
 265 bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
 266 {
 267         struct timer_map map;
 268 
 269         get_timer_map(vcpu, &map);
 270 
 271         return kvm_timer_should_fire(map.direct_vtimer) ||
 272                kvm_timer_should_fire(map.direct_ptimer) ||
 273                kvm_timer_should_fire(map.emul_ptimer);
 274 }
 275 
 276 /*
 277  * Reflect the timer output level into the kvm_run structure
 278  */
 279 void kvm_timer_update_run(struct kvm_vcpu *vcpu)
 280 {
 281         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 282         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 283         struct kvm_sync_regs *regs = &vcpu->run->s.regs;
 284 
 285         /* Populate the device bitmap with the timer states */
 286         regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
 287                                     KVM_ARM_DEV_EL1_PTIMER);
 288         if (kvm_timer_should_fire(vtimer))
 289                 regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
 290         if (kvm_timer_should_fire(ptimer))
 291                 regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
 292 }
 293 
 294 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 295                                  struct arch_timer_context *timer_ctx)
 296 {
 297         int ret;
 298 
 299         timer_ctx->irq.level = new_level;
 300         trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
 301                                    timer_ctx->irq.level);
 302 
 303         if (!userspace_irqchip(vcpu->kvm)) {
 304                 ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
 305                                           timer_ctx->irq.irq,
 306                                           timer_ctx->irq.level,
 307                                           timer_ctx);
 308                 WARN_ON(ret);
 309         }
 310 }
 311 
 312 /* Only called for a fully emulated timer */
 313 static void timer_emulate(struct arch_timer_context *ctx)
 314 {
 315         bool should_fire = kvm_timer_should_fire(ctx);
 316 
 317         trace_kvm_timer_emulate(ctx, should_fire);
 318 
 319         if (should_fire != ctx->irq.level) {
 320                 kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
 321                 return;
 322         }
 323 
 324         /*
 325          * If the timer can fire now, we don't need to have a soft timer
 326          * scheduled for the future.  If the timer cannot fire at all,
 327          * then we also don't need a soft timer.
 328          */
 329         if (!kvm_timer_irq_can_fire(ctx)) {
 330                 soft_timer_cancel(&ctx->hrtimer);
 331                 return;
 332         }
 333 
 334         soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
 335 }
 336 
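     /*
      * With interrupts disabled, snapshot the hardware timer registers
      * into the shadow context and then disable the hardware timer, so
      * the saved state cannot change under our feet.
      */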
 337 static void timer_save_state(struct arch_timer_context *ctx)
 338 {
 339         struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
 340         enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
 341         unsigned long flags;
 342 
 343         if (!timer->enabled)
 344                 return;
 345 
 346         local_irq_save(flags);
 347 
 348         if (!ctx->loaded)
 349                 goto out;
 350 
 351         switch (index) {
 352         case TIMER_VTIMER:
 353                 ctx->cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
 354                 ctx->cnt_cval = read_sysreg_el0(SYS_CNTV_CVAL);
 355 
 356                 /* Disable the timer */
 357                 write_sysreg_el0(0, SYS_CNTV_CTL);
 358                 isb();
 359 
 360                 break;
 361         case TIMER_PTIMER:
 362                 ctx->cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
 363                 ctx->cnt_cval = read_sysreg_el0(SYS_CNTP_CVAL);
 364 
 365                 /* Disable the timer */
 366                 write_sysreg_el0(0, SYS_CNTP_CTL);
 367                 isb();
 368 
 369                 break;
 370         case NR_KVM_TIMERS:
 371                 BUG();
 372         }
 373 
 374         trace_kvm_timer_save_state(ctx);
 375 
 376         ctx->loaded = false;
 377 out:
 378         local_irq_restore(flags);
 379 }
 380 
 381 /*
 382  * Schedule the background timer before calling kvm_vcpu_block, so that this
 383  * thread is removed from its waitqueue and made runnable when there's a timer
 384  * interrupt to handle.
 385  */
 386 static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
 387 {
 388         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 389         struct timer_map map;
 390 
 391         get_timer_map(vcpu, &map);
 392 
 393         /*
 394          * If no timers are capable of raising interrupts (disabled or
 395          * masked), then there's no more work for us to do.
 396          */
 397         if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
 398             !kvm_timer_irq_can_fire(map.direct_ptimer) &&
 399             !kvm_timer_irq_can_fire(map.emul_ptimer))
 400                 return;
 401 
 402         /*
 403          * At least one guest timer will expire. Schedule a background
 404          * timer set to the earliest expiration time among the guest timers.
 405          */
 406         soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
 407 }
 408 
 409 static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
 410 {
 411         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 412 
 413         soft_timer_cancel(&timer->bg_timer);
 414 }
 415 
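     /*
      * Mirror image of timer_save_state(): load the shadow context back
      * into the hardware. CVAL is written before CTL, with an isb() in
      * between, so the timer is never enabled against a stale compare
      * value.
      */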
 416 static void timer_restore_state(struct arch_timer_context *ctx)
 417 {
 418         struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
 419         enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
 420         unsigned long flags;
 421 
 422         if (!timer->enabled)
 423                 return;
 424 
 425         local_irq_save(flags);
 426 
 427         if (ctx->loaded)
 428                 goto out;
 429 
 430         switch (index) {
 431         case TIMER_VTIMER:
 432                 write_sysreg_el0(ctx->cnt_cval, SYS_CNTV_CVAL);
 433                 isb();
 434                 write_sysreg_el0(ctx->cnt_ctl, SYS_CNTV_CTL);
 435                 break;
 436         case TIMER_PTIMER:
 437                 write_sysreg_el0(ctx->cnt_cval, SYS_CNTP_CVAL);
 438                 isb();
 439                 write_sysreg_el0(ctx->cnt_ctl, SYS_CNTP_CTL);
 440                 break;
 441         case NR_KVM_TIMERS:
 442                 BUG();
 443         }
 444 
 445         trace_kvm_timer_restore_state(ctx);
 446 
 447         ctx->loaded = true;
 448 out:
 449         local_irq_restore(flags);
 450 }
 451 
 452 static void set_cntvoff(u64 cntvoff)
 453 {
 454         u32 low = lower_32_bits(cntvoff);
 455         u32 high = upper_32_bits(cntvoff);
 456 
 457         /*
 458          * kvm_call_hyp doesn't fully support the ARM PCS, especially on
 459          * 32-bit systems: arguments are passed register by register,
 460          * shifted one place because the function address goes in r0/x0.
 461          * We therefore cannot pass a 64-bit value as a single argument
 462          * and have to split it into two 32-bit halves.
 463          */
 464         kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
 465 }
 466 
 467 static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
 468 {
 469         int r;
 470         r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
 471         WARN_ON(r);
 472 }
 473 
 474 static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
 475 {
 476         struct kvm_vcpu *vcpu = ctx->vcpu;
 477         bool phys_active = false;
 478 
 479         /*
 480          * Update the timer output so that it is likely to match the
 481          * state we're about to restore. If the timer expires between
 482          * this point and the register restoration, we'll take the
 483          * interrupt anyway.
 484          */
 485         kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
 486 
 487         if (irqchip_in_kernel(vcpu->kvm))
 488                 phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);
 489 
 490         phys_active |= ctx->irq.level;
 491 
 492         set_timer_irq_phys_active(ctx, phys_active);
 493 }
 494 
 495 static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 496 {
 497         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 498 
 499         /*
 500          * Update the timer output so that it is likely to match the
 501          * state we're about to restore. If the timer expires between
 502          * this point and the register restoration, we'll take the
 503          * interrupt anyway.
 504          */
 505         kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);
 506 
 507         /*
 508          * When using a userspace irqchip with the architected timers and a
 509          * host interrupt controller that doesn't support an active state, we
 510          * must still prevent continuously exiting from the guest, and
 511          * therefore mask the physical interrupt by disabling it on the host
 512          * interrupt controller when the virtual level is high, such that the
 513          * guest can make forward progress.  Once we detect the output level
 514          * being de-asserted, we unmask the interrupt again so that we exit
 515          * from the guest when the timer fires.
 516          */
 517         if (vtimer->irq.level)
 518                 disable_percpu_irq(host_vtimer_irq);
 519         else
 520                 enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 521 }
 522 
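     /*
      * Put the vcpu's timer state onto the hardware: sync the interrupt
      * level with the irqchip, program CNTVOFF, cancel the background
      * timer and restore the direct timer registers, then kick emulation
      * for any purely emulated timer.
      */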
 523 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 524 {
 525         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 526         struct timer_map map;
 527 
 528         if (unlikely(!timer->enabled))
 529                 return;
 530 
 531         get_timer_map(vcpu, &map);
 532 
 533         if (static_branch_likely(&has_gic_active_state)) {
 534                 kvm_timer_vcpu_load_gic(map.direct_vtimer);
 535                 if (map.direct_ptimer)
 536                         kvm_timer_vcpu_load_gic(map.direct_ptimer);
 537         } else {
 538                 kvm_timer_vcpu_load_nogic(vcpu);
 539         }
 540 
 541         set_cntvoff(map.direct_vtimer->cntvoff);
 542 
 543         kvm_timer_unblocking(vcpu);
 544 
 545         timer_restore_state(map.direct_vtimer);
 546         if (map.direct_ptimer)
 547                 timer_restore_state(map.direct_ptimer);
 548 
 549         if (map.emul_ptimer)
 550                 timer_emulate(map.emul_ptimer);
 551 }
 552 
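     /*
      * With a userspace irqchip, report whether a timer output level has
      * changed relative to what was last published in the kvm_run sync
      * regs, in which case userspace needs to be told.
      */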
 553 bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
 554 {
 555         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 556         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 557         struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
 558         bool vlevel, plevel;
 559 
 560         if (likely(irqchip_in_kernel(vcpu->kvm)))
 561                 return false;
 562 
 563         vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
 564         plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;
 565 
 566         return kvm_timer_should_fire(vtimer) != vlevel ||
 567                kvm_timer_should_fire(ptimer) != plevel;
 568 }
 569 
 570 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 571 {
 572         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 573         struct timer_map map;
 574 
 575         if (unlikely(!timer->enabled))
 576                 return;
 577 
 578         get_timer_map(vcpu, &map);
 579 
 580         timer_save_state(map.direct_vtimer);
 581         if (map.direct_ptimer)
 582                 timer_save_state(map.direct_ptimer);
 583 
 584         /*
 585          * Cancel soft timer emulation, because the only case where we
 586          * need it after a vcpu_put is in the context of a sleeping VCPU, and
 587          * in that case we already factor in the deadline for the physical
 588          * timer when scheduling the bg_timer.
 589          *
 590          * In any case, we re-schedule the hrtimer for the physical timer when
 591          * coming back to the VCPU thread in kvm_timer_vcpu_load().
 592          */
 593         if (map.emul_ptimer)
 594                 soft_timer_cancel(&map.emul_ptimer->hrtimer);
 595 
 596         if (swait_active(kvm_arch_vcpu_wq(vcpu)))
 597                 kvm_timer_blocking(vcpu);
 598 
 599         /*
 600          * The kernel may decide to run userspace after calling vcpu_put, so
 601          * we reset cntvoff to 0 to keep userspace reads of the virtual
 602          * counter consistent with kernel reads of the physical counter in
 603          * the non-VHE case. For VHE, the virtual counter uses a fixed
 604          * virtual offset of zero, so there is no need to zero CNTVOFF_EL2.
 605          */
 606         set_cntvoff(0);
 607 }
 608 
 609 /*
 610  * With a userspace irqchip we have to check if the guest de-asserted the
 611  * timer and if so, unmask the timer irq signal on the host interrupt
 612  * controller to ensure that we see future timer signals.
 613  */
 614 static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 615 {
 616         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 617 
 618         if (!kvm_timer_should_fire(vtimer)) {
 619                 kvm_timer_update_irq(vcpu, false, vtimer);
 620                 if (static_branch_likely(&has_gic_active_state))
 621                         set_timer_irq_phys_active(vtimer, false);
 622                 else
 623                         enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 624         }
 625 }
 626 
 627 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 628 {
 629         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 630 
 631         if (unlikely(!timer->enabled))
 632                 return;
 633 
 634         if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
 635                 unmask_vtimer_irq_user(vcpu);
 636 }
 637 
 638 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 639 {
 640         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 641         struct timer_map map;
 642 
 643         get_timer_map(vcpu, &map);
 644 
 645         /*
 646          * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
 647          * and to 0 for ARMv7.  We provide an implementation that always
 648          * resets the timer to be disabled and unmasked and is compliant with
 649          * the ARMv7 architecture.
 650          */
 651         vcpu_vtimer(vcpu)->cnt_ctl = 0;
 652         vcpu_ptimer(vcpu)->cnt_ctl = 0;
 653 
 654         if (timer->enabled) {
 655                 kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
 656                 kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));
 657 
 658                 if (irqchip_in_kernel(vcpu->kvm)) {
 659                         kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
 660                         if (map.direct_ptimer)
 661                                 kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
 662                 }
 663         }
 664 
 665         if (map.emul_ptimer)
 666                 soft_timer_cancel(&map.emul_ptimer->hrtimer);
 667 
 668         return 0;
 669 }
 670 
 671 /* Make the updates of cntvoff for all vtimer contexts atomic */
 672 static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
 673 {
 674         int i;
 675         struct kvm *kvm = vcpu->kvm;
 676         struct kvm_vcpu *tmp;
 677 
 678         mutex_lock(&kvm->lock);
 679         kvm_for_each_vcpu(i, tmp, kvm)
 680                 vcpu_vtimer(tmp)->cntvoff = cntvoff;
 681 
 682         /*
 683          * When called from the vcpu create path, the CPU being created is not
 684          * included in the loop above, so we just set it here as well.
 685          */
 686         vcpu_vtimer(vcpu)->cntvoff = cntvoff;
 687         mutex_unlock(&kvm->lock);
 688 }
 689 
 690 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 691 {
 692         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 693         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 694         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 695 
 696         /* Synchronize cntvoff across all vtimers of a VM. */
 697         update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
 698         ptimer->cntvoff = 0;
 699 
 700         hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 701         timer->bg_timer.function = kvm_bg_timer_expire;
 702 
 703         hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 704         hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 705         vtimer->hrtimer.function = kvm_hrtimer_expire;
 706         ptimer->hrtimer.function = kvm_hrtimer_expire;
 707 
 708         vtimer->irq.irq = default_vtimer_irq.irq;
 709         ptimer->irq.irq = default_ptimer_irq.irq;
 710 
 711         vtimer->host_timer_irq = host_vtimer_irq;
 712         ptimer->host_timer_irq = host_ptimer_irq;
 713 
 714         vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
 715         ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
 716 
 717         vtimer->vcpu = vcpu;
 718         ptimer->vcpu = vcpu;
 719 }
 720 
 721 static void kvm_timer_init_interrupt(void *info)
 722 {
 723         enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 724         enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
 725 }
 726 
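     /*
      * KVM_SET_ONE_REG handler for the timer registers. Note that
      * writing the virtual counter is implemented by adjusting cntvoff
      * on every vtimer in the VM, not by writing the counter itself.
      */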
 727 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
 728 {
 729         struct arch_timer_context *timer;
 730 
 731         switch (regid) {
 732         case KVM_REG_ARM_TIMER_CTL:
 733                 timer = vcpu_vtimer(vcpu);
 734                 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
 735                 break;
 736         case KVM_REG_ARM_TIMER_CNT:
 737                 timer = vcpu_vtimer(vcpu);
 738                 update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
 739                 break;
 740         case KVM_REG_ARM_TIMER_CVAL:
 741                 timer = vcpu_vtimer(vcpu);
 742                 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
 743                 break;
 744         case KVM_REG_ARM_PTIMER_CTL:
 745                 timer = vcpu_ptimer(vcpu);
 746                 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
 747                 break;
 748         case KVM_REG_ARM_PTIMER_CVAL:
 749                 timer = vcpu_ptimer(vcpu);
 750                 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
 751                 break;
 752 
 753         default:
 754                 return -1;
 755         }
 756 
 757         return 0;
 758 }
 759 
 760 static u64 read_timer_ctl(struct arch_timer_context *timer)
 761 {
 762         /*
 763          * Set the ISTATUS bit if the timer has expired.
 764          * Note that according to the ARMv8 ARM (Issue A.k), the ISTATUS
 765          * bit is UNKNOWN when the ENABLE bit is 0, so we choose to set
 766          * ISTATUS regardless of ENABLE for implementation convenience.
 767          */
 768         if (!kvm_timer_compute_delta(timer))
 769                 return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;
 770         else
 771                 return timer->cnt_ctl;
 772 }
 773 
 774 u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
 775 {
 776         switch (regid) {
 777         case KVM_REG_ARM_TIMER_CTL:
 778                 return kvm_arm_timer_read(vcpu,
 779                                           vcpu_vtimer(vcpu), TIMER_REG_CTL);
 780         case KVM_REG_ARM_TIMER_CNT:
 781                 return kvm_arm_timer_read(vcpu,
 782                                           vcpu_vtimer(vcpu), TIMER_REG_CNT);
 783         case KVM_REG_ARM_TIMER_CVAL:
 784                 return kvm_arm_timer_read(vcpu,
 785                                           vcpu_vtimer(vcpu), TIMER_REG_CVAL);
 786         case KVM_REG_ARM_PTIMER_CTL:
 787                 return kvm_arm_timer_read(vcpu,
 788                                           vcpu_ptimer(vcpu), TIMER_REG_CTL);
 789         case KVM_REG_ARM_PTIMER_CNT:
 790                 return kvm_arm_timer_read(vcpu,
 791                                           vcpu_ptimer(vcpu), TIMER_REG_CNT);
 792         case KVM_REG_ARM_PTIMER_CVAL:
 793                 return kvm_arm_timer_read(vcpu,
 794                                           vcpu_ptimer(vcpu), TIMER_REG_CVAL);
 795         }
 796         return (u64)-1;
 797 }
 798 
 799 static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
 800                               struct arch_timer_context *timer,
 801                               enum kvm_arch_timer_regs treg)
 802 {
 803         u64 val;
 804 
 805         switch (treg) {
 806         case TIMER_REG_TVAL:
 807                 val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
 808                 val &= lower_32_bits(val);
 809                 break;
 810 
 811         case TIMER_REG_CTL:
 812                 val = read_timer_ctl(timer);
 813                 break;
 814 
 815         case TIMER_REG_CVAL:
 816                 val = timer->cnt_cval;
 817                 break;
 818 
 819         case TIMER_REG_CNT:
 820                 val = kvm_phys_timer_read() - timer->cntvoff;
 821                 break;
 822 
 823         default:
 824                 BUG();
 825         }
 826 
 827         return val;
 828 }
 829 
 830 u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
 831                               enum kvm_arch_timers tmr,
 832                               enum kvm_arch_timer_regs treg)
 833 {
 834         u64 val;
 835 
 836         preempt_disable();
 837         kvm_timer_vcpu_put(vcpu);
 838 
 839         val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);
 840 
 841         kvm_timer_vcpu_load(vcpu);
 842         preempt_enable();
 843 
 844         return val;
 845 }
 846 
 847 static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
 848                                 struct arch_timer_context *timer,
 849                                 enum kvm_arch_timer_regs treg,
 850                                 u64 val)
 851 {
 852         switch (treg) {
 853         case TIMER_REG_TVAL:
 854                 timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
 855                 break;
 856 
 857         case TIMER_REG_CTL:
 858                 timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
 859                 break;
 860 
 861         case TIMER_REG_CVAL:
 862                 timer->cnt_cval = val;
 863                 break;
 864 
 865         default:
 866                 BUG();
 867         }
 868 }
 869 
 870 void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
 871                                 enum kvm_arch_timers tmr,
 872                                 enum kvm_arch_timer_regs treg,
 873                                 u64 val)
 874 {
 875         preempt_disable();
 876         kvm_timer_vcpu_put(vcpu);
 877 
 878         kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);
 879 
 880         kvm_timer_vcpu_load(vcpu);
 881         preempt_enable();
 882 }
 883 
 884 static int kvm_timer_starting_cpu(unsigned int cpu)
 885 {
 886         kvm_timer_init_interrupt(NULL);
 887         return 0;
 888 }
 889 
 890 static int kvm_timer_dying_cpu(unsigned int cpu)
 891 {
 892         disable_percpu_irq(host_vtimer_irq);
 893         return 0;
 894 }
 895 
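     /*
      * One-time host initialization: take the timecounter and host timer
      * interrupts from the arch timer driver, request the per-cpu IRQs,
      * and register cpuhp callbacks to enable/disable them as CPUs come
      * and go.
      */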
 896 int kvm_timer_hyp_init(bool has_gic)
 897 {
 898         struct arch_timer_kvm_info *info;
 899         int err;
 900 
 901         info = arch_timer_get_kvm_info();
 902         timecounter = &info->timecounter;
 903 
 904         if (!timecounter->cc) {
 905                 kvm_err("kvm_arch_timer: uninitialized timecounter\n");
 906                 return -ENODEV;
 907         }
 908 
 909         /* First, do the virtual EL1 timer irq */
 910 
 911         if (info->virtual_irq <= 0) {
 912                 kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
 913                         info->virtual_irq);
 914                 return -ENODEV;
 915         }
 916         host_vtimer_irq = info->virtual_irq;
 917 
 918         host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
 919         if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
 920             host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
 921                 kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
 922                         host_vtimer_irq);
 923                 host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
 924         }
 925 
 926         err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
 927                                  "kvm guest vtimer", kvm_get_running_vcpus());
 928         if (err) {
 929                 kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
 930                         host_vtimer_irq, err);
 931                 return err;
 932         }
 933 
 934         if (has_gic) {
 935                 err = irq_set_vcpu_affinity(host_vtimer_irq,
 936                                             kvm_get_running_vcpus());
 937                 if (err) {
 938                         kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
 939                         goto out_free_irq;
 940                 }
 941 
 942                 static_branch_enable(&has_gic_active_state);
 943         }
 944 
 945         kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
 946 
 947         /* Now let's do the physical EL1 timer irq */
 948 
 949         if (info->physical_irq > 0) {
 950                 host_ptimer_irq = info->physical_irq;
 951                 host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq);
 952                 if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH &&
 953                     host_ptimer_irq_flags != IRQF_TRIGGER_LOW) {
 954                         kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n",
 955                                 host_ptimer_irq);
 956                         host_ptimer_irq_flags = IRQF_TRIGGER_LOW;
 957                 }
 958 
 959                 err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
 960                                          "kvm guest ptimer", kvm_get_running_vcpus());
 961                 if (err) {
 962                         kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
 963                                 host_ptimer_irq, err);
 964                         goto out_free_irq;
 965                 }
 966 
 967                 if (has_gic) {
 968                         err = irq_set_vcpu_affinity(host_ptimer_irq,
 969                                                     kvm_get_running_vcpus());
 970                         if (err) {
 971                                 kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
 972                                 goto out_free_irq;
 973                         }
 974                 }
 975 
 976                 kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
 977         } else if (has_vhe()) {
 978                 kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
 979                         info->physical_irq);
 980                 err = -ENODEV;
 981                 goto out_free_irq;
 982         }
 983 
 984         cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
 985                           "kvm/arm/timer:starting", kvm_timer_starting_cpu,
 986                           kvm_timer_dying_cpu);
 987         return 0;
 988 out_free_irq:
 989         free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
 990         return err;
 991 }
 992 
 993 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
 994 {
 995         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 996 
 997         soft_timer_cancel(&timer->bg_timer);
 998 }
 999 
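     /*
      * Claim ownership of the configured timer PPIs with the vgic and
      * check that all vcpus agree on the same vtimer/ptimer interrupt
      * numbers.
      */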
1000 static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
1001 {
1002         int vtimer_irq, ptimer_irq;
1003         int i, ret;
1004 
1005         vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
1006         ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
1007         if (ret)
1008                 return false;
1009 
1010         ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
1011         ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
1012         if (ret)
1013                 return false;
1014 
1015         kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
1016                 if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
1017                     vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
1018                         return false;
1019         }
1020 
1021         return true;
1022 }
1023 
1024 bool kvm_arch_timer_get_input_level(int vintid)
1025 {
1026         struct kvm_vcpu *vcpu = kvm_arm_get_running_vcpu();
1027         struct arch_timer_context *timer;
1028 
1029         if (vintid == vcpu_vtimer(vcpu)->irq.irq)
1030                 timer = vcpu_vtimer(vcpu);
1031         else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
1032                 timer = vcpu_ptimer(vcpu);
1033         else
1034                 BUG();
1035 
1036         return kvm_timer_should_fire(timer);
1037 }
1038 
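     /*
      * Final per-vcpu setup: with an in-kernel irqchip, validate the
      * timer interrupts and map the direct timers' host IRQs onto the
      * guest PPIs so the vgic can track their active state; without one,
      * simply mark the timer as enabled.
      */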
1039 int kvm_timer_enable(struct kvm_vcpu *vcpu)
1040 {
1041         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
1042         struct timer_map map;
1043         int ret;
1044 
1045         if (timer->enabled)
1046                 return 0;
1047 
1048         /* Without a VGIC we do not map virtual IRQs to physical IRQs */
1049         if (!irqchip_in_kernel(vcpu->kvm))
1050                 goto no_vgic;
1051 
1052         if (!vgic_initialized(vcpu->kvm))
1053                 return -ENODEV;
1054 
1055         if (!timer_irqs_are_valid(vcpu)) {
1056                 kvm_debug("incorrectly configured timer irqs\n");
1057                 return -EINVAL;
1058         }
1059 
1060         get_timer_map(vcpu, &map);
1061 
1062         ret = kvm_vgic_map_phys_irq(vcpu,
1063                                     map.direct_vtimer->host_timer_irq,
1064                                     map.direct_vtimer->irq.irq,
1065                                     kvm_arch_timer_get_input_level);
1066         if (ret)
1067                 return ret;
1068 
1069         if (map.direct_ptimer) {
1070                 ret = kvm_vgic_map_phys_irq(vcpu,
1071                                             map.direct_ptimer->host_timer_irq,
1072                                             map.direct_ptimer->irq.irq,
1073                                             kvm_arch_timer_get_input_level);
1074         }
1075 
1076         if (ret)
1077                 return ret;
1078 
1079 no_vgic:
1080         timer->enabled = 1;
1081         return 0;
1082 }
1083 
1084 /*
1085  * On a VHE system, we only need to configure the EL2 timer trap
1086  * register once, not on every world switch.
1087  * The host kernel runs at EL2 with HCR_EL2.TGE == 1, so these bits
1088  * have no effect on host kernel execution.
1089  */
1090 void kvm_timer_init_vhe(void)
1091 {
1092         /* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
1093         u32 cnthctl_shift = 10;
1094         u64 val;
1095 
1096         /*
1097          * VHE systems allow the guest direct access to the EL1 physical
1098          * timer/counter.
1099          */
1100         val = read_sysreg(cnthctl_el2);
1101         val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
1102         val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
1103         write_sysreg(val, cnthctl_el2);
1104 }
1105 
1106 static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
1107 {
1108         struct kvm_vcpu *vcpu;
1109         int i;
1110 
1111         kvm_for_each_vcpu(i, vcpu, kvm) {
1112                 vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
1113                 vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
1114         }
1115 }
1116 
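     /*
      * KVM_ARM_VCPU_TIMER_IRQ_* device attributes: retarget a timer to a
      * different PPI, VM-wide, which is only allowed before the timer
      * has been enabled on the vcpu.
      */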
1117 int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
1118 {
1119         int __user *uaddr = (int __user *)(long)attr->addr;
1120         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
1121         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
1122         int irq;
1123 
1124         if (!irqchip_in_kernel(vcpu->kvm))
1125                 return -EINVAL;
1126 
1127         if (get_user(irq, uaddr))
1128                 return -EFAULT;
1129 
1130         if (!irq_is_ppi(irq))
1131                 return -EINVAL;
1132 
1133         if (vcpu->arch.timer_cpu.enabled)
1134                 return -EBUSY;
1135 
1136         switch (attr->attr) {
1137         case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
1138                 set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
1139                 break;
1140         case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
1141                 set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
1142                 break;
1143         default:
1144                 return -ENXIO;
1145         }
1146 
1147         return 0;
1148 }
1149 
1150 int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
1151 {
1152         int __user *uaddr = (int __user *)(long)attr->addr;
1153         struct arch_timer_context *timer;
1154         int irq;
1155 
1156         switch (attr->attr) {
1157         case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
1158                 timer = vcpu_vtimer(vcpu);
1159                 break;
1160         case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
1161                 timer = vcpu_ptimer(vcpu);
1162                 break;
1163         default:
1164                 return -ENXIO;
1165         }
1166 
1167         irq = timer->irq.irq;
1168         return put_user(irq, uaddr);
1169 }
1170 
1171 int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
1172 {
1173         switch (attr->attr) {
1174         case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
1175         case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
1176                 return 0;
1177         }
1178 
1179         return -ENXIO;
1180 }
