Lines Matching refs:vlr

1011 struct vgic_lr vlr) in vgic_set_lr() argument
1013 vgic_ops->set_lr(vcpu, lr, vlr); in vgic_set_lr()
1017 struct vgic_lr vlr) in vgic_sync_lr_elrsr() argument
1019 vgic_ops->sync_lr_elrsr(vcpu, lr, vlr); in vgic_sync_lr_elrsr()
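
The hits at 1011-1019 are thin accessors: every LR write and every empty-LR-status sync funnels through the vgic_ops function-pointer table, so the same distributor code can drive either a GICv2 or GICv3 backend. These matches appear to come from virt/kvm/arm/vgic.c of this kernel era; a minimal reconstruction (the first line of each signature is an assumption, only the vlr parameter line is matched above):

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);	/* backend-specific LR write */
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);	/* refresh empty-LR status */
}
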
1070 struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr); in vgic_retire_lr() local
1072 vlr.state = 0; in vgic_retire_lr()
1073 vgic_set_lr(vcpu, lr_nr, vlr); in vgic_retire_lr()
1076 vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); in vgic_retire_lr()
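
The matches at 1070-1076 retire a single list register: read it, zero its state, write it back, then resynchronize the empty-LR (ELRSR) status. A sketch; the lr_used and irq-to-LR-map bookkeeping between the two calls is assumed from context, not shown in the matches:

static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;				/* neither pending nor active */
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);		/* assumed */
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;	/* assumed */
	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);	/* mark the LR free again */
}
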
1094 struct vgic_lr vlr = vgic_get_lr(vcpu, lr); in vgic_retire_disabled_irqs() local
1096 if (!vgic_irq_is_enabled(vcpu, vlr.irq)) { in vgic_retire_disabled_irqs()
1097 vgic_retire_lr(lr, vlr.irq, vcpu); in vgic_retire_disabled_irqs()
1098 if (vgic_irq_is_queued(vcpu, vlr.irq)) in vgic_retire_disabled_irqs()
1099 vgic_irq_clear_queued(vcpu, vlr.irq); in vgic_retire_disabled_irqs()
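
Lines 1094-1099 scan the occupied LRs and retire any whose interrupt has since been disabled at the distributor. A sketch, assuming the for_each_set_bit walk over lr_used and a file-scope vgic parameters pointer for the LR count:

static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {	/* assumed */
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}
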
1105 int lr_nr, struct vgic_lr vlr) in vgic_queue_irq_to_lr() argument
1108 vlr.state |= LR_STATE_ACTIVE; in vgic_queue_irq_to_lr()
1109 kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state); in vgic_queue_irq_to_lr()
1113 vlr.state |= LR_STATE_PENDING; in vgic_queue_irq_to_lr()
1114 kvm_debug("Set pending: 0x%x\n", vlr.state); in vgic_queue_irq_to_lr()
1118 vlr.state |= LR_EOI_INT; in vgic_queue_irq_to_lr()
1120 vgic_set_lr(vcpu, lr_nr, vlr); in vgic_queue_irq_to_lr()
1121 vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); in vgic_queue_irq_to_lr()
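
Lines 1105-1121 program one LR, accumulating state bits before the single write-back: LR_STATE_ACTIVE if the interrupt was already active at the distributor, LR_STATE_PENDING if it is pending, and LR_EOI_INT so a level-triggered interrupt raises a maintenance interrupt on EOI. The guards on each branch are assumptions inferred from the kvm_debug messages:

static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
				 int lr_nr, struct vgic_lr vlr)
{
	if (vgic_irq_is_active(vcpu, irq)) {		/* assumed guard */
		vlr.state |= LR_STATE_ACTIVE;
		kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
		vgic_irq_clear_active(vcpu, irq);	/* assumed */
		vgic_update_state(vcpu->kvm);		/* assumed */
	} else if (vgic_dist_irq_is_pending(vcpu, irq)) { /* assumed guard */
		vlr.state |= LR_STATE_PENDING;
		kvm_debug("Set pending: 0x%x\n", vlr.state);
	}

	/* Level-triggered IRQs need a maintenance interrupt on EOI. */
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr_nr, vlr);
	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}
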
1133 struct vgic_lr vlr; in vgic_queue_irq() local
1147 vlr = vgic_get_lr(vcpu, lr); in vgic_queue_irq()
1148 if (vlr.source == sgi_source_id) { in vgic_queue_irq()
1149 kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq); in vgic_queue_irq()
1151 vgic_queue_irq_to_lr(vcpu, irq, lr, vlr); in vgic_queue_irq()
1166 vlr.irq = irq; in vgic_queue_irq()
1167 vlr.source = sgi_source_id; in vgic_queue_irq()
1168 vlr.state = 0; in vgic_queue_irq()
1169 vgic_queue_irq_to_lr(vcpu, irq, lr, vlr); in vgic_queue_irq()
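
Lines 1133-1169 pick the LR itself: re-use ("piggyback") an LR that already holds this IRQ from the same SGI source, otherwise claim a free LR and initialize vlr from scratch. The map lookup and the free-bit search are assumptions from context:

static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr;
	int lr;

	/* Does an LR already hold this IRQ from the same source? */
	lr = vgic_cpu->vgic_irq_lr_map[irq];		/* assumed lookup */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
			return true;
		}
	}

	/* Otherwise claim a free LR and program it from scratch. */
	lr = find_first_zero_bit(vgic_cpu->lr_used, vgic->nr_lr); /* assumed */
	if (lr >= vgic->nr_lr)
		return false;			/* all LRs in use: overflow */

	vgic_cpu->vgic_irq_lr_map[irq] = lr;		/* assumed */
	set_bit(lr, vgic_cpu->lr_used);			/* assumed */

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = 0;
	vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
	return true;
}
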
1278 struct vgic_lr vlr = vgic_get_lr(vcpu, lr); in vgic_process_maintenance() local
1279 WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq)); in vgic_process_maintenance()
1282 vgic_irq_clear_queued(vcpu, vlr.irq); in vgic_process_maintenance()
1283 WARN_ON(vlr.state & LR_STATE_MASK); in vgic_process_maintenance()
1284 vlr.state = 0; in vgic_process_maintenance()
1285 vgic_set_lr(vcpu, lr, vlr); in vgic_process_maintenance()
1298 vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq); in vgic_process_maintenance()
1308 vlr.irq - VGIC_NR_PRIVATE_IRQS); in vgic_process_maintenance()
1312 if (vgic_dist_irq_get_level(vcpu, vlr.irq)) { in vgic_process_maintenance()
1313 vgic_cpu_irq_set(vcpu, vlr.irq); in vgic_process_maintenance()
1316 vgic_dist_irq_clear_pending(vcpu, vlr.irq); in vgic_process_maintenance()
1317 vgic_cpu_irq_clear(vcpu, vlr.irq); in vgic_process_maintenance()
1326 vgic_sync_lr_elrsr(vcpu, lr, vlr); in vgic_process_maintenance()
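
Lines 1278-1326 are the maintenance-interrupt EOI path for level-triggered interrupts: clear the queued flag, empty the LR, drop any software-pending latch, ack shared interrupts toward irqfd listeners, then re-sample the line level to decide whether the IRQ stays pending. A sketch of that loop body; the iteration over the EOI status bits and the exact surrounding control flow are assumed:

for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {		/* assumed loop */
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

	/* Only level-triggered IRQs take this path. */
	WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

	vgic_irq_clear_queued(vcpu, vlr.irq);
	WARN_ON(vlr.state & LR_STATE_MASK);	/* LR must be inactive */
	vlr.state = 0;
	vgic_set_lr(vcpu, lr, vlr);

	/* EOI implies the IRQ was acked, so drop any soft-pend latch. */
	vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

	/* Notify listeners (e.g. irqfd resamplers) for shared IRQs. */
	kvm_notify_acked_irq(vcpu->kvm, 0,		/* assumed */
			     vlr.irq - VGIC_NR_PRIVATE_IRQS);

	/* Re-sample the line: still asserted means still pending. */
	if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
		vgic_cpu_irq_set(vcpu, vlr.irq);
	} else {
		vgic_dist_irq_clear_pending(vcpu, vlr.irq);
		vgic_cpu_irq_clear(vcpu, vlr.irq);
	}

	vgic_sync_lr_elrsr(vcpu, lr, vlr);
}
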
1360 struct vgic_lr vlr; in __kvm_vgic_sync_hwstate() local
1365 vlr = vgic_get_lr(vcpu, lr); in __kvm_vgic_sync_hwstate()
1367 BUG_ON(vlr.irq >= dist->nr_irqs); in __kvm_vgic_sync_hwstate()
1368 vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY; in __kvm_vgic_sync_hwstate()
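
Lines 1360-1368 run after a guest exit: any LR the hardware reports empty is freed and its entry in the per-IRQ LR map reset to LR_EMPTY. A sketch, assuming the ELRSR bitmap test and the lr_used bookkeeping around the matched lines:

for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {		/* assumed loop */
	struct vgic_lr vlr;

	if (!test_and_clear_bit(lr, vgic_cpu->lr_used))	/* assumed */
		continue;			/* LR was not in use */

	vlr = vgic_get_lr(vcpu, lr);
	BUG_ON(vlr.irq >= dist->nr_irqs);
	vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
}
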