root/arch/s390/kvm/interrupt.c

DEFINITIONS

This source file includes the following definitions:
  1. sca_ext_call_pending
  2. sca_inject_ext_call
  3. sca_clear_ext_call
  4. psw_extint_disabled
  5. psw_ioint_disabled
  6. psw_mchk_disabled
  7. psw_interrupts_disabled
  8. ckc_interrupts_enabled
  9. ckc_irq_pending
  10. cpu_timer_interrupts_enabled
  11. cpu_timer_irq_pending
  12. isc_to_isc_bits
  13. isc_to_int_word
  14. int_word_to_isc
  15. gisa_set_iam
  16. gisa_clear_ipm
  17. gisa_get_ipm_or_restore_iam
  18. gisa_in_alert_list
  19. gisa_set_ipm_gisc
  20. gisa_get_ipm
  21. gisa_clear_ipm_gisc
  22. gisa_tac_ipm_gisc
  23. pending_irqs_no_gisa
  24. pending_irqs
  25. isc_to_irq_type
  26. irq_type_to_isc
  27. disable_iscs
  28. deliverable_irqs
  29. __set_cpu_idle
  30. __unset_cpu_idle
  31. __reset_intercept_indicators
  32. set_intercept_indicators_io
  33. set_intercept_indicators_ext
  34. set_intercept_indicators_mchk
  35. set_intercept_indicators_stop
  36. set_intercept_indicators
  37. __deliver_cpu_timer
  38. __deliver_ckc
  39. __deliver_pfault_init
  40. __write_machine_check
  41. __deliver_machine_check
  42. __deliver_restart
  43. __deliver_set_prefix
  44. __deliver_emergency_signal
  45. __deliver_external_call
  46. __deliver_prog
  47. __deliver_service
  48. __deliver_pfault_done
  49. __deliver_virtio
  50. __do_deliver_io
  51. __deliver_io
  52. kvm_s390_ext_call_pending
  53. kvm_s390_vcpu_has_irq
  54. kvm_cpu_has_pending_timer
  55. __calculate_sltime
  56. kvm_s390_handle_wait
  57. kvm_s390_vcpu_wakeup
  58. kvm_s390_idle_wakeup
  59. kvm_s390_clear_local_irqs
  60. kvm_s390_deliver_pending_interrupts
  61. __inject_prog
  62. __inject_pfault_init
  63. __inject_extcall
  64. __inject_set_prefix
  65. __inject_sigp_stop
  66. __inject_sigp_restart
  67. __inject_sigp_emergency
  68. __inject_mchk
  69. __inject_ckc
  70. __inject_cpu_timer
  71. get_io_int
  72. get_top_io_int
  73. get_top_gisa_isc
  74. kvm_s390_get_io_int
  75. __inject_service
  76. __inject_virtio
  77. __inject_pfault_done
  78. __inject_float_mchk
  79. __inject_io
  80. __floating_irq_kick
  81. __inject_vm
  82. kvm_s390_inject_vm
  83. kvm_s390_reinject_io_int
  84. s390int_to_s390irq
  85. kvm_s390_is_stop_irq_pending
  86. kvm_s390_clear_stop_irq
  87. do_inject_vcpu
  88. kvm_s390_inject_vcpu
  89. clear_irq_list
  90. inti_to_irq
  91. kvm_s390_clear_float_irqs
  92. get_all_floating_irqs
  93. flic_ais_mode_get_all
  94. flic_get_attr
  95. copy_irq_from_user
  96. enqueue_floating_irq
  97. get_io_adapter
  98. register_io_adapter
  99. kvm_s390_mask_adapter
  100. kvm_s390_adapter_map
  101. kvm_s390_adapter_unmap
  102. kvm_s390_destroy_adapters
  103. modify_io_adapter
  104. clear_io_irq
  105. modify_ais_mode
  106. kvm_s390_inject_airq
  107. flic_inject_airq
  108. flic_ais_mode_set_all
  109. flic_set_attr
  110. flic_has_attr
  111. flic_create
  112. flic_destroy
  113. get_ind_bit
  114. get_map_info
  115. adapter_indicators_set
  116. set_adapter_int
  117. kvm_s390_reinject_machine_check
  118. kvm_set_routing_entry
  119. kvm_set_msi
  120. kvm_s390_set_irq_state
  121. store_local_irq
  122. kvm_s390_get_irq_state
  123. __airqs_kick_single_vcpu
  124. gisa_vcpu_kicker
  125. process_gib_alert_list
  126. kvm_s390_gisa_clear
  127. kvm_s390_gisa_init
  128. kvm_s390_gisa_destroy
  129. kvm_s390_gisc_register
  130. kvm_s390_gisc_unregister
  131. gib_alert_irq_handler
  132. kvm_s390_gib_destroy
  133. kvm_s390_gib_init

// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/nospec.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include <asm/airq.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

static struct kvm_s390_gib *gib;

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
        int c, scn;

        if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
                return 0;

        BUG_ON(!kvm_s390_use_sca_entries());
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (src_id)
                *src_id = scn;

        return c;
}

static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
        int expect, rc;

        BUG_ON(!kvm_s390_use_sca_entries());
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (rc != expect) {
                /* another external call is pending */
                return -EBUSY;
        }
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
        return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
        int rc, expect;

        if (!kvm_s390_use_sca_entries())
                return;
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);
        WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        return psw_extint_disabled(vcpu) &&
               psw_ioint_disabled(vcpu) &&
               psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        if (psw_extint_disabled(vcpu) ||
            !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
                return 0;
        if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
                /* No timer interrupts when single stepping */
                return 0;
        return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
        const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
        const u64 ckc = vcpu->arch.sie_block->ckc;

        if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
                if ((s64)ckc >= (s64)now)
                        return 0;
        } else if (ckc >= now) {
                return 0;
        }
        return ckc_interrupts_enabled(vcpu);
}
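
/*
 * Worked example (illustrative annotation, not part of the original
 * source): with CR0_CLOCK_COMPARATOR_SIGN set, ckc_irq_pending()
 * compares signed values. For now = 0xfffffffffffffff0 and ckc = 0x10,
 * the unsigned test "ckc >= now" would fail and report the comparator
 * as expired, while the signed test reads now as -16 and ckc as +16,
 * so the comparator is still in the future and no interrupt is pending.
 */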

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        return !psw_extint_disabled(vcpu) &&
               (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
        if (!cpu_timer_interrupts_enabled(vcpu))
                return 0;
        return kvm_s390_get_cpu_timer(vcpu) >> 63;
}
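
/*
 * Note (illustrative annotation): the CPU timer counts down and is
 * treated as a signed quantity, so the ">> 63" in cpu_timer_irq_pending()
 * extracts the sign bit, which reads 1 exactly when the timer has
 * decremented past zero, i.e. has expired.
 */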

static uint64_t isc_to_isc_bits(int isc)
{
        return (0x80 >> isc) << 24;
}

static inline u32 isc_to_int_word(u8 isc)
{
        return ((u32)isc << 27) | 0x80000000;
}

static inline u8 int_word_to_isc(u32 int_word)
{
        return (int_word & 0x38000000) >> 27;
}
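
/*
 * Worked example (illustrative annotation, not part of the original
 * source): for isc = 3, isc_to_isc_bits(3) = (0x80 >> 3) << 24 =
 * 0x10000000, the CR6 subclass-mask bit tested in disable_iscs(), and
 * isc_to_int_word(3) = (3 << 27) | 0x80000000 = 0x98000000. The
 * conversions round-trip: int_word_to_isc(0x98000000) =
 * (0x98000000 & 0x38000000) >> 27 = 3.
 */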

/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fix up the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)
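
/*
 * Worked example (illustrative annotation; assumes ipm sits at byte
 * offset 4 of struct kvm_s390_gisa, consistent with the word[0] shifts
 * used below): IPM_BIT_OFFSET is then 4 * 8 = 32, so
 * gisa_set_ipm_gisc(gisa, 5) sets inverted bit 32 + 5 = 37 of the first
 * long word, i.e. bit 5 (msb-first) of the ipm byte itself.
 */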

/**
 * gisa_set_iam - change the GISA interruption alert mask
 *
 * @gisa: gisa to operate on
 * @iam: new IAM value to use
 *
 * Change the IAM atomically with the next alert address and the IPM
 * of the GISA if the GISA is not part of the GIB alert list. All three
 * fields are located in the first long word of the GISA.
 *
 * Returns: 0 on success
 *          -EBUSY in case the gisa is part of the alert list
 */
static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
{
        u64 word, _word;

        do {
                word = READ_ONCE(gisa->u64.word[0]);
                if ((u64)gisa != word >> 32)
                        return -EBUSY;
                _word = (word & ~0xffUL) | iam;
        } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);

        return 0;
}

/**
 * gisa_clear_ipm - clear the GISA interruption pending mask
 *
 * @gisa: gisa to operate on
 *
 * Clear the IPM atomically with the next alert address and the IAM
 * of the GISA unconditionally. All three fields are located in the
 * first long word of the GISA.
 */
static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
{
        u64 word, _word;

        do {
                word = READ_ONCE(gisa->u64.word[0]);
                _word = word & ~(0xffUL << 24);
        } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
}

/**
 * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
 *
 * @gi: gisa interrupt struct to work on
 *
 * Atomically restores the interruption alert mask if none of the
 * relevant ISCs are pending and returns the IPM.
 *
 * Returns: the relevant pending ISCs
 */
static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
{
        u8 pending_mask, alert_mask;
        u64 word, _word;

        do {
                word = READ_ONCE(gi->origin->u64.word[0]);
                alert_mask = READ_ONCE(gi->alert.mask);
                pending_mask = (u8)(word >> 24) & alert_mask;
                if (pending_mask)
                        return pending_mask;
                _word = (word & ~0xffUL) | alert_mask;
        } while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);

        return 0;
}

static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
{
        return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa;
}

static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
        return READ_ONCE(gisa->ipm);
}

static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.float_int.pending_irqs |
                vcpu->arch.local_int.pending_irqs;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
        unsigned long pending_mask;

        pending_mask = pending_irqs_no_gisa(vcpu);
        if (gi->origin)
                pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
        return pending_mask;
}

static inline int isc_to_irq_type(unsigned long isc)
{
        return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
        return IRQ_PEND_IO_ISC_0 - irq_type;
}
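
/*
 * Worked example (illustrative annotation, not part of the original
 * source): the ISC <-> IRQ-type mapping is its own inverse:
 * isc_to_irq_type(0) = IRQ_PEND_IO_ISC_0 and isc_to_irq_type(7) =
 * IRQ_PEND_IO_ISC_0 - 7 = IRQ_PEND_IO_ISC_7, so higher (lower-priority)
 * ISCs land on lower bit numbers, matching the "reverse order of
 * interrupt priority" layout used by
 * kvm_s390_deliver_pending_interrupts().
 */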

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
                                   unsigned long active_mask)
{
        int i;

        for (i = 0; i <= MAX_ISC; i++)
                if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
                        active_mask &= ~(1UL << (isc_to_irq_type(i)));

        return active_mask;
}

static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
        unsigned long active_mask;

        active_mask = pending_irqs(vcpu);
        if (!active_mask)
                return 0;

        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
        if (psw_ioint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_IO_MASK;
        else
                active_mask = disable_iscs(vcpu, active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
                __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
        if (psw_mchk_disabled(vcpu))
                active_mask &= ~IRQ_PEND_MCHK_MASK;
        /*
         * Check both floating and local interrupts' cr14 because
         * bit IRQ_PEND_MCHK_REP could be set in both cases.
         */
        if (!(vcpu->arch.sie_block->gcr[14] &
           (vcpu->kvm->arch.float_int.mchk.cr14 |
           vcpu->arch.local_int.irq.mchk.cr14)))
                __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

        /*
         * STOP irqs will never be actively delivered. They are triggered via
         * intercept requests and cleared when the stop intercept is performed.
         */
        __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

        return active_mask;
}
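
/*
 * Illustrative note (added annotation, not part of the original
 * source): deliverable_irqs() only filters; it never clears a pending
 * bit. A machine check that is pending while the PSW machine-check mask
 * is off simply drops out of the deliverable set here and stays pending;
 * set_intercept_indicators() then requests an interception so delivery
 * can be retried once the guest enables the class again.
 */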

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
        set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
        clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
                                      CPUSTAT_STOP_INT);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

        if (guestdbg_enabled(vcpu)) {
                vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
                return;
        if (psw_ioint_disabled(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
                return;
        if (psw_extint_disabled(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
                return;
        if (psw_mchk_disabled(vcpu))
                vcpu->arch.sie_block->ictl |= ICTL_LPSW;
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
        if (kvm_s390_is_stop_irq_pending(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
        set_intercept_indicators_io(vcpu);
        set_intercept_indicators_ext(vcpu);
        set_intercept_indicators_mchk(vcpu);
        set_intercept_indicators_stop(vcpu);
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        vcpu->stat.deliver_cputm++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        vcpu->stat.deliver_ckc++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
                           (u16 __user *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_ext_info ext;
        int rc;

        spin_lock(&li->lock);
        ext = li->irq.ext;
        clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        li->irq.ext.ext_params2 = 0;
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
                   ext.ext_params2);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_INIT,
                                         0, ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __write_machine_check(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_mchk_info *mchk)
{
        unsigned long ext_sa_addr;
        unsigned long lc;
        freg_t fprs[NUM_FPRS];
        union mci mci;
        int rc;

        mci.val = mchk->mcic;
        /* take care of lazy register loading */
        save_fpu_regs();
        save_access_regs(vcpu->run->s.regs.acrs);
        if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
                save_gs_cb(current->thread.gs_cb);

        /* Extended save area */
        rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
                           sizeof(unsigned long));
        /* Only bits 0 through 63-LC are used for address formation */
        lc = ext_sa_addr & MCESA_LC_MASK;
        if (test_kvm_facility(vcpu->kvm, 133)) {
                switch (lc) {
                case 0:
                case 10:
                        ext_sa_addr &= ~0x3ffUL;
                        break;
                case 11:
                        ext_sa_addr &= ~0x7ffUL;
                        break;
                case 12:
                        ext_sa_addr &= ~0xfffUL;
                        break;
                default:
                        ext_sa_addr = 0;
                        break;
                }
        } else {
                ext_sa_addr &= ~0x3ffUL;
        }

        if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
                if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
                                    512))
                        mci.vr = 0;
        } else {
                mci.vr = 0;
        }
        if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
            && (lc == 11 || lc == 12)) {
                if (write_guest_abs(vcpu, ext_sa_addr + 1024,
                                    &vcpu->run->s.regs.gscb, 32))
                        mci.gs = 0;
        } else {
                mci.gs = 0;
        }

        /* General interruption information */
        rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

        /* Register-save areas */
        if (MACHINE_HAS_VX) {
                convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
                rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
        } else {
                rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
                                     vcpu->run->s.regs.fprs, 128);
        }
        rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
                             vcpu->run->s.regs.gprs, 128);
        rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
                           (u32 __user *) __LC_FP_CREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
                           (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
                           (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
                           (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
        rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
                             &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
                             &vcpu->arch.sie_block->gcr, 128);

        /* Extended interruption information */
        rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
                           (u32 __user *) __LC_EXT_DAMAGE_CODE);
        rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
                           (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
                             sizeof(mchk->fixed_logout));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info mchk = {};
        int deliver = 0;
        int rc = 0;

        spin_lock(&fi->lock);
        spin_lock(&li->lock);
        if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
            test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
                /*
                 * If there was an exigent machine check pending, then any
                 * repressible machine checks that might have been pending
                 * are indicated along with it, so always clear bits for
                 * repressible and exigent interrupts
                 */
                mchk = li->irq.mchk;
                clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
                clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
                memset(&li->irq.mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        /*
         * We indicate floating repressible conditions along with
         * other pending conditions. Channel Report Pending and Channel
         * Subsystem damage are the only two and are indicated by
         * bits in mcic and masked in cr14.
         */
        if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
                mchk.mcic |= fi->mchk.mcic;
                mchk.cr14 |= fi->mchk.cr14;
                memset(&fi->mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        spin_unlock(&li->lock);
        spin_unlock(&fi->lock);

        if (deliver) {
                VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
                           mchk.mcic);
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_MCHK,
                                                 mchk.cr14, mchk.mcic);
                vcpu->stat.deliver_machine_check++;
                rc = __write_machine_check(vcpu, &mchk);
        }
        return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        rc  = write_guest_lc(vcpu,
                             offsetof(struct lowcore, restart_old_psw),
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info prefix;

        spin_lock(&li->lock);
        prefix = li->irq.prefix;
        li->irq.prefix.address = 0;
        clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        spin_unlock(&li->lock);

        vcpu->stat.deliver_prefix_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_SIGP_SET_PREFIX,
                                         prefix.address, 0);

        kvm_s390_set_prefix(vcpu, prefix.address);
        return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;
        int cpu_addr;

        spin_lock(&li->lock);
        cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        clear_bit(cpu_addr, li->sigp_emerg_pending);
        if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
                clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                         cpu_addr, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info extcall;
        int rc;

        spin_lock(&li->lock);
        extcall = li->irq.extcall;
        li->irq.extcall.code = 0;
        clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
        vcpu->stat.deliver_external_call++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_EXTERNAL_CALL,
                                         extcall.code, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0, nullifying = false;
        u16 ilen;

        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);

        ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
        VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
                   pgm_info.code, ilen);
        vcpu->stat.deliver_program++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);

        switch (pgm_info.code & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
                nullifying = true;
                /* fall through */
        case PGM_SPACE_SWITCH:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                                  (u8 *)__LC_EXC_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                                   (u8 *)__LC_OP_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_MONITOR:
                rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
                                  (u16 *)__LC_MON_CLASS_NR);
                rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                                   (u64 *)__LC_MON_CODE);
                break;
        case PGM_VECTOR_PROCESSING:
        case PGM_DATA:
                rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                                  (u32 *)__LC_DATA_EXC_CODE);
                break;
        case PGM_PROTECTION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                break;
        case PGM_STACK_FULL:
        case PGM_STACK_EMPTY:
        case PGM_STACK_SPECIFICATION:
        case PGM_STACK_TYPE:
        case PGM_STACK_OPERATION:
        case PGM_TRACE_TABEL:
        case PGM_CRYPTO_OPERATION:
                nullifying = true;
                break;
        }

        if (pgm_info.code & PGM_PER) {
                rc |= put_guest_lc(vcpu, pgm_info.per_code,
                                   (u8 *) __LC_PER_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                                   (u8 *)__LC_PER_ATMID);
                rc |= put_guest_lc(vcpu, pgm_info.per_address,
                                   (u64 *) __LC_PER_ADDRESS);
                rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                                   (u8 *) __LC_PER_ACCESS_ID);
        }

        if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
                kvm_s390_rewind_psw(vcpu, ilen);

        /* bit 1+2 of the target are the ilc, so we can directly use ilen */
        rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
                                 (u64 *) __LC_LAST_BREAK);
        rc |= put_guest_lc(vcpu, pgm_info.code,
                           (u16 *)__LC_PGM_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_ext_info ext;
        int rc = 0;

        spin_lock(&fi->lock);
        if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
                spin_unlock(&fi->lock);
                return 0;
        }
        ext = fi->srv_signal;
        memset(&fi->srv_signal, 0, sizeof(ext));
        clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
                   ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
                                         ext.ext_params, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);

        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_PFAULT] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
                clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_INT_PFAULT_DONE, 0,
                                                 inti->ext.ext_params2);
                VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
                           inti->ext.ext_params2);

                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, PFAULT_DONE,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                VCPU_EVENT(vcpu, 4,
                           "deliver: virtio parm: 0x%x,parm64: 0x%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                inti->ext.ext_params,
                                inti->ext.ext_params2);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
                clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                                (u32 *)__LC_EXT_PARAMS);
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
        int rc;

        rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
        rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
        rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
        rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
        rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw,
                             sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
                                     unsigned long irq_type)
{
        struct list_head *isc_list;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
        struct kvm_s390_interrupt_info *inti = NULL;
        struct kvm_s390_io_info io;
        u32 isc;
        int rc = 0;

        fi = &vcpu->kvm->arch.float_int;

        spin_lock(&fi->lock);
        isc = irq_type_to_isc(irq_type);
        isc_list = &fi->lists[isc];
        inti = list_first_entry_or_null(isc_list,
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                if (inti->type & KVM_S390_INT_IO_AI_MASK)
                        VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
                else
                        VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
                        inti->io.subchannel_id >> 8,
                        inti->io.subchannel_id >> 1 & 0x3,
                        inti->io.subchannel_nr);

                vcpu->stat.deliver_io++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                ((__u32)inti->io.subchannel_id << 16) |
                                inti->io.subchannel_nr,
                                ((__u64)inti->io.io_int_parm << 32) |
                                inti->io.io_int_word);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_IO] -= 1;
        }
        if (list_empty(isc_list))
                clear_bit(irq_type, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc = __do_deliver_io(vcpu, &(inti->io));
                kfree(inti);
                goto out;
        }

        if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
                /*
                 * In case an adapter interrupt was not delivered
                 * in SIE context, KVM will handle the delivery.
                 */
                VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
                memset(&io, 0, sizeof(io));
                io.io_int_word = isc_to_int_word(isc);
                vcpu->stat.deliver_io++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                        KVM_S390_INT_IO(1, 0, 0, 0),
                        ((__u32)io.subchannel_id << 16) |
                        io.subchannel_nr,
                        ((__u64)io.io_int_parm << 32) |
                        io.io_int_word);
                rc = __do_deliver_io(vcpu, &io);
        }
out:
        return rc;
}

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        if (!sclp.has_sigpif)
                return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

        return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
        if (deliverable_irqs(vcpu))
                return 1;

        if (kvm_cpu_has_pending_timer(vcpu))
                return 1;

        /* external call pending and deliverable */
        if (kvm_s390_ext_call_pending(vcpu) &&
            !psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
                return 1;

        if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
                return 1;
        return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
        const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
        const u64 ckc = vcpu->arch.sie_block->ckc;
        u64 cputm, sltime = 0;

        if (ckc_interrupts_enabled(vcpu)) {
                if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
                        if ((s64)now < (s64)ckc)
                                sltime = tod_to_ns((s64)ckc - (s64)now);
                } else if (now < ckc) {
                        sltime = tod_to_ns(ckc - now);
                }
                /* already expired */
                if (!sltime)
                        return 0;
                if (cpu_timer_interrupts_enabled(vcpu)) {
                        cputm = kvm_s390_get_cpu_timer(vcpu);
                        /* already expired? */
                        if (cputm >> 63)
                                return 0;
                        return min(sltime, tod_to_ns(cputm));
                }
        } else if (cpu_timer_interrupts_enabled(vcpu)) {
                sltime = kvm_s390_get_cpu_timer(vcpu);
                /* already expired? */
                if (sltime >> 63)
                        return 0;
        }
        return sltime;
}
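
/*
 * Worked example (illustrative annotation, not part of the original
 * source): if the clock comparator fires in 10s and the CPU timer in
 * 4s, with both interrupt classes enabled, __calculate_sltime() returns
 * the CKC delta capped by the CPU timer, i.e. about 4s; if either
 * source has already expired it returns 0 and the caller must not
 * sleep at all.
 */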

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
        u64 sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (gi->origin &&
            (gisa_get_ipm_or_restore_iam(gi) &
             vcpu->arch.sie_block->gcr[6] >> 24))
                return 0;

        if (!ckc_interrupts_enabled(vcpu) &&
            !cpu_timer_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                __set_cpu_idle(vcpu);
                goto no_timer;
        }

        sltime = __calculate_sltime(vcpu);
        if (!sltime)
                return 0;

        __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
}
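
/*
 * Note (illustrative annotation): in kvm_s390_handle_wait() above,
 * "gcr[6] >> 24" moves the guest's I/O interruption subclass mask
 * (bits 24-31 of CR6, as produced by isc_to_isc_bits()) down to bits
 * 0-7 so that it lines up with the 8-bit IPM returned by
 * gisa_get_ipm_or_restore_iam(): ISC 0 is the 0x80 bit in both
 * encodings.
 */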
1224 
1225 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
1226 {
1227         vcpu->valid_wakeup = true;
1228         kvm_vcpu_wake_up(vcpu);
1229 
1230         /*
1231          * The VCPU might not be sleeping but rather executing VSIE. Let's
1232          * kick it, so it leaves the SIE to process the request.
1233          */
1234         kvm_s390_vsie_kick(vcpu);
1235 }
1236 
1237 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
1238 {
1239         struct kvm_vcpu *vcpu;
1240         u64 sltime;
1241 
1242         vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
1243         sltime = __calculate_sltime(vcpu);
1244 
1245         /*
1246          * If the monotonic clock runs faster than the tod clock we might be
1247          * woken up too early and have to go back to sleep to avoid deadlocks.
1248          */
1249         if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
1250                 return HRTIMER_RESTART;
1251         kvm_s390_vcpu_wakeup(vcpu);
1252         return HRTIMER_NORESTART;
1253 }
1254 
1255 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
1256 {
1257         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1258 
1259         spin_lock(&li->lock);
1260         li->pending_irqs = 0;
1261         bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
1262         memset(&li->irq, 0, sizeof(li->irq));
1263         spin_unlock(&li->lock);
1264 
1265         sca_clear_ext_call(vcpu);
1266 }
1267 
1268 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
1269 {
1270         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1271         int rc = 0;
1272         unsigned long irq_type;
1273         unsigned long irqs;
1274 
1275         __reset_intercept_indicators(vcpu);
1276 
1277         /* pending ckc conditions might have been invalidated */
1278         clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1279         if (ckc_irq_pending(vcpu))
1280                 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1281 
1282         /* pending cpu timer conditions might have been invalidated */
1283         clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1284         if (cpu_timer_irq_pending(vcpu))
1285                 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1286 
1287         while ((irqs = deliverable_irqs(vcpu)) && !rc) {
1288                 /* bits are in the reverse order of interrupt priority */
1289                 irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
1290                 switch (irq_type) {
1291                 case IRQ_PEND_IO_ISC_0:
1292                 case IRQ_PEND_IO_ISC_1:
1293                 case IRQ_PEND_IO_ISC_2:
1294                 case IRQ_PEND_IO_ISC_3:
1295                 case IRQ_PEND_IO_ISC_4:
1296                 case IRQ_PEND_IO_ISC_5:
1297                 case IRQ_PEND_IO_ISC_6:
1298                 case IRQ_PEND_IO_ISC_7:
1299                         rc = __deliver_io(vcpu, irq_type);
1300                         break;
1301                 case IRQ_PEND_MCHK_EX:
1302                 case IRQ_PEND_MCHK_REP:
1303                         rc = __deliver_machine_check(vcpu);
1304                         break;
1305                 case IRQ_PEND_PROG:
1306                         rc = __deliver_prog(vcpu);
1307                         break;
1308                 case IRQ_PEND_EXT_EMERGENCY:
1309                         rc = __deliver_emergency_signal(vcpu);
1310                         break;
1311                 case IRQ_PEND_EXT_EXTERNAL:
1312                         rc = __deliver_external_call(vcpu);
1313                         break;
1314                 case IRQ_PEND_EXT_CLOCK_COMP:
1315                         rc = __deliver_ckc(vcpu);
1316                         break;
1317                 case IRQ_PEND_EXT_CPU_TIMER:
1318                         rc = __deliver_cpu_timer(vcpu);
1319                         break;
1320                 case IRQ_PEND_RESTART:
1321                         rc = __deliver_restart(vcpu);
1322                         break;
1323                 case IRQ_PEND_SET_PREFIX:
1324                         rc = __deliver_set_prefix(vcpu);
1325                         break;
1326                 case IRQ_PEND_PFAULT_INIT:
1327                         rc = __deliver_pfault_init(vcpu);
1328                         break;
1329                 case IRQ_PEND_EXT_SERVICE:
1330                         rc = __deliver_service(vcpu);
1331                         break;
1332                 case IRQ_PEND_PFAULT_DONE:
1333                         rc = __deliver_pfault_done(vcpu);
1334                         break;
1335                 case IRQ_PEND_VIRTIO:
1336                         rc = __deliver_virtio(vcpu);
1337                         break;
1338                 default:
1339                         WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
1340                         clear_bit(irq_type, &li->pending_irqs);
1341                 }
1342         }
1343 
1344         set_intercept_indicators(vcpu);
1345 
1346         return rc;
1347 }
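
/*
 * A standalone sketch (not part of this file) of the priority pick above:
 * find_last_bit() returns the most significant pending bit, so the
 * IRQ_PEND_* values must be ordered from lowest to highest priority.
 * The enum names below are illustrative, not the kernel's.
 */
#include <stdio.h>

enum { PEND_VIRTIO, PEND_SERVICE, PEND_PROG, PEND_MCHK, PEND_COUNT };

static int find_last_bit_ul(unsigned long mask)
{
	/* assumes a 64-bit unsigned long, as on s390 */
	return mask ? 63 - __builtin_clzl(mask) : PEND_COUNT;
}

int main(void)
{
	unsigned long pending = (1UL << PEND_VIRTIO) | (1UL << PEND_MCHK);

	while (pending) {
		int irq = find_last_bit_ul(pending);

		printf("deliver irq type %d\n", irq); /* PEND_MCHK first */
		pending &= ~(1UL << irq);
	}
	return 0;
}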
1348 
1349 static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1350 {
1351         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1352 
1353         vcpu->stat.inject_program++;
1354         VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
1355         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1356                                    irq->u.pgm.code, 0);
1357 
1358         if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
1359                 /* auto-detect the ILC if no valid ILC was given */
1360                 irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
1361                 irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
1362                 irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
1363         }
1364 
1365         if (irq->u.pgm.code == PGM_PER) {
1366                 li->irq.pgm.code |= PGM_PER;
1367                 li->irq.pgm.flags = irq->u.pgm.flags;
1368                 /* only modify PER related information */
1369                 li->irq.pgm.per_address = irq->u.pgm.per_address;
1370                 li->irq.pgm.per_code = irq->u.pgm.per_code;
1371                 li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
1372                 li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
1373         } else if (!(irq->u.pgm.code & PGM_PER)) {
1374                 li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
1375                                    irq->u.pgm.code;
1376                 li->irq.pgm.flags = irq->u.pgm.flags;
1377                 /* only modify non-PER information */
1378                 li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
1379                 li->irq.pgm.mon_code = irq->u.pgm.mon_code;
1380                 li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
1381                 li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
1382                 li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
1383                 li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
1384         } else {
1385                 li->irq.pgm = irq->u.pgm;
1386         }
1387         set_bit(IRQ_PEND_PROG, &li->pending_irqs);
1388         return 0;
1389 }
1390 
1391 static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1392 {
1393         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1394 
1395         vcpu->stat.inject_pfault_init++;
1396         VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
1397                    irq->u.ext.ext_params2);
1398         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
1399                                    irq->u.ext.ext_params,
1400                                    irq->u.ext.ext_params2);
1401 
1402         li->irq.ext = irq->u.ext;
1403         set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
1404         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1405         return 0;
1406 }
1407 
1408 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1409 {
1410         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1411         struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
1412         uint16_t src_id = irq->u.extcall.code;
1413 
1414         vcpu->stat.inject_external_call++;
1415         VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
1416                    src_id);
1417         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
1418                                    src_id, 0);
1419 
1420         /* sending vcpu invalid */
1421         if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
1422                 return -EINVAL;
1423 
1424         if (sclp.has_sigpif)
1425                 return sca_inject_ext_call(vcpu, src_id);
1426 
1427         if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
1428                 return -EBUSY;
1429         *extcall = irq->u.extcall;
1430         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1431         return 0;
1432 }
1433 
1434 static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1435 {
1436         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1437         struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
1438 
1439         vcpu->stat.inject_set_prefix++;
1440         VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
1441                    irq->u.prefix.address);
1442         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
1443                                    irq->u.prefix.address, 0);
1444 
1445         if (!is_vcpu_stopped(vcpu))
1446                 return -EBUSY;
1447 
1448         *prefix = irq->u.prefix;
1449         set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
1450         return 0;
1451 }
1452 
1453 #define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
1454 static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1455 {
1456         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1457         struct kvm_s390_stop_info *stop = &li->irq.stop;
1458         int rc = 0;
1459 
1460         vcpu->stat.inject_stop_signal++;
1461         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
1462 
1463         if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
1464                 return -EINVAL;
1465 
1466         if (is_vcpu_stopped(vcpu)) {
1467                 if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
1468                         rc = kvm_s390_store_status_unloaded(vcpu,
1469                                                 KVM_S390_STORE_STATUS_NOADDR);
1470                 return rc;
1471         }
1472 
1473         if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
1474                 return -EBUSY;
1475         stop->flags = irq->u.stop.flags;
1476         kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
1477         return 0;
1478 }
1479 
1480 static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
1481                                  struct kvm_s390_irq *irq)
1482 {
1483         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1484 
1485         vcpu->stat.inject_restart++;
1486         VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
1487         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
1488 
1489         set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
1490         return 0;
1491 }
1492 
1493 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
1494                                    struct kvm_s390_irq *irq)
1495 {
1496         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1497 
1498         vcpu->stat.inject_emergency_signal++;
1499         VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
1500                    irq->u.emerg.code);
1501         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
1502                                    irq->u.emerg.code, 0);
1503 
1504         /* sending vcpu invalid */
1505         if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
1506                 return -EINVAL;
1507 
1508         set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1509         set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
1510         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1511         return 0;
1512 }
1513 
1514 static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1515 {
1516         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1517         struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
1518 
1519         vcpu->stat.inject_mchk++;
1520         VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
1521                    irq->u.mchk.mcic);
1522         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
1523                                    irq->u.mchk.mcic);
1524 
1525         /*
1526          * Because repressible machine checks can be indicated along with
1527          * exigent machine checks (PoP, Chapter 11, Interruption action),
1528          * we need to combine cr14, the mcic and the external damage code.
1529          * The failing storage address and the logout area are not or'ed
1530          * together; we just indicate the last occurrence of the
1531          * corresponding machine check.
1532          */
1533         mchk->cr14 |= irq->u.mchk.cr14;
1534         mchk->mcic |= irq->u.mchk.mcic;
1535         mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
1536         mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
1537         memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
1538                sizeof(mchk->fixed_logout));
1539         if (mchk->mcic & MCHK_EX_MASK)
1540                 set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
1541         else if (mchk->mcic & MCHK_REP_MASK)
1542                 set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
1543         return 0;
1544 }
1545 
1546 static int __inject_ckc(struct kvm_vcpu *vcpu)
1547 {
1548         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1549 
1550         vcpu->stat.inject_ckc++;
1551         VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
1552         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
1553                                    0, 0);
1554 
1555         set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1556         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1557         return 0;
1558 }
1559 
1560 static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
1561 {
1562         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1563 
1564         vcpu->stat.inject_cputm++;
1565         VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
1566         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
1567                                    0, 0);
1568 
1569         set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1570         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1571         return 0;
1572 }
1573 
1574 static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
1575                                                   int isc, u32 schid)
1576 {
1577         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1578         struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1579         struct kvm_s390_interrupt_info *iter;
1580         u16 id = (schid & 0xffff0000U) >> 16;
1581         u16 nr = schid & 0x0000ffffU;
1582 
1583         spin_lock(&fi->lock);
1584         list_for_each_entry(iter, isc_list, list) {
1585                 if (schid && (id != iter->io.subchannel_id ||
1586                               nr != iter->io.subchannel_nr))
1587                         continue;
1588                 /* found an appropriate entry */
1589                 list_del_init(&iter->list);
1590                 fi->counters[FIRQ_CNTR_IO] -= 1;
1591                 if (list_empty(isc_list))
1592                         clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1593                 spin_unlock(&fi->lock);
1594                 return iter;
1595         }
1596         spin_unlock(&fi->lock);
1597         return NULL;
1598 }
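
/*
 * A minimal standalone sketch of the schid decoding above: the 32-bit
 * schid packs the subchannel id in the upper halfword and the subchannel
 * number in the lower one; a schid of 0 matches any subchannel. The
 * sample value is illustrative.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t schid = 0x00010003;
	uint16_t id = (schid & 0xffff0000U) >> 16;
	uint16_t nr = schid & 0x0000ffffU;

	printf("subchannel id %04x nr %04x\n", id, nr); /* 0001 0003 */
	return 0;
}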
1599 
1600 static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
1601                                                       u64 isc_mask, u32 schid)
1602 {
1603         struct kvm_s390_interrupt_info *inti = NULL;
1604         int isc;
1605 
1606         for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
1607                 if (isc_mask & isc_to_isc_bits(isc))
1608                         inti = get_io_int(kvm, isc, schid);
1609         }
1610         return inti;
1611 }
1612 
1613 static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
1614 {
1615         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1616         unsigned long active_mask;
1617         int isc;
1618 
1619         if (schid)
1620                 goto out;
1621         if (!gi->origin)
1622                 goto out;
1623 
1624         active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
1625         while (active_mask) {
1626                 isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
1627                 if (gisa_tac_ipm_gisc(gi->origin, isc))
1628                         return isc;
1629                 clear_bit_inv(isc, &active_mask);
1630         }
1631 out:
1632         return -EINVAL;
1633 }
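
/*
 * A standalone sketch of the bit gymnastics above, assuming 64-bit longs:
 * the IPM byte is shifted so that ISC 0 lands in bit 63, and XOR-ing the
 * __fls() result with 63 converts the LSB-0 bit index back to the MSB-0
 * ISC number, as clear_bit_inv() expects. The IPM value is illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned long ipm = 0x41;		/* ISCs 1 and 7 (MSB-0) */
	unsigned long isc_mask = 0xffUL << 24;	/* cr6 style: all ISCs */
	unsigned long active = (isc_mask & ipm << 24) << 32;

	while (active) {
		int isc = (63 - __builtin_clzl(active)) ^ 63; /* __fls ^ 63 */

		printf("pending isc %d\n", isc);	/* 1, then 7 */
		active &= ~(1UL << (63 - isc));		/* clear_bit_inv */
	}
	return 0;
}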
1634 
1635 /*
1636  * Dequeue and return an I/O interrupt matching any of the interruption
1637  * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
1638  * Take into account the interrupts pending in the interrupt list and in GISA.
1639  *
1640  * Note that for a guest that does not enable I/O interrupts
1641  * but relies on TPI, a flood of classic interrupts may starve
1642  * out adapter interrupts on the same isc. Linux does not do
1643  * that, and it is possible to work around the issue by configuring
1644  * different iscs for classic and adapter interrupts in the guest,
1645  * but we may want to revisit this in the future.
1646  */
1647 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
1648                                                     u64 isc_mask, u32 schid)
1649 {
1650         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1651         struct kvm_s390_interrupt_info *inti, *tmp_inti;
1652         int isc;
1653 
1654         inti = get_top_io_int(kvm, isc_mask, schid);
1655 
1656         isc = get_top_gisa_isc(kvm, isc_mask, schid);
1657         if (isc < 0)
1658                 /* no AI in GISA */
1659                 goto out;
1660 
1661         if (!inti)
1662                 /* AI in GISA but no classical IO int */
1663                 goto gisa_out;
1664 
1665         /* both types of interrupts present */
1666         if (int_word_to_isc(inti->io.io_int_word) <= isc) {
1667                 /* classical IO int with higher priority */
1668                 gisa_set_ipm_gisc(gi->origin, isc);
1669                 goto out;
1670         }
1671 gisa_out:
1672         tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1673         if (tmp_inti) {
1674                 tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
1675                 tmp_inti->io.io_int_word = isc_to_int_word(isc);
1676                 if (inti)
1677                         kvm_s390_reinject_io_int(kvm, inti);
1678                 inti = tmp_inti;
1679         } else
1680                 gisa_set_ipm_gisc(gi->origin, isc);
1681 out:
1682         return inti;
1683 }
1684 
1685 #define SCCB_MASK 0xFFFFFFF8
1686 #define SCCB_EVENT_PENDING 0x3
1687 
1688 static int __inject_service(struct kvm *kvm,
1689                              struct kvm_s390_interrupt_info *inti)
1690 {
1691         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1692 
1693         kvm->stat.inject_service_signal++;
1694         spin_lock(&fi->lock);
1695         fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
1696         /*
1697          * Early versions of the QEMU s390 bios will inject several
1698          * service interrupts one after another without handling the
1699          * busy condition code.
1700          * We will silently ignore those superfluous sccb values.
1701          * A future version of QEMU will take care of serializing
1702          * servc requests.
1703          */
1704         if (fi->srv_signal.ext_params & SCCB_MASK)
1705                 goto out;
1706         fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1707         set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1708 out:
1709         spin_unlock(&fi->lock);
1710         kfree(inti);
1711         return 0;
1712 }
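
/*
 * A standalone sketch of how ext_params is split above: the low event
 * bits always accumulate, while the SCCB address bits are taken over
 * only if no address is pending yet. The sample value is illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

int main(void)
{
	uint32_t ext_params = 0x7f001002;

	printf("sccb address 0x%x, event bits 0x%x\n",
	       ext_params & SCCB_MASK, ext_params & SCCB_EVENT_PENDING);
	return 0;
}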
1713 
1714 static int __inject_virtio(struct kvm *kvm,
1715                             struct kvm_s390_interrupt_info *inti)
1716 {
1717         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1718 
1719         kvm->stat.inject_virtio++;
1720         spin_lock(&fi->lock);
1721         if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1722                 spin_unlock(&fi->lock);
1723                 return -EBUSY;
1724         }
1725         fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1726         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1727         set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1728         spin_unlock(&fi->lock);
1729         return 0;
1730 }
1731 
1732 static int __inject_pfault_done(struct kvm *kvm,
1733                                  struct kvm_s390_interrupt_info *inti)
1734 {
1735         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1736 
1737         kvm->stat.inject_pfault_done++;
1738         spin_lock(&fi->lock);
1739         if (fi->counters[FIRQ_CNTR_PFAULT] >=
1740                 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1741                 spin_unlock(&fi->lock);
1742                 return -EBUSY;
1743         }
1744         fi->counters[FIRQ_CNTR_PFAULT] += 1;
1745         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1746         set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1747         spin_unlock(&fi->lock);
1748         return 0;
1749 }
1750 
1751 #define CR_PENDING_SUBCLASS 28
1752 static int __inject_float_mchk(struct kvm *kvm,
1753                                 struct kvm_s390_interrupt_info *inti)
1754 {
1755         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1756 
1757         kvm->stat.inject_float_mchk++;
1758         spin_lock(&fi->lock);
1759         fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1760         fi->mchk.mcic |= inti->mchk.mcic;
1761         set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1762         spin_unlock(&fi->lock);
1763         kfree(inti);
1764         return 0;
1765 }
1766 
1767 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1768 {
1769         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1770         struct kvm_s390_float_interrupt *fi;
1771         struct list_head *list;
1772         int isc;
1773 
1774         kvm->stat.inject_io++;
1775         isc = int_word_to_isc(inti->io.io_int_word);
1776 
1777         if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
1778                 VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
1779                 gisa_set_ipm_gisc(gi->origin, isc);
1780                 kfree(inti);
1781                 return 0;
1782         }
1783 
1784         fi = &kvm->arch.float_int;
1785         spin_lock(&fi->lock);
1786         if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1787                 spin_unlock(&fi->lock);
1788                 return -EBUSY;
1789         }
1790         fi->counters[FIRQ_CNTR_IO] += 1;
1791 
1792         if (inti->type & KVM_S390_INT_IO_AI_MASK)
1793                 VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
1794         else
1795                 VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
1796                         inti->io.subchannel_id >> 8,
1797                         inti->io.subchannel_id >> 1 & 0x3,
1798                         inti->io.subchannel_nr);
1799         list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1800         list_add_tail(&inti->list, list);
1801         set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1802         spin_unlock(&fi->lock);
1803         return 0;
1804 }
1805 
1806 /*
1807  * Find a destination VCPU for a floating irq and kick it.
1808  */
1809 static void __floating_irq_kick(struct kvm *kvm, u64 type)
1810 {
1811         struct kvm_vcpu *dst_vcpu;
1812         int sigcpu, online_vcpus, nr_tries = 0;
1813 
1814         online_vcpus = atomic_read(&kvm->online_vcpus);
1815         if (!online_vcpus)
1816                 return;
1817 
1818         /* find idle VCPUs first, then round robin */
1819         sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
1820         if (sigcpu == online_vcpus) {
1821                 do {
1822                         sigcpu = kvm->arch.float_int.next_rr_cpu++;
1823                         kvm->arch.float_int.next_rr_cpu %= online_vcpus;
1824                         /* avoid endless loops if all vcpus are stopped */
1825                         if (nr_tries++ >= online_vcpus)
1826                                 return;
1827                 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1828         }
1829         dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1830 
1831         /* make the VCPU drop out of the SIE, or wake it up if sleeping */
1832         switch (type) {
1833         case KVM_S390_MCHK:
1834                 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
1835                 break;
1836         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1837                 if (!(type & KVM_S390_INT_IO_AI_MASK &&
1838                       kvm->arch.gisa_int.origin))
1839                         kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
1840                 break;
1841         default:
1842                 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
1843                 break;
1844         }
1845         kvm_s390_vcpu_wakeup(dst_vcpu);
1846 }
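
/*
 * A standalone toy model of the destination pick above: idle vcpus are
 * preferred, otherwise a round-robin counter walks the online vcpus and
 * skips stopped ones, giving up after one full lap. Names and the vcpu
 * count are illustrative.
 */
#include <stdio.h>

#define NR_VCPUS 4

static int pick_dst(unsigned int idle_mask, int *next_rr,
		    const int stopped[])
{
	int tries, cpu;

	for (cpu = 0; cpu < NR_VCPUS; cpu++)	/* idle vcpus first */
		if (idle_mask & (1U << cpu))
			return cpu;

	for (tries = 0; tries < NR_VCPUS; tries++) {
		cpu = (*next_rr)++ % NR_VCPUS;
		*next_rr %= NR_VCPUS;
		if (!stopped[cpu])
			return cpu;
	}
	return -1;	/* all vcpus stopped: drop the kick */
}

int main(void)
{
	int stopped[NR_VCPUS] = { 1, 0, 0, 1 };
	int next_rr = 0;

	printf("dst %d\n", pick_dst(0, &next_rr, stopped));	/* 1 */
	printf("dst %d\n", pick_dst(0, &next_rr, stopped));	/* 2 */
	return 0;
}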
1847 
1848 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1849 {
1850         u64 type = READ_ONCE(inti->type);
1851         int rc;
1852 
1853         switch (type) {
1854         case KVM_S390_MCHK:
1855                 rc = __inject_float_mchk(kvm, inti);
1856                 break;
1857         case KVM_S390_INT_VIRTIO:
1858                 rc = __inject_virtio(kvm, inti);
1859                 break;
1860         case KVM_S390_INT_SERVICE:
1861                 rc = __inject_service(kvm, inti);
1862                 break;
1863         case KVM_S390_INT_PFAULT_DONE:
1864                 rc = __inject_pfault_done(kvm, inti);
1865                 break;
1866         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1867                 rc = __inject_io(kvm, inti);
1868                 break;
1869         default:
1870                 rc = -EINVAL;
1871         }
1872         if (rc)
1873                 return rc;
1874 
1875         __floating_irq_kick(kvm, type);
1876         return 0;
1877 }
1878 
1879 int kvm_s390_inject_vm(struct kvm *kvm,
1880                        struct kvm_s390_interrupt *s390int)
1881 {
1882         struct kvm_s390_interrupt_info *inti;
1883         int rc;
1884 
1885         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1886         if (!inti)
1887                 return -ENOMEM;
1888 
1889         inti->type = s390int->type;
1890         switch (inti->type) {
1891         case KVM_S390_INT_VIRTIO:
1892                 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1893                          s390int->parm, s390int->parm64);
1894                 inti->ext.ext_params = s390int->parm;
1895                 inti->ext.ext_params2 = s390int->parm64;
1896                 break;
1897         case KVM_S390_INT_SERVICE:
1898                 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
1899                 inti->ext.ext_params = s390int->parm;
1900                 break;
1901         case KVM_S390_INT_PFAULT_DONE:
1902                 inti->ext.ext_params2 = s390int->parm64;
1903                 break;
1904         case KVM_S390_MCHK:
1905                 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
1906                          s390int->parm64);
1907                 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1908                 inti->mchk.mcic = s390int->parm64;
1909                 break;
1910         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1911                 inti->io.subchannel_id = s390int->parm >> 16;
1912                 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1913                 inti->io.io_int_parm = s390int->parm64 >> 32;
1914                 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1915                 break;
1916         default:
1917                 kfree(inti);
1918                 return -EINVAL;
1919         }
1920         trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1921                                  2);
1922 
1923         rc = __inject_vm(kvm, inti);
1924         if (rc)
1925                 kfree(inti);
1926         return rc;
1927 }
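
/*
 * A minimal userspace sketch of this path via the KVM_S390_INTERRUPT vm
 * ioctl (the legacy struct kvm_s390_interrupt interface handled above).
 * Creating a VM on /dev/kvm requires an s390 host; error handling is
 * trimmed and the SCCB address is illustrative.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	struct kvm_s390_interrupt intr = {
		.type = KVM_S390_INT_SERVICE,
		.parm = 0x1000,		/* ext_params: SCCB address */
	};

	if (ioctl(vm, KVM_S390_INTERRUPT, &intr) < 0)
		perror("KVM_S390_INTERRUPT");
	return 0;
}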
1928 
1929 int kvm_s390_reinject_io_int(struct kvm *kvm,
1930                               struct kvm_s390_interrupt_info *inti)
1931 {
1932         return __inject_vm(kvm, inti);
1933 }
1934 
1935 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1936                        struct kvm_s390_irq *irq)
1937 {
1938         irq->type = s390int->type;
1939         switch (irq->type) {
1940         case KVM_S390_PROGRAM_INT:
1941                 if (s390int->parm & 0xffff0000)
1942                         return -EINVAL;
1943                 irq->u.pgm.code = s390int->parm;
1944                 break;
1945         case KVM_S390_SIGP_SET_PREFIX:
1946                 irq->u.prefix.address = s390int->parm;
1947                 break;
1948         case KVM_S390_SIGP_STOP:
1949                 irq->u.stop.flags = s390int->parm;
1950                 break;
1951         case KVM_S390_INT_EXTERNAL_CALL:
1952                 if (s390int->parm & 0xffff0000)
1953                         return -EINVAL;
1954                 irq->u.extcall.code = s390int->parm;
1955                 break;
1956         case KVM_S390_INT_EMERGENCY:
1957                 if (s390int->parm & 0xffff0000)
1958                         return -EINVAL;
1959                 irq->u.emerg.code = s390int->parm;
1960                 break;
1961         case KVM_S390_MCHK:
1962                 irq->u.mchk.mcic = s390int->parm64;
1963                 break;
1964         case KVM_S390_INT_PFAULT_INIT:
1965                 irq->u.ext.ext_params = s390int->parm;
1966                 irq->u.ext.ext_params2 = s390int->parm64;
1967                 break;
1968         case KVM_S390_RESTART:
1969         case KVM_S390_INT_CLOCK_COMP:
1970         case KVM_S390_INT_CPU_TIMER:
1971                 break;
1972         default:
1973                 return -EINVAL;
1974         }
1975         return 0;
1976 }
1977 
1978 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1979 {
1980         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1981 
1982         return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1983 }
1984 
1985 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1986 {
1987         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1988 
1989         spin_lock(&li->lock);
1990         li->irq.stop.flags = 0;
1991         clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1992         spin_unlock(&li->lock);
1993 }
1994 
1995 static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1996 {
1997         int rc;
1998 
1999         switch (irq->type) {
2000         case KVM_S390_PROGRAM_INT:
2001                 rc = __inject_prog(vcpu, irq);
2002                 break;
2003         case KVM_S390_SIGP_SET_PREFIX:
2004                 rc = __inject_set_prefix(vcpu, irq);
2005                 break;
2006         case KVM_S390_SIGP_STOP:
2007                 rc = __inject_sigp_stop(vcpu, irq);
2008                 break;
2009         case KVM_S390_RESTART:
2010                 rc = __inject_sigp_restart(vcpu, irq);
2011                 break;
2012         case KVM_S390_INT_CLOCK_COMP:
2013                 rc = __inject_ckc(vcpu);
2014                 break;
2015         case KVM_S390_INT_CPU_TIMER:
2016                 rc = __inject_cpu_timer(vcpu);
2017                 break;
2018         case KVM_S390_INT_EXTERNAL_CALL:
2019                 rc = __inject_extcall(vcpu, irq);
2020                 break;
2021         case KVM_S390_INT_EMERGENCY:
2022                 rc = __inject_sigp_emergency(vcpu, irq);
2023                 break;
2024         case KVM_S390_MCHK:
2025                 rc = __inject_mchk(vcpu, irq);
2026                 break;
2027         case KVM_S390_INT_PFAULT_INIT:
2028                 rc = __inject_pfault_init(vcpu, irq);
2029                 break;
2030         case KVM_S390_INT_VIRTIO:
2031         case KVM_S390_INT_SERVICE:
2032         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2033         default:
2034                 rc = -EINVAL;
2035         }
2036 
2037         return rc;
2038 }
2039 
2040 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
2041 {
2042         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2043         int rc;
2044 
2045         spin_lock(&li->lock);
2046         rc = do_inject_vcpu(vcpu, irq);
2047         spin_unlock(&li->lock);
2048         if (!rc)
2049                 kvm_s390_vcpu_wakeup(vcpu);
2050         return rc;
2051 }
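
/*
 * A minimal userspace sketch of per-vcpu injection via the KVM_S390_IRQ
 * vcpu ioctl, which feeds a struct kvm_s390_irq into the path above.
 * The vcpu_fd is assumed to come from KVM_CREATE_VCPU; the source cpu
 * address is illustrative.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_emergency(int vcpu_fd, unsigned short src_cpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EMERGENCY,
		.u.emerg.code = src_cpu,
	};

	return ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
}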
2052 
2053 static inline void clear_irq_list(struct list_head *_list)
2054 {
2055         struct kvm_s390_interrupt_info *inti, *n;
2056 
2057         list_for_each_entry_safe(inti, n, _list, list) {
2058                 list_del(&inti->list);
2059                 kfree(inti);
2060         }
2061 }
2062 
2063 static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
2064                        struct kvm_s390_irq *irq)
2065 {
2066         irq->type = inti->type;
2067         switch (inti->type) {
2068         case KVM_S390_INT_PFAULT_INIT:
2069         case KVM_S390_INT_PFAULT_DONE:
2070         case KVM_S390_INT_VIRTIO:
2071                 irq->u.ext = inti->ext;
2072                 break;
2073         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2074                 irq->u.io = inti->io;
2075                 break;
2076         }
2077 }
2078 
2079 void kvm_s390_clear_float_irqs(struct kvm *kvm)
2080 {
2081         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2082         int i;
2083 
2084         spin_lock(&fi->lock);
2085         fi->pending_irqs = 0;
2086         memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
2087         memset(&fi->mchk, 0, sizeof(fi->mchk));
2088         for (i = 0; i < FIRQ_LIST_COUNT; i++)
2089                 clear_irq_list(&fi->lists[i]);
2090         for (i = 0; i < FIRQ_MAX_COUNT; i++)
2091                 fi->counters[i] = 0;
2092         spin_unlock(&fi->lock);
2093         kvm_s390_gisa_clear(kvm);
2094 }
2095 
2096 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
2097 {
2098         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
2099         struct kvm_s390_interrupt_info *inti;
2100         struct kvm_s390_float_interrupt *fi;
2101         struct kvm_s390_irq *buf;
2102         struct kvm_s390_irq *irq;
2103         int max_irqs;
2104         int ret = 0;
2105         int n = 0;
2106         int i;
2107 
2108         if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
2109                 return -EINVAL;
2110 
2111         /*
2112          * We are already using -ENOMEM to signal userspace that it may
2113          * retry with a bigger buffer, so we need to use something else
2114          * for this case.
2115          */
2116         buf = vzalloc(len);
2117         if (!buf)
2118                 return -ENOBUFS;
2119 
2120         max_irqs = len / sizeof(struct kvm_s390_irq);
2121 
2122         if (gi->origin && gisa_get_ipm(gi->origin)) {
2123                 for (i = 0; i <= MAX_ISC; i++) {
2124                         if (n == max_irqs) {
2125                                 /* signal userspace to try again */
2126                                 ret = -ENOMEM;
2127                                 goto out_nolock;
2128                         }
2129                         if (gisa_tac_ipm_gisc(gi->origin, i)) {
2130                                 irq = (struct kvm_s390_irq *) &buf[n];
2131                                 irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
2132                                 irq->u.io.io_int_word = isc_to_int_word(i);
2133                                 n++;
2134                         }
2135                 }
2136         }
2137         fi = &kvm->arch.float_int;
2138         spin_lock(&fi->lock);
2139         for (i = 0; i < FIRQ_LIST_COUNT; i++) {
2140                 list_for_each_entry(inti, &fi->lists[i], list) {
2141                         if (n == max_irqs) {
2142                                 /* signal userspace to try again */
2143                                 ret = -ENOMEM;
2144                                 goto out;
2145                         }
2146                         inti_to_irq(inti, &buf[n]);
2147                         n++;
2148                 }
2149         }
2150         if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
2151                 if (n == max_irqs) {
2152                         /* signal userspace to try again */
2153                         ret = -ENOMEM;
2154                         goto out;
2155                 }
2156                 irq = (struct kvm_s390_irq *) &buf[n];
2157                 irq->type = KVM_S390_INT_SERVICE;
2158                 irq->u.ext = fi->srv_signal;
2159                 n++;
2160         }
2161         if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
2162                 if (n == max_irqs) {
2163                         /* signal userspace to try again */
2164                         ret = -ENOMEM;
2165                         goto out;
2166                 }
2167                 irq = (struct kvm_s390_irq *) &buf[n];
2168                 irq->type = KVM_S390_MCHK;
2169                 irq->u.mchk = fi->mchk;
2170                 n++;
2171         }
2172 
2173 out:
2174         spin_unlock(&fi->lock);
2175 out_nolock:
2176         if (!ret && n > 0) {
2177                 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
2178                         ret = -EFAULT;
2179         }
2180         vfree(buf);
2181 
2182         return ret < 0 ? ret : n;
2183 }
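
/*
 * A userspace sketch of the -ENOMEM retry contract above, assuming a
 * FLIC fd obtained via KVM_CREATE_DEVICE with KVM_DEV_TYPE_FLIC. On
 * success the ioctl returns the number of interrupts copied. The
 * initial size and the doubling strategy are illustrative.
 */
#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_all_irqs(int flic_fd, struct kvm_s390_irq **out)
{
	struct kvm_device_attr attr = { .group = KVM_DEV_FLIC_GET_ALL_IRQS };
	__u64 len = 64 * sizeof(struct kvm_s390_irq);
	struct kvm_s390_irq *buf;
	int n;

	for (;;) {
		buf = malloc(len);
		if (!buf)
			return -1;
		attr.attr = len;
		attr.addr = (__u64)(unsigned long)buf;
		n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
		if (n >= 0 || errno != ENOMEM)
			break;
		free(buf);	/* buffer too small: double and retry */
		len *= 2;
	}
	if (n < 0) {
		free(buf);
		buf = NULL;
	}
	*out = buf;
	return n;	/* number of irqs, or < 0 with errno set */
}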
2184 
2185 static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
2186 {
2187         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2188         struct kvm_s390_ais_all ais;
2189 
2190         if (attr->attr < sizeof(ais))
2191                 return -EINVAL;
2192 
2193         if (!test_kvm_facility(kvm, 72))
2194                 return -EOPNOTSUPP;
2195 
2196         mutex_lock(&fi->ais_lock);
2197         ais.simm = fi->simm;
2198         ais.nimm = fi->nimm;
2199         mutex_unlock(&fi->ais_lock);
2200 
2201         if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
2202                 return -EFAULT;
2203 
2204         return 0;
2205 }
2206 
2207 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2208 {
2209         int r;
2210 
2211         switch (attr->group) {
2212         case KVM_DEV_FLIC_GET_ALL_IRQS:
2213                 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
2214                                           attr->attr);
2215                 break;
2216         case KVM_DEV_FLIC_AISM_ALL:
2217                 r = flic_ais_mode_get_all(dev->kvm, attr);
2218                 break;
2219         default:
2220                 r = -EINVAL;
2221         }
2222 
2223         return r;
2224 }
2225 
2226 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
2227                                      u64 addr)
2228 {
2229         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
2230         void *target = NULL;
2231         void __user *source;
2232         u64 size;
2233 
2234         if (get_user(inti->type, (u64 __user *)addr))
2235                 return -EFAULT;
2236 
2237         switch (inti->type) {
2238         case KVM_S390_INT_PFAULT_INIT:
2239         case KVM_S390_INT_PFAULT_DONE:
2240         case KVM_S390_INT_VIRTIO:
2241         case KVM_S390_INT_SERVICE:
2242                 target = (void *) &inti->ext;
2243                 source = &uptr->u.ext;
2244                 size = sizeof(inti->ext);
2245                 break;
2246         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2247                 target = (void *) &inti->io;
2248                 source = &uptr->u.io;
2249                 size = sizeof(inti->io);
2250                 break;
2251         case KVM_S390_MCHK:
2252                 target = (void *) &inti->mchk;
2253                 source = &uptr->u.mchk;
2254                 size = sizeof(inti->mchk);
2255                 break;
2256         default:
2257                 return -EINVAL;
2258         }
2259 
2260         if (copy_from_user(target, source, size))
2261                 return -EFAULT;
2262 
2263         return 0;
2264 }
2265 
2266 static int enqueue_floating_irq(struct kvm_device *dev,
2267                                 struct kvm_device_attr *attr)
2268 {
2269         struct kvm_s390_interrupt_info *inti = NULL;
2270         int r = 0;
2271         int len = attr->attr;
2272 
2273         if (len % sizeof(struct kvm_s390_irq) != 0)
2274                 return -EINVAL;
2275         else if (len > KVM_S390_FLIC_MAX_BUFFER)
2276                 return -EINVAL;
2277 
2278         while (len >= sizeof(struct kvm_s390_irq)) {
2279                 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
2280                 if (!inti)
2281                         return -ENOMEM;
2282 
2283                 r = copy_irq_from_user(inti, attr->addr);
2284                 if (r) {
2285                         kfree(inti);
2286                         return r;
2287                 }
2288                 r = __inject_vm(dev->kvm, inti);
2289                 if (r) {
2290                         kfree(inti);
2291                         return r;
2292                 }
2293                 len -= sizeof(struct kvm_s390_irq);
2294                 attr->addr += sizeof(struct kvm_s390_irq);
2295         }
2296 
2297         return r;
2298 }
2299 
2300 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
2301 {
2302         if (id >= MAX_S390_IO_ADAPTERS)
2303                 return NULL;
2304         id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
2305         return kvm->arch.adapters[id];
2306 }
2307 
2308 static int register_io_adapter(struct kvm_device *dev,
2309                                struct kvm_device_attr *attr)
2310 {
2311         struct s390_io_adapter *adapter;
2312         struct kvm_s390_io_adapter adapter_info;
2313 
2314         if (copy_from_user(&adapter_info,
2315                            (void __user *)attr->addr, sizeof(adapter_info)))
2316                 return -EFAULT;
2317 
2318         if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
2319                 return -EINVAL;
2320 
2321         adapter_info.id = array_index_nospec(adapter_info.id,
2322                                              MAX_S390_IO_ADAPTERS);
2323 
2324         if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
2325                 return -EINVAL;
2326 
2327         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2328         if (!adapter)
2329                 return -ENOMEM;
2330 
2331         INIT_LIST_HEAD(&adapter->maps);
2332         init_rwsem(&adapter->maps_lock);
2333         atomic_set(&adapter->nr_maps, 0);
2334         adapter->id = adapter_info.id;
2335         adapter->isc = adapter_info.isc;
2336         adapter->maskable = adapter_info.maskable;
2337         adapter->masked = false;
2338         adapter->swap = adapter_info.swap;
2339         adapter->suppressible = (adapter_info.flags) &
2340                                 KVM_S390_ADAPTER_SUPPRESSIBLE;
2341         dev->kvm->arch.adapters[adapter->id] = adapter;
2342 
2343         return 0;
2344 }
2345 
2346 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
2347 {
2348         int ret;
2349         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2350 
2351         if (!adapter || !adapter->maskable)
2352                 return -EINVAL;
2353         ret = adapter->masked;
2354         adapter->masked = masked;
2355         return ret;
2356 }
2357 
2358 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
2359 {
2360         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2361         struct s390_map_info *map;
2362         int ret;
2363 
2364         if (!adapter || !addr)
2365                 return -EINVAL;
2366 
2367         map = kzalloc(sizeof(*map), GFP_KERNEL);
2368         if (!map) {
2369                 ret = -ENOMEM;
2370                 goto out;
2371         }
2372         INIT_LIST_HEAD(&map->list);
2373         map->guest_addr = addr;
2374         map->addr = gmap_translate(kvm->arch.gmap, addr);
2375         if (map->addr == -EFAULT) {
2376                 ret = -EFAULT;
2377                 goto out;
2378         }
2379         ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page);
2380         if (ret < 0)
2381                 goto out;
2382         BUG_ON(ret != 1);
2383         down_write(&adapter->maps_lock);
2384         if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
2385                 list_add_tail(&map->list, &adapter->maps);
2386                 ret = 0;
2387         } else {
2388                 put_page(map->page);
2389                 ret = -EINVAL;
2390         }
2391         up_write(&adapter->maps_lock);
2392 out:
2393         if (ret)
2394                 kfree(map);
2395         return ret;
2396 }
2397 
2398 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
2399 {
2400         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2401         struct s390_map_info *map, *tmp;
2402         int found = 0;
2403 
2404         if (!adapter || !addr)
2405                 return -EINVAL;
2406 
2407         down_write(&adapter->maps_lock);
2408         list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
2409                 if (map->guest_addr == addr) {
2410                         found = 1;
2411                         atomic_dec(&adapter->nr_maps);
2412                         list_del(&map->list);
2413                         put_page(map->page);
2414                         kfree(map);
2415                         break;
2416                 }
2417         }
2418         up_write(&adapter->maps_lock);
2419 
2420         return found ? 0 : -EINVAL;
2421 }
2422 
2423 void kvm_s390_destroy_adapters(struct kvm *kvm)
2424 {
2425         int i;
2426         struct s390_map_info *map, *tmp;
2427 
2428         for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
2429                 if (!kvm->arch.adapters[i])
2430                         continue;
2431                 list_for_each_entry_safe(map, tmp,
2432                                          &kvm->arch.adapters[i]->maps, list) {
2433                         list_del(&map->list);
2434                         put_page(map->page);
2435                         kfree(map);
2436                 }
2437                 kfree(kvm->arch.adapters[i]);
2438         }
2439 }
2440 
2441 static int modify_io_adapter(struct kvm_device *dev,
2442                              struct kvm_device_attr *attr)
2443 {
2444         struct kvm_s390_io_adapter_req req;
2445         struct s390_io_adapter *adapter;
2446         int ret;
2447 
2448         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2449                 return -EFAULT;
2450 
2451         adapter = get_io_adapter(dev->kvm, req.id);
2452         if (!adapter)
2453                 return -EINVAL;
2454         switch (req.type) {
2455         case KVM_S390_IO_ADAPTER_MASK:
2456                 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2457                 if (ret > 0)
2458                         ret = 0;
2459                 break;
2460         case KVM_S390_IO_ADAPTER_MAP:
2461                 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
2462                 break;
2463         case KVM_S390_IO_ADAPTER_UNMAP:
2464                 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
2465                 break;
2466         default:
2467                 ret = -EINVAL;
2468         }
2469 
2470         return ret;
2471 }
2472 
2473 static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
2474 
2475 {
2476         const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2477         u32 schid;
2478 
2479         if (attr->flags)
2480                 return -EINVAL;
2481         if (attr->attr != sizeof(schid))
2482                 return -EINVAL;
2483         if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2484                 return -EFAULT;
2485         if (!schid)
2486                 return -EINVAL;
2487         kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2488         /*
2489          * If userspace is conforming to the architecture, we can have at most
2490          * one pending I/O interrupt per subchannel, so this is effectively a
2491          * clear all.
2492          */
2493         return 0;
2494 }
2495 
2496 static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
2497 {
2498         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2499         struct kvm_s390_ais_req req;
2500         int ret = 0;
2501 
2502         if (!test_kvm_facility(kvm, 72))
2503                 return -EOPNOTSUPP;
2504 
2505         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2506                 return -EFAULT;
2507 
2508         if (req.isc > MAX_ISC)
2509                 return -EINVAL;
2510 
2511         trace_kvm_s390_modify_ais_mode(req.isc,
2512                                        (fi->simm & AIS_MODE_MASK(req.isc)) ?
2513                                        (fi->nimm & AIS_MODE_MASK(req.isc)) ?
2514                                        2 : KVM_S390_AIS_MODE_SINGLE :
2515                                        KVM_S390_AIS_MODE_ALL, req.mode);
2516 
2517         mutex_lock(&fi->ais_lock);
2518         switch (req.mode) {
2519         case KVM_S390_AIS_MODE_ALL:
2520                 fi->simm &= ~AIS_MODE_MASK(req.isc);
2521                 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2522                 break;
2523         case KVM_S390_AIS_MODE_SINGLE:
2524                 fi->simm |= AIS_MODE_MASK(req.isc);
2525                 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2526                 break;
2527         default:
2528                 ret = -EINVAL;
2529         }
2530         mutex_unlock(&fi->ais_lock);
2531 
2532         return ret;
2533 }
2534 
2535 static int kvm_s390_inject_airq(struct kvm *kvm,
2536                                 struct s390_io_adapter *adapter)
2537 {
2538         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2539         struct kvm_s390_interrupt s390int = {
2540                 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2541                 .parm = 0,
2542                 .parm64 = isc_to_int_word(adapter->isc),
2543         };
2544         int ret = 0;
2545 
2546         if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
2547                 return kvm_s390_inject_vm(kvm, &s390int);
2548 
2549         mutex_lock(&fi->ais_lock);
2550         if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
2551                 trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
2552                 goto out;
2553         }
2554 
2555         ret = kvm_s390_inject_vm(kvm, &s390int);
2556         if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
2557                 fi->nimm |= AIS_MODE_MASK(adapter->isc);
2558                 trace_kvm_s390_modify_ais_mode(adapter->isc,
2559                                                KVM_S390_AIS_MODE_SINGLE, 2);
2560         }
2561 out:
2562         mutex_unlock(&fi->ais_lock);
2563         return ret;
2564 }
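
/*
 * A standalone toy model of adapter-interruption suppression above: in
 * single-interruption mode (simm bit set) the first delivered airq sets
 * the corresponding nimm bit, and further airqs on that isc are
 * suppressed until the guest resets the mode. Values are illustrative.
 */
#include <stdio.h>

#define AIS_MODE_MASK(isc) (0x80 >> (isc))

static int airq_deliver(unsigned int *simm, unsigned int *nimm, int isc)
{
	if (*nimm & AIS_MODE_MASK(isc))
		return 0;			/* suppressed */
	if (*simm & AIS_MODE_MASK(isc))
		*nimm |= AIS_MODE_MASK(isc);	/* arm suppression */
	return 1;				/* delivered */
}

int main(void)
{
	unsigned int simm = AIS_MODE_MASK(5), nimm = 0;
	int first = airq_deliver(&simm, &nimm, 5);
	int second = airq_deliver(&simm, &nimm, 5);

	printf("%d %d\n", first, second);	/* 1 0 */
	return 0;
}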
2565 
2566 static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
2567 {
2568         unsigned int id = attr->attr;
2569         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2570 
2571         if (!adapter)
2572                 return -EINVAL;
2573 
2574         return kvm_s390_inject_airq(kvm, adapter);
2575 }
2576 
2577 static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
2578 {
2579         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2580         struct kvm_s390_ais_all ais;
2581 
2582         if (!test_kvm_facility(kvm, 72))
2583                 return -EOPNOTSUPP;
2584 
2585         if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
2586                 return -EFAULT;
2587 
2588         mutex_lock(&fi->ais_lock);
2589         fi->simm = ais.simm;
2590         fi->nimm = ais.nimm;
2591         mutex_unlock(&fi->ais_lock);
2592 
2593         return 0;
2594 }
2595 
2596 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2597 {
2598         int r = 0;
2599         unsigned int i;
2600         struct kvm_vcpu *vcpu;
2601 
2602         switch (attr->group) {
2603         case KVM_DEV_FLIC_ENQUEUE:
2604                 r = enqueue_floating_irq(dev, attr);
2605                 break;
2606         case KVM_DEV_FLIC_CLEAR_IRQS:
2607                 kvm_s390_clear_float_irqs(dev->kvm);
2608                 break;
2609         case KVM_DEV_FLIC_APF_ENABLE:
2610                 dev->kvm->arch.gmap->pfault_enabled = 1;
2611                 break;
2612         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2613                 dev->kvm->arch.gmap->pfault_enabled = 0;
2614                 /*
2615                  * Make sure no async faults are in transition when
2616                  * clearing the queues, so we don't need to worry
2617                  * about late-arriving workers.
2618                  */
2619                 synchronize_srcu(&dev->kvm->srcu);
2620                 kvm_for_each_vcpu(i, vcpu, dev->kvm)
2621                         kvm_clear_async_pf_completion_queue(vcpu);
2622                 break;
2623         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2624                 r = register_io_adapter(dev, attr);
2625                 break;
2626         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2627                 r = modify_io_adapter(dev, attr);
2628                 break;
2629         case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2630                 r = clear_io_irq(dev->kvm, attr);
2631                 break;
2632         case KVM_DEV_FLIC_AISM:
2633                 r = modify_ais_mode(dev->kvm, attr);
2634                 break;
2635         case KVM_DEV_FLIC_AIRQ_INJECT:
2636                 r = flic_inject_airq(dev->kvm, attr);
2637                 break;
2638         case KVM_DEV_FLIC_AISM_ALL:
2639                 r = flic_ais_mode_set_all(dev->kvm, attr);
2640                 break;
2641         default:
2642                 r = -EINVAL;
2643         }
2644 
2645         return r;
2646 }
2647 
2648 static int flic_has_attr(struct kvm_device *dev,
2649                              struct kvm_device_attr *attr)
2650 {
2651         switch (attr->group) {
2652         case KVM_DEV_FLIC_GET_ALL_IRQS:
2653         case KVM_DEV_FLIC_ENQUEUE:
2654         case KVM_DEV_FLIC_CLEAR_IRQS:
2655         case KVM_DEV_FLIC_APF_ENABLE:
2656         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2657         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2658         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2659         case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2660         case KVM_DEV_FLIC_AISM:
2661         case KVM_DEV_FLIC_AIRQ_INJECT:
2662         case KVM_DEV_FLIC_AISM_ALL:
2663                 return 0;
2664         }
2665         return -ENXIO;
2666 }
2667 
2668 static int flic_create(struct kvm_device *dev, u32 type)
2669 {
2670         if (!dev)
2671                 return -EINVAL;
2672         if (dev->kvm->arch.flic)
2673                 return -EINVAL;
2674         dev->kvm->arch.flic = dev;
2675         return 0;
2676 }
2677 
2678 static void flic_destroy(struct kvm_device *dev)
2679 {
2680         dev->kvm->arch.flic = NULL;
2681         kfree(dev);
2682 }
2683 
2684 /* s390 floating irq controller (flic) */
2685 struct kvm_device_ops kvm_flic_ops = {
2686         .name = "kvm-flic",
2687         .get_attr = flic_get_attr,
2688         .set_attr = flic_set_attr,
2689         .has_attr = flic_has_attr,
2690         .create = flic_create,
2691         .destroy = flic_destroy,
2692 };
2693 
2694 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2695 {
2696         unsigned long bit;
2697 
2698         bit = bit_nr + (addr % PAGE_SIZE) * 8;
2699 
2700         return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2701 }
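
/*
 * A standalone sketch of the bit addressing above, assuming 64-bit longs
 * and 4K pages: the byte offset within the page is scaled to bits, and
 * for big-endian (swap) adapters the XOR with 63 flips the index to
 * MSB-0 numbering within each word. The sample offsets are illustrative.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define BITS_PER_LONG 64

static unsigned long ind_bit(unsigned long addr, unsigned long bit_nr,
			     int swap)
{
	unsigned long bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? bit ^ (BITS_PER_LONG - 1) : bit;
}

int main(void)
{
	/* indicator at page offset 0x10, bit 2 of that byte */
	unsigned long bit = ind_bit(0x10, 2, 1);

	printf("bit %lu (word %lu, bit %lu)\n", bit, bit / 64, bit % 64);
	return 0;	/* bit 189: word 2, bit 61 */
}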
2702 
2703 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
2704                                           u64 addr)
2705 {
2706         struct s390_map_info *map;
2707 
2708         if (!adapter)
2709                 return NULL;
2710 
2711         list_for_each_entry(map, &adapter->maps, list) {
2712                 if (map->guest_addr == addr)
2713                         return map;
2714         }
2715         return NULL;
2716 }
2717 
2718 static int adapter_indicators_set(struct kvm *kvm,
2719                                   struct s390_io_adapter *adapter,
2720                                   struct kvm_s390_adapter_int *adapter_int)
2721 {
2722         unsigned long bit;
2723         int summary_set, idx;
2724         struct s390_map_info *info;
2725         void *map;
2726 
2727         info = get_map_info(adapter, adapter_int->ind_addr);
2728         if (!info)
2729                 return -1;
2730         map = page_address(info->page);
2731         bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
2732         set_bit(bit, map);
2733         idx = srcu_read_lock(&kvm->srcu);
2734         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2735         set_page_dirty_lock(info->page);
2736         info = get_map_info(adapter, adapter_int->summary_addr);
2737         if (!info) {
2738                 srcu_read_unlock(&kvm->srcu, idx);
2739                 return -1;
2740         }
2741         map = page_address(info->page);
2742         bit = get_ind_bit(info->addr, adapter_int->summary_offset,
2743                           adapter->swap);
2744         summary_set = test_and_set_bit(bit, map);
2745         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2746         set_page_dirty_lock(info->page);
2747         srcu_read_unlock(&kvm->srcu, idx);
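        /*
         * A summary bit that was already set means the guest has not
         * looked at the indicators since the last notification; it will
         * pick up the new indicator bit then, so the interrupt can be
         * reported as coalesced (0) and no adapter irq needs injecting.
         */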
2748         return summary_set ? 0 : 1;
2749 }
2750 
2751 /*
2752  * < 0 - not injected due to error
2753  * = 0 - coalesced, summary indicator already active
2754  * > 0 - injected interrupt
2755  */
2756 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2757                            struct kvm *kvm, int irq_source_id, int level,
2758                            bool line_status)
2759 {
2760         int ret;
2761         struct s390_io_adapter *adapter;
2762 
2763         /* We're only interested in the 0->1 transition. */
2764         if (!level)
2765                 return 0;
2766         adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2767         if (!adapter)
2768                 return -1;
2769         down_read(&adapter->maps_lock);
2770         ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2771         up_read(&adapter->maps_lock);
2772         if ((ret > 0) && !adapter->masked) {
2773                 ret = kvm_s390_inject_airq(kvm, adapter);
2774                 if (ret == 0)
2775                         ret = 1;
2776         }
2777         return ret;
2778 }
2779 
2780 /*
2781  * Inject the machine check into the guest.
2782  */
2783 void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
2784                                      struct mcck_volatile_info *mcck_info)
2785 {
2786         struct kvm_s390_interrupt_info inti;
2787         struct kvm_s390_irq irq;
2788         struct kvm_s390_mchk_info *mchk;
2789         union mci mci;
2790         __u64 cr14 = 0;         /* upper bits are not used */
2791         int rc;
2792 
2793         mci.val = mcck_info->mcic;
2794         if (mci.sr)
2795                 cr14 |= CR14_RECOVERY_SUBMASK;
2796         if (mci.dg)
2797                 cr14 |= CR14_DEGRADATION_SUBMASK;
2798         if (mci.w)
2799                 cr14 |= CR14_WARNING_SUBMASK;
2800 
2801         mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
2802         mchk->cr14 = cr14;
2803         mchk->mcic = mcck_info->mcic;
2804         mchk->ext_damage_code = mcck_info->ext_damage_code;
2805         mchk->failing_storage_address = mcck_info->failing_storage_address;
2806         if (mci.ck) {
2807                 /* Inject the floating machine check */
2808                 inti.type = KVM_S390_MCHK;
2809                 rc = __inject_vm(vcpu->kvm, &inti);
2810         } else {
2811                 /* Inject the machine check into the specified vcpu */
2812                 irq.type = KVM_S390_MCHK;
2813                 rc = kvm_s390_inject_vcpu(vcpu, &irq);
2814         }
2815         WARN_ON_ONCE(rc);
2816 }
2817 
2818 int kvm_set_routing_entry(struct kvm *kvm,
2819                           struct kvm_kernel_irq_routing_entry *e,
2820                           const struct kvm_irq_routing_entry *ue)
2821 {
2822         int ret;
2823 
2824         switch (ue->type) {
2825         case KVM_IRQ_ROUTING_S390_ADAPTER:
2826                 e->set = set_adapter_int;
2827                 e->adapter.summary_addr = ue->u.adapter.summary_addr;
2828                 e->adapter.ind_addr = ue->u.adapter.ind_addr;
2829                 e->adapter.summary_offset = ue->u.adapter.summary_offset;
2830                 e->adapter.ind_offset = ue->u.adapter.ind_offset;
2831                 e->adapter.adapter_id = ue->u.adapter.adapter_id;
2832                 ret = 0;
2833                 break;
2834         default:
2835                 ret = -EINVAL;
2836         }
2837 
2838         return ret;
2839 }
2840 
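/*
 * Editor's note: a minimal sketch of the userspace half of the routing
 * set up above -- one KVM_IRQ_ROUTING_S390_ADAPTER entry installed via
 * KVM_SET_GSI_ROUTING.  The GSI number, addresses and offsets are
 * placeholders.
 */
#if 0	/* illustrative userspace code, not compiled with this file */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

static int route_gsi_to_adapter(int vm_fd, __u32 gsi, __u32 adapter_id,
				__u64 ind_addr, __u64 summary_addr)
{
	struct kvm_irq_routing *r;
	int rc;

	r = calloc(1, sizeof(*r) + sizeof(r->entries[0]));
	if (!r)
		return -1;
	r->nr = 1;
	r->entries[0].gsi = gsi;
	r->entries[0].type = KVM_IRQ_ROUTING_S390_ADAPTER;
	r->entries[0].u.adapter.ind_addr = ind_addr;
	r->entries[0].u.adapter.summary_addr = summary_addr;
	r->entries[0].u.adapter.ind_offset = 0;
	r->entries[0].u.adapter.summary_offset = 0;
	r->entries[0].u.adapter.adapter_id = adapter_id;
	rc = ioctl(vm_fd, KVM_SET_GSI_ROUTING, r);
	free(r);
	return rc;
}
#endif
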
2841 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2842                 int irq_source_id, int level, bool line_status)
2843 {
2844         return -EINVAL;
2845 }
2846 
2847 int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2848 {
2849         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2850         struct kvm_s390_irq *buf;
2851         int r = 0;
2852         int n;
2853 
2854         buf = vmalloc(len);
2855         if (!buf)
2856                 return -ENOMEM;
2857 
2858         if (copy_from_user((void *) buf, irqstate, len)) {
2859                 r = -EFAULT;
2860                 goto out_free;
2861         }
2862 
2863         /*
2864          * Don't allow setting the interrupt state
2865          * when there are already interrupts pending
2866          */
2867         spin_lock(&li->lock);
2868         if (li->pending_irqs) {
2869                 r = -EBUSY;
2870                 goto out_unlock;
2871         }
2872 
2873         for (n = 0; n < len / sizeof(*buf); n++) {
2874                 r = do_inject_vcpu(vcpu, &buf[n]);
2875                 if (r)
2876                         break;
2877         }
2878 
2879 out_unlock:
2880         spin_unlock(&li->lock);
2881 out_free:
2882         vfree(buf);
2883 
2884         return r;
2885 }
2886 
2887 static void store_local_irq(struct kvm_s390_local_interrupt *li,
2888                             struct kvm_s390_irq *irq,
2889                             unsigned long irq_type)
2890 {
2891         switch (irq_type) {
2892         case IRQ_PEND_MCHK_EX:
2893         case IRQ_PEND_MCHK_REP:
2894                 irq->type = KVM_S390_MCHK;
2895                 irq->u.mchk = li->irq.mchk;
2896                 break;
2897         case IRQ_PEND_PROG:
2898                 irq->type = KVM_S390_PROGRAM_INT;
2899                 irq->u.pgm = li->irq.pgm;
2900                 break;
2901         case IRQ_PEND_PFAULT_INIT:
2902                 irq->type = KVM_S390_INT_PFAULT_INIT;
2903                 irq->u.ext = li->irq.ext;
2904                 break;
2905         case IRQ_PEND_EXT_EXTERNAL:
2906                 irq->type = KVM_S390_INT_EXTERNAL_CALL;
2907                 irq->u.extcall = li->irq.extcall;
2908                 break;
2909         case IRQ_PEND_EXT_CLOCK_COMP:
2910                 irq->type = KVM_S390_INT_CLOCK_COMP;
2911                 break;
2912         case IRQ_PEND_EXT_CPU_TIMER:
2913                 irq->type = KVM_S390_INT_CPU_TIMER;
2914                 break;
2915         case IRQ_PEND_SIGP_STOP:
2916                 irq->type = KVM_S390_SIGP_STOP;
2917                 irq->u.stop = li->irq.stop;
2918                 break;
2919         case IRQ_PEND_RESTART:
2920                 irq->type = KVM_S390_RESTART;
2921                 break;
2922         case IRQ_PEND_SET_PREFIX:
2923                 irq->type = KVM_S390_SIGP_SET_PREFIX;
2924                 irq->u.prefix = li->irq.prefix;
2925                 break;
2926         }
2927 }
2928 
2929 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
2930 {
2931         int scn;
2932         DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
2933         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2934         unsigned long pending_irqs;
2935         struct kvm_s390_irq irq;
2936         unsigned long irq_type;
2937         int cpuaddr;
2938         int n = 0;
2939 
2940         spin_lock(&li->lock);
2941         pending_irqs = li->pending_irqs;
2942         memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
2943                sizeof(sigp_emerg_pending));
2944         spin_unlock(&li->lock);
2945 
2946         for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
2947                 memset(&irq, 0, sizeof(irq));
2948                 if (irq_type == IRQ_PEND_EXT_EMERGENCY)
2949                         continue;
2950                 if (n + sizeof(irq) > len)
2951                         return -ENOBUFS;
2952                 store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
2953                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2954                         return -EFAULT;
2955                 n += sizeof(irq);
2956         }
2957 
2958         if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
2959                 for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
2960                         memset(&irq, 0, sizeof(irq));
2961                         if (n + sizeof(irq) > len)
2962                                 return -ENOBUFS;
2963                         irq.type = KVM_S390_INT_EMERGENCY;
2964                         irq.u.emerg.code = cpuaddr;
2965                         if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2966                                 return -EFAULT;
2967                         n += sizeof(irq);
2968                 }
2969         }
2970 
2971         if (sca_ext_call_pending(vcpu, &scn)) {
2972                 if (n + sizeof(irq) > len)
2973                         return -ENOBUFS;
2974                 memset(&irq, 0, sizeof(irq));
2975                 irq.type = KVM_S390_INT_EXTERNAL_CALL;
2976                 irq.u.extcall.code = scn;
2977                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2978                         return -EFAULT;
2979                 n += sizeof(irq);
2980         }
2981 
2982         return n;
2983 }
2984 
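/*
 * Editor's note: kvm_s390_set_irq_state() and kvm_s390_get_irq_state()
 * back the KVM_S390_SET_IRQ_STATE/KVM_S390_GET_IRQ_STATE vcpu ioctls
 * used for migration.  A minimal sketch of the userspace side follows;
 * buffer sizing and error handling are simplified.
 */
#if 0	/* illustrative userspace code, not compiled with this file */
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* returns the number of bytes stored into buf, or < 0 on error */
static int save_local_irq_state(int vcpu_fd, void *buf, __u32 len)
{
	struct kvm_s390_irq_state state = {
		.buf = (__u64)(unsigned long)buf,
		.len = len,
	};

	return ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &state);
}

static int restore_local_irq_state(int vcpu_fd, void *buf, __u32 len)
{
	struct kvm_s390_irq_state state = {
		.buf = (__u64)(unsigned long)buf,
		.len = len,
	};

	return ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &state);
}
#endif
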
2985 static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
2986 {
2987         int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
2988         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
2989         struct kvm_vcpu *vcpu;
2990 
2991         for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
2992                 vcpu = kvm_get_vcpu(kvm, vcpu_id);
2993                 if (psw_ioint_disabled(vcpu))
2994                         continue;
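                /* bits 32-39 of CR6 hold the vcpu's I/O interruption subclass mask */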
2995                 deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
2996                 if (deliverable_mask) {
2997                         /* already kicked but not yet running */
2998                         if (test_and_set_bit(vcpu_id, gi->kicked_mask))
2999                                 return;
3000                         kvm_s390_vcpu_wakeup(vcpu);
3001                         return;
3002                 }
3003         }
3004 }
3005 
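/*
 * Timer callback armed by process_gib_alert_list(): as long as the IPM
 * still carries pending ISCs, keep kicking a suitable idle vcpu every
 * gi->expires ns; once the IPM is clean, gisa_get_ipm_or_restore_iam()
 * has restored the IAM and the timer can stop.
 */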
3006 static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
3007 {
3008         struct kvm_s390_gisa_interrupt *gi =
3009                 container_of(timer, struct kvm_s390_gisa_interrupt, timer);
3010         struct kvm *kvm =
3011                 container_of(gi->origin, struct sie_page2, gisa)->kvm;
3012         u8 pending_mask;
3013 
3014         pending_mask = gisa_get_ipm_or_restore_iam(gi);
3015         if (pending_mask) {
3016                 __airqs_kick_single_vcpu(kvm, pending_mask);
3017                 hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
3018                 return HRTIMER_RESTART;
3019         }
3020 
3021         return HRTIMER_NORESTART;
3022 }
3023 
3024 #define NULL_GISA_ADDR 0x00000000UL
3025 #define NONE_GISA_ADDR 0x00000001UL
3026 #define GISA_ADDR_MASK 0xfffff000UL
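/*
 * Alert list origin states: NULL_GISA_ADDR marks an empty list with GAL
 * interruptions enabled; NONE_GISA_ADDR marks a list that has been cut
 * off for processing, suppressing further GAL interruptions meanwhile.
 */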
3027 
3028 static void process_gib_alert_list(void)
3029 {
3030         struct kvm_s390_gisa_interrupt *gi;
3031         struct kvm_s390_gisa *gisa;
3032         struct kvm *kvm;
3033         u32 final, origin = 0UL;
3034 
3035         do {
3036                 /*
3037                  * If the NONE_GISA_ADDR is still stored in the alert list
3038                  * origin, we will leave the outer loop. No further GISA has
3039                  * been added to the alert list by millicode while processing
3040                  * the current alert list.
3041                  */
3042                 final = (origin & NONE_GISA_ADDR);
3043                 /*
3044                  * Cut off the alert list and store the NONE_GISA_ADDR in the
3045                  * alert list origin to avoid further GAL interruptions.
3046                  * A new alert list can be built up by millicode in parallel
3047                  * for guests not in the just cut-off alert list. On the
3048                  * final pass, store the NULL_GISA_ADDR instead; this
3049                  * re-enables GAL interruptions on the host.
3050                  */
3051                 origin = xchg(&gib->alert_list_origin,
3052                               (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR);
3053                 /*
3054                  * Loop through the just cut-off alert list and start the
3055                  * gisa timers to kick idle vcpus into consuming their
3056                  * pending interruptions as soon as possible.
3057                  */
3058                 while (origin & GISA_ADDR_MASK) {
3059                         gisa = (struct kvm_s390_gisa *)(u64)origin;
3060                         origin = gisa->next_alert;
3061                         gisa->next_alert = (u32)(u64)gisa;
3062                         kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
3063                         gi = &kvm->arch.gisa_int;
3064                         if (hrtimer_active(&gi->timer))
3065                                 hrtimer_cancel(&gi->timer);
3066                         hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
3067                 }
3068         } while (!final);
3070 }
3071 
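/*
 * Editor's note: the hand-over above is a lock-free "steal the whole
 * list" pattern.  Reduced to its core (names made up for illustration):
 * the producer -- here played by millicode -- pushes with a cmpxchg()
 * loop, and the consumer detaches everything with a single xchg().
 */
#if 0	/* illustrative only, not compiled with this file */
struct node { struct node *next; };
static struct node *head;

static void producer_push(struct node *n)
{
	struct node *old;

	do {
		old = READ_ONCE(head);
		n->next = old;
	} while (cmpxchg(&head, old, n) != old);
}

static struct node *consumer_steal_all(void)
{
	return xchg(&head, NULL);	/* cut off the whole list at once */
}
#endif
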
3072 void kvm_s390_gisa_clear(struct kvm *kvm)
3073 {
3074         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3075 
3076         if (!gi->origin)
3077                 return;
3078         gisa_clear_ipm(gi->origin);
3079         VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
3080 }
3081 
3082 void kvm_s390_gisa_init(struct kvm *kvm)
3083 {
3084         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3085 
3086         if (!css_general_characteristics.aiv)
3087                 return;
3088         gi->origin = &kvm->arch.sie_page2->gisa;
3089         gi->alert.mask = 0;
3090         spin_lock_init(&gi->alert.ref_lock);
3091         gi->expires = 50 * 1000; /* 50 usec */
3092         hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3093         gi->timer.function = gisa_vcpu_kicker;
3094         memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
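        /* a next_alert field pointing to the GISA itself means "not on any alert list" */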
3095         gi->origin->next_alert = (u32)(u64)gi->origin;
3096         VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
3097 }
3098 
3099 void kvm_s390_gisa_destroy(struct kvm *kvm)
3100 {
3101         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3102 
3103         if (!gi->origin)
3104                 return;
3105         if (gi->alert.mask)
3106                 KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
3107                           kvm, gi->alert.mask);
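        /* wait until any pending alert list processing has released this GISA */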
3108         while (gisa_in_alert_list(gi->origin))
3109                 cpu_relax();
3110         hrtimer_cancel(&gi->timer);
3111         gi->origin = NULL;
3112 }
3113 
3114 /**
3115  * kvm_s390_gisc_register - register a guest ISC
3116  *
3117  * @kvm:  the kernel vm to work with
3118  * @gisc: the guest interruption sub class to register
3119  *
3120  * The function extends the vm specific alert mask in use.
3121  * The effective IAM mask in the GISA is updated as well
3122  * in case the GISA is not part of the GIB alert list.
3123  * It will be updated at the latest when the IAM gets
3124  * restored by gisa_get_ipm_or_restore_iam().
3125  *
3126  * Returns: the nonspecific ISC (NISC) the gib alert mechanism
3127  *          has registered with the channel subsystem.
3128  *          -ENODEV in case the vm uses no GISA
3129  *          -ERANGE in case the guest ISC is invalid
3130  */
3131 int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
3132 {
3133         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3134 
3135         if (!gi->origin)
3136                 return -ENODEV;
3137         if (gisc > MAX_ISC)
3138                 return -ERANGE;
3139 
3140         spin_lock(&gi->alert.ref_lock);
3141         gi->alert.ref_count[gisc]++;
3142         if (gi->alert.ref_count[gisc] == 1) {
3143                 gi->alert.mask |= 0x80 >> gisc;
3144                 gisa_set_iam(gi->origin, gi->alert.mask);
3145         }
3146         spin_unlock(&gi->alert.ref_lock);
3147 
3148         return gib->nisc;
3149 }
3150 EXPORT_SYMBOL_GPL(kvm_s390_gisc_register);
3151 
3152 /**
3153  * kvm_s390_gisc_unregister - unregister a guest ISC
3154  *
3155  * @kvm:  the kernel vm to work with
3156  * @gisc: the guest interruption sub class to unregister
3157  *
3158  * The function reduces the vm specific alert mask in use.
3159  * The effective IAM mask in the GISA is updated as well
3160  * in case the GISA is not part of the GIB alert list.
3161  * It will be updated at the latest when the IAM gets
3162  * restored by gisa_get_ipm_or_restore_iam().
3163  *
3164  * Returns: 0 in case of success
3166  *          -ENODEV in case the vm uses no GISA
3167  *          -ERANGE in case the guest ISC is invalid
3168  *          -EINVAL in case the guest ISC is not registered
3169  */
3170 int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
3171 {
3172         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3173         int rc = 0;
3174 
3175         if (!gi->origin)
3176                 return -ENODEV;
3177         if (gisc > MAX_ISC)
3178                 return -ERANGE;
3179 
3180         spin_lock(&gi->alert.ref_lock);
3181         if (gi->alert.ref_count[gisc] == 0) {
3182                 rc = -EINVAL;
3183                 goto out;
3184         }
3185         gi->alert.ref_count[gisc]--;
3186         if (gi->alert.ref_count[gisc] == 0) {
3187                 gi->alert.mask &= ~(0x80 >> gisc);
3188                 gisa_set_iam(gi->origin, gi->alert.mask);
3189         }
3190 out:
3191         spin_unlock(&gi->alert.ref_lock);
3192 
3193         return rc;
3194 }
3195 EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);
3196 
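/*
 * Editor's note: a minimal sketch of the intended register/unregister
 * pairing, as an interrupt consumer (e.g. a passthrough device driver)
 * might use it.  Function names are placeholders.
 */
#if 0	/* illustrative only, not compiled with this file */
static int enable_gib_alerts(struct kvm *kvm, u32 gisc)
{
	int nisc = kvm_s390_gisc_register(kvm, gisc);

	if (nisc < 0)
		return nisc;	/* -ENODEV or -ERANGE */
	/* direct the adapter's alert interruptions to host ISC "nisc" ... */
	return 0;
}

static void disable_gib_alerts(struct kvm *kvm, u32 gisc)
{
	WARN_ON(kvm_s390_gisc_unregister(kvm, gisc));
}
#endif
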
3197 static void gib_alert_irq_handler(struct airq_struct *airq, bool floating)
3198 {
3199         inc_irq_stat(IRQIO_GAL);
3200         process_gib_alert_list();
3201 }
3202 
3203 static struct airq_struct gib_alert_irq = {
3204         .handler = gib_alert_irq_handler,
3205         .lsi_ptr = &gib_alert_irq.lsi_mask,
3206 };
3207 
3208 void kvm_s390_gib_destroy(void)
3209 {
3210         if (!gib)
3211                 return;
3212         chsc_sgib(0);
3213         unregister_adapter_interrupt(&gib_alert_irq);
3214         free_page((unsigned long)gib);
3215         gib = NULL;
3216 }
3217 
3218 int kvm_s390_gib_init(u8 nisc)
3219 {
3220         int rc = 0;
3221 
3222         if (!css_general_characteristics.aiv) {
3223                 KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
3224                 goto out;
3225         }
3226 
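        /* the GIB must be 31-bit addressable: chsc_sgib() takes a u32 origin */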
3227         gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
3228         if (!gib) {
3229                 rc = -ENOMEM;
3230                 goto out;
3231         }
3232 
3233         gib_alert_irq.isc = nisc;
3234         if (register_adapter_interrupt(&gib_alert_irq)) {
3235                 pr_err("Registering the GIB alert interruption handler failed\n");
3236                 rc = -EIO;
3237                 goto out_free_gib;
3238         }
3239 
3240         gib->nisc = nisc;
3241         if (chsc_sgib((u32)(u64)gib)) {
3242                 pr_err("Associating the GIB with the AIV facility failed\n");
3245                 rc = -EIO;
3246                 goto out_unreg_gal;
3247         }
3248 
3249         KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
3250         goto out;
3251 
3252 out_unreg_gal:
3253         unregister_adapter_interrupt(&gib_alert_irq);
3254 out_free_gib:
3255         free_page((unsigned long)gib);
3256         gib = NULL;
3257 out:
3258         return rc;
3259 }
