root/virt/kvm/arm/vgic/vgic-its.c


DEFINITIONS

This source file includes the following definitions.
  1. vgic_add_lpi
  2. vgic_its_get_abi
  3. vgic_its_set_abi
  4. find_its_device
  5. find_ite
  6. find_collection
  7. update_lpi_config
  8. vgic_copy_lpi_list
  9. update_affinity
  10. update_affinity_ite
  11. update_affinity_collection
  12. max_lpis_propbaser
  13. its_sync_lpi_pending_table
  14. vgic_mmio_read_its_typer
  15. vgic_mmio_read_its_iidr
  16. vgic_mmio_uaccess_write_its_iidr
  17. vgic_mmio_read_its_idregs
  18. __vgic_its_check_cache
  19. vgic_its_check_cache
  20. vgic_its_cache_translation
  21. vgic_its_invalidate_cache
  22. vgic_its_resolve_lpi
  23. vgic_msi_to_its
  24. vgic_its_trigger_msi
  25. vgic_its_inject_cached_translation
  26. vgic_its_inject_msi
  27. its_free_ite
  28. its_cmd_mask_field
  29. vgic_its_cmd_handle_discard
  30. vgic_its_cmd_handle_movi
  31. vgic_its_check_id
  32. vgic_its_alloc_collection
  33. vgic_its_free_collection
  34. vgic_its_alloc_ite
  35. vgic_its_cmd_handle_mapi
  36. vgic_its_free_device
  37. vgic_its_free_device_list
  38. vgic_its_free_collection_list
  39. vgic_its_alloc_device
  40. vgic_its_cmd_handle_mapd
  41. vgic_its_cmd_handle_mapc
  42. vgic_its_cmd_handle_clear
  43. vgic_its_cmd_handle_inv
  44. vgic_its_cmd_handle_invall
  45. vgic_its_cmd_handle_movall
  46. vgic_its_cmd_handle_int
  47. vgic_its_handle_command
  48. vgic_sanitise_its_baser
  49. vgic_sanitise_its_cbaser
  50. vgic_mmio_read_its_cbaser
  51. vgic_mmio_write_its_cbaser
  52. vgic_its_process_commands
  53. vgic_mmio_write_its_cwriter
  54. vgic_mmio_read_its_cwriter
  55. vgic_mmio_read_its_creadr
  56. vgic_mmio_uaccess_write_its_creadr
  57. vgic_mmio_read_its_baser
  58. vgic_mmio_write_its_baser
  59. vgic_mmio_read_its_ctlr
  60. vgic_mmio_write_its_ctlr
  61. its_mmio_write_wi
  62. vgic_enable_lpis
  63. vgic_register_its_iodev
  64. vgic_lpi_translation_cache_init
  65. vgic_lpi_translation_cache_destroy
  66. vgic_its_create
  67. vgic_its_destroy
  68. vgic_its_has_attr_regs
  69. vgic_its_attr_regs_access
  70. compute_next_devid_offset
  71. compute_next_eventid_offset
  72. scan_its_table
  73. vgic_its_save_ite
  74. vgic_its_restore_ite
  75. vgic_its_ite_cmp
  76. vgic_its_save_itt
  77. vgic_its_restore_itt
  78. vgic_its_save_dte
  79. vgic_its_restore_dte
  80. vgic_its_device_cmp
  81. vgic_its_save_device_tables
  82. handle_l1_dte
  83. vgic_its_restore_device_tables
  84. vgic_its_save_cte
  85. vgic_its_restore_cte
  86. vgic_its_save_collection_table
  87. vgic_its_restore_collection_table
  88. vgic_its_save_tables_v0
  89. vgic_its_restore_tables_v0
  90. vgic_its_commit_v0
  91. vgic_its_reset
  92. vgic_its_has_attr
  93. vgic_its_ctrl
  94. vgic_its_set_attr
  95. vgic_its_get_attr
  96. kvm_vgic_register_its_device

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * GICv3 ITS emulation
   4  *
   5  * Copyright (C) 2015,2016 ARM Ltd.
   6  * Author: Andre Przywara <andre.przywara@arm.com>
   7  */
   8 
   9 #include <linux/cpu.h>
  10 #include <linux/kvm.h>
  11 #include <linux/kvm_host.h>
  12 #include <linux/interrupt.h>
  13 #include <linux/list.h>
  14 #include <linux/uaccess.h>
  15 #include <linux/list_sort.h>
  16 
  17 #include <linux/irqchip/arm-gic-v3.h>
  18 
  19 #include <asm/kvm_emulate.h>
  20 #include <asm/kvm_arm.h>
  21 #include <asm/kvm_mmu.h>
  22 
  23 #include "vgic.h"
  24 #include "vgic-mmio.h"
  25 
  26 static int vgic_its_save_tables_v0(struct vgic_its *its);
  27 static int vgic_its_restore_tables_v0(struct vgic_its *its);
  28 static int vgic_its_commit_v0(struct vgic_its *its);
  29 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
  30                              struct kvm_vcpu *filter_vcpu, bool needs_inv);
  31 
  32 /*
  33  * Creates a new (reference to a) struct vgic_irq for a given LPI.
  34  * If this LPI is already mapped on another ITS, we increase its refcount
  35  * and return a pointer to the existing structure.
  36  * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
  37  * This function returns a pointer to the _unlocked_ structure.
  38  */
  39 static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
  40                                      struct kvm_vcpu *vcpu)
  41 {
  42         struct vgic_dist *dist = &kvm->arch.vgic;
  43         struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
  44         unsigned long flags;
  45         int ret;
  46 
  47         /* In this case there is no put, since we keep the reference. */
  48         if (irq)
  49                 return irq;
  50 
  51         irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
  52         if (!irq)
  53                 return ERR_PTR(-ENOMEM);
  54 
  55         INIT_LIST_HEAD(&irq->lpi_list);
  56         INIT_LIST_HEAD(&irq->ap_list);
  57         raw_spin_lock_init(&irq->irq_lock);
  58 
  59         irq->config = VGIC_CONFIG_EDGE;
  60         kref_init(&irq->refcount);
  61         irq->intid = intid;
  62         irq->target_vcpu = vcpu;
  63         irq->group = 1;
  64 
  65         raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
  66 
  67         /*
  68          * There could be a race with another vgic_add_lpi(), so we need to
  69          * check that we don't add a second list entry with the same LPI.
  70          */
  71         list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
  72                 if (oldirq->intid != intid)
  73                         continue;
  74 
   75                 /* Someone was faster with adding this LPI, let's use that. */
  76                 kfree(irq);
  77                 irq = oldirq;
  78 
  79                 /*
  80                  * This increases the refcount, the caller is expected to
  81                  * call vgic_put_irq() on the returned pointer once it's
  82                  * finished with the IRQ.
  83                  */
  84                 vgic_get_irq_kref(irq);
  85 
  86                 goto out_unlock;
  87         }
  88 
  89         list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
  90         dist->lpi_list_count++;
  91 
  92 out_unlock:
  93         raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
  94 
  95         /*
  96          * We "cache" the configuration table entries in our struct vgic_irq's.
  97          * However we only have those structs for mapped IRQs, so we read in
  98          * the respective config data from memory here upon mapping the LPI.
  99          */
 100         ret = update_lpi_config(kvm, irq, NULL, false);
 101         if (ret)
 102                 return ERR_PTR(ret);
 103 
 104         ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
 105         if (ret)
 106                 return ERR_PTR(ret);
 107 
 108         return irq;
 109 }
 110 
 111 struct its_device {
 112         struct list_head dev_list;
 113 
 114         /* the head for the list of ITTEs */
 115         struct list_head itt_head;
 116         u32 num_eventid_bits;
 117         gpa_t itt_addr;
 118         u32 device_id;
 119 };
 120 
 121 #define COLLECTION_NOT_MAPPED ((u32)~0)
 122 
 123 struct its_collection {
 124         struct list_head coll_list;
 125 
 126         u32 collection_id;
 127         u32 target_addr;
 128 };
 129 
 130 #define its_is_collection_mapped(coll) ((coll) && \
 131                                 ((coll)->target_addr != COLLECTION_NOT_MAPPED))
 132 
 133 struct its_ite {
 134         struct list_head ite_list;
 135 
 136         struct vgic_irq *irq;
 137         struct its_collection *collection;
 138         u32 event_id;
 139 };
 140 
 141 struct vgic_translation_cache_entry {
 142         struct list_head        entry;
 143         phys_addr_t             db;
 144         u32                     devid;
 145         u32                     eventid;
 146         struct vgic_irq         *irq;
 147 };
 148 
 149 /**
 150  * struct vgic_its_abi - ITS abi ops and settings
 151  * @cte_esz: collection table entry size
 152  * @dte_esz: device table entry size
 153  * @ite_esz: interrupt translation table entry size
  154  * @save_tables: save the ITS tables into guest RAM
 155  * @restore_tables: restore the ITS internal structs from tables
 156  *  stored in guest RAM
 157  * @commit: initialize the registers which expose the ABI settings,
 158  *  especially the entry sizes
 159  */
 160 struct vgic_its_abi {
 161         int cte_esz;
 162         int dte_esz;
 163         int ite_esz;
 164         int (*save_tables)(struct vgic_its *its);
 165         int (*restore_tables)(struct vgic_its *its);
 166         int (*commit)(struct vgic_its *its);
 167 };
 168 
 169 #define ABI_0_ESZ       8
 170 #define ESZ_MAX         ABI_0_ESZ
 171 
 172 static const struct vgic_its_abi its_table_abi_versions[] = {
 173         [0] = {
 174          .cte_esz = ABI_0_ESZ,
 175          .dte_esz = ABI_0_ESZ,
 176          .ite_esz = ABI_0_ESZ,
 177          .save_tables = vgic_its_save_tables_v0,
 178          .restore_tables = vgic_its_restore_tables_v0,
 179          .commit = vgic_its_commit_v0,
 180         },
 181 };
 182 
 183 #define NR_ITS_ABIS     ARRAY_SIZE(its_table_abi_versions)
 184 
 185 inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
 186 {
 187         return &its_table_abi_versions[its->abi_rev];
 188 }
 189 
 190 static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
 191 {
 192         const struct vgic_its_abi *abi;
 193 
 194         its->abi_rev = rev;
 195         abi = vgic_its_get_abi(its);
 196         return abi->commit(its);
 197 }
 198 
 199 /*
  200  * Finds and returns a device in the device table for an ITS.
 201  * Must be called with the its_lock mutex held.
 202  */
 203 static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
 204 {
 205         struct its_device *device;
 206 
 207         list_for_each_entry(device, &its->device_list, dev_list)
 208                 if (device_id == device->device_id)
 209                         return device;
 210 
 211         return NULL;
 212 }
 213 
 214 /*
  215  * Finds and returns an interrupt translation table entry (ITTE) for a given
 216  * Device ID/Event ID pair on an ITS.
 217  * Must be called with the its_lock mutex held.
 218  */
 219 static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
 220                                   u32 event_id)
 221 {
 222         struct its_device *device;
 223         struct its_ite *ite;
 224 
 225         device = find_its_device(its, device_id);
 226         if (device == NULL)
 227                 return NULL;
 228 
 229         list_for_each_entry(ite, &device->itt_head, ite_list)
 230                 if (ite->event_id == event_id)
 231                         return ite;
 232 
 233         return NULL;
 234 }
 235 
  236 /* Meant to be used as an iterator, so this macro omits the enclosing parentheses */
 237 #define for_each_lpi_its(dev, ite, its) \
 238         list_for_each_entry(dev, &(its)->device_list, dev_list) \
 239                 list_for_each_entry(ite, &(dev)->itt_head, ite_list)
 240 
 241 #define GIC_LPI_OFFSET 8192
 242 
 243 #define VITS_TYPER_IDBITS 16
 244 #define VITS_TYPER_DEVBITS 16
 245 #define VITS_DTE_MAX_DEVID_OFFSET       (BIT(14) - 1)
 246 #define VITS_ITE_MAX_EVENTID_OFFSET     (BIT(16) - 1)
 247 
 248 /*
 249  * Finds and returns a collection in the ITS collection table.
 250  * Must be called with the its_lock mutex held.
 251  */
 252 static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
 253 {
 254         struct its_collection *collection;
 255 
 256         list_for_each_entry(collection, &its->collection_list, coll_list) {
 257                 if (coll_id == collection->collection_id)
 258                         return collection;
 259         }
 260 
 261         return NULL;
 262 }
 263 
 264 #define LPI_PROP_ENABLE_BIT(p)  ((p) & LPI_PROP_ENABLED)
 265 #define LPI_PROP_PRIORITY(p)    ((p) & 0xfc)
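Each LPI has a one-byte entry in the guest's property table: bits [7:2] hold the
priority and bit 0 the enable flag. A minimal sketch of how the two helpers above
decode such a byte (0xa1 is purely an illustrative value):

        u8 prop = 0xa1;                            /* example config byte read from guest RAM */
        bool enabled = LPI_PROP_ENABLE_BIT(prop);  /* 0xa1 & LPI_PROP_ENABLED (bit 0) -> true */
        u8 priority = LPI_PROP_PRIORITY(prop);     /* 0xa1 & 0xfc -> priority 0xa0            */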
 266 
 267 /*
 268  * Reads the configuration data for a given LPI from guest memory and
 269  * updates the fields in struct vgic_irq.
 270  * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 271  * VCPU. Unconditionally applies if filter_vcpu is NULL.
 272  */
 273 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 274                              struct kvm_vcpu *filter_vcpu, bool needs_inv)
 275 {
 276         u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
 277         u8 prop;
 278         int ret;
 279         unsigned long flags;
 280 
 281         ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
 282                                   &prop, 1);
 283 
 284         if (ret)
 285                 return ret;
 286 
 287         raw_spin_lock_irqsave(&irq->irq_lock, flags);
 288 
 289         if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
 290                 irq->priority = LPI_PROP_PRIORITY(prop);
 291                 irq->enabled = LPI_PROP_ENABLE_BIT(prop);
 292 
 293                 if (!irq->hw) {
 294                         vgic_queue_irq_unlock(kvm, irq, flags);
 295                         return 0;
 296                 }
 297         }
 298 
 299         raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 300 
 301         if (irq->hw)
 302                 return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
 303 
 304         return 0;
 305 }
 306 
 307 /*
 308  * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 309  * enumerate those LPIs without holding any lock.
 310  * Returns their number and puts the kmalloc'ed array into intid_ptr.
 311  */
 312 int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 313 {
 314         struct vgic_dist *dist = &kvm->arch.vgic;
 315         struct vgic_irq *irq;
 316         unsigned long flags;
 317         u32 *intids;
 318         int irq_count, i = 0;
 319 
 320         /*
 321          * There is an obvious race between allocating the array and LPIs
 322          * being mapped/unmapped. If we ended up here as a result of a
 323          * command, we're safe (locks are held, preventing another
 324          * command). If coming from another path (such as enabling LPIs),
 325          * we must be careful not to overrun the array.
 326          */
 327         irq_count = READ_ONCE(dist->lpi_list_count);
 328         intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
 329         if (!intids)
 330                 return -ENOMEM;
 331 
 332         raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 333         list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 334                 if (i == irq_count)
 335                         break;
 336                 /* We don't need to "get" the IRQ, as we hold the list lock. */
 337                 if (vcpu && irq->target_vcpu != vcpu)
 338                         continue;
 339                 intids[i++] = irq->intid;
 340         }
 341         raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 342 
 343         *intid_ptr = intids;
 344         return i;
 345 }
 346 
 347 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 348 {
 349         int ret = 0;
 350         unsigned long flags;
 351 
 352         raw_spin_lock_irqsave(&irq->irq_lock, flags);
 353         irq->target_vcpu = vcpu;
 354         raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 355 
 356         if (irq->hw) {
 357                 struct its_vlpi_map map;
 358 
 359                 ret = its_get_vlpi(irq->host_irq, &map);
 360                 if (ret)
 361                         return ret;
 362 
 363                 map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
 364 
 365                 ret = its_map_vlpi(irq->host_irq, &map);
 366         }
 367 
 368         return ret;
 369 }
 370 
 371 /*
 372  * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 373  * is targeting) to the VGIC's view, which deals with target VCPUs.
  374  * Needs to be called whenever either the collection for an LPI has
 375  * changed or the collection itself got retargeted.
 376  */
 377 static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
 378 {
 379         struct kvm_vcpu *vcpu;
 380 
 381         if (!its_is_collection_mapped(ite->collection))
 382                 return;
 383 
 384         vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
 385         update_affinity(ite->irq, vcpu);
 386 }
 387 
 388 /*
 389  * Updates the target VCPU for every LPI targeting this collection.
 390  * Must be called with the its_lock mutex held.
 391  */
 392 static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
 393                                        struct its_collection *coll)
 394 {
 395         struct its_device *device;
 396         struct its_ite *ite;
 397 
 398         for_each_lpi_its(device, ite, its) {
 399                 if (!ite->collection || coll != ite->collection)
 400                         continue;
 401 
 402                 update_affinity_ite(kvm, ite);
 403         }
 404 }
 405 
 406 static u32 max_lpis_propbaser(u64 propbaser)
 407 {
 408         int nr_idbits = (propbaser & 0x1f) + 1;
 409 
 410         return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
 411 }
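The low five bits of GICR_PROPBASER encode the number of interrupt ID bits minus
one, and the result is capped at INTERRUPT_ID_BITS_ITS (16, matching
VITS_TYPER_IDBITS above), so an oversized guest value cannot inflate the LPI
range. A quick worked sketch with illustrative arguments (only bits [4:0] matter):

        max_lpis_propbaser(13);   /* (13 & 0x1f) + 1 = 14 ID bits -> 1U << 14 = 16384 IDs */
        max_lpis_propbaser(31);   /* 32 ID bits requested -> capped at 1U << 16 = 65536   */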
 412 
 413 /*
 414  * Sync the pending table pending bit of LPIs targeting @vcpu
  415  * with our own data structures. This relies on the LPIs having been
  416  * mapped beforehand.
 417  */
 418 static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 419 {
 420         gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
 421         struct vgic_irq *irq;
 422         int last_byte_offset = -1;
 423         int ret = 0;
 424         u32 *intids;
 425         int nr_irqs, i;
 426         unsigned long flags;
 427         u8 pendmask;
 428 
 429         nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
 430         if (nr_irqs < 0)
 431                 return nr_irqs;
 432 
 433         for (i = 0; i < nr_irqs; i++) {
 434                 int byte_offset, bit_nr;
 435 
 436                 byte_offset = intids[i] / BITS_PER_BYTE;
 437                 bit_nr = intids[i] % BITS_PER_BYTE;
 438 
 439                 /*
 440                  * For contiguously allocated LPIs chances are we just read
 441                  * this very same byte in the last iteration. Reuse that.
 442                  */
 443                 if (byte_offset != last_byte_offset) {
 444                         ret = kvm_read_guest_lock(vcpu->kvm,
 445                                                   pendbase + byte_offset,
 446                                                   &pendmask, 1);
 447                         if (ret) {
 448                                 kfree(intids);
 449                                 return ret;
 450                         }
 451                         last_byte_offset = byte_offset;
 452                 }
 453 
 454                 irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
 455                 raw_spin_lock_irqsave(&irq->irq_lock, flags);
 456                 irq->pending_latch = pendmask & (1U << bit_nr);
 457                 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 458                 vgic_put_irq(vcpu->kvm, irq);
 459         }
 460 
 461         kfree(intids);
 462 
 463         return ret;
 464 }
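Each mapped LPI owns one bit in the redistributor's pending table, indexed
directly by INTID: LPI 8195, for example, sits in byte 8195 / 8 = 1024 of the
table at bit 8195 % 8 = 3, which is exactly what the byte_offset/bit_nr
computation above works out before reading that byte from guest memory.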
 465 
 466 static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
 467                                               struct vgic_its *its,
 468                                               gpa_t addr, unsigned int len)
 469 {
 470         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
 471         u64 reg = GITS_TYPER_PLPIS;
 472 
 473         /*
 474          * We use linear CPU numbers for redistributor addressing,
 475          * so GITS_TYPER.PTA is 0.
 476          * Also we force all PROPBASER registers to be the same, so
 477          * CommonLPIAff is 0 as well.
 478          * To avoid memory waste in the guest, we keep the number of IDBits and
  479          * DevBits low - at least for the time being.
 480          */
 481         reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
 482         reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
 483         reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;
 484 
 485         return extract_bytes(reg, addr & 7, len);
 486 }
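GITS_TYPER encodes its size fields as "value minus one": with VITS_TYPER_DEVBITS
and VITS_TYPER_IDBITS both 16 and an 8-byte ITE (abi->ite_esz), the
GIC_ENCODE_SZ() calls above yield Devbits = 15, IDbits = 15 and
ITT_entry_size = 7, on top of the PLPIS bit.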
 487 
 488 static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
 489                                              struct vgic_its *its,
 490                                              gpa_t addr, unsigned int len)
 491 {
 492         u32 val;
 493 
 494         val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
 495         val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
 496         return val;
 497 }
 498 
 499 static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
 500                                             struct vgic_its *its,
 501                                             gpa_t addr, unsigned int len,
 502                                             unsigned long val)
 503 {
 504         u32 rev = GITS_IIDR_REV(val);
 505 
 506         if (rev >= NR_ITS_ABIS)
 507                 return -EINVAL;
 508         return vgic_its_set_abi(its, rev);
 509 }
 510 
 511 static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
 512                                                struct vgic_its *its,
 513                                                gpa_t addr, unsigned int len)
 514 {
 515         switch (addr & 0xffff) {
 516         case GITS_PIDR0:
 517                 return 0x92;    /* part number, bits[7:0] */
 518         case GITS_PIDR1:
 519                 return 0xb4;    /* part number, bits[11:8] */
 520         case GITS_PIDR2:
 521                 return GIC_PIDR2_ARCH_GICv3 | 0x0b;
 522         case GITS_PIDR4:
 523                 return 0x40;    /* This is a 64K software visible page */
 524         /* The following are the ID registers for (any) GIC. */
 525         case GITS_CIDR0:
 526                 return 0x0d;
 527         case GITS_CIDR1:
 528                 return 0xf0;
 529         case GITS_CIDR2:
 530                 return 0x05;
 531         case GITS_CIDR3:
 532                 return 0xb1;
 533         }
 534 
 535         return 0;
 536 }
 537 
 538 static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
 539                                                phys_addr_t db,
 540                                                u32 devid, u32 eventid)
 541 {
 542         struct vgic_translation_cache_entry *cte;
 543 
 544         list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
 545                 /*
 546                  * If we hit a NULL entry, there is nothing after this
 547                  * point.
 548                  */
 549                 if (!cte->irq)
 550                         break;
 551 
 552                 if (cte->db != db || cte->devid != devid ||
 553                     cte->eventid != eventid)
 554                         continue;
 555 
 556                 /*
 557                  * Move this entry to the head, as it is the most
 558                  * recently used.
 559                  */
 560                 if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
 561                         list_move(&cte->entry, &dist->lpi_translation_cache);
 562 
 563                 return cte->irq;
 564         }
 565 
 566         return NULL;
 567 }
 568 
 569 static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
 570                                              u32 devid, u32 eventid)
 571 {
 572         struct vgic_dist *dist = &kvm->arch.vgic;
 573         struct vgic_irq *irq;
 574         unsigned long flags;
 575 
 576         raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 577         irq = __vgic_its_check_cache(dist, db, devid, eventid);
 578         raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 579 
 580         return irq;
 581 }
 582 
 583 static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
 584                                        u32 devid, u32 eventid,
 585                                        struct vgic_irq *irq)
 586 {
 587         struct vgic_dist *dist = &kvm->arch.vgic;
 588         struct vgic_translation_cache_entry *cte;
 589         unsigned long flags;
 590         phys_addr_t db;
 591 
 592         /* Do not cache a directly injected interrupt */
 593         if (irq->hw)
 594                 return;
 595 
 596         raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 597 
 598         if (unlikely(list_empty(&dist->lpi_translation_cache)))
 599                 goto out;
 600 
 601         /*
 602          * We could have raced with another CPU caching the same
  603          * translation behind our back, so let's check that it is not
  604          * already in the cache.
 605          */
 606         db = its->vgic_its_base + GITS_TRANSLATER;
 607         if (__vgic_its_check_cache(dist, db, devid, eventid))
 608                 goto out;
 609 
 610         /* Always reuse the last entry (LRU policy) */
 611         cte = list_last_entry(&dist->lpi_translation_cache,
 612                               typeof(*cte), entry);
 613 
 614         /*
 615          * Caching the translation implies having an extra reference
 616          * to the interrupt, so drop the potential reference on what
 617          * was in the cache, and increment it on the new interrupt.
 618          */
 619         if (cte->irq)
 620                 __vgic_put_lpi_locked(kvm, cte->irq);
 621 
 622         vgic_get_irq_kref(irq);
 623 
 624         cte->db         = db;
 625         cte->devid      = devid;
 626         cte->eventid    = eventid;
 627         cte->irq        = irq;
 628 
 629         /* Move the new translation to the head of the list */
 630         list_move(&cte->entry, &dist->lpi_translation_cache);
 631 
 632 out:
 633         raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 634 }
 635 
 636 void vgic_its_invalidate_cache(struct kvm *kvm)
 637 {
 638         struct vgic_dist *dist = &kvm->arch.vgic;
 639         struct vgic_translation_cache_entry *cte;
 640         unsigned long flags;
 641 
 642         raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 643 
 644         list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
 645                 /*
 646                  * If we hit a NULL entry, there is nothing after this
 647                  * point.
 648                  */
 649                 if (!cte->irq)
 650                         break;
 651 
 652                 __vgic_put_lpi_locked(kvm, cte->irq);
 653                 cte->irq = NULL;
 654         }
 655 
 656         raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 657 }
 658 
 659 int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 660                          u32 devid, u32 eventid, struct vgic_irq **irq)
 661 {
 662         struct kvm_vcpu *vcpu;
 663         struct its_ite *ite;
 664 
 665         if (!its->enabled)
 666                 return -EBUSY;
 667 
 668         ite = find_ite(its, devid, eventid);
 669         if (!ite || !its_is_collection_mapped(ite->collection))
 670                 return E_ITS_INT_UNMAPPED_INTERRUPT;
 671 
 672         vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
 673         if (!vcpu)
 674                 return E_ITS_INT_UNMAPPED_INTERRUPT;
 675 
 676         if (!vcpu->arch.vgic_cpu.lpis_enabled)
 677                 return -EBUSY;
 678 
 679         vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
 680 
 681         *irq = ite->irq;
 682         return 0;
 683 }
 684 
 685 struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
 686 {
 687         u64 address;
 688         struct kvm_io_device *kvm_io_dev;
 689         struct vgic_io_device *iodev;
 690 
 691         if (!vgic_has_its(kvm))
 692                 return ERR_PTR(-ENODEV);
 693 
 694         if (!(msi->flags & KVM_MSI_VALID_DEVID))
 695                 return ERR_PTR(-EINVAL);
 696 
 697         address = (u64)msi->address_hi << 32 | msi->address_lo;
 698 
 699         kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
 700         if (!kvm_io_dev)
 701                 return ERR_PTR(-EINVAL);
 702 
 703         if (kvm_io_dev->ops != &kvm_io_gic_ops)
 704                 return ERR_PTR(-EINVAL);
 705 
 706         iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
 707         if (iodev->iodev_type != IODEV_ITS)
 708                 return ERR_PTR(-EINVAL);
 709 
 710         return iodev->its;
 711 }
 712 
 713 /*
 714  * Find the target VCPU and the LPI number for a given devid/eventid pair
 715  * and make this IRQ pending, possibly injecting it.
 716  * Must be called with the its_lock mutex held.
 717  * Returns 0 on success, a positive error value for any ITS mapping
 718  * related errors and negative error values for generic errors.
 719  */
 720 static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
 721                                 u32 devid, u32 eventid)
 722 {
 723         struct vgic_irq *irq = NULL;
 724         unsigned long flags;
 725         int err;
 726 
 727         err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
 728         if (err)
 729                 return err;
 730 
 731         if (irq->hw)
 732                 return irq_set_irqchip_state(irq->host_irq,
 733                                              IRQCHIP_STATE_PENDING, true);
 734 
 735         raw_spin_lock_irqsave(&irq->irq_lock, flags);
 736         irq->pending_latch = true;
 737         vgic_queue_irq_unlock(kvm, irq, flags);
 738 
 739         return 0;
 740 }
 741 
 742 int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
 743 {
 744         struct vgic_irq *irq;
 745         unsigned long flags;
 746         phys_addr_t db;
 747 
 748         db = (u64)msi->address_hi << 32 | msi->address_lo;
 749         irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
 750 
 751         if (!irq)
 752                 return -1;
 753 
 754         raw_spin_lock_irqsave(&irq->irq_lock, flags);
 755         irq->pending_latch = true;
 756         vgic_queue_irq_unlock(kvm, irq, flags);
 757 
 758         return 0;
 759 }
 760 
 761 /*
 762  * Queries the KVM IO bus framework to get the ITS pointer from the given
 763  * doorbell address.
 764  * We then call vgic_its_trigger_msi() with the decoded data.
  765  * As the KVM_SIGNAL_MSI API description requires, this returns 1 on success.
 766  */
 767 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 768 {
 769         struct vgic_its *its;
 770         int ret;
 771 
 772         if (!vgic_its_inject_cached_translation(kvm, msi))
 773                 return 1;
 774 
 775         its = vgic_msi_to_its(kvm, msi);
 776         if (IS_ERR(its))
 777                 return PTR_ERR(its);
 778 
 779         mutex_lock(&its->its_lock);
 780         ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
 781         mutex_unlock(&its->its_lock);
 782 
 783         if (ret < 0)
 784                 return ret;
 785 
 786         /*
 787          * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
 788          * if the guest has blocked the MSI. So we map any LPI mapping
 789          * related error to that.
 790          */
 791         if (ret)
 792                 return 0;
 793         else
 794                 return 1;
 795 }
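This is the path exercised when userspace injects an MSI through the
KVM_SIGNAL_MSI vm ioctl with KVM_MSI_VALID_DEVID set, pointing the address at the
guest physical address of the ITS translation register. A minimal userspace
sketch (needs <linux/kvm.h> and <sys/ioctl.h>; its_doorbell, vm_fd and the IDs
are illustrative assumptions, not values taken from this file):

        struct kvm_msi msi = {
                .address_lo = its_doorbell,     /* GPA of GITS_TRANSLATER: ITS frame + 0x10040 */
                .address_hi = 0,
                .data       = 31,               /* EventID  */
                .devid      = 2,                /* DeviceID */
                .flags      = KVM_MSI_VALID_DEVID,
        };
        int ret = ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);  /* 1: delivered, 0: blocked by guest, < 0: error */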
 796 
 797 /* Requires the its_lock to be held. */
 798 static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
 799 {
 800         list_del(&ite->ite_list);
 801 
 802         /* This put matches the get in vgic_add_lpi. */
 803         if (ite->irq) {
 804                 if (ite->irq->hw)
 805                         WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
 806 
 807                 vgic_put_irq(kvm, ite->irq);
 808         }
 809 
 810         kfree(ite);
 811 }
 812 
 813 static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
 814 {
 815         return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
 816 }
 817 
 818 #define its_cmd_get_command(cmd)        its_cmd_mask_field(cmd, 0,  0,  8)
 819 #define its_cmd_get_deviceid(cmd)       its_cmd_mask_field(cmd, 0, 32, 32)
 820 #define its_cmd_get_size(cmd)           (its_cmd_mask_field(cmd, 1,  0,  5) + 1)
 821 #define its_cmd_get_id(cmd)             its_cmd_mask_field(cmd, 1,  0, 32)
 822 #define its_cmd_get_physical_id(cmd)    its_cmd_mask_field(cmd, 1, 32, 32)
 823 #define its_cmd_get_collection(cmd)     its_cmd_mask_field(cmd, 2,  0, 16)
 824 #define its_cmd_get_ittaddr(cmd)        (its_cmd_mask_field(cmd, 2,  8, 44) << 8)
 825 #define its_cmd_get_target_addr(cmd)    its_cmd_mask_field(cmd, 2, 16, 32)
 826 #define its_cmd_get_validbit(cmd)       its_cmd_mask_field(cmd, 2, 63,  1)
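ITS commands are four little-endian 64-bit words (32 bytes), and the accessors
above simply mask and shift the documented fields. A rough sketch of decoding a
MAPTI command, assuming a little-endian host and purely illustrative IDs:

        u64 cmd[4] = {
                (2ULL << 32) | GITS_CMD_MAPTI,  /* DW0: DeviceID = 2, command = 0x0a */
                (8224ULL << 32) | 31,           /* DW1: pINTID = 8224, EventID = 31  */
                1,                              /* DW2: ICID = 1                     */
                0,
        };

        its_cmd_get_deviceid(cmd);      /* -> 2    */
        its_cmd_get_id(cmd);            /* -> 31   */
        its_cmd_get_physical_id(cmd);   /* -> 8224 */
        its_cmd_get_collection(cmd);    /* -> 1    */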
 827 
 828 /*
 829  * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 830  * Must be called with the its_lock mutex held.
 831  */
 832 static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
 833                                        u64 *its_cmd)
 834 {
 835         u32 device_id = its_cmd_get_deviceid(its_cmd);
 836         u32 event_id = its_cmd_get_id(its_cmd);
 837         struct its_ite *ite;
 838 
 839 
 840         ite = find_ite(its, device_id, event_id);
 841         if (ite && ite->collection) {
 842                 /*
 843                  * Though the spec talks about removing the pending state, we
 844                  * don't bother here since we clear the ITTE anyway and the
 845                  * pending state is a property of the ITTE struct.
 846                  */
 847                 vgic_its_invalidate_cache(kvm);
 848 
 849                 its_free_ite(kvm, ite);
 850                 return 0;
 851         }
 852 
 853         return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
 854 }
 855 
 856 /*
 857  * The MOVI command moves an ITTE to a different collection.
 858  * Must be called with the its_lock mutex held.
 859  */
 860 static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
 861                                     u64 *its_cmd)
 862 {
 863         u32 device_id = its_cmd_get_deviceid(its_cmd);
 864         u32 event_id = its_cmd_get_id(its_cmd);
 865         u32 coll_id = its_cmd_get_collection(its_cmd);
 866         struct kvm_vcpu *vcpu;
 867         struct its_ite *ite;
 868         struct its_collection *collection;
 869 
 870         ite = find_ite(its, device_id, event_id);
 871         if (!ite)
 872                 return E_ITS_MOVI_UNMAPPED_INTERRUPT;
 873 
 874         if (!its_is_collection_mapped(ite->collection))
 875                 return E_ITS_MOVI_UNMAPPED_COLLECTION;
 876 
 877         collection = find_collection(its, coll_id);
 878         if (!its_is_collection_mapped(collection))
 879                 return E_ITS_MOVI_UNMAPPED_COLLECTION;
 880 
 881         ite->collection = collection;
 882         vcpu = kvm_get_vcpu(kvm, collection->target_addr);
 883 
 884         vgic_its_invalidate_cache(kvm);
 885 
 886         return update_affinity(ite->irq, vcpu);
 887 }
 888 
 889 /*
 890  * Check whether an ID can be stored into the corresponding guest table.
 891  * For a direct table this is pretty easy, but gets a bit nasty for
 892  * indirect tables. We check whether the resulting guest physical address
 893  * is actually valid (covered by a memslot and guest accessible).
 894  * For this we have to read the respective first level entry.
 895  */
 896 static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 897                               gpa_t *eaddr)
 898 {
 899         int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
 900         u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
 901         phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
 902         int esz = GITS_BASER_ENTRY_SIZE(baser);
 903         int index, idx;
 904         gfn_t gfn;
 905         bool ret;
 906 
 907         switch (type) {
 908         case GITS_BASER_TYPE_DEVICE:
 909                 if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
 910                         return false;
 911                 break;
 912         case GITS_BASER_TYPE_COLLECTION:
 913                 /* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
 914                 if (id >= BIT_ULL(16))
 915                         return false;
 916                 break;
 917         default:
 918                 return false;
 919         }
 920 
 921         if (!(baser & GITS_BASER_INDIRECT)) {
 922                 phys_addr_t addr;
 923 
 924                 if (id >= (l1_tbl_size / esz))
 925                         return false;
 926 
 927                 addr = base + id * esz;
 928                 gfn = addr >> PAGE_SHIFT;
 929 
 930                 if (eaddr)
 931                         *eaddr = addr;
 932 
 933                 goto out;
 934         }
 935 
 936         /* calculate and check the index into the 1st level */
 937         index = id / (SZ_64K / esz);
 938         if (index >= (l1_tbl_size / sizeof(u64)))
 939                 return false;
 940 
 941         /* Each 1st level entry is represented by a 64-bit value. */
 942         if (kvm_read_guest_lock(its->dev->kvm,
 943                            base + index * sizeof(indirect_ptr),
 944                            &indirect_ptr, sizeof(indirect_ptr)))
 945                 return false;
 946 
 947         indirect_ptr = le64_to_cpu(indirect_ptr);
 948 
 949         /* check the valid bit of the first level entry */
 950         if (!(indirect_ptr & BIT_ULL(63)))
 951                 return false;
 952 
 953         /* Mask the guest physical address and calculate the frame number. */
 954         indirect_ptr &= GENMASK_ULL(51, 16);
 955 
 956         /* Find the address of the actual entry */
 957         index = id % (SZ_64K / esz);
 958         indirect_ptr += index * esz;
 959         gfn = indirect_ptr >> PAGE_SHIFT;
 960 
 961         if (eaddr)
 962                 *eaddr = indirect_ptr;
 963 
 964 out:
 965         idx = srcu_read_lock(&its->dev->kvm->srcu);
 966         ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
 967         srcu_read_unlock(&its->dev->kvm->srcu, idx);
 968         return ret;
 969 }
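With the v0 ABI every device and collection table entry is 8 bytes, so a 64K page
holds SZ_64K / 8 = 8192 entries. For an indirect device table, the lookup above
splits an ID such as 20000 (both the ID and the l2_page base below are
illustrative) like this:

        int esz = 8;                              /* ABI v0 entry size                      */
        u32 id = 20000;
        int l1_index = id / (SZ_64K / esz);       /* 20000 / 8192 = 2: slot in the L1 table */
        int l2_index = id % (SZ_64K / esz);       /* 20000 % 8192 = 3616                    */
        gpa_t entry  = l2_page + l2_index * esz;  /* l2_page: address read from L1 slot 2   */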
 970 
 971 static int vgic_its_alloc_collection(struct vgic_its *its,
 972                                      struct its_collection **colp,
 973                                      u32 coll_id)
 974 {
 975         struct its_collection *collection;
 976 
 977         if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
 978                 return E_ITS_MAPC_COLLECTION_OOR;
 979 
 980         collection = kzalloc(sizeof(*collection), GFP_KERNEL);
 981         if (!collection)
 982                 return -ENOMEM;
 983 
 984         collection->collection_id = coll_id;
 985         collection->target_addr = COLLECTION_NOT_MAPPED;
 986 
 987         list_add_tail(&collection->coll_list, &its->collection_list);
 988         *colp = collection;
 989 
 990         return 0;
 991 }
 992 
 993 static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
 994 {
 995         struct its_collection *collection;
 996         struct its_device *device;
 997         struct its_ite *ite;
 998 
 999         /*
1000          * Clearing the mapping for that collection ID removes the
1001          * entry from the list. If there wasn't any before, we can
1002          * go home early.
1003          */
1004         collection = find_collection(its, coll_id);
1005         if (!collection)
1006                 return;
1007 
1008         for_each_lpi_its(device, ite, its)
1009                 if (ite->collection &&
1010                     ite->collection->collection_id == coll_id)
1011                         ite->collection = NULL;
1012 
1013         list_del(&collection->coll_list);
1014         kfree(collection);
1015 }
1016 
1017 /* Must be called with its_lock mutex held */
1018 static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
1019                                           struct its_collection *collection,
1020                                           u32 event_id)
1021 {
1022         struct its_ite *ite;
1023 
1024         ite = kzalloc(sizeof(*ite), GFP_KERNEL);
1025         if (!ite)
1026                 return ERR_PTR(-ENOMEM);
1027 
1028         ite->event_id   = event_id;
1029         ite->collection = collection;
1030 
1031         list_add_tail(&ite->ite_list, &device->itt_head);
1032         return ite;
1033 }
1034 
1035 /*
1036  * The MAPTI and MAPI commands map LPIs to ITTEs.
1037  * Must be called with its_lock mutex held.
1038  */
1039 static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
1040                                     u64 *its_cmd)
1041 {
1042         u32 device_id = its_cmd_get_deviceid(its_cmd);
1043         u32 event_id = its_cmd_get_id(its_cmd);
1044         u32 coll_id = its_cmd_get_collection(its_cmd);
1045         struct its_ite *ite;
1046         struct kvm_vcpu *vcpu = NULL;
1047         struct its_device *device;
1048         struct its_collection *collection, *new_coll = NULL;
1049         struct vgic_irq *irq;
1050         int lpi_nr;
1051 
1052         device = find_its_device(its, device_id);
1053         if (!device)
1054                 return E_ITS_MAPTI_UNMAPPED_DEVICE;
1055 
1056         if (event_id >= BIT_ULL(device->num_eventid_bits))
1057                 return E_ITS_MAPTI_ID_OOR;
1058 
1059         if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
1060                 lpi_nr = its_cmd_get_physical_id(its_cmd);
1061         else
1062                 lpi_nr = event_id;
1063         if (lpi_nr < GIC_LPI_OFFSET ||
1064             lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
1065                 return E_ITS_MAPTI_PHYSICALID_OOR;
1066 
1067         /* If there is an existing mapping, behavior is UNPREDICTABLE. */
1068         if (find_ite(its, device_id, event_id))
1069                 return 0;
1070 
1071         collection = find_collection(its, coll_id);
1072         if (!collection) {
1073                 int ret = vgic_its_alloc_collection(its, &collection, coll_id);
1074                 if (ret)
1075                         return ret;
1076                 new_coll = collection;
1077         }
1078 
1079         ite = vgic_its_alloc_ite(device, collection, event_id);
1080         if (IS_ERR(ite)) {
1081                 if (new_coll)
1082                         vgic_its_free_collection(its, coll_id);
1083                 return PTR_ERR(ite);
1084         }
1085 
1086         if (its_is_collection_mapped(collection))
1087                 vcpu = kvm_get_vcpu(kvm, collection->target_addr);
1088 
1089         irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
1090         if (IS_ERR(irq)) {
1091                 if (new_coll)
1092                         vgic_its_free_collection(its, coll_id);
1093                 its_free_ite(kvm, ite);
1094                 return PTR_ERR(irq);
1095         }
1096         ite->irq = irq;
1097 
1098         return 0;
1099 }
1100 
1101 /* Requires the its_lock to be held. */
1102 static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
1103 {
1104         struct its_ite *ite, *temp;
1105 
1106         /*
1107          * The spec says that unmapping a device with still valid
1108          * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
 1109          * since otherwise we would leak the memory backing them.
1110          */
1111         list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
1112                 its_free_ite(kvm, ite);
1113 
1114         vgic_its_invalidate_cache(kvm);
1115 
1116         list_del(&device->dev_list);
1117         kfree(device);
1118 }
1119 
1120 /* its lock must be held */
1121 static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
1122 {
1123         struct its_device *cur, *temp;
1124 
1125         list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
1126                 vgic_its_free_device(kvm, cur);
1127 }
1128 
1129 /* its lock must be held */
1130 static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
1131 {
1132         struct its_collection *cur, *temp;
1133 
1134         list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
1135                 vgic_its_free_collection(its, cur->collection_id);
1136 }
1137 
1138 /* Must be called with its_lock mutex held */
1139 static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
1140                                                 u32 device_id, gpa_t itt_addr,
1141                                                 u8 num_eventid_bits)
1142 {
1143         struct its_device *device;
1144 
1145         device = kzalloc(sizeof(*device), GFP_KERNEL);
1146         if (!device)
1147                 return ERR_PTR(-ENOMEM);
1148 
1149         device->device_id = device_id;
1150         device->itt_addr = itt_addr;
1151         device->num_eventid_bits = num_eventid_bits;
1152         INIT_LIST_HEAD(&device->itt_head);
1153 
1154         list_add_tail(&device->dev_list, &its->device_list);
1155         return device;
1156 }
1157 
1158 /*
1159  * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
1160  * Must be called with the its_lock mutex held.
1161  */
1162 static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
1163                                     u64 *its_cmd)
1164 {
1165         u32 device_id = its_cmd_get_deviceid(its_cmd);
1166         bool valid = its_cmd_get_validbit(its_cmd);
1167         u8 num_eventid_bits = its_cmd_get_size(its_cmd);
1168         gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
1169         struct its_device *device;
1170 
1171         if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
1172                 return E_ITS_MAPD_DEVICE_OOR;
1173 
1174         if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
1175                 return E_ITS_MAPD_ITTSIZE_OOR;
1176 
1177         device = find_its_device(its, device_id);
1178 
1179         /*
1180          * The spec says that calling MAPD on an already mapped device
1181          * invalidates all cached data for this device. We implement this
1182          * by removing the mapping and re-establishing it.
1183          */
1184         if (device)
1185                 vgic_its_free_device(kvm, device);
1186 
1187         /*
1188          * The spec does not say whether unmapping a not-mapped device
1189          * is an error, so we are done in any case.
1190          */
1191         if (!valid)
1192                 return 0;
1193 
1194         device = vgic_its_alloc_device(its, device_id, itt_addr,
1195                                        num_eventid_bits);
1196 
1197         return PTR_ERR_OR_ZERO(device);
1198 }
1199 
1200 /*
1201  * The MAPC command maps collection IDs to redistributors.
1202  * Must be called with the its_lock mutex held.
1203  */
1204 static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
1205                                     u64 *its_cmd)
1206 {
1207         u16 coll_id;
1208         u32 target_addr;
1209         struct its_collection *collection;
1210         bool valid;
1211 
1212         valid = its_cmd_get_validbit(its_cmd);
1213         coll_id = its_cmd_get_collection(its_cmd);
1214         target_addr = its_cmd_get_target_addr(its_cmd);
1215 
1216         if (target_addr >= atomic_read(&kvm->online_vcpus))
1217                 return E_ITS_MAPC_PROCNUM_OOR;
1218 
1219         if (!valid) {
1220                 vgic_its_free_collection(its, coll_id);
1221                 vgic_its_invalidate_cache(kvm);
1222         } else {
1223                 collection = find_collection(its, coll_id);
1224 
1225                 if (!collection) {
1226                         int ret;
1227 
1228                         ret = vgic_its_alloc_collection(its, &collection,
1229                                                         coll_id);
1230                         if (ret)
1231                                 return ret;
1232                         collection->target_addr = target_addr;
1233                 } else {
1234                         collection->target_addr = target_addr;
1235                         update_affinity_collection(kvm, its, collection);
1236                 }
1237         }
1238 
1239         return 0;
1240 }
1241 
1242 /*
1243  * The CLEAR command removes the pending state for a particular LPI.
1244  * Must be called with the its_lock mutex held.
1245  */
1246 static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
1247                                      u64 *its_cmd)
1248 {
1249         u32 device_id = its_cmd_get_deviceid(its_cmd);
1250         u32 event_id = its_cmd_get_id(its_cmd);
1251         struct its_ite *ite;
1252 
1253 
1254         ite = find_ite(its, device_id, event_id);
1255         if (!ite)
1256                 return E_ITS_CLEAR_UNMAPPED_INTERRUPT;
1257 
1258         ite->irq->pending_latch = false;
1259 
1260         if (ite->irq->hw)
1261                 return irq_set_irqchip_state(ite->irq->host_irq,
1262                                              IRQCHIP_STATE_PENDING, false);
1263 
1264         return 0;
1265 }
1266 
1267 /*
1268  * The INV command syncs the configuration bits from the memory table.
1269  * Must be called with the its_lock mutex held.
1270  */
1271 static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
1272                                    u64 *its_cmd)
1273 {
1274         u32 device_id = its_cmd_get_deviceid(its_cmd);
1275         u32 event_id = its_cmd_get_id(its_cmd);
1276         struct its_ite *ite;
1277 
1278 
1279         ite = find_ite(its, device_id, event_id);
1280         if (!ite)
1281                 return E_ITS_INV_UNMAPPED_INTERRUPT;
1282 
1283         return update_lpi_config(kvm, ite->irq, NULL, true);
1284 }
1285 
1286 /*
1287  * The INVALL command requests flushing of all IRQ data in this collection.
1288  * Find the VCPU mapped to that collection, then iterate over the VM's list
1289  * of mapped LPIs and update the configuration for each IRQ which targets
1290  * the specified vcpu. The configuration will be read from the in-memory
1291  * configuration table.
1292  * Must be called with the its_lock mutex held.
1293  */
1294 static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
1295                                       u64 *its_cmd)
1296 {
1297         u32 coll_id = its_cmd_get_collection(its_cmd);
1298         struct its_collection *collection;
1299         struct kvm_vcpu *vcpu;
1300         struct vgic_irq *irq;
1301         u32 *intids;
1302         int irq_count, i;
1303 
1304         collection = find_collection(its, coll_id);
1305         if (!its_is_collection_mapped(collection))
1306                 return E_ITS_INVALL_UNMAPPED_COLLECTION;
1307 
1308         vcpu = kvm_get_vcpu(kvm, collection->target_addr);
1309 
1310         irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
1311         if (irq_count < 0)
1312                 return irq_count;
1313 
1314         for (i = 0; i < irq_count; i++) {
1315                 irq = vgic_get_irq(kvm, NULL, intids[i]);
1316                 if (!irq)
1317                         continue;
1318                 update_lpi_config(kvm, irq, vcpu, false);
1319                 vgic_put_irq(kvm, irq);
1320         }
1321 
1322         kfree(intids);
1323 
1324         if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
1325                 its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
1326 
1327         return 0;
1328 }
1329 
1330 /*
1331  * The MOVALL command moves the pending state of all IRQs targeting one
1332  * redistributor to another. We don't hold the pending state in the VCPUs,
1333  * but in the IRQs instead, so there is really not much to do for us here.
1334  * However the spec says that no IRQ must target the old redistributor
1335  * afterwards, so we make sure that no LPI is using the associated target_vcpu.
1336  * This command affects all LPIs in the system that target that redistributor.
1337  */
1338 static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
1339                                       u64 *its_cmd)
1340 {
1341         u32 target1_addr = its_cmd_get_target_addr(its_cmd);
1342         u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
1343         struct kvm_vcpu *vcpu1, *vcpu2;
1344         struct vgic_irq *irq;
1345         u32 *intids;
1346         int irq_count, i;
1347 
1348         if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
1349             target2_addr >= atomic_read(&kvm->online_vcpus))
1350                 return E_ITS_MOVALL_PROCNUM_OOR;
1351 
1352         if (target1_addr == target2_addr)
1353                 return 0;
1354 
1355         vcpu1 = kvm_get_vcpu(kvm, target1_addr);
1356         vcpu2 = kvm_get_vcpu(kvm, target2_addr);
1357 
1358         irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
1359         if (irq_count < 0)
1360                 return irq_count;
1361 
1362         for (i = 0; i < irq_count; i++) {
1363                 irq = vgic_get_irq(kvm, NULL, intids[i]);
1364 
1365                 update_affinity(irq, vcpu2);
1366 
1367                 vgic_put_irq(kvm, irq);
1368         }
1369 
1370         vgic_its_invalidate_cache(kvm);
1371 
1372         kfree(intids);
1373         return 0;
1374 }
1375 
1376 /*
1377  * The INT command injects the LPI associated with that DevID/EvID pair.
1378  * Must be called with the its_lock mutex held.
1379  */
1380 static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
1381                                    u64 *its_cmd)
1382 {
1383         u32 msi_data = its_cmd_get_id(its_cmd);
1384         u64 msi_devid = its_cmd_get_deviceid(its_cmd);
1385 
1386         return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
1387 }
1388 
1389 /*
1390  * This function is called with the its_cmd lock held, but the ITS data
1391  * structure lock dropped.
1392  */
1393 static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
1394                                    u64 *its_cmd)
1395 {
1396         int ret = -ENODEV;
1397 
1398         mutex_lock(&its->its_lock);
1399         switch (its_cmd_get_command(its_cmd)) {
1400         case GITS_CMD_MAPD:
1401                 ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
1402                 break;
1403         case GITS_CMD_MAPC:
1404                 ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
1405                 break;
1406         case GITS_CMD_MAPI:
1407                 ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
1408                 break;
1409         case GITS_CMD_MAPTI:
1410                 ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
1411                 break;
1412         case GITS_CMD_MOVI:
1413                 ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
1414                 break;
1415         case GITS_CMD_DISCARD:
1416                 ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
1417                 break;
1418         case GITS_CMD_CLEAR:
1419                 ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
1420                 break;
1421         case GITS_CMD_MOVALL:
1422                 ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
1423                 break;
1424         case GITS_CMD_INT:
1425                 ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
1426                 break;
1427         case GITS_CMD_INV:
1428                 ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
1429                 break;
1430         case GITS_CMD_INVALL:
1431                 ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
1432                 break;
1433         case GITS_CMD_SYNC:
1434                 /* we ignore this command: we are in sync all of the time */
1435                 ret = 0;
1436                 break;
1437         }
1438         mutex_unlock(&its->its_lock);
1439 
1440         return ret;
1441 }
1442 
1443 static u64 vgic_sanitise_its_baser(u64 reg)
1444 {
1445         reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
1446                                   GITS_BASER_SHAREABILITY_SHIFT,
1447                                   vgic_sanitise_shareability);
1448         reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
1449                                   GITS_BASER_INNER_CACHEABILITY_SHIFT,
1450                                   vgic_sanitise_inner_cacheability);
1451         reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
1452                                   GITS_BASER_OUTER_CACHEABILITY_SHIFT,
1453                                   vgic_sanitise_outer_cacheability);
1454 
1455         /* We support only one (ITS) page size: 64K */
1456         reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;
1457 
1458         return reg;
1459 }
1460 
1461 static u64 vgic_sanitise_its_cbaser(u64 reg)
1462 {
1463         reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
1464                                   GITS_CBASER_SHAREABILITY_SHIFT,
1465                                   vgic_sanitise_shareability);
1466         reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
1467                                   GITS_CBASER_INNER_CACHEABILITY_SHIFT,
1468                                   vgic_sanitise_inner_cacheability);
1469         reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
1470                                   GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
1471                                   vgic_sanitise_outer_cacheability);
1472 
1473         /* Sanitise the physical address to be 64k aligned. */
1474         reg &= ~GENMASK_ULL(15, 12);
1475 
1476         return reg;
1477 }
1478 
1479 static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
1480                                                struct vgic_its *its,
1481                                                gpa_t addr, unsigned int len)
1482 {
1483         return extract_bytes(its->cbaser, addr & 7, len);
1484 }
1485 
1486 static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
1487                                        gpa_t addr, unsigned int len,
1488                                        unsigned long val)
1489 {
1490         /* When GITS_CTLR.Enable is 1, this register is RO. */
1491         if (its->enabled)
1492                 return;
1493 
1494         mutex_lock(&its->cmd_lock);
1495         its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
1496         its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
1497         its->creadr = 0;
1498         /*
1499          * CWRITER is architecturally UNKNOWN on reset, but we need to reset
1500          * it to CREADR to make sure we start with an empty command buffer.
1501          */
1502         its->cwriter = its->creadr;
1503         mutex_unlock(&its->cmd_lock);
1504 }
1505 
1506 #define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
1507 #define ITS_CMD_SIZE                    32
1508 #define ITS_CMD_OFFSET(reg)             ((reg) & GENMASK(19, 5))
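
     /*
      * Worked example: CBASER.Size is a zero-based count of 4K pages, so
      * Size == 0 gives ITS_CMD_BUFFER_SIZE == 4096 bytes, i.e. 128 commands
      * of ITS_CMD_SIZE (32) bytes each; the maximum, Size == 255, is 1MB or
      * 32768 commands.  CREADR/CWRITER hold byte offsets into that buffer,
      * always multiples of 32, which ITS_CMD_OFFSET() enforces by keeping
      * only bits [19:5].
      */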
1509 
1510 /* Must be called with the cmd_lock held. */
1511 static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
1512 {
1513         gpa_t cbaser;
1514         u64 cmd_buf[4];
1515 
1516         /* Commands are only processed when the ITS is enabled. */
1517         if (!its->enabled)
1518                 return;
1519 
1520         cbaser = GITS_CBASER_ADDRESS(its->cbaser);
1521 
1522         while (its->cwriter != its->creadr) {
1523                 int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
1524                                               cmd_buf, ITS_CMD_SIZE);
1525                 /*
1526                  * If kvm_read_guest() fails, this could be due to the guest
1527                  * programming a bogus value in CBASER or something else going
1528                  * wrong from which we cannot easily recover.
1529                  * According to section 6.3.2 in the GICv3 spec we can just
1530                  * ignore that command then.
1531                  */
1532                 if (!ret)
1533                         vgic_its_handle_command(kvm, its, cmd_buf);
1534 
1535                 its->creadr += ITS_CMD_SIZE;
1536                 if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
1537                         its->creadr = 0;
1538         }
1539 }
1540 
1541 /*
1542  * By writing to CWRITER the guest announces new commands to be processed.
1543  * To avoid any races, we take the its_cmd lock, which protects our
1544  * ring buffer variables, so that only one user per ITS is handling
1545  * commands at any given time.
1546  */
1547 static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
1548                                         gpa_t addr, unsigned int len,
1549                                         unsigned long val)
1550 {
1551         u64 reg;
1552 
1553         if (!its)
1554                 return;
1555 
1556         mutex_lock(&its->cmd_lock);
1557 
1558         reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
1559         reg = ITS_CMD_OFFSET(reg);
1560         if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1561                 mutex_unlock(&its->cmd_lock);
1562                 return;
1563         }
1564         its->cwriter = reg;
1565 
1566         vgic_its_process_commands(kvm, its);
1567 
1568         mutex_unlock(&its->cmd_lock);
1569 }
1570 
1571 static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
1572                                                 struct vgic_its *its,
1573                                                 gpa_t addr, unsigned int len)
1574 {
1575         return extract_bytes(its->cwriter, addr & 0x7, len);
1576 }
1577 
1578 static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
1579                                                struct vgic_its *its,
1580                                                gpa_t addr, unsigned int len)
1581 {
1582         return extract_bytes(its->creadr, addr & 0x7, len);
1583 }
1584 
1585 static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
1586                                               struct vgic_its *its,
1587                                               gpa_t addr, unsigned int len,
1588                                               unsigned long val)
1589 {
1590         u32 cmd_offset;
1591         int ret = 0;
1592 
1593         mutex_lock(&its->cmd_lock);
1594 
1595         if (its->enabled) {
1596                 ret = -EBUSY;
1597                 goto out;
1598         }
1599 
1600         cmd_offset = ITS_CMD_OFFSET(val);
1601         if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1602                 ret = -EINVAL;
1603                 goto out;
1604         }
1605 
1606         its->creadr = cmd_offset;
1607 out:
1608         mutex_unlock(&its->cmd_lock);
1609         return ret;
1610 }
1611 
1612 #define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
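     /*
      * Only GITS_BASER0 (the device table) and GITS_BASER1 (the collection
      * table) are backed here; the remaining six BASER registers read as
      * zero and writes to them are ignored.
      */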
1613 static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
1614                                               struct vgic_its *its,
1615                                               gpa_t addr, unsigned int len)
1616 {
1617         u64 reg;
1618 
1619         switch (BASER_INDEX(addr)) {
1620         case 0:
1621                 reg = its->baser_device_table;
1622                 break;
1623         case 1:
1624                 reg = its->baser_coll_table;
1625                 break;
1626         default:
1627                 reg = 0;
1628                 break;
1629         }
1630 
1631         return extract_bytes(reg, addr & 7, len);
1632 }
1633 
1634 #define GITS_BASER_RO_MASK      (GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
1635 static void vgic_mmio_write_its_baser(struct kvm *kvm,
1636                                       struct vgic_its *its,
1637                                       gpa_t addr, unsigned int len,
1638                                       unsigned long val)
1639 {
1640         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
1641         u64 entry_size, table_type;
1642         u64 reg, *regptr, clearbits = 0;
1643 
1644         /* When GITS_CTLR.Enable is 1, we ignore write accesses. */
1645         if (its->enabled)
1646                 return;
1647 
1648         switch (BASER_INDEX(addr)) {
1649         case 0:
1650                 regptr = &its->baser_device_table;
1651                 entry_size = abi->dte_esz;
1652                 table_type = GITS_BASER_TYPE_DEVICE;
1653                 break;
1654         case 1:
1655                 regptr = &its->baser_coll_table;
1656                 entry_size = abi->cte_esz;
1657                 table_type = GITS_BASER_TYPE_COLLECTION;
1658                 clearbits = GITS_BASER_INDIRECT;
1659                 break;
1660         default:
1661                 return;
1662         }
1663 
1664         reg = update_64bit_reg(*regptr, addr & 7, len, val);
1665         reg &= ~GITS_BASER_RO_MASK;
1666         reg &= ~clearbits;
1667 
1668         reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
1669         reg |= table_type << GITS_BASER_TYPE_SHIFT;
1670         reg = vgic_sanitise_its_baser(reg);
1671 
1672         *regptr = reg;
1673 
1674         if (!(reg & GITS_BASER_VALID)) {
1675                 /* Take the its_lock to prevent a race with a save/restore */
1676                 mutex_lock(&its->its_lock);
1677                 switch (table_type) {
1678                 case GITS_BASER_TYPE_DEVICE:
1679                         vgic_its_free_device_list(kvm, its);
1680                         break;
1681                 case GITS_BASER_TYPE_COLLECTION:
1682                         vgic_its_free_collection_list(kvm, its);
1683                         break;
1684                 }
1685                 mutex_unlock(&its->its_lock);
1686         }
1687 }
1688 
1689 static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
1690                                              struct vgic_its *its,
1691                                              gpa_t addr, unsigned int len)
1692 {
1693         u32 reg = 0;
1694 
1695         mutex_lock(&its->cmd_lock);
1696         if (its->creadr == its->cwriter)
1697                 reg |= GITS_CTLR_QUIESCENT;
1698         if (its->enabled)
1699                 reg |= GITS_CTLR_ENABLE;
1700         mutex_unlock(&its->cmd_lock);
1701 
1702         return reg;
1703 }
1704 
1705 static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
1706                                      gpa_t addr, unsigned int len,
1707                                      unsigned long val)
1708 {
1709         mutex_lock(&its->cmd_lock);
1710 
1711         /*
1712          * It is UNPREDICTABLE to enable the ITS if the CBASER or either
1713          * of the device/collection BASERs is invalid.
1714          */
1715         if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
1716                 (!(its->baser_device_table & GITS_BASER_VALID) ||
1717                  !(its->baser_coll_table & GITS_BASER_VALID) ||
1718                  !(its->cbaser & GITS_CBASER_VALID)))
1719                 goto out;
1720 
1721         its->enabled = !!(val & GITS_CTLR_ENABLE);
1722         if (!its->enabled)
1723                 vgic_its_invalidate_cache(kvm);
1724 
1725         /*
1726          * Try to process any pending commands. This function bails out early
1727          * if the ITS is disabled or no commands have been queued.
1728          */
1729         vgic_its_process_commands(kvm, its);
1730 
1731 out:
1732         mutex_unlock(&its->cmd_lock);
1733 }
1734 
1735 #define REGISTER_ITS_DESC(off, rd, wr, length, acc)             \
1736 {                                                               \
1737         .reg_offset = off,                                      \
1738         .len = length,                                          \
1739         .access_flags = acc,                                    \
1740         .its_read = rd,                                         \
1741         .its_write = wr,                                        \
1742 }
1743 
1744 #define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
1745 {                                                               \
1746         .reg_offset = off,                                      \
1747         .len = length,                                          \
1748         .access_flags = acc,                                    \
1749         .its_read = rd,                                         \
1750         .its_write = wr,                                        \
1751         .uaccess_its_write = uwr,                               \
1752 }
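
     /*
      * The _UACCESS variant additionally supplies a handler that is only used
      * for userspace accesses via KVM_DEV_ARM_VGIC_GRP_ITS_REGS (see
      * vgic_its_attr_regs_access() below); guest MMIO writes always go
      * through .its_write.
      */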
1753 
1754 static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
1755                               gpa_t addr, unsigned int len, unsigned long val)
1756 {
1757         /* Ignore */
1758 }
1759 
1760 static struct vgic_register_region its_registers[] = {
1761         REGISTER_ITS_DESC(GITS_CTLR,
1762                 vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
1763                 VGIC_ACCESS_32bit),
1764         REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
1765                 vgic_mmio_read_its_iidr, its_mmio_write_wi,
1766                 vgic_mmio_uaccess_write_its_iidr, 4,
1767                 VGIC_ACCESS_32bit),
1768         REGISTER_ITS_DESC(GITS_TYPER,
1769                 vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
1770                 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1771         REGISTER_ITS_DESC(GITS_CBASER,
1772                 vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
1773                 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1774         REGISTER_ITS_DESC(GITS_CWRITER,
1775                 vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
1776                 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1777         REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
1778                 vgic_mmio_read_its_creadr, its_mmio_write_wi,
1779                 vgic_mmio_uaccess_write_its_creadr, 8,
1780                 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1781         REGISTER_ITS_DESC(GITS_BASER,
1782                 vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
1783                 VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1784         REGISTER_ITS_DESC(GITS_IDREGS_BASE,
1785                 vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
1786                 VGIC_ACCESS_32bit),
1787 };
1788 
1789 /* This is called on setting the LPI enable bit in the redistributor. */
1790 void vgic_enable_lpis(struct kvm_vcpu *vcpu)
1791 {
1792         if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
1793                 its_sync_lpi_pending_table(vcpu);
1794 }
1795 
1796 static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
1797                                    u64 addr)
1798 {
1799         struct vgic_io_device *iodev = &its->iodev;
1800         int ret;
1801 
1802         mutex_lock(&kvm->slots_lock);
1803         if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1804                 ret = -EBUSY;
1805                 goto out;
1806         }
1807 
1808         its->vgic_its_base = addr;
1809         iodev->regions = its_registers;
1810         iodev->nr_regions = ARRAY_SIZE(its_registers);
1811         kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);
1812 
1813         iodev->base_addr = its->vgic_its_base;
1814         iodev->iodev_type = IODEV_ITS;
1815         iodev->its = its;
1816         ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
1817                                       KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
1818 out:
1819         mutex_unlock(&kvm->slots_lock);
1820 
1821         return ret;
1822 }
1823 
1824 /* Default is 16 cached LPIs per vcpu */
1825 #define LPI_DEFAULT_PCPU_CACHE_SIZE     16
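     /*
      * Example sizing: a 4-vCPU guest gets a 4 * 16 = 64 entry translation
      * cache, pre-allocated in vgic_lpi_translation_cache_init() below.
      */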
1826 
1827 void vgic_lpi_translation_cache_init(struct kvm *kvm)
1828 {
1829         struct vgic_dist *dist = &kvm->arch.vgic;
1830         unsigned int sz;
1831         int i;
1832 
1833         if (!list_empty(&dist->lpi_translation_cache))
1834                 return;
1835 
1836         sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
1837 
1838         for (i = 0; i < sz; i++) {
1839                 struct vgic_translation_cache_entry *cte;
1840 
1841                 /* An allocation failure is not fatal */
1842                 cte = kzalloc(sizeof(*cte), GFP_KERNEL);
1843                 if (WARN_ON(!cte))
1844                         break;
1845 
1846                 INIT_LIST_HEAD(&cte->entry);
1847                 list_add(&cte->entry, &dist->lpi_translation_cache);
1848         }
1849 }
1850 
1851 void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
1852 {
1853         struct vgic_dist *dist = &kvm->arch.vgic;
1854         struct vgic_translation_cache_entry *cte, *tmp;
1855 
1856         vgic_its_invalidate_cache(kvm);
1857 
1858         list_for_each_entry_safe(cte, tmp,
1859                                  &dist->lpi_translation_cache, entry) {
1860                 list_del(&cte->entry);
1861                 kfree(cte);
1862         }
1863 }
1864 
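     /*
      * Reset values presented to the guest for GITS_BASER<n> and
      * GICR_PROPBASER: normal inner read-allocate write-back memory,
      * inner-shareable, and (for the BASERs) 64K ITS pages, the only page
      * size vgic_sanitise_its_baser() accepts.
      */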
1865 #define INITIAL_BASER_VALUE                                               \
1866         (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)                | \
1867          GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)         | \
1868          GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)             | \
1869          GITS_BASER_PAGE_SIZE_64K)
1870 
1871 #define INITIAL_PROPBASER_VALUE                                           \
1872         (GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)            | \
1873          GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)     | \
1874          GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
1875 
1876 static int vgic_its_create(struct kvm_device *dev, u32 type)
1877 {
1878         struct vgic_its *its;
1879 
1880         if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
1881                 return -ENODEV;
1882 
1883         its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
1884         if (!its)
1885                 return -ENOMEM;
1886 
1887         if (vgic_initialized(dev->kvm)) {
1888                 int ret = vgic_v4_init(dev->kvm);
1889                 if (ret < 0) {
1890                         kfree(its);
1891                         return ret;
1892                 }
1893 
1894                 vgic_lpi_translation_cache_init(dev->kvm);
1895         }
1896 
1897         mutex_init(&its->its_lock);
1898         mutex_init(&its->cmd_lock);
1899 
1900         its->vgic_its_base = VGIC_ADDR_UNDEF;
1901 
1902         INIT_LIST_HEAD(&its->device_list);
1903         INIT_LIST_HEAD(&its->collection_list);
1904 
1905         dev->kvm->arch.vgic.msis_require_devid = true;
1906         dev->kvm->arch.vgic.has_its = true;
1907         its->enabled = false;
1908         its->dev = dev;
1909 
1910         its->baser_device_table = INITIAL_BASER_VALUE                   |
1911                 ((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
1912         its->baser_coll_table = INITIAL_BASER_VALUE |
1913                 ((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
1914         dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;
1915 
1916         dev->private = its;
1917 
1918         return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
1919 }
1920 
1921 static void vgic_its_destroy(struct kvm_device *kvm_dev)
1922 {
1923         struct kvm *kvm = kvm_dev->kvm;
1924         struct vgic_its *its = kvm_dev->private;
1925 
1926         mutex_lock(&its->its_lock);
1927 
1928         vgic_its_free_device_list(kvm, its);
1929         vgic_its_free_collection_list(kvm, its);
1930 
1931         mutex_unlock(&its->its_lock);
1932         kfree(its);
1933         kfree(kvm_dev); /* allocated by kvm_ioctl_create_device(), freed in .destroy */
1934 }
1935 
1936 static int vgic_its_has_attr_regs(struct kvm_device *dev,
1937                                   struct kvm_device_attr *attr)
1938 {
1939         const struct vgic_register_region *region;
1940         gpa_t offset = attr->attr;
1941         int align;
1942 
1943         align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;
1944 
1945         if (offset & align)
1946                 return -EINVAL;
1947 
1948         region = vgic_find_mmio_region(its_registers,
1949                                        ARRAY_SIZE(its_registers),
1950                                        offset);
1951         if (!region)
1952                 return -ENXIO;
1953 
1954         return 0;
1955 }
1956 
1957 static int vgic_its_attr_regs_access(struct kvm_device *dev,
1958                                      struct kvm_device_attr *attr,
1959                                      u64 *reg, bool is_write)
1960 {
1961         const struct vgic_register_region *region;
1962         struct vgic_its *its;
1963         gpa_t addr, offset;
1964         unsigned int len;
1965         int align, ret = 0;
1966 
1967         its = dev->private;
1968         offset = attr->attr;
1969 
1970         /*
1971          * Although the spec supports upper/lower 32-bit accesses to
1972          * 64-bit ITS registers, the userspace ABI requires 64-bit
1973          * accesses to all 64-bit wide registers. We therefore only
1974          * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
1975          * registers
1976          */
1977         if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
1978                 align = 0x3;
1979         else
1980                 align = 0x7;
1981 
1982         if (offset & align)
1983                 return -EINVAL;
1984 
1985         mutex_lock(&dev->kvm->lock);
1986 
1987         if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1988                 ret = -ENXIO;
1989                 goto out;
1990         }
1991 
1992         region = vgic_find_mmio_region(its_registers,
1993                                        ARRAY_SIZE(its_registers),
1994                                        offset);
1995         if (!region) {
1996                 ret = -ENXIO;
1997                 goto out;
1998         }
1999 
2000         if (!lock_all_vcpus(dev->kvm)) {
2001                 ret = -EBUSY;
2002                 goto out;
2003         }
2004 
2005         addr = its->vgic_its_base + offset;
2006 
2007         len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
2008 
2009         if (is_write) {
2010                 if (region->uaccess_its_write)
2011                         ret = region->uaccess_its_write(dev->kvm, its, addr,
2012                                                         len, *reg);
2013                 else
2014                         region->its_write(dev->kvm, its, addr, len, *reg);
2015         } else {
2016                 *reg = region->its_read(dev->kvm, its, addr, len);
2017         }
2018         unlock_all_vcpus(dev->kvm);
2019 out:
2020         mutex_unlock(&dev->kvm->lock);
2021         return ret;
2022 }
2023 
2024 static u32 compute_next_devid_offset(struct list_head *h,
2025                                      struct its_device *dev)
2026 {
2027         struct its_device *next;
2028         u32 next_offset;
2029 
2030         if (list_is_last(&dev->dev_list, h))
2031                 return 0;
2032         next = list_next_entry(dev, dev_list);
2033         next_offset = next->device_id - dev->device_id;
2034 
2035         return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
2036 }
2037 
2038 static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
2039 {
2040         struct its_ite *next;
2041         u32 next_offset;
2042 
2043         if (list_is_last(&ite->ite_list, h))
2044                 return 0;
2045         next = list_next_entry(ite, ite_list);
2046         next_offset = next->event_id - ite->event_id;
2047 
2048         return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
2049 }
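
     /*
      * Worked example of the "next" delta encoding used by the save format:
      * for devices with IDs {0, 3, 7}, the saved entry for device 0 carries a
      * next offset of 3, the entry for device 3 carries 4, and the last entry
      * carries 0.  The same scheme is used for event IDs within an ITT, and
      * offsets are clamped to the largest encodable value
      * (VITS_DTE_MAX_DEVID_OFFSET / VITS_ITE_MAX_EVENTID_OFFSET).
      */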
2050 
2051 /**
2052  * entry_fn_t - Callback called on a table entry restore path
2053  * @its: its handle
2054  * @id: id of the entry
2055  * @entry: pointer to the entry
2056  * @opaque: opaque data pointer passed through to the callback
2057  *
2058  * Return: < 0 on error, 0 if last element was identified, id offset to next
2059  * element otherwise
2060  */
2061 typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
2062                           void *opaque);
2063 
2064 /**
2065  * scan_its_table - Scan a contiguous table in guest RAM and apply a function
2066  * to each entry
2067  *
2068  * @its: its handle
2069  * @base: base gpa of the table
2070  * @size: size of the table in bytes
2071  * @esz: entry size in bytes
2072  * @start_id: the ID of the first entry in the table
2073  * (non-zero for 2nd level tables)
2074  * @fn: function to apply on each entry
2075  *
2076  * Return: < 0 on error, 0 if last element was identified, 1 otherwise
2077  * (the last element may not be found on second level tables)
2078  */
2079 static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
2080                           int start_id, entry_fn_t fn, void *opaque)
2081 {
2082         struct kvm *kvm = its->dev->kvm;
2083         unsigned long len = size;
2084         int id = start_id;
2085         gpa_t gpa = base;
2086         char entry[ESZ_MAX];
2087         int ret;
2088 
2089         memset(entry, 0, esz);
2090 
2091         while (len > 0) {
2092                 int next_offset;
2093                 size_t byte_offset;
2094 
2095                 ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
2096                 if (ret)
2097                         return ret;
2098 
2099                 next_offset = fn(its, id, entry, opaque);
2100                 if (next_offset <= 0)
2101                         return next_offset;
2102 
2103                 byte_offset = next_offset * esz;
2104                 id += next_offset;
2105                 gpa += byte_offset;
2106                 len -= byte_offset;
2107         }
2108         return 1;
2109 }
2110 
2111 /**
2112  * vgic_its_save_ite - Save an interrupt translation entry at @gpa
2113  */
2114 static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
2115                               struct its_ite *ite, gpa_t gpa, int ite_esz)
2116 {
2117         struct kvm *kvm = its->dev->kvm;
2118         u32 next_offset;
2119         u64 val;
2120 
2121         next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
2122         val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
2123                ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
2124                 ite->collection->collection_id;
2125         val = cpu_to_le64(val);
2126         return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
2127 }
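
     /*
      * Layout of the 64-bit ITE written above and parsed by
      * vgic_its_restore_ite() below (KVM ITS save/restore ABI rev 0; the
      * exact shift/mask values are assumed from the KVM_ITS_ITE_* definitions
      * in vgic.h): bits[63:48] next event ID offset, bits[47:16] physical LPI
      * INTID, bits[15:0] collection ID (ICID).
      */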
2128 
2129 /**
2130  * vgic_its_restore_ite - restore an interrupt translation entry
2131  * @event_id: id used for indexing
2132  * @ptr: pointer to the ITE entry
2133  * @opaque: pointer to the its_device
2134  */
2135 static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
2136                                 void *ptr, void *opaque)
2137 {
2138         struct its_device *dev = (struct its_device *)opaque;
2139         struct its_collection *collection;
2140         struct kvm *kvm = its->dev->kvm;
2141         struct kvm_vcpu *vcpu = NULL;
2142         u64 val;
2143         u64 *p = (u64 *)ptr;
2144         struct vgic_irq *irq;
2145         u32 coll_id, lpi_id;
2146         struct its_ite *ite;
2147         u32 offset;
2148 
2149         val = *p;
2150 
2151         val = le64_to_cpu(val);
2152 
2153         coll_id = val & KVM_ITS_ITE_ICID_MASK;
2154         lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;
2155 
2156         if (!lpi_id)
2157                 return 1; /* invalid entry, no choice but to scan next entry */
2158 
2159         if (lpi_id < VGIC_MIN_LPI)
2160                 return -EINVAL;
2161 
2162         offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
2163         if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
2164                 return -EINVAL;
2165 
2166         collection = find_collection(its, coll_id);
2167         if (!collection)
2168                 return -EINVAL;
2169 
2170         ite = vgic_its_alloc_ite(dev, collection, event_id);
2171         if (IS_ERR(ite))
2172                 return PTR_ERR(ite);
2173 
2174         if (its_is_collection_mapped(collection))
2175                 vcpu = kvm_get_vcpu(kvm, collection->target_addr);
2176 
2177         irq = vgic_add_lpi(kvm, lpi_id, vcpu);
2178         if (IS_ERR(irq))
2179                 return PTR_ERR(irq);
2180         ite->irq = irq;
2181 
2182         return offset;
2183 }
2184 
2185 static int vgic_its_ite_cmp(void *priv, struct list_head *a,
2186                             struct list_head *b)
2187 {
2188         struct its_ite *itea = container_of(a, struct its_ite, ite_list);
2189         struct its_ite *iteb = container_of(b, struct its_ite, ite_list);
2190 
2191         if (itea->event_id < iteb->event_id)
2192                 return -1;
2193         else
2194                 return 1;
2195 }
2196 
2197 static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
2198 {
2199         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2200         gpa_t base = device->itt_addr;
2201         struct its_ite *ite;
2202         int ret;
2203         int ite_esz = abi->ite_esz;
2204 
2205         list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);
2206 
2207         list_for_each_entry(ite, &device->itt_head, ite_list) {
2208                 gpa_t gpa = base + ite->event_id * ite_esz;
2209 
2210                 /*
2211                  * If an LPI carries the HW bit, this means that this
2212                  * interrupt is controlled by GICv4, and we do not
2213                  * have direct access to that state. Let's simply fail
2214                  * the save operation...
2215                  */
2216                 if (ite->irq->hw)
2217                         return -EACCES;
2218 
2219                 ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
2220                 if (ret)
2221                         return ret;
2222         }
2223         return 0;
2224 }
2225 
2226 /**
2227  * vgic_its_restore_itt - restore the ITT of a device
2228  *
2229  * @its: its handle
2230  * @dev: device handle
2231  *
2232  * Return: 0 on success, < 0 on error
2233  */
2234 static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
2235 {
2236         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2237         gpa_t base = dev->itt_addr;
2238         int ret;
2239         int ite_esz = abi->ite_esz;
2240         size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;
2241 
2242         ret = scan_its_table(its, base, max_size, ite_esz, 0,
2243                              vgic_its_restore_ite, dev);
2244 
2245         /* scan_its_table returns +1 if all ITEs are invalid */
2246         if (ret > 0)
2247                 ret = 0;
2248 
2249         return ret;
2250 }
2251 
2252 /**
2253  * vgic_its_save_dte - Save a device table entry at a given GPA
2254  *
2255  * @its: ITS handle
2256  * @dev: ITS device
2257  * @ptr: GPA
2258  */
2259 static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
2260                              gpa_t ptr, int dte_esz)
2261 {
2262         struct kvm *kvm = its->dev->kvm;
2263         u64 val, itt_addr_field;
2264         u32 next_offset;
2265 
2266         itt_addr_field = dev->itt_addr >> 8;
2267         next_offset = compute_next_devid_offset(&its->device_list, dev);
2268         val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
2269                ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
2270                (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
2271                 (dev->num_eventid_bits - 1));
2272         val = cpu_to_le64(val);
2273         return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
2274 }
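
     /*
      * Layout of the 64-bit DTE written above and parsed by
      * vgic_its_restore_dte() below (rev 0 ABI; shift/mask values assumed
      * from the KVM_ITS_DTE_* definitions in vgic.h): bit[63] valid,
      * bits[62:49] next device ID offset, bits[48:5] ITT address >> 8,
      * bits[4:0] num_eventid_bits - 1.
      */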
2275 
2276 /**
2277  * vgic_its_restore_dte - restore a device table entry
2278  *
2279  * @its: its handle
2280  * @id: device id the DTE corresponds to
2281  * @ptr: kernel VA where the 8 byte DTE is located
2282  * @opaque: unused
2283  *
2284  * Return: < 0 on error, 0 if the dte is the last one, id offset to the
2285  * next dte otherwise
2286  */
2287 static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
2288                                 void *ptr, void *opaque)
2289 {
2290         struct its_device *dev;
2291         gpa_t itt_addr;
2292         u8 num_eventid_bits;
2293         u64 entry = *(u64 *)ptr;
2294         bool valid;
2295         u32 offset;
2296         int ret;
2297 
2298         entry = le64_to_cpu(entry);
2299 
2300         valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
2301         num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
2302         itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
2303                         >> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;
2304 
2305         if (!valid)
2306                 return 1;
2307 
2308         /* dte entry is valid */
2309         offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;
2310 
2311         dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
2312         if (IS_ERR(dev))
2313                 return PTR_ERR(dev);
2314 
2315         ret = vgic_its_restore_itt(its, dev);
2316         if (ret) {
2317                 vgic_its_free_device(its->dev->kvm, dev);
2318                 return ret;
2319         }
2320 
2321         return offset;
2322 }
2323 
2324 static int vgic_its_device_cmp(void *priv, struct list_head *a,
2325                                struct list_head *b)
2326 {
2327         struct its_device *deva = container_of(a, struct its_device, dev_list);
2328         struct its_device *devb = container_of(b, struct its_device, dev_list);
2329 
2330         if (deva->device_id < devb->device_id)
2331                 return -1;
2332         else
2333                 return 1;
2334 }
2335 
2336 /**
2337  * vgic_its_save_device_tables - Save the device table and all ITTs
2338  * into guest RAM
2339  *
2340  * L1/L2 handling is hidden by the vgic_its_check_id() helper, which
2341  * directly returns the GPA of the device entry
2342  */
2343 static int vgic_its_save_device_tables(struct vgic_its *its)
2344 {
2345         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2346         u64 baser = its->baser_device_table;
2347         struct its_device *dev;
2348         int dte_esz = abi->dte_esz;
2349 
2350         if (!(baser & GITS_BASER_VALID))
2351                 return 0;
2352 
2353         list_sort(NULL, &its->device_list, vgic_its_device_cmp);
2354 
2355         list_for_each_entry(dev, &its->device_list, dev_list) {
2356                 int ret;
2357                 gpa_t eaddr;
2358 
2359                 if (!vgic_its_check_id(its, baser,
2360                                        dev->device_id, &eaddr))
2361                         return -EINVAL;
2362 
2363                 ret = vgic_its_save_itt(its, dev);
2364                 if (ret)
2365                         return ret;
2366 
2367                 ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
2368                 if (ret)
2369                         return ret;
2370         }
2371         return 0;
2372 }
2373 
2374 /**
2375  * handle_l1_dte - callback used for L1 device table entries (2 stage case)
2376  *
2377  * @its: its handle
2378  * @id: index of the entry in the L1 table
2379  * @addr: kernel VA
2380  * @opaque: unused
2381  *
2382  * L1 table entries are scanned by steps of 1 entry
2383  * Return < 0 if error, 0 if last dte was found when scanning the L2
2384  * table, +1 otherwise (meaning next L1 entry must be scanned)
2385  */
2386 static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
2387                          void *opaque)
2388 {
2389         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2390         int l2_start_id = id * (SZ_64K / abi->dte_esz);
2391         u64 entry = *(u64 *)addr;
2392         int dte_esz = abi->dte_esz;
2393         gpa_t gpa;
2394         int ret;
2395 
2396         entry = le64_to_cpu(entry);
2397 
2398         if (!(entry & KVM_ITS_L1E_VALID_MASK))
2399                 return 1;
2400 
2401         gpa = entry & KVM_ITS_L1E_ADDR_MASK;
2402 
2403         ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
2404                              l2_start_id, vgic_its_restore_dte, NULL);
2405 
2406         return ret;
2407 }
2408 
2409 /**
2410  * vgic_its_restore_device_tables - Restore the device table and all ITT
2411  * from guest RAM to internal data structs
2412  */
2413 static int vgic_its_restore_device_tables(struct vgic_its *its)
2414 {
2415         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2416         u64 baser = its->baser_device_table;
2417         int l1_esz, ret;
2418         int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2419         gpa_t l1_gpa;
2420 
2421         if (!(baser & GITS_BASER_VALID))
2422                 return 0;
2423 
2424         l1_gpa = GITS_BASER_ADDR_48_to_52(baser);
2425 
2426         if (baser & GITS_BASER_INDIRECT) {
2427                 l1_esz = GITS_LVL1_ENTRY_SIZE;
2428                 ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2429                                      handle_l1_dte, NULL);
2430         } else {
2431                 l1_esz = abi->dte_esz;
2432                 ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2433                                      vgic_its_restore_dte, NULL);
2434         }
2435 
2436         /* scan_its_table returns +1 if all entries are invalid */
2437         if (ret > 0)
2438                 ret = 0;
2439 
2440         return ret;
2441 }
2442 
2443 static int vgic_its_save_cte(struct vgic_its *its,
2444                              struct its_collection *collection,
2445                              gpa_t gpa, int esz)
2446 {
2447         u64 val;
2448 
2449         val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
2450                ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
2451                collection->collection_id);
2452         val = cpu_to_le64(val);
2453         return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
2454 }
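
     /*
      * The saved CTE packs, per the code above: bit[63] valid, the target
      * redistributor (vcpu index) starting at KVM_ITS_CTE_RDBASE_SHIFT, and
      * the collection ID in the low bits covered by KVM_ITS_CTE_ICID_MASK.
      */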
2455 
2456 static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
2457 {
2458         struct its_collection *collection;
2459         struct kvm *kvm = its->dev->kvm;
2460         u32 target_addr, coll_id;
2461         u64 val;
2462         int ret;
2463 
2464         BUG_ON(esz > sizeof(val));
2465         ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
2466         if (ret)
2467                 return ret;
2468         val = le64_to_cpu(val);
2469         if (!(val & KVM_ITS_CTE_VALID_MASK))
2470                 return 0;
2471 
2472         target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
2473         coll_id = val & KVM_ITS_CTE_ICID_MASK;
2474 
2475         if (target_addr != COLLECTION_NOT_MAPPED &&
2476             target_addr >= atomic_read(&kvm->online_vcpus))
2477                 return -EINVAL;
2478 
2479         collection = find_collection(its, coll_id);
2480         if (collection)
2481                 return -EEXIST;
2482         ret = vgic_its_alloc_collection(its, &collection, coll_id);
2483         if (ret)
2484                 return ret;
2485         collection->target_addr = target_addr;
2486         return 1;
2487 }
2488 
2489 /**
2490  * vgic_its_save_collection_table - Save the collection table into
2491  * guest RAM
2492  */
2493 static int vgic_its_save_collection_table(struct vgic_its *its)
2494 {
2495         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2496         u64 baser = its->baser_coll_table;
2497         gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
2498         struct its_collection *collection;
2499         u64 val;
2500         size_t max_size, filled = 0;
2501         int ret, cte_esz = abi->cte_esz;
2502 
2503         if (!(baser & GITS_BASER_VALID))
2504                 return 0;
2505 
2506         max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2507 
2508         list_for_each_entry(collection, &its->collection_list, coll_list) {
2509                 ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
2510                 if (ret)
2511                         return ret;
2512                 gpa += cte_esz;
2513                 filled += cte_esz;
2514         }
2515 
2516         if (filled == max_size)
2517                 return 0;
2518 
2519         /*
2520          * The table is not fully filled; add a final dummy element
2521          * with the valid bit unset.
2522          */
2523         val = 0;
2524         BUG_ON(cte_esz > sizeof(val));
2525         ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
2526         return ret;
2527 }
2528 
2529 /**
2530  * vgic_its_restore_collection_table - reads the collection table
2531  * in guest memory and restores the ITS internal state. Requires the
2532  * BASER registers to have been restored beforehand.
2533  */
2534 static int vgic_its_restore_collection_table(struct vgic_its *its)
2535 {
2536         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2537         u64 baser = its->baser_coll_table;
2538         int cte_esz = abi->cte_esz;
2539         size_t max_size, read = 0;
2540         gpa_t gpa;
2541         int ret;
2542 
2543         if (!(baser & GITS_BASER_VALID))
2544                 return 0;
2545 
2546         gpa = GITS_BASER_ADDR_48_to_52(baser);
2547 
2548         max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2549 
2550         while (read < max_size) {
2551                 ret = vgic_its_restore_cte(its, gpa, cte_esz);
2552                 if (ret <= 0)
2553                         break;
2554                 gpa += cte_esz;
2555                 read += cte_esz;
2556         }
2557 
2558         if (ret > 0)
2559                 return 0;
2560 
2561         return ret;
2562 }
2563 
2564 /**
2565  * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
2566  * according to v0 ABI
2567  */
2568 static int vgic_its_save_tables_v0(struct vgic_its *its)
2569 {
2570         int ret;
2571 
2572         ret = vgic_its_save_device_tables(its);
2573         if (ret)
2574                 return ret;
2575 
2576         return vgic_its_save_collection_table(its);
2577 }
2578 
2579 /**
2580  * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
2581  * to internal data structs according to v0 ABI
2582  *
2583  */
2584 static int vgic_its_restore_tables_v0(struct vgic_its *its)
2585 {
2586         int ret;
2587 
2588         ret = vgic_its_restore_collection_table(its);
2589         if (ret)
2590                 return ret;
2591 
2592         return vgic_its_restore_device_tables(its);
2593 }
2594 
2595 static int vgic_its_commit_v0(struct vgic_its *its)
2596 {
2597         const struct vgic_its_abi *abi;
2598 
2599         abi = vgic_its_get_abi(its);
2600         its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2601         its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2602 
2603         its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
2604                                         << GITS_BASER_ENTRY_SIZE_SHIFT);
2605 
2606         its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
2607                                         << GITS_BASER_ENTRY_SIZE_SHIFT);
2608         return 0;
2609 }
2610 
2611 static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
2612 {
2613         /* We need to keep the ABI specific field values */
2614         its->baser_coll_table &= ~GITS_BASER_VALID;
2615         its->baser_device_table &= ~GITS_BASER_VALID;
2616         its->cbaser = 0;
2617         its->creadr = 0;
2618         its->cwriter = 0;
2619         its->enabled = 0;
2620         vgic_its_free_device_list(kvm, its);
2621         vgic_its_free_collection_list(kvm, its);
2622 }
2623 
2624 static int vgic_its_has_attr(struct kvm_device *dev,
2625                              struct kvm_device_attr *attr)
2626 {
2627         switch (attr->group) {
2628         case KVM_DEV_ARM_VGIC_GRP_ADDR:
2629                 switch (attr->attr) {
2630                 case KVM_VGIC_ITS_ADDR_TYPE:
2631                         return 0;
2632                 }
2633                 break;
2634         case KVM_DEV_ARM_VGIC_GRP_CTRL:
2635                 switch (attr->attr) {
2636                 case KVM_DEV_ARM_VGIC_CTRL_INIT:
2637                         return 0;
2638                 case KVM_DEV_ARM_ITS_CTRL_RESET:
2639                         return 0;
2640                 case KVM_DEV_ARM_ITS_SAVE_TABLES:
2641                         return 0;
2642                 case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2643                         return 0;
2644                 }
2645                 break;
2646         case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
2647                 return vgic_its_has_attr_regs(dev, attr);
2648         }
2649         return -ENXIO;
2650 }
2651 
2652 static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
2653 {
2654         const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2655         int ret = 0;
2656 
2657         if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
2658                 return 0;
2659 
2660         mutex_lock(&kvm->lock);
2661         mutex_lock(&its->its_lock);
2662 
2663         if (!lock_all_vcpus(kvm)) {
2664                 mutex_unlock(&its->its_lock);
2665                 mutex_unlock(&kvm->lock);
2666                 return -EBUSY;
2667         }
2668 
2669         switch (attr) {
2670         case KVM_DEV_ARM_ITS_CTRL_RESET:
2671                 vgic_its_reset(kvm, its);
2672                 break;
2673         case KVM_DEV_ARM_ITS_SAVE_TABLES:
2674                 ret = abi->save_tables(its);
2675                 break;
2676         case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2677                 ret = abi->restore_tables(its);
2678                 break;
2679         }
2680 
2681         unlock_all_vcpus(kvm);
2682         mutex_unlock(&its->its_lock);
2683         mutex_unlock(&kvm->lock);
2684         return ret;
2685 }
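
     /*
      * Userspace view (illustrative sketch only, not part of this file): a
      * VMM typically flushes the ITS state to guest RAM before migration by
      * issuing KVM_SET_DEVICE_ATTR on the ITS device fd it obtained from
      * KVM_CREATE_DEVICE, e.g.:
      *
      *         struct kvm_device_attr attr = {
      *                 .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
      *                 .attr  = KVM_DEV_ARM_ITS_SAVE_TABLES,
      *         };
      *         if (ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr))
      *                 err(1, "saving ITS tables failed");
      *
      * which reaches vgic_its_ctrl() above via vgic_its_set_attr() below.
      */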
2686 
2687 static int vgic_its_set_attr(struct kvm_device *dev,
2688                              struct kvm_device_attr *attr)
2689 {
2690         struct vgic_its *its = dev->private;
2691         int ret;
2692 
2693         switch (attr->group) {
2694         case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2695                 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2696                 unsigned long type = (unsigned long)attr->attr;
2697                 u64 addr;
2698 
2699                 if (type != KVM_VGIC_ITS_ADDR_TYPE)
2700                         return -ENODEV;
2701 
2702                 if (copy_from_user(&addr, uaddr, sizeof(addr)))
2703                         return -EFAULT;
2704 
2705                 ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
2706                                         addr, SZ_64K);
2707                 if (ret)
2708                         return ret;
2709 
2710                 return vgic_register_its_iodev(dev->kvm, its, addr);
2711         }
2712         case KVM_DEV_ARM_VGIC_GRP_CTRL:
2713                 return vgic_its_ctrl(dev->kvm, its, attr->attr);
2714         case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2715                 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2716                 u64 reg;
2717 
2718                 if (get_user(reg, uaddr))
2719                         return -EFAULT;
2720 
2721                 return vgic_its_attr_regs_access(dev, attr, &reg, true);
2722         }
2723         }
2724         return -ENXIO;
2725 }
2726 
2727 static int vgic_its_get_attr(struct kvm_device *dev,
2728                              struct kvm_device_attr *attr)
2729 {
2730         switch (attr->group) {
2731         case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2732                 struct vgic_its *its = dev->private;
2733                 u64 addr = its->vgic_its_base;
2734                 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2735                 unsigned long type = (unsigned long)attr->attr;
2736 
2737                 if (type != KVM_VGIC_ITS_ADDR_TYPE)
2738                         return -ENODEV;
2739 
2740                 if (copy_to_user(uaddr, &addr, sizeof(addr)))
2741                         return -EFAULT;
2742                 break;
2743         }
2744         case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2745                 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2746                 u64 reg;
2747                 int ret;
2748 
2749                 ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
2750                 if (ret)
2751                         return ret;
2752                 return put_user(reg, uaddr);
2753         }
2754         default:
2755                 return -ENXIO;
2756         }
2757 
2758         return 0;
2759 }
2760 
2761 static struct kvm_device_ops kvm_arm_vgic_its_ops = {
2762         .name = "kvm-arm-vgic-its",
2763         .create = vgic_its_create,
2764         .destroy = vgic_its_destroy,
2765         .set_attr = vgic_its_set_attr,
2766         .get_attr = vgic_its_get_attr,
2767         .has_attr = vgic_its_has_attr,
2768 };
2769 
2770 int kvm_vgic_register_its_device(void)
2771 {
2772         return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
2773                                        KVM_DEV_TYPE_ARM_VGIC_ITS);
2774 }
