root/include/uapi/linux/vfio.h

   1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
   2 /*
   3  * VFIO API definition
   4  *
   5  * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
   6  *     Author: Alex Williamson <alex.williamson@redhat.com>
   7  *
   8  * This program is free software; you can redistribute it and/or modify
   9  * it under the terms of the GNU General Public License version 2 as
  10  * published by the Free Software Foundation.
  11  */
  12 #ifndef _UAPIVFIO_H
  13 #define _UAPIVFIO_H
  14 
  15 #include <linux/types.h>
  16 #include <linux/ioctl.h>
  17 
  18 #define VFIO_API_VERSION        0
  19 
  20 
  21 /* Kernel & User level defines for VFIO IOCTLs. */
  22 
  23 /* Extensions */
  24 
  25 #define VFIO_TYPE1_IOMMU                1
  26 #define VFIO_SPAPR_TCE_IOMMU            2
  27 #define VFIO_TYPE1v2_IOMMU              3
  28 /*
  29  * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping).  This
  30  * capability is subject to change as groups are added or removed.
  31  */
  32 #define VFIO_DMA_CC_IOMMU               4
  33 
  34 /* Check if EEH is supported */
  35 #define VFIO_EEH                        5
  36 
  37 /* Two-stage IOMMU */
  38 #define VFIO_TYPE1_NESTING_IOMMU        6       /* Implies v2 */
  39 
  40 #define VFIO_SPAPR_TCE_v2_IOMMU         7
  41 
  42 /*
  43  * The No-IOMMU IOMMU offers no translation or isolation for devices and
  44  * supports no ioctls outside of VFIO_CHECK_EXTENSION.  Use of VFIO's No-IOMMU
  45  * code will taint the host kernel and should be used with extreme caution.
  46  */
  47 #define VFIO_NOIOMMU_IOMMU              8
  48 
  49 /*
  50  * The IOCTL interface is designed for extensibility by embedding the
  51  * structure length (argsz) and flags into structures passed between
  52  * kernel and userspace.  We therefore use the _IO() macro for these
  53  * defines to avoid implicitly embedding a size into the ioctl request.
  54  * As structure fields are added, argsz will increase to match and flag
  55  * bits will be defined to indicate additional fields with valid data.
  56  * It's *always* the caller's responsibility to indicate the size of
  57  * the structure passed by setting argsz appropriately.
  58  */
  59 
  60 #define VFIO_TYPE       (';')
  61 #define VFIO_BASE       100
  62 
  63 /*
  64  * For extension of INFO ioctls, VFIO makes use of a capability chain
  65  * designed after PCI/e capabilities.  A flag bit indicates whether
  66  * this capability chain is supported and a field defined in the fixed
  67  * structure defines the offset of the first capability in the chain.
  68  * This field is only valid when the corresponding bit in the flags
  69  * bitmap is set.  This offset field is relative to the start of the
  70  * INFO buffer, as is the next field within each capability header.
  71  * The id within the header is a shared address space per INFO ioctl,
  72  * while the version field is specific to the capability id.  The
  73  * contents following the header are specific to the capability id.
  74  */
  75 struct vfio_info_cap_header {
  76         __u16   id;             /* Identifies capability */
  77         __u16   version;        /* Version specific to the capability ID */
  78         __u32   next;           /* Offset of next capability */
  79 };
  80 
  81 /*
  82  * Callers of INFO ioctls passing insufficiently sized buffers will see
  83  * the capability chain flag bit set, a zero value for the first capability
  84  * offset (if available within the provided argsz), and argsz will be
  85  * updated to report the necessary buffer size.  For compatibility, the
  86  * INFO ioctl will not report error in this case, but the capability chain
  87  * will not be available.
  88  */
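
/*
 * Illustrative helper, not part of this header: one way userspace might walk
 * a capability chain returned by an INFO ioctl.  The function name and the
 * "buf"/"cap_offset" parameters are hypothetical; assumes <stddef.h> and
 * <linux/vfio.h>.
 *
 *    static struct vfio_info_cap_header *
 *    vfio_find_cap(void *buf, __u32 cap_offset, __u16 id)
 *    {
 *            struct vfio_info_cap_header *hdr;
 *
 *            while (cap_offset) {
 *                    hdr = (struct vfio_info_cap_header *)((char *)buf + cap_offset);
 *                    if (hdr->id == id)
 *                            return hdr;
 *                    cap_offset = hdr->next;    // a next offset of zero ends the chain
 *            }
 *            return NULL;
 *    }
 */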
  89 
  90 /* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */
  91 
  92 /**
  93  * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
  94  *
  95  * Report the version of the VFIO API.  This allows us to bump the entire
  96  * API version should we later need to add or change features in incompatible
  97  * ways.
  98  * Return: VFIO_API_VERSION
  99  * Availability: Always
 100  */
 101 #define VFIO_GET_API_VERSION            _IO(VFIO_TYPE, VFIO_BASE + 0)
 102 
 103 /**
 104  * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
 105  *
 106  * Check whether an extension is supported.
 107  * Return: 0 if not supported, 1 (or some other positive integer) if supported.
 108  * Availability: Always
 109  */
 110 #define VFIO_CHECK_EXTENSION            _IO(VFIO_TYPE, VFIO_BASE + 1)
 111 
 112 /**
 113  * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
 114  *
 115  * Set the iommu to the given type.  The type must be supported by an
 116  * iommu driver as verified by calling CHECK_EXTENSION using the same
 117  * type.  A group must be set to this file descriptor before this
 118  * ioctl is available.  The IOMMU interfaces enabled by this call are
 119  * specific to the value set.
 120  * Return: 0 on success, -errno on failure
 121  * Availability: When VFIO group attached
 122  */
 123 #define VFIO_SET_IOMMU                  _IO(VFIO_TYPE, VFIO_BASE + 2)
 124 
 125 /* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */
 126 
 127 /**
 128  * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
 129  *                                              struct vfio_group_status)
 130  *
 131  * Retrieve information about the group.  Fills in provided
 132  * struct vfio_group_status.  Caller sets argsz.
 133  * Return: 0 on success, -errno on failure.
 134  * Availability: Always
 135  */
 136 struct vfio_group_status {
 137         __u32   argsz;
 138         __u32   flags;
 139 #define VFIO_GROUP_FLAGS_VIABLE         (1 << 0)
 140 #define VFIO_GROUP_FLAGS_CONTAINER_SET  (1 << 1)
 141 };
 142 #define VFIO_GROUP_GET_STATUS           _IO(VFIO_TYPE, VFIO_BASE + 3)
 143 
 144 /**
 145  * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
 146  *
 147  * Set the container for the VFIO group to the open VFIO file
 148  * descriptor provided.  Groups may only belong to a single
 149  * container.  Containers may, at their discretion, support multiple
 150  * groups.  Only when a container is set are all of the interfaces
 151  * of the VFIO file descriptor and the VFIO group file descriptor
 152  * available to the user.
 153  * Return: 0 on success, -errno on failure.
 154  * Availability: Always
 155  */
 156 #define VFIO_GROUP_SET_CONTAINER        _IO(VFIO_TYPE, VFIO_BASE + 4)
 157 
 158 /**
 159  * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
 160  *
 161  * Remove the group from the attached container.  This is the
 162  * opposite of the SET_CONTAINER call and returns the group to
 163  * an initial state.  All device file descriptors must be released
 164  * prior to calling this interface.  When removing the last group
 165  * from a container, the IOMMU will be disabled and all state lost,
 166  * effectively also returning the VFIO file descriptor to an initial
 167  * state.
 168  * Return: 0 on success, -errno on failure.
 169  * Availability: When attached to container
 170  */
 171 #define VFIO_GROUP_UNSET_CONTAINER      _IO(VFIO_TYPE, VFIO_BASE + 5)
 172 
 173 /**
 174  * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
 175  *
 176  * Return a new file descriptor for the device object described by
 177  * the provided string.  The string should match a device listed in
 178  * the devices subdirectory of the IOMMU group sysfs entry.  The
 179  * group containing the device must already be added to this context.
 180  * Return: new file descriptor on success, -errno on failure.
 181  * Availability: When attached to container
 182  */
 183 #define VFIO_GROUP_GET_DEVICE_FD        _IO(VFIO_TYPE, VFIO_BASE + 6)
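
/*
 * Illustrative userspace sketch, not part of this header: the usual
 * container -> group -> device open sequence.  The group number, device name
 * and error handling are hypothetical; assumes <fcntl.h>, <sys/ioctl.h> and
 * <linux/vfio.h>.
 *
 *    static int vfio_open_device(void)
 *    {
 *            int container, group, device;
 *            struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *            container = open("/dev/vfio/vfio", O_RDWR);
 *            group = open("/dev/vfio/26", O_RDWR);        // hypothetical group
 *            if (container < 0 || group < 0)
 *                    return -1;
 *            if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *                    return -1;                           // unknown API version
 *            if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *                    return -1;                           // Type1 IOMMU unsupported
 *            ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *            if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *                    return -1;                           // group not viable
 *            ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *            ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *            device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 *            return device;                               // device fd, or < 0 on error
 *    }
 */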
 184 
 185 /* --------------- IOCTLs for DEVICE file descriptors --------------- */
 186 
 187 /**
 188  * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
 189  *                                              struct vfio_device_info)
 190  *
 191  * Retrieve information about the device.  Fills in provided
 192  * struct vfio_device_info.  Caller sets argsz.
 193  * Return: 0 on success, -errno on failure.
 194  */
 195 struct vfio_device_info {
 196         __u32   argsz;
 197         __u32   flags;
 198 #define VFIO_DEVICE_FLAGS_RESET (1 << 0)        /* Device supports reset */
 199 #define VFIO_DEVICE_FLAGS_PCI   (1 << 1)        /* vfio-pci device */
 200 #define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2)     /* vfio-platform device */
 201 #define VFIO_DEVICE_FLAGS_AMBA  (1 << 3)        /* vfio-amba device */
 202 #define VFIO_DEVICE_FLAGS_CCW   (1 << 4)        /* vfio-ccw device */
 203 #define VFIO_DEVICE_FLAGS_AP    (1 << 5)        /* vfio-ap device */
 204         __u32   num_regions;    /* Max region index + 1 */
 205         __u32   num_irqs;       /* Max IRQ index + 1 */
 206 };
 207 #define VFIO_DEVICE_GET_INFO            _IO(VFIO_TYPE, VFIO_BASE + 7)
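
/*
 * Illustrative userspace sketch, not part of this header: querying device
 * info using the argsz convention described near the top of this file.
 * "device" is an open device fd from VFIO_GROUP_GET_DEVICE_FD; assumes
 * <sys/ioctl.h>, <err.h> and <linux/vfio.h>.
 *
 *    struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *    if (ioctl(device, VFIO_DEVICE_GET_INFO, &info) < 0)
 *            err(1, "VFIO_DEVICE_GET_INFO");
 *    // info.num_regions and info.num_irqs bound the index space used by the
 *    // REGION_INFO and IRQ_INFO ioctls below.
 */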
 208 
 209 /*
 210  * A vendor driver using the mediated device framework should provide a device_api
 211  * attribute in its supported type attribute groups.  The device API string should be
 212  * one of the following, corresponding to the device flags in struct vfio_device_info.
 213  */
 214 
 215 #define VFIO_DEVICE_API_PCI_STRING              "vfio-pci"
 216 #define VFIO_DEVICE_API_PLATFORM_STRING         "vfio-platform"
 217 #define VFIO_DEVICE_API_AMBA_STRING             "vfio-amba"
 218 #define VFIO_DEVICE_API_CCW_STRING              "vfio-ccw"
 219 #define VFIO_DEVICE_API_AP_STRING               "vfio-ap"
 220 
 221 /**
 222  * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
 223  *                                     struct vfio_region_info)
 224  *
 225  * Retrieve information about a device region.  Caller provides
 226  * struct vfio_region_info with index value set.  Caller sets argsz.
 227  * Implementation of region mapping is bus driver specific.  This is
 228  * intended to describe MMIO, I/O port, as well as bus specific
 229  * regions (ex. PCI config space).  Zero sized regions may be used
 230  * to describe unimplemented regions (ex. unimplemented PCI BARs).
 231  * Return: 0 on success, -errno on failure.
 232  */
 233 struct vfio_region_info {
 234         __u32   argsz;
 235         __u32   flags;
 236 #define VFIO_REGION_INFO_FLAG_READ      (1 << 0) /* Region supports read */
 237 #define VFIO_REGION_INFO_FLAG_WRITE     (1 << 1) /* Region supports write */
 238 #define VFIO_REGION_INFO_FLAG_MMAP      (1 << 2) /* Region supports mmap */
 239 #define VFIO_REGION_INFO_FLAG_CAPS      (1 << 3) /* Info supports caps */
 240         __u32   index;          /* Region index */
 241         __u32   cap_offset;     /* Offset within info struct of first cap */
 242         __u64   size;           /* Region size (bytes) */
 243         __u64   offset;         /* Region offset from start of device fd */
 244 };
 245 #define VFIO_DEVICE_GET_REGION_INFO     _IO(VFIO_TYPE, VFIO_BASE + 8)
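
/*
 * Illustrative userspace sketch, not part of this header: querying region
 * info with the two-call argsz pattern (so any capability chain fits) and
 * mmap'ing the region.  "device" is an open device fd; the index shown is
 * the vfio-pci BAR0 index defined further below; error handling and
 * allocation checks are omitted; assumes <stdlib.h>, <string.h>,
 * <sys/mman.h>, <sys/ioctl.h> and <linux/vfio.h>.
 *
 *    struct vfio_region_info *reg;
 *    __u32 argsz = sizeof(*reg);
 *    void *map;
 *
 *    reg = calloc(1, argsz);
 *    reg->argsz = argsz;
 *    reg->index = VFIO_PCI_BAR0_REGION_INDEX;
 *    ioctl(device, VFIO_DEVICE_GET_REGION_INFO, reg);
 *    if (reg->argsz > argsz) {                    // capabilities did not fit
 *            argsz = reg->argsz;
 *            reg = realloc(reg, argsz);
 *            memset(reg, 0, argsz);
 *            reg->argsz = argsz;
 *            reg->index = VFIO_PCI_BAR0_REGION_INDEX;
 *            ioctl(device, VFIO_DEVICE_GET_REGION_INFO, reg);
 *    }
 *    if (reg->flags & VFIO_REGION_INFO_FLAG_MMAP)
 *            map = mmap(NULL, reg->size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, device, reg->offset);
 */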
 246 
 247 /*
 248  * The sparse mmap capability allows finer granularity of specifying areas
 249  * within a region with mmap support.  When specified, the user should only
 250  * mmap the offset ranges specified by the areas array.  mmaps outside of the
 251  * areas specified may fail (such as the range covering a PCI MSI-X table) or
 252  * may result in improper device behavior.
 253  *
 254  * The structures below define version 1 of this capability.
 255  */
 256 #define VFIO_REGION_INFO_CAP_SPARSE_MMAP        1
 257 
 258 struct vfio_region_sparse_mmap_area {
 259         __u64   offset; /* Offset of mmap'able area within region */
 260         __u64   size;   /* Size of mmap'able area */
 261 };
 262 
 263 struct vfio_region_info_cap_sparse_mmap {
 264         struct vfio_info_cap_header header;
 265         __u32   nr_areas;
 266         __u32   reserved;
 267         struct vfio_region_sparse_mmap_area areas[];
 268 };
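
/*
 * Illustrative userspace sketch, not part of this header: once this
 * capability has been located in the region info buffer (e.g. with the
 * vfio_find_cap() helper sketched earlier), mmap only the areas it lists.
 * "hdr", "reg" and "device" are hypothetical.
 *
 *    struct vfio_region_info_cap_sparse_mmap *sparse;
 *    __u32 i;
 *
 *    sparse = (struct vfio_region_info_cap_sparse_mmap *)hdr;
 *    for (i = 0; i < sparse->nr_areas; i++)
 *            mmap(NULL, sparse->areas[i].size, PROT_READ | PROT_WRITE,
 *                 MAP_SHARED, device, reg->offset + sparse->areas[i].offset);
 */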
 269 
 270 /*
 271  * The device specific type capability allows regions unique to a specific
 272  * device or class of devices to be exposed.  This helps solve the problem for
 273  * vfio bus drivers of defining which region indexes correspond to which region
 274  * on the device, without needing to resort to static indexes, as done by
 275  * vfio-pci.  For instance, if we were to go back in time, we might remove
 276  * VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes
 277  * greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd
 278  * make a "VGA" device specific type to describe the VGA access space.  This
 279  * means that non-VGA devices wouldn't need to waste this index, and thus the
 280  * address space associated with it due to implementation of device file
 281  * descriptor offsets in vfio-pci.
 282  *
 283  * The current implementation is now part of the user ABI, so we can't use this
 284  * for VGA, but there are other upcoming use cases, such as opregions for Intel
 285  * IGD devices and framebuffers for vGPU devices.  We missed VGA, but we'll
 286  * use this for future additions.
 287  *
 288  * The structure below defines version 1 of this capability.
 289  */
 290 #define VFIO_REGION_INFO_CAP_TYPE       2
 291 
 292 struct vfio_region_info_cap_type {
 293         struct vfio_info_cap_header header;
 294         __u32 type;     /* global per bus driver */
 295         __u32 subtype;  /* type specific */
 296 };
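
/*
 * Illustrative userspace sketch, not part of this header: device specific
 * regions are found by walking region indexes beyond the fixed range and
 * matching this capability.  The type/subtype tested here (Intel IGD
 * OpRegion, constants defined further below) is only an example; "hdr" is a
 * capability header located with the vfio_find_cap() helper sketched earlier.
 *
 *    struct vfio_region_info_cap_type *type;
 *
 *    type = (struct vfio_region_info_cap_type *)hdr;
 *    if (type->type == (VFIO_REGION_TYPE_PCI_VENDOR_TYPE | 0x8086) &&
 *        type->subtype == VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION) {
 *            // this region index is the IGD OpRegion
 *    }
 */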
 297 
 298 /*
 299  * List of region types, global per bus driver.
 300  * If you introduce a new type, please add it here.
 301  */
 302 
 303 /* PCI region type containing a PCI vendor part */
 304 #define VFIO_REGION_TYPE_PCI_VENDOR_TYPE        (1 << 31)
 305 #define VFIO_REGION_TYPE_PCI_VENDOR_MASK        (0xffff)
 306 #define VFIO_REGION_TYPE_GFX                    (1)
 307 #define VFIO_REGION_TYPE_CCW                    (2)
 308 
 309 /* sub-types for VFIO_REGION_TYPE_PCI_* */
 310 
 311 /* 8086 vendor PCI sub-types */
 312 #define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION  (1)
 313 #define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG  (2)
 314 #define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG   (3)
 315 
 316 /* 10de vendor PCI sub-types */
 317 /*
 318  * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
 319  */
 320 #define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM  (1)
 321 
 322 /* 1014 vendor PCI sub-types */
 323 /*
 324  * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of the NPU,
 325  * used to do TLB invalidation on a GPU.
 326  */
 327 #define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD    (1)
 328 
 329 /* sub-types for VFIO_REGION_TYPE_GFX */
 330 #define VFIO_REGION_SUBTYPE_GFX_EDID            (1)
 331 
 332 /**
 333  * struct vfio_region_gfx_edid - EDID region layout.
 334  *
 335  * Set display link state and EDID blob.
 336  *
 337  * The EDID blob has monitor information such as brand, name, serial
 338  * number, physical size, supported video modes and more.
 339  *
 340  * This special region allows userspace (typically QEMU) to set a virtual
 341  * EDID for the virtual monitor, which allows a flexible display
 342  * configuration.
 343  *
 344  * For the edid blob spec look here:
 345  *    https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
 346  *
 347  * On linux systems you can find the EDID blob in sysfs:
 348  *    /sys/class/drm/${card}/${connector}/edid
 349  *
 350  * You can use the edid-decode utility (shipped with xorg-x11-utils) to
 351  * decode the EDID blob.
 352  *
 353  * @edid_offset: location of the edid blob, relative to the
 354  *               start of the region (readonly).
 355  * @edid_max_size: max size of the edid blob (readonly).
 356  * @edid_size: actual edid size (read/write).
 357  * @link_state: display link state (read/write).
 358  * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
 359  * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
 360  * @max_xres: max display width (0 == no limitation, readonly).
 361  * @max_yres: max display height (0 == no limitation, readonly).
 362  *
 363  * EDID update protocol:
 364  *   (1) set link-state to down.
 365  *   (2) update edid blob and size.
 366  *   (3) set link-state to up.
 367  */
 368 struct vfio_region_gfx_edid {
 369         __u32 edid_offset;
 370         __u32 edid_max_size;
 371         __u32 edid_size;
 372         __u32 max_xres;
 373         __u32 max_yres;
 374         __u32 link_state;
 375 #define VFIO_DEVICE_GFX_LINK_STATE_UP    1
 376 #define VFIO_DEVICE_GFX_LINK_STATE_DOWN  2
 377 };
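
/*
 * Illustrative userspace sketch, not part of this header: one way to drive
 * the EDID update protocol above with pread()/pwrite() on the device fd.
 * "device", "reg" (the vfio_region_info of the EDID region), "edid" and
 * "edid_len" (a __u32) are hypothetical; assumes <unistd.h> and <stddef.h>.
 *
 *    struct vfio_region_gfx_edid hdr;
 *    __u32 state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
 *
 *    pread(device, &hdr, sizeof(hdr), reg->offset);
 *    pwrite(device, &state, sizeof(state),
 *           reg->offset + offsetof(struct vfio_region_gfx_edid, link_state));
 *    pwrite(device, edid, edid_len, reg->offset + hdr.edid_offset);
 *    pwrite(device, &edid_len, sizeof(edid_len),
 *           reg->offset + offsetof(struct vfio_region_gfx_edid, edid_size));
 *    state = VFIO_DEVICE_GFX_LINK_STATE_UP;
 *    pwrite(device, &state, sizeof(state),
 *           reg->offset + offsetof(struct vfio_region_gfx_edid, link_state));
 */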
 378 
 379 /* sub-types for VFIO_REGION_TYPE_CCW */
 380 #define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD       (1)
 381 
 382 /*
 383  * The MSIX mappable capability informs that the MSIX data of a BAR can be mmapped,
 384  * which allows direct access to non-MSIX registers which happen to be within
 385  * the same system page.
 386  *
 387  * Even though the userspace gets direct access to the MSIX data, the existing
 388  * VFIO_DEVICE_SET_IRQS interface must still be used for MSIX configuration.
 389  */
 390 #define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE      3
 391 
 392 /*
 393  * Capability with compressed real address (aka SSA - small system address)
 394  * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
 395  * and by userspace to associate an NVLink bridge with a GPU.
 396  */
 397 #define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT     4
 398 
 399 struct vfio_region_info_cap_nvlink2_ssatgt {
 400         struct vfio_info_cap_header header;
 401         __u64 tgt;
 402 };
 403 
 404 /*
 405  * Capability with an NVLink link speed. The value is read by
 406  * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
 407  * property in the device tree. The value is fixed in the hardware
 408  * and failing to provide the correct value results in the link
 409  * not working, with no indication from the driver as to why.
 410  */
 411 #define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD     5
 412 
 413 struct vfio_region_info_cap_nvlink2_lnkspd {
 414         struct vfio_info_cap_header header;
 415         __u32 link_speed;
 416         __u32 __pad;
 417 };
 418 
 419 /**
 420  * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
 421  *                                  struct vfio_irq_info)
 422  *
 423  * Retrieve information about a device IRQ.  Caller provides
 424  * struct vfio_irq_info with index value set.  Caller sets argsz.
 425  * Implementation of IRQ mapping is bus driver specific.  Indexes
 426  * using multiple IRQs are primarily intended to support MSI-like
 427  * interrupt blocks.  Zero-count IRQ blocks may be used to describe
 428  * unimplemented interrupt types.
 429  *
 430  * The EVENTFD flag indicates the interrupt index supports eventfd based
 431  * signaling.
 432  *
 433  * The MASKABLE flag indicates the index supports the MASK and UNMASK
 434  * actions described below.
 435  *
 436  * AUTOMASKED indicates that after signaling, the interrupt line is
 437  * automatically masked by VFIO and the user needs to unmask the line
 438  * to receive new interrupts.  This is primarily intended to distinguish
 439  * level triggered interrupts.
 440  *
 441  * The NORESIZE flag indicates that the interrupt lines within the index
 442  * are set up as a set and new subindexes cannot be enabled without first
 443  * disabling the entire index.  This is used for interrupts like PCI MSI
 444  * and MSI-X where the driver may only use a subset of the available
 445  * indexes, but VFIO needs to enable a specific number of vectors
 446  * upfront.  In the case of MSI-X, where the user can enable MSI-X and
 447  * then add and unmask vectors, it's up to userspace to make the decision
 448  * whether to allocate the maximum supported number of vectors or tear
 449  * down setup and incrementally increase the vectors as each is enabled.
 450  */
 451 struct vfio_irq_info {
 452         __u32   argsz;
 453         __u32   flags;
 454 #define VFIO_IRQ_INFO_EVENTFD           (1 << 0)
 455 #define VFIO_IRQ_INFO_MASKABLE          (1 << 1)
 456 #define VFIO_IRQ_INFO_AUTOMASKED        (1 << 2)
 457 #define VFIO_IRQ_INFO_NORESIZE          (1 << 3)
 458         __u32   index;          /* IRQ index */
 459         __u32   count;          /* Number of IRQs within this index */
 460 };
 461 #define VFIO_DEVICE_GET_IRQ_INFO        _IO(VFIO_TYPE, VFIO_BASE + 9)
 462 
 463 /**
 464  * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
 465  *
 466  * Set signaling, masking, and unmasking of interrupts.  Caller provides
 467  * struct vfio_irq_set with all fields set.  'start' and 'count' indicate
 468  * the range of subindexes being specified.
 469  *
 470  * The DATA flags specify the type of data provided.  If DATA_NONE, the
 471  * operation performs the specified action immediately on the specified
 472  * interrupt(s).  For example, to unmask AUTOMASKED interrupt [0,0]:
 473  * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
 474  *
 475  * DATA_BOOL allows sparse support for the same on arrays of interrupts.
 476  * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
 477  * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
 478  * data = {1,0,1}
 479  *
 480  * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
 481  * A value of -1 can be used to either de-assign interrupts if already
 482  * assigned or skip un-assigned interrupts.  For example, to set an eventfd
 483  * to be triggered for interrupts [0,0] and [0,2]:
 484  * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
 485  * data = {fd1, -1, fd2}
 486  * If index [0,1] is previously set, two count = 1 ioctl calls would be
 487  * required to set [0,0] and [0,2] without changing [0,1].
 488  *
 489  * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
 490  * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
 491  * from userspace (ie. simulate hardware triggering).
 492  *
 493  * Setting of an event triggering mechanism to userspace for ACTION_TRIGGER
 494  * enables the interrupt index for the device.  Individual subindex interrupts
 495  * can be disabled using the -1 value for DATA_EVENTFD or the index can be
 496  * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
 497  *
 498  * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
 499  * ACTION_TRIGGER specifies kernel->user signaling.
 500  */
 501 struct vfio_irq_set {
 502         __u32   argsz;
 503         __u32   flags;
 504 #define VFIO_IRQ_SET_DATA_NONE          (1 << 0) /* Data not present */
 505 #define VFIO_IRQ_SET_DATA_BOOL          (1 << 1) /* Data is bool (u8) */
 506 #define VFIO_IRQ_SET_DATA_EVENTFD       (1 << 2) /* Data is eventfd (s32) */
 507 #define VFIO_IRQ_SET_ACTION_MASK        (1 << 3) /* Mask interrupt */
 508 #define VFIO_IRQ_SET_ACTION_UNMASK      (1 << 4) /* Unmask interrupt */
 509 #define VFIO_IRQ_SET_ACTION_TRIGGER     (1 << 5) /* Trigger interrupt */
 510         __u32   index;
 511         __u32   start;
 512         __u32   count;
 513         __u8    data[];
 514 };
 515 #define VFIO_DEVICE_SET_IRQS            _IO(VFIO_TYPE, VFIO_BASE + 10)
 516 
 517 #define VFIO_IRQ_SET_DATA_TYPE_MASK     (VFIO_IRQ_SET_DATA_NONE | \
 518                                          VFIO_IRQ_SET_DATA_BOOL | \
 519                                          VFIO_IRQ_SET_DATA_EVENTFD)
 520 #define VFIO_IRQ_SET_ACTION_TYPE_MASK   (VFIO_IRQ_SET_ACTION_MASK | \
 521                                          VFIO_IRQ_SET_ACTION_UNMASK | \
 522                                          VFIO_IRQ_SET_ACTION_TRIGGER)
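
/*
 * Illustrative userspace sketch, not part of this header: attaching an
 * eventfd to MSI-X vector 0 with DATA_EVENTFD/ACTION_TRIGGER.  Because of
 * the variable length data[], the struct is typically built in an allocated
 * buffer.  "device" is an open device fd; VFIO_PCI_MSIX_IRQ_INDEX comes from
 * the vfio-pci index enum further below; assumes <stdlib.h>, <string.h>,
 * <sys/eventfd.h>, <sys/ioctl.h> and <linux/vfio.h>.
 *
 *    struct vfio_irq_set *set;
 *    size_t sz = sizeof(*set) + sizeof(__s32);
 *    __s32 efd = eventfd(0, EFD_CLOEXEC);
 *
 *    set = malloc(sz);
 *    set->argsz = sz;
 *    set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *    set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 *    set->start = 0;
 *    set->count = 1;
 *    memcpy(set->data, &efd, sizeof(efd));
 *    ioctl(device, VFIO_DEVICE_SET_IRQS, set);
 */
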
 523 /**
 524  * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
 525  *
 526  * Reset a device.
 527  */
 528 #define VFIO_DEVICE_RESET               _IO(VFIO_TYPE, VFIO_BASE + 11)
 529 
 530 /*
 531  * The VFIO-PCI bus driver makes use of the following fixed region and
 532  * IRQ index mapping.  Unimplemented regions return a size of zero.
 533  * Unimplemented IRQ types return a count of zero.
 534  */
 535 
 536 enum {
 537         VFIO_PCI_BAR0_REGION_INDEX,
 538         VFIO_PCI_BAR1_REGION_INDEX,
 539         VFIO_PCI_BAR2_REGION_INDEX,
 540         VFIO_PCI_BAR3_REGION_INDEX,
 541         VFIO_PCI_BAR4_REGION_INDEX,
 542         VFIO_PCI_BAR5_REGION_INDEX,
 543         VFIO_PCI_ROM_REGION_INDEX,
 544         VFIO_PCI_CONFIG_REGION_INDEX,
 545         /*
 546          * Expose VGA regions defined for PCI base class 03, subclass 00.
 547          * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
 548          * as well as the MMIO range 0xa0000 to 0xbffff.  Each implemented
 549  * range is found at its identity-mapped offset from the region
 550          * offset, for example 0x3b0 is region_info.offset + 0x3b0.  Areas
 551          * between described ranges are unimplemented.
 552          */
 553         VFIO_PCI_VGA_REGION_INDEX,
 554         VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */
 555                                  /* device specific cap to define content. */
 556 };
 557 
 558 enum {
 559         VFIO_PCI_INTX_IRQ_INDEX,
 560         VFIO_PCI_MSI_IRQ_INDEX,
 561         VFIO_PCI_MSIX_IRQ_INDEX,
 562         VFIO_PCI_ERR_IRQ_INDEX,
 563         VFIO_PCI_REQ_IRQ_INDEX,
 564         VFIO_PCI_NUM_IRQS
 565 };
 566 
 567 /*
 568  * The vfio-ccw bus driver makes use of the following fixed region and
 569  * IRQ index mapping. Unimplemented regions return a size of zero.
 570  * Unimplemented IRQ types return a count of zero.
 571  */
 572 
 573 enum {
 574         VFIO_CCW_CONFIG_REGION_INDEX,
 575         VFIO_CCW_NUM_REGIONS
 576 };
 577 
 578 enum {
 579         VFIO_CCW_IO_IRQ_INDEX,
 580         VFIO_CCW_NUM_IRQS
 581 };
 582 
 583 /**
 584  * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
 585  *                                            struct vfio_pci_hot_reset_info)
 586  *
 587  * Return: 0 on success, -errno on failure:
 588  *      -ENOSPC = insufficient buffer, -ENODEV = unsupported for device.
 589  */
 590 struct vfio_pci_dependent_device {
 591         __u32   group_id;
 592         __u16   segment;
 593         __u8    bus;
 594         __u8    devfn; /* Use PCI_SLOT/PCI_FUNC */
 595 };
 596 
 597 struct vfio_pci_hot_reset_info {
 598         __u32   argsz;
 599         __u32   flags;
 600         __u32   count;
 601         struct vfio_pci_dependent_device        devices[];
 602 };
 603 
 604 #define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO      _IO(VFIO_TYPE, VFIO_BASE + 12)
 605 
 606 /**
 607  * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 608  *                                  struct vfio_pci_hot_reset)
 609  *
 610  * Return: 0 on success, -errno on failure.
 611  */
 612 struct vfio_pci_hot_reset {
 613         __u32   argsz;
 614         __u32   flags;
 615         __u32   count;
 616         __s32   group_fds[];
 617 };
 618 
 619 #define VFIO_DEVICE_PCI_HOT_RESET       _IO(VFIO_TYPE, VFIO_BASE + 13)
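
/*
 * Illustrative userspace sketch, not part of this header: the usual hot
 * reset flow is to size the info struct with the argsz pattern, open one
 * group fd per affected group, then pass those fds to the reset ioctl.
 * Collecting the group fds is elided; "device" is an open device fd;
 * assumes <stdlib.h>, <string.h>, <errno.h>, <sys/ioctl.h> and
 * <linux/vfio.h>.
 *
 *    struct vfio_pci_hot_reset_info *info;
 *    struct vfio_pci_hot_reset *reset;
 *    __u32 argsz = sizeof(*info);
 *    int ngroups = 0;                     // distinct group_ids, collection elided
 *
 *    info = calloc(1, argsz);
 *    info->argsz = argsz;
 *    if (ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info) &&
 *        errno == ENOSPC) {
 *            argsz = sizeof(*info) + info->count * sizeof(info->devices[0]);
 *            info = realloc(info, argsz);
 *            memset(info, 0, argsz);
 *            info->argsz = argsz;
 *            ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
 *    }
 *    // open a group fd for each distinct info->devices[i].group_id,
 *    // counting them in ngroups, then:
 *    reset = calloc(1, sizeof(*reset) + ngroups * sizeof(__s32));
 *    reset->argsz = sizeof(*reset) + ngroups * sizeof(__s32);
 *    reset->count = ngroups;
 *    // fill reset->group_fds[] with the opened group fds
 *    ioctl(device, VFIO_DEVICE_PCI_HOT_RESET, reset);
 */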
 620 
 621 /**
 622  * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
 623  *                                    struct vfio_device_query_gfx_plane)
 624  *
 625  * Set the drm_plane_type and flags, then retrieve the gfx plane info.
 626  *
 627  * flags supported:
 628  * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
 629  *   to ask whether the mdev supports dma-buf.  Returns 0 if supported,
 630  *   -EINVAL if dma-buf is not supported.
 631  * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
 632  *   to ask whether the mdev supports a region.  Returns 0 if supported,
 633  *   -EINVAL if a region is not supported.
 634  * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
 635  *   with each call to query the plane info.
 636  * - Others are invalid and return -EINVAL.
 637  *
 638  * Note:
 639  * 1. The plane could be disabled by the guest. In that case, success will be
 640  *    returned with zero-initialized drm_format, size, width and height
 641  *    fields.
 642  * 2. x_hot/y_hot are set to 0xFFFFFFFF if no hotspot information is available.
 643  *
 644  * Return: 0 on success, -errno on other failure.
 645  */
 646 struct vfio_device_gfx_plane_info {
 647         __u32 argsz;
 648         __u32 flags;
 649 #define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
 650 #define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
 651 #define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
 652         /* in */
 653         __u32 drm_plane_type;   /* type of plane: DRM_PLANE_TYPE_* */
 654         /* out */
 655         __u32 drm_format;       /* drm format of plane */
 656         __u64 drm_format_mod;   /* tiled mode */
 657         __u32 width;    /* width of plane */
 658         __u32 height;   /* height of plane */
 659         __u32 stride;   /* stride of plane */
 660         __u32 size;     /* size of plane in bytes, aligned on page */
 661         __u32 x_pos;    /* horizontal position of cursor plane */
 662         __u32 y_pos;    /* vertical position of cursor plane */
 663         __u32 x_hot;    /* horizontal position of cursor hotspot */
 664         __u32 y_hot;    /* vertical position of cursor hotspot */
 665         union {
 666                 __u32 region_index;     /* region index */
 667                 __u32 dmabuf_id;        /* dma-buf id */
 668         };
 669 };
 670 
 671 #define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
 672 
 673 /**
 674  * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
 675  *
 676  * Return a new dma-buf file descriptor for an exposed guest framebuffer
 677  * described by the provided dmabuf_id.  The dmabuf_id is returned from
 678  * VFIO_DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
 679  */
 680 
 681 #define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)
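
/*
 * Illustrative userspace sketch, not part of this header: probing an mdev
 * for dma-buf plane support, querying the primary plane and exporting it.
 * "device" is an open device fd; DRM_PLANE_TYPE_PRIMARY is assumed to come
 * from the DRM headers (value 1); assumes <err.h>, <sys/ioctl.h> and
 * <linux/vfio.h>.
 *
 *    struct vfio_device_gfx_plane_info plane = {
 *            .argsz = sizeof(plane),
 *            .flags = VFIO_GFX_PLANE_TYPE_PROBE | VFIO_GFX_PLANE_TYPE_DMABUF,
 *    };
 *    int dmabuf_fd;
 *
 *    if (ioctl(device, VFIO_DEVICE_QUERY_GFX_PLANE, &plane))
 *            err(1, "dma-buf planes not supported");
 *    plane.flags = VFIO_GFX_PLANE_TYPE_DMABUF;
 *    plane.drm_plane_type = DRM_PLANE_TYPE_PRIMARY;
 *    ioctl(device, VFIO_DEVICE_QUERY_GFX_PLANE, &plane);
 *    dmabuf_fd = ioctl(device, VFIO_DEVICE_GET_GFX_DMABUF, &plane.dmabuf_id);
 */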
 682 
 683 /**
 684  * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
 685  *                              struct vfio_device_ioeventfd)
 686  *
 687  * Perform a write to the device at the specified device fd offset, with
 688  * the specified data and width when the provided eventfd is triggered.
 689  * vfio bus drivers may not support this for all regions, for all widths,
 690  * or at all.  vfio-pci currently only enables support for BAR regions,
 691  * excluding the MSI-X vector table.
 692  *
 693  * Return: 0 on success, -errno on failure.
 694  */
 695 struct vfio_device_ioeventfd {
 696         __u32   argsz;
 697         __u32   flags;
 698 #define VFIO_DEVICE_IOEVENTFD_8         (1 << 0) /* 1-byte write */
 699 #define VFIO_DEVICE_IOEVENTFD_16        (1 << 1) /* 2-byte write */
 700 #define VFIO_DEVICE_IOEVENTFD_32        (1 << 2) /* 4-byte write */
 701 #define VFIO_DEVICE_IOEVENTFD_64        (1 << 3) /* 8-byte write */
 702 #define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf)
 703         __u64   offset;                 /* device fd offset of write */
 704         __u64   data;                   /* data to be written */
 705         __s32   fd;                     /* -1 for de-assignment */
 706 };
 707 
 708 #define VFIO_DEVICE_IOEVENTFD           _IO(VFIO_TYPE, VFIO_BASE + 16)
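
/*
 * Illustrative userspace sketch, not part of this header: arranging for a
 * 4-byte value to be written to a BAR offset whenever an eventfd fires.
 * "device", "reg" and "doorbell_offset" are hypothetical; assumes
 * <sys/eventfd.h>, <sys/ioctl.h> and <linux/vfio.h>.
 *
 *    struct vfio_device_ioeventfd ioeventfd = {
 *            .argsz = sizeof(ioeventfd),
 *            .flags = VFIO_DEVICE_IOEVENTFD_32,
 *            .offset = reg->offset + doorbell_offset,
 *            .data = 1,
 *            .fd = eventfd(0, EFD_CLOEXEC),
 *    };
 *
 *    ioctl(device, VFIO_DEVICE_IOEVENTFD, &ioeventfd);
 *    // Triggering ioeventfd.fd now performs the write; re-issuing the ioctl
 *    // with .fd = -1 and the same offset/data/flags de-assigns it.
 */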
 709 
 710 /* -------- API for Type1 VFIO IOMMU -------- */
 711 
 712 /**
 713  * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12, struct vfio_iommu_type1_info)
 714  *
 715  * Retrieve information about the IOMMU object.  Fills in provided
 716  * struct vfio_iommu_type1_info.  Caller sets argsz.
 717  *
 718  * XXX Should we do these by CHECK_EXTENSION too?
 719  */
 720 struct vfio_iommu_type1_info {
 721         __u32   argsz;
 722         __u32   flags;
 723 #define VFIO_IOMMU_INFO_PGSIZES (1 << 0)        /* supported page sizes info */
 724 #define VFIO_IOMMU_INFO_CAPS    (1 << 1)        /* Info supports caps */
 725         __u64   iova_pgsizes;   /* Bitmap of supported page sizes */
 726         __u32   cap_offset;     /* Offset within info struct of first cap */
 727 };
 728 
 729 /*
 730  * The IOVA capability allows reporting the valid IOVA range(s),
 731  * excluding any non-relaxable reserved regions exposed by
 732  * devices attached to the container.  Any DMA map attempt
 733  * outside the valid IOVA ranges will return an error.
 734  *
 735  * The structures below define version 1 of this capability.
 736  */
 737 #define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE  1
 738 
 739 struct vfio_iova_range {
 740         __u64   start;
 741         __u64   end;
 742 };
 743 
 744 struct vfio_iommu_type1_info_cap_iova_range {
 745         struct  vfio_info_cap_header header;
 746         __u32   nr_iovas;
 747         __u32   reserved;
 748         struct  vfio_iova_range iova_ranges[];
 749 };
 750 
 751 #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
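
/*
 * Illustrative userspace sketch, not part of this header: reading the Type1
 * IOMMU info with the two-call argsz pattern and, when the CAPS flag is set,
 * locating the IOVA range capability with the vfio_find_cap() helper
 * sketched earlier.  "container" is an open container fd; assumes
 * <stdlib.h>, <string.h>, <sys/ioctl.h> and <linux/vfio.h>.
 *
 *    struct vfio_iommu_type1_info *info;
 *    struct vfio_iommu_type1_info_cap_iova_range *ranges;
 *    struct vfio_info_cap_header *hdr;
 *    __u32 argsz = sizeof(*info);
 *
 *    info = calloc(1, argsz);
 *    info->argsz = argsz;
 *    ioctl(container, VFIO_IOMMU_GET_INFO, info);
 *    if (info->argsz > argsz) {                   // capabilities did not fit
 *            argsz = info->argsz;
 *            info = realloc(info, argsz);
 *            memset(info, 0, argsz);
 *            info->argsz = argsz;
 *            ioctl(container, VFIO_IOMMU_GET_INFO, info);
 *    }
 *    if (info->flags & VFIO_IOMMU_INFO_CAPS) {
 *            hdr = vfio_find_cap(info, info->cap_offset,
 *                                VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
 *            if (hdr)
 *                    ranges = (struct vfio_iommu_type1_info_cap_iova_range *)hdr;
 *    }
 */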
 752 
 753 /**
 754  * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_iommu_type1_dma_map)
 755  *
 756  * Map process virtual addresses to IO virtual addresses using the provided
 757  * struct vfio_iommu_type1_dma_map.  Caller sets argsz.  READ and/or WRITE required.
 758  */
 759 struct vfio_iommu_type1_dma_map {
 760         __u32   argsz;
 761         __u32   flags;
 762 #define VFIO_DMA_MAP_FLAG_READ (1 << 0)         /* readable from device */
 763 #define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)        /* writable from device */
 764         __u64   vaddr;                          /* Process virtual address */
 765         __u64   iova;                           /* IO virtual address */
 766         __u64   size;                           /* Size of mapping (bytes) */
 767 };
 768 
 769 #define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
 770 
 771 /**
 772  * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
 773  *                              struct vfio_iommu_type1_dma_unmap)
 774  *
 775  * Unmap IO virtual addresses using the provided struct vfio_iommu_type1_dma_unmap.
 776  * Caller sets argsz.  The actual unmapped size is returned in the size
 777  * field.  No guarantee is made to the user that arbitrary unmaps of iova
 778  * or size different from those used in the original mapping call will
 779  * succeed.
 780  */
 781 struct vfio_iommu_type1_dma_unmap {
 782         __u32   argsz;
 783         __u32   flags;
 784         __u64   iova;                           /* IO virtual address */
 785         __u64   size;                           /* Size of mapping (bytes) */
 786 };
 787 
 788 #define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
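
/*
 * Illustrative userspace sketch, not part of this header: mapping a buffer
 * for device DMA at a chosen IOVA and unmapping it again.  "container" is an
 * open container fd with an IOMMU set, "buf" is a page-aligned buffer (e.g.
 * from mmap()); the IOVA and size are arbitrary examples; assumes
 * <stdint.h>, <sys/ioctl.h> and <linux/vfio.h>.
 *
 *    struct vfio_iommu_type1_dma_map map = {
 *            .argsz = sizeof(map),
 *            .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *            .vaddr = (__u64)(uintptr_t)buf,
 *            .iova = 1024 * 1024,
 *            .size = 1024 * 1024,
 *    };
 *    struct vfio_iommu_type1_dma_unmap unmap = {
 *            .argsz = sizeof(unmap),
 *            .iova = 1024 * 1024,
 *            .size = 1024 * 1024,
 *    };
 *
 *    ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 *    // the device may now DMA to/from IOVAs [iova, iova + size)
 *    ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
 */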
 789 
 790 /*
 791  * IOCTLs to enable/disable IOMMU container usage.
 792  * No parameters are supported.
 793  */
 794 #define VFIO_IOMMU_ENABLE       _IO(VFIO_TYPE, VFIO_BASE + 15)
 795 #define VFIO_IOMMU_DISABLE      _IO(VFIO_TYPE, VFIO_BASE + 16)
 796 
 797 /* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
 798 
 799 /*
 800  * The SPAPR TCE DDW info struct provides information about the
 801  * details of the Dynamic DMA Window (DDW) capability.
 802  *
 803  * @pgsizes contains a page size bitmask, 4K/64K/16M are supported.
 804  * @max_dynamic_windows_supported tells the maximum number of windows
 805  * which the platform can create.
 806  * @levels tells the maximum number of levels in multi-level IOMMU tables;
 807  * this allows splitting a table into smaller chunks which reduces
 808  * the amount of physically contiguous memory required for the table.
 809  */
 810 struct vfio_iommu_spapr_tce_ddw_info {
 811         __u64 pgsizes;                  /* Bitmap of supported page sizes */
 812         __u32 max_dynamic_windows_supported;
 813         __u32 levels;
 814 };
 815 
 816 /*
 817  * The SPAPR TCE info struct provides information about the PCI bus
 818  * address ranges available for DMA; these values are programmed into
 819  * the hardware, so the guest has to know this information.
 820  *
 821  * The DMA 32 bit window start is an absolute PCI bus address.
 822  * The IOVA addresses passed via the map/unmap ioctls are absolute PCI bus
 823  * addresses too, so the window works as a filter rather than an offset
 824  * for IOVA addresses.
 825  *
 826  * Flags supported:
 827  * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
 828  *   (DDW) support is present. @ddw is only supported when DDW is present.
 829  */
 830 struct vfio_iommu_spapr_tce_info {
 831         __u32 argsz;
 832         __u32 flags;
 833 #define VFIO_IOMMU_SPAPR_INFO_DDW       (1 << 0)        /* DDW supported */
 834         __u32 dma32_window_start;       /* 32 bit window start (bytes) */
 835         __u32 dma32_window_size;        /* 32 bit window size (bytes) */
 836         struct vfio_iommu_spapr_tce_ddw_info ddw;
 837 };
 838 
 839 #define VFIO_IOMMU_SPAPR_TCE_GET_INFO   _IO(VFIO_TYPE, VFIO_BASE + 12)
 840 
 841 /*
 842  * EEH PE operation struct provides ways to:
 843  * - enable/disable EEH functionality;
 844  * - unfreeze IO/DMA for frozen PE;
 845  * - read PE state;
 846  * - reset PE;
 847  * - configure PE;
 848  * - inject EEH error.
 849  */
 850 struct vfio_eeh_pe_err {
 851         __u32 type;
 852         __u32 func;
 853         __u64 addr;
 854         __u64 mask;
 855 };
 856 
 857 struct vfio_eeh_pe_op {
 858         __u32 argsz;
 859         __u32 flags;
 860         __u32 op;
 861         union {
 862                 struct vfio_eeh_pe_err err;
 863         };
 864 };
 865 
 866 #define VFIO_EEH_PE_DISABLE             0       /* Disable EEH functionality */
 867 #define VFIO_EEH_PE_ENABLE              1       /* Enable EEH functionality  */
 868 #define VFIO_EEH_PE_UNFREEZE_IO         2       /* Enable IO for frozen PE   */
 869 #define VFIO_EEH_PE_UNFREEZE_DMA        3       /* Enable DMA for frozen PE  */
 870 #define VFIO_EEH_PE_GET_STATE           4       /* PE state retrieval        */
 871 #define  VFIO_EEH_PE_STATE_NORMAL       0       /* PE in functional state    */
 872 #define  VFIO_EEH_PE_STATE_RESET        1       /* PE reset in progress      */
 873 #define  VFIO_EEH_PE_STATE_STOPPED      2       /* Stopped DMA and IO        */
 874 #define  VFIO_EEH_PE_STATE_STOPPED_DMA  4       /* Stopped DMA only          */
 875 #define  VFIO_EEH_PE_STATE_UNAVAIL      5       /* State unavailable         */
 876 #define VFIO_EEH_PE_RESET_DEACTIVATE    5       /* Deassert PE reset         */
 877 #define VFIO_EEH_PE_RESET_HOT           6       /* Assert hot reset          */
 878 #define VFIO_EEH_PE_RESET_FUNDAMENTAL   7       /* Assert fundamental reset  */
 879 #define VFIO_EEH_PE_CONFIGURE           8       /* PE configuration          */
 880 #define VFIO_EEH_PE_INJECT_ERR          9       /* Inject EEH error          */
 881 
 882 #define VFIO_EEH_PE_OP                  _IO(VFIO_TYPE, VFIO_BASE + 21)
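
/*
 * Illustrative userspace sketch, not part of this header: enabling EEH on a
 * container and reading back the PE state.  "container" is an open container
 * fd whose group(s) support the VFIO_EEH extension; assumes <sys/ioctl.h>
 * and <linux/vfio.h>.
 *
 *    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
 *    int state;
 *
 *    op.op = VFIO_EEH_PE_ENABLE;
 *    ioctl(container, VFIO_EEH_PE_OP, &op);
 *    op.op = VFIO_EEH_PE_GET_STATE;
 *    state = ioctl(container, VFIO_EEH_PE_OP, &op);   // a VFIO_EEH_PE_STATE_* value
 */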
 883 
 884 /**
 885  * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
 886  *
 887  * Registers user space memory where DMA is allowed. It pins
 888  * user pages and does the locked memory accounting so
 889  * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
 890  * become faster.
 891  */
 892 struct vfio_iommu_spapr_register_memory {
 893         __u32   argsz;
 894         __u32   flags;
 895         __u64   vaddr;                          /* Process virtual address */
 896         __u64   size;                           /* Size of mapping (bytes) */
 897 };
 898 #define VFIO_IOMMU_SPAPR_REGISTER_MEMORY        _IO(VFIO_TYPE, VFIO_BASE + 17)
 899 
 900 /**
 901  * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
 902  *
 903  * Unregisters user space memory registered with
 904  * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
 905  * Uses vfio_iommu_spapr_register_memory for parameters.
 906  */
 907 #define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY      _IO(VFIO_TYPE, VFIO_BASE + 18)
 908 
 909 /**
 910  * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
 911  *
 912  * Creates an additional TCE table and programs it (sets a new DMA window)
 913  * to every IOMMU group in the container. It receives page shift, window
 914  * size and number of levels in the TCE table being created.
 915  *
 916  * It allocates and returns the PCI bus offset of the new DMA window.
 917  */
 918 struct vfio_iommu_spapr_tce_create {
 919         __u32 argsz;
 920         __u32 flags;
 921         /* in */
 922         __u32 page_shift;
 923         __u32 __resv1;
 924         __u64 window_size;
 925         __u32 levels;
 926         __u32 __resv2;
 927         /* out */
 928         __u64 start_addr;
 929 };
 930 #define VFIO_IOMMU_SPAPR_TCE_CREATE     _IO(VFIO_TYPE, VFIO_BASE + 19)
 931 
 932 /**
 933  * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
 934  *
 935  * Unprograms a TCE table from all groups in the container and destroys it.
 936  * It receives a PCI bus offset as a window id.
 937  */
 938 struct vfio_iommu_spapr_tce_remove {
 939         __u32 argsz;
 940         __u32 flags;
 941         /* in */
 942         __u64 start_addr;
 943 };
 944 #define VFIO_IOMMU_SPAPR_TCE_REMOVE     _IO(VFIO_TYPE, VFIO_BASE + 20)
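
/*
 * Illustrative userspace sketch, not part of this header: pre-registering
 * memory and creating/removing an additional DMA window on a SPAPR TCE v2
 * container.  "container", "buf" and "ram_size" are hypothetical; the page
 * shift, window size and levels are example values only; assumes
 * <stdint.h>, <sys/ioctl.h> and <linux/vfio.h>.
 *
 *    struct vfio_iommu_spapr_register_memory reg = {
 *            .argsz = sizeof(reg),
 *            .vaddr = (__u64)(uintptr_t)buf,
 *            .size = ram_size,
 *    };
 *    struct vfio_iommu_spapr_tce_create create = {
 *            .argsz = sizeof(create),
 *            .page_shift = 16,                    // 64K IOMMU pages
 *            .window_size = 1ULL << 32,
 *            .levels = 1,
 *    };
 *    struct vfio_iommu_spapr_tce_remove remove = {
 *            .argsz = sizeof(remove),
 *    };
 *
 *    ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 *    ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *    remove.start_addr = create.start_addr;      // the window id is its bus offset
 *    // ... VFIO_IOMMU_MAP_DMA within the new window, then tear down ...
 *    ioctl(container, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
 */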
 945 
 946 /* ***************************************************************** */
 947 
 948 #endif /* _UAPIVFIO_H */
