root/drivers/gpu/drm/i915/gvt/gvt.h


DEFINITIONS

This source file includes the following definitions.
  1. to_gvt
  2. intel_gvt_request_service
  3. intel_vgpu_write_pci_bar
  4. intel_vgpu_get_bar_gpa
  5. mmio_hw_access_pre
  6. mmio_hw_access_post
  7. intel_gvt_mmio_set_accessed
  8. intel_gvt_mmio_is_cmd_access
  9. intel_gvt_mmio_is_unalign
  10. intel_gvt_mmio_set_cmd_accessed
  11. intel_gvt_mmio_has_mode_mask
  12. intel_gvt_mmio_is_in_ctx
  13. intel_gvt_mmio_set_in_ctx

   1 /*
   2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice (including the next
  12  * paragraph) shall be included in all copies or substantial portions of the
  13  * Software.
  14  *
  15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  21  * SOFTWARE.
  22  *
  23  * Authors:
  24  *    Kevin Tian <kevin.tian@intel.com>
  25  *    Eddie Dong <eddie.dong@intel.com>
  26  *
  27  * Contributors:
  28  *    Niu Bing <bing.niu@intel.com>
  29  *    Zhi Wang <zhi.a.wang@intel.com>
  30  *
  31  */
  32 
  33 #ifndef _GVT_H_
  34 #define _GVT_H_
  35 
  36 #include "debug.h"
  37 #include "hypercall.h"
  38 #include "mmio.h"
  39 #include "reg.h"
  40 #include "interrupt.h"
  41 #include "gtt.h"
  42 #include "display.h"
  43 #include "edid.h"
  44 #include "execlist.h"
  45 #include "scheduler.h"
  46 #include "sched_policy.h"
  47 #include "mmio_context.h"
  48 #include "cmd_parser.h"
  49 #include "fb_decoder.h"
  50 #include "dmabuf.h"
  51 #include "page_track.h"
  52 
  53 #define GVT_MAX_VGPU 8
  54 
  55 struct intel_gvt_host {
  56         struct device *dev;
  57         bool initialized;
  58         int hypervisor_type;
  59         struct intel_gvt_mpt *mpt;
  60 };
  61 
  62 extern struct intel_gvt_host intel_gvt_host;
  63 
  64 /* Describe per-platform limitations. */
  65 struct intel_gvt_device_info {
  66         u32 max_support_vgpus;
  67         u32 cfg_space_size;
  68         u32 mmio_size;
  69         u32 mmio_bar;
  70         unsigned long msi_cap_offset;
  71         u32 gtt_start_offset;
  72         u32 gtt_entry_size;
  73         u32 gtt_entry_size_shift;
  74         int gmadr_bytes_in_cmd;
  75         u32 max_surface_size;
  76 };
  77 
  78 /* GM resources owned by a vGPU */
  79 struct intel_vgpu_gm {
  80         u64 aperture_sz;
  81         u64 hidden_sz;
  82         struct drm_mm_node low_gm_node;
  83         struct drm_mm_node high_gm_node;
  84 };
  85 
  86 #define INTEL_GVT_MAX_NUM_FENCES 32
  87 
  88 /* Fences owned by a vGPU */
  89 struct intel_vgpu_fence {
  90         struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
  91         u32 base;
  92         u32 size;
  93 };
  94 
  95 struct intel_vgpu_mmio {
  96         void *vreg;
  97 };
  98 
  99 #define INTEL_GVT_MAX_BAR_NUM 4
 100 
 101 struct intel_vgpu_pci_bar {
 102         u64 size;
 103         bool tracked;
 104 };
 105 
 106 struct intel_vgpu_cfg_space {
 107         unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
 108         struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
 109 };
 110 
 111 #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
 112 
 113 struct intel_vgpu_irq {
 114         bool irq_warn_once[INTEL_GVT_EVENT_MAX];
 115         DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
 116                        INTEL_GVT_EVENT_MAX);
 117 };
 118 
 119 struct intel_vgpu_opregion {
 120         bool mapped;
 121         void *va;
 122         u32 gfn[INTEL_GVT_OPREGION_PAGES];
 123 };
 124 
 125 #define vgpu_opregion(vgpu) (&(vgpu->opregion))
 126 
 127 struct intel_vgpu_display {
 128         struct intel_vgpu_i2c_edid i2c_edid;
 129         struct intel_vgpu_port ports[I915_MAX_PORTS];
 130         struct intel_vgpu_sbi sbi;
 131 };
 132 
 133 struct vgpu_sched_ctl {
 134         int weight;
 135 };
 136 
 137 enum {
 138         INTEL_VGPU_EXECLIST_SUBMISSION = 1,
 139         INTEL_VGPU_GUC_SUBMISSION,
 140 };
 141 
 142 struct intel_vgpu_submission_ops {
 143         const char *name;
 144         int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
 145         void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
 146         void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
 147 };
 148 
 149 struct intel_vgpu_submission {
 150         struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
 151         struct list_head workload_q_head[I915_NUM_ENGINES];
 152         struct intel_context *shadow[I915_NUM_ENGINES];
 153         struct kmem_cache *workloads;
 154         atomic_t running_workload_num;
 155         union {
 156                 u64 i915_context_pml4;
 157                 u64 i915_context_pdps[GEN8_3LVL_PDPES];
 158         };
 159         DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
 160         DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 161         void *ring_scan_buffer[I915_NUM_ENGINES];
 162         int ring_scan_buffer_size[I915_NUM_ENGINES];
 163         const struct intel_vgpu_submission_ops *ops;
 164         int virtual_submission_interface;
 165         bool active;
 166 };
 167 
 168 struct intel_vgpu {
 169         struct intel_gvt *gvt;
 170         struct mutex vgpu_lock;
 171         int id;
 172         unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
 173         bool active;
 174         bool pv_notified;
 175         bool failsafe;
 176         unsigned int resetting_eng;
 177 
  178         /* Both sched_data and sched_ctl can be seen as part of the global gvt
  179          * scheduler structure, so the two vGPU fields below are protected
  180          * by sched_lock rather than vgpu_lock.
  181          */
 182         void *sched_data;
 183         struct vgpu_sched_ctl sched_ctl;
 184 
 185         struct intel_vgpu_fence fence;
 186         struct intel_vgpu_gm gm;
 187         struct intel_vgpu_cfg_space cfg_space;
 188         struct intel_vgpu_mmio mmio;
 189         struct intel_vgpu_irq irq;
 190         struct intel_vgpu_gtt gtt;
 191         struct intel_vgpu_opregion opregion;
 192         struct intel_vgpu_display display;
 193         struct intel_vgpu_submission submission;
 194         struct radix_tree_root page_track_tree;
 195         u32 hws_pga[I915_NUM_ENGINES];
 196 
 197         struct dentry *debugfs;
 198 
 199 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
 200         struct {
 201                 struct mdev_device *mdev;
 202                 struct vfio_region *region;
 203                 int num_regions;
 204                 struct eventfd_ctx *intx_trigger;
 205                 struct eventfd_ctx *msi_trigger;
 206 
 207                 /*
  208                  * Two caches are used to avoid mapping duplicate pages (e.g.
  209                  * scratch pages). This helps to reduce DMA setup overhead.
  210                  */
 211                 struct rb_root gfn_cache;
 212                 struct rb_root dma_addr_cache;
 213                 unsigned long nr_cache_entries;
 214                 struct mutex cache_lock;
 215 
 216                 struct notifier_block iommu_notifier;
 217                 struct notifier_block group_notifier;
 218                 struct kvm *kvm;
 219                 struct work_struct release_work;
 220                 atomic_t released;
 221                 struct vfio_device *vfio_device;
 222         } vdev;
 223 #endif
 224 
 225         struct list_head dmabuf_obj_list_head;
 226         struct mutex dmabuf_lock;
 227         struct idr object_idr;
 228 
 229         struct completion vblank_done;
 230 
 231         u32 scan_nonprivbb;
 232 };
 233 
  234 /* validating VM health status */
 235 #define vgpu_is_vm_unhealthy(ret_val) \
 236         (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
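/*
 * Illustrative sketch (not part of the original file): callers typically
 * funnel a workload error code through this check and, when the VM looks
 * unhealthy, drop the vGPU into failsafe mode, e.g.:
 *
 *      ret = intel_gvt_scan_and_shadow_workload(workload);
 *      if (vgpu_is_vm_unhealthy(ret))
 *              enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 */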
 237 
 238 struct intel_gvt_gm {
 239         unsigned long vgpu_allocated_low_gm_size;
 240         unsigned long vgpu_allocated_high_gm_size;
 241 };
 242 
 243 struct intel_gvt_fence {
 244         unsigned long vgpu_allocated_fence_num;
 245 };
 246 
 247 /* Special MMIO blocks. */
 248 struct gvt_mmio_block {
 249         unsigned int device;
 250         i915_reg_t   offset;
 251         unsigned int size;
 252         gvt_mmio_func read;
 253         gvt_mmio_func write;
 254 };
 255 
 256 #define INTEL_GVT_MMIO_HASH_BITS 11
 257 
 258 struct intel_gvt_mmio {
 259         u8 *mmio_attribute;
 260 /* Register contains RO bits */
 261 #define F_RO            (1 << 0)
 262 /* Register contains graphics address */
 263 #define F_GMADR         (1 << 1)
 264 /* Mode mask registers with high 16 bits as the mask bits */
 265 #define F_MODE_MASK     (1 << 2)
 266 /* This reg can be accessed by GPU commands */
 267 #define F_CMD_ACCESS    (1 << 3)
 268 /* This reg has been accessed by a VM */
 269 #define F_ACCESSED      (1 << 4)
 270 /* This reg has been accessed through GPU commands */
 271 #define F_CMD_ACCESSED  (1 << 5)
  273 /* This reg can be accessed with an unaligned address */
 273 #define F_UNALIGN       (1 << 6)
 274 /* This reg is saved/restored in context */
 275 #define F_IN_CTX        (1 << 7)
 276 
 277         struct gvt_mmio_block *mmio_block;
 278         unsigned int num_mmio_block;
 279 
 280         DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
 281         unsigned long num_tracked_mmio;
 282 };
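/*
 * Illustrative sketch (not part of the original file): mmio_attribute holds
 * one byte of F_* flags per 4-byte register, indexed by (offset >> 2), so
 * MMIO tracking code can tag and query a register roughly like:
 *
 *      gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS | F_MODE_MASK;
 *      if (gvt->mmio.mmio_attribute[offset >> 2] & F_RO)
 *              handle_read_only_write(vgpu, offset);   /* hypothetical handler */
 *
 * The intel_gvt_mmio_*() helpers near the end of this header wrap this
 * pattern.
 */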
 283 
 284 struct intel_gvt_firmware {
 285         void *cfg_space;
 286         void *mmio;
 287         bool firmware_loaded;
 288 };
 289 
 290 #define NR_MAX_INTEL_VGPU_TYPES 20
 291 struct intel_vgpu_type {
 292         char name[16];
 293         unsigned int avail_instance;
 294         unsigned int low_gm_size;
 295         unsigned int high_gm_size;
 296         unsigned int fence;
 297         unsigned int weight;
 298         enum intel_vgpu_edid resolution;
 299 };
 300 
 301 struct intel_gvt {
  302         /* GVT-scope lock; protects GVT itself and all resources not yet
  303          * protected by finer-grained locks (the vgpu and scheduler locks).
  304          */
 305         struct mutex lock;
  306         /* Scheduler-scope lock; protects gvt and vgpu scheduling-related data */
 307         struct mutex sched_lock;
 308 
 309         struct drm_i915_private *dev_priv;
 310         struct idr vgpu_idr;    /* vGPU IDR pool */
 311 
 312         struct intel_gvt_device_info device_info;
 313         struct intel_gvt_gm gm;
 314         struct intel_gvt_fence fence;
 315         struct intel_gvt_mmio mmio;
 316         struct intel_gvt_firmware firmware;
 317         struct intel_gvt_irq irq;
 318         struct intel_gvt_gtt gtt;
 319         struct intel_gvt_workload_scheduler scheduler;
 320         struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
 321         DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
 322         struct intel_vgpu_type *types;
 323         unsigned int num_types;
 324         struct intel_vgpu *idle_vgpu;
 325 
 326         struct task_struct *service_thread;
 327         wait_queue_head_t service_thread_wq;
 328 
  329         /* service_request is always manipulated with atomic bit operations,
  330          * so there is no need to take the big gvt lock around it.
  331          */
 332         unsigned long service_request;
 333 
 334         struct {
 335                 struct engine_mmio *mmio;
 336                 int ctx_mmio_count[I915_NUM_ENGINES];
 337                 u32 *tlb_mmio_offset_list;
 338                 u32 tlb_mmio_offset_list_cnt;
 339                 u32 *mocs_mmio_offset_list;
 340                 u32 mocs_mmio_offset_list_cnt;
 341         } engine_mmio_list;
 342 
 343         struct dentry *debugfs_root;
 344 };
 345 
 346 static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
 347 {
 348         return i915->gvt;
 349 }
 350 
 351 enum {
 352         INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
 353 
  354         /* Scheduling triggered by timer */
 355         INTEL_GVT_REQUEST_SCHED = 1,
 356 
  357         /* Scheduling triggered by event */
 358         INTEL_GVT_REQUEST_EVENT_SCHED = 2,
 359 };
 360 
 361 static inline void intel_gvt_request_service(struct intel_gvt *gvt,
 362                 int service)
 363 {
 364         set_bit(service, (void *)&gvt->service_request);
 365         wake_up(&gvt->service_thread_wq);
 366 }
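/*
 * Illustrative sketch (not part of the original file): the service thread
 * is expected to consume these requests with matching atomic bit ops,
 * roughly:
 *
 *      if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
 *                             (void *)&gvt->service_request))
 *              intel_gvt_schedule(gvt);
 */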
 367 
 368 void intel_gvt_free_firmware(struct intel_gvt *gvt);
 369 int intel_gvt_load_firmware(struct intel_gvt *gvt);
 370 
 371 /* Aperture/GM space definitions for GVT device */
 372 #define MB_TO_BYTES(mb) ((mb) << 20ULL)
 373 #define BYTES_TO_MB(b) ((b) >> 20ULL)
 374 
 375 #define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
 376 #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
 377 #define HOST_FENCE 4
 378 
 379 /* Aperture/GM space definitions for GVT device */
 380 #define gvt_aperture_sz(gvt)      (gvt->dev_priv->ggtt.mappable_end)
 381 #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
 382 
 383 #define gvt_ggtt_gm_sz(gvt)       (gvt->dev_priv->ggtt.vm.total)
 384 #define gvt_ggtt_sz(gvt) \
 385         ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
 386 #define gvt_hidden_sz(gvt)        (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
 387 
 388 #define gvt_aperture_gmadr_base(gvt) (0)
 389 #define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
 390                                      + gvt_aperture_sz(gvt) - 1)
 391 
 392 #define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
 393                                     + gvt_aperture_sz(gvt))
 394 #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
 395                                    + gvt_hidden_sz(gvt) - 1)
 396 
 397 #define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences)
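/*
 * Illustrative layout (not part of the original file): with, say, a 256MB
 * mappable aperture and a 4GB GGTT, the macros above describe
 *
 *      aperture (low) GM:  gmadr [0, 256MB - 1]
 *      hidden  (high) GM:  gmadr [256MB, 4GB - 1]
 *
 * i.e. the hidden range simply follows the aperture in the graphics-memory
 * address space.
 */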
 398 
 399 /* Aperture/GM space definitions for vGPU */
 400 #define vgpu_aperture_offset(vgpu)      ((vgpu)->gm.low_gm_node.start)
 401 #define vgpu_hidden_offset(vgpu)        ((vgpu)->gm.high_gm_node.start)
 402 #define vgpu_aperture_sz(vgpu)          ((vgpu)->gm.aperture_sz)
 403 #define vgpu_hidden_sz(vgpu)            ((vgpu)->gm.hidden_sz)
 404 
 405 #define vgpu_aperture_pa_base(vgpu) \
 406         (gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))
 407 
 408 #define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)
 409 
 410 #define vgpu_aperture_pa_end(vgpu) \
 411         (vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
 412 
 413 #define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
 414 #define vgpu_aperture_gmadr_end(vgpu) \
 415         (vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
 416 
 417 #define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
 418 #define vgpu_hidden_gmadr_end(vgpu) \
 419         (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
 420 
 421 #define vgpu_fence_base(vgpu) (vgpu->fence.base)
 422 #define vgpu_fence_sz(vgpu) (vgpu->fence.size)
 423 
 424 struct intel_vgpu_creation_params {
 425         __u64 handle;
 426         __u64 low_gm_sz;  /* in MB */
 427         __u64 high_gm_sz; /* in MB */
 428         __u64 fence_sz;
 429         __u64 resolution;
 430         __s32 primary;
 431         __u64 vgpu_id;
 432 
 433         __u32 weight;
 434 };
 435 
 436 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
 437                               struct intel_vgpu_creation_params *param);
 438 void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
 439 void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
 440 void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 441         u32 fence, u64 value);
 442 
  443 /* Macros for easily accessing vGPU virtual/shadow registers.
  444    Explicitly separate variants for typed MMIO regs and raw offsets. */
 445 #define vgpu_vreg_t(vgpu, reg) \
 446         (*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
 447 #define vgpu_vreg(vgpu, offset) \
 448         (*(u32 *)(vgpu->mmio.vreg + (offset)))
 449 #define vgpu_vreg64_t(vgpu, reg) \
 450         (*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
 451 #define vgpu_vreg64(vgpu, offset) \
 452         (*(u64 *)(vgpu->mmio.vreg + (offset)))
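/*
 * Illustrative sketch (not part of the original file): MMIO handlers read
 * and write the vGPU's virtual register file through these macros, e.g.:
 *
 *      u32 ctl = vgpu_vreg(vgpu, offset);
 *      vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
 */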
 453 
 454 #define for_each_active_vgpu(gvt, vgpu, id) \
 455         idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
 456                 for_each_if(vgpu->active)
 457 
 458 static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
 459                                             u32 offset, u32 val, bool low)
 460 {
 461         u32 *pval;
 462 
  463         /* BAR offset should be 32-bit aligned */
 464         offset = rounddown(offset, 4);
 465         pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
 466 
 467         if (low) {
 468                 /*
  469                  * Only update bits 31-4;
  470                  * leave bits 3-0 unchanged.
 471                  */
 472                 *pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
 473         } else {
 474                 *pval = val;
 475         }
 476 }
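/*
 * Illustrative sketch (not part of the original file): a 64-bit BAR spans
 * two config-space dwords, so config-space write emulation calls this
 * helper once per dword, roughly:
 *
 *      intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, lo_val, true);
 *      intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_3, hi_val, false);
 */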
 477 
 478 int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
 479 void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
 480 
 481 struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
 482 void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
 483 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 484                                          struct intel_vgpu_type *type);
 485 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 486 void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
 487 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 488                                  intel_engine_mask_t engine_mask);
 489 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
 490 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
 491 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
 492 
 493 /* validating GM functions */
 494 #define vgpu_gmadr_is_aperture(vgpu, gmadr) \
 495         ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
 496          (gmadr <= vgpu_aperture_gmadr_end(vgpu)))
 497 
 498 #define vgpu_gmadr_is_hidden(vgpu, gmadr) \
 499         ((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
 500          (gmadr <= vgpu_hidden_gmadr_end(vgpu)))
 501 
 502 #define vgpu_gmadr_is_valid(vgpu, gmadr) \
 503          ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
 504           (vgpu_gmadr_is_hidden(vgpu, gmadr))))
 505 
 506 #define gvt_gmadr_is_aperture(gvt, gmadr) \
 507          ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
 508           (gmadr <= gvt_aperture_gmadr_end(gvt)))
 509 
 510 #define gvt_gmadr_is_hidden(gvt, gmadr) \
 511           ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
 512            (gmadr <= gvt_hidden_gmadr_end(gvt)))
 513 
 514 #define gvt_gmadr_is_valid(gvt, gmadr) \
 515           (gvt_gmadr_is_aperture(gvt, gmadr) || \
 516             gvt_gmadr_is_hidden(gvt, gmadr))
 517 
 518 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
 519 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
 520 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
 521 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
 522                              unsigned long *h_index);
 523 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
 524                              unsigned long *g_index);
 525 
 526 void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
 527                 bool primary);
 528 void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
 529 
 530 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
 531                 void *p_data, unsigned int bytes);
 532 
 533 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 534                 void *p_data, unsigned int bytes);
 535 
 536 void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);
 537 
 538 static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
 539 {
  540         /* BARs are 64-bit. */
 541         return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
 542                         PCI_BASE_ADDRESS_MEM_MASK;
 543 }
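/*
 * Illustrative sketch (not part of the original file): callers pass the
 * config-space offset of the BAR of interest, e.g. to test whether a guest
 * physical address falls inside the virtual MMIO BAR:
 *
 *      u64 base = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
 *      bool hit = gpa >= base && gpa < base + vgpu->cfg_space.bar[0].size;
 */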
 544 
 545 void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
 546 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
 547 int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);
 548 
 549 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
 550 void populate_pvinfo_page(struct intel_vgpu *vgpu);
 551 
 552 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
 553 void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
 554 
 555 struct intel_gvt_ops {
 556         int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
 557                                 unsigned int);
 558         int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
 559                                 unsigned int);
 560         int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
 561                                 unsigned int);
 562         int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
 563                                 unsigned int);
 564         struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
 565                                 struct intel_vgpu_type *);
 566         void (*vgpu_destroy)(struct intel_vgpu *vgpu);
 567         void (*vgpu_release)(struct intel_vgpu *vgpu);
 568         void (*vgpu_reset)(struct intel_vgpu *);
 569         void (*vgpu_activate)(struct intel_vgpu *);
 570         void (*vgpu_deactivate)(struct intel_vgpu *);
 571         struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
 572                         const char *name);
 573         bool (*get_gvt_attrs)(struct attribute ***type_attrs,
 574                         struct attribute_group ***intel_vgpu_type_groups);
 575         int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
 576         int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
 577         int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
 578                                      unsigned int);
 579         void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
 580 };
 581 
 582 
 583 enum {
 584         GVT_FAILSAFE_UNSUPPORTED_GUEST,
 585         GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
 586         GVT_FAILSAFE_GUEST_ERR,
 587 };
 588 
 589 static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
 590 {
 591         intel_runtime_pm_get(&dev_priv->runtime_pm);
 592 }
 593 
 594 static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
 595 {
 596         intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 597 }
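/*
 * Illustrative sketch (not part of the original file): these helpers
 * bracket direct hardware MMIO accesses so the device cannot runtime
 * suspend in between, e.g.:
 *
 *      mmio_hw_access_pre(dev_priv);
 *      vgpu_vreg_t(vgpu, reg) = I915_READ(reg);
 *      mmio_hw_access_post(dev_priv);
 */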
 598 
 599 /**
  600  * intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
 601  * @gvt: a GVT device
 602  * @offset: register offset
 603  *
 604  */
 605 static inline void intel_gvt_mmio_set_accessed(
 606                         struct intel_gvt *gvt, unsigned int offset)
 607 {
 608         gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
 609 }
 610 
 611 /**
  612  * intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed by GPU commands
 613  * @gvt: a GVT device
 614  * @offset: register offset
 615  *
 616  */
 617 static inline bool intel_gvt_mmio_is_cmd_access(
 618                         struct intel_gvt *gvt, unsigned int offset)
 619 {
 620         return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
 621 }
 622 
 623 /**
  624  * intel_gvt_mmio_is_unalign - check if an MMIO register can be accessed with an unaligned address
 625  * @gvt: a GVT device
 626  * @offset: register offset
 627  *
 628  */
 629 static inline bool intel_gvt_mmio_is_unalign(
 630                         struct intel_gvt *gvt, unsigned int offset)
 631 {
 632         return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
 633 }
 634 
 635 /**
  636  * intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed by a GPU command
 637  * @gvt: a GVT device
 638  * @offset: register offset
 639  *
 640  */
 641 static inline void intel_gvt_mmio_set_cmd_accessed(
 642                         struct intel_gvt *gvt, unsigned int offset)
 643 {
 644         gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
 645 }
 646 
 647 /**
  648  * intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
 649  * @gvt: a GVT device
 650  * @offset: register offset
 651  *
 652  * Returns:
  653  * True if the MMIO register has a mode mask in its upper 16 bits, false otherwise.
 654  *
 655  */
 656 static inline bool intel_gvt_mmio_has_mode_mask(
 657                         struct intel_gvt *gvt, unsigned int offset)
 658 {
 659         return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
 660 }
 661 
 662 /**
  663  * intel_gvt_mmio_is_in_ctx - check if an MMIO register has the in-context flag set
 664  * @gvt: a GVT device
 665  * @offset: register offset
 666  *
 667  * Returns:
  668  * True if the MMIO register has the in-context flag set, false otherwise.
 669  *
 670  */
 671 static inline bool intel_gvt_mmio_is_in_ctx(
 672                         struct intel_gvt *gvt, unsigned int offset)
 673 {
 674         return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
 675 }
 676 
 677 /**
  678  * intel_gvt_mmio_set_in_ctx - mark an MMIO register as saved/restored in the logical context
 679  * @gvt: a GVT device
 680  * @offset: register offset
 681  *
 682  */
 683 static inline void intel_gvt_mmio_set_in_ctx(
 684                         struct intel_gvt *gvt, unsigned int offset)
 685 {
 686         gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
 687 }
 688 
 689 void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
 690 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
 691 void intel_gvt_debugfs_init(struct intel_gvt *gvt);
 692 void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
 693 
 694 
 695 #include "trace.h"
 696 #include "mpt.h"
 697 
 698 #endif
