root/drivers/gpu/drm/i915/gvt/opregion.c


DEFINITIONS

This source file includes the following definitions.
  1. virt_vbt_generation
  2. intel_vgpu_init_opregion
  3. map_vgpu_opregion
  4. intel_vgpu_opregion_base_write_handler
  5. intel_vgpu_clean_opregion
  6. opregion_func_name
  7. opregion_subfunc_name
  8. querying_capabilities
  9. intel_vgpu_emulate_opregion_request

   1 /*
   2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice (including the next
  12  * paragraph) shall be included in all copies or substantial portions of the
  13  * Software.
  14  *
  15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  21  * SOFTWARE.
  22  */
  23 
  24 #include <linux/acpi.h>
  25 #include "i915_drv.h"
  26 #include "gvt.h"
  27 
  28 /*
  29  * Note: this is only for GVT-g virtual VBT generation; no other code
  30  * should define _INTEL_BIOS_PRIVATE and include intel_vbt_defs.h this way.
  31  */
  32 #define _INTEL_BIOS_PRIVATE
  33 #include "display/intel_vbt_defs.h"
  34 
  35 #define OPREGION_SIGNATURE "IntelGraphicsMem"
  36 #define MBOX_VBT      (1<<3)
  37 
  38 /* device handle */
  39 #define DEVICE_TYPE_CRT    0x01
  40 #define DEVICE_TYPE_EFP1   0x04
  41 #define DEVICE_TYPE_EFP2   0x40
  42 #define DEVICE_TYPE_EFP3   0x20
  43 #define DEVICE_TYPE_EFP4   0x10
  44 
  45 struct opregion_header {
  46         u8 signature[16];
  47         u32 size;
  48         u32 opregion_ver;
  49         u8 bios_ver[32];
  50         u8 vbios_ver[16];
  51         u8 driver_ver[16];
  52         u32 mboxes;
  53         u32 driver_model;
  54         u32 pcon;
  55         u8 dver[32];
  56         u8 rsvd[124];
  57 } __packed;
  58 
  59 struct bdb_data_header {
  60         u8 id;
  61         u16 size; /* data size */
  62 } __packed;
  63 
  64 /* To support Windows guests with an OpRegion, hardcode the emulated BDB
  65  * header version as '186'; the corresponding child_device_config length
  66  * is then '33' rather than '38' (see the illustrative size check below).
  67  */
  68 struct efp_child_device_config {
  69         u16 handle;
  70         u16 device_type;
  71         u16 device_class;
  72         u8 i2c_speed;
  73         u8 dp_onboard_redriver; /* 158 */
  74         u8 dp_ondock_redriver; /* 158 */
  75         u8 hdmi_level_shifter_value:4; /* 169 */
  76         u8 hdmi_max_data_rate:4; /* 204 */
  77         u16 dtd_buf_ptr; /* 161 */
  78         u8 edidless_efp:1; /* 161 */
  79         u8 compression_enable:1; /* 198 */
  80         u8 compression_method:1; /* 198 */
  81         u8 ganged_edp:1; /* 202 */
  82         u8 skip0:4;
  83         u8 compression_structure_index:4; /* 198 */
  84         u8 skip1:4;
  85         u8 slave_port; /*  202 */
  86         u8 skip2;
  87         u8 dvo_port;
  88         u8 i2c_pin; /* for add-in card */
  89         u8 slave_addr; /* for add-in card */
  90         u8 ddc_pin;
  91         u16 edid_ptr;
  92         u8 dvo_config;
  93         u8 efp_docked_port:1; /* 158 */
  94         u8 lane_reversal:1; /* 184 */
  95         u8 onboard_lspcon:1; /* 192 */
  96         u8 iboost_enable:1; /* 196 */
  97         u8 hpd_invert:1; /* BXT 196 */
  98         u8 slip3:3;
  99         u8 hdmi_compat:1;
 100         u8 dp_compat:1;
 101         u8 tmds_compat:1;
 102         u8 skip4:5;
 103         u8 aux_channel;
 104         u8 dongle_detect;
 105         u8 pipe_cap:2;
 106         u8 sdvo_stall:1; /* 158 */
 107         u8 hpd_status:2;
 108         u8 integrated_encoder:1;
 109         u8 skip5:2;
 110         u8 dvo_wiring;
 111         u8 mipi_bridge_type; /* 171 */
 112         u16 device_class_ext;
 113         u8 dvo_function;
 114 } __packed;
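/*
 * Illustrative compile-time sanity check (a sketch, assuming static_assert
 * from <linux/build_bug.h> is usable at this point): the BDB version 186
 * comment above relies on this layout being exactly 33 bytes.
 */
static_assert(sizeof(struct efp_child_device_config) == 33);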
 115 
 116 struct vbt {
 117         /* header->bdb_offset point to bdb_header offset */
 118         struct vbt_header header;
 119         struct bdb_header bdb_header;
 120 
 121         struct bdb_data_header general_features_header;
 122         struct bdb_general_features general_features;
 123 
 124         struct bdb_data_header general_definitions_header;
 125         struct bdb_general_definitions general_definitions;
 126 
 127         struct efp_child_device_config child0;
 128         struct efp_child_device_config child1;
 129         struct efp_child_device_config child2;
 130         struct efp_child_device_config child3;
 131 
 132         struct bdb_data_header driver_features_header;
 133         struct bdb_driver_features driver_features;
 134 };
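/*
 * Illustrative check (a sketch, assuming static_assert is usable here): the
 * virtual VBT built by virt_vbt_generation() is copied to
 * INTEL_GVT_OPREGION_VBT_OFFSET inside the emulated OpRegion, so it must fit
 * in the space that remains after that offset.
 */
static_assert(sizeof(struct vbt) <=
	      INTEL_GVT_OPREGION_SIZE - INTEL_GVT_OPREGION_VBT_OFFSET);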
 135 
 136 static void virt_vbt_generation(struct vbt *v)
 137 {
 138         int num_child;
 139 
 140         memset(v, 0, sizeof(struct vbt));
 141 
 142         v->header.signature[0] = '$';
 143         v->header.signature[1] = 'V';
 144         v->header.signature[2] = 'B';
 145         v->header.signature[3] = 'T';
 146 
 147         /* some features depend on the VBT version */
 148         v->header.version = 155;
 149         v->header.header_size = sizeof(v->header);
 150         v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
 151         v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 152 
 153         strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
 154         v->bdb_header.version = 186; /* child_dev_size = 33 */
 155         v->bdb_header.header_size = sizeof(v->bdb_header);
 156 
 157         v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
 158                 - sizeof(struct bdb_header);
 159 
 160         /* general features */
 161         v->general_features_header.id = BDB_GENERAL_FEATURES;
 162         v->general_features_header.size = sizeof(struct bdb_general_features);
 163         v->general_features.int_crt_support = 0;
 164         v->general_features.int_tv_support = 0;
 165 
 166         /* child device */
 167         num_child = 4; /* each port has one child */
 168         v->general_definitions.child_dev_size =
 169                 sizeof(struct efp_child_device_config);
 170         v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
 171         /* size will include child devices */
 172         v->general_definitions_header.size =
 173                 sizeof(struct bdb_general_definitions) +
 174                         num_child * v->general_definitions.child_dev_size;
 175 
 176         /* portA */
 177         v->child0.handle = DEVICE_TYPE_EFP1;
 178         v->child0.device_type = DEVICE_TYPE_DP;
 179         v->child0.dvo_port = DVO_PORT_DPA;
 180         v->child0.aux_channel = DP_AUX_A;
 181         v->child0.dp_compat = true;
 182         v->child0.integrated_encoder = true;
 183 
 184         /* portB */
 185         v->child1.handle = DEVICE_TYPE_EFP2;
 186         v->child1.device_type = DEVICE_TYPE_DP;
 187         v->child1.dvo_port = DVO_PORT_DPB;
 188         v->child1.aux_channel = DP_AUX_B;
 189         v->child1.dp_compat = true;
 190         v->child1.integrated_encoder = true;
 191 
 192         /* portC */
 193         v->child2.handle = DEVICE_TYPE_EFP3;
 194         v->child2.device_type = DEVICE_TYPE_DP;
 195         v->child2.dvo_port = DVO_PORT_DPC;
 196         v->child2.aux_channel = DP_AUX_C;
 197         v->child2.dp_compat = true;
 198         v->child2.integrated_encoder = true;
 199 
 200         /* portD */
 201         v->child3.handle = DEVICE_TYPE_EFP4;
 202         v->child3.device_type = DEVICE_TYPE_DP;
 203         v->child3.dvo_port = DVO_PORT_DPD;
 204         v->child3.aux_channel = DP_AUX_D;
 205         v->child3.dp_compat = true;
 206         v->child3.integrated_encoder = true;
 207 
 208         /* driver features */
 209         v->driver_features_header.id = BDB_DRIVER_FEATURES;
 210         v->driver_features_header.size = sizeof(struct bdb_driver_features);
 211         v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS;
 212 }
 213 
 214 /**
 215  * intel_vgpu_init_opregion - allocate and build the emulated OpRegion
 216  * @vgpu: a vGPU
 217  *
 218  * Returns:
 219  * Zero on success, negative error code if failed.
 220  */
 221 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
 222 {
 223         u8 *buf;
 224         struct opregion_header *header;
 225         struct vbt v;
 226         const char opregion_signature[16] = OPREGION_SIGNATURE;
 227 
 228         gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
 229         vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
 230                         __GFP_ZERO,
 231                         get_order(INTEL_GVT_OPREGION_SIZE));
 232         if (!vgpu_opregion(vgpu)->va) {
 233                 gvt_err("fail to get memory for vgpu virt opregion\n");
 234                 return -ENOMEM;
 235         }
 236 
 237         /* emulated opregion with VBT mailbox only */
 238         buf = (u8 *)vgpu_opregion(vgpu)->va;
 239         header = (struct opregion_header *)buf;
 240         memcpy(header->signature, opregion_signature,
 241                sizeof(opregion_signature));
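        /*
         * The OpRegion header 'size' field is expressed in KiB, so 0x8 below
         * corresponds to the 8 KiB emulated region (INTEL_GVT_OPREGION_SIZE).
         */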
 242         header->size = 0x8;
 243         header->opregion_ver = 0x02000000;
 244         header->mboxes = MBOX_VBT;
 245 
 246         /* For an unknown reason the value in the LID field is incorrect,
 247          * which blocks the Windows guest, so work around it by forcing
 248          * it to "OPEN".
 249          */
 250         buf[INTEL_GVT_OPREGION_CLID] = 0x3;
 251 
 252         /* emulated vbt from virt vbt generation */
 253         virt_vbt_generation(&v);
 254         memcpy(buf + INTEL_GVT_OPREGION_VBT_OFFSET, &v, sizeof(struct vbt));
 255 
 256         return 0;
 257 }
 258 
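/*
 * Map or unmap the emulated OpRegion in the guest: for each of the
 * INTEL_GVT_OPREGION_PAGES pages, translate the host virtual address to an
 * MFN and ask the hypervisor to (un)map it at the guest frame number
 * recorded in vgpu_opregion(vgpu)->gfn[].
 */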
 259 static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
 260 {
 261         u64 mfn;
 262         int i, ret;
 263 
 264         for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
 265                 mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
 266                         + i * PAGE_SIZE);
 267                 if (mfn == INTEL_GVT_INVALID_ADDR) {
 268                         gvt_vgpu_err("fail to get MFN from VA\n");
 269                         return -EINVAL;
 270                 }
 271                 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
 272                                 vgpu_opregion(vgpu)->gfn[i],
 273                                 mfn, 1, map);
 274                 if (ret) {
 275                         gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
 276                                 ret);
 277                         return ret;
 278                 }
 279         }
 280 
 281         vgpu_opregion(vgpu)->mapped = map;
 282 
 283         return 0;
 284 }
 285 
 286 /**
 287  * intel_vgpu_opregion_base_write_handler - Opregion base register write handler
 288  *
 289  * @vgpu: a vGPU
 290  * @gpa: guest physical address of opregion
 291  *
 292  * Returns:
 293  * Zero on success, negative error code if failed.
 294  */
 295 int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
 296 {
 297 
 298         int i, ret = 0;
 299 
 300         gvt_dbg_core("emulate opregion from kernel\n");
 301 
 302         switch (intel_gvt_host.hypervisor_type) {
 303         case INTEL_GVT_HYPERVISOR_KVM:
 304                 for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
 305                         vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
 306                 break;
 307         case INTEL_GVT_HYPERVISOR_XEN:
 308                 /*
 309                  * A Windows guest on XenGT writes this register twice: the
 310                  * Xen hvmloader and the Windows graphics driver each write it.
 311                  */
 312                 if (vgpu_opregion(vgpu)->mapped)
 313                         map_vgpu_opregion(vgpu, false);
 314 
 315                 for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
 316                         vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
 317 
 318                 ret = map_vgpu_opregion(vgpu, true);
 319                 break;
 320         default:
 321                 ret = -EINVAL;
 322                 gvt_vgpu_err("not supported hypervisor\n");
 323         }
 324 
 325         return ret;
 326 }
 327 
 328 /**
 329  * intel_vgpu_clean_opregion - free the resources of the emulated OpRegion
 330  * @vgpu: a vGPU
 331  *
 332  */
 333 void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
 334 {
 335         gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
 336 
 337         if (!vgpu_opregion(vgpu)->va)
 338                 return;
 339 
 340         if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
 341                 if (vgpu_opregion(vgpu)->mapped)
 342                         map_vgpu_opregion(vgpu, false);
 343         } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
 344                 /* Guest opregion is released by VFIO */
 345         }
 346         free_pages((unsigned long)vgpu_opregion(vgpu)->va,
 347                    get_order(INTEL_GVT_OPREGION_SIZE));
 348 
 349         vgpu_opregion(vgpu)->va = NULL;
 350 
 351 }
 352 
 353 
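/*
 * Helpers that extract the function and sub-function codes from a SWSCI
 * SCIC register value using the OPREGION_SCIC_*_MASK/SHIFT definitions.
 */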
 354 #define GVT_OPREGION_FUNC(scic)                                 \
 355         ({                                                      \
 356          u32 __ret;                                             \
 357          __ret = (scic & OPREGION_SCIC_FUNC_MASK) >>            \
 358          OPREGION_SCIC_FUNC_SHIFT;                              \
 359          __ret;                                                 \
 360          })
 361 
 362 #define GVT_OPREGION_SUBFUNC(scic)                              \
 363         ({                                                      \
 364          u32 __ret;                                             \
 365          __ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >>         \
 366          OPREGION_SCIC_SUBFUNC_SHIFT;                           \
 367          __ret;                                                 \
 368          })
 369 
 370 static const char *opregion_func_name(u32 func)
 371 {
 372         const char *name = NULL;
 373 
 374         switch (func) {
 375         case 0 ... 3:
 376         case 5:
 377         case 7 ... 15:
 378                 name = "Reserved";
 379                 break;
 380 
 381         case 4:
 382                 name = "Get BIOS Data";
 383                 break;
 384 
 385         case 6:
 386                 name = "System BIOS Callbacks";
 387                 break;
 388 
 389         default:
 390                 name = "Unknown";
 391                 break;
 392         }
 393         return name;
 394 }
 395 
 396 static const char *opregion_subfunc_name(u32 subfunc)
 397 {
 398         const char *name = NULL;
 399 
 400         switch (subfunc) {
 401         case 0:
 402                 name = "Supported Calls";
 403                 break;
 404 
 405         case 1:
 406                 name = "Requested Callbacks";
 407                 break;
 408 
 409         case 2 ... 3:
 410         case 8 ... 9:
 411                 name = "Reserved";
 412                 break;
 413 
 414         case 5:
 415                 name = "Boot Display";
 416                 break;
 417 
 418         case 6:
 419                 name = "TV-Standard/Video-Connector";
 420                 break;
 421 
 422         case 7:
 423                 name = "Internal Graphics";
 424                 break;
 425 
 426         case 10:
 427                 name = "Spread Spectrum Clocks";
 428                 break;
 429 
 430         case 11:
 431                 name = "Get AKSV";
 432                 break;
 433 
 434         default:
 435                 name = "Unknown";
 436                 break;
 437         }
 438         return name;
 439 }
 440 
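/*
 * Return true when the SCIC value only queries capabilities ("Get BIOS Data"
 * with supported calls/requested callbacks, or "System BIOS Callbacks" with
 * supported calls). Only these requests are answered (with an empty
 * capability set); all other SWSCI services are reported as unsupported.
 */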
 441 static bool querying_capabilities(u32 scic)
 442 {
 443         u32 func, subfunc;
 444 
 445         func = GVT_OPREGION_FUNC(scic);
 446         subfunc = GVT_OPREGION_SUBFUNC(scic);
 447 
 448         if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
 449                 subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)
 450                 || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
 451                  subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS)
 452                 || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
 453                  subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
 454                 return true;
 455         }
 456         return false;
 457 }
 458 
 459 /**
 460  * intel_vgpu_emulate_opregion_request - emulate an OpRegion SWSCI request
 461  * @vgpu: a vGPU
 462  * @swsci: SWSCI request
 463  *
 464  * Returns:
 465  * Zero on success, negative error code if failed
 466  */
 467 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 468 {
 469         u32 scic, parm;
 470         u32 func, subfunc;
 471         u64 scic_pa = 0, parm_pa = 0;
 472         int ret;
 473 
 474         switch (intel_gvt_host.hypervisor_type) {
 475         case INTEL_GVT_HYPERVISOR_XEN:
 476                 scic = *((u32 *)vgpu_opregion(vgpu)->va +
 477                                         INTEL_GVT_OPREGION_SCIC);
 478                 parm = *((u32 *)vgpu_opregion(vgpu)->va +
 479                                         INTEL_GVT_OPREGION_PARM);
 480                 break;
 481         case INTEL_GVT_HYPERVISOR_KVM:
 482                 scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
 483                                         INTEL_GVT_OPREGION_SCIC;
 484                 parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
 485                                         INTEL_GVT_OPREGION_PARM;
 486 
 487                 ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
 488                                                     &scic, sizeof(scic));
 489                 if (ret) {
 490                         gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
 491                                 ret, scic_pa, sizeof(scic));
 492                         return ret;
 493                 }
 494 
 495                 ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
 496                                                     &parm, sizeof(parm));
 497                 if (ret) {
 498                         gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
 499                                 ret, parm_pa, sizeof(parm));
 500                         return ret;
 501                 }
 502 
 503                 break;
 504         default:
 505                 gvt_vgpu_err("not supported hypervisor\n");
 506                 return -EINVAL;
 507         }
 508 
 509         if (!(swsci & SWSCI_SCI_SELECT)) {
 510                 gvt_vgpu_err("requesting SMI service\n");
 511                 return 0;
 512         }
 513         /* ignore non 0->1 transitions */
 514         if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
 515                                 & SWSCI_SCI_TRIGGER) ||
 516                         !(swsci & SWSCI_SCI_TRIGGER)) {
 517                 return 0;
 518         }
 519 
 520         func = GVT_OPREGION_FUNC(scic);
 521         subfunc = GVT_OPREGION_SUBFUNC(scic);
 522         if (!querying_capabilities(scic)) {
 523                 gvt_vgpu_err("requesting runtime service: func \"%s\","
 524                                 " subfunc \"%s\"\n",
 525                                 opregion_func_name(func),
 526                                 opregion_subfunc_name(subfunc));
 527                 /*
 528                  * emulate exit status of function call, '0' means
 529                  * "failure, generic, unsupported or unknown cause"
 530                  */
 531                 scic &= ~OPREGION_SCIC_EXIT_MASK;
 532                 goto out;
 533         }
 534 
 535         scic = 0;
 536         parm = 0;
 537 
 538 out:
 539         switch (intel_gvt_host.hypervisor_type) {
 540         case INTEL_GVT_HYPERVISOR_XEN:
 541                 *((u32 *)vgpu_opregion(vgpu)->va +
 542                                         INTEL_GVT_OPREGION_SCIC) = scic;
 543                 *((u32 *)vgpu_opregion(vgpu)->va +
 544                                         INTEL_GVT_OPREGION_PARM) = parm;
 545                 break;
 546         case INTEL_GVT_HYPERVISOR_KVM:
 547                 ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
 548                                                     &scic, sizeof(scic));
 549                 if (ret) {
 550                         gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
 551                                 ret, scic_pa, sizeof(scic));
 552                         return ret;
 553                 }
 554 
 555                 ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
 556                                                     &parm, sizeof(parm));
 557                 if (ret) {
 558                         gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
 559                                 ret, parm_pa, sizeof(parm));
 560                         return ret;
 561                 }
 562 
 563                 break;
 564         default:
 565                 gvt_vgpu_err("not supported hypervisor\n");
 566                 return -EINVAL;
 567         }
 568 
 569         return 0;
 570 }
