root/drivers/gpu/drm/vmwgfx/vmwgfx_context.c


DEFINITIONS

This source file includes the following definitions.
  1. vmw_context_cotables_unref
  2. vmw_hw_context_destroy
  3. vmw_gb_context_init
  4. vmw_context_init
  5. vmw_gb_context_create
  6. vmw_gb_context_bind
  7. vmw_gb_context_unbind
  8. vmw_gb_context_destroy
  9. vmw_dx_context_create
  10. vmw_dx_context_bind
  11. vmw_dx_context_scrub_cotables
  12. vmw_dx_context_unbind
  13. vmw_dx_context_destroy
  14. vmw_user_context_base_to_res
  15. vmw_user_context_free
  16. vmw_user_context_base_release
  17. vmw_context_destroy_ioctl
  18. vmw_context_define
  19. vmw_context_define_ioctl
  20. vmw_extended_context_define_ioctl
  21. vmw_context_binding_list
  22. vmw_context_res_man
  23. vmw_context_cotable
  24. vmw_context_binding_state
  25. vmw_context_bind_dx_query
  26. vmw_context_get_dx_query_mob

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

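/**
 * struct vmw_user_context - Representation of a user-space visible context
 *
 * @base: TTM base object, making the context visible to user-space.
 * @res: The embedded struct vmw_resource.
 * @cbs: Context binding state tracker.
 * @man: Manager for resources created through the command buffer, only
 * set up when the device supports MOBs.
 * @cotables: Context object tables of a DX context. Unused for other
 * context types.
 * @cotable_lock: Protects @cotables against concurrent teardown.
 * @dx_query_mob: Buffer object holding the DX query results of this
 * context, if any.
 */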
struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
        struct vmw_ctx_binding_state *cbs;
        struct vmw_cmdbuf_res_manager *man;
        struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
        spinlock_t cotable_lock;
        struct vmw_buffer_object *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
        .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
        &user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "legacy contexts",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = true,
        .may_evict = true,
        .prio = 3,
        .dirty_prio = 3,
        .type_name = "guest backed contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_context_create,
        .destroy = vmw_gb_context_destroy,
        .bind = vmw_gb_context_bind,
        .unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
        .res_type = vmw_res_dx_context,
        .needs_backup = true,
        .may_evict = true,
        .prio = 3,
        .dirty_prio = 3,
        .type_name = "dx contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_dx_context_create,
        .destroy = vmw_dx_context_destroy,
        .bind = vmw_dx_context_bind,
        .unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */

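/**
 * vmw_context_cotables_unref - Drop the context's references on its cotables
 *
 * @uctx: Pointer to the user context.
 *
 * Each cotable pointer is cleared under @cotable_lock, so that concurrent
 * readers never see a stale resource, and the reference is then dropped
 * outside the lock.
 */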
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
        struct vmw_resource *res;
        int i;

        for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[i];
                uctx->cotables[i] = NULL;
                spin_unlock(&uctx->cotable_lock);

                if (res)
                        vmw_resource_unreference(&res);
        }
}

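/**
 * vmw_hw_context_destroy - The hw_destroy callback for context resources
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts, tears down the command buffer resource
 * manager, kills all bindings and calls the resource type's destroy
 * function. For legacy contexts, emits a SVGA_3D_CMD_CONTEXT_DESTROY
 * command directly.
 */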
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;

        if (res->func->destroy == vmw_gb_context_destroy ||
            res->func->destroy == vmw_dx_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                vmw_cmdbuf_res_man_destroy(uctx->man);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_state_kill(uctx->cbs);
                (void) res->func->destroy(res);
                mutex_unlock(&dev_priv->binding_mutex);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                vmw_context_cotables_unref(uctx);
                return;
        }

        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return;

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_dec(dev_priv);
}

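/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize a DX rather than a guest-backed context.
 * @res: Pointer to the already allocated resource to initialize.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 *
 * Sets up the backup (MOB) size, the command buffer resource manager, the
 * binding state tracker and, for DX contexts, the cotable resources.
 */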
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                               bool dx,
                               struct vmw_resource *res,
                               void (*res_free)(struct vmw_resource *res))
{
        int ret, i;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
                            SVGA3D_CONTEXT_DATA_SIZE);
        ret = vmw_resource_init(dev_priv, res, true,
                                res_free,
                                dx ? &vmw_dx_context_func :
                                &vmw_gb_context_func);
        if (unlikely(ret != 0))
                goto out_err;

        if (dev_priv->has_mob) {
                uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
                if (IS_ERR(uctx->man)) {
                        ret = PTR_ERR(uctx->man);
                        uctx->man = NULL;
                        goto out_err;
                }
        }

        uctx->cbs = vmw_binding_state_alloc(dev_priv);
        if (IS_ERR(uctx->cbs)) {
                ret = PTR_ERR(uctx->cbs);
                goto out_err;
        }

        spin_lock_init(&uctx->cotable_lock);

        if (dx) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
                                                              &uctx->res, i);
                        if (IS_ERR(uctx->cotables[i])) {
                                ret = PTR_ERR(uctx->cotables[i]);
                                goto out_cotables;
                        }
                }
        }

        res->hw_destroy = vmw_hw_context_destroy;
        return 0;

out_cotables:
        vmw_context_cotables_unref(uctx);
out_err:
        if (res_free)
                res_free(res);
        else
                kfree(res);
        return ret;
}

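/**
 * vmw_context_init - Initialize a context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the already allocated resource to initialize.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 * @dx: Whether to initialize a DX context.
 *
 * Dispatches to vmw_gb_context_init() on MOB-capable devices. Otherwise
 * allocates a device context id and emits a legacy
 * SVGA_3D_CMD_CONTEXT_DEFINE command.
 */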
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free)(struct vmw_resource *res),
                            bool dx)
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        if (dev_priv->has_mob)
                return vmw_gb_context_init(dev_priv, dx, res, res_free);

        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);
        res->hw_destroy = vmw_hw_context_destroy;
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}


/*
 * GB context.
 */

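/**
 * vmw_gb_context_create - The create callback for guest-backed contexts
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a device context id and emits a SVGA_3D_CMD_DEFINE_GB_CONTEXT
 * command, unless the resource already has a valid id.
 */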
static int vmw_gb_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

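/**
 * vmw_gb_context_bind - The bind callback for guest-backed contexts
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the already reserved backup MOB.
 *
 * Emits a SVGA_3D_CMD_BIND_GB_CONTEXT command binding the context to its
 * backup MOB. The command's validContents field is set from
 * @res->backup_dirty, telling the device whether the MOB already holds
 * valid context state.
 */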
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

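/**
 * vmw_gb_context_unbind - The unbind callback for guest-backed contexts
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read the context state back into the MOB before
 * unbinding.
 * @val_buf: Validation buffer holding the backup MOB.
 *
 * Scrubs all bindings, optionally reads the context state back into the
 * MOB, binds the context to SVGA3D_INVALID_ID, and finally fences the
 * backup buffer so it isn't moved or reused while the device may still be
 * accessing it.
 */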
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_binding_state_scrub(uctx->cbs);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_bo_fence_single(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

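/**
 * vmw_gb_context_destroy - The destroy callback for guest-backed contexts
 *
 * @res: Pointer to the context resource.
 *
 * Emits a SVGA_3D_CMD_DESTROY_GB_CONTEXT command and releases the device
 * context id. Also invalidates the cached query context id if it refers
 * to this context.
 */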
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * DX context.
 */

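/**
 * vmw_dx_context_create - The create callback for DX contexts
 *
 * @res: Pointer to the context resource.
 *
 * Allocates a device context id and emits a SVGA_3D_CMD_DX_DEFINE_CONTEXT
 * command, unless the resource already has a valid id.
 */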
static int vmw_dx_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

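/**
 * vmw_dx_context_bind - The bind callback for DX contexts
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the already reserved backup MOB.
 *
 * Emits a SVGA_3D_CMD_DX_BIND_CONTEXT command binding the context to its
 * backup MOB.
 */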
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * Cotables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so all bindings are scrubbed
 * first so that they don't have to be scrubbed later with an invalid
 * context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                   bool readback)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        int i;

        vmw_binding_state_scrub(uctx->cbs);
        for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                struct vmw_resource *res;

                /* Avoid racing with ongoing cotable destruction. */
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[vmw_cotable_scrub_order[i]];
                if (res)
                        res = vmw_resource_reference_unless_doomed(res);
                spin_unlock(&uctx->cotable_lock);
                if (!res)
                        continue;

                WARN_ON(vmw_cotable_scrub(res, readback));
                vmw_resource_unreference(&res);
        }
}

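/**
 * vmw_dx_context_unbind - The unbind callback for DX contexts
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read the context state back into the MOB before
 * unbinding.
 * @val_buf: Validation buffer holding the backup MOB.
 *
 * Like vmw_gb_context_unbind(), but additionally scrubs the cotables and,
 * on readback, saves any pending DX query states of this context.
 */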
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_dx_context_scrub_cotables(res, readback);

        if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
            readback) {
                WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
                if (vmw_query_readback_all(uctx->dx_query_mob))
                        DRM_ERROR("Failed to read back query states\n");
        }

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_bo_fence_single(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

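/**
 * vmw_dx_context_destroy - The destroy callback for DX contexts
 *
 * @res: Pointer to the context resource.
 *
 * Emits a SVGA_3D_CMD_DX_DESTROY_CONTEXT command and releases the device
 * context id, invalidating the cached query context id if needed.
 */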
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDestroyContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_context, base)->res);
}

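/**
 * vmw_user_context_free - The resource destructor for user contexts
 *
 * @res: Pointer to the context resource.
 *
 * Frees the binding state, drops any DX query MOB association, frees the
 * embedding user context and returns the accounted memory to TTM.
 */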
static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;

        if (ctx->cbs)
                vmw_binding_state_free(ctx->cbs);

        (void) vmw_context_bind_dx_query(res, NULL);

        ttm_base_object_kfree(ctx, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_context_size);
}

/**
 * vmw_user_context_base_release - Release a base object's resource reference
 *
 * @p_base: Pointer to a pointer to the base object, which is cleared.
 *
 * Called when user space holds no more references on the base object.
 * Releases the base object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
            container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

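/**
 * vmw_context_destroy_ioctl - Ioctl to destroy a user context
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg identifying the
 * context by handle.
 * @file_priv: Pointer to the calling file private.
 *
 * Drops the calling file's reference on the context's base object; the
 * context itself goes away once the last reference is gone.
 */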
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

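/**
 * vmw_context_define - Create a new user context
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg.
 * @file_priv: Pointer to the calling file private.
 * @dx: Whether to create a DX context.
 *
 * Accounts for and allocates a struct vmw_user_context, initializes the
 * context resource and its base object, and returns the new user-space
 * handle in the argument's cid member.
 */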
static int vmw_context_define(struct drm_device *dev, void *data,
                              struct drm_file *file_priv, bool dx)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_operation_ctx ttm_opt_ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        int ret;

        if (!dev_priv->has_dx && dx) {
                VMW_DEBUG_USER("DX contexts not supported by device.\n");
                return -EINVAL;
        }

        if (unlikely(vmw_user_context_size == 0))
                vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
                  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
                  VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_context_size,
                                   &ttm_opt_ctx);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for context"
                                  " creation.\n");
                goto out_unlock;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(!ctx)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_context_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = ctx->base.handle;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

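/**
 * vmw_context_define_ioctl - Ioctl to define a new legacy or guest-backed
 * context
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument.
 * @file_priv: Pointer to the calling file private.
 */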
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        return vmw_context_define(dev, data, file_priv, false);
}

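/**
 * vmw_extended_context_define_ioctl - Ioctl to define a new context of an
 * explicitly requested type
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a union drm_vmw_extended_context_arg whose
 * req member selects a legacy or DX context.
 * @file_priv: Pointer to the calling file private.
 */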
int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
        struct drm_vmw_context_arg *rep = &arg->rep;

        switch (arg->req) {
        case drm_vmw_context_legacy:
                return vmw_context_define(dev, rep, file_priv, false);
        case drm_vmw_context_dx:
                return vmw_context_define(dev, rep, file_priv, true);
        default:
                break;
        }
        return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);

        return vmw_binding_state_list(uctx->cbs);
}

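/**
 * vmw_context_res_man - Return the context's command buffer resource manager
 *
 * @ctx: The context resource.
 */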
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->man;
}

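/**
 * vmw_context_cotable - Return a context's cotable of a given type
 *
 * @ctx: The context resource.
 * @cotable_type: The cotable type.
 *
 * Returns the cotable resource, or ERR_PTR(-EINVAL) if @cotable_type is
 * out of range. The returned pointer is not reference-counted. A sketch
 * of a typical caller, with a cotable type picked purely for illustration:
 *
 *	struct vmw_resource *res =
 *		vmw_context_cotable(ctx, SVGA_COTABLE_RTVIEW);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */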
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
                                         SVGACOTableType cotable_type)
{
        if (cotable_type >= SVGA_COTABLE_DX10_MAX)
                return ERR_PTR(-EINVAL);

        return container_of(ctx, struct vmw_user_context, res)->
                cotables[cotable_type];
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context.  If @mob is NULL, then this function will
 * remove the association between the MOB and the context.  This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter.  0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
                              struct vmw_buffer_object *mob)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        if (mob == NULL) {
                if (uctx->dx_query_mob) {
                        uctx->dx_query_mob->dx_query_ctx = NULL;
                        vmw_bo_unreference(&uctx->dx_query_mob);
                        uctx->dx_query_mob = NULL;
                }

                return 0;
        }

        /* Can only have one MOB per context for queries */
        if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
                return -EINVAL;

        mob->dx_query_ctx = ctx_res;

        if (!uctx->dx_query_mob)
                uctx->dx_query_mob = vmw_bo_reference(mob);

        return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        return uctx->dx_query_mob;
}
