root/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c

DEFINITIONS

This source file includes the following definitions:
  1. vmw_validation_mem_alloc
  2. vmw_validation_mem_free
  3. vmw_validation_find_bo_dup
  4. vmw_validation_find_res_dup
  5. vmw_validation_add_bo
  6. vmw_validation_add_resource
  7. vmw_validation_res_set_dirty
  8. vmw_validation_res_switch_backup
  9. vmw_validation_res_reserve
  10. vmw_validation_res_unreserve
  11. vmw_validation_bo_validate_single
  12. vmw_validation_bo_validate
  13. vmw_validation_res_validate
  14. vmw_validation_drop_ht
  15. vmw_validation_unref_lists
  16. vmw_validation_prepare
  17. vmw_validation_revert
  18. vmw_validation_done
  19. vmw_validation_preload_bo
  20. vmw_validation_preload_res

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/slab.h>
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @as_mob: Validate as mob.
 * @cpu_blit: Validate for cpu blit access.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_bo_node {
        struct ttm_validate_buffer base;
        struct drm_hash_item hash;
        u32 as_mob : 1;
        u32 cpu_blit : 1;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
 * to a resource.
 * @new_backup_offset: Offset into the new backup mob for resources that can
 * share MOBs.
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation;
 * the command stream provides a MOB bind operation.
 * @switching_backup: The validation process is switching backup MOB.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @dirty: Whether the resource should be marked dirty (VMW_RES_DIRTY_SET) or
 * clean when unreserved.
 * @dirty_set: Whether @dirty carries valid information.
 * @private: Optionally additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_buffer_object *new_backup;
        unsigned long new_backup_offset;
        u32 no_buffer_needed : 1;
        u32 switching_backup : 1;
        u32 first_usage : 1;
        u32 reserved : 1;
        u32 dirty : 1;
        u32 dirty_set : 1;
        unsigned long private[0];
};

/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context-based allocator
 * @ctx: The validation context
 * @size: The number of bytes to allocate.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
                               unsigned int size)
{
        void *addr;

        size = vmw_validation_align(size);
        if (size > PAGE_SIZE)
                return NULL;

        if (ctx->mem_size_left < size) {
                struct page *page;

                if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
                        int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);

                        if (ret)
                                return NULL;

                        ctx->vm_size_left += ctx->vm->gran;
                        ctx->total_mem += ctx->vm->gran;
                }

                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        return NULL;

                if (ctx->vm)
                        ctx->vm_size_left -= PAGE_SIZE;

                list_add_tail(&page->lru, &ctx->page_list);
                ctx->page_address = page_address(page);
                ctx->mem_size_left = PAGE_SIZE;
        }

        addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
        ctx->mem_size_left -= size;

        return addr;
}
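
/*
 * Example (an illustrative sketch, not part of this file): a caller that
 * needs per-validation scratch data can draw it from the context allocator
 * and rely on it being reclaimed wholesale when the context is torn down.
 * The struct and function below are hypothetical.
 *
 *	struct my_scratch {
 *		u32 cmd_offset;
 *		u32 flags;
 *	};
 *
 *	static int my_record_cmd(struct vmw_validation_context *ctx, u32 offset)
 *	{
 *		struct my_scratch *s = vmw_validation_mem_alloc(ctx, sizeof(*s));
 *
 *		if (!s)
 *			return -ENOMEM;
 *		s->cmd_offset = offset;
 *		s->flags = 0;
 *		return 0;  // s is freed by vmw_validation_unref_lists() & co.
 *	}
 */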

/**
 * vmw_validation_mem_free - Free all memory allocated using
 * vmw_validation_mem_alloc()
 * @ctx: The validation context
 *
 * All memory previously allocated for this context using
 * vmw_validation_mem_alloc() is freed.
 */
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
        struct page *entry, *next;

        list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
                list_del_init(&entry->lru);
                __free_page(entry);
        }

        ctx->mem_size_left = 0;
        if (ctx->vm && ctx->total_mem) {
                ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
                ctx->total_mem = 0;
                ctx->vm_size_left = 0;
        }
}

/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
                           struct vmw_buffer_object *vbo)
{
        struct vmw_validation_bo_node *bo_node = NULL;

        if (!ctx->merge_dups)
                return NULL;

        if (ctx->ht) {
                struct drm_hash_item *hash;

                if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
                        bo_node = container_of(hash, typeof(*bo_node), hash);
        } else {
                struct vmw_validation_bo_node *entry;

                list_for_each_entry(entry, &ctx->bo_list, base.head) {
                        if (entry->base.bo == &vbo->base) {
                                bo_node = entry;
                                break;
                        }
                }
        }

        return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: The resource to search for.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
                            struct vmw_resource *res)
{
        struct vmw_validation_res_node *res_node = NULL;

        if (!ctx->merge_dups)
                return NULL;

        if (ctx->ht) {
                struct drm_hash_item *hash;

                if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
                        res_node = container_of(hash, typeof(*res_node), hash);
        } else {
                struct vmw_validation_res_node *entry;

                list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
                        if (entry->res == res) {
                                res_node = entry;
                                goto out;
                        }
                }

                list_for_each_entry(entry, &ctx->resource_list, head) {
                        if (entry->res == res) {
                                res_node = entry;
                                break;
                        }
                }
        }
out:
        return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 * @as_mob: Validate as mob, otherwise suitable for GMR operations.
 * @cpu_blit: Validate in a page-mappable location.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
                          struct vmw_buffer_object *vbo,
                          bool as_mob,
                          bool cpu_blit)
{
        struct vmw_validation_bo_node *bo_node;

        bo_node = vmw_validation_find_bo_dup(ctx, vbo);
        if (bo_node) {
                if (bo_node->as_mob != as_mob ||
                    bo_node->cpu_blit != cpu_blit) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
        } else {
                struct ttm_validate_buffer *val_buf;
                int ret;

                bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
                if (!bo_node)
                        return -ENOMEM;

                if (ctx->ht) {
                        bo_node->hash.key = (unsigned long) vbo;
                        ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
                        if (ret) {
                                DRM_ERROR("Failed to initialize a buffer validation entry.\n");
                                return ret;
                        }
                }
                val_buf = &bo_node->base;
                val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
                if (!val_buf->bo)
                        return -ESRCH;
                val_buf->num_shared = 0;
                list_add_tail(&val_buf->head, &ctx->bo_list);
                bo_node->as_mob = as_mob;
                bo_node->cpu_blit = cpu_blit;
        }

        return 0;
}
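
/*
 * Example (a hypothetical sketch): with @merge_dups enabled on the context,
 * registering the same buffer object twice is harmless as long as the usage
 * flags agree; conflicting flags are rejected.
 *
 *	int ret;
 *
 *	ret = vmw_validation_add_bo(ctx, vbo, true, false);
 *	if (!ret)
 *		ret = vmw_validation_add_bo(ctx, vbo, true, false);  // dup, OK
 *	if (!ret)
 *		ret = vmw_validation_add_bo(ctx, vbo, false, false); // -EINVAL
 */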

/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @dirty: Whether to change dirty status.
 * @p_node: Output pointer to the additional, caller-private metadata.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
                                struct vmw_resource *res,
                                size_t priv_size,
                                u32 dirty,
                                void **p_node,
                                bool *first_usage)
{
        struct vmw_validation_res_node *node;
        int ret;

        node = vmw_validation_find_res_dup(ctx, res);
        if (node) {
                node->first_usage = 0;
                goto out_fill;
        }

        node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
        if (!node) {
                VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
                return -ENOMEM;
        }

        if (ctx->ht) {
                node->hash.key = (unsigned long) res;
                ret = drm_ht_insert_item(ctx->ht, &node->hash);
                if (ret) {
                        DRM_ERROR("Failed to initialize a resource validation entry.\n");
                        return ret;
                }
        }
        node->res = vmw_resource_reference_unless_doomed(res);
        if (!node->res)
                return -ESRCH;

        node->first_usage = 1;
        if (!res->dev_priv->has_mob) {
                list_add_tail(&node->head, &ctx->resource_list);
        } else {
                switch (vmw_res_type(res)) {
                case vmw_res_context:
                case vmw_res_dx_context:
                        list_add(&node->head, &ctx->resource_ctx_list);
                        break;
                case vmw_res_cotable:
                        list_add_tail(&node->head, &ctx->resource_ctx_list);
                        break;
                default:
                        list_add_tail(&node->head, &ctx->resource_list);
                        break;
                }
        }

out_fill:
        if (dirty) {
                node->dirty_set = 1;
                /* Overwriting previous information here is intentional! */
                node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
        }
        if (first_usage)
                *first_usage = node->first_usage;
        if (p_node)
                *p_node = &node->private;

        return 0;
}
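
/*
 * Example (a hypothetical sketch): callers can piggyback their own
 * per-resource data on the validation node via @priv_size and @p_node.
 * The private area comes from zeroed pages, so it starts out cleared on
 * first use. The struct and function below are illustrative only.
 *
 *	struct my_res_info {
 *		u32 relocation_count;
 *	};
 *
 *	static int my_track_resource(struct vmw_validation_context *ctx,
 *				     struct vmw_resource *res)
 *	{
 *		struct my_res_info *info;
 *		bool first;
 *		int ret;
 *
 *		ret = vmw_validation_add_resource(ctx, res, sizeof(*info),
 *						  0 /* no dirty change */,
 *						  (void **)&info, &first);
 *		if (ret)
 *			return ret;
 *		info->relocation_count++;  // same area on duplicate adds
 *		return 0;
 *	}
 */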

/**
 * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX.
 */
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
                                  void *val_private, u32 dirty)
{
        struct vmw_validation_res_node *val;

        if (!dirty)
                return;

        val = container_of(val_private, typeof(*val), private);
        val->dirty_set = 1;
        /* Overwriting previous information here is intentional! */
        val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
}

/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object MOB. This buffer object needs to have
 * already been registered with the validation context.
 * @backup_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
                                      void *val_private,
                                      struct vmw_buffer_object *vbo,
                                      unsigned long backup_offset)
{
        struct vmw_validation_res_node *val;

        val = container_of(val_private, typeof(*val), private);

        val->switching_backup = 1;
        if (val->first_usage)
                val->no_buffer_needed = 1;

        val->new_backup = vbo;
        val->new_backup_offset = backup_offset;
}

/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
                               bool intr)
{
        struct vmw_validation_res_node *val;
        int ret = 0;

        list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

        list_for_each_entry(val, &ctx->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
                if (ret)
                        goto out_unreserve;

                val->reserved = 1;
                if (res->backup) {
                        struct vmw_buffer_object *vbo = res->backup;

                        ret = vmw_validation_add_bo
                                (ctx, vbo, vmw_resource_needs_backup(res),
                                 false);
                        if (ret)
                                goto out_unreserve;
                }
        }

        return 0;

out_unreserve:
        vmw_validation_res_unreserve(ctx, true);
        return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
                                  bool backoff)
{
        struct vmw_validation_res_node *val;

        list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
        if (backoff)
                list_for_each_entry(val, &ctx->resource_list, head) {
                        if (val->reserved)
                                vmw_resource_unreserve(val->res,
                                                       false, false, false,
                                                       NULL, 0);
                }
        else
                list_for_each_entry(val, &ctx->resource_list, head) {
                        if (val->reserved)
                                vmw_resource_unreserve(val->res,
                                                       val->dirty_set,
                                                       val->dirty,
                                                       val->switching_backup,
                                                       val->new_backup,
                                                       val->new_backup_offset);
                }
}

/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
                                      bool interruptible,
                                      bool validate_as_mob)
{
        struct vmw_buffer_object *vbo =
                container_of(bo, struct vmw_buffer_object, base);
        struct ttm_operation_ctx ctx = {
                .interruptible = interruptible,
                .no_wait_gpu = false
        };
        int ret;

        if (vbo->pin_count > 0)
                return 0;

        if (validate_as_mob)
                return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

        /*
         * Put BO in VRAM if there is space, otherwise as a GMR.
         * If there is no space in VRAM and GMR ids are all used up,
         * start evicting GMRs to make room. If the DMA buffer can't be
         * used as a GMR, this will return -ENOMEM.
         */

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
        if (ret == 0 || ret == -ERESTARTSYS)
                return ret;

        /*
         * If that failed, try VRAM again, this time evicting
         * previous contents.
         */

        ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
        return ret;
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
        struct vmw_validation_bo_node *entry;
        int ret;

        list_for_each_entry(entry, &ctx->bo_list, base.head) {
                if (entry->cpu_blit) {
                        struct ttm_operation_ctx ctx = {
                                .interruptible = intr,
                                .no_wait_gpu = false
                        };

                        ret = ttm_bo_validate(entry->base.bo,
                                              &vmw_nonfixed_placement, &ctx);
                } else {
                        ret = vmw_validation_bo_validate_single
                                (entry->base.bo, intr, entry->as_mob);
                }
                if (ret)
                        return ret;
        }
        return 0;
}

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
        struct vmw_validation_res_node *val;
        int ret;

        list_for_each_entry(val, &ctx->resource_list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_buffer_object *backup = res->backup;

                ret = vmw_resource_validate(res, intr);
                if (ret) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }

                /* Check if the resource switched backup buffer */
                if (backup && res->backup && (backup != res->backup)) {
                        struct vmw_buffer_object *vbo = res->backup;

                        ret = vmw_validation_add_bo
                                (ctx, vbo, vmw_resource_needs_backup(res),
                                 false);
                        if (ret)
                                return ret;
                }
        }
        return 0;
}

/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that may cause deadlocks during resource
 * unreferencing if held. After resource- and buffer object registering,
 * there is no longer any use for this hash table, so allow freeing it
 * either to shorten any mutex locking time, or before resources and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
        struct vmw_validation_bo_node *entry;
        struct vmw_validation_res_node *val;

        if (!ctx->ht)
                return;

        list_for_each_entry(entry, &ctx->bo_list, base.head)
                (void) drm_ht_remove_item(ctx->ht, &entry->hash);

        list_for_each_entry(val, &ctx->resource_list, head)
                (void) drm_ht_remove_item(ctx->ht, &val->hash);

        list_for_each_entry(val, &ctx->resource_ctx_list, head)
                (void) drm_ht_remove_item(ctx->ht, &val->hash);

        ctx->ht = NULL;
}
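
/*
 * Example (a hypothetical sketch): dropping the hash table as soon as all
 * registrations are done, before any potentially slow teardown, so that a
 * mutex protecting the table need not be held across destructors. The
 * my_ht_mutex below is an illustrative assumption.
 *
 *	vmw_validation_drop_ht(&val_ctx);
 *	mutex_unlock(&my_ht_mutex);
 *	...
 *	vmw_validation_unref_lists(&val_ctx);	// may invoke destructors
 */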

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * objects and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
        struct vmw_validation_bo_node *entry;
        struct vmw_validation_res_node *val;

        list_for_each_entry(entry, &ctx->bo_list, base.head) {
                ttm_bo_put(entry->base.bo);
                entry->base.bo = NULL;
        }

        list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
        list_for_each_entry(val, &ctx->resource_list, head)
                vmw_resource_unreference(&val->res);

        /*
         * No need to detach each list entry since they are all freed with
         * vmw_validation_mem_free(). Just make them inaccessible.
         */
        INIT_LIST_HEAD(&ctx->bo_list);
        INIT_LIST_HEAD(&ctx->resource_list);

        vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
                           struct mutex *mutex,
                           bool intr)
{
        int ret = 0;

        if (mutex) {
                if (intr)
                        ret = mutex_lock_interruptible(mutex);
                else
                        mutex_lock(mutex);
                if (ret)
                        return -ERESTARTSYS;
        }

        ctx->res_mutex = mutex;
        ret = vmw_validation_res_reserve(ctx, intr);
        if (ret)
                goto out_no_res_reserve;

        ret = vmw_validation_bo_reserve(ctx, intr);
        if (ret)
                goto out_no_bo_reserve;

        ret = vmw_validation_bo_validate(ctx, intr);
        if (ret)
                goto out_no_validate;

        ret = vmw_validation_res_validate(ctx, intr);
        if (ret)
                goto out_no_validate;

        return 0;

out_no_validate:
        vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
        vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
        if (mutex)
                mutex_unlock(mutex);

        return ret;
}

/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 *
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
        vmw_validation_bo_backoff(ctx);
        vmw_validation_res_unreserve(ctx, true);
        if (ctx->res_mutex)
                mutex_unlock(ctx->res_mutex);
        vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
                         struct vmw_fence_obj *fence)
{
        vmw_validation_bo_fence(ctx, fence);
        vmw_validation_res_unreserve(ctx, false);
        if (ctx->res_mutex)
                mutex_unlock(ctx->res_mutex);
        vmw_validation_unref_lists(ctx);
}
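
/*
 * Example (a hypothetical sketch of the overall lifecycle): after all buffer
 * objects and resources have been registered, a command submission path
 * would typically look like this. my_submit(), fence creation, and the
 * choice of mutex are illustrative assumptions, not this file's API.
 *
 *	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
 *	if (ret) {
 *		// Nothing reserved; lists are still referenced.
 *		vmw_validation_unref_lists(&val_ctx);
 *		return ret;
 *	}
 *
 *	ret = my_submit(dev_priv);
 *	if (ret) {
 *		vmw_validation_revert(&val_ctx);	// back off everything
 *		return ret;
 *	}
 *
 *	vmw_validation_done(&val_ctx, fence);	// unreserve, fence, unref
 */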

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
        unsigned int size = sizeof(struct vmw_validation_bo_node);

        if (!vmw_validation_mem_alloc(ctx, size))
                return -ENOMEM;

        ctx->mem_size_left += size;
        return 0;
}

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is guaranteed
 * not to sleep. An error is not fatal but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
                               unsigned int size)
{
        size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
                                    size) +
                vmw_validation_align(sizeof(struct vmw_validation_bo_node));
        if (!vmw_validation_mem_alloc(ctx, size))
                return -ENOMEM;

        ctx->mem_size_left += size;
        return 0;
}
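
/*
 * Example (a hypothetical sketch): preloading lets the subsequent
 * registration run in a context where sleeping is not allowed, e.g. under a
 * spinlock, because the node memory is already available. The lock and
 * lookup helper below are illustrative assumptions.
 *
 *	ret = vmw_validation_preload_res(ctx, sizeof(struct my_res_info));
 *	if (ret)
 *		return ret;
 *
 *	spin_lock(&my_lock);
 *	res = my_locked_lookup(key);		// must not sleep here
 *	ret = vmw_validation_add_resource(ctx, res,
 *					  sizeof(struct my_res_info),
 *					  0, (void **)&info, NULL);
 *	spin_unlock(&my_lock);
 */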
