Lines matching refs:res — each line in the vmwgfx surface code that references the identifier res (a struct vmw_resource *), shown as <source line number> <line contents>, with the enclosing function where applicable.
63 static void vmw_user_surface_free(struct vmw_resource *res);
66 static int vmw_legacy_srf_bind(struct vmw_resource *res,
68 static int vmw_legacy_srf_unbind(struct vmw_resource *res,
71 static int vmw_legacy_srf_create(struct vmw_resource *res);
72 static int vmw_legacy_srf_destroy(struct vmw_resource *res);
73 static int vmw_gb_surface_create(struct vmw_resource *res);
74 static int vmw_gb_surface_bind(struct vmw_resource *res,
76 static int vmw_gb_surface_unbind(struct vmw_resource *res,
79 static int vmw_gb_surface_destroy(struct vmw_resource *res);
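The declarations at lines 63-79 are the per-surface lifecycle callbacks: create/destroy and bind/unbind, in a legacy flavor and a guest-backed (GB) flavor. In the driver these are presumably gathered into a function-pointer table so generic resource code can drive either kind of surface through the same entry points. Below is a minimal standalone sketch of that dispatch pattern; the names (res_ops, demo_*) are illustrative, not the driver's actual definitions.

/*
 * Minimal model of callback-table dispatch. Everything here (res_ops,
 * demo_*) is illustrative and not taken from the driver.
 */
#include <stdio.h>

struct resource;

struct res_ops {				/* stands in for the driver's callback table */
	int (*create)(struct resource *res);
	int (*bind)(struct resource *res);
	int (*unbind)(struct resource *res, int readback);
	int (*destroy)(struct resource *res);
};

struct resource {
	int id;					/* -1 means "no hardware id yet" */
	const struct res_ops *ops;		/* legacy or GB table plugged in here */
};

static int demo_create(struct resource *res)  { res->id = 1; return 0; }
static int demo_bind(struct resource *res)    { printf("bind sid %d\n", res->id); return 0; }
static int demo_unbind(struct resource *res, int readback)
{ printf("unbind sid %d readback=%d\n", res->id, readback); return 0; }
static int demo_destroy(struct resource *res) { res->id = -1; return 0; }

static const struct res_ops demo_ops = {
	.create  = demo_create,
	.bind    = demo_bind,
	.unbind  = demo_unbind,
	.destroy = demo_destroy,
};

int main(void)
{
	struct resource res = { .id = -1, .ops = &demo_ops };

	/* Generic code only ever goes through the table. */
	res.ops->create(&res);
	res.ops->bind(&res);
	res.ops->unbind(&res, 1);
	res.ops->destroy(&res);
	return 0;
}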
222 cmd->body.sid = srf->res.id; in vmw_surface_define_encode()
273 body->host.sid = srf->res.id; in vmw_surface_dma_encode()
309 static void vmw_hw_surface_destroy(struct vmw_resource *res) in vmw_hw_surface_destroy() argument
312 struct vmw_private *dev_priv = res->dev_priv; in vmw_hw_surface_destroy()
316 if (res->func->destroy == vmw_gb_surface_destroy) { in vmw_hw_surface_destroy()
317 (void) vmw_gb_surface_destroy(res); in vmw_hw_surface_destroy()
321 if (res->id != -1) { in vmw_hw_surface_destroy()
330 vmw_surface_destroy_encode(res->id, cmd); in vmw_hw_surface_destroy()
340 srf = vmw_res_to_srf(res); in vmw_hw_surface_destroy()
341 dev_priv->used_memory_size -= res->backup_size; in vmw_hw_surface_destroy()
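Lines 222-341 show the encode helpers stamping the surface id (srf->res.id) into a command body, and vmw_hw_surface_destroy() emitting a destroy command before returning the surface's share of used_memory_size. Below is a hedged standalone model of that header-plus-body encoding; the command id and struct layout are placeholders, not the SVGA definitions.

/*
 * Standalone model of the "header + body carrying sid" encoding; the
 * command id value and struct layout are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CMD_SURFACE_DESTROY 1043u		/* placeholder, not an SVGA id */

struct demo_cmd_header {
	uint32_t id;				/* which command follows */
	uint32_t size;				/* size of the body in bytes */
};

struct demo_cmd_destroy_body {
	uint32_t sid;				/* surface id, i.e. res->id above */
};

static size_t demo_destroy_encode(uint32_t sid, void *cmd_space)
{
	struct demo_cmd_header *header = cmd_space;
	struct demo_cmd_destroy_body *body = (void *)(header + 1);

	header->id = DEMO_CMD_SURFACE_DESTROY;
	header->size = sizeof(*body);
	body->sid = sid;			/* mirrors cmd->body.sid = res->id */

	return sizeof(*header) + sizeof(*body);
}

int main(void)
{
	unsigned char fifo[64];
	size_t used = demo_destroy_encode(7, fifo);

	printf("encoded %zu bytes for sid 7\n", used);
	return 0;
}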
360 static int vmw_legacy_srf_create(struct vmw_resource *res) in vmw_legacy_srf_create() argument
362 struct vmw_private *dev_priv = res->dev_priv; in vmw_legacy_srf_create()
368 if (likely(res->id != -1)) in vmw_legacy_srf_create()
371 srf = vmw_res_to_srf(res); in vmw_legacy_srf_create()
372 if (unlikely(dev_priv->used_memory_size + res->backup_size >= in vmw_legacy_srf_create()
380 ret = vmw_resource_alloc_id(res); in vmw_legacy_srf_create()
386 if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { in vmw_legacy_srf_create()
410 dev_priv->used_memory_size += res->backup_size; in vmw_legacy_srf_create()
414 vmw_resource_release_id(res); in vmw_legacy_srf_create()
435 static int vmw_legacy_srf_dma(struct vmw_resource *res, in vmw_legacy_srf_dma() argument
442 struct vmw_surface *srf = vmw_res_to_srf(res); in vmw_legacy_srf_dma()
444 struct vmw_private *dev_priv = res->dev_priv; in vmw_legacy_srf_dma()
487 static int vmw_legacy_srf_bind(struct vmw_resource *res, in vmw_legacy_srf_bind() argument
490 if (!res->backup_dirty) in vmw_legacy_srf_bind()
493 return vmw_legacy_srf_dma(res, val_buf, true); in vmw_legacy_srf_bind()
508 static int vmw_legacy_srf_unbind(struct vmw_resource *res, in vmw_legacy_srf_unbind() argument
513 return vmw_legacy_srf_dma(res, val_buf, false); in vmw_legacy_srf_unbind()
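Lines 487-513 show that legacy bind and unbind funnel into one DMA helper and differ only in direction: bind uploads the backup contents to the device and is skipped when backup_dirty is clear, while unbind reads the device copy back into the backup buffer. A small standalone model of that pattern follows; the names and the exact point where the dirty flag clears are illustrative.

/*
 * Standalone model of the shared DMA helper with a direction flag; when
 * exactly the dirty flag clears is illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_res {
	bool backup_dirty;			/* backup holds newer data than the device */
};

static int demo_srf_dma(struct demo_res *res, bool bind)
{
	printf("DMA %s the device copy\n", bind ? "to" : "from");
	if (bind)
		res->backup_dirty = false;	/* device copy is now current */
	return 0;
}

static int demo_srf_bind(struct demo_res *res)
{
	if (!res->backup_dirty)			/* nothing new to upload */
		return 0;
	return demo_srf_dma(res, true);
}

static int demo_srf_unbind(struct demo_res *res)
{
	return demo_srf_dma(res, false);	/* read back before eviction */
}

int main(void)
{
	struct demo_res res = { .backup_dirty = true };

	demo_srf_bind(&res);			/* uploads: backup was dirty */
	demo_srf_bind(&res);			/* no-op: already clean */
	demo_srf_unbind(&res);			/* reads the device copy back */
	return 0;
}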
524 static int vmw_legacy_srf_destroy(struct vmw_resource *res) in vmw_legacy_srf_destroy() argument
526 struct vmw_private *dev_priv = res->dev_priv; in vmw_legacy_srf_destroy()
530 BUG_ON(res->id == -1); in vmw_legacy_srf_destroy()
544 vmw_surface_destroy_encode(res->id, cmd); in vmw_legacy_srf_destroy()
551 dev_priv->used_memory_size -= res->backup_size; in vmw_legacy_srf_destroy()
557 vmw_resource_release_id(res); in vmw_legacy_srf_destroy()
573 void (*res_free) (struct vmw_resource *res)) in vmw_surface_init()
576 struct vmw_resource *res = &srf->res; in vmw_surface_init() local
581 ret = vmw_resource_init(dev_priv, res, true, res_free, in vmw_surface_init()
588 res_free(res); in vmw_surface_init()
597 vmw_resource_activate(res, vmw_hw_surface_destroy); in vmw_surface_init()
614 prime.base)->srf.res); in vmw_user_surface_base_to_res()
622 static void vmw_user_surface_free(struct vmw_resource *res) in vmw_user_surface_free() argument
624 struct vmw_surface *srf = vmw_res_to_srf(res); in vmw_user_surface_free()
627 struct vmw_private *dev_priv = srf->res.dev_priv; in vmw_user_surface_free()
653 struct vmw_resource *res = &user_srf->srf.res; in vmw_user_surface_base_release() local
658 vmw_resource_unreference(&res); in vmw_user_surface_base_release()
692 struct vmw_resource *res; in vmw_surface_define_ioctl() local
752 res = &srf->res; in vmw_surface_define_ioctl()
806 res->backup_size = cur_bo_offset; in vmw_surface_define_ioctl()
849 res->backup_size, in vmw_surface_define_ioctl()
852 &res->backup, in vmw_surface_define_ioctl()
855 vmw_resource_unreference(&res); in vmw_surface_define_ioctl()
860 tmp = vmw_resource_reference(&srf->res); in vmw_surface_define_ioctl()
861 ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, in vmw_surface_define_ioctl()
867 vmw_resource_unreference(&res); in vmw_surface_define_ioctl()
872 vmw_resource_unreference(&res); in vmw_surface_define_ioctl()
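Lines 860-872 show the define ioctl taking an extra reference before publishing the surface through ttm_prime_object_init(), then dropping references on both the error and success paths. Below is a simplified refcount sketch of that hand-off, assuming the usual "the handle table owns one reference" convention; none of these names or rules are the driver's actual API.

/*
 * Simplified refcount sketch of the publish-with-extra-reference pattern;
 * none of these names or rules are the driver's actual API.
 */
#include <stdio.h>

struct demo_res {
	int refcount;
};

static struct demo_res *demo_ref(struct demo_res *res)
{
	res->refcount++;
	return res;
}

static void demo_unref(struct demo_res **p)
{
	struct demo_res *res = *p;

	*p = NULL;
	if (--res->refcount == 0)
		printf("resource freed\n");
}

static int demo_publish(struct demo_res *res, int fail)
{
	(void)res;				/* the handle table would keep this reference */
	return fail ? -1 : 0;
}

int main(void)
{
	struct demo_res r = { .refcount = 1 };	/* caller's reference */
	struct demo_res *res = &r;
	struct demo_res *tmp = demo_ref(res);	/* reference meant for the handle table */

	if (demo_publish(tmp, 1) != 0) {
		demo_unref(&tmp);		/* drop the table's reference */
		demo_unref(&res);		/* drop the caller's reference */
		return 1;
	}
	demo_unref(&res);			/* success: the table keeps its reference */
	return 0;
}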
1021 static int vmw_gb_surface_create(struct vmw_resource *res) in vmw_gb_surface_create() argument
1023 struct vmw_private *dev_priv = res->dev_priv; in vmw_gb_surface_create()
1024 struct vmw_surface *srf = vmw_res_to_srf(res); in vmw_gb_surface_create()
1032 if (likely(res->id != -1)) in vmw_gb_surface_create()
1036 ret = vmw_resource_alloc_id(res); in vmw_gb_surface_create()
1042 if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) { in vmw_gb_surface_create()
1059 cmd->body.sid = srf->res.id; in vmw_gb_surface_create()
1073 vmw_resource_release_id(res); in vmw_gb_surface_create()
1080 static int vmw_gb_surface_bind(struct vmw_resource *res, in vmw_gb_surface_bind() argument
1083 struct vmw_private *dev_priv = res->dev_priv; in vmw_gb_surface_bind()
1097 submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); in vmw_gb_surface_bind()
1108 cmd1->body.sid = res->id; in vmw_gb_surface_bind()
1110 if (res->backup_dirty) { in vmw_gb_surface_bind()
1114 cmd2->body.sid = res->id; in vmw_gb_surface_bind()
1115 res->backup_dirty = false; in vmw_gb_surface_bind()
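Lines 1097-1115 show the GB bind path sizing its submission as cmd1 plus an optional cmd2 that is appended only when backup_dirty is set, with both commands written back to back in one reservation. A standalone model of that conditional command packing follows; the command ids and layouts are made up for illustration.

/*
 * Model of conditional command packing: the submit size includes cmd2 only
 * when the backup is dirty, and cmd2 is written right after cmd1.
 * Ids and layouts are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_bind_cmd   { uint32_t id, sid, mobid; };
struct demo_update_cmd { uint32_t id, sid; };

static size_t demo_gb_bind_encode(uint32_t sid, uint32_t mobid,
				  bool backup_dirty, void *space)
{
	size_t submit_size = sizeof(struct demo_bind_cmd) +
			     (backup_dirty ? sizeof(struct demo_update_cmd) : 0);
	struct demo_bind_cmd *cmd1 = space;
	struct demo_update_cmd *cmd2 = (void *)(cmd1 + 1);

	cmd1->id = 1;				/* stands in for "bind GB surface" */
	cmd1->sid = sid;
	cmd1->mobid = mobid;

	if (backup_dirty) {
		cmd2->id = 2;			/* stands in for "update GB surface" */
		cmd2->sid = sid;
	}
	return submit_size;
}

int main(void)
{
	unsigned char fifo[64];

	printf("clean bind: %zu bytes\n", demo_gb_bind_encode(3, 9, false, fifo));
	printf("dirty bind: %zu bytes\n", demo_gb_bind_encode(3, 9, true, fifo));
	return 0;
}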
1122 static int vmw_gb_surface_unbind(struct vmw_resource *res, in vmw_gb_surface_unbind() argument
1126 struct vmw_private *dev_priv = res->dev_priv; in vmw_gb_surface_unbind()
1160 cmd1->body.sid = res->id; in vmw_gb_surface_unbind()
1166 cmd2->body.sid = res->id; in vmw_gb_surface_unbind()
1172 cmd3->body.sid = res->id; in vmw_gb_surface_unbind()
1192 static int vmw_gb_surface_destroy(struct vmw_resource *res) in vmw_gb_surface_destroy() argument
1194 struct vmw_private *dev_priv = res->dev_priv; in vmw_gb_surface_destroy()
1200 if (likely(res->id == -1)) in vmw_gb_surface_destroy()
1204 vmw_context_binding_res_list_scrub(&res->binding_head); in vmw_gb_surface_destroy()
1216 cmd->body.sid = res->id; in vmw_gb_surface_destroy()
1219 vmw_resource_release_id(res); in vmw_gb_surface_destroy()
1239 struct vmw_resource *res; in vmw_gb_surface_define_ioctl() local
1283 res = &srf->res; in vmw_gb_surface_define_ioctl()
1296 res->backup_size = svga3dsurface_get_serialized_size in vmw_gb_surface_define_ioctl()
1316 &res->backup, in vmw_gb_surface_define_ioctl()
1321 res->backup_size, in vmw_gb_surface_define_ioctl()
1325 &res->backup, in vmw_gb_surface_define_ioctl()
1329 vmw_resource_unreference(&res); in vmw_gb_surface_define_ioctl()
1333 tmp = vmw_resource_reference(&srf->res); in vmw_gb_surface_define_ioctl()
1334 ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, in vmw_gb_surface_define_ioctl()
1342 vmw_resource_unreference(&res); in vmw_gb_surface_define_ioctl()
1347 rep->backup_size = res->backup_size; in vmw_gb_surface_define_ioctl()
1348 if (res->backup) { in vmw_gb_surface_define_ioctl()
1350 drm_vma_node_offset_addr(&res->backup->base.vma_node); in vmw_gb_surface_define_ioctl()
1351 rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; in vmw_gb_surface_define_ioctl()
1359 vmw_resource_unreference(&res); in vmw_gb_surface_define_ioctl()
1400 if (srf->res.backup == NULL) { in vmw_gb_surface_reference_ioctl()
1406 ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, in vmw_gb_surface_reference_ioctl()
1427 rep->crep.backup_size = srf->res.backup_size; in vmw_gb_surface_reference_ioctl()
1430 drm_vma_node_offset_addr(&srf->res.backup->base.vma_node); in vmw_gb_surface_reference_ioctl()
1431 rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE; in vmw_gb_surface_reference_ioctl()
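Lines 1347-1351 and 1427-1431 report the backup buffer to userspace as a map offset plus a size computed as num_pages * PAGE_SIZE, i.e. the page-aligned backing size rather than the raw backup_size. A small sketch of that arithmetic, assuming a 4 KiB page; the real value comes from the TTM buffer object.

/*
 * Sketch of the reported buffer size: the page-aligned backing size,
 * assuming a 4 KiB page; the real value comes from the TTM object.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096ull

static uint64_t demo_buffer_size(uint64_t backup_size)
{
	uint64_t num_pages = (backup_size + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;

	return num_pages * DEMO_PAGE_SIZE;	/* what the reply would report */
}

int main(void)
{
	/* e.g. a 300000-byte backup rounds up to 74 pages = 303104 bytes */
	printf("%llu\n", (unsigned long long)demo_buffer_size(300000));
	return 0;
}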