/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>
#include "svga3d_surfacedefs.h"

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object handling user-space visibility.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 * @master:         Master of the creating client. Used for security checks.
 * @backup_base:    TTM base object of the backup buffer, if any.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
	struct drm_master *master;
	struct ttm_base_object *backup_base;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);


static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


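/*
 * Lazily computed TTM accounting size of a struct vmw_user_surface,
 * set up on first use by the surface define ioctls below.
 */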
static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind
};

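/*
 * Legacy surfaces keep their contents in host VRAM and are backed up
 * and restored with explicit surface DMA commands, while guest-backed
 * surfaces are bound directly to a mob; hence only the guest-backed
 * variant sets needs_backup.
 */
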
/**
 * struct vmw_surface_dma - SVGA3D DMA command
 *
 * @header:         SVGA3D command header.
 * @body:           Surface DMA command body.
 * @cb:             Copy box describing the region to transfer.
 * @suffix:         DMA suffix bounding the transfer.
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 *
 * @header:         SVGA3D command header.
 * @body:           Surface define command body.
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 *
 * @header:         SVGA3D command header.
 * @body:           Surface destroy command body.
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}

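/*
 * Note that backup and restore emit one DMA command per mip image on
 * each face, which is why the required size scales with num_sizes.
 */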

/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

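	/*
	 * The per-mip sizes follow the fixed-size command body as a
	 * variable-length SVGA3dSize array, so step past the command
	 * struct and continue encoding there.
	 */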
	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

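	/*
	 * Emit one DMA command per mip image: each command carries its
	 * own copy box and suffix, with the guest pointer offset to the
	 * image's location in the backing store.
	 */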
	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: Convert to an atomic used_memory_size, or use a
		 * separate lock, to avoid taking dev_priv::cmdbuf_mutex
		 * in the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = vmw_res_to_srf(res);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 *
 * Creates a device surface if the resource doesn't already have a hw id.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
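
	/*
	 * Legacy surface contents occupy host VRAM, which is tracked in
	 * used_memory_size; refuse creation with -EBUSY when the budget
	 * is exhausted so the caller can retry after eviction.
	 */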
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode the surface define command.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @readback:       Whether to copy the surface contents back to the
 *                  backup buffer before unbinding.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		return -ENOMEM;
	}

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
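	/*
	 * On hardware without mobs, hold a 3D resource reference for as
	 * long as a legacy surface exists; guest-backed surfaces instead
	 * take the reference when the hw surface is actually created.
	 */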
	if (!dev_priv->has_mob)
		(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		if (!dev_priv->has_mob)
			vmw_3d_resource_dec(dev_priv, false);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 *                                 destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	if (user_srf->backup_base)
		ttm_base_object_unref(&user_srf->backup_base);
	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 *                             the user surface destroy functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 *                            the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS || num_sizes == 0)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 0;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

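	/*
	 * Lay out the backing store face by face and mip level by mip
	 * level, recording each image's offset so the DMA encoder can
	 * address individual mip images later.
	 */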
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;
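
	/*
	 * A 64x64 A8R8G8B8 scanout surface is assumed to be a cursor;
	 * allocate a snooper image so cursor updates can be tracked.
	 */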
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (srf->snooper.image == NULL) {
			DRM_ERROR("Failed to allocate cursor image.\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/*
	 * A gb-aware client referencing a shared surface will
	 * expect a backup buffer to be present.
	 */
	if (dev_priv->has_mob && req->shareable) {
		uint32_t backup_handle;

		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
					    res->backup_size,
					    true,
					    &backup_handle,
					    &res->backup,
					    &user_srf->backup_base);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->shareable, VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.hash.key;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_surface_handle_reference - Reference a surface from a handle
 *
 * @dev_priv:       Pointer to a device private struct.
 * @file_priv:      Pointer to a drm file private structure.
 * @u_handle:       User handle or prime fd identifying the surface.
 * @handle_type:    Whether @u_handle is a legacy handle or a prime fd.
 * @base_p:         On successful return, points to the referenced
 *                  TTM base object.
 */
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (unlikely(ret != 0))
			return ret;
	} else {
		if (unlikely(drm_is_render_client(file_priv))) {
			DRM_ERROR("Render client refused legacy "
				  "surface reference.\n");
			return -EACCES;
		}
		handle = u_handle;
	}
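
	/*
	 * For prime fds, ttm_prime_fd_to_handle() has already added a
	 * TTM_REF_USAGE reference for us, which is dropped again on the
	 * error paths below.
	 */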

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		DRM_ERROR("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}

	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);

		/*
		 * Make sure the surface creator has the same
		 * authenticating master.
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master) {
			DRM_ERROR("Trying to reference surface outside of"
				  " master domain.\n");
			ret = -EACCES;
			goto out_bad_resource;
		}

		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                               the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->base_size,
				   sizeof(srf->base_size));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Encode a surface define command and create
 *                         the hw surface, as part of resource validation.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint32_t cmd_len, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

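	/*
	 * Hold a 3D resource reference for the lifetime of the hw
	 * surface; it is dropped again in the error paths below and in
	 * vmw_gb_surface_destroy().
	 */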
	(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd_len = sizeof(cmd->body);
	submit_len = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, submit_len);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	cmd->body.numMipLevels = srf->mip_levels[0];
	cmd->body.multisampleCount = srf->multisample_count;
	cmd->body.autogenFilter = srf->autogen_filter;
	cmd->body.size.width = srf->base_size.width;
	cmd->body.size.height = srf->base_size.height;
	cmd->body.size.depth = srf->base_size.depth;
	vmw_fifo_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_3d_resource_dec(dev_priv, false);
	return ret;
}


static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd1 == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->mem.start;
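
	/*
	 * If the backup buffer holds data newer than the hw surface,
	 * also emit an update command so the bind brings the surface
	 * contents up to date.
	 */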
	if (res->backup_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		res->backup_dirty = false;
	}
	vmw_fifo_commit(dev_priv, submit_size);

	return 0;
}

static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "unbinding.\n");
		return -ENOMEM;
	}
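
	/*
	 * Either read the surface contents back to the backup buffer or
	 * invalidate them, then detach the surface from its mob.
	 */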

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

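	/*
	 * Scrub any remaining context bindings of the surface before
	 * destroying it, holding the binding mutex so the binding
	 * tracker stays in sync with the device.
	 */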
	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 *                               the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint32_t size;
	const struct svga3d_surface_desc *desc;
	uint32_t backup_handle;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	size = vmw_user_surface_size + 128;

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->svga3d_flags;
	srf->format = req->format;
	srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
	srf->mip_levels[0] = req->mip_levels;
	srf->num_sizes = 1;
	srf->sizes = NULL;
	srf->offsets = NULL;
	user_srf->size = size;
	srf->base_size = req->base_size;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = req->multisample_count;
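
	/*
	 * The required backing store size follows from the format, base
	 * size and mip count; a cubemap surface stores all six faces
	 * consecutively.
	 */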
	res->backup_size = svga3dsurface_get_serialized_size
	  (srf->format, srf->base_size, srf->mip_levels[0],
	   srf->flags & SVGA3D_SURFACE_CUBEMAP);

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

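	/*
	 * Either use a user-supplied buffer as the backing store, or
	 * allocate a fresh one here if the client asked for that;
	 * otherwise a backup buffer is created on first validation.
	 */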
	if (req->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
					     &res->backup,
					     &user_srf->backup_base);
		if (ret == 0)
			backup_handle = req->buffer_handle;
	} else if (req->drm_surface_flags &
		   drm_vmw_surface_flag_create_buffer)
		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
					    res->backup_size,
					    req->drm_surface_flags &
					    drm_vmw_surface_flag_shareable,
					    &backup_handle,
					    &res->backup,
					    &user_srf->backup_base);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->drm_surface_flags &
				    drm_vmw_surface_flag_shareable,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle = user_srf->prime.base.hash.key;
	rep->backup_size = res->backup_size;
	if (res->backup) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->backup->base.vma_node);
		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
		rep->buffer_handle = backup_handle;
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}

	vmw_resource_unreference(&res);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return 0;
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 *                                  the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	uint32_t backup_handle;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (srf->res.backup == NULL) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		ret = -EINVAL;
		goto out_bad_resource;
	}

	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
	ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
					&backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a GB surface "
			  "backup buffer.\n");
		(void) ttm_ref_object_base_unref(tfile, base->hash.key,
						 TTM_REF_USAGE);
		goto out_bad_resource;
	}

	rep->creq.svga3d_flags = srf->flags;
	rep->creq.format = srf->format;
	rep->creq.mip_levels = srf->mip_levels[0];
	rep->creq.drm_surface_flags = 0;
	rep->creq.multisample_count = srf->multisample_count;
	rep->creq.autogen_filter = srf->autogen_filter;
	rep->creq.buffer_handle = backup_handle;
	rep->creq.base_size = srf->base_size;
	rep->crep.handle = user_srf->prime.base.hash.key;
	rep->crep.backup_size = srf->res.backup_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}