/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

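/*
 * The TTM object framework reserves a number of driver-private object
 * types. Map the three vmwgfx resource kinds onto them so that
 * contexts, surfaces and streams can be told apart when a base object
 * is looked up by handle.
 */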
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

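/*
 * container_of() helpers that recover the driver-private buffer
 * structures from an embedded struct ttm_buffer_object.
 */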
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

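/**
 * vmw_resource_reference
 *
 * @res: Pointer to the resource.
 *
 * Take a reference on @res by incrementing its kref, and return the
 * pointer for convenient chaining.
 */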
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

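/**
 * vmw_resource_unreference
 *
 * @p_res: Double pointer to the resource. Set to NULL on return.
 *
 * Drop a reference on the resource. The release path runs with the
 * device resource_lock write-held; vmw_resource_release() temporarily
 * drops that lock around the hw_destroy and res_free callbacks.
 */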
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

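/**
 * vmw_resource_init
 *
 * @dev_priv: Pointer to the device private structure.
 * @res:      Resource to initialize.
 * @idr:      idr in which to allocate the resource id.
 * @obj_type: TTM object type of the resource.
 * @res_free: Free function, or NULL to use kfree.
 *
 * Initialize the resource's refcount and allocate an id for it from
 * @idr, retrying the idr_pre_get() / idr_get_new_above() pair while
 * the idr asks to be re-preloaded. The resource is not yet visible to
 * lookups; see vmw_resource_activate().
 */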
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation basically means that vmw_resource_lookup will find the
 * resource.
 */

static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

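/**
 * vmw_resource_lookup
 *
 * @dev_priv: Pointer to the device private structure.
 * @idr:      idr to search.
 * @id:       id of the resource.
 *
 * Look up a resource by id and return it with a reference held, or
 * NULL if the id is unknown or the resource has not been activated.
 *
 * A minimal usage sketch (hypothetical caller):
 *
 *	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, cid);
 *	if (unlikely(res == NULL))
 *		return -EINVAL;
 *	...use the resource...
 *	vmw_resource_unreference(&res);
 */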
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	return res;
}

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

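/**
 * vmw_context_init
 *
 * @dev_priv: Pointer to the device private structure.
 * @res:      Context resource to initialize.
 * @res_free: Free function, or NULL to use kfree.
 *
 * Allocate an id for the context, emit an SVGA_3D_CMD_CONTEXT_DEFINE
 * command to the FIFO and activate the resource so that lookups will
 * find it. On failure the resource is freed.
 */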
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

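/**
 * Ioctl that creates a context for the calling client and returns its
 * resource id in the drm_vmw_context_arg. The context starts out
 * non-shareable and owned by the calling file.
 */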
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
			container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

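/**
 * vmw_surface_init
 *
 * @dev_priv: Pointer to the device private structure.
 * @srf:      Surface to initialize. Sizes and mip levels must be set up.
 * @res_free: Free function. Must not be NULL.
 *
 * Allocate an id for the surface and emit an SVGA_3D_CMD_SURFACE_DEFINE
 * command, followed by one SVGA3dSize per mip level, to the FIFO. On
 * success the resource is activated with vmw_hw_surface_destroy as its
 * hardware destructor.
 */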
int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

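/**
 * vmw_user_surface_lookup_handle
 *
 * @dev_priv: Pointer to the device private structure.
 * @tfile:    TTM object file of the caller.
 * @handle:   User-space handle of the surface.
 * @out:      On success, set to point to the surface, referenced.
 *
 * Translate a user-space surface handle into a referenced
 * struct vmw_surface pointer, verifying that the underlying base
 * object really wraps an activated surface owned by this driver.
 */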
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

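/**
 * Ioctl that creates a surface from a user-supplied description.
 * Copies the per-mip-level sizes from user space, optionally sets up
 * cursor snooping for 64x64 A8R8G8B8 scanout surfaces, and returns
 * the new surface id in arg->rep.
 */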
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	/* Reject degenerate as well as oversized requests. */
	if (srf->num_sizes == 0 ||
	    srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		/* Allocate a zeroed cursor snooping image. */
		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor image.\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;
	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
		container_of(bo->bdev, struct vmw_private, bdev);

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

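/**
 * vmw_dmabuf_init
 *
 * @dev_priv:      Pointer to the device private structure.
 * @vmw_bo:        Buffer to initialize.
 * @size:          Size of the buffer in bytes.
 * @placement:     Initial TTM placement.
 * @interruptible: Whether to wait interruptibly for buffer space.
 * @bo_free:       Destructor. Must not be NULL.
 *
 * Account for the buffer against the global TTM memory limit and hand
 * it over to ttm_bo_init(). On error the buffer is freed through
 * @bo_free, matching ttm_bo_init()'s own error behaviour.
 */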
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/*
		 * Free the buffer ourselves here: ttm_bo_init() frees it
		 * through bo_free on failure, but we never get that far.
		 */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

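/**
 * Ioctl that allocates a DMA buffer of the requested size and
 * publishes it to user space as a TTM base object, returning both an
 * object handle and a map offset.
 */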
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	/*
	 * On failure, vmw_dmabuf_init() frees vmw_user_bo through the
	 * supplied destructor, so only the read lock needs dropping.
	 */
	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;
	return 0;
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

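/**
 * vmw_stream_init
 *
 * @dev_priv: Pointer to the device private structure.
 * @stream:   Stream to initialize.
 * @res_free: Free function, or NULL to use kfree.
 *
 * Allocate a resource id and claim an overlay stream for the new
 * resource, then activate it with vmw_stream_destroy as its
 * destructor. On failure the stream is freed.
 */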
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
				  arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

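/**
 * vmw_user_stream_lookup
 *
 * @dev_priv: Pointer to the device private structure.
 * @tfile:    TTM object file of the caller.
 * @inout_id: In: user-space stream resource id. Out: overlay stream id.
 * @out:      On success, set to the referenced stream resource.
 *
 * Look up a user stream by resource id, verify that it is owned by
 * @tfile, and return both the underlying overlay stream id and a
 * referenced resource pointer.
 */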
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}