// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"


#include <drm/ttm/ttm_placement.h>

static void vmw_bo_release(struct vmw_bo *vbo)
{
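	/*
	 * If this is backed by a GEM object (funcs set), its reference
	 * count must already have dropped to zero by the time we get here.
	 */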
	WARN_ON(vbo->tbo.base.funcs &&
		kref_read(&vbo->tbo.base.refcount) != 0);
	vmw_bo_unmap(vbo);
	drm_gem_object_release(&vbo->tbo.base);
}

/**
 * vmw_bo_free - vmw_bo destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_release(vbo);
	kfree(vbo);
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
				   struct vmw_bo *buf,
				   struct ttm_placement *placement,
				   bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	ret = ttm_bo_validate(bo, placement, &ctx);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}


/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

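	/*
	 * Try GMR first with VRAM as a fallback. If that fails for any
	 * reason other than a pending signal, retry with VRAM only.
	 */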
	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_bo *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret = 0;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < PFN_UP(bo->resource->size) &&
	    bo->resource->start > 0 &&
	    buf->tbo.pin_count == 0) {
		ctx.interruptible = false;
		vmw_bo_placement_set(buf,
				     VMW_BO_DOMAIN_SYS,
				     VMW_BO_DOMAIN_SYS);
		(void)ttm_bo_validate(bo, &buf->placement, &ctx);
	}

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
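	/*
	 * Cap the last allowed page so that validation can only succeed
	 * with the buffer placed at the very start of VRAM.
	 */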
	buf->places[0].lpfn = PFN_UP(bo->resource->size);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:

	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer without moving it.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_bo *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->resource->mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->resource->start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->resource->start;
		ptr->offset = 0;
	}
}


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->tbo;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

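	/* Nothing to do if the buffer is already in the requested pin state. */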
	if (pin == !!bo->pin_count)
		return;

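	/*
	 * Build a single placement that exactly matches the buffer's
	 * current location, so the validation below cannot move it.
	 */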
	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
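 * A minimal usage sketch (assuming the caller already holds a pin or a
 * reservation on @vbo; @data and @size are illustrative, not part of
 * this API):
 *
 *	void *virtual = vmw_bo_map_and_cache(vbo);
 *	if (!virtual)
 *		return -ENOMEM;
 *	memcpy(virtual, data, size);
 *
 * No explicit unmap is needed: the cached map is reused by subsequent
 * calls and torn down automatically in the three cases above.
 *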
 */
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
	struct ttm_buffer_object *bo = &vbo->tbo;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_bo *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
	vbo->map.bo = NULL;
}


/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Buffer object to initialize
 * @params: Parameters used to initialize the buffer object
 * @destroy: The function used to delete the buffer object
 * Returns: Zero on success, negative error code on error.
 *
 */
static int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_bo *vmw_bo,
		       struct vmw_bo_params *params,
		       void (*destroy)(struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = {
		.interruptible = params->bo_type != ttm_bo_type_kernel,
		.no_wait_gpu = false,
		.resv = params->resv,
	};
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	memset(vmw_bo, 0, sizeof(*vmw_bo));

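	/*
	 * All vmw buffer objects use TTM eviction priority 3; the
	 * BUILD_BUG_ON below guarantees that this is a valid priority.
	 */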
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->tbo.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	params->size = ALIGN(params->size, PAGE_SIZE);
	drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);

	vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
	ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
				   &vmw_bo->placement, 0, &ctx,
				   params->sg, params->resv, destroy);
	if (unlikely(ret))
		return ret;

	if (params->pin)
		ttm_bo_pin(&vmw_bo->tbo);
	ttm_bo_unreserve(&vmw_bo->tbo);

	return 0;
}

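/**
 * vmw_bo_create - Allocate and initialize a vmw buffer object
 *
 * @vmw: Pointer to the device private struct
 * @params: Parameters used to initialize the buffer object
 * @p_bo: Assigned a pointer to the new buffer object on success; set
 * to NULL on failure.
 * Return: Zero on success, negative error code on error.
 */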
int vmw_bo_create(struct vmw_private *vmw,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo)
{
	int ret;

	*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
	if (unlikely(!*p_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	/*
	 * vmw_bo_init will delete the *p_bo object if it fails
	 */
	ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
	if (unlikely(ret != 0))
		goto out_error;

	return ret;
out_error:
	*p_bo = NULL;
	return ret;
}

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, Negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab is automatically released when the file the grab was
 * performed through is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &vmw_bo->tbo;
	int ret;

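	/*
	 * With allow_cs, only wait for the buffer to become idle (or
	 * poll once when nonblocking); don't reserve it or bump
	 * cpu_writers, so command submission stays unblocked.
	 */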
	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
					     true, nonblock ? 0 :
					     MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&vmw_bo->cpu_writers);

	ttm_bo_unreserve(bo);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @filp: Identifying the caller.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(struct drm_file *filp,
				       uint32_t handle,
				       uint32_t flags)
{
	struct vmw_bo *vmw_bo;
	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

	if (!ret) {
		if (!(flags & drm_vmw_synccpu_allow_cs)) {
			atomic_dec(&vmw_bo->cpu_writers);
		}
		vmw_user_bo_unref(&vmw_bo);
	}

	return ret;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_bo *vbo;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
		if (unlikely(ret != 0))
			return ret;

		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
		vmw_user_bo_unref(&vbo);
		if (unlikely(ret != 0)) {
			if (ret == -ERESTARTSYS || ret == -EBUSY)
				return -EBUSY;
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(file_priv,
						  arg->handle,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return drm_gem_handle_delete(file_priv, arg->handle);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @filp: The file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_bo should be placed.
 * Return: Zero on success, Negative error code on error.
 *
 * The vmw buffer object pointer will be refcounted (both ttm and gem)
 */
int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out)
{
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	*out = to_vmw_bo(gobj);

	return 0;
}

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
	int ret;

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		dma_fence_get(&fence->base);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (!ret)
		dma_resv_add_fence(bo->base.resv, &fence->base,
				   DMA_RESV_USAGE_KERNEL);
	else
		/* Last resort fallback when we are OOM */
		dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);
}


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_bo *vbo;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	int ret;

	switch (cpp) {
	case 1: /* DRM_FORMAT_C8 */
	case 2: /* DRM_FORMAT_RGB565 */
	case 4: /* DRM_FORMAT_XRGB8888 */
		break;
	default:
		/*
		 * Dumb buffers don't allow anything else.
		 * This is tested via IGT's dumb_buffers
		 */
		return -EINVAL;
	}

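	/*
	 * Example: a 1024x768 XRGB8888 request gives cpp = 4, so
	 * pitch = 1024 * 4 = 4096 bytes and
	 * size = ALIGN(4096 * 768, PAGE_SIZE) = 3 MiB.
	 */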
	args->pitch = args->width * cpp;
	args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);

	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
						args->size, &args->handle,
						&vbo);
	if (!ret) {
		/* drop reference from allocate - handle holds it now */
		drm_gem_object_put(&vbo->tbo.base);
	}
	return ret;
}

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(to_vmw_bo(&bo->base));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}

static u32 placement_flags(u32 domain, u32 desired, u32 fallback)
{
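	/*
	 * A domain present in both masks needs no flag: it is valid
	 * both as a desired and as a fallback placement.
	 */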
	if (desired & fallback & domain)
		return 0;

	if (desired & domain)
		return TTM_PL_FLAG_DESIRED;

	return TTM_PL_FLAG_FALLBACK;
}

static u32
set_placement_list(struct ttm_place *pl, u32 desired, u32 fallback)
{
	u32 domain = desired | fallback;
	u32 n = 0;

	/*
	 * The placements are ordered according to our preferences
	 */
	if (domain & VMW_BO_DOMAIN_MOB) {
		pl[n].mem_type = VMW_PL_MOB;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_MOB, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_GMR) {
		pl[n].mem_type = VMW_PL_GMR;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_GMR, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_VRAM) {
		pl[n].mem_type = TTM_PL_VRAM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_VRAM, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
		pl[n].mem_type = VMW_PL_SYSTEM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_WAITABLE_SYS,
					      desired, fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	if (domain & VMW_BO_DOMAIN_SYS) {
		pl[n].mem_type = TTM_PL_SYSTEM;
		pl[n].flags = placement_flags(VMW_BO_DOMAIN_SYS, desired,
					      fallback);
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}

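	/*
	 * An empty domain mask is a driver bug; warn, but recover with
	 * a plain system placement instead of an empty list.
	 */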
	WARN_ON(!n);
	if (!n) {
		pl[n].mem_type = TTM_PL_SYSTEM;
		pl[n].flags = 0;
		pl[n].fpfn = 0;
		pl[n].lpfn = 0;
		n++;
	}
	return n;
}

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	struct ttm_placement *pl = &bo->placement;
	bool mem_compatible = false;
	u32 i;

	pl->placement = bo->places;
	pl->num_placement = set_placement_list(bo->places, domain, busy_domain);

	if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
		for (i = 0; i < pl->num_placement; ++i) {
			if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
			    bo->tbo.resource->mem_type == pl->placement[i].mem_type)
				mem_compatible = true;
		}
		if (!mem_compatible)
			drm_warn(&vmw->drm,
				 "%s: Incompatible transition from "
				 "bo->tbo.resource->mem_type = %u to domain = %u\n",
				 __func__, bo->tbo.resource->mem_type, domain);
	}
}

void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;

	if (vmw->has_mob)
		domain = VMW_BO_DOMAIN_MOB;

	vmw_bo_placement_set(bo, domain, domain);
}