1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 *          Alon Levy
24 */
25
26#include <linux/pci.h>
27#include <linux/uaccess.h>
28
29#include "qxl_drv.h"
30#include "qxl_object.h"
31
/*
 * TODO: we currently allocate a new gem object (a qxl_bo) for each
 * request. This is wasteful since bos are page aligned.
 */
36int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
37{
38	struct qxl_device *qdev = to_qxl(dev);
39	struct drm_qxl_alloc *qxl_alloc = data;
40	int ret;
41	uint32_t handle;
42	u32 domain = QXL_GEM_DOMAIN_VRAM;
43
44	if (qxl_alloc->size == 0) {
45		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
46		return -EINVAL;
47	}
48	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
49						domain,
50						qxl_alloc->size,
51						NULL,
52						NULL, &handle);
53	if (ret) {
54		DRM_ERROR("%s: failed to create gem ret=%d\n",
55			  __func__, ret);
56		return -ENOMEM;
57	}
58	qxl_alloc->handle = handle;
59	return 0;
60}
61
62int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
63{
64	struct qxl_device *qdev = to_qxl(dev);
65	struct drm_qxl_map *qxl_map = data;
66
67	return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle,
68					   &qxl_map->offset);
69}
70
/*
 * Bookkeeping for one relocation parsed from userspace: dst_bo is patched
 * at dst_offset with either the physical address of (src_bo + src_offset)
 * or src_bo's surface id, depending on type.
 */
struct qxl_reloc_info {
	int type;		/* QXL_RELOC_TYPE_BO or QXL_RELOC_TYPE_SURF */
	struct qxl_bo *dst_bo;	/* bo that gets written to */
	uint32_t dst_offset;	/* byte offset within dst_bo to patch */
	struct qxl_bo *src_bo;	/* bo the relocation points at; may be NULL */
	int src_offset;		/* byte offset within src_bo */
};
78
79/*
80 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
81 * are on vram).
82 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
83 */
84static void
85apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
86{
87	void *reloc_page;
88
89	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
90	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
91											      info->src_bo,
92											      info->src_offset);
93	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
94}
95
96static void
97apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
98{
99	uint32_t id = 0;
100	void *reloc_page;
101
102	if (info->src_bo && !info->src_bo->is_primary)
103		id = info->src_bo->surface_id;
104
105	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
106	*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
107	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
108}
109
110/* return holding the reference to this object */
111static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
112			      struct qxl_release *release, struct qxl_bo **qbo_p)
113{
114	struct drm_gem_object *gobj;
115	struct qxl_bo *qobj;
116	int ret;
117
118	gobj = drm_gem_object_lookup(file_priv, handle);
119	if (!gobj)
120		return -EINVAL;
121
122	qobj = gem_to_qxl_bo(gobj);
123
124	ret = qxl_release_list_add(release, qobj);
125	drm_gem_object_put(gobj);
126	if (ret)
127		return ret;
128
129	*qbo_p = qobj;
130	return 0;
131}
132
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full QXLDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial QXLReleaseInfo struct (the first sizeof(union qxl_release_info)
 * bytes).
 */
/*
 * Copy one userspace draw command into a freshly reserved release bo,
 * resolve all of its relocations and push it onto the command ring.
 *
 * Returns 0 on success or a negative errno. On any failure after the
 * release has been allocated, the release is freed before returning.
 */
static int qxl_process_single_command(struct qxl_device *qdev,
				      struct drm_qxl_command *cmd,
				      struct drm_file *file_priv)
{
	struct qxl_reloc_info *reloc_info;
	int release_type;
	struct qxl_release *release;
	struct qxl_bo *cmd_bo;
	void *fb_cmd;
	int i, ret;
	int unwritten;

	/* only drawables may be submitted through execbuffer */
	switch (cmd->type) {
	case QXL_CMD_DRAW:
		release_type = QXL_RELEASE_DRAWABLE;
		break;
	case QXL_CMD_SURFACE:
	case QXL_CMD_CURSOR:
	default:
		DRM_DEBUG("Only draw commands in execbuffers\n");
		return -EINVAL;
	}

	/* the command plus its release-info header must fit in one page */
	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
		return -EINVAL;

	if (!access_ok(u64_to_user_ptr(cmd->command),
		       cmd->command_size))
		return -EFAULT;

	/* kmalloc_array() catches relocs_num * sizeof() overflow for us */
	reloc_info = kmalloc_array(cmd->relocs_num,
				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
	if (!reloc_info)
		return -ENOMEM;

	ret = qxl_alloc_release_reserved(qdev,
					 sizeof(union qxl_release_info) +
					 cmd->command_size,
					 release_type,
					 &release,
					 &cmd_bo);
	if (ret)
		goto out_free_reloc;

	/* TODO copy slow path code from i915 */
	/* copy the command body in past the release-info header; the copy
	 * runs inside the atomic kmap window, hence the _inatomic variant */
	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
	unwritten = __copy_from_user_inatomic_nocache
		(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
		 u64_to_user_ptr(cmd->command), cmd->command_size);

	{
		/* stamp the drawable with the current device clock */
		struct qxl_drawable *draw = fb_cmd;

		draw->mm_time = qdev->rom->mm_clock;
	}

	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
	if (unwritten) {
		DRM_ERROR("got unwritten %d\n", unwritten);
		ret = -EFAULT;
		goto out_free_release;
	}

	/* fill out reloc info structs */
	for (i = 0; i < cmd->relocs_num; ++i) {
		struct drm_qxl_reloc reloc;
		struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);

		if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
			ret = -EFAULT;
			goto out_free_bos;
		}

		/* add the bos to the list of bos to validate -
		   need to validate first then process relocs? */
		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
			DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);

			ret = -EINVAL;
			goto out_free_bos;
		}
		reloc_info[i].type = reloc.reloc_type;

		if (reloc.dst_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
						 &reloc_info[i].dst_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].dst_offset = reloc.dst_offset;
		} else {
			/* no dst handle: the reloc patches the command bo itself,
			 * so the release offset must be added in */
			reloc_info[i].dst_bo = cmd_bo;
			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
		}

		/* reserve and validate the reloc dst bo */
		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
						 &reloc_info[i].src_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].src_offset = reloc.src_offset;
		} else {
			/* surface reloc with no source bo patches in id 0 */
			reloc_info[i].src_bo = NULL;
			reloc_info[i].src_offset = 0;
		}
	}

	/* validate all buffers */
	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free_bos;

	for (i = 0; i < cmd->relocs_num; ++i) {
		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
			apply_reloc(qdev, &reloc_info[i]);
		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
			apply_surf_reloc(qdev, &reloc_info[i]);
	}

	qxl_release_fence_buffer_objects(release);
	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);

out_free_bos:
out_free_release:
	/* a failed push also lands here with ret != 0 and frees the release */
	if (ret)
		qxl_release_free(qdev, release);
out_free_reloc:
	kfree(reloc_info);
	return ret;
}
269
270int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
271{
272	struct qxl_device *qdev = to_qxl(dev);
273	struct drm_qxl_execbuffer *execbuffer = data;
274	struct drm_qxl_command user_cmd;
275	int cmd_num;
276	int ret;
277
278	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
279
280		struct drm_qxl_command __user *commands =
281			u64_to_user_ptr(execbuffer->commands);
282
283		if (copy_from_user(&user_cmd, commands + cmd_num,
284				       sizeof(user_cmd)))
285			return -EFAULT;
286
287		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
288		if (ret)
289			return ret;
290	}
291	return 0;
292}
293
294int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
295{
296	struct qxl_device *qdev = to_qxl(dev);
297	struct drm_qxl_update_area *update_area = data;
298	struct qxl_rect area = {.left = update_area->left,
299				.top = update_area->top,
300				.right = update_area->right,
301				.bottom = update_area->bottom};
302	int ret;
303	struct drm_gem_object *gobj = NULL;
304	struct qxl_bo *qobj = NULL;
305	struct ttm_operation_ctx ctx = { true, false };
306
307	if (update_area->left >= update_area->right ||
308	    update_area->top >= update_area->bottom)
309		return -EINVAL;
310
311	gobj = drm_gem_object_lookup(file, update_area->handle);
312	if (gobj == NULL)
313		return -ENOENT;
314
315	qobj = gem_to_qxl_bo(gobj);
316
317	ret = qxl_bo_reserve(qobj);
318	if (ret)
319		goto out;
320
321	if (!qobj->tbo.pin_count) {
322		qxl_ttm_placement_from_domain(qobj, qobj->type);
323		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
324		if (unlikely(ret))
325			goto out;
326	}
327
328	ret = qxl_bo_check_id(qdev, qobj);
329	if (ret)
330		goto out2;
331	if (!qobj->surface_id)
332		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
333	ret = qxl_io_update_area(qdev, qobj, &area);
334
335out2:
336	qxl_bo_unreserve(qobj);
337
338out:
339	drm_gem_object_put(gobj);
340	return ret;
341}
342
343int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
344{
345	struct qxl_device *qdev = to_qxl(dev);
346	struct drm_qxl_getparam *param = data;
347
348	switch (param->param) {
349	case QXL_PARAM_NUM_SURFACES:
350		param->value = qdev->rom->n_surfaces;
351		break;
352	case QXL_PARAM_MAX_RELOCS:
353		param->value = QXL_MAX_RES;
354		break;
355	default:
356		return -EINVAL;
357	}
358	return 0;
359}
360
361int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
362{
363	struct qxl_device *qdev = to_qxl(dev);
364	struct pci_dev *pdev = to_pci_dev(dev->dev);
365	struct drm_qxl_clientcap *param = data;
366	int byte, idx;
367
368	byte = param->index / 8;
369	idx = param->index % 8;
370
371	if (pdev->revision < 4)
372		return -ENOSYS;
373
374	if (byte >= 58)
375		return -ENOSYS;
376
377	if (qdev->rom->client_capabilities[byte] & (1 << idx))
378		return 0;
379	return -ENOSYS;
380}
381
382int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
383{
384	struct qxl_device *qdev = to_qxl(dev);
385	struct drm_qxl_alloc_surf *param = data;
386	int handle;
387	int ret;
388	int size, actual_stride;
389	struct qxl_surface surf;
390
391	/* work out size allocate bo with handle */
392	actual_stride = param->stride < 0 ? -param->stride : param->stride;
393	size = actual_stride * param->height + actual_stride;
394
395	surf.format = param->format;
396	surf.width = param->width;
397	surf.height = param->height;
398	surf.stride = param->stride;
399	surf.data = 0;
400
401	ret = qxl_gem_object_create_with_handle(qdev, file,
402						QXL_GEM_DOMAIN_SURFACE,
403						size,
404						&surf,
405						NULL, &handle);
406	if (ret) {
407		DRM_ERROR("%s: failed to create gem ret=%d\n",
408			  __func__, ret);
409		return -ENOMEM;
410	} else
411		param->handle = handle;
412	return ret;
413}
414