/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

int radeon_gem_object_init(struct drm_gem_object *obj)
{
	/* we do nothing here */
	return 0;
}

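/*
 * Release a GEM object: drop the driver's reference on the backing
 * radeon_bo (if any), then release and free the GEM object itself.
 */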
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gobj->driver_private;

	gobj->driver_private = NULL;
	if (robj) {
		radeon_bo_unref(&robj);
	}

	drm_gem_object_release(gobj);
	kfree(gobj);
}

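/*
 * Allocate a GEM object of the requested size and create its backing
 * radeon_bo in the given initial domain.  The alignment is clamped up to
 * at least one page.  On success *obj points to the new GEM object.
 */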
int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	*obj = NULL;
	gobj = drm_gem_object_alloc(rdev->ddev, size);
	if (!gobj) {
		return -ENOMEM;
	}
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	gobj->driver_private = robj;
	*obj = gobj;
	return 0;
}

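/*
 * Pin the buffer backing a GEM object into the requested domain and
 * return its GPU address.  radeon_gem_object_unpin() below undoes this.
 */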
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
			  uint64_t *gpu_addr)
{
	struct radeon_bo *robj = obj->driver_private;
	int r;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(robj, pin_domain, gpu_addr);
	radeon_bo_unreserve(robj);
	return r;
}

void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
	struct radeon_bo *robj = obj->driver_private;
	int r;

	r = radeon_bo_reserve(robj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(robj);
		radeon_bo_unreserve(robj);
	}
}

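/*
 * Pick the target domain from the read/write domain flags (write takes
 * precedence).  A CPU domain request simply waits for the buffer to go
 * idle; other domains are left to later validation.
 */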
int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	robj = gobj->driver_private;
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without a domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access, wait for the object to go idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}


/*
 * GEM ioctls.
 */
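/*
 * Report VRAM and GART sizes to userspace, minus the space already
 * reserved by the driver (stolen VGA memory, fbdev, ring and IB pool).
 */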
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = rdev->mc.real_vram_size;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
		RADEON_IB_POOL_SIZE*64*1024;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

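/*
 * Create a GEM object of the requested (page-rounded) size and return a
 * handle to it; the handle holds the only reference on success.
 */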
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
					args->initial_domain, false,
					false, &gobj);
	if (r) {
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gobj->driver_private;

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

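/* Return the mmap offset userspace should use to map the buffer. */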
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gobj->driver_private;
	args->addr_ptr = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

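/*
 * Report whether the buffer is busy and which GEM domain it currently
 * resides in, based on its TTM placement.
 */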
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gobj->driver_private;
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

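/* Block until the buffer is idle, then run any ASIC-specific wait hook. */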
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gobj->driver_private;
	r = radeon_bo_wait(robj, NULL, false);
	/* callback hw specific functions if any */
	if (robj->rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

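/* Set the tiling flags and pitch of the buffer backing the GEM object. */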
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gobj->driver_private;
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

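/* Query the tiling flags and pitch of the buffer backing the GEM object. */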
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gobj->driver_private;
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}