/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
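
/*
 * Fence handling
 *
 * A fence tracks command stream completion: the GPU writes the fence's
 * sequence number to a scratch register once the commands preceding it
 * have executed.  A fence is created, emitted into the ring, waited on,
 * and finally unreferenced.  A minimal usage sketch (error handling
 * omitted):
 *
 *	struct radeon_fence *fence;
 *
 *	radeon_fence_create(rdev, &fence);
 *	radeon_fence_emit(rdev, fence);
 *	radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */
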
#include <linux/seq_file.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
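
/*
 * radeon_fence_emit - emit a fence into the ring
 *
 * Assigns the next sequence number to @fence and emits it to the CP ring,
 * or writes it directly to the scratch register when the CP is not
 * running (so the fence is reported signaled right away).  Emitting an
 * already emitted fence is a no-op.
 */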
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (fence->emited) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
	if (!rdev->cp.ready) {
		WREG32(rdev->fence_drv.scratch_reg, fence->seq);
	} else {
		radeon_fence_ring_emit(rdev, fence);
	}
	fence->emited = true;
	list_del(&fence->list);
	list_add_tail(&fence->list, &rdev->fence_drv.emited);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}
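
/*
 * radeon_fence_poll_locked - check for signaled fences
 *
 * Reads the last signaled sequence number from the scratch register and
 * moves every fence up to and including it from the emited list to the
 * signaled list.  Also maintains the lockup detection timeout.  Returns
 * true if any waiter should be woken up.  Caller must hold the fence lock.
 */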
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	seq = RREG32(rdev->fence_drv.scratch_reg);
	if (seq != rdev->fence_drv.last_seq) {
		rdev->fence_drv.last_seq = seq;
		rdev->fence_drv.last_jiffies = jiffies;
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
			cjiffies -= rdev->fence_drv.last_jiffies;
			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv.last_timeout -= cjiffies;
			} else {
				/* the 500ms timeout has elapsed, we should
				 * test for GPU lockup
				 */
				rdev->fence_drv.last_timeout = 1;
			}
		} else {
			/* jiffies wrapped around, update last_jiffies and
			 * just wait a little longer
			 */
			rdev->fence_drv.last_jiffies = cjiffies;
		}
		return false;
	}
	n = NULL;
	list_for_each(i, &rdev->fence_drv.emited) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_del(i);
			list_add_tail(i, &rdev->fence_drv.signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv.emited);
		wake = true;
	}
	return wake;
}
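
/*
 * radeon_fence_destroy - kref release callback
 *
 * Removes the fence from whichever driver list it is on and frees it.
 * Called when the last reference is dropped; use radeon_fence_unref()
 * instead of calling this directly.
 */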
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	list_del(&fence->list);
	fence->emited = false;
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	kfree(fence);
}
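
/*
 * radeon_fence_create - allocate and initialize a fence
 *
 * Allocates a new fence with a single reference, adds it to the created
 * list and returns it in @fence.  Returns 0 on success or -ENOMEM.
 */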
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emited = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}
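
/*
 * radeon_fence_signaled - check whether a fence has signaled
 *
 * Returns true if the fence has signaled, and also treats NULL fences,
 * GPU lockup, driver shutdown and unemitted fences as signaled so that
 * callers never wait on them.  Polls the hardware if needed.
 */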
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	if (fence->rdev->gpu_lockup)
		return true;

	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emited) {
		WARN(1, "Querying an unemitted fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	return signaled;
}
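
/*
 * radeon_fence_wait - wait for a fence to signal
 *
 * Blocks (interruptibly if @intr is true) until the fence signals.  If
 * the wait times out without the sequence number advancing, checks for a
 * GPU lockup and resets the GPU if one is detected.  Returns 0 on
 * success or a negative error code if the wait was interrupted.
 */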
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv.last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv.last_seq;
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
		if (unlikely(r < 0)) {
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_timeout(rdev->fence_drv.queue,
			 radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
	}
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and the fence isn't
		 * signaled yet, resume waiting
		 */
		if (r) {
			timeout = r;
			goto retry;
		}
		/* don't protect read access to rdev->fence_drv.last_seq;
		 * if we are experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
			/* good news, we believe it's a lockup */
			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
			rdev->gpu_lockup = true;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
			/* after the reset, report the fence as signaled */
			WREG32(rdev->fence_drv.scratch_reg, fence->seq);
			rdev->gpu_lockup = false;
		}
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv.last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		goto retry;
	}
	return 0;
}
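
/*
 * radeon_fence_wait_next - wait for the oldest emitted fence
 *
 * Waits for the fence at the head of the emited list, i.e. the next one
 * expected to signal.  Returns 0 if there is nothing to wait for.
 */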
int radeon_fence_wait_next(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
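
/*
 * radeon_fence_wait_last - wait for the most recently emitted fence
 *
 * Waits for the fence at the tail of the emited list, which drains all
 * outstanding fences; useful when idling the GPU.
 */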
int radeon_fence_wait_last(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
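
/*
 * radeon_fence_ref - take an additional reference on a fence
 */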
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}
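
/*
 * radeon_fence_unref - drop a reference on a fence
 *
 * Clears the caller's pointer and frees the fence when the last
 * reference is dropped.
 */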
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, &radeon_fence_destroy);
	}
}
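
/*
 * radeon_fence_process - poll for signaled fences and wake waiters
 *
 * Called from interrupt processing when a fence interrupt fires.
 */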
void radeon_fence_process(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv.queue);
	}
}
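
/*
 * radeon_fence_driver_init - initialize the fence driver
 *
 * Grabs a scratch register for the GPU to write sequence numbers to,
 * resets the sequence counter and initializes the fence lists and wait
 * queue.  Returns 0 on success or a negative error code.
 */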
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int r;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
	if (r) {
		dev_err(rdev->dev, "fence failed to get scratch register\n");
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return r;
	}
	WREG32(rdev->fence_drv.scratch_reg, 0);
	atomic_set(&rdev->fence_drv.seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv.created);
	INIT_LIST_HEAD(&rdev->fence_drv.emited);
	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
	init_waitqueue_head(&rdev->fence_drv.queue);
	rdev->fence_drv.initialized = true;
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}
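
/*
 * radeon_fence_driver_fini - tear down the fence driver
 *
 * Wakes up any remaining waiters and releases the scratch register.
 */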
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;

	if (!rdev->fence_drv.initialized)
		return;
	wake_up_all(&rdev->fence_drv.queue);
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	rdev->fence_drv.initialized = false;
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   RREG32(rdev->fence_drv.scratch_reg));
	if (!list_empty(&rdev->fence_drv.emited)) {
		fence = list_entry(rdev->fence_drv.emited.prev,
				   struct radeon_fence, list);
		seq_printf(m, "Last emitted fence %p with 0x%08X\n",
			   fence, fence->seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}