/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/radeon/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt);

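/**
 * radeon_cs_parser_relocs() - build the relocation list for a CS
 * @p:	parser structure holding parsing context.
 *
 * Walks the RELOCS chunk, looks up the GEM object behind each handle
 * (reusing the earlier entry when a handle appears twice), fills in the
 * read/write domains and adds every buffer to the validation list, which
 * is then validated in one go.
 **/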
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = malloc(p->nrelocs * sizeof(void *),
	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = malloc(p->nrelocs * sizeof(struct radeon_cs_reloc),
	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);
		} else {
			p->relocs[i].handle = 0;
		}
	}
	return radeon_bo_list_validate(&p->validated);
}

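/**
 * radeon_cs_get_ring() - map a userspace ring id onto a hardware ring
 * @p:	parser structure holding parsing context.
 * @ring:	ring id requested in the FLAGS chunk.
 * @priority:	requested submission priority.
 *
 * Compute submissions use the CP1/CP2 rings on SI and later (picked by
 * priority) and fall back to the GFX ring on older ASICs; DMA submissions
 * require an R600 or newer ASIC and fail with -EINVAL otherwise.
 **/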
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %u\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_R600) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	}
	return 0;
}

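/**
 * radeon_cs_sync_to() - record a fence the command stream must wait for
 * @p:	parser structure holding parsing context.
 * @fence:	fence to synchronize with, may be NULL.
 *
 * Keeps the later of @fence and whatever is already stored for that
 * fence's ring in the IB's sync_to array.
 **/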
static void radeon_cs_sync_to(struct radeon_cs_parser *p,
			      struct radeon_fence *fence)
{
	struct radeon_fence *other;

	if (!fence)
		return;

	other = p->ib.sync_to[fence->ring];
	p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
}

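/**
 * radeon_cs_sync_rings() - sync the IB against all relocated buffers
 * @p:	parser structure holding parsing context.
 *
 * Makes the command stream wait for the last fence attached to each
 * buffer object referenced by the relocation list.
 **/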
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i;

	for (i = 0; i < p->nrelocs; i++) {
		if (!p->relocs[i].robj)
			continue;

		radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
	}
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
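/**
 * radeon_cs_parser_init() - initialize a parser from a CS ioctl request
 * @p:	parser structure holding parsing context.
 * @data:	pointer to the struct drm_radeon_cs ioctl argument.
 *
 * Copies the chunk array and the chunk headers in from userspace, caches
 * the RELOCS and FLAGS chunk data, decodes cs_flags/ring/priority from
 * the FLAGS chunk, validates VM usage against the ASIC, and sets up the
 * page-wise copy state used by the non-VM IB path.
 **/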
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = malloc(cs->num_chunks * sizeof(uint64_t),
	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = malloc(p->nchunks * sizeof(struct radeon_cs_chunk),
	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user *chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;

		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = malloc(size, DRM_MEM_DRIVER, M_NOWAIT);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
				p->cs_flags = p->chunks[i].kdata[0];
				if (p->chunks[i].length_dw > 1)
					ring = p->chunks[i].kdata[1];
				if (p->chunks[i].length_dw > 2)
					priority = (s32)p->chunks[i].kdata[2];
			}
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		/* we only support VM on SI+ */
		if ((p->rdev->family >= CHIP_TAHITI) &&
		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
			DRM_ERROR("VM required on SI+!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;
	}

	/* deal with non-vm */
	if ((p->chunk_ib_idx != -1) &&
	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
			DRM_ERROR("cs IB too big: %d\n",
				  p->chunks[p->chunk_ib_idx].length_dw);
			return -EINVAL;
		}
		if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
			p->chunks[p->chunk_ib_idx].kpage[0] = malloc(PAGE_SIZE, DRM_MEM_DRIVER, M_NOWAIT);
			p->chunks[p->chunk_ib_idx].kpage[1] = malloc(PAGE_SIZE, DRM_MEM_DRIVER, M_NOWAIT);
			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
				free(p->chunks[p->chunk_ib_idx].kpage[0], DRM_MEM_DRIVER);
				free(p->chunks[p->chunk_ib_idx].kpage[1], DRM_MEM_DRIVER);
				p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
				return -ENOMEM;
			}
		}
		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
		p->chunks[p->chunk_ib_idx].last_page_index =
			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
	}

	return 0;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, unvalidate the buffers; otherwise just free the
 * memory used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error) {
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib.fence);
	} else {
		ttm_eu_backoff_reservation(&parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	free(parser->track, DRM_MEM_DRIVER);
	free(parser->relocs, DRM_MEM_DRIVER);
	free(parser->relocs_ptr, DRM_MEM_DRIVER);
	for (i = 0; i < parser->nchunks; i++) {
		free(parser->chunks[i].kdata, DRM_MEM_DRIVER);
		if (parser->rdev->flags & RADEON_IS_AGP) {
			free(parser->chunks[i].kpage[0], DRM_MEM_DRIVER);
			free(parser->chunks[i].kpage[1], DRM_MEM_DRIVER);
		}
	}
	free(parser->chunks, DRM_MEM_DRIVER);
	free(parser->chunks_array, DRM_MEM_DRIVER);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

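/**
 * radeon_cs_ib_chunk() - parse and submit the IB on the non-VM path
 * @rdev:	radeon device the CS was submitted against.
 * @parser:	parser structure holding parsing context.
 *
 * Allocates an IB, lets the per-ASIC checker parse (and relocate) the
 * command stream into it, copies any remaining user pages, then syncs
 * against the relocation fences and schedules the IB.
 **/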
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached).
	 */
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  NULL, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib!\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream!\n");
		return r;
	}
	r = radeon_cs_finish_pages(parser);
	if (r) {
		DRM_ERROR("Invalid command stream!\n");
		return r;
	}
	radeon_cs_sync_rings(parser);
	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	if (r) {
		DRM_ERROR("Failed to schedule IB!\n");
	}
	return r;
}

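/**
 * radeon_bo_vm_update_pte() - update page tables for all validated BOs
 * @parser:	parser structure holding parsing context.
 * @vm:	virtual memory context the buffers are mapped into.
 *
 * Refreshes the page-table entries of the temporary ring BO and of every
 * buffer on the validation list before the IB runs in @vm.
 **/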
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = parser->rdev;
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	int r;

	r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
	if (r) {
		return r;
	}
	list_for_each_entry(lobj, &parser->validated, tv.head) {
		bo = lobj->bo;
		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
		if (r) {
			return r;
		}
	}
	return 0;
}

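/**
 * radeon_cs_ib_vm_chunk() - copy, check and submit the IB on the VM path
 * @rdev:	radeon device the CS was submitted against.
 * @parser:	parser structure holding parsing context.
 *
 * Copies the const IB (SI and later) and the IB in from userspace and has
 * the ring backend check them; then, under the VM locks, allocates the
 * page table, updates the PTEs, syncs, schedules the IB(s) and fences
 * the VM.
 **/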
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
		r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
				  vm, ib_chunk->length_dw * 4);
		if (r) {
			DRM_ERROR("Failed to get const ib!\n");
			return r;
		}
		parser->const_ib.is_const_ib = true;
		parser->const_ib.length_dw = ib_chunk->length_dw;
		/* Copy the packet into the IB */
		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
				       ib_chunk->length_dw * 4)) {
			return -EFAULT;
		}
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
		return -EINVAL;
	}
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib!\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	/* Copy the packet into the IB */
	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
			       ib_chunk->length_dw * 4)) {
		return -EFAULT;
	}
	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	sx_xlock(&rdev->vm_manager.lock);
	sx_xlock(&vm->mutex);
	r = radeon_vm_alloc_pt(rdev, vm);
	if (r) {
		goto out;
	}
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	radeon_cs_sync_rings(parser);
	radeon_cs_sync_to(parser, vm->fence);
	radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	}

	if (!r) {
		radeon_vm_fence(rdev, vm, parser->ib.fence);
	}

out:
	radeon_vm_add_to_lru(rdev, vm);
	sx_xunlock(&vm->mutex);
	sx_xunlock(&rdev->vm_manager.lock);
	return r;
}

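/**
 * radeon_cs_handle_lockup() - recover from a GPU lockup detected during CS
 * @rdev:	radeon device the CS was submitted against.
 * @r:	error code returned so far.
 *
 * -EDEADLK signals a lockup; reset the GPU and, if the reset succeeds,
 * return -EAGAIN so userspace resubmits the command stream.
 **/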
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

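/**
 * radeon_cs_ioctl() - CS ioctl entry point
 * @dev:	DRM device.
 * @data:	pointer to the struct drm_radeon_cs ioctl argument.
 * @filp:	DRM file the request came from.
 *
 * Initializes the parser, resolves relocations, then runs whichever of
 * the non-VM and VM submission paths applies before tearing the parser
 * down again.
 **/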
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	sx_slock(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		sx_sunlock(&rdev->exclusive_lock);
		return -EBUSY;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser!\n");
		radeon_cs_parser_fini(&parser, r);
		sx_sunlock(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		sx_sunlock(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r);
	sx_sunlock(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}

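/**
 * radeon_cs_finish_pages() - copy the tail of the IB chunk from userspace
 * @p:	parser structure holding parsing context.
 *
 * Copies every page after the last one fetched by radeon_get_ib_value()
 * straight into the IB, truncating the final page to the real IB length.
 **/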
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       (char *)ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}

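/**
 * radeon_cs_update_pages() - fault in one page of the IB chunk
 * @p:	parser structure holding parsing context.
 * @pg_idx:	index of the user page to copy.
 *
 * Copies any pages skipped since the last copy directly into the IB,
 * then copies page @pg_idx into the least recently used of the two
 * bounce pages (AGP) or straight into the IB (otherwise).  Returns the
 * bounce page slot that now holds @pg_idx, or 0 after setting
 * parser_error on a fault.
 **/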
static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	int new_page;
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;
	bool copy1 = !(p->rdev && (p->rdev->flags & RADEON_IS_AGP));

	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       (char *)ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
	if (copy1)
		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       (char *)ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* copy to IB for non single case */
	if (!copy1)
		memcpy((void *)(p->ib.ptr + (pg_idx * (PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}

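/**
 * radeon_get_ib_value() - fetch one dword of the IB being parsed
 * @p:	parser structure holding parsing context.
 * @idx:	dword index into the IB.
 *
 * Serves the read from one of the two cached pages when possible and
 * faults the page in through radeon_cs_update_pages() otherwise.
 **/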
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}