drm_gem.c revision 1.1
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 *    Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <dev/pci/drm/drmP.h>
29#include <dev/pci/drm/drm_vma_manager.h>
30
31#include <uvm/uvm.h>
32
33int drm_handle_cmp(struct drm_handle *, struct drm_handle *);
34SPLAY_PROTOTYPE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);
35int drm_name_cmp(struct drm_gem_object *, struct drm_gem_object *);
36SPLAY_PROTOTYPE(drm_name_tree, drm_gem_object, entry, drm_name_cmp);
37
38
39void drm_unref(struct uvm_object *);
40void drm_ref(struct uvm_object *);
41boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
42int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
43    vm_fault_t, vm_prot_t, int);
44
45struct uvm_pagerops drm_pgops = {
46	NULL,
47	drm_ref,
48	drm_unref,
49	drm_fault,
50	drm_flush,
51};
52
53void
54drm_ref(struct uvm_object *uobj)
55{
56	struct drm_gem_object *obj =
57	    container_of(uobj, struct drm_gem_object, uobj);
58
59	drm_gem_object_reference(obj);
60}
61
62void
63drm_unref(struct uvm_object *uobj)
64{
65	struct drm_gem_object *obj =
66	    container_of(uobj, struct drm_gem_object, uobj);
67
68	drm_gem_object_unreference_unlocked(obj);
69}
70
71int
72drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
73    int npages, int centeridx, vm_fault_t fault_type,
74    vm_prot_t access_type, int flags)
75{
76	struct vm_map_entry *entry = ufi->entry;
77	struct uvm_object *uobj = entry->object.uvm_obj;
78	struct drm_gem_object *obj =
79	    container_of(uobj, struct drm_gem_object, uobj);
80	struct drm_device *dev = obj->dev;
81	int ret;
82
83	/*
84	 * we do not allow device mappings to be mapped copy-on-write
85	 * so we kill any attempt to do so here.
86	 */
87
88	if (UVM_ET_ISCOPYONWRITE(entry)) {
89		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
90		return (VM_PAGER_ERROR);
91	}
92
93	/*
94	 * We could end up here as the result of a copyin(9) or
95	 * copyout(9) while handling an ioctl.  So we must be careful
96	 * not to deadlock.  Therefore we only block if the quiesce
97	 * count is zero, which guarantees we didn't enter from within
98	 * an ioctl code path.
99	 */
100	mtx_enter(&dev->quiesce_mtx);
101	if (dev->quiesce && dev->quiesce_count == 0) {
102		mtx_leave(&dev->quiesce_mtx);
103		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
104		mtx_enter(&dev->quiesce_mtx);
105		while (dev->quiesce) {
106			msleep(&dev->quiesce, &dev->quiesce_mtx,
107			    PZERO, "drmflt", 0);
108		}
109		mtx_leave(&dev->quiesce_mtx);
110		return (VM_PAGER_REFAULT);
111	}
112	dev->quiesce_count++;
113	mtx_leave(&dev->quiesce_mtx);
114
115	/* Call down into driver to do the magic */
116	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
117	    entry->start), vaddr, pps, npages, centeridx,
118	    access_type, flags);
119
120	mtx_enter(&dev->quiesce_mtx);
121	dev->quiesce_count--;
122	if (dev->quiesce)
123		wakeup(&dev->quiesce_count);
124	mtx_leave(&dev->quiesce_mtx);
125
126	return (ret);
127}
128
129boolean_t
130drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
131{
132	return (TRUE);
133}
134
135struct uvm_object *
136udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
137{
138	struct drm_device *dev = drm_get_device_from_kdev(device);
139	struct drm_gem_object *obj;
140	struct drm_vma_offset_node *node;
141	struct drm_file *priv;
142	struct file *filp;
143
144	if (cdevsw[major(device)].d_mmap != drmmmap)
145		return NULL;
146
147	if (dev == NULL)
148		return NULL;
149
150	if (dev->driver->mmap)
151		return dev->driver->mmap(dev, off, size);
152
153	mutex_lock(&dev->struct_mutex);
154
155	priv = drm_find_file_by_minor(dev, minor(device));
156	if (priv == NULL) {
157		mutex_unlock(&dev->struct_mutex);
158		return NULL;
159	}
160	filp = priv->filp;
161
162	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
163					   off >> PAGE_SHIFT,
164					   atop(round_page(size)));
165	if (!node) {
166		mutex_unlock(&dev->struct_mutex);
167		return NULL;
168	} else if (!drm_vma_node_is_allowed(node, filp)) {
169		mutex_unlock(&dev->struct_mutex);
170		return NULL;
171	}
172
173	obj = container_of(node, struct drm_gem_object, vma_node);
174	drm_gem_object_reference(obj);
175
176	mutex_unlock(&dev->struct_mutex);
177	return &obj->uobj;
178}
179
180/** @file drm_gem.c
181 *
182 * This file provides some of the base ioctls and library routines for
183 * the graphics memory manager implemented by each device driver.
184 *
185 * Because various devices have different requirements in terms of
186 * synchronization and migration strategies, implementing that is left up to
187 * the driver, and all that the general API provides should be generic --
188 * allocating objects, reading/writing data with the CPU, freeing objects.
189 * Even there, platform-dependent optimizations for reading/writing data with
190 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
191 * the DRI2 implementation wants to have at least allocate/mmap be generic.
192 *
193 * The goal was to have swap-backed object allocation managed through
194 * struct file.  However, file descriptors as handles to a struct file have
195 * two major failings:
196 * - Process limits prevent more than 1024 or so being used at a time by
197 *   default.
198 * - Inability to allocate high fds will aggravate the X Server's select()
199 *   handling, and likely that of many GL client applications as well.
200 *
201 * This led to a plan of using our own integer IDs (called handles, following
202 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
203 * ioctls.  The objects themselves will still include the struct file so
204 * that we can transition to fds if the required kernel infrastructure shows
205 * up at a later date, and as our interface with shmfs for memory allocation.
206 */
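
/*
 * For illustration, the typical driver-side flow implied above: a
 * driver's "create" ioctl allocates an object, wraps it in a handle for
 * userspace and then drops its creation reference, since the handle now
 * keeps the object alive.  This is only a sketch; example_create_ioctl
 * and struct drm_example_create are hypothetical names:
 *
 *	int
 *	example_create_ioctl(struct drm_device *dev, void *data,
 *	    struct drm_file *file_priv)
 *	{
 *		struct drm_example_create *args = data;
 *		struct drm_gem_object *obj;
 *		u32 handle;
 *		int ret;
 *
 *		obj = drm_calloc(1, sizeof(*obj));
 *		if (obj == NULL)
 *			return -ENOMEM;
 *
 *		ret = drm_gem_object_init(dev, obj, round_page(args->size));
 *		if (ret) {
 *			drm_free(obj);
 *			return ret;
 *		}
 *
 *		// drm_gem_handle_create() takes its own reference, so the
 *		// creation reference can be dropped unconditionally.
 *		ret = drm_gem_handle_create(file_priv, obj, &handle);
 *		drm_gem_object_unreference_unlocked(obj);
 *		if (ret)
 *			return ret;
 *
 *		args->handle = handle;
 *		return 0;
 *	}
 */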
207
208/*
209 * We make up offsets for buffer objects so we can recognize them at
210 * mmap time.
211 */
212
213/* pgoff in mmap is an unsigned long, so we need to make sure that
214 * the faked up offset will fit
215 */
216
217#if BITS_PER_LONG == 64
218#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
219#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
220#else
221#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
222#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
223#endif
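
/*
 * To make the 64-bit case above concrete (assuming the usual PAGE_SHIFT
 * of 12): 0xFFFFFFFFUL >> 12 == 0xFFFFF, so the faked-up offsets start
 * at page 0x100000 (byte offset 4GB) and span 0xFFFFF * 16 pages,
 * roughly 64GB worth of page offsets, which still fits comfortably in
 * an unsigned long pgoff.
 */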
224
225/**
226 * Initialize the GEM device fields
227 */
228
229int
230drm_gem_init(struct drm_device *dev)
231{
232	struct drm_vma_offset_manager *vma_offset_manager;
233
234	rw_init(&dev->object_name_lock, "drmonl");
235#ifdef __linux__
236	idr_init(&dev->object_name_idr);
237#else
238	SPLAY_INIT(&dev->name_tree);
239#endif
240
241	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
242	if (!vma_offset_manager) {
243		DRM_ERROR("out of memory\n");
244		return -ENOMEM;
245	}
246
247	dev->vma_offset_manager = vma_offset_manager;
248	drm_vma_offset_manager_init(vma_offset_manager,
249				    DRM_FILE_PAGE_OFFSET_START,
250				    DRM_FILE_PAGE_OFFSET_SIZE);
251
252	return 0;
253}
254
255void
256drm_gem_destroy(struct drm_device *dev)
257{
258
259	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
260	kfree(dev->vma_offset_manager);
261	dev->vma_offset_manager = NULL;
262}
263
264#ifdef __linux__
265
266/**
267 * Initialize an already allocated GEM object of the specified size with
268 * shmfs backing store.
269 */
270int drm_gem_object_init(struct drm_device *dev,
271			struct drm_gem_object *obj, size_t size)
272{
273	struct file *filp;
274
275	drm_gem_private_object_init(dev, obj, size);
276
277	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
278	if (IS_ERR(filp))
279		return PTR_ERR(filp);
280
281	obj->filp = filp;
282
283	return 0;
284}
285EXPORT_SYMBOL(drm_gem_object_init);
286
287#else
288
289int drm_gem_object_init(struct drm_device *dev,
290			struct drm_gem_object *obj, size_t size)
291{
292	drm_gem_private_object_init(dev, obj, size);
293
294	obj->uao = uao_create(size, 0);
295	uvm_objinit(&obj->uobj, &drm_pgops, 1);
296
297	atomic_inc(&dev->obj_count);
298	atomic_add(obj->size, &dev->obj_memory);
299
300	return 0;
301}
302
303#endif
304
305/**
306 * Initialize an already allocated GEM object of the specified size with
307 * no GEM-provided backing store. Instead the caller is responsible for
308 * backing the object and handling it.
309 */
310void drm_gem_private_object_init(struct drm_device *dev,
311				 struct drm_gem_object *obj, size_t size)
312{
313	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
314
315	obj->dev = dev;
316	obj->filp = NULL;
317
318	kref_init(&obj->refcount);
319	obj->handle_count = 0;
320	obj->size = size;
321	drm_vma_node_reset(&obj->vma_node);
322}
323EXPORT_SYMBOL(drm_gem_private_object_init);
324
325static void
326drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
327{
328#ifdef __linux__
329	/*
330	 * Note: obj->dma_buf can't disappear as long as we still hold a
331	 * handle reference in obj->handle_count.
332	 */
333	mutex_lock(&filp->prime.lock);
334	if (obj->dma_buf) {
335		drm_prime_remove_buf_handle_locked(&filp->prime,
336						   obj->dma_buf);
337	}
338	mutex_unlock(&filp->prime.lock);
339#endif
340}
341
342#ifdef __linux__
343
344/**
345 * Called after the last handle to the object has been closed
346 *
347 * Removes any name for the object. Note that this must be
348 * called before drm_gem_object_free or we'll be touching
349 * freed memory
350 */
351static void drm_gem_object_handle_free(struct drm_gem_object *obj)
352{
353	struct drm_device *dev = obj->dev;
354
355	/* Remove any name for this object */
356	if (obj->name) {
357		idr_remove(&dev->object_name_idr, obj->name);
358		obj->name = 0;
359	}
360}
361
362#else
363
364static void drm_gem_object_handle_free(struct drm_gem_object *obj)
365{
366	struct drm_device *dev = obj->dev;
367
368	/* Remove any name for this object */
369	if (obj->name) {
370		SPLAY_REMOVE(drm_name_tree, &dev->name_tree, obj);
371		obj->name = 0;
372	}
373}
374
375#endif
376
377static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
378{
379#ifdef __linux__
380	/* Unbreak the reference cycle if we have an exported dma_buf. */
381	if (obj->dma_buf) {
382		dma_buf_put(obj->dma_buf);
383		obj->dma_buf = NULL;
384	}
385#endif
386}
387
388static void
389drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
390{
391	if (WARN_ON(obj->handle_count == 0))
392		return;
393
394	/*
395	 * Must drop the handle count (and any name) before dropping
396	 * the object reference below, as that may be the last ref and
397	 * the object would disappear before we checked for a name.
398	 */
399
400	mutex_lock(&obj->dev->object_name_lock);
401	if (--obj->handle_count == 0) {
402		drm_gem_object_handle_free(obj);
403		drm_gem_object_exported_dma_buf_free(obj);
404	}
405	mutex_unlock(&obj->dev->object_name_lock);
406
407	drm_gem_object_unreference_unlocked(obj);
408}
409
410#ifdef __linux__
411
412/**
413 * Removes the mapping from handle to filp for this object.
414 */
415int
416drm_gem_handle_delete(struct drm_file *filp, u32 handle)
417{
418	struct drm_device *dev;
419	struct drm_gem_object *obj;
420
421	/* This is gross. The idr system doesn't let us try a delete and
422	 * return an error code.  It just spews if you fail at deleting.
423	 * So, we have to grab a lock around finding the object and then
424	 * doing the delete on it and dropping the refcount, or the user
425	 * could race us to double-decrement the refcount and cause a
426	 * use-after-free later.  Given the frequency of our handle lookups,
427	 * we may want to use ida for number allocation and a hash table
428	 * for the pointers, anyway.
429	 */
430	spin_lock(&filp->table_lock);
431
432	/* Check if we currently have a reference on the object */
433	obj = idr_find(&filp->object_idr, handle);
434	if (obj == NULL) {
435		spin_unlock(&filp->table_lock);
436		return -EINVAL;
437	}
438	dev = obj->dev;
439
440	/* Release reference and decrement refcount. */
441	idr_remove(&filp->object_idr, handle);
442	spin_unlock(&filp->table_lock);
443
444	if (drm_core_check_feature(dev, DRIVER_PRIME))
445		drm_gem_remove_prime_handles(obj, filp);
446	drm_vma_node_revoke(&obj->vma_node, filp->filp);
447
448	if (dev->driver->gem_close_object)
449		dev->driver->gem_close_object(obj, filp);
450	drm_gem_object_handle_unreference_unlocked(obj);
451
452	return 0;
453}
454EXPORT_SYMBOL(drm_gem_handle_delete);
455
456#else
457
458int
459drm_gem_handle_delete(struct drm_file *filp, u32 handle)
460{
461	struct drm_device *dev;
462	struct drm_gem_object *obj;
463	struct drm_handle *han, find;
464
465	spin_lock(&filp->table_lock);
466
467	find.handle = handle;
468	han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &find);
469	if (han == NULL) {
470		spin_unlock(&filp->table_lock);
471		return -EINVAL;
472	}
473	obj = han->obj;
474	dev = obj->dev;
475
476	SPLAY_REMOVE(drm_obj_tree, &filp->obj_tree, han);
477	spin_unlock(&filp->table_lock);
478
479	drm_free(han);
480
481	if (drm_core_check_feature(dev, DRIVER_PRIME))
482		drm_gem_remove_prime_handles(obj, filp);
483	drm_vma_node_revoke(&obj->vma_node, filp->filp);
484
485	if (dev->driver->gem_close_object)
486		dev->driver->gem_close_object(obj, filp);
487	drm_gem_object_handle_unreference_unlocked(obj);
488
489	return 0;
490}
491
492#endif
493
494/**
495 * drm_gem_dumb_destroy - dumb fb callback helper for GEM-based drivers
496 *
497 * This implements the ->dumb_destroy KMS driver callback for drivers which use
498 * GEM to manage their backing storage.
499 */
500int drm_gem_dumb_destroy(struct drm_file *file,
501			 struct drm_device *dev,
502			 uint32_t handle)
503{
504	return drm_gem_handle_delete(file, handle);
505}
506EXPORT_SYMBOL(drm_gem_dumb_destroy);
507
508#ifdef __linux__
509
510/**
511 * drm_gem_handle_create_tail - internal function to create a handle
512 *
513 * This expects the dev->object_name_lock to be held already and will drop it
514 * before returning. Used to avoid races in establishing new handles when
515 * importing an object from either a flink name or a dma-buf.
516 */
517int
518drm_gem_handle_create_tail(struct drm_file *file_priv,
519			   struct drm_gem_object *obj,
520			   u32 *handlep)
521{
522	struct drm_device *dev = obj->dev;
523	int ret;
524
525	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
526
527	/*
528	 * Get the user-visible handle using idr.  Preload and perform
529	 * allocation under our spinlock.
530	 */
531	idr_preload(GFP_KERNEL);
532	spin_lock(&file_priv->table_lock);
533
534	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
535	drm_gem_object_reference(obj);
536	obj->handle_count++;
537	spin_unlock(&file_priv->table_lock);
538	idr_preload_end();
539	mutex_unlock(&dev->object_name_lock);
540	if (ret < 0) {
541		drm_gem_object_handle_unreference_unlocked(obj);
542		return ret;
543	}
544	*handlep = ret;
545
546	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
547	if (ret) {
548		drm_gem_handle_delete(file_priv, *handlep);
549		return ret;
550	}
551
552	if (dev->driver->gem_open_object) {
553		ret = dev->driver->gem_open_object(obj, file_priv);
554		if (ret) {
555			drm_gem_handle_delete(file_priv, *handlep);
556			return ret;
557		}
558	}
559
560	return 0;
561}
562
563#else
564
565int
566drm_gem_handle_create_tail(struct drm_file *file_priv,
567			   struct drm_gem_object *obj,
568			   u32 *handlep)
569{
570	struct drm_device *dev = obj->dev;
571	struct drm_handle *han;
572	int ret;
573
574	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
575
576	/*
577	 * Get the user-visible handle from the per-file splay tree.
578	 * Allocate the tree entry before taking our spinlock.
579	 */
580	if ((han = drm_calloc(1, sizeof(*han))) == NULL)
581		return -ENOMEM;
582	han->obj = obj;
583	KASSERT(obj->dev != NULL);
584	spin_lock(&file_priv->table_lock);
585
586again:
587	han->handle = ++file_priv->obj_id;
588	/*
589	 * Make sure we have no duplicates; this will hurt once we wrap.
590	 * Handle 0 is reserved.
591	 */
592	if (han->handle == 0 || SPLAY_INSERT(drm_obj_tree,
593	    &file_priv->obj_tree, han))
594		goto again;
595	drm_gem_object_reference(obj);
596	obj->handle_count++;
597	spin_unlock(&file_priv->table_lock);
598	mutex_unlock(&dev->object_name_lock);
599	*handlep = han->handle;
600
601	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
602	if (ret) {
603		drm_gem_handle_delete(file_priv, *handlep);
604		return ret;
605	}
606
607	if (dev->driver->gem_open_object) {
608		ret = dev->driver->gem_open_object(obj, file_priv);
609		if (ret) {
610			drm_gem_handle_delete(file_priv, *handlep);
611			return ret;
612		}
613	}
614
615	return 0;
616}
617
618#endif
619
620/**
621 * Create a handle for this object. This adds a handle reference
622 * to the object, which includes a regular reference count. Callers
623 * will likely want to drop their reference to the object afterwards.
624 */
625int
626drm_gem_handle_create(struct drm_file *file_priv,
627		       struct drm_gem_object *obj,
628		       u32 *handlep)
629{
630	mutex_lock(&obj->dev->object_name_lock);
631
632	return drm_gem_handle_create_tail(file_priv, obj, handlep);
633}
634EXPORT_SYMBOL(drm_gem_handle_create);
635
636
637/**
638 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
639 * @obj: obj in question
640 *
641 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
642 */
643void
644drm_gem_free_mmap_offset(struct drm_gem_object *obj)
645{
646	struct drm_device *dev = obj->dev;
647
648	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
649}
650EXPORT_SYMBOL(drm_gem_free_mmap_offset);
651
652/**
653 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
654 * @obj: obj in question
655 * @size: the virtual size
656 *
657 * GEM memory mapping works by handing back to userspace a fake mmap offset
658 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
659 * up the object based on the offset and sets up the various memory mapping
660 * structures.
661 *
662 * This routine allocates and attaches a fake offset for @obj, in cases where
663 * the virtual size differs from the physical size (i.e. obj->size).  Otherwise
664 * just use drm_gem_create_mmap_offset().
665 */
666int
667drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
668{
669	struct drm_device *dev = obj->dev;
670
671	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
672				  size / PAGE_SIZE);
673}
674EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
675
676/**
677 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
678 * @obj: obj in question
679 *
680 * GEM memory mapping works by handing back to userspace a fake mmap offset
681 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
682 * up the object based on the offset and sets up the various memory mapping
683 * structures.
684 *
685 * This routine allocates and attaches a fake offset for @obj.
686 */
687int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
688{
689	return drm_gem_create_mmap_offset_size(obj, obj->size);
690}
691EXPORT_SYMBOL(drm_gem_create_mmap_offset);
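
/*
 * For illustration, a driver's "map" ioctl would typically look up the
 * object by handle, create the fake offset and return the byte offset
 * for userspace to pass to mmap(2).  This is only a sketch;
 * example_map_ioctl and struct drm_example_map are hypothetical, while
 * drm_vma_node_offset_addr() comes from drm_vma_manager.h:
 *
 *	int
 *	example_map_ioctl(struct drm_device *dev, void *data,
 *	    struct drm_file *file_priv)
 *	{
 *		struct drm_example_map *args = data;
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *		if (obj == NULL)
 *			return -ENOENT;
 *
 *		ret = drm_gem_create_mmap_offset(obj);
 *		if (ret == 0)
 *			args->offset =
 *			    drm_vma_node_offset_addr(&obj->vma_node);
 *
 *		drm_gem_object_unreference_unlocked(obj);
 *		return ret;
 *	}
 */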
692
693#ifdef __linux__
694
695/**
696 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
697 * from shmem
698 * @obj: obj in question
699 * @gfpmask: gfp mask of requested pages
700 */
701struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
702{
703	struct inode *inode;
704	struct address_space *mapping;
705	struct page *p, **pages;
706	int i, npages;
707
708	/* This is the shared memory object that backs the GEM resource */
709	inode = file_inode(obj->filp);
710	mapping = inode->i_mapping;
711
712	/* We already BUG_ON() for non-page-aligned sizes in
713	 * drm_gem_object_init(), so we should never hit this unless
714	 * driver author is doing something really wrong:
715	 */
716	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
717
718	npages = obj->size >> PAGE_SHIFT;
719
720	pages = drm_malloc_ab(npages, sizeof(struct page *));
721	if (pages == NULL)
722		return ERR_PTR(-ENOMEM);
723
724	gfpmask |= mapping_gfp_mask(mapping);
725
726	for (i = 0; i < npages; i++) {
727		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
728		if (IS_ERR(p))
729			goto fail;
730		pages[i] = p;
731
732		/* There is a hypothetical issue w/ drivers that require
733		 * buffer memory in the low 4GB.. if the pages are un-
734		 * pinned, and swapped out, they can end up swapped back
735		 * in above 4GB.  If pages are already in memory, then
736		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
737		 * even if the already in-memory page disobeys the mask.
738		 *
739		 * It is only a theoretical issue today, because none of
740		 * the devices with this limitation can be populated with
741		 * enough memory to trigger the issue.  But this BUG_ON()
742		 * is here as a reminder in case the problem with
743		 * shmem_read_mapping_page_gfp() isn't solved by the time
744		 * it does become a real issue.
745		 *
746		 * See this thread: http://lkml.org/lkml/2011/7/11/238
747		 */
748		BUG_ON((gfpmask & __GFP_DMA32) &&
749				(page_to_pfn(p) >= 0x00100000UL));
750	}
751
752	return pages;
753
754fail:
755	while (i--)
756		page_cache_release(pages[i]);
757
758	drm_free_large(pages);
759	return ERR_CAST(p);
760}
761EXPORT_SYMBOL(drm_gem_get_pages);
762
763/**
764 * drm_gem_put_pages - helper to free backing pages for a GEM object
765 * @obj: obj in question
766 * @pages: pages to free
767 * @dirty: if true, pages will be marked as dirty
768 * @accessed: if true, the pages will be marked as accessed
769 */
770void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
771		bool dirty, bool accessed)
772{
773	int i, npages;
774
775	/* We already BUG_ON() for non-page-aligned sizes in
776	 * drm_gem_object_init(), so we should never hit this unless
777	 * driver author is doing something really wrong:
778	 */
779	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
780
781	npages = obj->size >> PAGE_SHIFT;
782
783	for (i = 0; i < npages; i++) {
784		if (dirty)
785			set_page_dirty(pages[i]);
786
787		if (accessed)
788			mark_page_accessed(pages[i]);
789
790		/* Undo the reference we took when populating the table */
791		page_cache_release(pages[i]);
792	}
793
794	drm_free_large(pages);
795}
796EXPORT_SYMBOL(drm_gem_put_pages);
797
798#endif
799
800#ifdef __linux__
801
802/** Returns a reference to the object named by the handle. */
803struct drm_gem_object *
804drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
805		      u32 handle)
806{
807	struct drm_gem_object *obj;
808
809	spin_lock(&filp->table_lock);
810
811	/* Check if we currently have a reference on the object */
812	obj = idr_find(&filp->object_idr, handle);
813	if (obj == NULL) {
814		spin_unlock(&filp->table_lock);
815		return NULL;
816	}
817
818	drm_gem_object_reference(obj);
819
820	spin_unlock(&filp->table_lock);
821
822	return obj;
823}
824EXPORT_SYMBOL(drm_gem_object_lookup);
825
826#else
827
828struct drm_gem_object *
829drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
830		      u32 handle)
831{
832	struct drm_gem_object *obj;
833	struct drm_handle *han, search;
834
835	spin_lock(&filp->table_lock);
836
837	/* Check if we currently have a reference on the object */
838	search.handle = handle;
839	han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &search);
840	if (han == NULL) {
841		spin_unlock(&filp->table_lock);
842		return NULL;
843	}
844	obj = han->obj;
845
846	drm_gem_object_reference(obj);
847
848	spin_unlock(&filp->table_lock);
849
850	return obj;
851}
852
853#endif
854
855/**
856 * Releases the handle to an mm object.
857 */
858int
859drm_gem_close_ioctl(struct drm_device *dev, void *data,
860		    struct drm_file *file_priv)
861{
862	struct drm_gem_close *args = data;
863	int ret;
864
865	if (!(dev->driver->driver_features & DRIVER_GEM))
866		return -ENODEV;
867
868	ret = drm_gem_handle_delete(file_priv, args->handle);
869
870	return ret;
871}
872
873#ifdef __linux__
874
875/**
876 * Create a global name for an object, returning the name.
877 *
878 * Note that the name does not hold a reference; when the object
879 * is freed, the name goes away.
880 */
881int
882drm_gem_flink_ioctl(struct drm_device *dev, void *data,
883		    struct drm_file *file_priv)
884{
885	struct drm_gem_flink *args = data;
886	struct drm_gem_object *obj;
887	int ret;
888
889	if (!(dev->driver->driver_features & DRIVER_GEM))
890		return -ENODEV;
891
892	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
893	if (obj == NULL)
894		return -ENOENT;
895
896	mutex_lock(&dev->object_name_lock);
897	idr_preload(GFP_KERNEL);
898	/* prevent races with concurrent gem_close. */
899	if (obj->handle_count == 0) {
900		ret = -ENOENT;
901		goto err;
902	}
903
904	if (!obj->name) {
905		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
906		if (ret < 0)
907			goto err;
908
909		obj->name = ret;
910	}
911
912	args->name = (uint64_t) obj->name;
913	ret = 0;
914
915err:
916	idr_preload_end();
917	mutex_unlock(&dev->object_name_lock);
918	drm_gem_object_unreference_unlocked(obj);
919	return ret;
920}
921
922#else
923
924int
925drm_gem_flink_ioctl(struct drm_device *dev, void *data,
926		    struct drm_file *file_priv)
927{
928	struct drm_gem_flink *args = data;
929	struct drm_gem_object *obj;
930	int ret;
931
932	if (!(dev->driver->flags & DRIVER_GEM))
933		return -ENODEV;
934
935	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
936	if (obj == NULL)
937		return -ENOENT;
938
939	mutex_lock(&dev->object_name_lock);
940	if (obj->handle_count == 0) {
941		ret = -ENOENT;
942		goto err;
943	}
944
945	if (!obj->name) {
946again:
947		obj->name = ++dev->obj_name;
948		/* 0 is reserved, make sure we don't clash. */
949		if (obj->name == 0 || SPLAY_INSERT(drm_name_tree,
950		    &dev->name_tree, obj))
951			goto again;
952	}
953
954	args->name = (uint64_t)obj->name;
955	ret = 0;
956
957err:
958	mutex_unlock(&dev->object_name_lock);
959	drm_gem_object_unreference_unlocked(obj);
960	return ret;
961}
962
963#endif
964
965#ifdef __linux__
966
967/**
968 * Open an object using the global name, returning a handle and the size.
969 *
970 * This handle (of course) holds a reference to the object, so the object
971 * will not go away until the handle is deleted.
972 */
973int
974drm_gem_open_ioctl(struct drm_device *dev, void *data,
975		   struct drm_file *file_priv)
976{
977	struct drm_gem_open *args = data;
978	struct drm_gem_object *obj;
979	int ret;
980	u32 handle;
981
982	if (!(dev->driver->driver_features & DRIVER_GEM))
983		return -ENODEV;
984
985	mutex_lock(&dev->object_name_lock);
986	obj = idr_find(&dev->object_name_idr, (int) args->name);
987	if (obj) {
988		drm_gem_object_reference(obj);
989	} else {
990		mutex_unlock(&dev->object_name_lock);
991		return -ENOENT;
992	}
993
994	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
995	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
996	drm_gem_object_unreference_unlocked(obj);
997	if (ret)
998		return ret;
999
1000	args->handle = handle;
1001	args->size = obj->size;
1002
1003	return 0;
1004}
1005
1006#else
1007
1008int
1009drm_gem_open_ioctl(struct drm_device *dev, void *data,
1010		   struct drm_file *file_priv)
1011{
1012	struct drm_gem_open *args = data;
1013	struct drm_gem_object *obj, search;
1014	int ret;
1015	u32 handle;
1016
1017	if (!(dev->driver->flags & DRIVER_GEM))
1018		return -ENODEV;
1019
1020	mutex_lock(&dev->object_name_lock);
1021	search.name = args->name;
1022	obj = SPLAY_FIND(drm_name_tree, &dev->name_tree, &search);
1023	if (obj) {
1024		drm_gem_object_reference(obj);
1025	} else {
1026		mutex_unlock(&dev->object_name_lock);
1027		return -ENOENT;
1028	}
1029
1030	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
1031	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
1032	drm_gem_object_unreference_unlocked(obj);
1033	if (ret)
1034		return ret;
1035
1036	args->handle = handle;
1037	args->size = obj->size;
1038
1039	return 0;
1040}
1041
1042#endif
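
/*
 * For illustration, the flink and open ioctls above are how userspace
 * shares a buffer between two DRM file descriptors by global name.  A
 * sketch of the userspace side (error handling omitted; export_fd,
 * import_fd and handle are assumed to come from earlier opens and a
 * driver create ioctl):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(export_fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	// flink.name now globally names the object.
 *
 *	struct drm_gem_open op = { .name = flink.name };
 *	ioctl(import_fd, DRM_IOCTL_GEM_OPEN, &op);
 *	// op.handle is a handle in import_fd's namespace, op.size its size.
 *
 *	struct drm_gem_close cl = { .handle = op.handle };
 *	ioctl(import_fd, DRM_IOCTL_GEM_CLOSE, &cl);
 */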
1043
1044#ifdef __linux__
1045
1046/**
1047 * Called at device open time, sets up the structure for handling refcounting
1048 * of mm objects.
1049 */
1050void
1051drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
1052{
1053	idr_init(&file_private->object_idr);
1054	spin_lock_init(&file_private->table_lock);
1055}
1056
1057#else
1058
1059void
1060drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
1061{
1062	SPLAY_INIT(&file_private->obj_tree);
1063	mtx_init(&file_private->table_lock, IPL_NONE);
1064}
1065
1066#endif
1067
1068#ifdef __linux__
1069
1070/**
1071 * Called at device close to release the file's
1072 * handle references on objects.
1073 */
1074static int
1075drm_gem_object_release_handle(int id, void *ptr, void *data)
1076{
1077	struct drm_file *file_priv = data;
1078	struct drm_gem_object *obj = ptr;
1079	struct drm_device *dev = obj->dev;
1080
1081	if (drm_core_check_feature(dev, DRIVER_PRIME))
1082		drm_gem_remove_prime_handles(obj, file_priv);
1083	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
1084
1085	if (dev->driver->gem_close_object)
1086		dev->driver->gem_close_object(obj, file_priv);
1087
1088	drm_gem_object_handle_unreference_unlocked(obj);
1089
1090	return 0;
1091}
1092
1093/**
1094 * Called at close time when the filp is going away.
1095 *
1096 * Releases any remaining references on objects by this filp.
1097 */
1098void
1099drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
1100{
1101	idr_for_each(&file_private->object_idr,
1102		     &drm_gem_object_release_handle, file_private);
1103	idr_destroy(&file_private->object_idr);
1104}
1105
1106#else
1107
1108static int
1109drm_gem_object_release_handle(struct drm_file *file_priv,
1110			      struct drm_gem_object *obj)
1111{
1112	struct drm_device *dev = obj->dev;
1113
1114	if (drm_core_check_feature(dev, DRIVER_PRIME))
1115		drm_gem_remove_prime_handles(obj, file_priv);
1116	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
1117
1118	if (dev->driver->gem_close_object)
1119		dev->driver->gem_close_object(obj, file_priv);
1120
1121	drm_gem_object_handle_unreference_unlocked(obj);
1122
1123	return 0;
1124}
1125
1126void
1127drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
1128{
1129	struct drm_handle *han;
1130
1131	while ((han = SPLAY_ROOT(&file_private->obj_tree)) != NULL) {
1132		SPLAY_REMOVE(drm_obj_tree, &file_private->obj_tree, han);
1133		drm_gem_object_release_handle(file_private, han->obj);
1134		drm_free(han);
1135	}
1136}
1137
1138#endif
1139
1140#ifdef __linux__
1141
1142void
1143drm_gem_object_release(struct drm_gem_object *obj)
1144{
1145	WARN_ON(obj->dma_buf);
1146
1147	if (obj->filp)
1148	    fput(obj->filp);
1149}
1150EXPORT_SYMBOL(drm_gem_object_release);
1151
1152#else
1153
1154void
1155drm_gem_object_release(struct drm_gem_object *obj)
1156{
1157	struct drm_device *dev = obj->dev;
1158
1159	if (obj->uao)
1160		uao_detach(obj->uao);
1161
1162	atomic_dec(&dev->obj_count);
1163	atomic_sub(obj->size, &dev->obj_memory);
1164}
1165
1166#endif
1167
1168/**
1169 * Called after the last reference to the object has been lost.
1170 * Must be called holding dev->struct_mutex.
1171 *
1172 * Frees the object.
1173 */
1174void
1175drm_gem_object_free(struct kref *kref)
1176{
1177	struct drm_gem_object *obj = container_of(kref, struct drm_gem_object, refcount);
1178	struct drm_device *dev = obj->dev;
1179
1180	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1181
1182	if (dev->driver->gem_free_object != NULL)
1183		dev->driver->gem_free_object(obj);
1184}
1185EXPORT_SYMBOL(drm_gem_object_free);
1186
1187#ifdef __linux__
1188
1189void drm_gem_vm_open(struct vm_area_struct *vma)
1190{
1191	struct drm_gem_object *obj = vma->vm_private_data;
1192
1193	drm_gem_object_reference(obj);
1194
1195	mutex_lock(&obj->dev->struct_mutex);
1196	drm_vm_open_locked(obj->dev, vma);
1197	mutex_unlock(&obj->dev->struct_mutex);
1198}
1199EXPORT_SYMBOL(drm_gem_vm_open);
1200
1201void drm_gem_vm_close(struct vm_area_struct *vma)
1202{
1203	struct drm_gem_object *obj = vma->vm_private_data;
1204	struct drm_device *dev = obj->dev;
1205
1206	mutex_lock(&dev->struct_mutex);
1207	drm_vm_close_locked(obj->dev, vma);
1208	drm_gem_object_unreference(obj);
1209	mutex_unlock(&dev->struct_mutex);
1210}
1211EXPORT_SYMBOL(drm_gem_vm_close);
1212
1213/**
1214 * drm_gem_mmap_obj - memory map a GEM object
1215 * @obj: the GEM object to map
1216 * @obj_size: the object size to be mapped, in bytes
1217 * @vma: VMA for the area to be mapped
1218 *
1219 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
1220 * provided by the driver. Depending on their requirements, drivers can either
1221 * provide a fault handler in their gem_vm_ops (in which case any accesses to
1222 * the object will be trapped, to perform migration, GTT binding, surface
1223 * register allocation, or performance monitoring), or mmap the buffer memory
1224 * synchronously after calling drm_gem_mmap_obj.
1225 *
1226 * This function is mainly intended to implement the DMABUF mmap operation, when
1227 * the GEM object is not looked up based on its fake offset. To implement the
1228 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
1229 *
1230 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
1231 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
1232 * callers must verify access restrictions before calling this helper.
1233 *
1234 * NOTE: This function has to be protected with dev->struct_mutex
1235 *
1236 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
1237 * size, or if no gem_vm_ops are provided.
1238 */
1239int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1240		     struct vm_area_struct *vma)
1241{
1242	struct drm_device *dev = obj->dev;
1243
1244	lockdep_assert_held(&dev->struct_mutex);
1245
1246	/* Check for valid size. */
1247	if (obj_size < vma->vm_end - vma->vm_start)
1248		return -EINVAL;
1249
1250	if (!dev->driver->gem_vm_ops)
1251		return -EINVAL;
1252
1253	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1254	vma->vm_ops = dev->driver->gem_vm_ops;
1255	vma->vm_private_data = obj;
1256	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1257
1258	/* Take a ref for this mapping of the object, so that the fault
1259	 * handler can dereference the mmap offset's pointer to the object.
1260	 * This reference is cleaned up by the corresponding vm_close
1261	 * (which should happen whether the vma was created by this call, or
1262	 * by a vm_open due to mremap or partial unmap or whatever).
1263	 */
1264	drm_gem_object_reference(obj);
1265
1266	drm_vm_open_locked(dev, vma);
1267	return 0;
1268}
1269EXPORT_SYMBOL(drm_gem_mmap_obj);
1270
1271/**
1272 * drm_gem_mmap - memory map routine for GEM objects
1273 * @filp: DRM file pointer
1274 * @vma: VMA for the area to be mapped
1275 *
1276 * If a driver supports GEM object mapping, mmap calls on the DRM file
1277 * descriptor will end up here.
1278 *
1279 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
1280 * contain the fake offset we created when the GTT map ioctl was called on
1281 * the object) and map it with a call to drm_gem_mmap_obj().
1282 *
1283 * If the caller is not granted access to the buffer object, the mmap will fail
1284 * with EACCES. Please see the vma manager for more information.
1285 */
1286int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1287{
1288	struct drm_file *priv = filp->private_data;
1289	struct drm_device *dev = priv->minor->dev;
1290	struct drm_gem_object *obj;
1291	struct drm_vma_offset_node *node;
1292	int ret = 0;
1293
1294	if (drm_device_is_unplugged(dev))
1295		return -ENODEV;
1296
1297	mutex_lock(&dev->struct_mutex);
1298
1299	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
1300					   vma->vm_pgoff,
1301					   vma_pages(vma));
1302	if (!node) {
1303		mutex_unlock(&dev->struct_mutex);
1304		return drm_mmap(filp, vma);
1305	} else if (!drm_vma_node_is_allowed(node, filp)) {
1306		mutex_unlock(&dev->struct_mutex);
1307		return -EACCES;
1308	}
1309
1310	obj = container_of(node, struct drm_gem_object, vma_node);
1311	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
1312
1313	mutex_unlock(&dev->struct_mutex);
1314
1315	return ret;
1316}
1317EXPORT_SYMBOL(drm_gem_mmap);
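
/*
 * For illustration, on the Linux side a driver opts into this path by
 * pointing its file_operations at drm_gem_mmap; the fake offsets looked
 * up above are the ones handed out by drm_gem_create_mmap_offset().
 * example_driver_fops is a hypothetical name, the callbacks are the
 * stock DRM ones:
 *
 *	static const struct file_operations example_driver_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *		.llseek		= noop_llseek,
 *	};
 */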
1318
1319#endif
1320
1321/*
1322 * Code to support memory managers based on the GEM (Graphics
1323 * Execution Manager) API.
1324 */
1325
1326struct drm_gem_object *
1327drm_gem_object_find(struct drm_file *filp, u32 handle)
1328{
1329	struct drm_handle *han, search;
1330
1331	MUTEX_ASSERT_LOCKED(&filp->table_lock);
1332
1333	search.handle = handle;
1334	han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &search);
1335	if (han == NULL)
1336		return NULL;
1337
1338	return han->obj;
1339}
1340
1341int
1342drm_handle_cmp(struct drm_handle *a, struct drm_handle *b)
1343{
1344	return (a->handle < b->handle ? -1 : a->handle > b->handle);
1345}
1346
1347SPLAY_GENERATE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);
1348
1349int
1350drm_name_cmp(struct drm_gem_object *a, struct drm_gem_object *b)
1351{
1352	return (a->name < b->name ? -1 : a->name > b->name);
1353}
1354
1355SPLAY_GENERATE(drm_name_tree, drm_gem_object, entry, drm_name_cmp);
1356