drm_prime.c revision 1.3
/*	$NetBSD: drm_prime.c,v 1.3 2018/08/27 15:22:54 riastradh Exp $	*/

/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.3 2018/08/27 15:22:54 riastradh Exp $");

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

#include "drm_internal.h"

struct sg_table {
	bus_dma_segment_t	*sgt_segs;
	int			sgt_nsegs;
	bus_size_t		sgt_size;
};

static int
sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
    unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
{
	unsigned i;

	KASSERT(offset == 0);
	KASSERT(size == npages << PAGE_SHIFT);

	sgt->sgt_segs = kcalloc(npages, sizeof(sgt->sgt_segs[0]), gfp);
	if (sgt->sgt_segs == NULL)
		return -ENOMEM;
	sgt->sgt_nsegs = npages;
	sgt->sgt_size = size;

	for (i = 0; i < npages; i++) {
		sgt->sgt_segs[i].ds_addr = VM_PAGE_TO_PHYS(&pages[i]->p_vmp);
		sgt->sgt_segs[i].ds_len = PAGE_SIZE;
	}

	return 0;
}

static int
sg_alloc_table_from_pglist(struct sg_table *sgt, struct pglist *pglist,
    unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
{
	struct vm_page *pg;
	unsigned i;

	KASSERT(offset == 0);
	KASSERT(size == npages << PAGE_SHIFT);

	sgt->sgt_segs = kcalloc(npages, sizeof(sgt->sgt_segs[0]), gfp);
	if (sgt->sgt_segs == NULL)
		return -ENOMEM;
	sgt->sgt_nsegs = npages;
	sgt->sgt_size = size;

	i = 0;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		KASSERT(i < npages);
		sgt->sgt_segs[i].ds_addr = VM_PAGE_TO_PHYS(pg);
		sgt->sgt_segs[i].ds_len = PAGE_SIZE;
		i++;
	}
	KASSERT(i == npages);

	return 0;
}

static void
sg_free_table(struct sg_table *sgt)
{

	kfree(sgt->sgt_segs);
	sgt->sgt_segs = NULL;
	sgt->sgt_nsegs = 0;
	sgt->sgt_size = 0;
}
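
/*
 * Illustrative sketch (not compiled): how this bus_dma-backed sg_table
 * shim is typically consumed.  An exporting driver builds the table
 * from its pages with drm_prime_pages_to_sg() (below); on NetBSD the
 * importer loads it into a DMA map with drm_prime_bus_dmamap_load_sgt()
 * (below), and drm_prime_sg_free() releases it.  The dmat, map, pages,
 * and npages variables are assumed to exist; error handling is
 * abbreviated.
 *
 *	struct sg_table *sgt;
 *	int error;
 *
 *	sgt = drm_prime_pages_to_sg(pages, npages);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	error = drm_prime_bus_dmamap_load_sgt(dmat, map, sgt);
 *	if (error) {
 *		drm_prime_sg_free(sgt);
 *		return error;
 *	}
 *	...
 *	bus_dmamap_unload(dmat, map);
 *	drm_prime_sg_free(sgt);
 */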

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On export, the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On import, the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to it and stores the attachment
 * in the GEM object. The attachment is destroyed when the imported
 * object is destroyed; at that point we remove the attachment and
 * drop the reference to the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object from
 * the dma-buf private.  Prime will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
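
/*
 * For example, the generic import helper later in this file detects
 * the self-import case by recognizing its own dma_buf ops and device
 * before creating any attachment; sketched here for reference only:
 *
 *	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
 *		struct drm_gem_object *obj = dma_buf->priv;
 *
 *		if (obj->dev == dev) {
 *			drm_gem_object_reference(obj);
 *			return obj;
 *		}
 *	}
 */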

struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->handle == handle)
			return member->dma_buf;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
#ifndef __NetBSD__		/* We map/unmap elsewhere.  */
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
					prime_attach->dir);
#endif
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
						enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
	if (!IS_ERR(sgt)) {
#ifdef __NetBSD__		/* We map/unmap elsewhere.  */
		prime_attach->sgt = sgt;
		prime_attach->dir = dir;
#else
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
#endif
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here */
}

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference on the export fd holds */
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}

#ifdef __NetBSD__
static int
drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp, size_t size,
    int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
    int *maxprotp)
#else
static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
#endif
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

#ifdef __NetBSD__
	return dev->driver->gem_prime_mmap(obj, offp, size, prot, flagsp,
	    advicep, uobjp, maxprotp);
#else
	return dev->driver->gem_prime_mmap(obj, vma);
#endif
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import.  These functions implement dma-buf support in terms of
 * six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 *  - @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
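
/*
 * A minimal sketch of how a driver might wire these helpers into its
 * struct drm_driver.  Illustrative only; the "mydrv_*" callbacks are
 * hypothetical placeholders, not part of this file:
 *
 *	static struct drm_driver mydrv_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export = drm_gem_prime_export,
 *		.gem_prime_import = drm_gem_prime_import,
 *		.gem_prime_pin = mydrv_gem_prime_pin,
 *		.gem_prime_unpin = mydrv_gem_prime_unpin,
 *		.gem_prime_get_sg_table = mydrv_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = mydrv_gem_prime_import_sg_table,
 *		.gem_prime_vmap = mydrv_gem_prime_vmap,
 *		.gem_prime_vunmap = mydrv_gem_prime_vunmap,
 *		.gem_prime_mmap = mydrv_gem_prime_mmap,
 *	};
 */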

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC
 *
 * This is the implementation of the gem_prime_export functions for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj,
				     int flags)
{
	struct dma_buf_export_info exp_info = {
#ifndef __NetBSD__
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
#endif
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
	};

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return dma_buf_export(&exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new ref since the dma-buf now holds a reference to the object */
	drm_gem_object_reference(obj);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object.
 * The actual exporting from GEM object to a dma-buf is done through the
 * gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object.
 * The actual importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;

	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}
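
/*
 * From userspace these handlers are reached through the PRIME ioctls.
 * A rough sketch (error handling omitted; "drm_fd" and "handle" are
 * assumed to be an open DRM file descriptor and an existing GEM
 * handle):
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *
 * args.fd then holds the dma-buf file descriptor, which can be passed
 * to another process or driver and converted back to a handle with
 * DRM_IOCTL_PRIME_FD_TO_HANDLE.
 */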

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver
 * is responsible for mapping the pages into the importer's address space
 * for use with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

#ifdef __NetBSD__

struct sg_table *
drm_prime_pglist_to_sg(struct pglist *pglist, unsigned npages)
{
	struct sg_table *sg;
	int ret;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (sg == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pglist(sg, pglist, npages, 0,
	    npages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;

out:
	kfree(sg);
	return ERR_PTR(ret);
}

void
drm_prime_sg_free(struct sg_table *sg)
{

	sg_free_table(sg);
	kfree(sg);
}

int
drm_prime_bus_dmamap_load_sgt(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct sg_table *sgt)
{

	/* XXX errno NetBSD->Linux */
	return -bus_dmamap_load_raw(dmat, map, sgt->sgt_segs, sgt->sgt_nsegs,
	    sgt->sgt_size, BUS_DMA_NOWAIT);
}
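
/*
 * A hedged sketch of how an importing NetBSD driver might consume an
 * sg_table with the helper above.  Names such as "sc" and the map
 * creation parameters are illustrative assumptions, not part of this
 * file; real drivers pick their own segment limits:
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, sgt->sgt_size,
 *	    sgt->sgt_nsegs, sgt->sgt_size, 0, BUS_DMA_NOWAIT, &map);
 *	if (error)
 *		return -error;	// Linux-style errno, as elsewhere here
 *	error = drm_prime_bus_dmamap_load_sgt(sc->sc_dmat, map, sgt);
 *	if (error) {
 *		bus_dmamap_destroy(sc->sc_dmat, map);
 *		return error;
 *	}
 */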

#else  /* !__NetBSD__ */

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

#endif	/* __NetBSD__ */

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * @drm_gem_prime_import to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
#ifdef __NetBSD__
	linux_mutex_init(&prime_fpriv->lock);
#else
	mutex_init(&prime_fpriv->lock);
#endif
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!list_empty(&prime_fpriv->head));
#ifdef __NetBSD__
	linux_mutex_destroy(&prime_fpriv->lock);
#else
	mutex_destroy(&prime_fpriv->lock);
#endif
}