/*	$OpenBSD: drm_linux.c,v 1.24 2018/06/25 22:29:16 kettenis Exp $	*/
/*
 * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
 * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <dev/pci/drm/drmP.h>
#include <dev/pci/ppbreg.h>
#include <sys/event.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/stat.h>

struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
void *sch_ident;
int sch_priority;

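/*
 * The flush_* functions below emulate their Linux counterparts with a
 * barrier task: the task is queued behind any work already on the
 * taskq and the caller sleeps until it has run, at which point the
 * previously queued work is known to have completed.
 */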
void
flush_barrier(void *arg)
{
	int *barrier = arg;

	*barrier = 1;
	wakeup(barrier);
}

void
flush_workqueue(struct workqueue_struct *wq)
{
	struct sleep_state sls;
	struct task task;
	int barrier = 0;

	if (cold)
		return;

	task_set(&task, flush_barrier, &barrier);
	task_add((struct taskq *)wq, &task);
	while (!barrier) {
		sleep_setup(&sls, &barrier, PWAIT, "flwqbar");
		sleep_finish(&sls, !barrier);
	}
}

void
flush_work(struct work_struct *work)
{
	struct sleep_state sls;
	struct task task;
	int barrier = 0;

	if (cold)
		return;

	task_set(&task, flush_barrier, &barrier);
	task_add(work->tq, &task);
	while (!barrier) {
		sleep_setup(&sls, &barrier, PWAIT, "flwkbar");
		sleep_finish(&sls, !barrier);
	}
}

void
flush_delayed_work(struct delayed_work *dwork)
{
	struct sleep_state sls;
	struct task task;
	int barrier = 0;

	if (cold)
		return;

	while (timeout_pending(&dwork->to))
		tsleep(&barrier, PWAIT, "fldwto", 1);

	task_set(&task, flush_barrier, &barrier);
	task_add(dwork->tq ? dwork->tq : systq, &task);
	while (!barrier) {
		sleep_setup(&sls, &barrier, PWAIT, "fldwbar");
		sleep_finish(&sls, !barrier);
	}
}

struct timespec
ns_to_timespec(const int64_t nsec)
{
	struct timespec ts;
	int32_t rem;

	if (nsec == 0) {
		ts.tv_sec = 0;
		ts.tv_nsec = 0;
		return (ts);
	}

	ts.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		ts.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	ts.tv_nsec = rem;
	return (ts);
}
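/*
 * The remainder fixup above normalizes negative inputs so that
 * 0 <= tv_nsec < NSEC_PER_SEC; e.g. ns_to_timespec(-1500000000)
 * yields { .tv_sec = -2, .tv_nsec = 500000000 }.
 */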

int64_t
timeval_to_ns(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
		tv->tv_usec * NSEC_PER_USEC;
}

struct timeval
ns_to_timeval(const int64_t nsec)
{
	struct timeval tv;
	int32_t rem;

	if (nsec == 0) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		return (tv);
	}

	tv.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		tv.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	tv.tv_usec = rem / 1000;
	return (tv);
}

int64_t
timeval_to_us(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * 1000000) + tv->tv_usec;
}

extern char *hw_vendor, *hw_prod, *hw_ver;

bool
dmi_match(int slot, const char *str)
{
	switch (slot) {
	case DMI_SYS_VENDOR:
	case DMI_BOARD_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_PRODUCT_NAME:
	case DMI_BOARD_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
	case DMI_PRODUCT_VERSION:
	case DMI_BOARD_VERSION:
		if (hw_ver != NULL &&
		    !strcmp(hw_ver, str))
			return true;
		break;
	case DMI_NONE:
	default:
		return false;
	}

	return false;
}

static bool
dmi_found(const struct dmi_system_id *dsi)
{
	int i, slot;

	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		if (slot == DMI_NONE)
			break;
		if (!dmi_match(slot, dsi->matches[i].substr))
			return false;
	}

	return true;
}

int
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	int num = 0;

	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			num++;
			if (dsi->callback && dsi->callback(dsi))
				break;
		}
	}
	return (num);
}
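/*
 * dmi_check_system() above scans a quirk table terminated by an
 * all-zero sentinel entry (matches[0].slot == 0).  A hypothetical
 * table, assuming the usual DMI_MATCH() initializer macro from the
 * Linux compat headers:
 *
 *	static const struct dmi_system_id quirks[] = {
 *		{ some_callback, "Example Machine",
 *		  { DMI_MATCH(DMI_SYS_VENDOR, "Vendor"),
 *		    DMI_MATCH(DMI_PRODUCT_NAME, "Product") } },
 *		{ }
 *	};
 *
 *	dmi_check_system(quirks);
 */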

struct vm_page *
alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	struct pglist mlist;

	if (gfp_mask & M_CANFAIL)
		flags |= UVM_PLA_FAILOK;
	if (gfp_mask & M_ZERO)
		flags |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(PAGE_SIZE << order, dma_constraint.ucr_low,
	    dma_constraint.ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
		return NULL;
	return TAILQ_FIRST(&mlist);
}

void
__free_pages(struct vm_page *page, unsigned int order)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < (1 << order); i++)
		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
	uvm_pglistfree(&mlist);
}

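/*
 * kmap()/kunmap() map a single page into kernel virtual address
 * space.  On architectures with a direct map the physical page is
 * addressed through it; otherwise a temporary KVA mapping is set up
 * in phys_map and torn down again in kunmap().
 */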
void *
kmap(struct vm_page *pg)
{
	vaddr_t va;

#if defined (__HAVE_PMAP_DIRECT)
	va = pmap_map_direct(pg);
#else
	va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
#endif
	return (void *)va;
}

void
kunmap(void *addr)
{
	vaddr_t va = (vaddr_t)addr;

#if defined (__HAVE_PMAP_DIRECT)
	pmap_unmap_direct(va);
#else
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, va, PAGE_SIZE);
#endif
}

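/*
 * Unlike the Linux original, the pgprot_t argument here is an OpenBSD
 * pmap cache-attribute flag (e.g. PMAP_NOCACHE or PMAP_WC) and is
 * ORed into the physical address handed to pmap_enter() below.
 */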
void *
vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
     pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
	if (va == 0)
		return NULL;
	for (i = 0; i < npages; i++) {
		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

void
vunmap(void *addr, size_t size)
{
	vaddr_t va = (vaddr_t)addr;

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size);
}

void
print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
    int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
{
	const uint8_t *cbuf = buf;
	int i;

	for (i = 0; i < len; i++) {
		if ((i % rowsize) == 0)
			printf("%s", prefix_str);
		printf("%02x", cbuf[i]);
		if ((i % rowsize) == (rowsize - 1))
			printf("\n");
		else
			printf(" ");
	}
}

void *
memchr_inv(const void *s, int c, size_t n)
{
	if (n != 0) {
		const unsigned char *p = s;

		do {
			if (*p++ != (unsigned char)c)
				return ((void *)(p - 1));
		} while (--n != 0);
	}
	return (NULL);
}

int
panic_cmp(struct rb_node *a, struct rb_node *b)
{
	panic(__func__);
}

#undef RB_ROOT
#define RB_ROOT(head)	(head)->rbh_root

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

/*
 * This is a fairly minimal implementation of the Linux "idr" API.  It
 * probably isn't very efficient, and definitely isn't RCU safe.  The
 * pre-load buffer is global instead of per-cpu; we rely on the kernel
 * lock to make this work.  We do randomize our IDs in order to make
 * them harder to guess.
 */

int idr_cmp(struct idr_entry *, struct idr_entry *);
SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);

struct pool idr_pool;
struct idr_entry *idr_entry_cache;

void
idr_init(struct idr *idr)
{
	static int initialized;

	if (!initialized) {
		pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
		    "idrpl", NULL);
		initialized = 1;
	}
	SPLAY_INIT(&idr->tree);
}

void
idr_destroy(struct idr *idr)
{
	struct idr_entry *id;

	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
		SPLAY_REMOVE(idr_tree, &idr->tree, id);
		pool_put(&idr_pool, id);
	}
}

void
idr_preload(unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache == NULL)
		idr_entry_cache = pool_get(&idr_pool, flags);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end,
    unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	struct idr_entry *id;
	int begin;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache) {
		id = idr_entry_cache;
		idr_entry_cache = NULL;
	} else {
		id = pool_get(&idr_pool, flags);
		if (id == NULL)
			return -ENOMEM;
	}

	if (end <= 0)
		end = INT_MAX;

#ifdef notyet
	id->id = begin = start + arc4random_uniform(end - start);
#else
	id->id = begin = start;
#endif
	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
		if (++id->id == end)
			id->id = start;
		if (id->id == begin) {
			pool_put(&idr_pool, id);
			return -ENOSPC;
		}
	}
	id->ptr = ptr;
	return id->id;
}
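/*
 * Typical caller usage of the allocator above (a sketch; error
 * handling omitted).  An "end" of 0 or less means no upper limit:
 *
 *	int handle = idr_alloc(&idr, obj, 1, 0, GFP_KERNEL);
 *	...
 *	obj = idr_find(&idr, handle);
 *	idr_remove(&idr, handle);
 */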

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_entry find, *res;
	void *old;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return ERR_PTR(-ENOENT);
	old = res->ptr;
	res->ptr = ptr;
	return old;
}

void
idr_remove(struct idr *idr, int id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res) {
		SPLAY_REMOVE(idr_tree, &idr->tree, res);
		pool_put(&idr_pool, res);
	}
}

void *
idr_find(struct idr *idr, int id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

void *
idr_get_next(struct idr *idr, int *id)
{
	struct idr_entry *res;

	/*
	 * Return the entry with the smallest id >= *id, as in Linux.
	 * Note that idr_find() returns the stored user pointer, not
	 * the tree entry, so it cannot be used to seed SPLAY_NEXT();
	 * walk the tree in order instead.
	 */
	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
		if (res->id >= *id) {
			*id = res->id;
			return res->ptr;
		}
	}

	return NULL;
}

int
idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
{
	struct idr_entry *id;
	int ret;

	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
		ret = func(id->id, id->ptr, data);
		if (ret)
			return ret;
	}

	return 0;
}

int
idr_cmp(struct idr_entry *a, struct idr_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);

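/*
 * The ida implementation is even more minimal than the idr one: it
 * is a bare monotonic counter, so ida_remove() is a no-op and ids
 * are never recycled.
 */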
void
ida_init(struct ida *ida)
{
	ida->counter = 0;
}

void
ida_destroy(struct ida *ida)
{
}

void
ida_remove(struct ida *ida, int id)
{
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    int flags)
{
	if (end <= 0)
		end = INT_MAX;

	if (start > ida->counter)
		ida->counter = start;

	if (ida->counter >= end)
		return -ENOSPC;

	return ida->counter++;
}

int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
	    M_DRM, gfp_mask);
	if (table->sgl == NULL)
		return -ENOMEM;
	table->nents = table->orig_nents = nents;
	return 0;
}

void
sg_free_table(struct sg_table *table)
{
	free(table->sgl, M_DRM,
	    table->orig_nents * sizeof(struct scatterlist));
}

size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen)
{
	panic("%s", __func__);
}

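/*
 * Translate an array of Linux i2c_msg structures into iic_exec()
 * calls: all but the last two messages are issued as individual
 * transfers without a stop condition; if two messages remain, the
 * first is passed as the command (register) phase of the final
 * transfer, and the last message is issued with a stop condition.
 */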
int
i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	void *cmd = NULL;
	int cmdlen = 0;
	int err, ret = 0;
	int op;

	iic_acquire_bus(&adap->ic, 0);

	while (num > 2) {
		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
		    msgs->buf, msgs->len, 0);
		if (err) {
			ret = -err;
			goto fail;
		}
		msgs++;
		num--;
		ret++;
	}

	if (num > 1) {
		cmd = msgs->buf;
		cmdlen = msgs->len;
		msgs++;
		num--;
		ret++;
	}

	op = (msgs->flags & I2C_M_RD) ?
	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
	    msgs->buf, msgs->len, 0);
	if (err) {
		ret = -err;
		goto fail;
	}
	msgs++;
	ret++;

fail:
	iic_release_bus(&adap->ic, 0);

	return ret;
}

int
i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	if (adap->algo)
		return adap->algo->master_xfer(adap, msgs, num);

	return i2c_master_xfer(adap, msgs, num);
}

int
i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct i2c_algo_bit_data *algo = adap->algo_data;
	struct i2c_adapter bb;

	memset(&bb, 0, sizeof(bb));
	bb.ic = algo->ic;
	bb.retries = adap->retries;
	return i2c_master_xfer(&bb, msgs, num);
}

uint32_t
i2c_bb_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

struct i2c_algorithm i2c_bit_algo = {
	.master_xfer = i2c_bb_master_xfer,
	.functionality = i2c_bb_functionality
};

int
i2c_bit_add_bus(struct i2c_adapter *adap)
{
	adap->algo = &i2c_bit_algo;
	adap->retries = 3;

	return 0;
}

#if defined(__amd64__) || defined(__i386__)

/*
 * This is a minimal implementation of the Linux vga_get/vga_put
 * interface.  In all likelihood, it will only work for inteldrm(4) as
 * it assumes that if there is another active VGA device in the
 * system, it is sitting behind a PCI bridge.
 */

extern int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);

pcitag_t vga_bridge_tag;
int vga_bridge_disabled;

int
vga_disable_bridge(struct pci_attach_args *pa)
{
	pcireg_t bhlc, bc;

	if (pa->pa_domain != 0)
		return 0;

	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return 0;

	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
	if ((bc & PPB_BC_VGA_ENABLE) == 0)
		return 0;
	bc &= ~PPB_BC_VGA_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_tag = pa->pa_tag;
	vga_bridge_disabled = 1;

	return 1;
}

void
vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
{
	KASSERT(pdev->pci->sc_bridgetag == NULL);
	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
}

void
vga_put(struct pci_dev *pdev, int rsrc)
{
	pcireg_t bc;

	if (!vga_bridge_disabled)
		return;

	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
	bc |= PPB_BC_VGA_ENABLE;
	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_disabled = 0;
}

#endif

/*
 * ACPI types and interfaces.
 */

#if defined(__amd64__) || defined(__i386__)
#include "acpi.h"
#endif

#if NACPI > 0

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>

acpi_status
acpi_get_table_with_size(const char *sig, int instance,
    struct acpi_table_header **hdr, acpi_size *size)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpi_q *entry;

	KASSERT(instance == 1);

	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
			*hdr = entry->q_table;
			*size = (*hdr)->length;
			return 0;
		}
	}

	return AE_NOT_FOUND;
}

#endif

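/*
 * Backlight status updates are deferred to a task on systq, so a
 * status change can be requested from code that must not sleep; the
 * actual update runs later from the systq thread.
 */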
void
backlight_do_update_status(void *arg)
{
	backlight_update_status(arg);
}

struct backlight_device *
backlight_device_register(const char *name, void *kdev, void *data,
    const struct backlight_ops *ops, struct backlight_properties *props)
{
	struct backlight_device *bd;

	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
	bd->ops = ops;
	bd->props = *props;
	bd->data = data;

	task_set(&bd->task, backlight_do_update_status, bd);

	return bd;
}

void
backlight_device_unregister(struct backlight_device *bd)
{
	free(bd, M_DRM, sizeof(*bd));
}

void
backlight_schedule_update_status(struct backlight_device *bd)
{
	task_add(systq, &bd->task);
}

void
drm_sysfs_hotplug_event(struct drm_device *dev)
{
	KNOTE(&dev->note, NOTE_CHANGE);
}

unsigned int drm_fence_count;

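/*
 * Atomically reserve a block of "num" consecutive fence context ids:
 * __sync_add_and_fetch() returns the new counter value, so
 * subtracting num yields the first id of the freshly reserved block.
 */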
unsigned int
fence_context_alloc(unsigned int num)
{
	return __sync_add_and_fetch(&drm_fence_count, num) - num;
}

int
dmabuf_read(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	return (ENXIO);
}

int
dmabuf_write(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	return (ENXIO);
}

int
dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

int
dmabuf_poll(struct file *fp, int events, struct proc *p)
{
	return (0);
}

int
dmabuf_kqfilter(struct file *fp, struct knote *kn)
{
	return (EINVAL);
}

int
dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	memset(st, 0, sizeof(*st));
	st->st_size = dmabuf->size;
	st->st_mode = S_IFIFO;	/* XXX */
	return (0);
}

int
dmabuf_close(struct file *fp, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	fp->f_data = NULL;
	dmabuf->ops->release(dmabuf);
	free(dmabuf, M_DRM, sizeof(struct dma_buf));
	return (0);
}

struct fileops dmabufops = {
	.fo_read	= dmabuf_read,
	.fo_write	= dmabuf_write,
	.fo_ioctl	= dmabuf_ioctl,
	.fo_poll	= dmabuf_poll,
	.fo_kqfilter	= dmabuf_kqfilter,
	.fo_stat	= dmabuf_stat,
	.fo_close	= dmabuf_close
};

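/*
 * Wrap a dma-buf in an anonymous struct file: falloc() allocates
 * both a file and a descriptor, and fdremove() immediately drops the
 * descriptor again, so the file stays out of the fd table until
 * dma_buf_fd() installs it.
 */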
struct dma_buf *
dma_buf_export(const struct dma_buf_export_info *info)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct dma_buf *dmabuf;
	struct file *fp;
	int fd, error;

	fdplock(fdp);
	error = falloc(p, &fp, &fd);
	if (error) {
		fdpunlock(fdp);
		return ERR_PTR(-error);
	}
	fdremove(fdp, fd);
	fdpunlock(fdp);
	fp->f_type = DTYPE_DMABUF;
	fp->f_ops = &dmabufops;
	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
	dmabuf->priv = info->priv;
	dmabuf->ops = info->ops;
	dmabuf->size = info->size;
	dmabuf->file = fp;
	fp->f_data = dmabuf;
	return dmabuf;
}

struct dma_buf *
dma_buf_get(int fd)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return ERR_PTR(-EBADF);

	if (fp->f_type != DTYPE_DMABUF) {
		FRELE(fp, p);
		return ERR_PTR(-EINVAL);
	}

	return fp->f_data;
}

void
dma_buf_put(struct dma_buf *dmabuf)
{
	KASSERT(dmabuf);
	KASSERT(dmabuf->file);

	FRELE(dmabuf->file, curproc);
}

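/*
 * Install the exported file in the calling process's descriptor
 * table; fdalloc() is retried after fdexpand() if the table is full.
 */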
928dma_buf_fd(struct dma_buf *dmabuf, int flags)
929{
930	struct proc *p = curproc;
931	struct filedesc *fdp = p->p_fd;
932	struct file *fp = dmabuf->file;
933	int fd, cloexec, error;
934
935	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
936
937	fdplock(fdp);
938restart:
939	if ((error = fdalloc(p, 0, &fd)) != 0) {
940		if (error == ENOSPC) {
941			fdexpand(p);
942			goto restart;
943		}
944		fdpunlock(fdp);
945		return -error;
946	}
947
948	fdinsert(fdp, fd, cloexec, fp);
949	fdpunlock(fdp);
950
951	return fd;
952}
953
954void
955get_dma_buf(struct dma_buf *dmabuf)
956{
957	FREF(dmabuf->file);
958}
959