/*	$NetBSD: drm_vma_manager.c,v 1.8 2021/12/19 11:57:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_vma_manager.c,v 1.8 2021/12/19 11:57:27 riastradh Exp $");

#include <sys/kmem.h>
#include <sys/rbtree.h>
#include <sys/vmem.h>

#include <drm/drm_vma_manager.h>

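/*
 * Compare two offset nodes by starting page, for the manager's rb tree
 * of nodes.
 */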
static int
drm_vma_node_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct drm_vma_offset_node *const na = va;
	const struct drm_vma_offset_node *const nb = vb;

	if (na->von_startpage < nb->von_startpage)
		return -1;
	if (na->von_startpage > nb->von_startpage)
		return +1;
	return 0;
}

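/*
 * Compare an offset node against a page number key, for lookups in the
 * manager's rb tree of nodes.
 */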
static int
drm_vma_node_compare_key(void *cookie __unused, const void *vn, const void *vk)
{
	const struct drm_vma_offset_node *const n = vn;
	const vmem_addr_t *const k = vk;

	if (n->von_startpage < *k)
		return -1;
	if (n->von_startpage > *k)
		return +1;
	return 0;
}

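/*
 * rb tree ops for a manager's tree of offset nodes, keyed by starting
 * page.
 */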
static const rb_tree_ops_t drm_vma_node_rb_ops = {
	.rbto_compare_nodes = &drm_vma_node_compare,
	.rbto_compare_key = &drm_vma_node_compare_key,
	.rbto_node_offset = offsetof(struct drm_vma_offset_node, von_rb_node),
	.rbto_context = NULL,
};

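/*
 * Compare two file entries by drm file pointer, for a node's rb tree of
 * files allowed to map it.
 */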
static int
drm_vma_file_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct drm_vma_offset_file *const fa = va;
	const struct drm_vma_offset_file *const fb = vb;

	if (fa->vof_file < fb->vof_file)
		return -1;
	if (fa->vof_file > fb->vof_file)
		return +1;
	return 0;
}

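/*
 * Compare a file entry against a drm file pointer key, for lookups in a
 * node's rb tree of files.
 */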
static int
drm_vma_file_compare_key(void *cookie __unused, const void *vf, const void *vk)
{
	const struct drm_vma_offset_file *const f = vf;
	const struct drm_file *const k = vk;

	if (f->vof_file < k)
		return -1;
	if (f->vof_file > k)
		return +1;
	return 0;
}

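/*
 * rb tree ops for a node's tree of files allowed to map it, keyed by
 * drm file pointer.
 */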
static const rb_tree_ops_t drm_vma_file_rb_ops = {
	.rbto_compare_nodes = &drm_vma_file_compare,
	.rbto_compare_key = &drm_vma_file_compare_key,
	.rbto_node_offset = offsetof(struct drm_vma_offset_file, vof_rb_node),
	.rbto_context = NULL,
};

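/*
 * Initialize an offset manager covering npages of offset space starting
 * at startpage, backed by a vmem arena for allocation.
 */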
void
drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
    unsigned long startpage, unsigned long npages)
{

	rw_init(&mgr->vom_lock);
	rb_tree_init(&mgr->vom_nodes, &drm_vma_node_rb_ops);
	mgr->vom_vmem = vmem_create("drm_vma", startpage, npages, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}

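/*
 * Destroy an offset manager.  The caller must have removed all nodes
 * first.
 */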
void
drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{

	vmem_destroy(mgr->vom_vmem);
	KASSERTMSG((RB_TREE_MIN(&mgr->vom_nodes) == NULL),
	    "drm vma offset manager %p not empty", mgr);
#if 0
	rb_tree_destroy(&mgr->vom_nodes);
#endif
	rw_destroy(&mgr->vom_lock);
}

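/*
 * Initialize an offset node: zero it, and set up its lock and its tree
 * of files allowed to map it.
 */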
void
drm_vma_node_init(struct drm_vma_offset_node *node)
{
	static const struct drm_vma_offset_node zero_node;

	*node = zero_node;

	rw_init(&node->von_lock);
	node->von_startpage = 0;
	node->von_npages = 0;
	rb_tree_init(&node->von_files, &drm_vma_file_rb_ops);
}

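/*
 * Destroy an offset node.  The caller must have revoked all files and
 * removed the node from its manager first.
 */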
void
drm_vma_node_destroy(struct drm_vma_offset_node *node)
{

	KASSERTMSG((RB_TREE_MIN(&node->von_files) == NULL),
	    "drm vma node %p not empty", node);
#if 0
	rb_tree_destroy(&node->von_files);
#endif
	KASSERT(node->von_startpage == 0);
	KASSERT(node->von_npages == 0);
	rw_destroy(&node->von_lock);
}

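/*
 * Allocate npages of offset space for node and publish it in mgr's tree
 * so it can be found by lookup.  Idempotent: does nothing if the node
 * already has an offset.  Returns 0 on success or a negative errno
 * (-ENOSPC if the offset space is exhausted).
 */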
int
drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
    struct drm_vma_offset_node *node, unsigned long npages)
{
	vmem_addr_t startpage;
	struct drm_vma_offset_node *collision __diagused;
	int error;

	KASSERT(npages != 0);

	if (0 < node->von_npages)
		return 0;

	error = vmem_alloc(mgr->vom_vmem, npages, VM_NOSLEEP|VM_BESTFIT,
	    &startpage);
	if (error) {
		if (error == ENOMEM)
			error = ENOSPC;
		/* XXX errno NetBSD->Linux */
		return -error;
	}

	node->von_startpage = startpage;
	node->von_npages = npages;

	rw_enter(&mgr->vom_lock, RW_WRITER);
	collision = rb_tree_insert_node(&mgr->vom_nodes, node);
	KASSERT(collision == node);
	rw_exit(&mgr->vom_lock);

	return 0;
}

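/*
 * Remove node from mgr's tree and release its offset space.  Does
 * nothing if the node has no offset.
 */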
void
drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
    struct drm_vma_offset_node *node)
{

	if (node->von_npages == 0)
		return;

	rw_enter(&mgr->vom_lock, RW_WRITER);
	rb_tree_remove_node(&mgr->vom_nodes, node);
	rw_exit(&mgr->vom_lock);

	vmem_free(mgr->vom_vmem, node->von_startpage, node->von_npages);

	node->von_npages = 0;
	node->von_startpage = 0;
}

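/*
 * Take the manager's lookup lock (as a reader) around a series of
 * _locked lookups.
 */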
void
drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
{

	rw_enter(&mgr->vom_lock, RW_READER);
}

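/*
 * Release the manager's lookup lock.
 */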
void
drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
{

	rw_exit(&mgr->vom_lock);
}

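/*
 * Find the node whose allocated range covers pages
 * [startpage, startpage + npages), or return NULL if there is none.
 * The caller must hold the manager's lookup lock.
 */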
struct drm_vma_offset_node *
drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
    unsigned long startpage, unsigned long npages)
{
	const vmem_addr_t key = startpage;
	struct drm_vma_offset_node *node;

	KASSERT(rw_lock_held(&mgr->vom_lock));

	node = rb_tree_find_node_leq(&mgr->vom_nodes, &key);
	if (node == NULL)
		return NULL;
	KASSERT(node->von_startpage <= startpage);
	if (node->von_npages < npages)
		return NULL;
	if (node->von_npages - npages < startpage - node->von_startpage)
		return NULL;

	return node;
}

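/*
 * Find the node whose allocated range starts exactly at startpage and
 * spans npages, or return NULL if there is none.  The caller must hold
 * the manager's lookup lock.
 */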
struct drm_vma_offset_node *
drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
    unsigned long startpage, unsigned long npages)
{
	const vmem_addr_t key = startpage;
	struct drm_vma_offset_node *node;

	KASSERT(rw_lock_held(&mgr->vom_lock));

	node = rb_tree_find_node(&mgr->vom_nodes, &key);
	if (node == NULL)
		return NULL;
	KASSERT(node->von_startpage == startpage);
	if (node->von_npages != npages)
		return NULL;

	return node;
}

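/*
 * Like drm_vma_offset_exact_lookup_locked, but takes the lookup lock
 * internally.
 */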
struct drm_vma_offset_node *
drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
    unsigned long startpage, unsigned long npages)
{
	struct drm_vma_offset_node *node;

	rw_enter(&mgr->vom_lock, RW_READER);
	node = drm_vma_offset_exact_lookup_locked(mgr, startpage, npages);
	rw_exit(&mgr->vom_lock);

	return node;
}

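/*
 * Grant file permission to map node.  Returns 0 on success or -ENOMEM
 * if allocation fails; granting the same file twice is harmless.
 */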
int
drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *file)
{
	struct drm_vma_offset_file *new, *old;

	new = kmem_alloc(sizeof(*new), KM_NOSLEEP);
	if (new == NULL)
		return -ENOMEM;
	new->vof_file = file;

	rw_enter(&node->von_lock, RW_WRITER);
	old = rb_tree_insert_node(&node->von_files, new);
	rw_exit(&node->von_lock);

	if (old != new)		/* collision */
		kmem_free(new, sizeof(*new));

	return 0;
}

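/*
 * Revoke file's permission to map node, if it was granted.
 */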
void
drm_vma_node_revoke(struct drm_vma_offset_node *node, struct drm_file *file)
{

	rw_enter(&node->von_lock, RW_WRITER);
	struct drm_vma_offset_file *const found =
	    rb_tree_find_node(&node->von_files, file);
	if (found != NULL)
		rb_tree_remove_node(&node->von_files, found);
	rw_exit(&node->von_lock);
	if (found != NULL)
		kmem_free(found, sizeof(*found));
}

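/*
 * Return true if file has been granted permission to map node.
 */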
bool
drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
    struct drm_file *file)
{

	rw_enter(&node->von_lock, RW_READER);
	const bool allowed =
	    (rb_tree_find_node(&node->von_files, file) != NULL);
	rw_exit(&node->von_lock);

	return allowed;
}

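/*
 * Return 0 if file may map node, -EACCES if not.
 */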
int
drm_vma_node_verify_access(struct drm_vma_offset_node *node,
    struct drm_file *file)
{

	if (!drm_vma_node_is_allowed(node, file))
		return -EACCES;

	return 0;
}