1/*-
2 * Copyright (c) 2015 Michal Meloun
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD$");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/bus.h>
33#include <sys/kernel.h>
34#include <sys/malloc.h>
35
36#include <machine/bus.h>
37
38#include <dev/extres/clk/clk.h>
39#include <dev/drm2/drmP.h>
40#include <dev/drm2/drm_crtc_helper.h>
41#include <dev/drm2/drm_fb_helper.h>
42
43#include <arm/nvidia/drm2/tegra_drm.h>
44
45#include <sys/vmem.h>
46#include <sys/vmem.h>
47#include <vm/vm.h>
48#include <vm/vm_pageout.h>
49
static void
tegra_bo_destruct(struct tegra_bo *bo)
{
	vm_page_t m;
	size_t size;
	int i;

	/*
	 * Tear down everything tegra_bo_alloc()/tegra_bo_init_pager() set up:
	 * KVA mapping, pager-inserted pages and the kernel virtual address
	 * range.  A NULL pager means the object was never fully constructed.
	 */
	if (bo->cdev_pager == NULL)
		return;

	size = round_page(bo->gem_obj.size);
	/* bo->vbase != 0 implies pmap_qenter() was done in the pager init. */
	if (bo->vbase != 0)
		pmap_qremove(bo->vbase, bo->npages);

	VM_OBJECT_WLOCK(bo->cdev_pager);
	for (i = 0; i < bo->npages; i++) {
		m = bo->m[i];
		/*
		 * Page must be exclusively busied before it can be removed
		 * from the pager and freed.
		 */
		vm_page_busy_acquire(m, 0);
		cdev_pager_free_page(bo->cdev_pager, m);
		/*
		 * Undo the PG_FICTITIOUS hack applied at insert time so the
		 * page can be returned to the free lists as a real page.
		 */
		m->flags &= ~PG_FICTITIOUS;
		/* Pages were allocated VM_ALLOC_WIRED; drop the wiring. */
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(bo->cdev_pager);

	vm_object_deallocate(bo->cdev_pager);
	/* Return the KVA range taken from kmem_arena in the pager init. */
	if (bo->vbase != 0)
		vmem_free(kmem_arena, bo->vbase, size);
}
79
80static void
81tegra_bo_free_object(struct drm_gem_object *gem_obj)
82{
83	struct tegra_bo *bo;
84
85	bo = container_of(gem_obj, struct tegra_bo, gem_obj);
86	drm_gem_free_mmap_offset(gem_obj);
87	drm_gem_object_release(gem_obj);
88
89	tegra_bo_destruct(bo);
90
91	free(bo->m, DRM_MEM_DRIVER);
92	free(bo, DRM_MEM_DRIVER);
93}
94
/*
 * Allocate 'npages' physically contiguous, wired, zeroed pages with the
 * given alignment and memory attribute, storing them into the caller's
 * page array.  Returns 0 on success or ENOMEM after exhausting retries.
 */
static int
tegra_bo_alloc_contig(size_t npages, u_long alignment, vm_memattr_t memattr,
    vm_page_t **ret_page)
{
	vm_page_t m;
	int pflags, tries, i;
	vm_paddr_t low, high, boundary;

	/* No physical address restrictions beyond the alignment. */
	low = 0;
	high = -1UL;
	boundary = 0;
	pflags = VM_ALLOC_NORMAL  | VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO;
	tries = 0;
retry:
	m = vm_page_alloc_contig(NULL, 0, pflags, npages, low, high, alignment,
	    boundary, memattr);
	if (m == NULL) {
		/*
		 * Try to defragment physical memory; if reclaim makes no
		 * progress, wait for the pagedaemon before retrying.
		 */
		if (tries < 3) {
			if (!vm_page_reclaim_contig(pflags, npages, low, high,
			    alignment, boundary))
				vm_wait(NULL);
			tries++;
			goto retry;
		}
		return (ENOMEM);
	}

	/*
	 * VM_ALLOC_ZERO is only a hint; zero any page that did not come
	 * back pre-zeroed, then publish it into the caller's array.
	 */
	for (i = 0; i < npages; i++, m++) {
		if ((m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		(*ret_page)[i] = m;
	}

	return (0);
}
132
133/* Initialize pager and insert all object pages to it*/
134static int
135tegra_bo_init_pager(struct tegra_bo *bo)
136{
137	vm_page_t m;
138	size_t size;
139	int i;
140
141	size = round_page(bo->gem_obj.size);
142
143	bo->pbase = VM_PAGE_TO_PHYS(bo->m[0]);
144	if (vmem_alloc(kmem_arena, size, M_WAITOK | M_BESTFIT, &bo->vbase))
145		return (ENOMEM);
146
147	VM_OBJECT_WLOCK(bo->cdev_pager);
148	for (i = 0; i < bo->npages; i++) {
149		m = bo->m[i];
150		/*
151		 * XXX This is a temporary hack.
152		 * We need pager suitable for paging (mmap) managed
153		 * real (non-fictitious) pages.
154		 * - managed pages are needed for clean module unload.
155		 * - aliasing fictitious page to real one is bad,
156		 *   pmap cannot handle this situation without issues
157		 *   It expects that
158		 *    paddr = PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(paddr))
159		 *   for every single page passed to pmap.
160		 */
161		m->oflags &= ~VPO_UNMANAGED;
162		m->flags |= PG_FICTITIOUS;
163		if (vm_page_insert(m, bo->cdev_pager, i) != 0)
164			return (EINVAL);
165	}
166	VM_OBJECT_WUNLOCK(bo->cdev_pager);
167
168	pmap_qenter(bo->vbase, bo->m, bo->npages);
169	return (0);
170}
171
172/* Allocate memory for frame buffer */
173static int
174tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
175{
176	size_t size;
177	int rv;
178
179	size = bo->gem_obj.size;
180
181	bo->npages = atop(size);
182	bo->m = malloc(sizeof(vm_page_t *) * bo->npages, DRM_MEM_DRIVER,
183	    M_WAITOK | M_ZERO);
184
185	rv = tegra_bo_alloc_contig(bo->npages, PAGE_SIZE,
186	    VM_MEMATTR_WRITE_COMBINING, &(bo->m));
187	if (rv != 0) {
188		DRM_WARNING("Cannot allocate memory for gem object.\n");
189		return (rv);
190	}
191	rv = tegra_bo_init_pager(bo);
192	if (rv != 0) {
193		DRM_WARNING("Cannot initialize gem object pager.\n");
194		return (rv);
195	}
196	return (0);
197}
198
199int
200tegra_bo_create(struct drm_device *drm, size_t size, struct tegra_bo **res_bo)
201{
202	struct tegra_bo *bo;
203	int rv;
204
205	if (size <= 0)
206		return (-EINVAL);
207
208	bo = malloc(sizeof(*bo), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
209
210	size = round_page(size);
211	rv = drm_gem_object_init(drm, &bo->gem_obj, size);
212	if (rv != 0) {
213		free(bo, DRM_MEM_DRIVER);
214		return (rv);
215	}
216	rv = drm_gem_create_mmap_offset(&bo->gem_obj);
217	if (rv != 0) {
218		drm_gem_object_release(&bo->gem_obj);
219		free(bo, DRM_MEM_DRIVER);
220		return (rv);
221	}
222
223	bo->cdev_pager = cdev_pager_allocate(&bo->gem_obj, OBJT_MGTDEVICE,
224	    drm->driver->gem_pager_ops, size, 0, 0, NULL);
225	rv = tegra_bo_alloc(drm, bo);
226	if (rv != 0) {
227		tegra_bo_free_object(&bo->gem_obj);
228		return (rv);
229	}
230
231	*res_bo = bo;
232	return (0);
233}
234
235static int
236tegra_bo_create_with_handle(struct drm_file *file, struct drm_device *drm,
237    size_t size, uint32_t *handle, struct tegra_bo **res_bo)
238{
239	int rv;
240	struct tegra_bo *bo;
241
242	rv = tegra_bo_create(drm, size, &bo);
243	if (rv != 0)
244		return (rv);
245
246	rv = drm_gem_handle_create(file, &bo->gem_obj, handle);
247	if (rv != 0) {
248		tegra_bo_free_object(&bo->gem_obj);
249		drm_gem_object_release(&bo->gem_obj);
250		return (rv);
251	}
252
253	drm_gem_object_unreference_unlocked(&bo->gem_obj);
254
255	*res_bo = bo;
256	return (0);
257}
258
259static int
260tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm_dev,
261    struct drm_mode_create_dumb *args)
262{
263	struct tegra_drm *drm;
264	struct tegra_bo *bo;
265	int rv;
266
267	drm = container_of(drm_dev, struct tegra_drm, drm_dev);
268
269	args->pitch= (args->width * args->bpp + 7) / 8;
270	args->pitch = roundup(args->pitch, drm->pitch_align);
271	args->size = args->pitch * args->height;
272	rv = tegra_bo_create_with_handle(file, drm_dev, args->size,
273	    &args->handle, &bo);
274
275	return (rv);
276}
277
278static int
279tegra_bo_dumb_map_offset(struct drm_file *file_priv,
280    struct drm_device *drm_dev, uint32_t handle, uint64_t *offset)
281{
282	struct drm_gem_object *gem_obj;
283	int rv;
284
285	DRM_LOCK(drm_dev);
286	gem_obj = drm_gem_object_lookup(drm_dev, file_priv, handle);
287	if (gem_obj == NULL) {
288		device_printf(drm_dev->dev, "Object not found\n");
289		DRM_UNLOCK(drm_dev);
290		return (-EINVAL);
291	}
292	rv = drm_gem_create_mmap_offset(gem_obj);
293	if (rv != 0)
294		goto fail;
295
296	*offset = DRM_GEM_MAPPING_OFF(gem_obj->map_list.key) |
297	    DRM_GEM_MAPPING_KEY;
298
299	drm_gem_object_unreference(gem_obj);
300	DRM_UNLOCK(drm_dev);
301	return (0);
302
303fail:
304	drm_gem_object_unreference(gem_obj);
305	DRM_UNLOCK(drm_dev);
306	return (rv);
307}
308
/*
 * DRM "dumb buffer" destroy ioctl: dropping the handle releases the
 * object once the last reference is gone.
 */
static int
tegra_bo_dumb_destroy(struct drm_file *file_priv, struct drm_device *drm_dev,
    unsigned int handle)
{

	return (drm_gem_handle_delete(file_priv, handle));
}
318
319/*
320 * mmap support
321 */
322static int
323tegra_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
324    vm_page_t *mres)
325{
326
327#ifdef DRM_PAGER_DEBUG
328	DRM_DEBUG("object %p offset %jd prot %d mres %p\n",
329	    vm_obj, (intmax_t)offset, prot, mres);
330#endif
331	return (VM_PAGER_FAIL);
332
333}
334
335static int
336tegra_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
337    vm_ooffset_t foff, struct ucred *cred, u_short *color)
338{
339
340	if (color != NULL)
341		*color = 0;
342	return (0);
343}
344
/* Pager destructor: nothing to release; teardown is in tegra_bo_destruct(). */
static void
tegra_gem_pager_dtor(void *handle)
{

}
350
/* cdev pager operations handed to the DRM core via gem_pager_ops. */
static struct cdev_pager_ops tegra_gem_pager_ops = {
	.cdev_pg_fault = tegra_gem_pager_fault,
	.cdev_pg_ctor  = tegra_gem_pager_ctor,
	.cdev_pg_dtor  = tegra_gem_pager_dtor
};
356
357/* Fill up relevant fields in drm_driver ops */
358void
359tegra_bo_driver_register(struct drm_driver *drm_drv)
360{
361	drm_drv->gem_free_object = tegra_bo_free_object;
362	drm_drv->gem_pager_ops = &tegra_gem_pager_ops;
363	drm_drv->dumb_create = tegra_bo_dumb_create;
364	drm_drv->dumb_map_offset = tegra_bo_dumb_map_offset;
365	drm_drv->dumb_destroy = tegra_bo_dumb_destroy;
366}
367