/* mem.c revision 331722 */
1123474Swpaul/*-
2123474Swpaul * Copyright (c) 1988 University of Utah.
3123474Swpaul * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
4123474Swpaul * All rights reserved.
5123474Swpaul *
6123474Swpaul * This code is derived from software contributed to Berkeley by
7123474Swpaul * the Systems Programming Group of the University of Utah Computer
8123474Swpaul * Science Department, and code derived from software contributed to
9123474Swpaul * Berkeley by William Jolitz.
10123474Swpaul *
11123474Swpaul * Redistribution and use in source and binary forms, with or without
12123474Swpaul * modification, are permitted provided that the following conditions
13123474Swpaul * are met:
14123474Swpaul * 1. Redistributions of source code must retain the above copyright
15123474Swpaul *    notice, this list of conditions and the following disclaimer.
16123474Swpaul * 2. Redistributions in binary form must reproduce the above copyright
17123474Swpaul *    notice, this list of conditions and the following disclaimer in the
18123474Swpaul *    documentation and/or other materials provided with the distribution.
19123474Swpaul * 4. Neither the name of the University nor the names of its contributors
20123474Swpaul *    may be used to endorse or promote products derived from this software
21123474Swpaul *    without specific prior written permission.
22123474Swpaul *
23123474Swpaul * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24123474Swpaul * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25123474Swpaul * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26123474Swpaul * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27123474Swpaul * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28123474Swpaul * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29123474Swpaul * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30123474Swpaul * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31123474Swpaul * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32123474Swpaul * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33123474Swpaul * SUCH DAMAGE.
34123474Swpaul *
35123474Swpaul *	from: Utah $Hdr: mem.c 1.13 89/10/08$
36123474Swpaul *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
37124697Swpaul */
38124697Swpaul
39123474Swpaul#include <sys/cdefs.h>
40123474Swpaul__FBSDID("$FreeBSD: stable/11/sys/powerpc/powerpc/mem.c 331722 2018-03-29 02:50:57Z eadler $");
41123474Swpaul
42123474Swpaul/*
43123474Swpaul * Memory special file
44123474Swpaul */
45124697Swpaul
46123474Swpaul#include <sys/param.h>
47123474Swpaul#include <sys/conf.h>
48123474Swpaul#include <sys/fcntl.h>
49123474Swpaul#include <sys/kernel.h>
50123474Swpaul#include <sys/lock.h>
51123474Swpaul#include <sys/ioccom.h>
52124697Swpaul#include <sys/malloc.h>
53123474Swpaul#include <sys/memrange.h>
54123474Swpaul#include <sys/module.h>
55123474Swpaul#include <sys/mutex.h>
56123474Swpaul#include <sys/proc.h>
57123474Swpaul#include <sys/msgbuf.h>
58124060Swpaul#include <sys/systm.h>
59124060Swpaul#include <sys/signalvar.h>
60123474Swpaul#include <sys/uio.h>
61123474Swpaul
62123474Swpaul#include <machine/md_var.h>
63123474Swpaul#include <machine/vmparam.h>
64123474Swpaul
65123474Swpaul#include <vm/vm.h>
66123695Swpaul#include <vm/pmap.h>
67123695Swpaul#include <vm/vm_extern.h>
68123695Swpaul#include <vm/vm_page.h>
69123474Swpaul
70123474Swpaul#include <machine/memdev.h>
71125551Swpaul
/* Backend hooks for the MI memory-range-attribute code (mem_range_attr_*). */
static void ppc_mrinit(struct mem_range_softc *);
static int ppc_mrset(struct mem_range_softc *, struct mem_range_desc *, int *);

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");

/*
 * Operations vector: only init and set are implemented on PowerPC; the
 * remaining two entries (positional — field names not visible here) are
 * unused and left NULL.
 */
static struct mem_range_ops ppc_mem_range_ops = {
	ppc_mrinit,
	ppc_mrset,
	NULL,
	NULL
};
/*
 * Global softc consumed by the MI code and by memioctl() below.
 * Initially: no capabilities, zero descriptors, no descriptor table
 * (ppc_mrinit() allocates the table later).
 */
struct mem_range_softc mem_range_softc = {
	&ppc_mem_range_ops,
	0, 0, NULL
};
87125057Swpaul
/* ARGSUSED */
/*
 * read/write handler for /dev/mem (physical memory) and /dev/kmem
 * (kernel virtual memory).  Transfers at most one page per loop
 * iteration; errors terminate the transfer with the partial count
 * reflected in uio.
 */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;	/* fake page used for uiomove_fromphys() */
	vm_page_t marr;
	vm_size_t cnt;

	cnt = 0;
	error = 0;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* Skip exhausted iovec entries. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			/*
			 * /dev/mem: offset is a physical address.  The
			 * KMEM branch below also jumps here for addresses
			 * outside the kernel virtual range.
			 */
kmem_direct_mapped:	v = uio->uio_offset;

			/*
			 * Clip the transfer so it stays within one page of
			 * both the source and the destination buffer.
			 */
			off = uio->uio_offset & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

			/*
			 * NOTE(review): a non-zero mem_valid() return is
			 * treated as "not accessible" here — confirm against
			 * the MD mem_valid() implementation.
			 */
			if (mem_valid(v, cnt)) {
				error = EFAULT;
				break;
			}

			if (!pmap_dev_direct_mapped(v, cnt)) {
				/*
				 * Presumably the address is covered by the
				 * direct map and can be touched as a kernel
				 * VA — verify against pmap_dev_direct_mapped.
				 */
				error = uiomove((void *)v, cnt, uio);
			} else {
				/* Copy via the physical page instead. */
				m.phys_addr = trunc_page(v);
				marr = &m;
				error = uiomove_fromphys(&marr, off, cnt, uio);
			}
		}
		else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			/* /dev/kmem: offset is a kernel virtual address. */
			va = uio->uio_offset;

			if ((va < VM_MIN_KERNEL_ADDRESS) || (va > virtual_end))
				goto kmem_direct_mapped;

			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset
			    + iov->iov_len);

			/*
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */

			for (; va < eva; va += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, va) == 0)
					return (EFAULT);

			prot = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;

			/* Reject ranges the kernel itself may not access. */
			va = uio->uio_offset;
			if (kernacc((void *) va, iov->iov_len, prot)
			    == FALSE)
				return (EFAULT);

			error = uiomove((void *)va, iov->iov_len, uio);

			continue;
		}
	}

	return (error);
}
169124122Swpaul
170124122Swpaul/*
171124060Swpaul * allow user processes to MMAP some memory sections
172124060Swpaul * instead of going through read/write
173124060Swpaul */
174124060Swpaulint
175124060Swpaulmemmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
176124060Swpaul    int prot, vm_memattr_t *memattr)
177124060Swpaul{
178124060Swpaul	int i;
179124060Swpaul
180123474Swpaul	if (dev2unit(dev) == CDEV_MINOR_MEM)
181123474Swpaul		*paddr = offset;
182123474Swpaul	else
183123474Swpaul		return (EFAULT);
184124697Swpaul
185124697Swpaul	for (i = 0; i < mem_range_softc.mr_ndesc; i++) {
186124697Swpaul		if (!(mem_range_softc.mr_desc[i].mr_flags & MDF_ACTIVE))
187124697Swpaul			continue;
188124697Swpaul
189124697Swpaul		if (offset >= mem_range_softc.mr_desc[i].mr_base &&
190124697Swpaul		    offset < mem_range_softc.mr_desc[i].mr_base +
191124697Swpaul		    mem_range_softc.mr_desc[i].mr_len) {
192124697Swpaul			switch (mem_range_softc.mr_desc[i].mr_flags &
193124697Swpaul			    MDF_ATTRMASK) {
194124697Swpaul			case MDF_WRITEBACK:
195124697Swpaul				*memattr = VM_MEMATTR_WRITE_BACK;
196124697Swpaul				break;
197124697Swpaul			case MDF_WRITECOMBINE:
198123474Swpaul				*memattr = VM_MEMATTR_WRITE_COMBINING;
199124697Swpaul				break;
200124724Swpaul			case MDF_UNCACHEABLE:
201124724Swpaul				*memattr = VM_MEMATTR_UNCACHEABLE;
202124697Swpaul				break;
203124697Swpaul			case MDF_WRITETHROUGH:
204124724Swpaul				*memattr = VM_MEMATTR_WRITE_THROUGH;
205124697Swpaul				break;
206124724Swpaul			}
207124724Swpaul
208124697Swpaul			break;
209124697Swpaul		}
210128229Swpaul	}
211128229Swpaul
212128229Swpaul	return (0);
213124697Swpaul}
214124697Swpaul
215128229Swpaulstatic void
216125814Swpaulppc_mrinit(struct mem_range_softc *sc)
217124724Swpaul{
218124724Swpaul	sc->mr_cap = 0;
219124724Swpaul	sc->mr_ndesc = 8; /* XXX: Should be dynamically expandable */
220128229Swpaul	sc->mr_desc = malloc(sc->mr_ndesc * sizeof(struct mem_range_desc),
221124697Swpaul	    M_MEMDESC, M_WAITOK | M_ZERO);
222124697Swpaul}
223124697Swpaul
224124724Swpaulstatic int
225124724Swpaulppc_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg)
226124697Swpaul{
227128229Swpaul	int i;
228124697Swpaul
229124697Swpaul	switch(*arg) {
230124697Swpaul	case MEMRANGE_SET_UPDATE:
231124697Swpaul		for (i = 0; i < sc->mr_ndesc; i++) {
232124724Swpaul			if (!sc->mr_desc[i].mr_len) {
233124697Swpaul				sc->mr_desc[i] = *desc;
234124697Swpaul				sc->mr_desc[i].mr_flags |= MDF_ACTIVE;
235125814Swpaul				return (0);
236128229Swpaul			}
237124697Swpaul			if (sc->mr_desc[i].mr_base == desc->mr_base &&
238124697Swpaul			    sc->mr_desc[i].mr_len == desc->mr_len)
239124697Swpaul				return (EEXIST);
240124697Swpaul		}
241124697Swpaul		return (ENOSPC);
242124697Swpaul	case MEMRANGE_SET_REMOVE:
243124697Swpaul		for (i = 0; i < sc->mr_ndesc; i++)
244124697Swpaul			if (sc->mr_desc[i].mr_base == desc->mr_base &&
245124697Swpaul			    sc->mr_desc[i].mr_len == desc->mr_len) {
246128229Swpaul				bzero(&sc->mr_desc[i], sizeof(sc->mr_desc[i]));
247124697Swpaul				return (0);
248124697Swpaul			}
249124697Swpaul		return (ENOENT);
250124697Swpaul	default:
251124697Swpaul		return (EOPNOTSUPP);
252124697Swpaul	}
253124697Swpaul
254124697Swpaul	return (0);
255128229Swpaul}
256128229Swpaul
257128229Swpaul/*
258124697Swpaul * Operations for changing memory attributes.
259124697Swpaul *
260124697Swpaul * This is basically just an ioctl shim for mem_range_attr_get
261124697Swpaul * and mem_range_attr_set.
262124697Swpaul */
263124697Swpaul/* ARGSUSED */
264124697Swpaulint
265124697Swpaulmemioctl(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
266124697Swpaul    struct thread *td)
267124697Swpaul{
268124697Swpaul	int nd, error = 0;
269124697Swpaul	struct mem_range_op *mo = (struct mem_range_op *)data;
270124697Swpaul	struct mem_range_desc *md;
271124724Swpaul
272124724Swpaul	/* is this for us? */
273125814Swpaul	if ((cmd != MEMRANGE_GET) &&
274124724Swpaul	    (cmd != MEMRANGE_SET))
275127284Swpaul		return (ENOTTY);
276127284Swpaul
277124724Swpaul	/* any chance we can handle this? */
278124697Swpaul	if (mem_range_softc.mr_op == NULL)
279124724Swpaul		return (EOPNOTSUPP);
280124724Swpaul
281125814Swpaul	/* do we have any descriptors? */
282124724Swpaul	if (mem_range_softc.mr_ndesc == 0)
283127284Swpaul		return (ENXIO);
284127284Swpaul
285124724Swpaul	switch (cmd) {
286124697Swpaul	case MEMRANGE_GET:
287124697Swpaul		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
288124697Swpaul		if (nd > 0) {
289124697Swpaul			md = (struct mem_range_desc *)
290124697Swpaul				malloc(nd * sizeof(struct mem_range_desc),
291124697Swpaul				       M_MEMDESC, M_WAITOK);
292124697Swpaul			error = mem_range_attr_get(md, &nd);
293124697Swpaul			if (!error)
294124697Swpaul				error = copyout(md, mo->mo_desc,
295124697Swpaul					nd * sizeof(struct mem_range_desc));
296124697Swpaul			free(md, M_MEMDESC);
297124697Swpaul		}
298124697Swpaul		else
299124697Swpaul			nd = mem_range_softc.mr_ndesc;
300124697Swpaul		mo->mo_arg[0] = nd;
301124697Swpaul		break;
302124697Swpaul
303124697Swpaul	case MEMRANGE_SET:
304124697Swpaul		md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc),
305124697Swpaul						    M_MEMDESC, M_WAITOK);
306124697Swpaul		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
307124697Swpaul		/* clamp description string */
308124697Swpaul		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
309124697Swpaul		if (error == 0)
310124697Swpaul			error = mem_range_attr_set(md, &mo->mo_arg[0]);
311124697Swpaul		free(md, M_MEMDESC);
312124697Swpaul		break;
313124697Swpaul	}
314124697Swpaul	return (error);
315128229Swpaul}
316128229Swpaul
317124697Swpaul