/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Memory special file
 */

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/uio.h>

#include <machine/md_var.h>
#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#include <machine/memdev.h>

72213383Snwhitehornstatic void ppc_mrinit(struct mem_range_softc *); 73213383Snwhitehornstatic int ppc_mrset(struct mem_range_softc *, struct mem_range_desc *, int *); 74133855Sssouhlal 75213383SnwhitehornMALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors"); 76213383Snwhitehorn 77213383Snwhitehornstatic struct mem_range_ops ppc_mem_range_ops = { 78213383Snwhitehorn ppc_mrinit, 79213383Snwhitehorn ppc_mrset, 80213383Snwhitehorn NULL, 81213383Snwhitehorn NULL 82213383Snwhitehorn}; 83213383Snwhitehornstruct mem_range_softc mem_range_softc = { 84213383Snwhitehorn &ppc_mem_range_ops, 85217515Sjkim 0, 0, NULL 86213383Snwhitehorn}; 87213383Snwhitehorn 88133855Sssouhlal/* ARGSUSED */ 89133855Sssouhlalint 90133855Sssouhlalmemrw(struct cdev *dev, struct uio *uio, int flags) 91133855Sssouhlal{ 92133855Sssouhlal struct iovec *iov; 93133855Sssouhlal int error = 0; 94133855Sssouhlal vm_offset_t va, eva, off, v; 95133855Sssouhlal vm_prot_t prot; 96190681Snwhitehorn struct vm_page m; 97190681Snwhitehorn vm_page_t marr; 98133855Sssouhlal vm_size_t cnt; 99133855Sssouhlal 100133855Sssouhlal cnt = 0; 101133855Sssouhlal error = 0; 102133855Sssouhlal 103133855Sssouhlal GIANT_REQUIRED; 104133855Sssouhlal 105133855Sssouhlal while (uio->uio_resid > 0 && !error) { 106133855Sssouhlal iov = uio->uio_iov; 107133855Sssouhlal if (iov->iov_len == 0) { 108133855Sssouhlal uio->uio_iov++; 109133855Sssouhlal uio->uio_iovcnt--; 110133855Sssouhlal if (uio->uio_iovcnt < 0) 111133855Sssouhlal panic("memrw"); 112133855Sssouhlal continue; 113133855Sssouhlal } 114183397Sed if (dev2unit(dev) == CDEV_MINOR_MEM) { 115133855Sssouhlalkmem_direct_mapped: v = uio->uio_offset; 116133855Sssouhlal 117133855Sssouhlal off = uio->uio_offset & PAGE_MASK; 118133855Sssouhlal cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base & 119133855Sssouhlal PAGE_MASK); 120133855Sssouhlal cnt = min(cnt, PAGE_SIZE - off); 121133855Sssouhlal cnt = min(cnt, iov->iov_len); 122133855Sssouhlal 123190681Snwhitehorn if (mem_valid(v, 
cnt)) { 124133855Sssouhlal error = EFAULT; 125133855Sssouhlal break; 126133855Sssouhlal } 127190681Snwhitehorn 128190681Snwhitehorn if (!pmap_dev_direct_mapped(v, cnt)) { 129190681Snwhitehorn error = uiomove((void *)v, cnt, uio); 130190681Snwhitehorn } else { 131190681Snwhitehorn m.phys_addr = trunc_page(v); 132190681Snwhitehorn marr = &m; 133190681Snwhitehorn error = uiomove_fromphys(&marr, off, cnt, uio); 134190681Snwhitehorn } 135133855Sssouhlal } 136183397Sed else if (dev2unit(dev) == CDEV_MINOR_KMEM) { 137133855Sssouhlal va = uio->uio_offset; 138133855Sssouhlal 139204312Snwhitehorn if ((va < VM_MIN_KERNEL_ADDRESS) || (va > virtual_end)) 140133855Sssouhlal goto kmem_direct_mapped; 141133855Sssouhlal 142133855Sssouhlal va = trunc_page(uio->uio_offset); 143133855Sssouhlal eva = round_page(uio->uio_offset 144133855Sssouhlal + iov->iov_len); 145133855Sssouhlal 146133855Sssouhlal /* 147133855Sssouhlal * Make sure that all the pages are currently resident 148133855Sssouhlal * so that we don't create any zero-fill pages. 149133855Sssouhlal */ 150133855Sssouhlal 151133855Sssouhlal for (; va < eva; va += PAGE_SIZE) 152204312Snwhitehorn if (pmap_extract(kernel_pmap, va) == 0) 153133855Sssouhlal return (EFAULT); 154133855Sssouhlal 155133855Sssouhlal prot = (uio->uio_rw == UIO_READ) 156133855Sssouhlal ? 
VM_PROT_READ : VM_PROT_WRITE; 157133855Sssouhlal 158133855Sssouhlal va = uio->uio_offset; 159133855Sssouhlal if (kernacc((void *) va, iov->iov_len, prot) 160133855Sssouhlal == FALSE) 161133855Sssouhlal return (EFAULT); 162133855Sssouhlal 163133855Sssouhlal error = uiomove((void *)va, iov->iov_len, uio); 164133855Sssouhlal 165133855Sssouhlal continue; 166133855Sssouhlal } 167133855Sssouhlal } 168133855Sssouhlal 169133855Sssouhlal return (error); 170133855Sssouhlal} 171133855Sssouhlal 172133855Sssouhlal/* 173133855Sssouhlal * allow user processes to MMAP some memory sections 174133855Sssouhlal * instead of going through read/write 175133855Sssouhlal */ 176133855Sssouhlalint 177201223Srnolandmemmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, 178201223Srnoland int prot, vm_memattr_t *memattr) 179133855Sssouhlal{ 180213383Snwhitehorn int i; 181213383Snwhitehorn 182271113Snwhitehorn if (dev2unit(dev) == CDEV_MINOR_MEM) 183271113Snwhitehorn *paddr = offset; 184271113Snwhitehorn else if (dev2unit(dev) == CDEV_MINOR_KMEM) 185271113Snwhitehorn *paddr = vtophys(offset); 186271113Snwhitehorn else 187133855Sssouhlal return (EFAULT); 188133855Sssouhlal 189213383Snwhitehorn for (i = 0; i < mem_range_softc.mr_ndesc; i++) { 190213383Snwhitehorn if (!(mem_range_softc.mr_desc[i].mr_flags & MDF_ACTIVE)) 191213383Snwhitehorn continue; 192213383Snwhitehorn 193213383Snwhitehorn if (offset >= mem_range_softc.mr_desc[i].mr_base && 194213383Snwhitehorn offset < mem_range_softc.mr_desc[i].mr_base + 195213383Snwhitehorn mem_range_softc.mr_desc[i].mr_len) { 196213383Snwhitehorn switch (mem_range_softc.mr_desc[i].mr_flags & 197213383Snwhitehorn MDF_ATTRMASK) { 198213383Snwhitehorn case MDF_WRITEBACK: 199213383Snwhitehorn *memattr = VM_MEMATTR_WRITE_BACK; 200213383Snwhitehorn break; 201213383Snwhitehorn case MDF_WRITECOMBINE: 202213383Snwhitehorn *memattr = VM_MEMATTR_WRITE_COMBINING; 203213383Snwhitehorn break; 204213383Snwhitehorn case MDF_UNCACHEABLE: 205213383Snwhitehorn 
*memattr = VM_MEMATTR_UNCACHEABLE; 206213383Snwhitehorn break; 207213383Snwhitehorn case MDF_WRITETHROUGH: 208213383Snwhitehorn *memattr = VM_MEMATTR_WRITE_THROUGH; 209213383Snwhitehorn break; 210213383Snwhitehorn } 211213383Snwhitehorn 212213383Snwhitehorn break; 213213383Snwhitehorn } 214213383Snwhitehorn } 215213383Snwhitehorn 216133855Sssouhlal return (0); 217133855Sssouhlal} 218133855Sssouhlal 219213383Snwhitehornstatic void 220213383Snwhitehornppc_mrinit(struct mem_range_softc *sc) 221213383Snwhitehorn{ 222213383Snwhitehorn sc->mr_cap = 0; 223213383Snwhitehorn sc->mr_ndesc = 8; /* XXX: Should be dynamically expandable */ 224213383Snwhitehorn sc->mr_desc = malloc(sc->mr_ndesc * sizeof(struct mem_range_desc), 225271113Snwhitehorn M_MEMDESC, M_WAITOK | M_ZERO); 226213383Snwhitehorn} 227213383Snwhitehorn 228213383Snwhitehornstatic int 229213383Snwhitehornppc_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg) 230213383Snwhitehorn{ 231213383Snwhitehorn int i; 232213383Snwhitehorn 233213383Snwhitehorn switch(*arg) { 234213383Snwhitehorn case MEMRANGE_SET_UPDATE: 235213383Snwhitehorn for (i = 0; i < sc->mr_ndesc; i++) { 236213383Snwhitehorn if (!sc->mr_desc[i].mr_len) { 237213383Snwhitehorn sc->mr_desc[i] = *desc; 238213383Snwhitehorn sc->mr_desc[i].mr_flags |= MDF_ACTIVE; 239213383Snwhitehorn return (0); 240213383Snwhitehorn } 241213383Snwhitehorn if (sc->mr_desc[i].mr_base == desc->mr_base && 242213383Snwhitehorn sc->mr_desc[i].mr_len == desc->mr_len) 243213383Snwhitehorn return (EEXIST); 244213383Snwhitehorn } 245213383Snwhitehorn return (ENOSPC); 246213383Snwhitehorn case MEMRANGE_SET_REMOVE: 247213383Snwhitehorn for (i = 0; i < sc->mr_ndesc; i++) 248213383Snwhitehorn if (sc->mr_desc[i].mr_base == desc->mr_base && 249213383Snwhitehorn sc->mr_desc[i].mr_len == desc->mr_len) { 250213383Snwhitehorn bzero(&sc->mr_desc[i], sizeof(sc->mr_desc[i])); 251213383Snwhitehorn return (0); 252213383Snwhitehorn } 253213383Snwhitehorn return (ENOENT); 
254213383Snwhitehorn default: 255213383Snwhitehorn return (EOPNOTSUPP); 256213383Snwhitehorn } 257213383Snwhitehorn 258213383Snwhitehorn return (0); 259213383Snwhitehorn} 260213383Snwhitehorn 261213383Snwhitehorn/* 262213383Snwhitehorn * Operations for changing memory attributes. 263213383Snwhitehorn * 264213383Snwhitehorn * This is basically just an ioctl shim for mem_range_attr_get 265213383Snwhitehorn * and mem_range_attr_set. 266213383Snwhitehorn */ 267213383Snwhitehorn/* ARGSUSED */ 268213383Snwhitehornint 269213383Snwhitehornmemioctl(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags, 270213383Snwhitehorn struct thread *td) 271213383Snwhitehorn{ 272213383Snwhitehorn int nd, error = 0; 273213383Snwhitehorn struct mem_range_op *mo = (struct mem_range_op *)data; 274213383Snwhitehorn struct mem_range_desc *md; 275213383Snwhitehorn 276213383Snwhitehorn /* is this for us? */ 277213383Snwhitehorn if ((cmd != MEMRANGE_GET) && 278213383Snwhitehorn (cmd != MEMRANGE_SET)) 279213383Snwhitehorn return (ENOTTY); 280213383Snwhitehorn 281213383Snwhitehorn /* any chance we can handle this? */ 282213383Snwhitehorn if (mem_range_softc.mr_op == NULL) 283213383Snwhitehorn return (EOPNOTSUPP); 284213383Snwhitehorn 285213383Snwhitehorn /* do we have any descriptors? 
*/ 286213383Snwhitehorn if (mem_range_softc.mr_ndesc == 0) 287213383Snwhitehorn return (ENXIO); 288213383Snwhitehorn 289213383Snwhitehorn switch (cmd) { 290213383Snwhitehorn case MEMRANGE_GET: 291213383Snwhitehorn nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc); 292213383Snwhitehorn if (nd > 0) { 293213383Snwhitehorn md = (struct mem_range_desc *) 294213383Snwhitehorn malloc(nd * sizeof(struct mem_range_desc), 295213383Snwhitehorn M_MEMDESC, M_WAITOK); 296213383Snwhitehorn error = mem_range_attr_get(md, &nd); 297213383Snwhitehorn if (!error) 298213383Snwhitehorn error = copyout(md, mo->mo_desc, 299213383Snwhitehorn nd * sizeof(struct mem_range_desc)); 300213383Snwhitehorn free(md, M_MEMDESC); 301213383Snwhitehorn } 302213383Snwhitehorn else 303213383Snwhitehorn nd = mem_range_softc.mr_ndesc; 304213383Snwhitehorn mo->mo_arg[0] = nd; 305213383Snwhitehorn break; 306213383Snwhitehorn 307213383Snwhitehorn case MEMRANGE_SET: 308213383Snwhitehorn md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc), 309213383Snwhitehorn M_MEMDESC, M_WAITOK); 310213383Snwhitehorn error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc)); 311213383Snwhitehorn /* clamp description string */ 312213383Snwhitehorn md->mr_owner[sizeof(md->mr_owner) - 1] = 0; 313213383Snwhitehorn if (error == 0) 314213383Snwhitehorn error = mem_range_attr_set(md, &mo->mo_arg[0]); 315213383Snwhitehorn free(md, M_MEMDESC); 316213383Snwhitehorn break; 317213383Snwhitehorn } 318213383Snwhitehorn return (error); 319213383Snwhitehorn} 320271113Snwhitehorn 321