/*	$NetBSD: altivec.c,v 1.22 2011/05/02 02:01:33 matt Exp $	*/

/*
 * Copyright (C) 1996 Wolfgang Solfrank.
 * Copyright (C) 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altivec.c,v 1.22 2011/05/02 02:01:33 matt Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>		/* for vcopypage/vzeropage */

#include <powerpc/pcb.h>
#include <powerpc/altivec.h>
#include <powerpc/spr.h>
#include <powerpc/oea/spr.h>
#include <powerpc/psl.h>

static void vec_state_load(lwp_t *, bool);
static void vec_state_save(lwp_t *);
static void vec_state_release(lwp_t *);

const pcu_ops_t vec_ops = {
	.pcu_id = PCU_VEC,
	.pcu_state_load = vec_state_load,
	.pcu_state_save = vec_state_save,
	.pcu_state_release = vec_state_release,
};

bool
vec_used_p(lwp_t *l)
{
	return (l->l_md.md_flags & MDLWP_USEDVEC) != 0;
}

void
vec_mark_used(lwp_t *l)
{
	l->l_md.md_flags |= MDLWP_USEDVEC;
}

void
vec_state_load(lwp_t *l, bool used)
{
	struct pcb * const pcb = lwp_getpcb(l);

	/*
	 * Enable AltiVec temporarily (and disable interrupts).
	 */
	const register_t msr = mfmsr();
	mtmsr((msr & ~PSL_EE) | PSL_VEC);
	__asm volatile ("isync");

	/*
	 * Load the vector unit from vreg, which is best done in
	 * assembly.
	 */
	vec_load_from_vreg(&pcb->pcb_vr);

	/*
	 * VRSAVE will be restored when the trap frame returns.
	 */
	l->l_md.md_utf->tf_vrsave = pcb->pcb_vr.vrsave;

	/*
	 * Restore MSR (turn off AltiVec).
	 */
	mtmsr(msr);
	__asm volatile ("isync");

	/*
	 * Mark vector registers as modified.
	 */
	l->l_md.md_flags |= MDLWP_USEDVEC;
}
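/*
 * For context: vec_state_load() is not called directly.  The PCU
 * framework invokes it lazily, via pcu_load(), the first time an LWP
 * touches the vector unit and takes an AltiVec-unavailable trap.  A
 * minimal sketch of such a call site follows; it is illustrative only
 * (the trap-handler shape is an assumption, not code from this file):
 *
 *	case EXC_VEC:			// vector-unavailable exception
 *		pcu_load(&vec_ops);	// ends up in vec_state_load()
 *		break;
 */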
void
vec_state_save(lwp_t *l)
{
	struct pcb * const pcb = lwp_getpcb(l);

	/*
	 * Turn on AltiVec, turn off interrupts.
	 */
	const register_t msr = mfmsr();
	mtmsr((msr & ~PSL_EE) | PSL_VEC);
	__asm volatile ("isync");

	/*
	 * Grab the contents of the vector unit.
	 */
	vec_unload_to_vreg(&pcb->pcb_vr);

	/*
	 * Save VRSAVE.
	 */
	pcb->pcb_vr.vrsave = l->l_md.md_utf->tf_vrsave;

	/*
	 * Note that we aren't using any CPU resources and stop any
	 * data streams.
	 */
	__asm volatile ("dssall; sync");

	/*
	 * Restore MSR (turn off AltiVec).
	 */
	mtmsr(msr);
	__asm volatile ("isync");
}

void
vec_state_release(lwp_t *l)
{
	__asm volatile("dssall;sync");
	l->l_md.md_utf->tf_srr1 &= ~PSL_VEC;
	l->l_md.md_flags &= ~PSL_VEC;
}

void
vec_restore_from_mcontext(struct lwp *l, const mcontext_t *mcp)
{
	struct pcb * const pcb = lwp_getpcb(l);

	KASSERT(l == curlwp);

	/* We don't need to save the current state, just drop it. */
	pcu_discard(&vec_ops);
	memcpy(pcb->pcb_vr.vreg, &mcp->__vrf.__vrs, sizeof (pcb->pcb_vr.vreg));
	pcb->pcb_vr.vscr = mcp->__vrf.__vscr;
	pcb->pcb_vr.vrsave = mcp->__vrf.__vrsave;
	l->l_md.md_utf->tf_vrsave = pcb->pcb_vr.vrsave;
}

bool
vec_save_to_mcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flagp)
{
	struct pcb * const pcb = lwp_getpcb(l);

	KASSERT(l == curlwp);

	/* Save AltiVec context, if any. */
	if (!vec_used_p(l))
		return false;

	/*
	 * If we're the AltiVec owner, dump its context to the PCB first.
	 */
	pcu_save(&vec_ops);

	mcp->__gregs[_REG_MSR] |= PSL_VEC;
	mcp->__vrf.__vscr = pcb->pcb_vr.vscr;
	mcp->__vrf.__vrsave = l->l_md.md_utf->tf_vrsave;
	memcpy(mcp->__vrf.__vrs, pcb->pcb_vr.vreg, sizeof (mcp->__vrf.__vrs));
	*flagp |= _UC_POWERPC_VEC;
	return true;
}

#define	ZERO_VEC	19

void
vzeropage(paddr_t pa)
{
	const paddr_t ea = pa + PAGE_SIZE;
	uint32_t vec[7], *vp = (void *) roundup((uintptr_t) vec, 16);
	register_t omsr, msr;

	__asm volatile("mfmsr %0" : "=r"(omsr) :);

	/*
	 * Turn on AltiVec, turn off interrupts.
	 */
	msr = (omsr & ~PSL_EE) | PSL_VEC;
	__asm volatile("sync; mtmsr %0; isync" :: "r"(msr));

	/*
	 * Save the VEC register we are going to use before we disable
	 * relocation.
	 */
	__asm("stvx %1,0,%0" :: "r"(vp), "n"(ZERO_VEC));
	__asm("vxor %0,%0,%0" :: "n"(ZERO_VEC));

	/*
	 * Zero the page using a single cache line.
	 */
	__asm volatile(
	    "	sync ;"
	    "	mfmsr	%[msr];"
	    "	rlwinm	%[msr],%[msr],0,28,26;"	/* Clear PSL_DR */
	    "	mtmsr	%[msr];"		/* Turn off DMMU */
	    "	isync;"
	    "1:	stvx	%[zv], %[pa], %[off0];"
	    "	stvxl	%[zv], %[pa], %[off16];"
	    "	stvx	%[zv], %[pa], %[off32];"
	    "	stvxl	%[zv], %[pa], %[off48];"
	    "	addi	%[pa], %[pa], 64;"
	    "	cmplw	%[pa], %[ea];"
	    "	blt+	1b;"
	    "	ori	%[msr], %[msr], 0x10;"	/* Set PSL_DR */
	    "	sync;"
	    "	mtmsr	%[msr];"		/* Turn on DMMU */
	    "	isync;"
	    :: [msr] "r"(msr), [pa] "b"(pa), [ea] "b"(ea),
	    [off0] "r"(0), [off16] "r"(16), [off32] "r"(32), [off48] "r"(48),
	    [zv] "n"(ZERO_VEC));

	/*
	 * Restore VEC register (now that we can access the stack again).
	 */
	__asm("lvx %1,0,%0" :: "r"(vp), "n"(ZERO_VEC));

	/*
	 * Restore old MSR (AltiVec OFF).
	 */
	__asm volatile("sync; mtmsr %0; isync" :: "r"(omsr));
}
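/*
 * A note on the on-stack scratch areas in vzeropage() above and
 * vcopypage() below: stvx/lvx need a 16-byte aligned save area, but a
 * uint32_t array is only guaranteed 4-byte alignment.  Over-allocating
 * by up to 12 bytes and rounding up therefore always leaves a properly
 * aligned window inside the array.  A minimal sketch of the idiom, for
 * one vector register:
 *
 *	uint32_t vec[7];	// 28 bytes: 16 for the register plus
 *				// up to 12 bytes of alignment slack
 *	uint32_t *vp = (void *)roundup((uintptr_t)vec, 16);
 *	// vp .. vp+15 now lies wholly within vec[]
 */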
#define	LO_VEC	16
#define	HI_VEC	17

void
vcopypage(paddr_t dst, paddr_t src)
{
	const paddr_t edst = dst + PAGE_SIZE;
	uint32_t vec[11], *vp = (void *) roundup((uintptr_t) vec, 16);
	register_t omsr, msr;

	__asm volatile("mfmsr %0" : "=r"(omsr) :);

	/*
	 * Turn on AltiVec, turn off interrupts.
	 */
	msr = (omsr & ~PSL_EE) | PSL_VEC;
	__asm volatile("sync; mtmsr %0; isync" :: "r"(msr));

	/*
	 * Save the VEC registers we will be using before we disable
	 * relocation.
	 */
	__asm("stvx %2,%1,%0" :: "b"(vp), "r"( 0), "n"(LO_VEC));
	__asm("stvx %2,%1,%0" :: "b"(vp), "r"(16), "n"(HI_VEC));

	/*
	 * Copy the page using a single cache line, with DMMU
	 * disabled.  On most PPCs, two vector registers occupy one
	 * cache line.
	 */
	__asm volatile(
	    "	sync ;"
	    "	mfmsr	%[msr];"
	    "	rlwinm	%[msr],%[msr],0,28,26;"	/* Clear PSL_DR */
	    "	mtmsr	%[msr];"		/* Turn off DMMU */
	    "	isync;"
	    "1:	lvx	%[lv], %[src], %[off0];"
	    "	stvx	%[lv], %[dst], %[off0];"
	    "	lvxl	%[hv], %[src], %[off16];"
	    "	stvxl	%[hv], %[dst], %[off16];"
	    "	addi	%[src], %[src], 32;"
	    "	addi	%[dst], %[dst], 32;"
	    "	cmplw	%[dst], %[edst];"
	    "	blt+	1b;"
	    "	ori	%[msr], %[msr], 0x10;"	/* Set PSL_DR */
	    "	sync;"
	    "	mtmsr	%[msr];"		/* Turn on DMMU */
	    "	isync;"
	    :: [msr] "r"(msr), [src] "b"(src), [dst] "b"(dst),
	    [edst] "b"(edst), [off0] "r"(0), [off16] "r"(16),
	    [lv] "n"(LO_VEC), [hv] "n"(HI_VEC));

	/*
	 * Restore VEC registers (now that we can access the stack again).
	 */
	__asm("lvx %2,%1,%0" :: "b"(vp), "r"( 0), "n"(LO_VEC));
	__asm("lvx %2,%1,%0" :: "b"(vp), "r"(16), "n"(HI_VEC));

	/*
	 * Restore old MSR (AltiVec OFF).
	 */
	__asm volatile("sync; mtmsr %0; isync" :: "r"(omsr));
}
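/*
 * Usage sketch (illustrative, not part of this file): the MD pmap can
 * route its page-zero and page-copy operations through vzeropage() and
 * vcopypage() when the CPU has AltiVec.  The guard variable named here
 * is an assumption for the sketch, not something defined in this file:
 *
 *	void
 *	pmap_zero_page(paddr_t pa)
 *	{
 *		if (pmap_use_altivec) {		// hypothetical guard
 *			vzeropage(pa);
 *			return;
 *		}
 *		// ... fall back to a scalar zeroing loop ...
 *	}
 */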