/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/i686_mem.c 94683 2002-04-14 20:13:08Z dwmalone $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/smp.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * i686 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation.
 */

static char *mem_owner_bios = "BIOS";

#define MR686_FIXMTRR	(1<<0)

#define mrwithin(mr, a) \
	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define mroverlap(mra, mrb) \
	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define mrvalid(base, len) \
	((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
	 ((len) >= (1 << 12)) &&	/* length is >= 4k */		\
	 powerof2((len)) &&		/* ... and power of two */	\
	 !((base) & ((len) - 1)))	/* range is not discontinuous */
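/*
 * A worked example of the checks above (illustrative values only):
 * mrvalid(0xd0000000, 0x08000000) holds, since the base is 4k-aligned,
 * the length is a power of two no smaller than 4k, and the base is
 * aligned on a boundary of the length.  mrvalid(0xd0000000, 0x00300000)
 * fails: 3MB is not a power of two, so such a range cannot be described
 * by a single MTRR base/mask pair.
 */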
#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static void			i686_mrinit(struct mem_range_softc *sc);
static int			i686_mrset(struct mem_range_softc *sc,
					   struct mem_range_desc *mrd,
					   int *arg);
static void			i686_mrAPinit(struct mem_range_softc *sc);

static struct mem_range_ops i686_mrops = {
	i686_mrinit,
	i686_mrset,
	i686_mrAPinit
};

/* XXX for AP startup hook */
static u_int64_t		mtrrcap, mtrrdef;

static struct mem_range_desc	*mem_range_match(struct mem_range_softc *sc,
						 struct mem_range_desc *mrd);
static void			i686_mrfetch(struct mem_range_softc *sc);
static int			i686_mtrrtype(int flags);
static int			i686_mrt2mtrr(int flags, int oldval);
static int			i686_mtrrconflict(int flag1, int flag2);
static void			i686_mrstore(struct mem_range_softc *sc);
static void			i686_mrstoreone(void *arg);
static struct mem_range_desc	*i686_mtrrfixsearch(struct mem_range_softc *sc,
						    u_int64_t addr);
static int			i686_mrsetlow(struct mem_range_softc *sc,
					      struct mem_range_desc *mrd,
					      int *arg);
static int			i686_mrsetvariable(struct mem_range_softc *sc,
						   struct mem_range_desc *mrd,
						   int *arg);

/* i686 MTRR type to memory range type conversion */
static int i686_mtrrtomrt[] = {
	MDF_UNCACHEABLE,
	MDF_WRITECOMBINE,
	MDF_UNKNOWN,
	MDF_UNKNOWN,
	MDF_WRITETHROUGH,
	MDF_WRITEPROTECT,
	MDF_WRITEBACK
};

#define MTRRTOMRTLEN (sizeof(i686_mtrrtomrt) / sizeof(i686_mtrrtomrt[0]))

static int
i686_mtrr2mrt(int val)
{
	if (val < 0 || val >= MTRRTOMRTLEN)
		return MDF_UNKNOWN;
	return i686_mtrrtomrt[val];
}

/*
 * i686 MTRR conflicts.  Writeback and uncacheable may overlap.
 */
static int
i686_mtrrconflict(int flag1, int flag2)
{
	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return(0);
	return(1);
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc	*cand;
	int			i;

	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return(cand);
	return(NULL);
}
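/*
 * Note on the fixed-range fetch below: each fixed-range MTRR MSR packs
 * eight 8-bit type fields, one per consecutive sub-range, with the
 * lowest-addressed sub-range in the least significant byte; that is why
 * the inner loops consume one byte per descriptor and then shift the
 * MSR value right.  For example (illustrative value only), were
 * rdmsr(MSR_MTRR64kBase) to return 0x0606060606060600, the first 64k
 * chunk would carry type 0x00 (uncacheable) and the next seven chunks
 * type 0x06 (write-back).
 */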
/*
 * Fetch the current mtrr settings from the current CPU (assumed to all
 * be in sync in the SMP case).  Note that if we are here, we assume
 * that MTRRs are enabled, and we may or may not have fixed MTRRs.
 */
static void
i686_mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc	*mrd;
	u_int64_t		 msrv;
	int			 i, j, msr;

	mrd = sc->mr_desc;

	/* Get fixed-range MTRRs */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
					i686_mtrr2mrt(msrv & 0xff) |
					MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
					i686_mtrr2mrt(msrv & 0xff) |
					MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
					i686_mtrr2mrt(msrv & 0xff) |
					MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
	}

	/* Get remainder which must be variable MTRRs */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
			i686_mtrr2mrt(msrv & 0xff);
		mrd->mr_base = msrv & 0x0000000ffffff000LL;
		msrv = rdmsr(msr + 1);
		mrd->mr_flags = (msrv & 0x800) ?
			(mrd->mr_flags | MDF_ACTIVE) :
			(mrd->mr_flags & ~MDF_ACTIVE);
		/* Compute the range from the mask. Ick. */
		mrd->mr_len = (~(msrv & 0x0000000ffffff000LL) & 0x0000000fffffffffLL) + 1;
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;
		/* If unclaimed and active, must be the BIOS */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strcpy(mrd->mr_owner, mem_owner_bios);
	}
}

/*
 * Return the MTRR memory type matching a region's flags
 */
static int
i686_mtrrtype(int flags)
{
	int		i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < MTRRTOMRTLEN; i++) {
		if (i686_mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == i686_mtrrtomrt[i])
			return(i);
	}
	return(-1);
}

static int
i686_mrt2mtrr(int flags, int oldval)
{
	int val;

	if ((val = i686_mtrrtype(flags)) == -1)
		return oldval & 0xff;
	return val & 0xff;
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
i686_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 */
	smp_rendezvous(NULL, i686_mrstoreone, NULL, (void *)sc);
#else
	disable_intr();				/* disable interrupts */
	i686_mrstoreone((void *)sc);
	enable_intr();
#endif
}
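/*
 * The store routine below follows the MTRR update sequence documented
 * by Intel for the P6 family: disable caching (CD = 1, NW = 0), flush
 * the caches with wbinvd(), clear the MTRR enable bit in MTRRdefType,
 * write the new values, flush again, then re-enable the MTRRs and
 * caching.  Skipping steps in this ordering can leave cache lines
 * resident with a stale memory type.
 */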
/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than
 * just stuffing one entry; this is simpler (but slower, of course).
 */
static void
i686_mrstoreone(void *arg)
{
	struct mem_range_softc	*sc = (struct mem_range_softc *)arg;
	struct mem_range_desc	*mrd;
	u_int64_t		 omsrv, msrv;
	int			 i, j, msr;
	u_int			 cr4save;

	mrd = sc->mr_desc;

	cr4save = rcr4();			/* save cr4 */
	if (cr4save & CR4_PGE)
		load_cr4(cr4save & ~CR4_PGE);
	load_cr0((rcr0() & ~CR0_NW) | CR0_CD);	/* disable caches (CD = 1, NW = 0) */
	wbinvd();				/* flush caches, TLBs */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800);	/* disable MTRRs (E = 0) */

	/* Set fixed-range MTRRs */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set remainder which must be variable MTRRs */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		/* base/type register */
		omsrv = rdmsr(msr);
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & 0x0000000ffffff000LL;
			msrv |= i686_mrt2mtrr(mrd->mr_flags, omsrv);
		} else {
			msrv = 0;
		}
		wrmsr(msr, msrv);

		/* mask/active register */
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = 0x800 | (~(mrd->mr_len - 1) & 0x0000000ffffff000LL);
		} else {
			msrv = 0;
		}
		wrmsr(msr + 1, msrv);
	}
	wbinvd();				/* flush caches, TLBs */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | 0x800);	/* restore MTRR state */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* enable caches CD = 0 and NW = 0 */
	load_cr4(cr4save);			/* restore cr4 */
}
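/*
 * A worked example of the mask arithmetic used in the variable-range
 * loops above and in i686_mrfetch() (illustrative values only): for a
 * 16MB range, mr_len = 0x01000000, so the value stored in the mask MSR
 * is ~(0x01000000 - 1) & 0x0000000ffffff000LL == 0x0000000fff000000.
 * Going the other way, (~0x0000000fff000000 & 0x0000000fffffffffLL) + 1
 * recovers 0x01000000.  The round trip only works because mrvalid()
 * has already forced the length to be a power of two.
 */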
/*
 * Hunt for the fixed MTRR referencing (addr)
 */
static struct mem_range_desc *
i686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
	struct mem_range_desc	*mrd;
	int			i;

	for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++)
		if ((addr >= mrd->mr_base) && (addr < (mrd->mr_base + mrd->mr_len)))
			return(mrd);
	return(NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed MTRRs that
 * cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to the
 * next higher/lower boundary to avoid the consumer having to know too much
 * about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting "busy" ranges.
 */
static int
i686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc	*first_md, *last_md, *curr_md;

	/* range check */
	if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
	    ((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
		return(EINVAL);

	/* set flags, clear set-by-firmware flag */
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags);
		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
	}

	return(0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc	*curr_md, *free_md;
	int			 i;

	/*
	 * Scan the currently active variable descriptors, look for
	 * one we exactly match (straight takeover) and for possible
	 * accidental overlaps.
	 * Keep track of the first empty variable descriptor in case we
	 * can't perform a takeover.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	curr_md = sc->mr_desc + i;
	free_md = NULL;
	for (; i < sc->mr_ndesc; i++, curr_md++) {
		if (curr_md->mr_flags & MDF_ACTIVE) {
			/* exact match? */
			if ((curr_md->mr_base == mrd->mr_base) &&
			    (curr_md->mr_len == mrd->mr_len)) {
				/* whoops, owned by someone */
				if (curr_md->mr_flags & MDF_BUSY)
					return(EBUSY);
				/* Ok, just hijack this entry */
				free_md = curr_md;
				break;
			}
			/* non-exact overlap? */
			if (mroverlap(curr_md, mrd)) {
				/* between conflicting region types? */
				if (i686_mtrrconflict(curr_md->mr_flags, mrd->mr_flags))
					return(EINVAL);
			}
		} else if (free_md == NULL) {
			free_md = curr_md;
		}
	}
	/* got somewhere to put it? */
	if (free_md == NULL)
		return(ENOSPC);

	/* Set up new descriptor */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
	return(0);
}
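/*
 * How requests arrive here: i686_mrset() is invoked through the
 * mem_range_softc ops vector, normally via mem_range_attr_set(); from
 * userland, memcontrol(8) drives the same path with the MEMRANGE_SET
 * ioctl on /dev/mem.  As an illustrative sketch only (the base address,
 * owner string and error handling are made up), an in-kernel consumer
 * asking for a write-combined 16MB frame buffer would do roughly:
 *
 *	struct mem_range_desc mrd;
 *	int arg = MEMRANGE_SET_UPDATE;
 *	int error;
 *
 *	mrd.mr_base = 0xf0000000;
 *	mrd.mr_len = 0x01000000;
 *	mrd.mr_flags = MDF_WRITECOMBINE;
 *	strcpy(mrd.mr_owner, "demo");
 *	error = mem_range_attr_set(&mrd, &arg);
 */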
/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
i686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc	*targ;
	int			 error = 0;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		/* make sure that what's being asked for is even possible at all */
		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
		    i686_mtrrtype(mrd->mr_flags) == -1)
			return(EINVAL);

#define FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))

		/* are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR686_FIXMTRR) &&
		    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
			if ((error = i686_mrsetlow(sc, mrd, arg)) != 0)
				return(error);
		} else {
			/* it's time to play with variable MTRRs */
			if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0)
				return(error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return(ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return(EPERM);
		if (targ->mr_flags & MDF_BUSY)
			return(EBUSY);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return(EOPNOTSUPP);
	}

	/* update the hardware */
	i686_mrstore(sc);
	i686_mrfetch(sc);	/* refetch to see where we're at */
	return(0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * fetch the initial settings.
 */
static void
i686_mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc	*mrd;
	int			 nmdesc = 0;
	int			 i;

	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled */
	if (!(mtrrdef & 0x800)) {
		if (bootverbose)
			printf("CPU supports MTRRs but not enabled\n");
		return;
	}
	nmdesc = mtrrcap & 0xff;
	printf("Pentium Pro MTRR support enabled\n");

	/* If fixed MTRRs supported and enabled */
	if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) {
		sc->mr_cap = MR686_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
	}

	sc->mr_desc =
	    (struct mem_range_desc *)malloc(nmdesc * sizeof(struct mem_range_desc),
					    M_MEMDESC, M_WAITOK | M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length */
	if (sc->mr_cap & MR686_FIXMTRR) {
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
		}
	}

	/*
	 * Get current settings, anything set now is considered to have
	 * been set by the firmware. (XXX has something already played here?)
	 */
	i686_mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
i686_mrAPinit(struct mem_range_softc *sc)
{
	i686_mrstoreone((void *)sc);		/* set MTRRs to match BSP */
	wrmsr(MSR_MTRRdefType, mtrrdef);	/* set MTRR behaviour to match BSP */
}

static void
i686_mem_drvinit(void *unused)
{
	/* Try for i686 MTRRs */
	if ((cpu_feature & CPUID_MTRR) &&
	    ((cpu_id & 0xf00) == 0x600) &&
	    ((strcmp(cpu_vendor, "GenuineIntel") == 0) ||
	     (strcmp(cpu_vendor, "AuthenticAMD") == 0))) {
		mem_range_softc.mr_op = &i686_mrops;
	}
}

SYSINIT(i686memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, i686_mem_drvinit, NULL)