ixp425_npe.c revision 267961
1/*- 2 * Copyright (c) 2006-2008 Sam Leffler, Errno Consulting 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30/*- 31 * Copyright (c) 2001-2005, Intel Corporation. 32 * All rights reserved. 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. 
Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 3. Neither the name of the Intel Corporation nor the names of its contributors 43 * may be used to endorse or promote products derived from this software 44 * without specific prior written permission. 45 * 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' 48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 57 * SUCH DAMAGE. 58*/ 59#include <sys/cdefs.h> 60__FBSDID("$FreeBSD: head/sys/arm/xscale/ixp425/ixp425_npe.c 267961 2014-06-27 16:33:43Z hselasky $"); 61 62/* 63 * Intel XScale Network Processing Engine (NPE) support. 64 * 65 * Each NPE has an ixpnpeX device associated with it that is 66 * attached at boot. Depending on the microcode loaded into 67 * an NPE there may be an Ethernet interface (npeX) or some 68 * other network interface (e.g. for ATM). This file has support 69 * for loading microcode images and the associated NPE CPU 70 * manipulations (start, stop, reset). 71 * 72 * The code here basically replaces the npeDl and npeMh classes 73 * in the Intel Access Library (IAL). 74 * 75 * NB: Microcode images are loaded with firmware(9). 
 * To
 * include microcode in a static kernel include the
 * ixpnpe_fw device.  Otherwise the firmware will be
 * automatically loaded from the filesystem.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/time.h>
#include <sys/bus.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <sys/linker.h>
#include <sys/firmware.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/resource.h>
#include <machine/intr.h>
#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>

#include <arm/xscale/ixp425/ixp425_npereg.h>
#include <arm/xscale/ixp425/ixp425_npevar.h>

/*
 * Per-NPE state.  One softc is shared by all consumers of an NPE;
 * sc_nrefs counts attachments (see ixpnpe_attach/ixpnpe_detach).
 */
struct ixpnpe_softc {
	device_t	sc_dev;
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;
	bus_size_t	sc_size;	/* size of mapped register window */
	struct resource	*sc_irq;	/* IRQ resource */
	void		*sc_ih;		/* interrupt handler */
	struct mtx	sc_mtx;		/* mailbox lock */
	uint32_t	sc_msg[2];	/* reply msg collected in ixpnpe_intr */
	int		sc_msgwaiting;	/* sc_msg holds valid data */
	int		sc_npeid;	/* NPE_A/NPE_B/NPE_C */
	int		sc_nrefs;	/* # of references */

	int		validImage;	/* valid ucode image loaded */
	int		started;	/* NPE is started */
	uint8_t		functionalityId;/* ucode functionality ID */
	int		insMemSize;	/* size of instruction memory */
	int		dataMemSize;	/* size of data memory */
	/* state saved across npe_cpu_step_save/npe_cpu_step_restore */
	uint32_t	savedExecCount;
	uint32_t	savedEcsDbgCtxtReg2;
};
static struct ixpnpe_softc *npes[NPE_MAX];

#define	IX_NPEDL_NPEIMAGE_FIELD_MASK	0xff

/* used to read download map from version in microcode image */
#define	IX_NPEDL_BLOCK_TYPE_INSTRUCTION	0x00000000
#define	IX_NPEDL_BLOCK_TYPE_DATA	0x00000001
#define	IX_NPEDL_BLOCK_TYPE_STATE	0x00000002
#define	IX_NPEDL_END_OF_DOWNLOAD_MAP	0x0000000F

/*
 * masks used to extract address info from State information context
 * register addresses as read from microcode image
 */
#define	IX_NPEDL_MASK_STATE_ADDR_CTXT_REG	0x0000000F
#define	IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM	0x000000F0

/* LSB offset of Context Number field in State-Info Context Address */
#define	IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM	4

/* size (in words) of single State Information entry (ctxt reg address|data) */
#define	IX_NPEDL_STATE_INFO_ENTRY_SIZE	2

/*
 * NB: the following structs are overlaid on the raw microcode image;
 * the [1]-sized trailing arrays are the pre-C99 variable-length idiom
 * and are indexed past their declared bound.
 */
typedef struct {
	uint32_t type;
	uint32_t offset;
} IxNpeDlNpeMgrDownloadMapBlockEntry;

typedef union {
	IxNpeDlNpeMgrDownloadMapBlockEntry block;
	uint32_t eodmMarker;
} IxNpeDlNpeMgrDownloadMapEntry;

typedef struct {
	/* 1st entry in the download map (there may be more than one) */
	IxNpeDlNpeMgrDownloadMapEntry entry[1];
} IxNpeDlNpeMgrDownloadMap;

/* used to access an instruction or data block in a microcode image */
typedef struct {
	uint32_t npeMemAddress;
	uint32_t size;
	uint32_t data[1];
} IxNpeDlNpeMgrCodeBlock;

/* used to access each Context Reg entry state-information block */
typedef struct {
	uint32_t addressInfo;
	uint32_t value;
} IxNpeDlNpeMgrStateInfoCtxtRegEntry;

/* used to access a state-information block in a microcode image */
typedef struct {
	uint32_t size;
	IxNpeDlNpeMgrStateInfoCtxtRegEntry ctxtRegEntry[1];
} IxNpeDlNpeMgrStateInfoBlock;

/* debug.ixp425npe sysctl: non-zero enables DPRINTF/DPRINTFn output */
static int npe_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, ixp425npe, CTLFLAG_RWTUN, &npe_debug,
	   0, "IXP4XX NPE debug msgs");
#define	DPRINTF(dev, fmt, ...) do {					\
	if (npe_debug) device_printf(dev, fmt, __VA_ARGS__);		\
} while (0)
#define	DPRINTFn(n, dev, fmt, ...) do {					\
	if (npe_debug >= n) printf(fmt, __VA_ARGS__);			\
} while (0)

static int	npe_checkbits(struct ixpnpe_softc *, uint32_t reg, uint32_t);
static int	npe_isstopped(struct ixpnpe_softc *);
static int	npe_load_ins(struct ixpnpe_softc *,
		    const IxNpeDlNpeMgrCodeBlock *bp, int verify);
static int	npe_load_data(struct ixpnpe_softc *,
		    const IxNpeDlNpeMgrCodeBlock *bp, int verify);
static int	npe_load_stateinfo(struct ixpnpe_softc *,
		    const IxNpeDlNpeMgrStateInfoBlock *bp, int verify);
static int	npe_load_image(struct ixpnpe_softc *,
		    const uint32_t *imageCodePtr, int verify);
static int	npe_cpu_reset(struct ixpnpe_softc *);
static int	npe_cpu_start(struct ixpnpe_softc *);
static int	npe_cpu_stop(struct ixpnpe_softc *);
static void	npe_cmd_issue_write(struct ixpnpe_softc *,
		    uint32_t cmd, uint32_t addr, uint32_t data);
static uint32_t	npe_cmd_issue_read(struct ixpnpe_softc *,
		    uint32_t cmd, uint32_t addr);
static int	npe_ins_write(struct ixpnpe_softc *,
		    uint32_t addr, uint32_t data, int verify);
static int	npe_data_write(struct ixpnpe_softc *,
		    uint32_t addr, uint32_t data, int verify);
static void	npe_ecs_reg_write(struct ixpnpe_softc *,
		    uint32_t reg, uint32_t data);
static uint32_t	npe_ecs_reg_read(struct ixpnpe_softc *, uint32_t reg);
static void	npe_issue_cmd(struct ixpnpe_softc *, uint32_t command);
static void	npe_cpu_step_save(struct ixpnpe_softc *);
static int	npe_cpu_step(struct ixpnpe_softc *, uint32_t npeInstruction,
		    uint32_t ctxtNum, uint32_t ldur);
static void	npe_cpu_step_restore(struct ixpnpe_softc *);
static int	npe_logical_reg_read(struct ixpnpe_softc *,
		    uint32_t regAddr, uint32_t regSize,
		    uint32_t ctxtNum, uint32_t *regVal);
static int	npe_logical_reg_write(struct ixpnpe_softc *,
		    uint32_t regAddr, uint32_t regVal,
		    uint32_t regSize, uint32_t ctxtNum, int verify);
static int	npe_physical_reg_write(struct ixpnpe_softc *,
		    uint32_t regAddr, uint32_t regValue, int verify);
static int	npe_ctx_reg_write(struct ixpnpe_softc *, uint32_t ctxtNum,
		    uint32_t ctxtReg, uint32_t ctxtRegVal, int verify);

static void	ixpnpe_intr(void *arg);

/* Read an NPE register through the mapped bus-space window. */
static uint32_t
npe_reg_read(struct ixpnpe_softc *sc, bus_size_t off)
{
	uint32_t v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
	DPRINTFn(9, sc->sc_dev, "%s(0x%lx) => 0x%x\n", __func__, off, v);
	return v;
}

/* Write an NPE register through the mapped bus-space window. */
static void
npe_reg_write(struct ixpnpe_softc *sc, bus_size_t off, uint32_t val)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%lx, 0x%x)\n", __func__, off, val);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

/*
 * Attach to NPE npeid.  Softc's are shared: a second attach for the
 * same NPE just bumps the reference count on the existing softc.
 * On first attach this maps the register window, hooks the IRQ and
 * enables output-FIFO interrupts.  Returns NULL for a bad npeid.
 */
struct ixpnpe_softc *
ixpnpe_attach(device_t dev, int npeid)
{
	struct npeconfig {
		uint32_t	base;		/* register window phys base */
		uint32_t	size;		/* register window size */
		int		irq;
		uint32_t	ins_memsize;	/* instruction mem (words) */
		uint32_t	data_memsize;	/* data mem (words) */
	};
	static const struct npeconfig npeconfigs[NPE_MAX] = {
		[NPE_A] = {
		    .base = IXP425_NPE_A_HWBASE,
		    .size = IXP425_NPE_A_SIZE,
		    .irq = IXP425_INT_NPE_A,
		    .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEA,
		    .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEA
		},
		[NPE_B] = {
		    .base = IXP425_NPE_B_HWBASE,
		    .size = IXP425_NPE_B_SIZE,
		    .irq = IXP425_INT_NPE_B,
		    .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEB,
		    .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEB
		},
		[NPE_C] = {
		    .base = IXP425_NPE_C_HWBASE,
		    .size = IXP425_NPE_C_SIZE,
		    .irq = IXP425_INT_NPE_C,
		    .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEC,
		    .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEC
		},
	};
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
	struct ixpnpe_softc *sc;
	const struct npeconfig *config;
	int rid;

	if (npeid >= NPE_MAX) {
		device_printf(dev, "%s: bad npeid %d\n", __func__, npeid);
		return NULL;
	}
	sc = npes[npeid];
	if (sc != NULL) {
		/* already attached; share the softc */
		sc->sc_nrefs++;
		return sc;
	}
	config = &npeconfigs[npeid];

	/* XXX M_BUS */
	sc = malloc(sizeof(struct ixpnpe_softc), M_TEMP, M_WAITOK | M_ZERO);
	sc->sc_dev = dev;
	sc->sc_iot = sa->sc_iot;
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "npe driver", MTX_DEF);
	sc->sc_npeid = npeid;
	sc->sc_nrefs = 1;

	sc->sc_size = config->size;
	if (cpu_is_ixp42x()) {
		/* NB: instruction/data memory sizes are NPE-dependent */
		sc->insMemSize = config->ins_memsize;
		sc->dataMemSize = config->data_memsize;
	} else {
		sc->insMemSize = IXP46X_NPEDL_INS_MEMSIZE_WORDS;
		sc->dataMemSize = IXP46X_NPEDL_DATA_MEMSIZE_WORDS;
	}

	if (bus_space_map(sc->sc_iot, config->base, sc->sc_size, 0, &sc->sc_ioh))
		panic("%s: Cannot map registers", device_get_name(dev));

	/*
	 * Setup IRQ and handler for NPE message support.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    config->irq, config->irq, 1, RF_ACTIVE);
	if (sc->sc_irq == NULL)
		panic("%s: Unable to allocate irq %u", device_get_name(dev),
		    config->irq);
	/* XXX could be a source of entropy */
	bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ixpnpe_intr, sc, &sc->sc_ih);
	/*
	 * Enable output fifo interrupts (NB: must also set OFIFO Write Enable)
	 */
	npe_reg_write(sc, IX_NPECTL,
	    npe_reg_read(sc, IX_NPECTL) | (IX_NPECTL_OFE | IX_NPECTL_OFWE));

	npes[npeid] = sc;

	return sc;
}

/*
 * Drop a reference on the shared softc; on the last reference tear
 * down the interrupt, unmap the register window and free the softc.
 */
void
ixpnpe_detach(struct ixpnpe_softc *sc)
{
	if (--sc->sc_nrefs == 0) {
		npes[sc->sc_npeid] = NULL;

		/* disable output fifo interrupts */
		npe_reg_write(sc, IX_NPECTL,
		    npe_reg_read(sc, IX_NPECTL) &~ (IX_NPECTL_OFE | IX_NPECTL_OFWE));

		bus_teardown_intr(sc->sc_dev, sc->sc_irq, sc->sc_ih);
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_size);
		mtx_destroy(&sc->sc_mtx);
		free(sc, M_TEMP);
	}
}

/*
 * Stop the NPE and then reset its CPU state; marks the NPE
 * not-started on success.  Returns 0 or an errno.
 */
int
ixpnpe_stopandreset(struct ixpnpe_softc *sc)
{
	int error;

	mtx_lock(&sc->sc_mtx);
	error = npe_cpu_stop(sc);		/* stop NPE */
	if (error == 0)
		error = npe_cpu_reset(sc);	/* reset it */
	if (error == 0)
		sc->started = 0;		/* mark stopped */
	mtx_unlock(&sc->sc_mtx);

	DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
	return error;
}

/* Start the NPE if not already started; caller holds sc_mtx. */
static int
ixpnpe_start_locked(struct ixpnpe_softc *sc)
{
	int error;

	if (!sc->started) {
		error = npe_cpu_start(sc);
		if (error == 0)
			sc->started = 1;
	} else
		error = 0;

	DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
	return error;
}

/* Locked wrapper around ixpnpe_start_locked. */
int
ixpnpe_start(struct ixpnpe_softc *sc)
{
	int ret;

	mtx_lock(&sc->sc_mtx);
	ret = ixpnpe_start_locked(sc);
	mtx_unlock(&sc->sc_mtx);
	return (ret);
}

/* Stop the NPE (without resetting it); marks it not-started on success. */
int
ixpnpe_stop(struct ixpnpe_softc *sc)
{
	int error;

	mtx_lock(&sc->sc_mtx);
	error = npe_cpu_stop(sc);
	if (error == 0)
		sc->started = 0;
	mtx_unlock(&sc->sc_mtx);

	DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
	return error;
}

/*
 * Indicates the start of an NPE Image, in new NPE Image Library format.
 * 2 consecutive occurrences indicates the end of the NPE Image Library
 */
#define	NPE_IMAGE_MARKER 0xfeedf00d

/*
 * NPE Image Header definition, used in new NPE Image Library format
 */
typedef struct {
	uint32_t marker;	/* NPE_IMAGE_MARKER */
	uint32_t id;		/* image id (device/npe/func/version) */
	uint32_t size;		/* image payload size, in words */
} IxNpeDlImageMgrImageHeader;

/*
 * Walk the image library (a sequence of marker-prefixed images) and
 * locate the image with the requested id.  On success returns 0 with
 * *imagePtr/*imageSize pointing at the image payload; returns ESRCH
 * if the id is not present.
 */
static int
npe_findimage(struct ixpnpe_softc *sc,
    const uint32_t *imageLibrary, uint32_t imageId,
    const uint32_t **imagePtr, uint32_t *imageSize)
{
	const IxNpeDlImageMgrImageHeader *image;
	uint32_t offset = 0;

	while (imageLibrary[offset] == NPE_IMAGE_MARKER) {
		image = (const IxNpeDlImageMgrImageHeader *)
		    &imageLibrary[offset];
		offset += sizeof(IxNpeDlImageMgrImageHeader)/sizeof(uint32_t);

		DPRINTF(sc->sc_dev, "%s: off %u mark 0x%x id 0x%x size %u\n",
		    __func__, offset, image->marker, image->id, image->size);
		if (image->id == imageId) {
			*imagePtr = imageLibrary + offset;
			*imageSize = image->size;
			return 0;
		}
		/* 2 consecutive NPE_IMAGE_MARKER's indicates end of library */
		if (image->id == NPE_IMAGE_MARKER) {
			DPRINTF(sc->sc_dev, "imageId 0x%08x not found in "
			    "image library header\n", imageId);
			/* reached end of library, image not found */
			return ESRCH;
		}
		offset += image->size;
	}
	return ESRCH;
}

/*
 * Stop+reset the NPE, fetch the named firmware file with firmware(9),
 * locate imageId inside it, download the image and restart the NPE.
 * Returns 0, ENOENT (no firmware file), ESRCH (image not in file) or
 * another errno from the download/start path.
 */
static int
ixpnpe_load_firmware(struct ixpnpe_softc *sc, const char *imageName,
    uint32_t imageId)
{
	static const char *devname[4] =
	    { "IXP425", "IXP435/IXP465", "DeviceID#2", "DeviceID#3" };
	uint32_t imageSize;
	const uint32_t *imageCodePtr;
	const struct firmware *fw;
	int error;

	DPRINTF(sc->sc_dev, "load %s, imageId 0x%08x\n", imageName, imageId);

#if 0
	IxFeatureCtrlDeviceId devid = IX_NPEDL_DEVICEID_FROM_IMAGEID_GET(imageId);
	/*
	 * Checking if image being loaded is meant for device that is running.
	 * Image is forward compatible. i.e Image built for IXP42X should run
	 * on IXP46X but not vice versa.
	 */
	if (devid > (ixFeatureCtrlDeviceRead() & IX_FEATURE_CTRL_DEVICE_TYPE_MASK))
		return EINVAL;
#endif
	error = ixpnpe_stopandreset(sc);	/* stop and reset the NPE */
	if (error != 0)
		return error;

	fw = firmware_get(imageName);
	if (fw == NULL)
		return ENOENT;

	/* Locate desired image in files w/ combined images */
	error = npe_findimage(sc, fw->data, imageId, &imageCodePtr, &imageSize);
	if (error != 0)
		goto done;

	device_printf(sc->sc_dev,
	    "load fw image %s.NPE-%c Func 0x%x Rev %u.%u\n",
	    devname[NPEIMAGE_DEVID(imageId)], 'A' + NPEIMAGE_NPEID(imageId),
	    NPEIMAGE_FUNCID(imageId), NPEIMAGE_MAJOR(imageId),
	    NPEIMAGE_MINOR(imageId));

	/*
	 * If download was successful, store image Id in list of
	 * currently loaded images. If a critical error occured
	 * during download, record that the NPE has an invalid image
	 */
	mtx_lock(&sc->sc_mtx);
	error = npe_load_image(sc, imageCodePtr, 1 /*VERIFY*/);
	if (error == 0) {
		sc->validImage = 1;
		error = ixpnpe_start_locked(sc);
	} else {
		sc->validImage = 0;
	}
	sc->functionalityId = IX_NPEDL_FUNCTIONID_FROM_IMAGEID_GET(imageId);
	mtx_unlock(&sc->sc_mtx);
done:
	firmware_put(fw, FIRMWARE_UNLOAD);
	DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
	return error;
}

/*
 * Check for an npe.<unit>.<resname> hints override of the image id.
 * Returns 1 (and sets *val) when an override is present, else 0.
 */
static int
override_imageid(device_t dev, const char *resname, uint32_t *val)
{
	int unit = device_get_unit(dev);
	int resval;

	if (resource_int_value("npe", unit, resname, &resval) != 0)
		return 0;
	/* XXX validate */
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=0x%x override\n",
		    unit, resname, resval);
	*val = resval;
	return 1;
}

/*
 * Load firmware into the NPE and start it running, probing minor
 * versions upward from the configured image id until one is found.
 * Waits for the firmware's initial status message before returning.
 */
int
ixpnpe_init(struct ixpnpe_softc *sc)
{
	static const uint32_t npeconfig[NPE_MAX] = {
	    [NPE_A] = IXP425_NPE_A_IMAGEID,
	    [NPE_B] = IXP425_NPE_B_IMAGEID,
	    [NPE_C] = IXP425_NPE_C_IMAGEID,
	};
	uint32_t imageid, msg[2];
	int error;

	if (sc->started)
		return 0;
	/*
	 * Load NPE firmware and start it running.  We assume
	 * that minor version bumps remain compatible so probe
	 * the firmware image starting with the expected version
	 * and then bump the minor version up to the max.
	 */
	if (!override_imageid(sc->sc_dev, "imageid", &imageid))
		imageid = npeconfig[sc->sc_npeid];
	for (;;) {
		error = ixpnpe_load_firmware(sc, "npe_fw", imageid);
		if (error == 0)
			break;
		/*
		 * ESRCH is returned when the requested image
		 * is not present
		 */
		if (error != ESRCH) {
			device_printf(sc->sc_dev,
			    "cannot init NPE (error %d)\n", error);
			return error;
		}
		/* bump the minor version up to the max possible */
		if (NPEIMAGE_MINOR(imageid) == 0xff) {
			device_printf(sc->sc_dev, "cannot locate firmware "
			    "(imageid 0x%08x)\n", imageid);
			return error;
		}
		imageid++;
	}
	/* NB: firmware should respond with a status msg */
	if (ixpnpe_recvmsg_sync(sc, msg) != 0) {
		device_printf(sc->sc_dev,
		    "firmware did not respond as expected\n");
		return EIO;
	}
	return 0;
}

/* Return the loaded ucode functionality id, or 0 if no valid image. */
int
ixpnpe_getfunctionality(struct ixpnpe_softc *sc)
{
	return (sc->validImage ?
sc->functionalityId : 0); 600} 601 602static int 603npe_checkbits(struct ixpnpe_softc *sc, uint32_t reg, uint32_t expectedBitsSet) 604{ 605 uint32_t val; 606 607 val = npe_reg_read(sc, reg); 608 DPRINTFn(5, sc->sc_dev, "%s(0x%x, 0x%x) => 0x%x (%u)\n", 609 __func__, reg, expectedBitsSet, val, 610 (val & expectedBitsSet) == expectedBitsSet); 611 return ((val & expectedBitsSet) == expectedBitsSet); 612} 613 614static int 615npe_isstopped(struct ixpnpe_softc *sc) 616{ 617 return npe_checkbits(sc, 618 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP); 619} 620 621static int 622npe_load_ins(struct ixpnpe_softc *sc, 623 const IxNpeDlNpeMgrCodeBlock *bp, int verify) 624{ 625 uint32_t npeMemAddress; 626 int i, blockSize; 627 628 npeMemAddress = bp->npeMemAddress; 629 blockSize = bp->size; /* NB: instruction/data count */ 630 if (npeMemAddress + blockSize > sc->insMemSize) { 631 device_printf(sc->sc_dev, 632 "Block size %u too big for NPE memory\n", blockSize); 633 return EINVAL; /* XXX */ 634 } 635 for (i = 0; i < blockSize; i++, npeMemAddress++) { 636 if (npe_ins_write(sc, npeMemAddress, bp->data[i], verify) != 0) { 637 device_printf(sc->sc_dev, 638 "NPE instruction write failed"); 639 return EIO; 640 } 641 } 642 return 0; 643} 644 645static int 646npe_load_data(struct ixpnpe_softc *sc, 647 const IxNpeDlNpeMgrCodeBlock *bp, int verify) 648{ 649 uint32_t npeMemAddress; 650 int i, blockSize; 651 652 npeMemAddress = bp->npeMemAddress; 653 blockSize = bp->size; /* NB: instruction/data count */ 654 if (npeMemAddress + blockSize > sc->dataMemSize) { 655 device_printf(sc->sc_dev, 656 "Block size %u too big for NPE memory\n", blockSize); 657 return EINVAL; 658 } 659 for (i = 0; i < blockSize; i++, npeMemAddress++) { 660 if (npe_data_write(sc, npeMemAddress, bp->data[i], verify) != 0) { 661 device_printf(sc->sc_dev, "NPE data write failed\n"); 662 return EIO; 663 } 664 } 665 return 0; 666} 667 668static int 669npe_load_stateinfo(struct ixpnpe_softc *sc, 670 const 
IxNpeDlNpeMgrStateInfoBlock *bp, int verify) 671{ 672 int i, nentries, error; 673 674 npe_cpu_step_save(sc); 675 676 /* for each state-info context register entry in block */ 677 nentries = bp->size / IX_NPEDL_STATE_INFO_ENTRY_SIZE; 678 error = 0; 679 for (i = 0; i < nentries; i++) { 680 /* each state-info entry is 2 words (address, value) */ 681 uint32_t regVal = bp->ctxtRegEntry[i].value; 682 uint32_t addrInfo = bp->ctxtRegEntry[i].addressInfo; 683 684 uint32_t reg = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_REG); 685 uint32_t cNum = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM) >> 686 IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM; 687 688 /* error-check Context Register No. and Context Number values */ 689 if (!(0 <= reg && reg < IX_NPEDL_CTXT_REG_MAX)) { 690 device_printf(sc->sc_dev, 691 "invalid Context Register %u\n", reg); 692 error = EINVAL; 693 break; 694 } 695 if (!(0 <= cNum && cNum < IX_NPEDL_CTXT_NUM_MAX)) { 696 device_printf(sc->sc_dev, 697 "invalid Context Number %u\n", cNum); 698 error = EINVAL; 699 break; 700 } 701 /* NOTE that there is no STEVT register for Context 0 */ 702 if (cNum == 0 && reg == IX_NPEDL_CTXT_REG_STEVT) { 703 device_printf(sc->sc_dev, 704 "no STEVT for Context 0\n"); 705 error = EINVAL; 706 break; 707 } 708 709 if (npe_ctx_reg_write(sc, cNum, reg, regVal, verify) != 0) { 710 device_printf(sc->sc_dev, 711 "write of state-info to NPE failed\n"); 712 error = EIO; 713 break; 714 } 715 } 716 717 npe_cpu_step_restore(sc); 718 return error; 719} 720 721static int 722npe_load_image(struct ixpnpe_softc *sc, 723 const uint32_t *imageCodePtr, int verify) 724{ 725#define EOM(marker) ((marker) == IX_NPEDL_END_OF_DOWNLOAD_MAP) 726 const IxNpeDlNpeMgrDownloadMap *downloadMap; 727 int i, error; 728 729 if (!npe_isstopped(sc)) { /* verify NPE is stopped */ 730 device_printf(sc->sc_dev, 731 "cannot load image, NPE not stopped\n"); 732 return EIO; 733 } 734 735 /* 736 * Read Download Map, checking each block type and calling 737 * appropriate function to 
perform download 738 */ 739 error = 0; 740 downloadMap = (const IxNpeDlNpeMgrDownloadMap *) imageCodePtr; 741 for (i = 0; !EOM(downloadMap->entry[i].eodmMarker); i++) { 742 /* calculate pointer to block to be downloaded */ 743 const uint32_t *bp = imageCodePtr + 744 downloadMap->entry[i].block.offset; 745 switch (downloadMap->entry[i].block.type) { 746 case IX_NPEDL_BLOCK_TYPE_INSTRUCTION: 747 error = npe_load_ins(sc, 748 (const IxNpeDlNpeMgrCodeBlock *) bp, verify); 749 DPRINTF(sc->sc_dev, "%s: inst, error %d\n", 750 __func__, error); 751 break; 752 case IX_NPEDL_BLOCK_TYPE_DATA: 753 error = npe_load_data(sc, 754 (const IxNpeDlNpeMgrCodeBlock *) bp, verify); 755 DPRINTF(sc->sc_dev, "%s: data, error %d\n", 756 __func__, error); 757 break; 758 case IX_NPEDL_BLOCK_TYPE_STATE: 759 error = npe_load_stateinfo(sc, 760 (const IxNpeDlNpeMgrStateInfoBlock *) bp, verify); 761 DPRINTF(sc->sc_dev, "%s: state, error %d\n", 762 __func__, error); 763 break; 764 default: 765 device_printf(sc->sc_dev, 766 "unknown block type 0x%x in download map\n", 767 downloadMap->entry[i].block.type); 768 error = EIO; /* XXX */ 769 break; 770 } 771 if (error != 0) 772 break; 773 } 774 return error; 775#undef EOM 776} 777 778/* contains Reset values for Context Store Registers */ 779static const struct { 780 uint32_t regAddr; 781 uint32_t regResetVal; 782} ixNpeDlEcsRegResetValues[] = { 783 { IX_NPEDL_ECS_BG_CTXT_REG_0, IX_NPEDL_ECS_BG_CTXT_REG_0_RESET }, 784 { IX_NPEDL_ECS_BG_CTXT_REG_1, IX_NPEDL_ECS_BG_CTXT_REG_1_RESET }, 785 { IX_NPEDL_ECS_BG_CTXT_REG_2, IX_NPEDL_ECS_BG_CTXT_REG_2_RESET }, 786 { IX_NPEDL_ECS_PRI_1_CTXT_REG_0, IX_NPEDL_ECS_PRI_1_CTXT_REG_0_RESET }, 787 { IX_NPEDL_ECS_PRI_1_CTXT_REG_1, IX_NPEDL_ECS_PRI_1_CTXT_REG_1_RESET }, 788 { IX_NPEDL_ECS_PRI_1_CTXT_REG_2, IX_NPEDL_ECS_PRI_1_CTXT_REG_2_RESET }, 789 { IX_NPEDL_ECS_PRI_2_CTXT_REG_0, IX_NPEDL_ECS_PRI_2_CTXT_REG_0_RESET }, 790 { IX_NPEDL_ECS_PRI_2_CTXT_REG_1, IX_NPEDL_ECS_PRI_2_CTXT_REG_1_RESET }, 791 { 
IX_NPEDL_ECS_PRI_2_CTXT_REG_2, IX_NPEDL_ECS_PRI_2_CTXT_REG_2_RESET },
	{ IX_NPEDL_ECS_DBG_CTXT_REG_0, IX_NPEDL_ECS_DBG_CTXT_REG_0_RESET },
	{ IX_NPEDL_ECS_DBG_CTXT_REG_1, IX_NPEDL_ECS_DBG_CTXT_REG_1_RESET },
	{ IX_NPEDL_ECS_DBG_CTXT_REG_2, IX_NPEDL_ECS_DBG_CTXT_REG_2_RESET },
	{ IX_NPEDL_ECS_INSTRUCT_REG, IX_NPEDL_ECS_INSTRUCT_REG_RESET }
};

/* contains Reset values for Context Store Registers */
/* NB: indexed by context-register number (STEVT..CINDEX) */
static const uint32_t ixNpeDlCtxtRegResetValues[] = {
	IX_NPEDL_CTXT_REG_RESET_STEVT,
	IX_NPEDL_CTXT_REG_RESET_STARTPC,
	IX_NPEDL_CTXT_REG_RESET_REGMAP,
	IX_NPEDL_CTXT_REG_RESET_CINDEX,
};

#define	IX_NPEDL_PARITY_BIT_MASK	0x3F00FFFF
#define	IX_NPEDL_CONFIG_CTRL_REG_MASK	0x3F3FFFFF

#if 0
/*
 * Reset the NPE and its coprocessor using the
 * fuse bits in the feature control register.
 */
static void
npe_reset(int npeid)
{
	uint32_t mask = EXP_FCTRL_NPEA << npeid;
	uint32_t v;

	v = ixp4xx_read_feature_bits();
	ixp4xx_write_feature_bits(v &~ mask);
	/* un-fuse and un-reset the NPE & coprocessor */
	ixp4xx_write_feature_bits(v | mask);
}
#endif

/*
 * Soft-reset the NPE core: drain its FIFOs, reset the mailbox, zero
 * the physical register file and context store, restore the ECS
 * registers to their reset values and clear the misc state registers.
 * The NPE is left stopped; parity settings in the control register
 * are preserved across the reset.  Returns 0 or an errno.
 */
static int
npe_cpu_reset(struct ixpnpe_softc *sc)
{
#define	N(a)	(sizeof(a) / sizeof(a[0]))
	uint32_t ctxtReg; /* identifies Context Store reg (0-3) */
	uint32_t regAddr;
	uint32_t regVal;
	uint32_t ixNpeConfigCtrlRegVal;
	int i, error = 0;

	/* pre-store the NPE Config Control Register Value */
	ixNpeConfigCtrlRegVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL);
	ixNpeConfigCtrlRegVal |= 0x3F000000;

	/* disable the parity interrupt */
	npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL,
	    (ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK));
	DPRINTFn(2, sc->sc_dev, "%s: dis parity int, CTL => 0x%x\n",
	    __func__, ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK);

	npe_cpu_step_save(sc);

	/*
	 * Clear the FIFOs.
	 */
	while (npe_checkbits(sc,
	    IX_NPEDL_REG_OFFSET_WFIFO, IX_NPEDL_MASK_WFIFO_VALID)) {
		/* read from the Watch-point FIFO until empty */
		(void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WFIFO);
	}

	while (npe_checkbits(sc,
	    IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_OFNE)) {
		/* read from the outFIFO until empty */
		(void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_FIFO);
	}

	while (npe_checkbits(sc,
	    IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_IFNE)) {
		/*
		 * Step execution of the NPE intruction to read inFIFO using
		 * the Debug Executing Context stack.
		 */
		error = npe_cpu_step(sc, IX_NPEDL_INSTR_RD_FIFO, 0, 0);
		if (error != 0) {
			DPRINTF(sc->sc_dev, "%s: cannot step (1), error %u\n",
			    __func__, error);
			npe_cpu_step_restore(sc);
			return error;
		}
	}

	/*
	 * Reset the mailbox reg
	 */
	/* ...from XScale side */
	npe_reg_write(sc, IX_NPEDL_REG_OFFSET_MBST, IX_NPEDL_REG_RESET_MBST);
	/* ...from NPE side */
	error = npe_cpu_step(sc, IX_NPEDL_INSTR_RESET_MBOX, 0, 0);
	if (error != 0) {
		DPRINTF(sc->sc_dev, "%s: cannot step (2), error %u\n",
		    __func__, error);
		npe_cpu_step_restore(sc);
		return error;
	}

	/*
	 * Reset the physical registers in the NPE register file:
	 * Note: no need to save/restore REGMAP for Context 0 here
	 * since all Context Store regs are reset in subsequent code.
	 */
	for (regAddr = 0;
	     regAddr < IX_NPEDL_TOTAL_NUM_PHYS_REG && error == 0;
	     regAddr++) {
		/* for each physical register in the NPE reg file, write 0 : */
		error = npe_physical_reg_write(sc, regAddr, 0, TRUE);
		if (error != 0) {
			DPRINTF(sc->sc_dev, "%s: cannot write phy reg,"
			    "error %u\n", __func__, error);
			npe_cpu_step_restore(sc);
			return error;		/* abort reset */
		}
	}

	/*
	 * Reset the context store:
	 */
	for (i = IX_NPEDL_CTXT_NUM_MIN; i <= IX_NPEDL_CTXT_NUM_MAX; i++) {
		/* set each context's Context Store registers to reset values */
		for (ctxtReg = 0; ctxtReg < IX_NPEDL_CTXT_REG_MAX; ctxtReg++) {
			/* NOTE that there is no STEVT register for Context 0 */
			if (i == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STEVT)
				continue;
			regVal = ixNpeDlCtxtRegResetValues[ctxtReg];
			error = npe_ctx_reg_write(sc, i, ctxtReg,
			    regVal, TRUE);
			if (error != 0) {
				DPRINTF(sc->sc_dev, "%s: cannot write ctx reg,"
				    "error %u\n", __func__, error);
				npe_cpu_step_restore(sc);
				return error;	/* abort reset */
			}
		}
	}

	npe_cpu_step_restore(sc);

	/* write Reset values to Execution Context Stack registers */
	for (i = 0; i < N(ixNpeDlEcsRegResetValues); i++)
		npe_ecs_reg_write(sc,
		    ixNpeDlEcsRegResetValues[i].regAddr,
		    ixNpeDlEcsRegResetValues[i].regResetVal);

	/* clear the profile counter */
	npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT);

	/* clear registers EXCT, AP0, AP1, AP2 and AP3 */
	for (regAddr = IX_NPEDL_REG_OFFSET_EXCT;
	     regAddr <= IX_NPEDL_REG_OFFSET_AP3;
	     regAddr += sizeof(uint32_t))
		npe_reg_write(sc, regAddr, 0);

	/* Reset the Watch-count register */
	npe_reg_write(sc, IX_NPEDL_REG_OFFSET_WC, 0);
#if 0
	/*
	 * WR IXA00055043 - Remove IMEM Parity Introduced by NPE Reset Operation
	 * XXX Removed because it breaks IXP435 operation; e.g. on Gateworks
	 * XXX 2358 boards reseting NPE-A after NPE-C is running causes both
	 * XXX npe's to stop working
	 */
	npe_reset(sc->sc_npeid);
#endif
	/*
	 * Call NpeMgr function to stop the NPE again after the Feature Control
	 * has unfused and Un-Reset the NPE and its associated Coprocessors.
	 */
	error = npe_cpu_stop(sc);

	/* restore NPE configuration bus Control Register - Parity Settings */
	npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL,
	    (ixNpeConfigCtrlRegVal & IX_NPEDL_CONFIG_CTRL_REG_MASK));
	DPRINTFn(2, sc->sc_dev, "%s: restore CTL => 0x%x\n",
	    __func__, npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL));

	return error;
#undef N
}

/*
 * Start NPE execution: deactivate all Executing Context Stack levels
 * except Background, clear the pipeline and issue the START command.
 * Returns 0 if the NPE reports running status, else EIO.
 */
static int
npe_cpu_start(struct ixpnpe_softc *sc)
{
	uint32_t ecsRegVal;

	/*
	 * Ensure only Background Context Stack Level is Active by turning off
	 * the Active bit in each of the other Executing Context Stack levels.
	 */
	ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0);
	ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
	npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0, ecsRegVal);

	ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0);
	ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
	npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0, ecsRegVal);

	ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0);
	ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
	npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsRegVal);

	/* clear the pipeline */
	npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);

	/* start NPE execution by issuing cmd through EXCTL register on NPE */
	npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_START);

	/*
	 * Check execution status of NPE to verify operation was successful.
	 */
	return npe_checkbits(sc,
	    IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_RUN) ?
0 : EIO; 1008} 1009 1010static int 1011npe_cpu_stop(struct ixpnpe_softc *sc) 1012{ 1013 /* stop NPE execution by issuing cmd through EXCTL register on NPE */ 1014 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STOP); 1015 1016 /* verify that NPE Stop was successful */ 1017 return npe_checkbits(sc, 1018 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP) ? 0 : EIO; 1019} 1020 1021#define IX_NPEDL_REG_SIZE_BYTE 8 1022#define IX_NPEDL_REG_SIZE_SHORT 16 1023#define IX_NPEDL_REG_SIZE_WORD 32 1024 1025/* 1026 * Introduce extra read cycles after issuing read command to NPE 1027 * so that we read the register after the NPE has updated it 1028 * This is to overcome race condition between XScale and NPE 1029 */ 1030#define IX_NPEDL_DELAY_READ_CYCLES 2 1031/* 1032 * To mask top three MSBs of 32bit word to download into NPE IMEM 1033 */ 1034#define IX_NPEDL_MASK_UNUSED_IMEM_BITS 0x1FFFFFFF; 1035 1036static void 1037npe_cmd_issue_write(struct ixpnpe_softc *sc, 1038 uint32_t cmd, uint32_t addr, uint32_t data) 1039{ 1040 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, data); 1041 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr); 1042 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd); 1043} 1044 1045static uint32_t 1046npe_cmd_issue_read(struct ixpnpe_softc *sc, uint32_t cmd, uint32_t addr) 1047{ 1048 uint32_t data; 1049 int i; 1050 1051 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr); 1052 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd); 1053 for (i = 0; i <= IX_NPEDL_DELAY_READ_CYCLES; i++) 1054 data = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA); 1055 return data; 1056} 1057 1058static int 1059npe_ins_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify) 1060{ 1061 DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data); 1062 npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_INS_MEM, addr, data); 1063 if (verify) { 1064 uint32_t rdata; 1065 1066 /* 1067 * Write invalid data to this reg, so we can see if we're 1068 * reading the EXDATA register too 
early.
		 */
		npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data);

		/*
		 * Mask both sides of the compare: the top 3 MSBs are not
		 * used for Azusa hardware.  Refer WR:IXA00053900.
		 */
		data &= IX_NPEDL_MASK_UNUSED_IMEM_BITS;

		rdata = npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_INS_MEM,
		    addr);
		rdata &= IX_NPEDL_MASK_UNUSED_IMEM_BITS;

		if (data != rdata)
			return EIO;
	}
	return 0;
}

/*
 * Write one word of NPE data memory, optionally reading it back to
 * verify.  Returns 0 on success, EIO on verify mismatch.
 */
static int
npe_data_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify)
{
	DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data);
	npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_DATA_MEM, addr, data);
	if (verify) {
		/*
		 * Write invalid data to this reg, so we can see if we're
		 * reading the EXDATA register too early.
		 */
		npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data);
		if (data != npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_DATA_MEM, addr))
			return EIO;
	}
	return 0;
}

/* Write an Execution Context Stack (ECS) register. */
static void
npe_ecs_reg_write(struct ixpnpe_softc *sc, uint32_t reg, uint32_t data)
{
	npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_ECS_REG, reg, data);
}

/* Read an Execution Context Stack (ECS) register. */
static uint32_t
npe_ecs_reg_read(struct ixpnpe_softc *sc, uint32_t reg)
{
	return npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_ECS_REG, reg);
}

/* Issue a command to the NPE through the Execution Control register. */
static void
npe_issue_cmd(struct ixpnpe_softc *sc, uint32_t command)
{
	npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, command);
}

/*
 * Prepare the NPE for single-stepping: save EXCT and the debug ECS
 * context register 2 into the softc so npe_cpu_step_restore() can put
 * them back afterwards.
 */
static void
npe_cpu_step_save(struct ixpnpe_softc *sc)
{
	/* turn off the halt bit by clearing Execution Count register.
 */
	/* save reg contents 1st and restore later */
	sc->savedExecCount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXCT);
	npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, 0);

	/* ensure that IF and IE are on (temporarily), so that we don't end up
	 * stepping forever */
	sc->savedEcsDbgCtxtReg2 = npe_ecs_reg_read(sc,
	    IX_NPEDL_ECS_DBG_CTXT_REG_2);

	npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2,
	    (sc->savedEcsDbgCtxtReg2 | IX_NPEDL_MASK_ECS_DBG_REG_2_IF |
	    IX_NPEDL_MASK_ECS_DBG_REG_2_IE));
}

/*
 * Execute a single NPE instruction in context ctxtNum using the Debug
 * Executing Context Stack, then busy-wait (bounded) for the Watch
 * Count register to advance, which signals the instruction completed.
 * Callers bracket this with npe_cpu_step_save()/npe_cpu_step_restore().
 * Returns 0 on success, EIO if the step did not complete in time.
 */
static int
npe_cpu_step(struct ixpnpe_softc *sc, uint32_t npeInstruction,
    uint32_t ctxtNum, uint32_t ldur)
{
#define	IX_NPE_DL_MAX_NUM_OF_RETRIES	1000000
	uint32_t ecsDbgRegVal;
	uint32_t oldWatchcount, newWatchcount;
	int tries;

	/* set the Active bit, and the LDUR, in the debug level */
	ecsDbgRegVal = IX_NPEDL_MASK_ECS_REG_0_ACTIVE |
	    (ldur << IX_NPEDL_OFFSET_ECS_REG_0_LDUR);

	npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsDbgRegVal);

	/*
	 * Set CCTXT at ECS DEBUG L3 to specify in which context to execute the
	 * instruction, and set SELCTXT at ECS DEBUG Level to specify which
	 * context store to access.
	 * Debug ECS Level Reg 1 has form 0x000n000n, where n = context number
	 */
	ecsDbgRegVal = (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_CCTXT) |
	    (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_SELCTXT);

	npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_1, ecsDbgRegVal);

	/* clear the pipeline */
	npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);

	/* load NPE instruction into the instruction register */
	npe_ecs_reg_write(sc, IX_NPEDL_ECS_INSTRUCT_REG, npeInstruction);

	/* need this value later to wait for completion of NPE execution step */
	oldWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);

	/* issue a Step One command via the Execution Control register */
	npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STEP);

	/*
	 * Force the XScale to wait until the NPE has finished execution step.
	 * NOTE that this delay will be very small, just long enough to allow a
	 * single NPE instruction to complete execution; if instruction
	 * execution is not completed before timeout retries, exit the while
	 * loop.
	 */
	newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
	for (tries = 0; tries < IX_NPE_DL_MAX_NUM_OF_RETRIES &&
	    newWatchcount == oldWatchcount; tries++) {
		/* Watch Count register incr's when NPE completes an inst */
		newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
	}
	return (tries < IX_NPE_DL_MAX_NUM_OF_RETRIES) ? 0 : EIO;
#undef IX_NPE_DL_MAX_NUM_OF_RETRIES
}

/*
 * Undo npe_cpu_step_save(): deactivate the debug ECS level, clear the
 * pipeline, and restore the saved EXCT and debug-register contents.
 */
static void
npe_cpu_step_restore(struct ixpnpe_softc *sc)
{
	/* clear active bit in debug level */
	npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, 0);

	/* clear the pipeline */
	npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);

	/* restore Execution Count register contents.
 */
	npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, sc->savedExecCount);

	/* restore IF and IE bits to original values */
	npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2, sc->savedEcsDbgCtxtReg2);
}

/*
 * Read a logical NPE register (8/16/32 bits wide) from the given
 * context by single-stepping a register-read instruction through the
 * Debug ECS and then fetching the result from EXDATA.
 *
 * Returns 0 with the right-aligned value in *regVal, EINVAL for a bad
 * regSize, or the error from npe_cpu_step().
 */
static int
npe_logical_reg_read(struct ixpnpe_softc *sc,
    uint32_t regAddr, uint32_t regSize,
    uint32_t ctxtNum, uint32_t *regVal)
{
	uint32_t npeInstruction, mask;
	int error;

	switch (regSize) {
	case IX_NPEDL_REG_SIZE_BYTE:
		npeInstruction = IX_NPEDL_INSTR_RD_REG_BYTE;
		mask = 0xff;
		break;
	case IX_NPEDL_REG_SIZE_SHORT:
		npeInstruction = IX_NPEDL_INSTR_RD_REG_SHORT;
		mask = 0xffff;
		break;
	case IX_NPEDL_REG_SIZE_WORD:
		npeInstruction = IX_NPEDL_INSTR_RD_REG_WORD;
		mask = 0xffffffff;
		break;
	default:
		return EINVAL;
	}

	/* make regAddr be the SRC and DEST operands (e.g. movX d0, d0) */
	npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_SRC) |
	    (regAddr << IX_NPEDL_OFFSET_INSTR_DEST);

	/* step execution of NPE inst using Debug Executing Context stack */
	error = npe_cpu_step(sc, npeInstruction, ctxtNum,
	    IX_NPEDL_RD_INSTR_LDUR);
	if (error != 0) {
		DPRINTF(sc->sc_dev, "%s(0x%x, %u, %u), cannot step, error %d\n",
		    __func__, regAddr, regSize, ctxtNum, error);
		return error;
	}
	/* read value of register from Execution Data register */
	*regVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA);

	/* align value from left to right */
	*regVal = (*regVal >> (IX_NPEDL_REG_SIZE_WORD - regSize)) & mask;

	return 0;
}

/*
 * Write a logical NPE register, optionally reading it back to verify.
 * A 32-bit write recurses into two 16-bit writes because the immediate
 * write instruction only carries 16 bits of data (5 bits in the SRC
 * field plus 11 bits in the coprocessor field).
 *
 * Returns 0 on success, EINVAL for a bad regSize, EIO on verify
 * mismatch, or the error from npe_cpu_step().
 */
static int
npe_logical_reg_write(struct ixpnpe_softc *sc, uint32_t regAddr, uint32_t regVal,
    uint32_t regSize, uint32_t ctxtNum, int verify)
{
	int error;

	DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x, %u, %u)\n",
	    __func__, regAddr, regVal, regSize, ctxtNum);
	if (regSize == IX_NPEDL_REG_SIZE_WORD) {
		/*
		 * NPE register addressing is left-to-right: e.g. |d0|d1|d2|d3|
		 * Write upper half-word (short) to |d0|d1|
		 */
		error = npe_logical_reg_write(sc, regAddr,
		    regVal >> IX_NPEDL_REG_SIZE_SHORT,
		    IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify);
		if (error != 0)
			return error;

		/* Write lower half-word (short) to |d2|d3| */
		error = npe_logical_reg_write(sc,
		    regAddr + sizeof(uint16_t),
		    regVal & 0xffff,
		    IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify);
	} else {
		uint32_t npeInstruction;

		switch (regSize) {
		case IX_NPEDL_REG_SIZE_BYTE:
			npeInstruction = IX_NPEDL_INSTR_WR_REG_BYTE;
			regVal &= 0xff;
			break;
		case IX_NPEDL_REG_SIZE_SHORT:
			npeInstruction = IX_NPEDL_INSTR_WR_REG_SHORT;
			regVal &= 0xffff;
			break;
		default:
			return EINVAL;
		}
		/* fill dest operand field of inst with dest reg addr */
		npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_DEST);

		/* fill src operand field of inst with least-sig 5 bits of val*/
		npeInstruction |=
		    ((regVal & IX_NPEDL_MASK_IMMED_INSTR_SRC_DATA) <<
		    IX_NPEDL_OFFSET_INSTR_SRC);

		/* fill coprocessor field of inst with most-sig 11 bits of val*/
		npeInstruction |=
		    ((regVal & IX_NPEDL_MASK_IMMED_INSTR_COPROC_DATA) <<
		    IX_NPEDL_DISPLACE_IMMED_INSTR_COPROC_DATA);

		/* step execution of NPE intruction using Debug ECS */
		error = npe_cpu_step(sc, npeInstruction,
		    ctxtNum, IX_NPEDL_WR_INSTR_LDUR);
	}
	if (error != 0) {
		DPRINTF(sc->sc_dev, "%s(0x%x, 0x%x, %u, %u), error %u "
		    "writing reg\n", __func__, regAddr, regVal, regSize,
		    ctxtNum, error);
		return error;
	}
	if (verify) {
		uint32_t retRegVal;

		error = npe_logical_reg_read(sc, regAddr, regSize, ctxtNum,
		    &retRegVal);
		if (error == 0 && regVal != retRegVal)
			error = EIO;	/* XXX ambiguous */
	}
	return error;
}

/*
 * There are 32 physical
registers used in an NPE.  These are
 * treated as 16 pairs of 32-bit registers.  To write one of the pair,
 * write the pair number (0-15) to the REGMAP for Context 0.  Then write
 * the value to register 0 or 4 in the regfile, depending on which
 * register of the pair is to be written.
 */
static int
npe_physical_reg_write(struct ixpnpe_softc *sc,
    uint32_t regAddr, uint32_t regValue, int verify)
{
	int error;

	/*
	 * Set REGMAP for context 0 to (regAddr >> 1) to choose which pair
	 * (0-15) of physical registers to write.
	 */
	error = npe_logical_reg_write(sc, IX_NPEDL_CTXT_REG_ADDR_REGMAP,
	    (regAddr >> IX_NPEDL_OFFSET_PHYS_REG_ADDR_REGMAP),
	    IX_NPEDL_REG_SIZE_SHORT, 0, verify);
	if (error == 0) {
		/* regAddr = 0 or 4 */
		regAddr = (regAddr & IX_NPEDL_MASK_PHYS_REG_ADDR_LOGICAL_ADDR) *
		    sizeof(uint32_t);
		error = npe_logical_reg_write(sc, regAddr, regValue,
		    IX_NPEDL_REG_SIZE_WORD, 0, verify);
	}
	return error;
}

/*
 * Write a per-context Context Store register (STEVT/STARTPC/REGMAP/
 * CINDEX), optionally verifying the write.  Returns 0 on success or
 * the error from npe_logical_reg_write().
 */
static int
npe_ctx_reg_write(struct ixpnpe_softc *sc, uint32_t ctxtNum,
    uint32_t ctxtReg, uint32_t ctxtRegVal, int verify)
{
	DPRINTFn(4, sc->sc_dev, "%s(%u, %u, %u)\n",
	    __func__, ctxtNum, ctxtReg, ctxtRegVal);
	/*
	 * Context 0 has no STARTPC.  Instead, this value is used to set
	 * NextPC for Background ECS, to set where NPE starts executing code
	 */
	if (ctxtNum == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STARTPC) {
		/* read BG_CTXT_REG_0, update NEXTPC bits, & write back to reg*/
		uint32_t v = npe_ecs_reg_read(sc, IX_NPEDL_ECS_BG_CTXT_REG_0);
		v &= ~IX_NPEDL_MASK_ECS_REG_0_NEXTPC;
		v |= (ctxtRegVal << IX_NPEDL_OFFSET_ECS_REG_0_NEXTPC) &
		    IX_NPEDL_MASK_ECS_REG_0_NEXTPC;

		npe_ecs_reg_write(sc, IX_NPEDL_ECS_BG_CTXT_REG_0, v);
		return 0;
	} else {
		/* address and width of each context-store register */
		static const struct {
			uint32_t regAddress;
			uint32_t regSize;
		} regAccInfo[IX_NPEDL_CTXT_REG_MAX] = {
			{ IX_NPEDL_CTXT_REG_ADDR_STEVT,
			  IX_NPEDL_REG_SIZE_BYTE },
			{ IX_NPEDL_CTXT_REG_ADDR_STARTPC,
			  IX_NPEDL_REG_SIZE_SHORT },
			{ IX_NPEDL_CTXT_REG_ADDR_REGMAP,
			  IX_NPEDL_REG_SIZE_SHORT },
			{ IX_NPEDL_CTXT_REG_ADDR_CINDEX,
			  IX_NPEDL_REG_SIZE_BYTE }
		};
		return npe_logical_reg_write(sc, regAccInfo[ctxtReg].regAddress,
		    ctxtRegVal, regAccInfo[ctxtReg].regSize, ctxtNum, verify);
	}
}

/*
 * NPE Mailbox support.
 */
#define	IX_NPEMH_MAXTRIES	100000

/*
 * Spin (bounded by IX_NPEMH_MAXTRIES * 10us) until the output FIFO is
 * non-empty.  Returns 1 when data is available, 0 on timeout.
 */
static int
ofifo_wait(struct ixpnpe_softc *sc)
{
	int i;

	for (i = 0; i < IX_NPEMH_MAXTRIES; i++) {
		if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_OFNE)
			return 1;
		DELAY(10);
	}
	device_printf(sc->sc_dev, "%s: timeout, last status 0x%x\n",
	    __func__, npe_reg_read(sc, IX_NPESTAT));
	return 0;
}

/*
 * Read a two-word message from the NPE output FIFO.  Caller must hold
 * sc_mtx.  Returns 0 on success, EAGAIN if the FIFO never filled.
 */
static int
getmsg(struct ixpnpe_softc *sc, uint32_t msg[2])
{
	mtx_assert(&sc->sc_mtx, MA_OWNED);

	if (!ofifo_wait(sc))
		return EAGAIN;
	msg[0] = npe_reg_read(sc, IX_NPEFIFO);
	DPRINTF(sc->sc_dev, "%s: msg0 0x%x\n", __func__, msg[0]);
	if (!ofifo_wait(sc))
		return EAGAIN;
	msg[1] = npe_reg_read(sc, IX_NPEFIFO);
	DPRINTF(sc->sc_dev, "%s: msg1 0x%x\n", __func__, msg[1]);
	return 0;
}

/*
 * Interrupt handler: drain a pending message from the output FIFO into
 * sc_msg (silencing the interrupt) and mark it available for readers.
 */
static void
ixpnpe_intr(void *arg)
{
	struct ixpnpe_softc *sc = arg;
	uint32_t status;

	mtx_lock(&sc->sc_mtx);
	status = npe_reg_read(sc, IX_NPESTAT);
	DPRINTF(sc->sc_dev, "%s: status 0x%x\n", __func__, status);
	if ((status & IX_NPESTAT_OFINT) == 0) {
		/* NB: should not happen */
		device_printf(sc->sc_dev, "%s: status 0x%x\n",
		    __func__, status);
		/* XXX must silence interrupt? */
		mtx_unlock(&sc->sc_mtx);
		return;
	}
	/*
	 * A message is waiting in the output FIFO, copy it so
	 * the interrupt will be silenced.
	 */
	if (getmsg(sc, sc->sc_msg) == 0)
		sc->sc_msgwaiting = 1;
	mtx_unlock(&sc->sc_mtx);
}

/*
 * Spin (bounded) until the input FIFO is non-full.  Returns 1 when
 * there is room to write, 0 on timeout.
 */
static int
ififo_wait(struct ixpnpe_softc *sc)
{
	int i;

	for (i = 0; i < IX_NPEMH_MAXTRIES; i++) {
		if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_IFNF)
			return 1;
		DELAY(10);
	}
	device_printf(sc->sc_dev, "%s: timeout, last status 0x%x\n",
	    __func__, npe_reg_read(sc, IX_NPESTAT));
	return 0;
}

/*
 * Write a two-word message into the NPE input FIFO.  Caller must hold
 * sc_mtx.  Returns 0 on success, EIO if the FIFO stayed full.
 */
static int
putmsg(struct ixpnpe_softc *sc, const uint32_t msg[2])
{
	mtx_assert(&sc->sc_mtx, MA_OWNED);

	DPRINTF(sc->sc_dev, "%s: msg 0x%x:0x%x\n", __func__, msg[0], msg[1]);
	if (!ififo_wait(sc))
		return EIO;
	npe_reg_write(sc, IX_NPEFIFO, msg[0]);
	if (!ififo_wait(sc))
		return EIO;
	npe_reg_write(sc, IX_NPEFIFO, msg[1]);

	return 0;
}

/*
 * Send a msg to the NPE and wait for a reply.  We spin as
 * we may be called early with interrupts not properly setup.
 */
int
ixpnpe_sendandrecvmsg_sync(struct ixpnpe_softc *sc,
	const uint32_t send[2], uint32_t recv[2])
{
	int error;

	mtx_lock(&sc->sc_mtx);
	error = putmsg(sc, send);
	if (error == 0)
		error = getmsg(sc, recv);
	mtx_unlock(&sc->sc_mtx);

	return error;
}

/*
 * Send a msg to the NPE w/o waiting for a reply.
 */
int
ixpnpe_sendmsg_async(struct ixpnpe_softc *sc, const uint32_t msg[2])
{
	int error;

	mtx_lock(&sc->sc_mtx);
	error = putmsg(sc, msg);
	mtx_unlock(&sc->sc_mtx);

	return error;
}

/*
 * Hand back a message previously captured by the interrupt handler,
 * if any.  Caller must hold sc_mtx.  Returns 0 and fills msg[], or
 * EAGAIN if no message is pending.
 */
static int
recvmsg_locked(struct ixpnpe_softc *sc, uint32_t msg[2])
{
	mtx_assert(&sc->sc_mtx, MA_OWNED);

	DPRINTF(sc->sc_dev, "%s: msgwaiting %d\n", __func__, sc->sc_msgwaiting);
	if (sc->sc_msgwaiting) {
		msg[0] = sc->sc_msg[0];
		msg[1] = sc->sc_msg[1];
		sc->sc_msgwaiting = 0;
		return 0;
	}
	return EAGAIN;
}

/*
 * Receive any msg previously received from the NPE.  If nothing
 * is available we return EAGAIN and the caller is required to
 * do a synchronous receive or try again later.
 */
int
ixpnpe_recvmsg_async(struct ixpnpe_softc *sc, uint32_t msg[2])
{
	int error;

	mtx_lock(&sc->sc_mtx);
	error = recvmsg_locked(sc, msg);
	mtx_unlock(&sc->sc_mtx);

	return error;
}

/*
 * Receive a msg from the NPE.  If one was received asynchronously
 * then it's returned; otherwise we poll synchronously.
 */
int
ixpnpe_recvmsg_sync(struct ixpnpe_softc *sc, uint32_t msg[2])
{
	int error;

	mtx_lock(&sc->sc_mtx);
	error = recvmsg_locked(sc, msg);
	if (error == EAGAIN)
		error = getmsg(sc, msg);
	mtx_unlock(&sc->sc_mtx);

	return error;
}