// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2011-12 The Chromium OS Authors.
 *
 * This file is derived from the flashrom project.
 */

#define LOG_CATEGORY UCLASS_SPI

#include <bootstage.h>
#include <div64.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <pch.h>
#include <pci.h>
#include <pci_ids.h>
#include <spi.h>
#include <spi_flash.h>
#include <spi-mem.h>
#include <spl.h>
#include <asm/fast_spi.h>
#include <asm/io.h>
#include <dm/uclass-internal.h>
#include <asm/mtrr.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/sizes.h>

#include "ich.h"

/* Define DEBUG_TRACE to log every controller-register access */
#ifdef DEBUG_TRACE
#define debug_trace(fmt, args...) debug(fmt, ##args)
#else
#define debug_trace(x, args...)
#endif

/* Read an 8-bit register at byte offset @reg from the controller base */
static u8 ich_readb(struct ich_spi_priv *priv, int reg)
{
	u8 value = readb(priv->base + reg);

	debug_trace("read %2.2x from %4.4x\n", value, reg);

	return value;
}

/* Read a 16-bit register at byte offset @reg from the controller base */
static u16 ich_readw(struct ich_spi_priv *priv, int reg)
{
	u16 value = readw(priv->base + reg);

	debug_trace("read %4.4x from %4.4x\n", value, reg);

	return value;
}

/* Read a 32-bit register at byte offset @reg from the controller base */
static u32 ich_readl(struct ich_spi_priv *priv, int reg)
{
	u32 value = readl(priv->base + reg);

	debug_trace("read %8.8x from %4.4x\n", value, reg);

	return value;
}

/* Write an 8-bit @value to the register at byte offset @reg */
static void ich_writeb(struct ich_spi_priv *priv, u8 value, int reg)
{
	writeb(value, priv->base + reg);
	debug_trace("wrote %2.2x to %4.4x\n", value, reg);
}

/* Write a 16-bit @value to the register at byte offset @reg */
static void ich_writew(struct ich_spi_priv *priv, u16 value, int reg)
{
	writew(value, priv->base + reg);
	debug_trace("wrote %4.4x to %4.4x\n", value, reg);
}

/* Write a 32-bit @value to the register at byte offset @reg */
static void ich_writel(struct ich_spi_priv *priv, u32 value, int reg)
{
	writel(value, priv->base + reg);
	debug_trace("wrote %8.8x to %4.4x\n", value, reg);
}

/* Copy @size bytes from @value into the register block at offset @dest_reg */
static void write_reg(struct ich_spi_priv *priv, const void *value,
		      int dest_reg, uint32_t size)
{
	memcpy_toio(priv->base + dest_reg, value, size);
}

/* Copy @size bytes from the register block at offset @src_reg into @value */
static void read_reg(struct ich_spi_priv *priv, int src_reg, void *value,
		     uint32_t size)
{
	memcpy_fromio(value, priv->base + src_reg, size);
}

/*
 * Program the BIOS Base Address Register (BBAR), if this controller has one
 * (ctlr->bbar is a non-zero register offset), replacing its masked address
 * field with @minaddr
 */
static void ich_set_bbar(struct ich_spi_priv *ctlr, uint32_t minaddr)
{
	const uint32_t bbar_mask = 0x00ffff00;
	uint32_t ichspi_bbar;

	if (ctlr->bbar) {
		minaddr &= bbar_mask;
		ichspi_bbar = ich_readl(ctlr, ctlr->bbar) & ~bbar_mask;
		ichspi_bbar |= minaddr;
		ich_writel(ctlr, ichspi_bbar, ctlr->bbar);
	}
}

/* Return: true if the SPI flash supports the 33MHz speed */
static bool ich9_can_do_33mhz(struct udevice *dev)
{
	struct ich_spi_priv *priv = dev_get_priv(dev);
	u32 fdod, speed;

	/* Requires PCI config access to the PCH descriptor window */
	if (!CONFIG_IS_ENABLED(PCI) || !priv->pch)
		return false;
	/* Observe SPI Descriptor Component Section 0 */
	dm_pci_write_config32(priv->pch, 0xb0, 0x1000);

	/* Extract the Write/Erase SPI Frequency from descriptor */
	dm_pci_read_config32(priv->pch, 0xb4, &fdod);

	/* Bits 23:21 have the fast read clock frequency, 0=20MHz, 1=33MHz */
	speed = (fdod >> 21) & 7;

	return speed == 1;
}

/* Set the lock-down bit (SPIS_LOCK on ICH7, HSFS_FLOCKDN on ICH9) */
static void spi_lock_down(struct ich_spi_plat *plat, void *sbase)
{
	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = sbase;

		setbits_le16(&ich7_spi->spis, SPIS_LOCK);
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = sbase;

		setbits_le16(&ich9_spi->hsfs, HSFS_FLOCKDN);
	}
}

/* Return: true if the controller's lock-down bit is currently set */
static bool spi_lock_status(struct ich_spi_plat *plat, void *sbase)
{
	int lock = 0;

	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = sbase;

		lock = readw(&ich7_spi->spis) & SPIS_LOCK;
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = sbase;

		lock = readw(&ich9_spi->hsfs) & HSFS_FLOCKDN;
	}

	return lock != 0;
}
159 160static int spi_setup_opcode(struct ich_spi_priv *ctlr, struct spi_trans *trans, 161 bool lock) 162{ 163 uint16_t optypes; 164 uint8_t opmenu[ctlr->menubytes]; 165 166 if (!lock) { 167 /* The lock is off, so just use index 0. */ 168 ich_writeb(ctlr, trans->opcode, ctlr->opmenu); 169 optypes = ich_readw(ctlr, ctlr->optype); 170 optypes = (optypes & 0xfffc) | (trans->type & 0x3); 171 ich_writew(ctlr, optypes, ctlr->optype); 172 return 0; 173 } else { 174 /* The lock is on. See if what we need is on the menu. */ 175 uint8_t optype; 176 uint16_t opcode_index; 177 178 /* Write Enable is handled as atomic prefix */ 179 if (trans->opcode == SPI_OPCODE_WREN) 180 return 0; 181 182 read_reg(ctlr, ctlr->opmenu, opmenu, sizeof(opmenu)); 183 for (opcode_index = 0; opcode_index < ctlr->menubytes; 184 opcode_index++) { 185 if (opmenu[opcode_index] == trans->opcode) 186 break; 187 } 188 189 if (opcode_index == ctlr->menubytes) { 190 debug("ICH SPI: Opcode %x not found\n", trans->opcode); 191 return -EINVAL; 192 } 193 194 optypes = ich_readw(ctlr, ctlr->optype); 195 optype = (optypes >> (opcode_index * 2)) & 0x3; 196 197 if (optype != trans->type) { 198 debug("ICH SPI: Transaction doesn't fit type %d\n", 199 optype); 200 return -ENOSPC; 201 } 202 return opcode_index; 203 } 204} 205 206/* 207 * Wait for up to 6s til status register bit(s) turn 1 (in case wait_til_set 208 * below is true) or 0. In case the wait was for the bit(s) to set - write 209 * those bits back, which would cause resetting them. 210 * 211 * Return the last read status value on success or -1 on failure. 
212 */ 213static int ich_status_poll(struct ich_spi_priv *ctlr, u16 bitmask, 214 int wait_til_set) 215{ 216 int timeout = 600000; /* This will result in 6s */ 217 u16 status = 0; 218 219 while (timeout--) { 220 status = ich_readw(ctlr, ctlr->status); 221 if (wait_til_set ^ ((status & bitmask) == 0)) { 222 if (wait_til_set) { 223 ich_writew(ctlr, status & bitmask, 224 ctlr->status); 225 } 226 return status; 227 } 228 udelay(10); 229 } 230 debug("ICH SPI: SCIP timeout, read %x, expected %x, wts %x %x\n", 231 status, bitmask, wait_til_set, status & bitmask); 232 233 return -ETIMEDOUT; 234} 235 236static void ich_spi_config_opcode(struct udevice *dev) 237{ 238 struct ich_spi_priv *ctlr = dev_get_priv(dev); 239 240 /* 241 * PREOP, OPTYPE, OPMENU1/OPMENU2 registers can be locked down 242 * to prevent accidental or intentional writes. Before they get 243 * locked down, these registers should be initialized properly. 244 */ 245 ich_writew(ctlr, SPI_OPPREFIX, ctlr->preop); 246 ich_writew(ctlr, SPI_OPTYPE, ctlr->optype); 247 ich_writel(ctlr, SPI_OPMENU_LOWER, ctlr->opmenu); 248 ich_writel(ctlr, SPI_OPMENU_UPPER, ctlr->opmenu + sizeof(u32)); 249} 250 251static int ich_spi_exec_op_swseq(struct spi_slave *slave, 252 const struct spi_mem_op *op) 253{ 254 struct udevice *bus = dev_get_parent(slave->dev); 255 struct ich_spi_plat *plat = dev_get_plat(bus); 256 struct ich_spi_priv *ctlr = dev_get_priv(bus); 257 uint16_t control; 258 int16_t opcode_index; 259 int with_address; 260 int status; 261 struct spi_trans *trans = &ctlr->trans; 262 bool lock = spi_lock_status(plat, ctlr->base); 263 int ret = 0; 264 265 trans->in = NULL; 266 trans->out = NULL; 267 trans->type = 0xFF; 268 269 if (op->data.nbytes) { 270 if (op->data.dir == SPI_MEM_DATA_IN) { 271 trans->in = op->data.buf.in; 272 trans->bytesin = op->data.nbytes; 273 } else { 274 trans->out = op->data.buf.out; 275 trans->bytesout = op->data.nbytes; 276 } 277 } 278 279 if (trans->opcode != op->cmd.opcode) 280 trans->opcode = 
op->cmd.opcode; 281 282 if (lock && trans->opcode == SPI_OPCODE_WRDIS) 283 return 0; 284 285 if (trans->opcode == SPI_OPCODE_WREN) { 286 /* 287 * Treat Write Enable as Atomic Pre-Op if possible 288 * in order to prevent the Management Engine from 289 * issuing a transaction between WREN and DATA. 290 */ 291 if (!lock) 292 ich_writew(ctlr, trans->opcode, ctlr->preop); 293 return 0; 294 } 295 296 ret = ich_status_poll(ctlr, SPIS_SCIP, 0); 297 if (ret < 0) 298 return ret; 299 300 if (plat->ich_version == ICHV_7) 301 ich_writew(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status); 302 else 303 ich_writeb(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status); 304 305 /* Try to guess spi transaction type */ 306 if (op->data.dir == SPI_MEM_DATA_OUT) { 307 if (op->addr.nbytes) 308 trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS; 309 else 310 trans->type = SPI_OPCODE_TYPE_WRITE_NO_ADDRESS; 311 } else { 312 if (op->addr.nbytes) 313 trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS; 314 else 315 trans->type = SPI_OPCODE_TYPE_READ_NO_ADDRESS; 316 } 317 /* Special erase case handling */ 318 if (op->addr.nbytes && !op->data.buswidth) 319 trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS; 320 321 opcode_index = spi_setup_opcode(ctlr, trans, lock); 322 if (opcode_index < 0) 323 return -EINVAL; 324 325 if (op->addr.nbytes) { 326 trans->offset = op->addr.val; 327 with_address = 1; 328 } 329 330 if (ctlr->speed && ctlr->max_speed >= 33000000) { 331 int byte; 332 333 byte = ich_readb(ctlr, ctlr->speed); 334 if (ctlr->cur_speed >= 33000000) 335 byte |= SSFC_SCF_33MHZ; 336 else 337 byte &= ~SSFC_SCF_33MHZ; 338 ich_writeb(ctlr, byte, ctlr->speed); 339 } 340 341 /* Preset control fields */ 342 control = SPIC_SCGO | ((opcode_index & 0x07) << 4); 343 344 /* Issue atomic preop cycle if needed */ 345 if (ich_readw(ctlr, ctlr->preop)) 346 control |= SPIC_ACS; 347 348 if (!trans->bytesout && !trans->bytesin) { 349 /* SPI addresses are 24 bit only */ 350 if (with_address) { 351 ich_writel(ctlr, trans->offset & 
0x00FFFFFF, 352 ctlr->addr); 353 } 354 /* 355 * This is a 'no data' command (like Write Enable), its 356 * bitesout size was 1, decremented to zero while executing 357 * spi_setup_opcode() above. Tell the chip to send the 358 * command. 359 */ 360 ich_writew(ctlr, control, ctlr->control); 361 362 /* wait for the result */ 363 status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1); 364 if (status < 0) 365 return status; 366 367 if (status & SPIS_FCERR) { 368 debug("ICH SPI: Command transaction error\n"); 369 return -EIO; 370 } 371 372 return 0; 373 } 374 375 while (trans->bytesout || trans->bytesin) { 376 uint32_t data_length; 377 378 /* SPI addresses are 24 bit only */ 379 ich_writel(ctlr, trans->offset & 0x00FFFFFF, ctlr->addr); 380 381 if (trans->bytesout) 382 data_length = min(trans->bytesout, ctlr->databytes); 383 else 384 data_length = min(trans->bytesin, ctlr->databytes); 385 386 /* Program data into FDATA0 to N */ 387 if (trans->bytesout) { 388 write_reg(ctlr, trans->out, ctlr->data, data_length); 389 trans->bytesout -= data_length; 390 } 391 392 /* Add proper control fields' values */ 393 control &= ~((ctlr->databytes - 1) << 8); 394 control |= SPIC_DS; 395 control |= (data_length - 1) << 8; 396 397 /* write it */ 398 ich_writew(ctlr, control, ctlr->control); 399 400 /* Wait for Cycle Done Status or Flash Cycle Error */ 401 status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1); 402 if (status < 0) 403 return status; 404 405 if (status & SPIS_FCERR) { 406 debug("ICH SPI: Data transaction error %x\n", status); 407 return -EIO; 408 } 409 410 if (trans->bytesin) { 411 read_reg(ctlr, ctlr->data, trans->in, data_length); 412 trans->bytesin -= data_length; 413 } 414 } 415 416 /* Clear atomic preop now that xfer is done */ 417 if (!lock) 418 ich_writew(ctlr, 0, ctlr->preop); 419 420 return 0; 421} 422 423/* 424 * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and 425 * that the operation does not cross page boundary. 
426 */ 427static uint get_xfer_len(u32 offset, int len, int page_size) 428{ 429 uint xfer_len = min(len, SPIBAR_FDATA_FIFO_SIZE); 430 uint bytes_left = ALIGN(offset, page_size) - offset; 431 432 if (bytes_left) 433 xfer_len = min(xfer_len, bytes_left); 434 435 return xfer_len; 436} 437 438/* Fill FDATAn FIFO in preparation for a write transaction */ 439static void fill_xfer_fifo(struct fast_spi_regs *regs, const void *data, 440 uint len) 441{ 442 memcpy(regs->fdata, data, len); 443} 444 445/* Drain FDATAn FIFO after a read transaction populates data */ 446static void drain_xfer_fifo(struct fast_spi_regs *regs, void *dest, uint len) 447{ 448 memcpy(dest, regs->fdata, len); 449} 450 451/* Fire up a transfer using the hardware sequencer */ 452static void start_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle, 453 uint offset, uint len) 454{ 455 /* Make sure all W1C status bits get cleared */ 456 u32 hsfsts; 457 458 hsfsts = readl(®s->hsfsts_ctl); 459 hsfsts &= ~(HSFSTS_FCYCLE_MASK | HSFSTS_FDBC_MASK); 460 hsfsts |= HSFSTS_AEL | HSFSTS_FCERR | HSFSTS_FDONE; 461 462 /* Set up transaction parameters */ 463 hsfsts |= hsfsts_cycle << HSFSTS_FCYCLE_SHIFT; 464 hsfsts |= ((len - 1) << HSFSTS_FDBC_SHIFT) & HSFSTS_FDBC_MASK; 465 hsfsts |= HSFSTS_FGO; 466 467 writel(offset, ®s->faddr); 468 writel(hsfsts, ®s->hsfsts_ctl); 469} 470 471static int wait_for_hwseq_xfer(struct fast_spi_regs *regs, uint offset) 472{ 473 ulong start; 474 u32 hsfsts; 475 476 start = get_timer(0); 477 do { 478 hsfsts = readl(®s->hsfsts_ctl); 479 if (hsfsts & HSFSTS_FCERR) { 480 debug("SPI transaction error at offset %x HSFSTS = %08x\n", 481 offset, hsfsts); 482 return -EIO; 483 } 484 if (hsfsts & HSFSTS_AEL) 485 return -EPERM; 486 487 if (hsfsts & HSFSTS_FDONE) 488 return 0; 489 } while (get_timer(start) < SPIBAR_HWSEQ_XFER_TIMEOUT_MS); 490 491 debug("SPI transaction timeout at offset %x HSFSTS = %08x, timer %d\n", 492 offset, hsfsts, (uint)get_timer(start)); 493 494 return -ETIMEDOUT; 495} 496 
/**
 * exec_sync_hwseq_xfer() - Execute flash transfer by hardware sequencing
 *
 * This waits until complete or timeout
 *
 * @regs: SPI registers
 * @hsfsts_cycle: Cycle type (enum hsfsts_cycle_t)
 * @offset: Offset to access
 * @len: Number of bytes to transfer (can be 0)
 * Return: 0 if OK, -EIO on flash-cycle error (FCERR), -EPERM on access error
 *	(AEL), -ETIMEDOUT on timeout
 */
static int exec_sync_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
				uint offset, uint len)
{
	start_hwseq_xfer(regs, hsfsts_cycle, offset, len);

	return wait_for_hwseq_xfer(regs, offset);
}

/*
 * Execute a spi-mem operation using the hardware sequencer, which understands
 * a fixed set of SPI-NOR opcodes mapped to cycle types below
 */
static int ich_spi_exec_op_hwseq(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct spi_flash *flash = dev_get_uclass_priv(slave->dev);
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_priv *priv = dev_get_priv(bus);
	struct fast_spi_regs *regs = priv->base;
	uint page_size;
	uint offset;
	int cycle;
	uint len;
	bool out;
	int ret;
	u8 *buf;

	offset = op->addr.val;
	len = op->data.nbytes;

	/* Map the SPI-NOR opcode to a hardware-sequencer cycle type */
	switch (op->cmd.opcode) {
	case SPINOR_OP_RDID:
		cycle = HSFSTS_CYCLE_RDID;
		break;
	case SPINOR_OP_READ_FAST:
		cycle = HSFSTS_CYCLE_READ;
		break;
	case SPINOR_OP_PP:
		cycle = HSFSTS_CYCLE_WRITE;
		break;
	case SPINOR_OP_WREN:
		/* Nothing needs to be done */
		return 0;
	case SPINOR_OP_WRSR:
		cycle = HSFSTS_CYCLE_WR_STATUS;
		break;
	case SPINOR_OP_RDSR:
		cycle = HSFSTS_CYCLE_RD_STATUS;
		break;
	case SPINOR_OP_WRDI:
		return 0; /* ignore */
	case SPINOR_OP_BE_4K:
		/* Erase carries no data, so issue a single zero-length cycle */
		cycle = HSFSTS_CYCLE_4K_ERASE;
		ret = exec_sync_hwseq_xfer(regs, cycle, offset, 0);
		return ret;
	default:
		debug("Unknown cycle %x\n", op->cmd.opcode);
		return -EINVAL;
	}; /* NOTE(review): stray ';' after the switch - harmless */

	out = op->data.dir == SPI_MEM_DATA_OUT;
	buf = out ? (u8 *)op->data.buf.out : op->data.buf.in;
	page_size = flash->page_size ? : 256;

	/* Split into FIFO-sized, page-bounded chunks */
	while (len) {
		uint xfer_len = get_xfer_len(offset, len, page_size);

		if (out)
			fill_xfer_fifo(regs, buf, xfer_len);

		ret = exec_sync_hwseq_xfer(regs, cycle, offset, xfer_len);
		if (ret)
			return ret;

		if (!out)
			drain_xfer_fifo(regs, buf, xfer_len);

		offset += xfer_len;
		buf += xfer_len;
		len -= xfer_len;
	}

	return 0;
}

/* spi-mem exec_op entry point: dispatch to hardware or software sequencing */
static int ich_spi_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_plat *plat = dev_get_plat(bus);
	int ret;

	bootstage_start(BOOTSTAGE_ID_ACCUM_SPI, "fast_spi");
	if (plat->hwseq)
		ret = ich_spi_exec_op_hwseq(slave, op);
	else
		ret = ich_spi_exec_op_swseq(slave, op);
	bootstage_accum(BOOTSTAGE_ID_ACCUM_SPI);

	return ret;
}

#if CONFIG_IS_ENABLED(OF_REAL)
/**
 * ich_spi_get_basics() - Get basic information about the ICH device
 *
 * This works without probing any devices if requested.
 *
 * @bus: SPI controller to use
 * @can_probe: true if this function is allowed to probe the PCH
 * @pchp: Returns a pointer to the pch, or NULL if not found
 * @ich_versionp: Returns ICH version detected on success
 * @mmio_basep: Returns the address of the SPI registers on success
 * Return: 0 if OK, -EAGAIN if no PCH was found (e.g. the function cannot
 *	succeed without probing), possibly another error if
 *	pch_get_spi_base() fails
 */
static int ich_spi_get_basics(struct udevice *bus, bool can_probe,
			      struct udevice **pchp,
			      enum ich_version *ich_versionp, ulong *mmio_basep)
{
	struct udevice *pch = NULL;
	int ret = 0;

	/* Find a PCH if there is one */
	if (can_probe) {
		pch = dev_get_parent(bus);
		if (device_get_uclass_id(pch) != UCLASS_PCH) {
			uclass_first_device(UCLASS_PCH, &pch);
			if (!pch)
				; /* ignore this error since we don't need it */
		}
	}

	*ich_versionp = dev_get_driver_data(bus);
	if (*ich_versionp == ICHV_APL)
		*mmio_basep = dm_pci_read_bar32(bus, 0);
	else if (pch)
		ret = pch_get_spi_base(pch, mmio_basep);
	else
		return -EAGAIN;
	*pchp = pch;

	return ret;
}
#endif

/**
 * ich_get_mmap_bus() - Handle the get_mmap() method for a bus
 *
 * There are several cases to consider:
 * 1. Using of-platdata, in which case we have the BDF and can access the
 *    registers by reading the BAR
 * 2. Not using of-platdata, but still with a SPI controller that is on its own
 *    PCI PDF. In this case we read the BDF from the parent plat and again get
 *    the registers by reading the BAR
 * 3. Using a SPI controller that is a child of the PCH, in which case we try
 *    to find the registers by asking the PCH. This only works if the PCH has
 *    been probed (which it will be if the bus is probed since parents are
 *    probed before children), since the PCH may not have a PCI address until
 *    its parent (the PCI bus itself) has been probed. If you are using this
 *    method then you should make sure the SPI bus is probed.
 *
 * The first two cases are useful in early init. The last one is more useful
 * afterwards.
 */
static int ich_get_mmap_bus(struct udevice *bus, ulong *map_basep,
			    uint *map_sizep, uint *offsetp)
{
	pci_dev_t spi_bdf;
#if CONFIG_IS_ENABLED(OF_REAL)
	if (device_is_on_pci_bus(bus)) {
		/* Case 2: read the BDF from the parent plat */
		struct pci_child_plat *pplat;

		pplat = dev_get_parent_plat(bus);
		spi_bdf = pplat->devfn;
	} else {
		/* Case 3: ask the PCH for the register base */
		enum ich_version ich_version;
		struct fast_spi_regs *regs;
		struct udevice *pch;
		ulong mmio_base;
		int ret;

		ret = ich_spi_get_basics(bus, device_active(bus), &pch,
					 &ich_version, &mmio_base);
		if (ret)
			return log_msg_ret("basics", ret);
		regs = (struct fast_spi_regs *)mmio_base;

		return fast_spi_get_bios_mmap_regs(regs, map_basep, map_sizep,
						   offsetp);
	}
#else
	/* Case 1: of-platdata */
	struct ich_spi_plat *plat = dev_get_plat(bus);

	/*
	 * We cannot rely on plat->bdf being set up yet since this method can
	 * be called before the device is probed. Use the of-platdata directly
	 * instead.
	 */
	spi_bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
#endif

	return fast_spi_get_bios_mmap(spi_bdf, map_basep, map_sizep, offsetp);
}

/* get_mmap() uclass method: delegate to the parent bus */
static int ich_get_mmap(struct udevice *dev, ulong *map_basep, uint *map_sizep,
			uint *offsetp)
{
	struct udevice *bus = dev_get_parent(dev);

	return ich_get_mmap_bus(bus, map_basep, map_sizep, offsetp);
}

/*
 * Clamp op->data.nbytes so a single transfer does not cross an ICH_BOUNDARY
 * boundary and stays within the slave's max read/write size
 */
static int ich_spi_adjust_size(struct spi_slave *slave, struct spi_mem_op *op)
{
	unsigned int page_offset;
	int addr = op->addr.val;
	unsigned int byte_count = op->data.nbytes;

	/* Fast path when the boundary is a power of two */
	if (hweight32(ICH_BOUNDARY) == 1) {
		page_offset = addr & (ICH_BOUNDARY - 1);
	} else {
		u64 aux = addr;

		page_offset = do_div(aux, ICH_BOUNDARY);
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (slave->max_read_size) {
			op->data.nbytes = min(ICH_BOUNDARY - page_offset,
					      slave->max_read_size);
		}
	} else if (slave->max_write_size) {
		op->data.nbytes = min(ICH_BOUNDARY - page_offset,
				      slave->max_write_size);
	}

	/* Never grow beyond what the caller asked for */
	op->data.nbytes = min(op->data.nbytes, byte_count);

	return 0;
}

/*
 * Disable BIOS write-protect (via the PCH if present, else via the BCR
 * register) and optionally lock down the opcode configuration
 */
static int ich_protect_lockdown(struct udevice *dev)
{
	struct ich_spi_plat *plat = dev_get_plat(dev);
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret = -ENOSYS;

	/* Disable the BIOS write protect so write commands are allowed */
	if (priv->pch)
		ret = pch_set_spi_protect(priv->pch, false);
	if (ret == -ENOSYS) {
		/* No PCH support: poke the BIOS control register directly */
		u8 bios_cntl;

		bios_cntl = ich_readb(priv, priv->bcr);
		bios_cntl &= ~BIT(5);	/* clear Enable InSMM_STS (EISS) */
		bios_cntl |= 1;		/* Write Protect Disable (WPD) */
		ich_writeb(priv, bios_cntl, priv->bcr);
	} else if (ret) {
		debug("%s: Failed to disable write-protect: err=%d\n",
		      __func__, ret);
		return ret;
	}

	/* Lock down SPI controller settings if required */
	if (plat->lockdown) {
		ich_spi_config_opcode(dev);
		spi_lock_down(plat, priv->base);
	}

	return 0;
}

/*
 * Fill in the register-offset table in @ctlr for the detected controller
 * generation and work out the maximum supported speed
 */
static int ich_init_controller(struct udevice *dev,
			       struct ich_spi_plat *plat,
			       struct ich_spi_priv *ctlr)
{
	if (spl_phase() == PHASE_TPL) {
		/*
		 * NOTE(review): this local 'plat' shadows the parameter;
		 * both appear to refer to the same plat data - confirm
		 */
		struct ich_spi_plat *plat = dev_get_plat(dev);
		int ret;

		ret = fast_spi_early_init(plat->bdf, plat->mmio_base);
		if (ret)
			return ret;
	}

	ctlr->base = (void *)plat->mmio_base;
	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = ctlr->base;

		ctlr->opmenu = offsetof(struct ich7_spi_regs, opmenu);
		ctlr->menubytes = sizeof(ich7_spi->opmenu);
		ctlr->optype = offsetof(struct ich7_spi_regs, optype);
		ctlr->addr = offsetof(struct ich7_spi_regs, spia);
		ctlr->data = offsetof(struct ich7_spi_regs, spid);
		ctlr->databytes = sizeof(ich7_spi->spid);
		ctlr->status = offsetof(struct ich7_spi_regs, spis);
		ctlr->control = offsetof(struct ich7_spi_regs, spic);
		ctlr->bbar = offsetof(struct ich7_spi_regs, bbar);
		ctlr->preop = offsetof(struct ich7_spi_regs, preop);
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = ctlr->base;

		ctlr->opmenu = offsetof(struct ich9_spi_regs, opmenu);
		ctlr->menubytes = sizeof(ich9_spi->opmenu);
		ctlr->optype = offsetof(struct ich9_spi_regs, optype);
		ctlr->addr = offsetof(struct ich9_spi_regs, faddr);
		ctlr->data = offsetof(struct ich9_spi_regs, fdata);
		ctlr->databytes = sizeof(ich9_spi->fdata);
		ctlr->status = offsetof(struct ich9_spi_regs, ssfs);
		ctlr->control = offsetof(struct ich9_spi_regs, ssfc);
		ctlr->speed = ctlr->control + 2;
		ctlr->bbar = offsetof(struct ich9_spi_regs, bbar);
		ctlr->preop = offsetof(struct ich9_spi_regs, preop);
		ctlr->bcr = offsetof(struct ich9_spi_regs, bcr);
		ctlr->pr = &ich9_spi->pr[0];
	} else if (plat->ich_version == ICHV_APL) {
		/* APL uses the hardware sequencer; no offsets needed */
	} else {
		debug("ICH SPI: Unrecognised ICH version %d\n",
		      plat->ich_version);
		return -EINVAL;
	}

	/* Work out the maximum speed we can support */
	ctlr->max_speed = 20000000;
	if (plat->ich_version == ICHV_9 && ich9_can_do_33mhz(dev))
		ctlr->max_speed = 33000000;
	debug("ICH SPI: Version ID %d detected at %lx, speed %ld\n",
	      plat->ich_version, plat->mmio_base, ctlr->max_speed);

	ich_set_bbar(ctlr, 0);

	return 0;
}

/* Map the BIOS region just below 4GB as write-protect cacheable via MTRR */
static int ich_cache_bios_region(struct udevice *dev)
{
	ulong map_base;
	uint map_size;
	uint offset;
	ulong base;
	int ret;

	ret = ich_get_mmap_bus(dev, &map_base, &map_size, &offset);
	if (ret)
		return ret;

	/* Don't use WRBACK since we are not supposed to write to SPI flash */
	base = SZ_4G - map_size;
	mtrr_set_next_var(MTRR_TYPE_WRPROT, base, map_size);
	log_debug("BIOS cache base=%lx, size=%x\n", base, (uint)map_size);

	return 0;
}

/* Driver probe: set up the controller, then cache or unlock as appropriate */
static int ich_spi_probe(struct udevice *dev)
{
	struct ich_spi_plat *plat = dev_get_plat(dev);
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret;

	ret = ich_init_controller(dev, plat, priv);
	if (ret)
		return ret;

	if (spl_phase() == PHASE_TPL) {
		/* Cache the BIOS to speed things up */
		ret = ich_cache_bios_region(dev);
		if (ret)
			return ret;
	} else {
		ret = ich_protect_lockdown(dev);
		if (ret)
			return ret;
	}
	priv->cur_speed = priv->max_speed;

	return 0;
}

static int ich_spi_remove(struct udevice *bus)
{
	/*
	 * Configure SPI controller so that the Linux MTD driver can fully
	 * access the SPI NOR chip
	 */
	ich_spi_config_opcode(bus);

	return 0;
}

/* Record the requested speed; it is applied per-transaction in swseq */
static int ich_spi_set_speed(struct udevice *bus, uint speed)
{
	struct ich_spi_priv *priv = dev_get_priv(bus);

	priv->cur_speed = speed;

	return 0;
}

/* Mode is fixed by the controller; nothing to configure */
static int ich_spi_set_mode(struct udevice *bus, uint mode)
{
	debug("%s: mode=%d\n", __func__, mode);

	return 0;
}

/* Apply per-child transfer-size and mode limits before the child probes */
static int ich_spi_child_pre_probe(struct udevice *dev)
{
	struct udevice *bus = dev_get_parent(dev);
	struct ich_spi_plat *plat = dev_get_plat(bus);
	struct ich_spi_priv *priv = dev_get_priv(bus);
	struct spi_slave *slave = dev_get_parent_priv(dev);

	/*
	 * Yes this controller can only transfer a small number of bytes at
	 * once! The limit is typically 64 bytes. For hardware sequencing a
	 * loop is used to get around this.
	 */
	if (!plat->hwseq) {
		slave->max_read_size = priv->databytes;
		slave->max_write_size = priv->databytes;
	}
	/*
	 * ICH 7 SPI controller only supports array read command
	 * and byte program command for SST flash
	 */
	if (plat->ich_version == ICHV_7)
		slave->mode = SPI_RX_SLOW | SPI_TX_BYTE;

	return 0;
}

/* Decode platform data from the devicetree or of-platdata */
static int ich_spi_of_to_plat(struct udevice *dev)
{
	struct ich_spi_plat *plat = dev_get_plat(dev);

#if CONFIG_IS_ENABLED(OF_REAL)
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret;

	ret = ich_spi_get_basics(dev, true, &priv->pch, &plat->ich_version,
				 &plat->mmio_base);
	if (ret)
		return log_msg_ret("basics", ret);
	plat->lockdown = dev_read_bool(dev, "intel,spi-lock-down");
	/*
	 * Use an int so that the property is present in of-platdata even
	 * when false.
	 */
	plat->hwseq = dev_read_u32_default(dev, "intel,hardware-seq", 0);
#else
	plat->ich_version = ICHV_APL;
	plat->mmio_base = plat->dtplat.early_regs[0];
	plat->bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
	plat->hwseq = plat->dtplat.intel_hardware_seq;
#endif
	debug("%s: mmio_base=%lx\n", __func__, plat->mmio_base);

	return 0;
}

static const struct spi_controller_mem_ops ich_controller_mem_ops = {
	.adjust_op_size	= ich_spi_adjust_size,
	.supports_op	= NULL,
	.exec_op	= ich_spi_exec_op,
};

static const struct dm_spi_ops ich_spi_ops = {
	/* xfer is not supported */
	.set_speed	= ich_spi_set_speed,
	.set_mode	= ich_spi_set_mode,
	.mem_ops	= &ich_controller_mem_ops,
	.get_mmap	= ich_get_mmap,
	/*
	 * cs_info is not needed, since we require all chip selects to be
	 * in the device tree explicitly
	 */
};

static const struct udevice_id ich_spi_ids[] = {
	{ .compatible = "intel,ich7-spi", ICHV_7 },
	{ .compatible = "intel,ich9-spi", ICHV_9 },
	{ .compatible = "intel,fast-spi", ICHV_APL },
	{ }
};

U_BOOT_DRIVER(intel_fast_spi) = {
	.name	= "intel_fast_spi",
	.id	= UCLASS_SPI,
	.of_match = ich_spi_ids,
	.ops	= &ich_spi_ops,
	.of_to_plat = ich_spi_of_to_plat,
	.plat_auto	= sizeof(struct ich_spi_plat),
	.priv_auto	= sizeof(struct ich_spi_priv),
	.child_pre_probe = ich_spi_child_pre_probe,
	.probe	= ich_spi_probe,
	.remove	= ich_spi_remove,
	.flags	= DM_FLAG_OS_PREPARE,
};