/*
 * Support for IDE interfaces on Celleb platform
 *
 * (C) Copyright 2006 TOSHIBA CORPORATION
 *
 * This code is based on drivers/ide/pci/siimage.c:
 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003 Red Hat
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/init.h>

/* PCI device ID of the Toshiba SCC ATA function */
#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA	0x01b4

#define SCC_PATA_NAME	"scc IDE"

/* TDVHSEL register: per-device timing select bits (see scc_set_dma_mode()) */
#define TDVHSEL_MASTER	0x00000001
#define TDVHSEL_SLAVE	0x00000004

/* MODE register: written once at init time in init_setup_scc() */
#define MODE_JCUSFEN	0x00000080

/* CCKCTRL (clock control) register bits, at ctl_base + 0xff0 */
#define CCKCTRL_ATARESET	0x00040000
#define CCKCTRL_BUFCNT		0x00020000
#define CCKCTRL_CRST		0x00010000
#define CCKCTRL_OCLKEN		0x00000100
#define CCKCTRL_ATACLKOEN	0x00000002	/* set => ATA clock is 133MHz, clear => 100MHz */
#define CCKCTRL_LCLKEN		0x00000001

/* bit in the DMA command register cleared to stop an in-flight transfer */
#define QCHCD_IOS_SS	0x00000001

#define QCHSD_STPDIAG	0x00020000

/* interrupt mask written to dma_base + 0x010 at init time */
#define INTMASK_MSK	0xD1000012
/* INTSTS (interrupt status) register bits, at dma_base + 0x014 */
#define INTSTS_SERROR	0x80000000
#define INTSTS_PRERR	0x40000000
#define INTSTS_RERR	0x10000000
#define INTSTS_ICERR	0x01000000
#define INTSTS_BMSINT	0x00000010
#define INTSTS_BMHE	0x00000008
#define INTSTS_IOIRQS	0x00000004
#define INTSTS_INTRQ	0x00000002
#define INTSTS_ACTEINT	0x00000001

#define ECMODE_VALUE	0x01

/*
 * Per-port bookkeeping: MMIO base addresses of the CTRL and BMID BARs
 * (stored as unsigned long, cast back to __iomem pointers at use sites)
 * and the ide_host handle needed for teardown in scc_remove().
 */
static struct scc_ports {
	unsigned long ctl, dma;
	struct ide_host *host;	/* for removing port from system */
} scc_ports[MAX_HWIFS];

/*
 * Timing tables below are indexed [clock][mode]: row 0 is for a 100MHz
 * ATA clock, row 1 for 133MHz (selected by CCKCTRL_ATACLKOEN); the column
 * is the PIO or UDMA mode number.
 */

/* PIO transfer mode table */
/* JCHST */
static unsigned long JCHSTtbl[2][7] = {
	{0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}	/* 133MHz */
};

/* JCHHT */
static unsigned long JCHHTtbl[2][7] = {
	{0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}	/* 133MHz */
};

/* JCHCT */
static unsigned long JCHCTtbl[2][7] = {
	{0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},	/* 100MHz */
	{0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}	/* 133MHz */
};


/* DMA transfer mode table */
/* JCHDCTM/JCHDCTS */
static unsigned long JCHDCTxtbl[2][7] = {
	{0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},	/* 100MHz */
	{0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}	/* 133MHz */
};

/* JCSTWTM/JCSTWTS */
static unsigned long JCSTWTxtbl[2][7] = {
	{0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},	/* 100MHz */
	{0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}	/* 133MHz */
};

/* JCTSS */
static unsigned long JCTSStbl[2][7] = {
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},	/* 100MHz */
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}	/* 133MHz */
};

/* JCENVT */
static unsigned long JCENVTtbl[2][7] = {
	{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},	/* 100MHz */
	{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}	/* 133MHz */
};

/* JCACTSELS/JCACTSELM */
static unsigned long JCACTSELtbl[2][7] = {
	{0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},	/* 100MHz */
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}	/* 133MHz */
};


/*
 * The SCC exposes the byte-wide ATA task-file registers through 32-bit
 * big-endian MMIO slots, so all "inb/outb"-style accessors below use
 * in_be32/out_be32 and truncate to/widen from u8.
 */
static u8 scc_ide_inb(unsigned long port)
{
	u32 data = in_be32((void*)port);
	return (u8)data;
}

/*
 * Write an ATA command.  The eieio() barriers order the MMIO accesses
 * on PowerPC; the dummy read at dma_base + 0x01c between them is
 * presumably there to flush the posted write to the device —
 * NOTE(review): confirm against the SCC hardware manual.
 */
static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
{
	out_be32((void *)hwif->io_ports.command_addr, cmd);
	eieio();
	in_be32((void *)(hwif->dma_base + 0x01c));
	eieio();
}

/* Read the ATA Status register (clears pending INTRQ per ATA semantics). */
static u8 scc_read_status(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)hwif->io_ports.status_addr);
}

/* Read the ATA Alternate Status register (does not ack the interrupt). */
static u8 scc_read_altstatus(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
}

/* Read the bus-master DMA status register (dma_base + 4). */
static u8 scc_dma_sff_read_status(ide_hwif_t *hwif)
{
	return (u8)in_be32((void *)(hwif->dma_base + 4));
}

/* Write the Device Control register; same barrier/flush pattern as
 * scc_exec_command() above. */
static void scc_write_devctl(ide_hwif_t *hwif, u8 ctl)
{
	out_be32((void *)hwif->io_ports.ctl_addr, ctl);
	eieio();
	in_be32((void *)(hwif->dma_base + 0x01c));
	eieio();
}

/* PIO data-in, 16 bits at a time.  The data register returns the word in
 * the low half of a big-endian 32-bit read; le16_to_cpu() restores the
 * ATA little-endian byte order. */
static void scc_ide_insw(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		*ptr++ = le16_to_cpu(in_be32((void*)port));
	}
}

/* PIO data-in, 32 bits per iteration, implemented as two 16-bit reads
 * because the data port only delivers 16 bits per access. */
static void scc_ide_insl(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		*ptr++ = le16_to_cpu(in_be32((void*)port));
		*ptr++ = le16_to_cpu(in_be32((void*)port));
	}
}

static void scc_ide_outb(u8 addr, unsigned long port)
{
	out_be32((void*)port, addr);
}

/* PIO data-out counterpart of scc_ide_insw(). */
static void
scc_ide_outsw(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		out_be32((void*)port, cpu_to_le16(*ptr++));
	}
}

/* PIO data-out counterpart of scc_ide_insl(). */
static void
scc_ide_outsl(unsigned long port, void *addr, u32 count)
{
	u16 *ptr = (u16 *)addr;
	while (count--) {
		out_be32((void*)port, cpu_to_le16(*ptr++));
		out_be32((void*)port, cpu_to_le16(*ptr++));
	}
}

/**
 *	scc_set_pio_mode	-	set host controller for PIO mode
 *	@hwif: port
 *	@drive: drive
 *
 *	Load the timing settings for this device mode into the
 *	controller.  The clock-rate row of the timing tables is chosen
 *	by the CCKCTRL_ATACLKOEN bit (133MHz when set, 100MHz otherwise).
 */

static void scc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	struct scc_ports *ports = ide_get_hwifdata(hwif);
	unsigned long ctl_base = ports->ctl;
	unsigned long cckctrl_port = ctl_base + 0xff0;
	unsigned long piosht_port = ctl_base + 0x000;
	unsigned long pioct_port = ctl_base + 0x004;
	unsigned long reg;
	int offset;
	const u8 pio = drive->pio_mode - XFER_PIO_0;

	reg = in_be32((void __iomem *)cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN) {
		offset = 1;	/* 133MHz */
	} else {
		offset = 0;	/* 100MHz */
	}
	/* PIOSHT holds JCHST in its high half-word and JCHHT in the low */
	reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
	out_be32((void __iomem *)piosht_port, reg);
	reg = JCHCTtbl[offset][pio];
	out_be32((void __iomem *)pioct_port, reg);
}

/**
 *	scc_set_dma_mode	-	set host controller for DMA mode
 *	@hwif: port
 *	@drive: drive
 *
 *	Load the timing settings for this device mode into the
 *	controller.  Master and slave devices use separate timing
 *	registers (mdmact/mcrcst vs sdmact/scrcst) and separate
 *	JCACTSEL fields inside TDVHSEL.
 */

static void scc_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	struct scc_ports *ports = ide_get_hwifdata(hwif);
	unsigned long ctl_base = ports->ctl;
	unsigned long cckctrl_port = ctl_base + 0xff0;
	unsigned long mdmact_port = ctl_base + 0x008;
	unsigned long mcrcst_port = ctl_base + 0x00c;
	unsigned long sdmact_port = ctl_base + 0x010;
	unsigned long scrcst_port = ctl_base + 0x014;
	unsigned long udenvt_port = ctl_base + 0x018;
	unsigned long tdvhsel_port = ctl_base + 0x020;
	int is_slave = drive->dn & 1;
	int offset, idx;
	unsigned long reg;
	unsigned long jcactsel;
	const u8 speed = drive->dma_mode;

	reg = in_be32((void __iomem *)cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN) {
		offset = 1;	/* 133MHz */
	} else {
		offset = 0;	/* 100MHz */
	}

	idx = speed - XFER_UDMA_0;

	jcactsel = JCACTSELtbl[offset][idx];
	if (is_slave) {
		out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
		out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
		/* slave JCACTSEL lives at bit 2 (TDVHSEL_SLAVE), master at bit 0 */
		jcactsel = jcactsel << 2;
		out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
	} else {
		out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
		out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
		out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
	}
	/* UDENVT holds JCTSS in its high half-word and JCENVT in the low */
	reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
	out_be32((void __iomem *)udenvt_port, reg);
}

/* Enable/disable the per-device DMA-capable bit (bit 5 for the master,
 * bit 6 for the slave) in the bus-master DMA status register. */
static void scc_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 unit = drive->dn & 1;
	u8 dma_stat = scc_dma_sff_read_status(hwif);

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	scc_ide_outb(dma_stat, hwif->dma_base + 4);
}

/**
 *	scc_dma_setup	-	begin a DMA phase
 *	@drive: target device
 *	@cmd: command
 *
 *	Build an IDE DMA PRD (IDE speak for scatter gather table)
 *	and then set up the DMA transfer registers.
 *
 *	Returns 0 on success. If a PIO fallback is required then 1
 *	is returned.
 */

static int scc_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	/* ATA_DMA_WR = bus-master writes to memory, i.e. a device-to-host
	 * transfer, so it is set for reads and cleared for writes */
	u32 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
	u8 dma_stat;

	/* fall back to pio! */
	if (ide_build_dmatable(drive, cmd) == 0)
		return 1;

	/* PRD table */
	out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);

	/* specify r/w */
	out_be32((void __iomem *)hwif->dma_base, rw);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = scc_dma_sff_read_status(hwif);

	/* clear INTR & ERROR flags */
	out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);

	return 0;
}

/* Kick off the transfer programmed by scc_dma_setup() by setting the
 * start bit in the bus-master DMA command register. */
static void scc_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_cmd = scc_ide_inb(hwif->dma_base);

	/* start DMA */
	scc_ide_outb(dma_cmd | 1, hwif->dma_base);
}

/* Standard SFF-style DMA stop: clear the start bit, latch the status,
 * ack INTR/ERROR.  Returns 0 when status showed exactly "interrupt,
 * not active, no error" (value 4 in the low 3 bits); otherwise returns
 * 0x10 | status as an error code. */
static int __scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat, dma_cmd;

	/* get DMA command mode */
	dma_cmd = scc_ide_inb(hwif->dma_base);
	/* stop DMA */
	scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
	/* get DMA status */
	dma_stat = scc_dma_sff_read_status(hwif);
	/* clear the INTR & ERROR bits */
	scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
	/* verify good DMA status */
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

/**
 *	scc_dma_end	-	Stop DMA
 *	@drive: IDE drive
 *
 *	Check and clear INT Status register.
 *	Then call __scc_dma_end().
 *
 *	The first half detects a silent-data-loss condition (UDMA5/6
 *	disk transfer finished with neither a device error nor ACTEINT
 *	pending) and emulates a DMA error so the command is retried;
 *	the while loop drains and acks every latched interrupt source,
 *	aborting the in-flight transfer (clearing QCHCD_IOS_SS) or
 *	resetting the port where required.
 *	NOTE(review): this looks like a hardware-errata workaround for
 *	high-speed UDMA — confirm against the SCC documentation.
 */

static int scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	void __iomem *dma_base = (void __iomem *)hwif->dma_base;
	unsigned long intsts_port = hwif->dma_base + 0x014;
	u32 reg;
	int dma_stat, data_loss = 0;
	/* NOTE(review): retry is static and only ever incremented — once a
	 * data loss has happened, every later occurrence takes the crc_count
	 * branch.  It is never reset; confirm that is intentional. */
	static int retry = 0;

	/* We don't check non ide_disk because it is limited to UDMA4 */
	if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
	      & ATA_ERR) &&
	    drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
		reg = in_be32((void __iomem *)intsts_port);
		if (!(reg & INTSTS_ACTEINT)) {
			printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
			       drive->name);
			data_loss = 1;
			if (retry++) {
				struct request *rq = hwif->rq;
				ide_drive_t *drive;	/* shadows the parameter on purpose */
				int i;

				/* ERROR_RESET and drive->crc_count are needed
				 * to reduce DMA transfer mode in retry process.
				 */
				if (rq)
					rq->errors |= ERROR_RESET;

				ide_port_for_each_dev(i, drive, hwif)
					drive->crc_count++;
			}
		}
	}

	/* ack every pending interrupt cause until INTSTS is quiescent */
	while (1) {
		reg = in_be32((void __iomem *)intsts_port);

		if (reg & INTSTS_SERROR) {
			printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_PRERR) {
			u32 maea0, maec0;
			unsigned long ctl_base = hwif->config_data;

			/* error address/command capture registers */
			maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
			maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));

			printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);

			out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_RERR) {
			printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);

			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
			continue;
		}

		if (reg & INTSTS_ICERR) {
			out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);

			printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
			continue;
		}

		if (reg & INTSTS_BMSINT) {
			/* internal bus error: only recoverable by a port reset */
			printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
			out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);

			ide_do_reset(drive);
			continue;
		}

		if (reg & INTSTS_BMHE) {
			out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
			continue;
		}

		if (reg & INTSTS_ACTEINT) {
			out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
			continue;
		}

		if (reg & INTSTS_IOIRQS) {
			out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
			continue;
		}
		break;
	}

	dma_stat = __scc_dma_end(drive);
	if (data_loss)
		dma_stat |= 2;	/* emulate DMA error (to retry command) */
	return dma_stat;
}

/* returns 1 if dma irq issued, 0 otherwise */
static int scc_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	/* void-pointer arithmetic (GNU extension): byte offset 0x14 = INTSTS */
	u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);

	if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
	     & ATA_ERR) &&
	    (int_stat & INTSTS_INTRQ))
		return 1;

	if (int_stat & INTSTS_IOIRQS)
		return 1;

	return 0;
}

/* Restrict non-disk (ATAPI) devices to UDMA4: modes above that are only
 * supported for disks on this controller (see the check in scc_dma_end()). */
static u8 scc_udma_filter(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 mask = hwif->ultra_mask;

	if ((drive->media != ide_disk) && (mask & 0xE0)) {
		printk(KERN_INFO "%s: limit %s to UDMA4\n",
		       SCC_PATA_NAME, drive->name);
		mask = ATA_UDMA4;
	}

	return mask;
}

/**
 *	setup_mmio_scc	-	map CTRL/BMID region
 *	@dev: PCI device we are configuring
 *	@name: device name
 *
 *	Claims a free scc_ports[] slot, reserves PCI BARs 0 and 1 and
 *	ioremaps them (BAR0 = CTRL block, BAR1 = BMID/DMA block), then
 *	stashes the slot in the PCI drvdata.
 *
 *	Returns 1 on success (callers only test for < 0), -ENOMEM on
 *	any failure, with the regions left reserved on ioremap failure —
 *	NOTE(review): the fail paths do not call
 *	pci_release_selected_regions(); confirm whether that leak is
 *	acceptable here.
 */

static int setup_mmio_scc (struct pci_dev *dev, const char *name)
{
	void __iomem *ctl_addr;
	void __iomem *dma_addr;
	int i, ret;

	/* find an unused port slot (ctl == 0 means free) */
	for (i = 0; i < MAX_HWIFS; i++) {
		if (scc_ports[i].ctl == 0)
			break;
	}
	if (i >= MAX_HWIFS)
		return -ENOMEM;

	/* (1 << 2) - 1 == mask for BARs 0 and 1 */
	ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
	if (ret < 0) {
		printk(KERN_ERR "%s: can't reserve resources\n", name);
		return ret;
	}

	ctl_addr = pci_ioremap_bar(dev, 0);
	if (!ctl_addr)
		goto fail_0;

	dma_addr = pci_ioremap_bar(dev, 1);
	if (!dma_addr)
		goto fail_1;

	pci_set_master(dev);
	scc_ports[i].ctl = (unsigned long)ctl_addr;
	scc_ports[i].dma = (unsigned long)dma_addr;
	pci_set_drvdata(dev, (void *) &scc_ports[i]);

	return 1;

 fail_1:
	iounmap(ctl_addr);
 fail_0:
	return -ENOMEM;
}

/* Build the ide_hw port layout (the nine task-file registers live at
 * dma_base + 0x20, spaced 4 bytes apart) and register the host. */
static int scc_ide_setup_pci_device(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	struct ide_host *host;
	struct ide_hw hw, *hws[] = { &hw };
	int i, rc;

	memset(&hw, 0, sizeof(hw));
	/* indices 0..8: data..status plus ctl */
	for (i = 0; i <= 8; i++)
		hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
	hw.irq = dev->irq;
	hw.dev = &dev->dev;

	rc = ide_host_add(d, hws, 1, &host);
	if (rc)
		return rc;

	ports->host = host;

	return 0;
}

/**
 *	init_setup_scc	-	set up an SCC PATA Controller
 *	@dev: PCI device
 *	@d: IDE port info
 *
 *	Perform the initial set up for this device: enable the PCI
 *	device, map its BARs, bring the clocks up and the controller
 *	out of reset in the documented write sequence, program the
 *	mode/interrupt-mask registers, then register the IDE host.
 */

static int __devinit init_setup_scc(struct pci_dev *dev,
				    const struct ide_port_info *d)
{
	unsigned long ctl_base;
	unsigned long dma_base;
	unsigned long cckctrl_port;
	unsigned long intmask_port;
	unsigned long mode_port;
	unsigned long ecmode_port;
	u32 reg = 0;
	struct scc_ports *ports;
	int rc;

	rc = pci_enable_device(dev);
	if (rc)
		goto end;

	rc = setup_mmio_scc(dev, d->name);
	if (rc < 0)
		goto end;

	ports = pci_get_drvdata(dev);
	ctl_base = ports->ctl;
	dma_base = ports->dma;
	cckctrl_port = ctl_base + 0xff0;
	intmask_port = dma_base + 0x010;
	mode_port = ctl_base + 0x024;
	ecmode_port = ctl_base + 0xf00;

	/* controller initialization: enable clocks one bit at a time,
	 * then release the core reset (CRST) and the ATA reset */
	reg = 0;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_ATACLKOEN;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
	out_be32((void*)cckctrl_port, reg);
	reg |= CCKCTRL_CRST;
	out_be32((void*)cckctrl_port, reg);

	/* wait (without bound) for the CRST bit to read back as set */
	for (;;) {
		reg = in_be32((void*)cckctrl_port);
		if (reg & CCKCTRL_CRST)
			break;
		udelay(5000);
	}

	reg |= CCKCTRL_ATARESET;
	out_be32((void*)cckctrl_port, reg);

	out_be32((void*)ecmode_port, ECMODE_VALUE);
	out_be32((void*)mode_port, MODE_JCUSFEN);
	out_be32((void*)intmask_port, INTMASK_MSK);

	rc = scc_ide_setup_pci_device(dev, d);

 end:
	return rc;
}

/* Write the task-file registers selected by @valid, one MMIO slot each. */
static void scc_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;

	if (valid & IDE_VALID_FEATURE)
		scc_ide_outb(tf->feature, io_ports->feature_addr);
	if (valid & IDE_VALID_NSECT)
		scc_ide_outb(tf->nsect, io_ports->nsect_addr);
	if (valid & IDE_VALID_LBAL)
		scc_ide_outb(tf->lbal, io_ports->lbal_addr);
	if (valid & IDE_VALID_LBAM)
		scc_ide_outb(tf->lbam, io_ports->lbam_addr);
	if (valid & IDE_VALID_LBAH)
		scc_ide_outb(tf->lbah, io_ports->lbah_addr);
	if (valid & IDE_VALID_DEVICE)
		scc_ide_outb(tf->device, io_ports->device_addr);
}

/* Read back the task-file registers selected by @valid (the Error
 * register shares its address with Feature). */
static void scc_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
{
	struct ide_io_ports *io_ports = &drive->hwif->io_ports;

	if (valid & IDE_VALID_ERROR)
		tf->error  = scc_ide_inb(io_ports->feature_addr);
	if (valid & IDE_VALID_NSECT)
		tf->nsect  = scc_ide_inb(io_ports->nsect_addr);
	if (valid & IDE_VALID_LBAL)
		tf->lbal   = scc_ide_inb(io_ports->lbal_addr);
	if (valid & IDE_VALID_LBAM)
		tf->lbam   = scc_ide_inb(io_ports->lbam_addr);
	if (valid & IDE_VALID_LBAH)
		tf->lbah   = scc_ide_inb(io_ports->lbah_addr);
	if (valid & IDE_VALID_DEVICE)
		tf->device = scc_ide_inb(io_ports->device_addr);
}

/* PIO data-in for a command: len is rounded up to an even byte count
 * (the data port transfers whole 16-bit words). */
static void scc_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
			   void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		scc_ide_insl(data_addr, buf, len / 4);

		/* trailing 16-bit word when length is not a multiple of 4 */
		if ((len & 3) >= 2)
			scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_insw(data_addr, buf, len / 2);
}

/* PIO data-out counterpart of scc_input_data(). */
static void scc_output_data(ide_drive_t *drive,  struct ide_cmd *cmd,
			    void *buf, unsigned int len)
{
	unsigned long data_addr = drive->hwif->io_ports.data_addr;

	len++;

	if (drive->io_32bit) {
		scc_ide_outsl(data_addr, buf, len / 4);

		if ((len & 3) >= 2)
			scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
	} else
		scc_ide_outsw(data_addr, buf, len / 2);
}

/**
 *	init_mmio_iops_scc	-	set up the iops for MMIO
 *	@hwif: interface to set up
 *
 *	Point the hwif at the per-port MMIO bases saved by
 *	setup_mmio_scc(): dma_base for the BMID block and config_data
 *	for the CTRL block.
 */

static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	struct scc_ports *ports = pci_get_drvdata(dev);
	unsigned long dma_base = ports->dma;

	ide_set_hwifdata(hwif, ports);

	hwif->dma_base = dma_base;
	hwif->config_data = ports->ctl;
}

/**
 *	init_iops_scc	-	set up iops
 *	@hwif: interface to set up
 *
 *	Do the basic setup for the SCC hardware interface
 *	and then do the MMIO setup.  Skipped entirely when no drvdata
 *	was attached (i.e. setup_mmio_scc() never ran for this device).
 */

static void __devinit init_iops_scc(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	hwif->hwif_data = NULL;
	if (pci_get_drvdata(dev) == NULL)
		return;
	init_mmio_iops_scc(hwif);
}

/* Allocate the generic IDE DMA engine (PRD table) for this port. */
static int __devinit scc_init_dma(ide_hwif_t *hwif,
				  const struct ide_port_info *d)
{
	return ide_allocate_dma_engine(hwif);
}

/* Cable detection is a no-op: the platform always has an 80-wire cable. */
static u8 scc_cable_detect(ide_hwif_t *hwif)
{
	return ATA_CBL_PATA80;
}

/**
 *	init_hwif_scc	-	set up hwif
 *	@hwif: interface to set up
 *
 *	We do the basic set up of the interface structure. The SCC
 *	requires several custom handlers so we override the default
 *	ide DMA handlers appropriately.  The UDMA ceiling follows the
 *	ATA clock rate read back from CCKCTRL.
 */

static void __devinit init_hwif_scc(ide_hwif_t *hwif)
{
	/* PTERADD: tell the controller where the PRD table lives */
	out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);

	if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
		hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
	else
		hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
}

/* Task-file access ops: everything routed through the 32-bit MMIO
 * accessors above; device select stays generic. */
static const struct ide_tp_ops scc_tp_ops = {
	.exec_command		= scc_exec_command,
	.read_status		= scc_read_status,
	.read_altstatus		= scc_read_altstatus,
	.write_devctl		= scc_write_devctl,

	.dev_select		= ide_dev_select,
	.tf_load		= scc_tf_load,
	.tf_read		= scc_tf_read,

	.input_data		= scc_input_data,
	.output_data		= scc_output_data,
};

static const struct ide_port_ops scc_port_ops = {
	.set_pio_mode		= scc_set_pio_mode,
	.set_dma_mode		= scc_set_dma_mode,
	.udma_filter		= scc_udma_filter,
	.cable_detect		= scc_cable_detect,
};

/* DMA ops: custom setup/start/end/test_irq; lost-irq and timer-expiry
 * handling come from the generic SFF helpers. */
static const struct ide_dma_ops scc_dma_ops = {
	.dma_host_set		= scc_dma_host_set,
	.dma_setup		= scc_dma_setup,
	.dma_start		= scc_dma_start,
	.dma_end		= scc_dma_end,
	.dma_test_irq		= scc_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timer_expiry	= ide_dma_sff_timer_expiry,
	.dma_sff_read_status	= scc_dma_sff_read_status,
};

static const struct ide_port_info scc_chipset __devinitdata = {
	.name		= "sccIDE",
	.init_iops	= init_iops_scc,
	.init_dma	= scc_init_dma,
	.init_hwif	= init_hwif_scc,
	.tp_ops		= &scc_tp_ops,
	.port_ops	= &scc_port_ops,
	.dma_ops	= &scc_dma_ops,
	.host_flags	= IDE_HFLAG_SINGLE,
	.irq_flags	= IRQF_SHARED,
	.pio_mask	= ATA_PIO4,
	.chipset	= ide_pci,
};

/**
 *	scc_init_one	-	pci layer discovery entry
 *	@dev: PCI device
 *	@id: ident table entry
 *
 *	Called by the PCI code when it finds an SCC PATA controller.
 *	We then use the IDE PCI generic helper to do most of the work.
 */

static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	return init_setup_scc(dev, &scc_chipset);
}

/**
 *	scc_remove	-	pci layer remove entry
 *	@dev: PCI device
 *
 *	Called by the PCI code when it removes an SCC PATA controller.
 *	Unregisters the IDE host, unmaps both BARs, releases the PCI
 *	regions and zeroes the scc_ports[] slot so setup_mmio_scc()
 *	can reuse it.
 */

static void __devexit scc_remove(struct pci_dev *dev)
{
	struct scc_ports *ports = pci_get_drvdata(dev);
	struct ide_host *host = ports->host;

	ide_host_remove(host);

	iounmap((void*)ports->dma);
	iounmap((void*)ports->ctl);
	pci_release_selected_regions(dev, (1 << 2) - 1);
	memset(ports, 0, sizeof(*ports));
}

static const struct pci_device_id scc_pci_tbl[] = {
	{ PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, scc_pci_tbl);

static struct pci_driver scc_pci_driver = {
	.name		= "SCC IDE",
	.id_table	= scc_pci_tbl,
	.probe		= scc_init_one,
	.remove		= __devexit_p(scc_remove),
};

static int __init scc_ide_init(void)
{
	return ide_pci_register_driver(&scc_pci_driver);
}

static void __exit scc_ide_exit(void)
{
	pci_unregister_driver(&scc_pci_driver);
}

module_init(scc_ide_init);
module_exit(scc_ide_exit);

MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
MODULE_LICENSE("GPL");