/*
 *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/libata.h>
#include <linux/highmem.h>

#include "libata.h"

static struct workqueue_struct *ata_sff_wq;

const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_sff_qc_issue,
	.qc_fill_rtf		= ata_sff_qc_fill_rtf,

	.freeze			= ata_sff_freeze,
	.thaw			= ata_sff_thaw,
	.prereset		= ata_sff_prereset,
	.softreset		= ata_sff_softreset,
	.hardreset		= sata_sff_hardreset,
	.postreset		= ata_sff_postreset,
	.error_handler		= ata_sff_error_handler,

	.sff_dev_select		= ata_sff_dev_select,
	.sff_check_status	= ata_sff_check_status,
	.sff_tf_load		= ata_sff_tf_load,
	.sff_tf_read		= ata_sff_tf_read,
	.sff_exec_command	= ata_sff_exec_command,
	.sff_data_xfer		= ata_sff_data_xfer,
	.sff_drain_fifo		= ata_sff_drain_fifo,

	.lost_interrupt		= ata_sff_lost_interrupt,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);

/**
 * ata_sff_check_status - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * and returns its value.  This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_sff_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);

/**
 * ata_sff_altstatus - Read device alternate status reg
 * @ap: port where the device is
 *
 * Reads ATA taskfile alternate status register for
 * currently-selected device and returns its value.
 *
 * Note: may NOT be used as the check_altstatus() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_sff_altstatus(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		return ap->ops->sff_check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}

/**
 * ata_sff_irq_status - Check if the device is busy
 * @ap: port where the device is
 *
 * Determine if the port is currently busy.  Uses altstatus
 * if available in order to avoid clearing shared IRQ status
 * when finding an IRQ source.  Fortunately for us, non-ctl-capable
 * devices don't share interrupt lines.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		status = ata_sff_altstatus(ap);
		/* Not us: We are busy */
		if (status & ATA_BUSY)
			return status;
	}
	/* Clear INTRQ latch */
	status = ap->ops->sff_check_status(ap);
	return status;
}
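/*
 * Note on the two status registers: reading the Status register (as
 * ata_sff_check_status() does) acknowledges and clears the device's
 * INTRQ assertion, while reading the Alternate Status register (as
 * ata_sff_altstatus() does) returns the same bits without touching
 * INTRQ.  That is why ata_sff_irq_status() above peeks at altstatus
 * first: on a shared IRQ line we must not ack an interrupt that may
 * belong to the other, still-busy device.
 */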
/**
 * ata_sff_sync - Flush writes
 * @ap: Port to wait for.
 *
 * CAUTION:
 * If we have an mmio device with no ctl and no altstatus
 * method this will fail.  No such devices are known to exist.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_sff_sync(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		ap->ops->sff_check_altstatus(ap);
	else if (ap->ioaddr.altstatus_addr)
		ioread8(ap->ioaddr.altstatus_addr);
}

/**
 * ata_sff_pause - Flush writes and wait 400ns
 * @ap: Port to pause for.
 *
 * CAUTION:
 * If we have an mmio device with no ctl and no altstatus
 * method this will fail.  No such devices are known to exist.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);

/**
 * ata_sff_dma_pause - Pause before commencing DMA
 * @ap: Port to pause for.
 *
 * Perform I/O fencing and ensure sufficient cycle delays occur
 * for the HDMA1:0 transition.
 */
void ata_sff_dma_pause(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		/* An altstatus read will cause the needed delay without
		   messing up the IRQ status */
		ata_sff_altstatus(ap);
		return;
	}
	/* There are no DMA controllers without ctl. BUG here to ensure
	   we never violate the HDMA1:0 transition timing and risk
	   corruption. */
	BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);

/**
 * ata_sff_busy_sleep - sleep until BSY clears, or timeout
 * @ap: port containing status register to be polled
 * @tmout_pat: impatience timeout in msecs
 * @tmout: overall timeout in msecs
 *
 * Sleep until ATA Status register bit BSY clears,
 * or a timeout occurs.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_busy_sleep(struct ata_port *ap,
		       unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = ata_deadline(timer_start, tmout_pat);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = ata_deadline(timer_start, tmout);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ap->ops->sff_check_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				DIV_ROUND_UP(tmout, 1000), status);
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
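/*
 * Note: a status value of 0xff generally means nothing is driving the
 * bus (the host's pull-ups read back all ones), which is why the
 * helpers above and below treat 0xff as "no device" rather than as a
 * busy-forever condition.
 */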
static int ata_sff_check_ready(struct ata_link *link)
{
	u8 status = link->ap->ops->sff_check_status(link->ap);

	return ata_check_ready(status);
}

/**
 * ata_sff_wait_ready - sleep until BSY clears, or timeout
 * @link: SFF link to wait ready status for
 * @deadline: deadline jiffies for the operation
 *
 * Sleep until ATA Status register bit BSY clears, or timeout
 * occurs.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
	return ata_wait_ready(link, deadline, ata_sff_check_ready);
}
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);

/**
 * ata_sff_set_devctl - Write device control reg
 * @ap: port where the device is
 * @ctl: value to write
 *
 * Writes ATA taskfile device control register.
 *
 * Note: may NOT be used as the sff_set_devctl() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
	if (ap->ops->sff_set_devctl)
		ap->ops->sff_set_devctl(ap, ctl);
	else
		iowrite8(ctl, ap->ioaddr.ctl_addr);
}
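/*
 * Only a few bits of the device control register written above are
 * meaningful: ATA_NIEN masks the device's INTRQ output, ATA_SRST holds
 * the bus in software reset, and ATA_HOB selects the high-order byte
 * pair of the LBA48 shadow registers for reading, which
 * ata_sff_tf_read() below relies on.
 */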
/**
 * ata_sff_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}
EXPORT_SYMBOL_GPL(ata_sff_dev_select);

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_sff_dev_select(), which
 * additionally provides the services of inserting the proper
 * pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->sff_dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_sff_irq_on - Enable interrupts on a port.
 * @ap: Port on which interrupts are enabled.
 *
 * Enable interrupts on a legacy IDE device using MMIO or PIO,
 * wait for idle, clear any pending interrupts.
 *
 * Note: may NOT be used as the sff_irq_on() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (ap->ops->sff_irq_on) {
		ap->ops->sff_irq_on(ap);
		return;
	}

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);
	ata_wait_idle(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);
/**
 * ata_sff_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON_ONCE(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);

/**
 * ata_sff_tf_read - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf.  Assumes the device has a fully SFF compliant task file
 * layout and behaviour.  If your device does not (e.g. has a different
 * status method) then you will need to provide a replacement tf_read.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_sff_check_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ioaddr->ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_read);

/**
 * ata_sff_exec_command - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA command, with proper synchronization with interrupt
 * handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_exec_command);
/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->sff_tf_load(ap, tf);
	ap->ops->sff_exec_command(ap, tf);
}

/**
 * ata_sff_data_xfer - Transfer data by PIO
 * @dev: device to target
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */
unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2];

		/* Point buf to the tail of buffer */
		buf += buflen - 1;

		/*
		 * Use io*16_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			ioread16_rep(data_addr, pad, 1);
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			iowrite16_rep(data_addr, pad, 1);
		}
		words++;
	}

	return words << 1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);

/**
 * ata_sff_data_xfer32 - Transfer data by PIO
 * @dev: device to target
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO using 32bit
 * I/O operations.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */
unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	if (!(ap->pflags & ATA_PFLAG_PIO32))
		return ata_sff_data_xfer(dev, buf, buflen, rw);

	/* Transfer multiple of 4 bytes */
	if (rw == READ)
		ioread32_rep(data_addr, buf, words);
	else
		iowrite32_rep(data_addr, buf, words);

	/* Transfer trailing bytes, if any */
	if (unlikely(slop)) {
		unsigned char pad[4];

		/* Point buf to the tail of buffer */
		buf += buflen - slop;

		/*
		 * Use io*_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			if (slop < 3)
				ioread16_rep(data_addr, pad, 1);
			else
				ioread32_rep(data_addr, pad, 1);
			memcpy(buf, pad, slop);
		} else {
			memcpy(pad, buf, slop);
			if (slop < 3)
				iowrite16_rep(data_addr, pad, 1);
			else
				iowrite32_rep(data_addr, pad, 1);
		}
	}
	return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
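/*
 * 32-bit PIO is strictly opt-in: without ATA_PFLAG_PIO32 set on the
 * port, ata_sff_data_xfer32() quietly falls back to the 16-bit path
 * above.  An illustrative sketch of what a low-level driver whose
 * controller can handle 32-bit data port accesses would do (the
 * driver name is hypothetical, shown for illustration only):
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.sff_data_xfer	= ata_sff_data_xfer32,
 *	};
 *	...
 *	// in port setup:
 *	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
 */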
/**
 * ata_sff_data_xfer_noirq - Transfer data by PIO
 * @dev: device to target
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO.  Do the
 * transfer with interrupts disabled.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */
unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				     unsigned int buflen, int rw)
{
	unsigned long flags;
	unsigned int consumed;

	local_irq_save(flags);
	consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
	local_irq_restore(flags);

	return consumed;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);

/**
 * ata_pio_sector - Transfer a sector of data.
 * @qc: Command on going
 *
 * Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);
	}

	if (!do_write && !PageSlab(page))
		flush_dcache_page(page);

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}

/**
 * ata_pio_sectors - Transfer one or many sectors.
 * @qc: Command on going
 *
 * Transfer one or many sectors of data from/to the
 * ATA device for the DRQ request.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON_ONCE(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_sff_sync(qc->ap); /* flush */
}
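/*
 * Worked example for the READ/WRITE MULTIPLE path above: with
 * multi_count == 16 and the usual 512-byte sect_size, one DRQ block is
 * 16 * 512 = 8192 bytes, so only one interrupt (or poll round) is
 * needed per 8KiB instead of one per sector.
 */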
/**
 * atapi_send_cdb - Write CDB bytes to hardware
 * @ap: Port to which ATAPI device is attached.
 * @qc: Taskfile currently active
 *
 * When device has indicated its readiness to accept
 * a CDB, this function is called.  Send the CDB.
 *
 * LOCKING:
 * caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON_ONCE(qc->dev->cdb_len < 12);

	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_sff_sync(ap);
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
#ifdef CONFIG_ATA_BMDMA
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
#endif /* CONFIG_ATA_BMDMA */
	default:
		BUG();
	}
}

/**
 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 * @bytes: number of bytes
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
						  count, rw);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
						  count, rw);
	}

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/*
	 * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
	 * check correctly as it doesn't know if it is the last request being
	 * made.  Somebody should implement a proper sanity check.
	 */
	if (bytes)
		goto next_sg;
	return 0;
}
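/*
 * For each ATAPI DRQ assertion the device reports, through its shadow
 * registers, how the transfer should proceed: the interrupt reason in
 * the sector count register (bit 0 CD, clear for data transfer; bit 1
 * IO, set for device-to-host) and the byte count it is willing to
 * transfer in the LBA mid/high (cylinder low/high) register pair.
 * atapi_pio_bytes() below decodes exactly these fields.
 */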
/**
 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant.  For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->sff_tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & (1 << 0)))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_sff_sync(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 * @ap: the target ata_port
 * @qc: qc on going
 *
 * RETURNS:
 * 1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (ata_is_atapi(qc->tf.protocol) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 * ata_hsm_qc_complete - finish a qc running on standard HSM
 * @qc: Command to complete
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * Finish @qc which is running on standard HSM.
 *
 * LOCKING:
 * If @in_wq is zero, spin_lock_irqsave(host lock).
 * Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ata_sff_irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ata_sff_irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
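/*
 * A quick map of the host state machine (HSM) driven by
 * ata_sff_hsm_move() below:
 *
 *	HSM_ST_IDLE  : no command in flight
 *	HSM_ST_FIRST : send the ATAPI CDB or the first PIO-out data block
 *	HSM_ST       : data transfer in progress, one DRQ block per move
 *	HSM_ST_LAST  : data done, waiting for the final completion status
 *	HSM_ST_ERR   : something went wrong, hand the command to EH
 *
 * Each move is triggered either by the interrupt handler or, for
 * polled commands, by ata_sff_pio_task().
 */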
/**
 * ata_sff_hsm_move - move the HSM to the next state.
 * @ap: the target ata_port
 * @qc: qc on going
 * @status: current device status
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * RETURNS:
 * 1 when poll next status needed, 0 otherwise.
 */
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8 status, int in_wq)
{
	struct ata_link *link = qc->dev->link;
	struct ata_eh_info *ehi = &link->eh_info;
	unsigned long flags = 0;
	int poll_next;

	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_sff_qc_issue() does not throw things
	 * like DMA polling into the workqueue.  Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data.  Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else {
				/* HSM violation. Let EH handle this */
				ata_ehi_push_desc(ehi,
					"ST_FIRST: !(DRQ|ERR|DF)");
				qc->err_mask |= AC_ERR_HSM;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_ehi_push_desc(ehi, "ST_FIRST: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed.  Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_sff_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_ehi_push_desc(ehi, "ST-ATAPI: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF))) {
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;

					/* If diagnostic failed and this is
					 * IDENTIFY, it's likely a phantom
					 * device.  Mark hint.
					 */
					if (qc->dev->horkage &
					    ATA_HORKAGE_DIAGNOSTIC)
						qc->err_mask |=
							AC_ERR_NODEV_HINT;
				} else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"DRQ=0 without device error, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ)) {
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"BUSY|DRQ persists on ERR|DF, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM;
				}

				/* There are oddball controllers with
				 * status register stuck at 0x7f and
				 * lbal/m/h at zero which makes it
				 * pass all other presence detection
				 * mechanisms we have.  Set NODEV_HINT
				 * for it.  Kernel bz#7241.
				 */
				if (status == 0x7f)
					qc->err_mask |= AC_ERR_NODEV_HINT;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST.  So, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
	struct ata_port *ap = link->ap;

	WARN_ON((ap->sff_pio_task_link != NULL) &&
		(ap->sff_pio_task_link != link));
	ap->sff_pio_task_link = link;

	/* may fail if ata_sff_flush_pio_task() in progress */
	queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
			   msecs_to_jiffies(delay));
}
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
void ata_sff_flush_pio_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->sff_pio_task);
	ap->hsm_task_state = HSM_ST_IDLE;

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}

static void ata_sff_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, sff_pio_task.work);
	struct ata_link *link = ap->sff_pio_task_link;
	struct ata_queued_cmd *qc;
	u8 status;
	int poll_next;

	BUG_ON(ap->sff_pio_task_link == NULL);
	/* qc can be NULL if timeout occurred */
	qc = ata_qc_from_tag(ap, link->active_tag);
	if (!qc) {
		ap->sff_pio_task_link = NULL;
		return;
	}

fsm_start:
	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
			return;
		}
	}

	/*
	 * hsm_move() may trigger another command to be processed.
	 * clean the link beforehand.
	 */
	ap->sff_pio_task_link = NULL;
	/* move the HSM */
	poll_next = ata_sff_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
/**
 * ata_sff_qc_issue - issue taskfile to a SFF controller
 * @qc: command to issue to device
 *
 * This function issues a PIO or NODATA command to a SFF
 * controller.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING)
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_sff_queue_pio_task(link, 0);

		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_sff_queue_pio_task(link, 0);

			/* always send first data block using the
			 * ata_sff_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_sff_queue_pio_task(link, 0);

			/* if polling, ata_sff_pio_task() handles the
			 * rest.  otherwise, interrupt handler takes
			 * over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		WARN_ON_ONCE(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);

/**
 * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 * @qc: qc to fill result TF for
 *
 * @qc is finished and result TF needs to be filled.  Fill it
 * using ->sff_tf_read.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * true indicating that result TF is successfully filled.
 */
bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
	return true;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->sff_check_status(ap);
		if (ap->ops->sff_irq_clear)
			ap->ops->sff_irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

static unsigned int __ata_sff_port_intr(struct ata_port *ap,
					struct ata_queued_cmd *qc,
					bool hsmv_on_idle)
{
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return ata_sff_idle_irq(ap);
		break;
	case HSM_ST_IDLE:
		return ata_sff_idle_irq(ap);
	default:
		break;
	}

	/* check main status, clearing INTRQ if needed */
	status = ata_sff_irq_status(ap);
	if (status & ATA_BUSY) {
		if (hsmv_on_idle) {
			/* BMDMA engine is already stopped, we're screwed */
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
		} else
			return ata_sff_idle_irq(ap);
	}

	/* clear irq events */
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	return 1;	/* irq handled */
}

/**
 * ata_sff_port_intr - Handle SFF port interrupt
 * @ap: Port on which interrupt arrived (possibly...)
 * @qc: Taskfile currently active in engine
 *
 * Handle port interrupt for given queued command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	return __ata_sff_port_intr(ap, qc, false);
}
EXPORT_SYMBOL_GPL(ata_sff_port_intr);

static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
	struct ata_host *host = dev_instance;
	bool retried = false;
	unsigned int i;
	unsigned int handled, idle, polling;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

retry:
	handled = idle = polling = 0;
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc) {
			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
				handled |= port_intr(ap, qc);
			else
				polling |= 1 << i;
		} else
			idle |= 1 << i;
	}

	/*
	 * If no port was expecting IRQ but the controller is actually
	 * asserting IRQ line, "nobody cared" will ensue.  Check IRQ
	 * pending status if available and clear spurious IRQ.
	 */
	if (!handled && !retried) {
		bool retry = false;

		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];

			if (polling & (1 << i))
				continue;

			if (!ap->ops->sff_irq_check ||
			    !ap->ops->sff_irq_check(ap))
				continue;

			if (idle & (1 << i)) {
				ap->ops->sff_check_status(ap);
				if (ap->ops->sff_irq_clear)
					ap->ops->sff_irq_clear(ap);
			} else {
				/* clear INTRQ and check if BUSY cleared */
				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
					retry |= true;
				/*
				 * With command in flight, we can't do
				 * sff_irq_clear() w/o racing with completion.
				 */
			}
		}

		if (retry) {
			retried = true;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
/**
 * ata_sff_interrupt - Default SFF ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host information structure
 *
 * Default interrupt handler for PCI IDE devices.  Calls
 * ata_sff_port_intr() for each port that is not disabled.
 *
 * LOCKING:
 * Obtains host lock during operation.
 *
 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);

/**
 * ata_sff_lost_interrupt - Check for an apparent lost interrupt
 * @ap: port that appears to have timed out
 *
 * Called from the libata error handlers when the core code suspects
 * an interrupt has been lost.  If it has, complete anything we can and
 * then return.  Interface must support altstatus for this faster
 * recovery to occur.
 *
 * Locking:
 * Caller holds host lock
 */
void ata_sff_lost_interrupt(struct ata_port *ap)
{
	u8 status;
	struct ata_queued_cmd *qc;

	/* Only one outstanding command per SFF channel */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	/* We cannot lose an interrupt on a non-existent or polled command */
	if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
		return;
	/* See if the controller thinks it is still busy - if so the command
	   isn't a lost IRQ but is still in progress */
	status = ata_sff_altstatus(ap);
	if (status & ATA_BUSY)
		return;

	/* There was a command running, we are no longer busy and we have
	   no interrupt. */
	ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n",
			status);
	/* Run the host interrupt logic as if the interrupt had not been
	   lost */
	ata_sff_port_intr(ap, qc);
}
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);

/**
 * ata_sff_freeze - Freeze SFF controller port
 * @ap: port to freeze
 *
 * Freeze SFF controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_freeze(struct ata_port *ap)
{
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ap->ops->sff_check_status(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_freeze);
/**
 * ata_sff_thaw - Thaw SFF controller port
 * @ap: port to thaw
 *
 * Thaw SFF controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ap->ops->sff_check_status(ap);
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
	ata_sff_irq_on(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_thaw);

/**
 * ata_sff_prereset - prepare SFF link for reset
 * @link: SFF link to be reset
 * @deadline: deadline jiffies for the operation
 *
 * SFF link @link is about to be reset.  Initialize it.  It first
 * calls ata_std_prereset() and waits for !BSY if the port is
 * being softreset.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = ata_std_prereset(link, deadline);
	if (rc)
		return rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* wait for !BSY if we don't know that no device is attached */
	if (!ata_link_offline(link)) {
		rc = ata_sff_wait_ready(link, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_prereset);

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->sff_dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
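/*
 * After a reset the device leaves its signature in the shadow
 * registers and ata_dev_classify() keys off the LBA mid/high values:
 * 0x00/0x00 identifies plain ATA, 0x14/0xeb identifies ATAPI, and
 * 0x69/0x96 a SATA port multiplier.  The helper below handles the
 * SFF-specific business of reading those registers back and sanity
 * checking the result against the diagnostic status.
 */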
/**
 * ata_sff_dev_classify - Parse returned ATA device signature
 * @dev: ATA device to classify (starting at zero)
 * @present: device seems present
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->sff_dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->sff_tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) &&
		   (ap->ops->sff_check_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}
EXPORT_SYMBOL_GPL(ata_sff_dev_classify);

/**
 * ata_sff_wait_after_reset - wait for devices to become ready after reset
 * @link: SFF link which is just reset
 * @devmask: mask of present devices
 * @deadline: deadline jiffies for the operation
 *
 * Wait for devices attached to SFF @link to become ready after
 * reset.  It contains a preceding 150ms wait to avoid accessing TF
 * status register too early.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -ENODEV if some or all of devices in @devmask
 * don't seem to exist.  -errno on other errors.
 */
int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	msleep(ATA_WAIT_AFTER_RESET);

	/* always check readiness of the master device */
	rc = ata_sff_wait_ready(link, deadline);
	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
	 * and TF status is 0xff, bail out on it too.
	 */
	if (rc)
		return rc;

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->sff_dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_sff_wait_ready(link, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->sff_dev_select(ap, 0);
	if (dev1)
		ap->ops->sff_dev_select(ap, 1);
	if (dev0)
		ap->ops->sff_dev_select(ap, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
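/*
 * The ATA spec only requires SRST to be held for at least 5us, so the
 * 20us delays in ata_bus_softreset() below leave generous margin for
 * slow and marginal hardware.
 */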
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	ap->last_ctl = ap->ctl;

	/* wait for the port to become ready */
	return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
}

/**
 * ata_sff_softreset - reset host port via ATA SRST
 * @link: ATA link to reset
 * @classes: resulting classes of attached devices
 * @deadline: deadline jiffies for the operation
 *
 * Reset host port using ATA SRST.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->sff_dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_sff_dev_classify(&link->device[1],
						  devmask & (1 << 1), &err);

	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_softreset);

/**
 * sata_sff_hardreset - reset host port via SATA phy reset
 * @link: link to reset
 * @class: resulting class of attached device
 * @deadline: deadline jiffies for the operation
 *
 * SATA phy-reset host port using DET bits of SControl register,
 * wait for !BSY and classify the attached device.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	bool online;
	int rc;

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ata_sff_check_ready);
	if (online)
		*class = ata_sff_dev_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return rc;
}
EXPORT_SYMBOL_GPL(sata_sff_hardreset);

/**
 *	sata_sff_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	bool online;
	int rc;

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ata_sff_check_ready);
	if (online)
		*class = ata_sff_dev_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return rc;
}
EXPORT_SYMBOL_GPL(sata_sff_hardreset);

/**
 *	ata_sff_postreset - SFF postreset callback
 *	@link: the target SFF ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  It first
 *	calls ata_std_postreset() and performs SFF specific postreset
 *	processing.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;

	ata_std_postreset(link, classes);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->sff_dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->sff_dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
		ata_sff_set_devctl(ap, ap->ctl);
		ap->last_ctl = ap->ctl;
	}
}
EXPORT_SYMBOL_GPL(ata_sff_postreset);

/**
 *	ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
 *	@qc: command
 *
 *	Drain the FIFO and device of any stuck data following a command
 *	failing to complete.  In some cases this is necessary before a
 *	reset will recover the device.
 *
 */
void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
{
	int count;
	struct ata_port *ap;

	/* We only need to flush incoming data when a command was running */
	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
		return;

	ap = qc->ap;
	/* Drain up to 64K of data before we give up this recovery method */
	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
						&& count < 65536; count += 2)
		ioread16(ap->ioaddr.data_addr);

	/* Can become DEBUG later */
	if (count)
		ata_port_printk(ap, KERN_DEBUG,
			"drained %d bytes to clear DRQ.\n", count);

}
EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);

/**
 *	ata_sff_error_handler - Stock error handler for SFF controller
 *	@ap: port to handle error for
 *
 *	Stock error handler for SFF controller.  It can handle both
 *	PATA and SATA controllers.  Many controllers should be able to
 *	use this EH as-is or with some added handling before and
 *	after.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_sff_error_handler(struct ata_port *ap)
{
	ata_reset_fn_t softreset = ap->ops->softreset;
	ata_reset_fn_t hardreset = ap->ops->hardreset;
	struct ata_queued_cmd *qc;
	unsigned long flags;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	spin_lock_irqsave(ap->lock, flags);

	/*
	 * We *MUST* do FIFO draining before we issue a reset as
	 * several devices helpfully clear their internal state and
	 * will lock solid if we touch the data port post reset.  Pass
	 * qc in case anyone wants to do different PIO/DMA recovery or
	 * has per command fixups
	 */
	if (ap->ops->sff_drain_fifo)
		ap->ops->sff_drain_fifo(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* ignore ata_sff_softreset if ctl isn't accessible */
	if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
		softreset = NULL;

	/* ignore built-in hardresets if SCR access is not available */
	if ((hardreset == sata_std_hardreset ||
	     hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
		  ap->ops->postreset);
}
EXPORT_SYMBOL_GPL(ata_sff_error_handler);

/**
 *	ata_sff_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_sff_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
EXPORT_SYMBOL_GPL(ata_sff_std_ports);
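
/*
 * Illustrative sketch (not part of this file): typical use of
 * ata_sff_std_ports() from a platform-style driver that has already
 * ioremapped its register windows.  The cmd_base/ctl_base pointers
 * below are hypothetical.
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = cmd_base;		// taskfile register block
 *	ioaddr->altstatus_addr = ctl_base;	// device control block
 *	ioaddr->ctl_addr = ctl_base;
 *	ata_sff_std_ports(ioaddr);		// fills in the rest
 */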

#ifdef CONFIG_PCI

static int ata_resources_present(struct pci_dev *pdev, int port)
{
	int i;

	/* Check the PCI resources for this channel are enabled */
	port = port * 2;
	for (i = 0; i < 2; i++) {
		if (pci_resource_start(pdev, port + i) == 0 ||
		    pci_resource_len(pdev, port + i) == 0)
			return 0;
	}
	return 1;
}

/**
 *	ata_pci_sff_init_host - acquire native PCI ATA resources and init host
 *	@host: target ATA host
 *
 *	Acquire native PCI ATA resources for @host and initialize the
 *	first two ports of @host accordingly.  Ports marked dummy are
 *	skipped and allocation failure makes the port dummy.
 *
 *	Note that native PCI resources are valid even for legacy hosts
 *	as we fix up pdev resources array early in boot, so this
 *	function can be used for both native and legacy SFF hosts.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if at least one port is initialized, -ENODEV if no port is
 *	available.
 */
int ata_pci_sff_init_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/* request, iomap BARs and init port addresses accordingly */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		int base = i * 2;
		void __iomem * const *iomap;

		if (ata_port_is_dummy(ap))
			continue;

		/* Discard disabled ports.  Some controllers show
		 * their unused channels this way.  Disabled ports are
		 * made dummy.
		 */
		if (!ata_resources_present(pdev, i)) {
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		rc = pcim_iomap_regions(pdev, 0x3 << base,
					dev_driver_string(gdev));
		if (rc) {
			dev_printk(KERN_WARNING, gdev,
				   "failed to request/iomap BARs for port %d "
				   "(errno=%d)\n", i, rc);
			if (rc == -EBUSY)
				pcim_pin_device(pdev);
			ap->ops = &ata_dummy_port_ops;
			continue;
		}
		host->iomap = iomap = pcim_iomap_table(pdev);

		ap->ioaddr.cmd_addr = iomap[base];
		ap->ioaddr.altstatus_addr =
		ap->ioaddr.ctl_addr = (void __iomem *)
			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
		ata_sff_std_ports(&ap->ioaddr);

		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
			(unsigned long long)pci_resource_start(pdev, base),
			(unsigned long long)pci_resource_start(pdev, base + 1));

		mask |= 1 << i;
	}

	if (!mask) {
		dev_printk(KERN_ERR, gdev, "no available native port\n");
		return -ENODEV;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);

/**
 *	ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
 *
 *	Helper to allocate PIO-only SFF ATA host for @pdev, acquire
 *	all PCI resources and initialize it accordingly in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_sff_prepare_host(struct pci_dev *pdev,
			     const struct ata_port_info * const *ppi,
			     struct ata_host **r_host)
{
	struct ata_host *host;
	int rc;

	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to allocate ATA host\n");
		rc = -ENOMEM;
		goto err_out;
	}

	rc = ata_pci_sff_init_host(host);
	if (rc)
		goto err_out;

	devres_remove_group(&pdev->dev, NULL);
	*r_host = host;
	return 0;

err_out:
	devres_release_group(&pdev->dev, NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);

/**
 *	ata_pci_sff_activate_host - start SFF host, request IRQ and register it
 *	@host: target SFF ATA host
 *	@irq_handler: irq_handler used when requesting IRQ(s)
 *	@sht: scsi_host_template to use when registering the host
 *
 *	This is the counterpart of ata_host_activate() for SFF ATA
 *	hosts.  This separate helper is necessary because SFF hosts
 *	use two separate interrupts in legacy mode.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_sff_activate_host(struct ata_host *host,
			      irq_handler_t irq_handler,
			      struct scsi_host_template *sht)
{
	struct device *dev = host->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	const char *drv_name = dev_driver_string(host->dev);
	int legacy_mode = 0, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u8 tmp8, mask;

		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = 1;
#if defined(CONFIG_NO_ATA_LEGACY)
		/* Some platforms with PCI limits cannot address compat
		   port space.  In that case we punt if their firmware has
		   left a device in compatibility mode */
		if (legacy_mode) {
			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
			return -EOPNOTSUPP;
		}
#endif
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	if (!legacy_mode && pdev->irq) {
		rc = devm_request_irq(dev, pdev->irq, irq_handler,
				      IRQF_SHARED, drv_name, host);
		if (rc)
			goto out;

		ata_port_desc(host->ports[0], "irq %d", pdev->irq);
		ata_port_desc(host->ports[1], "irq %d", pdev->irq);
	} else if (legacy_mode) {
		if (!ata_port_is_dummy(host->ports[0])) {
			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc(host->ports[0], "irq %d",
				      ATA_PRIMARY_IRQ(pdev));
		}

		if (!ata_port_is_dummy(host->ports[1])) {
			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc(host->ports[1], "irq %d",
				      ATA_SECONDARY_IRQ(pdev));
		}
	}

	rc = ata_host_register(host, sht);
out:
	if (rc == 0)
		devres_remove_group(dev, NULL);
	else
		devres_release_group(dev, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
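
/*
 * Illustrative sketch (not part of this file): a driver that needs to
 * touch the host between allocation and registration can use the
 * prepare/activate pair instead of ata_pci_sff_init_one().  The foo_*
 * names are hypothetical.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 *		if (rc)
 *			return rc;
 *
 *		foo_fixup_host(host);		// hypothetical tweak
 *
 *		return ata_pci_sff_activate_host(host, ata_sff_interrupt,
 *						 &foo_sht);
 *	}
 */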

static const struct ata_port_info *ata_sff_find_valid_pi(
					const struct ata_port_info * const *ppi)
{
	int i;

	/* look up the first valid port_info */
	for (i = 0; i < 2 && ppi[i]; i++)
		if (ppi[i]->port_ops != &ata_dummy_port_ops)
			return ppi[i];

	return NULL;
}

/**
 *	ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
 *	@pdev: Controller to be initialized
 *	@ppi: array of port_info, must be enough for two ports
 *	@sht: scsi_host_template to use when registering the host
 *	@host_priv: host private_data
 *	@hflag: host flags
 *
 *	This is a helper function which can be called from a driver's
 *	xxx_init_one() probe function if the hardware uses traditional
 *	IDE taskfile registers and is PIO only.
 *
 *	ASSUMPTION:
 *	Nobody makes a single channel controller that appears solely as
 *	the secondary legacy port on PCI.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative errno-based value on error.
 */
int ata_pci_sff_init_one(struct pci_dev *pdev,
		const struct ata_port_info * const *ppi,
		struct scsi_host_template *sht, void *host_priv, int hflag)
{
	struct device *dev = &pdev->dev;
	const struct ata_port_info *pi;
	struct ata_host *host = NULL;
	int rc;

	DPRINTK("ENTER\n");

	pi = ata_sff_find_valid_pi(ppi);
	if (!pi) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no valid port_info specified\n");
		return -EINVAL;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	rc = pcim_enable_device(pdev);
	if (rc)
		goto out;

	/* prepare and activate SFF host */
	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		goto out;
	host->private_data = host_priv;
	host->flags |= hflag;

	rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
	if (rc == 0)
		devres_remove_group(&pdev->dev, NULL);
	else
		devres_release_group(&pdev->dev, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
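
/*
 * Illustrative sketch (not part of this file): the minimal PIO-only
 * probe that ata_pci_sff_init_one() is meant for.  The pata_foo_*
 * names, flags and masks are hypothetical.
 *
 *	static struct scsi_host_template pata_foo_sht = {
 *		ATA_BASE_SHT("pata_foo"),
 *	};
 *
 *	static const struct ata_port_info pata_foo_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= ATA_PIO4,
 *		.port_ops	= &ata_sff_port_ops,
 *	};
 *
 *	static int pata_foo_init_one(struct pci_dev *pdev,
 *				     const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] =
 *			{ &pata_foo_port_info, NULL };
 *
 *		return ata_pci_sff_init_one(pdev, ppi, &pata_foo_sht,
 *					    NULL, 0);
 *	}
 */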

#endif /* CONFIG_PCI */

/*
 * BMDMA support
 */

#ifdef CONFIG_ATA_BMDMA

const struct ata_port_operations ata_bmdma_port_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,

	.qc_prep		= ata_bmdma_qc_prep,
	.qc_issue		= ata_bmdma_qc_issue,

	.sff_irq_clear		= ata_bmdma_irq_clear,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,

	.port_start		= ata_bmdma_port_start,
};
EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);

const struct ata_port_operations ata_bmdma32_port_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.sff_data_xfer		= ata_sff_data_xfer32,
	.port_start		= ata_bmdma_port_start32,
};
EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);

/**
 *	ata_bmdma_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[pi].addr = cpu_to_le32(addr);
			prd[pi].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 *	ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.  Perform the fill
 *	so that we avoid writing any 64K (zero-encoded) length records
 *	for controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			prd[pi].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec
				   says */
				prd[pi].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				prd[++pi].addr = cpu_to_le32(addr + 0x8000);
			}
			prd[pi].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 *	ata_bmdma_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_bmdma_fill_sg(qc);
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);

/**
 *	ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_bmdma_fill_sg_dumb(qc);
}
EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
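
/*
 * Worked example (illustrative, numbers hypothetical): how the PRD
 * fill helpers above split a segment that crosses a 64K boundary.
 * An 8 KiB segment at bus address 0x0000f800 becomes two PRD entries:
 *
 *	offset = 0xf800, so len = 0x10000 - 0xf800 = 0x0800
 *	PRD[0] = (0x0000f800, 0x0800)	// up to the 64K boundary
 *	PRD[1] = (0x00010000, 0x1800)	// the remaining 6 KiB
 *
 * A full 64 KiB segment is emitted by ata_bmdma_fill_sg() as a single
 * entry with a zero-encoded length, while ata_bmdma_fill_sg_dumb()
 * instead emits two 0x8000-byte entries for chipsets that mishandle
 * the zero encoding.
 */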

/**
 *	ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
 *	@qc: command to issue to device
 *
 *	This function issues a PIO, NODATA or DMA command to a
 *	SFF/BMDMA controller.  PIO and NODATA are handled by
 *	ata_sff_qc_issue().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* defer PIO handling to sff_qc_issue */
	if (!ata_is_dma(qc->tf.protocol))
		return ata_sff_qc_issue(qc);

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);

/**
 *	ata_bmdma_port_intr - Handle BMDMA port interrupt
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle port interrupt for given queued command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 host_stat = 0;
	bool bmdma_stopped = false;
	unsigned int handled;

	if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
		/* check status of DMA engine */
		host_stat = ap->ops->bmdma_status(ap);
		VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);

		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))
			return ata_sff_idle_irq(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);
		bmdma_stopped = true;

		if (unlikely(host_stat & ATA_DMA_ERR)) {
			/* error when transferring data to/from memory */
			qc->err_mask |= AC_ERR_HOST_BUS;
			ap->hsm_task_state = HSM_ST_ERR;
		}
	}

	handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);

	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return handled;
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);

/**
 *	ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_bmdma_port_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
}
EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);

/**
 *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
 *	@ap: port to handle error for
 *
 *	Stock error handler for BMDMA controller.  It can handle both
 *	PATA and SATA controllers.  Most BMDMA controllers should be
 *	able to use this EH as-is or with some added handling before
 *	and after.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	bool thaw = false;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	if (qc && ata_is_dma(qc->tf.protocol)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = true;
		}

		ap->ops->bmdma_stop(qc);

		/* if we're gonna thaw, make sure IRQ is clear */
		if (thaw) {
			ap->ops->sff_check_status(ap);
			if (ap->ops->sff_irq_clear)
				ap->ops->sff_irq_clear(ap);
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);

	if (thaw)
		ata_eh_thaw_port(ap);

	ata_sff_error_handler(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);

/**
 *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
 *	@qc: internal command to clean up
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ata_is_dma(qc->tf.protocol)) {
		spin_lock_irqsave(ap->lock, flags);
		ap->ops->bmdma_stop(qc);
		spin_unlock_irqrestore(ap->lock, flags);
	}
}
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);

/**
 *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	if (!mmio)
		return;

	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);

/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}
EXPORT_SYMBOL_GPL(ata_bmdma_setup);

/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
EXPORT_SYMBOL_GPL(ata_bmdma_start);

/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the dma control register
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_stop);

/**
 *	ata_bmdma_status - Read PCI IDE BMDMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Read and return BMDMA status register.
 *
 *	May be used as the bmdma_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_status);
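
/*
 * Illustrative sketch (not part of this file): chipset drivers usually
 * inherit ata_bmdma_port_ops and override only the hooks they need,
 * calling back into the stock helpers.  The foo_* names are
 * hypothetical.
 *
 *	static void foo_bmdma_start(struct ata_queued_cmd *qc)
 *	{
 *		foo_set_dma_clock(qc->ap);	// hypothetical chipset tweak
 *		ata_bmdma_start(qc);
 *	}
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.bmdma_start	= foo_bmdma_start,
 *	};
 */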

/**
 *	ata_bmdma_port_start - Set port up for bmdma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_bmdma_port_start(struct ata_port *ap)
{
	if (ap->mwdma_mask || ap->udma_mask) {
		ap->bmdma_prd =
			dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
					    &ap->bmdma_prd_dma, GFP_KERNEL);
		if (!ap->bmdma_prd)
			return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_start);

/**
 *	ata_bmdma_port_start32 - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Enables 32bit PIO and allocates space for PRD
 *	table.
 *
 *	May be used as the port_start() entry in ata_port_operations for
 *	devices that are capable of 32bit PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_bmdma_port_start32(struct ata_port *ap)
{
	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
	return ata_bmdma_port_start(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);

#ifdef CONFIG_PCI

/**
 *	ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
 *	@pdev: PCI device
 *
 *	Some PCI ATA devices report simplex mode but in fact can be told to
 *	enter non simplex mode.  This implements the necessary logic to
 *	perform the task on such devices.  Calling it on other devices will
 *	have -undefined- behaviour.
 */
int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
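
/*
 * Illustrative sketch (not part of this file): a driver that knows its
 * chipset only pretends to be simplex can try to clear the bit from
 * its probe path and warn if the hardware refuses.
 *
 *	if (ata_pci_bmdma_clear_simplex(pdev))
 *		dev_printk(KERN_WARNING, &pdev->dev,
 *			   "simplex mode could not be cleared\n");
 */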

static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
{
	int i;

	dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n",
		   reason);

	for (i = 0; i < 2; i++) {
		host->ports[i]->mwdma_mask = 0;
		host->ports[i]->udma_mask = 0;
	}
}

/**
 *	ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
 *	@host: target ATA host
 *
 *	Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
void ata_pci_bmdma_init(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	int i, rc;

	/* No BAR4 allocation: No DMA */
	if (pci_resource_start(pdev, 4) == 0) {
		ata_bmdma_nodma(host, "BAR4 is zero");
		return;
	}

	/*
	 * Some controllers require BMDMA region to be initialized
	 * even if DMA is not in use to clear IRQ status via
	 * ->sff_irq_clear method.  Try to initialize bmdma_addr
	 * regardless of dma masks.
	 */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		ata_bmdma_nodma(host, "failed to set dma mask");
	if (!rc) {
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			ata_bmdma_nodma(host,
					"failed to set consistent dma mask");
	}

	/* request and iomap DMA region */
	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
	if (rc) {
		ata_bmdma_nodma(host, "failed to request/iomap BAR4");
		return;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *bmdma = host->iomap[4] + 8 * i;

		if (ata_port_is_dummy(ap))
			continue;

		ap->ioaddr.bmdma_addr = bmdma;
		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
		    (ioread8(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;

		ata_port_desc(ap, "bmdma 0x%llx",
		    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
	}
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);

/**
 *	ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
 *
 *	Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
 *	resources and initialize it accordingly in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
			       const struct ata_port_info * const * ppi,
			       struct ata_host **r_host)
{
	int rc;

	rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
	if (rc)
		return rc;

	ata_pci_bmdma_init(*r_host);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);

/**
 *	ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
 *	@pdev: Controller to be initialized
 *	@ppi: array of port_info, must be enough for two ports
 *	@sht: scsi_host_template to use when registering the host
 *	@host_priv: host private_data
 *	@hflags: host flags
 *
 *	This function is similar to ata_pci_sff_init_one() but also
 *	takes care of BMDMA initialization.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative errno-based value on error.
 */
int ata_pci_bmdma_init_one(struct pci_dev *pdev,
			   const struct ata_port_info * const * ppi,
			   struct scsi_host_template *sht, void *host_priv,
			   int hflags)
{
	struct device *dev = &pdev->dev;
	const struct ata_port_info *pi;
	struct ata_host *host = NULL;
	int rc;

	DPRINTK("ENTER\n");

	pi = ata_sff_find_valid_pi(ppi);
	if (!pi) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no valid port_info specified\n");
		return -EINVAL;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	rc = pcim_enable_device(pdev);
	if (rc)
		goto out;

	/* prepare and activate BMDMA host */
	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		goto out;
	host->private_data = host_priv;
	host->flags |= hflags;

	pci_set_master(pdev);
	rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
out:
	if (rc == 0)
		devres_remove_group(&pdev->dev, NULL);
	else
		devres_release_group(&pdev->dev, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);

#endif /* CONFIG_PCI */
#endif /* CONFIG_ATA_BMDMA */

/**
 *	ata_sff_port_init - Initialize SFF/BMDMA ATA port
 *	@ap: Port to initialize
 *
 *	Called on port allocation to initialize SFF/BMDMA specific
 *	fields.
 *
 *	LOCKING:
 *	None.
 */
void ata_sff_port_init(struct ata_port *ap)
{
	INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
	ap->ctl = ATA_DEVCTL_OBS;
	ap->last_ctl = 0xFF;
}

int __init ata_sff_init(void)
{
	ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE);
	if (!ata_sff_wq)
		return -ENOMEM;

	return 0;
}

void __exit ata_sff_exit(void)
{
	destroy_workqueue(ata_sff_wq);
}