#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

/**
 * config_drive_for_dma - attempt to activate IDE DMA
 * @drive: the drive to place in DMA mode
 *
 * If the drive supports at least mode 2 DMA or UDMA of any kind
 * then attempt to place it into DMA mode. Drives that are known to
 * support DMA but predate the DMA properties or that are known
 * to have DMA handling bugs are also set up appropriately based
 * on the good/bad drive lists.
 */

int config_drive_for_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u16 *id = drive->id;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	/*
	 * Enable DMA on any drive that has
	 * UltraDMA (mode 0/1/2/3/4/5/6) enabled
	 */
	if ((id[ATA_ID_FIELD_VALID] & 4) &&
	    ((id[ATA_ID_UDMA_MODES] >> 8) & 0x7f))
		return 1;

	/*
	 * Enable DMA on any drive that has mode2 DMA
	 * (multi or single) enabled
	 */
	if ((id[ATA_ID_MWDMA_MODES] & 0x404) == 0x404 ||
	    (id[ATA_ID_SWDMA_MODES] & 0x404) == 0x404)
		return 1;

	/* Consult the list of known "good" drives */
	if (ide_dma_good_drive(drive))
		return 1;

	return 0;
}

u8 ide_dma_sff_read_status(ide_hwif_t *hwif)
{
	unsigned long addr = hwif->dma_base + ATA_DMA_STATUS;

	if (hwif->host_flags & IDE_HFLAG_MMIO)
		return readb((void __iomem *)addr);
	else
		return inb(addr);
}
EXPORT_SYMBOL_GPL(ide_dma_sff_read_status);

static void ide_dma_sff_write_status(ide_hwif_t *hwif, u8 val)
{
	unsigned long addr = hwif->dma_base + ATA_DMA_STATUS;

	if (hwif->host_flags & IDE_HFLAG_MMIO)
		writeb(val, (void __iomem *)addr);
	else
		outb(val, addr);
}

/**
 * ide_dma_host_set - Enable/disable DMA on a host
 * @drive: drive to control
 * @on: 1 to enable DMA for the drive, 0 to disable it
 *
 * Enable/disable DMA on an IDE controller following generic
 * bus-mastering IDE controller behaviour.
 */

void ide_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 unit = drive->dn & 1;
	u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	ide_dma_sff_write_status(hwif, dma_stat);
}
EXPORT_SYMBOL_GPL(ide_dma_host_set);

/**
 * ide_build_dmatable - build IDE DMA table
 * @drive: target device
 * @cmd: command being set up
 *
 * ide_build_dmatable() prepares a DMA request. We map the command
 * to get the PCI bus addresses of the buffers and then build up
 * the PRD table that the IDE layer wants to be fed.
 *
 * Most chipsets correctly interpret a length of 0x0000 as 64KB,
 * but at least one (e.g. CS5530) misinterprets it as zero (!).
 * So we break the 64KB entry into two 32KB entries instead.
 *
 * Returns the number of built PRD entries if all went okay,
 * returns 0 otherwise.
 *
 * May also be invoked from trm290.c.
 */

int ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	__le32 *table = (__le32 *)hwif->dmatable_cpu;
	unsigned int count = 0;
	int i;
	struct scatterlist *sg;
	u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);

	for_each_sg(hwif->sg_table, sg, cmd->sg_nents, i) {
		u32 cur_addr, cur_len, xcount, bcount;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		/*
		 * Fill in the DMA table, without crossing any 64kB boundaries.
		 * Most hardware requires 16-bit alignment of all blocks,
		 * but the trm290 requires 32-bit alignment.
		 */

		while (cur_len) {
			if (count++ >= PRD_ENTRIES)
				goto use_pio_instead;

			bcount = 0x10000 - (cur_addr & 0xffff);
			if (bcount > cur_len)
				bcount = cur_len;
			*table++ = cpu_to_le32(cur_addr);
			xcount = bcount & 0xffff;
			if (is_trm290)
				xcount = ((xcount >> 2) - 1) << 16;
			else if (xcount == 0x0000) {
				if (count++ >= PRD_ENTRIES)
					goto use_pio_instead;
				*table++ = cpu_to_le32(0x8000);
				*table++ = cpu_to_le32(cur_addr + 0x8000);
				xcount = 0x8000;
			}
			*table++ = cpu_to_le32(xcount);
			cur_addr += bcount;
			cur_len -= bcount;
		}
	}

	if (count) {
		if (!is_trm290)
			*--table |= cpu_to_le32(0x80000000);
		return count;
	}

use_pio_instead:
	printk(KERN_ERR "%s: %s\n", drive->name,
		count ? "DMA table too small" : "empty DMA table?");

	return 0; /* revert to PIO for this request */
}
EXPORT_SYMBOL_GPL(ide_build_dmatable);

/**
 * ide_dma_setup - begin a DMA phase
 * @drive: target device
 * @cmd: command
 *
 * Build an IDE DMA PRD (IDE speak for scatter gather table)
 * and then set up the DMA transfer registers for a device
 * that follows generic IDE PCI DMA behaviour. Controllers can
 * override this function if they need to.
 *
 * Returns 0 on success. If a PIO fallback is required then 1
 * is returned.
 */

int ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
	u8 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
	u8 dma_stat;

	/* fall back to PIO! */
	if (ide_build_dmatable(drive, cmd) == 0) {
		ide_map_sg(drive, cmd);
		return 1;
	}

	/* PRD table */
	if (mmio)
		writel(hwif->dmatable_dma,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
	else
		outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);

	/* specify r/w */
	if (mmio)
		writeb(rw, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	else
		outb(rw, hwif->dma_base + ATA_DMA_CMD);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	/* clear INTR & ERROR flags */
	ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR);

	return 0;
}
EXPORT_SYMBOL_GPL(ide_dma_setup);

/**
 * ide_dma_sff_timer_expiry - handle a DMA timeout
 * @drive: Drive that timed out
 *
 * An IDE DMA transfer timed out. In the event of an error we ask
 * the driver to resolve the problem; if a DMA transfer is still
 * in progress we continue to wait (arguably we need to add a
 * secondary 'I don't care what the drive thinks' timeout here).
 * Finally, if we have an interrupt we let it complete the I/O.
 * But only one time - we clear expiry and if it's still not
 * completed after WAIT_CMD, we error and retry in PIO.
 * This can occur if an interrupt is lost or due to hang or bugs.
 */

int ide_dma_sff_timer_expiry(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	printk(KERN_WARNING "%s: %s: DMA status (0x%02x)\n",
		drive->name, __func__, dma_stat);

	if ((dma_stat & 0x18) == 0x18)	/* BUSY Stupid Early Timer !! */
		return WAIT_CMD;

	hwif->expiry = NULL;	/* one free ride for now */

	if (dma_stat & ATA_DMA_ERR)	/* ERROR */
		return -1;

	if (dma_stat & ATA_DMA_ACTIVE)	/* DMAing */
		return WAIT_CMD;

	if (dma_stat & ATA_DMA_INTR)	/* Got an Interrupt */
		return WAIT_CMD;

	return 0;	/* Status is unknown -- reset the bus */
}
EXPORT_SYMBOL_GPL(ide_dma_sff_timer_expiry);

void ide_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_cmd;

	/* Note that this is done *after* the cmd has
	 * been issued to the drive, as per the BM-IDE spec.
	 * The Promise Ultra33 doesn't work correctly when
	 * we do this part before issuing the drive cmd.
	 */
	if (hwif->host_flags & IDE_HFLAG_MMIO) {
		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
		writeb(dma_cmd | ATA_DMA_START,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	} else {
		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
		outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
	}
}
EXPORT_SYMBOL_GPL(ide_dma_start);

/* returns non-zero on error, 0 otherwise */
int ide_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat = 0, dma_cmd = 0;

	/* stop DMA */
	if (hwif->host_flags & IDE_HFLAG_MMIO) {
		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
		writeb(dma_cmd & ~ATA_DMA_START,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	} else {
		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
		outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
	}

	/* get DMA status */
	dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	/* clear INTR & ERROR bits */
	ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR);

#define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR)

	/* verify good DMA status */
	if ((dma_stat & CHECK_DMA_MASK) != ATA_DMA_INTR)
		return 0x10 | dma_stat;
	return 0;
}
EXPORT_SYMBOL_GPL(ide_dma_end);

/* returns 1 if dma irq issued, 0 otherwise */
int ide_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	return (dma_stat & ATA_DMA_INTR) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(ide_dma_test_irq);

const struct ide_dma_ops sff_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_start		= ide_dma_start,
	.dma_end		= ide_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timer_expiry	= ide_dma_sff_timer_expiry,
	.dma_sff_read_status	= ide_dma_sff_read_status,
};
EXPORT_SYMBOL_GPL(sff_dma_ops);
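
/*
 * Illustrative sketch (not part of this file): since the helpers above
 * are exported, a chipset driver that mostly follows generic SFF-8038i
 * bus-master behaviour but needs its own dma_start() can reuse them and
 * override only that hook, instead of taking sff_dma_ops wholesale.
 * The names foo_dma_start and foo_dma_ops below are hypothetical
 * placeholders, not existing kernel symbols.
 *
 *	static void foo_dma_start(ide_drive_t *drive)
 *	{
 *		// chipset-specific preparation would go here, then the
 *		// generic bus-master engine is kicked off as usual
 *		ide_dma_start(drive);
 *	}
 *
 *	static const struct ide_dma_ops foo_dma_ops = {
 *		.dma_host_set		= ide_dma_host_set,
 *		.dma_setup		= ide_dma_setup,
 *		.dma_start		= foo_dma_start,
 *		.dma_end		= ide_dma_end,
 *		.dma_test_irq		= ide_dma_test_irq,
 *		.dma_lost_irq		= ide_dma_lost_irq,
 *		.dma_timer_expiry	= ide_dma_sff_timer_expiry,
 *		.dma_sff_read_status	= ide_dma_sff_read_status,
 *	};
 *
 * The host driver would then point its port information at foo_dma_ops
 * (typically via the dma_ops field of struct ide_port_info) rather than
 * at the generic sff_dma_ops.
 */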