/*	$NetBSD: aceride.c,v 1.29 2010/11/05 18:07:24 jakllsch Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Acer Labs (ALi) M5229 UDMA IDE controller, layered on
 * the common pciide(4) backend.  Provides chip mapping, per-channel
 * PIO/DMA/UDMA timing setup, and workarounds for several silicon bugs
 * (see acer_do_reset() and acer_dma_init()).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aceride.c,v 1.29 2010/11/05 18:07:24 jakllsch Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_acer_reg.h>

static int acer_pcib_match(const struct pci_attach_args *);
static void acer_do_reset(struct ata_channel *, int);
static void acer_chip_map(struct pciide_softc*, const struct pci_attach_args*);
static void acer_setup_channel(struct ata_channel*);
static int acer_pci_intr(void *);
static int acer_dma_init(void *, int, int, void *, size_t, int);

static int aceride_match(device_t, cfdata_t, void *);
static void aceride_attach(device_t, device_t, void *);

struct aceride_softc {
	/* Common pciide state; must be first so the softc can be cast. */
	struct pciide_softc pciide_sc;
	/*
	 * Attach args of the M1533 pci-isa bridge, filled in by
	 * pci_find_device() in acer_chip_map() and used by acer_do_reset()
	 * to reach the bridge's config space.
	 */
	struct pci_attach_args pcib_pa;
};

CFATTACH_DECL_NEW(aceride, sizeof(struct aceride_softc),
    aceride_match, aceride_attach, NULL, NULL);

/* Table of supported products; terminated by a zero entry. */
static const struct pciide_product_desc pciide_acer_products[] = {
	{ PCI_PRODUCT_ALI_M5229,
	  0,
	  "Acer Labs M5229 UDMA IDE Controller",
	  acer_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

/*
 * Autoconf match: accept ALi mass-storage/IDE-class devices listed in
 * pciide_acer_products.  Returns 2 to outbid the generic pciide driver.
 */
static int
aceride_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALI &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		if (pciide_lookup_product(pa->pa_id, pciide_acer_products))
			return (2);
	}
	return (0);
}

/*
 * Autoconf attach: delegate to the common pciide attachment, which will
 * call back into acer_chip_map() via the product descriptor.
 */
static void
aceride_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct pciide_softc *sc = device_private(self);

	sc->sc_wdcdev.sc_atac.atac_dev = self;

	pciide_common_attach(sc, pa,
	    pciide_lookup_product(pa->pa_id, pciide_acer_products));
}

/*
 * pci_find_device() callback: match the ALi M1533 pci-isa bridge.
 */
static int
acer_pcib_match(const struct pci_attach_args *pa)
{
	/*
	 * we need to access the PCI config space of the pcib, see
	 * acer_do_reset()
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_ISA &&
	    PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALI &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ALI_M1533)
		return 1;
	return 0;
}

/*
 * Chip-specific map routine, called from pciide_common_attach().
 * Establishes capabilities based on the chip revision, unlocks and
 * initializes the chipset-specific config registers, installs the
 * per-revision bug workarounds, and maps both channels.
 *
 * Revision gates visible below:
 *   >= 0x20  UDMA capable (mode 2); >= 0xC2 UDMA4; >= 0xC4 UDMA5;
 *   >= 0xC7 UDMA6.
 *   <= 0xC4  LBA48 DMA bug: fall back to PIO via acer_dma_init().
 *   == 0xC3  reset bug: install acer_do_reset() (needs the M1533 bridge).
 *   >= 0xC2  has working native interrupt status; older revs need
 *            acer_pci_intr() to demultiplex via ACER_CHIDS.
 */
static void
acer_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t cr, interface;
	pcireg_t rev = PCI_REVISION(pa->pa_class);
	struct aceride_softc *acer_sc = (struct aceride_softc *)sc;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		if (rev >= 0x20) {
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			if (rev >= 0xC7)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			else if (rev >= 0xC4)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			else if (rev >= 0xC2)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			else
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
		}
		sc->sc_wdcdev.irqack = pciide_irqack;
		if (rev <= 0xc4) {
			/* see acer_dma_init(): PIO for LBA48 transfers */
			sc->sc_wdcdev.dma_init = acer_dma_init;
			aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "using PIO transfers above 137GB as workaround for "
			    "48bit DMA access bug, expect reduced performance\n");
		}
	}

	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = acer_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;

	/* Enable CD-ROM DMA and keep the FIFO enabled. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
	    ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);

	/* Enable "microsoft register bits" R/W. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
	    ~ACER_CHANSTATUSREGS_RO);
	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);

	{
		/*
		 * some BIOSes (port-cats ABLE) enable native mode, but don't
		 * setup everything correctly, so allow the forcing of
		 * compat mode
		 */
		bool force_compat_mode;
		bool property_is_set;
		property_is_set = prop_dictionary_get_bool(
		    device_properties(sc->sc_wdcdev.sc_atac.atac_dev),
		    "ali1543-ide-force-compat-mode",
		    &force_compat_mode);
		if (property_is_set && force_compat_mode) {
			/* clear the native-PCI mode bits for both channels */
			cr &= ~((PCIIDE_INTERFACE_PCI(0)
			    | PCIIDE_INTERFACE_PCI(1))
			    << PCI_INTERFACE_SHIFT);
		}
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
	/* Don't use cr, re-read the real register content instead */
	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
	    PCI_CLASS_REG));

	/* From linux: enable "Cable Detection" */
	if (rev >= 0xC2) {
		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
		    | ACER_0x4B_CDETECT);
	}

	wdc_allocate_regs(&sc->sc_wdcdev);
	if (rev == 0xC3) {
		/* install reset bug workaround */
		if (pci_find_device(&acer_sc->pcib_pa, acer_pcib_match) == 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "WARNING: can't find pci-isa bridge\n");
		} else
			sc->sc_wdcdev.reset = acer_do_reset;
	}

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		/* newer controllers seems to lack the ACER_CHIDS. Sigh */
		pciide_mapchan(pa, cp, interface,
		    (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
	}
}

/*
 * Channel reset with the rev-0xC3 workaround: after the generic reset,
 * pulse the channel-enable bit in the M1533 pci-isa bridge's control
 * register (disable, wait 1ms, restore).
 */
static void
acer_do_reset(struct ata_channel *chp, int poll)
{
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
	struct aceride_softc *acer_sc = (struct aceride_softc *)sc;
	u_int8_t reg;

	/*
	 * From OpenSolaris: after a reset we need to disable/enable the
	 * corresponding channel, or data corruption will occur in
	 * UltraDMA modes
	 */

	wdc_do_reset(chp, poll);
	reg = pciide_pci_read(acer_sc->pcib_pa.pa_pc, acer_sc->pcib_pa.pa_tag,
	    ACER_PCIB_CTRL);
	pciide_pci_write(acer_sc->pcib_pa.pa_pc, acer_sc->pcib_pa.pa_tag,
	    ACER_PCIB_CTRL, reg & ~ACER_PCIB_CTRL_ENCHAN(chp->ch_channel));
	delay(1000);
	pciide_pci_write(acer_sc->pcib_pa.pa_pc, acer_sc->pcib_pa.pa_tag,
	    ACER_PCIB_CTRL, reg);
}

/*
 * atac_set_modes hook: program FIFO threshold, UDMA enable/timing and
 * PIO/DMA timing registers for both drives of a channel, according to
 * the modes negotiated in ch_drive[].
 */
static void
acer_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	int drive, s;
	u_int32_t acer_fifo_udma;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	idedma_ctl = 0;
	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
	ATADEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
	    DRIVE_UDMA) { /* check 80 pins cable */
		/*
		 * NOTE(review): the clamp to UDMA2 when ACER_0x4A_80PIN is
		 * set implies the bit reads as "no 80-conductor cable" —
		 * confirm against the M5229 datasheet.
		 */
		if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
		    ACER_0x4A_80PIN(chp->ch_channel)) {
			if (chp->ch_drive[0].UDMA_mode > 2)
				chp->ch_drive[0].UDMA_mode = 2;
			if (chp->ch_drive[1].UDMA_mode > 2)
				chp->ch_drive[1].UDMA_mode = 2;
		}
	}

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		ATADEBUG_PRINT(("acer_setup_channel: old timings reg for "
		    "channel %d drive %d 0x%x\n", chp->ch_channel, drive,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->ch_channel, drive))), DEBUG_PROBE);
		/* clear FIFO/DMA mode */
		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->ch_channel, drive, 0x3) |
		    ACER_UDMA_EN(chp->ch_channel, drive) |
		    ACER_UDMA_TIM(chp->ch_channel, drive, 0x7));

		/* add timing values, setup DMA if needed */
		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
			acer_fifo_udma |=
			    ACER_FTH_OPL(chp->ch_channel, drive, 0x1);
			goto pio;
		}

		acer_fifo_udma |= ACER_FTH_OPL(chp->ch_channel, drive, 0x2);
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			s = splbio();
			drvp->drive_flags &= ~DRIVE_DMA;
			splx(s);
			acer_fifo_udma |= ACER_UDMA_EN(chp->ch_channel, drive);
			acer_fifo_udma |=
			    ACER_UDMA_TIM(chp->ch_channel, drive,
				acer_udma[drvp->UDMA_mode]);
			/* XXX disable if one drive < UDMA3 ? */
			if (drvp->UDMA_mode >= 3) {
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    ACER_0x4B,
				    pciide_pci_read(sc->sc_pc, sc->sc_tag,
					ACER_0x4B) | ACER_0x4B_UDMA66);
			}
		} else {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->ch_channel, drive),
		    acer_pio[drvp->PIO_mode]);
	}
	ATADEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}

/*
 * Interrupt handler for pre-0xC2 revisions: read the per-channel
 * interrupt source register (ACER_CHIDS) and dispatch to wdcintr() for
 * each channel that asserted.  Returns 1 if any channel claimed the
 * interrupt, 0 otherwise.
 */
static int
acer_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	int i, rv, crv;
	u_int32_t chids;

	rv = 0;
	chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
	for (i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->ata_channel;
		/* If a compat channel skip. */
		if (cp->compat)
			continue;
		if (chids & ACER_CHIDS_INT(i)) {
			crv = wdcintr(wdc_cp);
			if (crv == 0) {
				aprint_error("%s:%d: bogus intr\n",
				    device_xname(
				      sc->sc_wdcdev.sc_atac.atac_dev), i);
				/* ack it anyway so it doesn't wedge */
				pciide_irqack(wdc_cp);
			} else
				rv = 1;
		}
	}
	return rv;
}

/*
 * dma_init hook installed on revisions <= 0xC4: refuse DMA for LBA48
 * transfers (returning EINVAL makes the caller fall back to PIO),
 * working around the chip's 48-bit DMA access bug; otherwise defer to
 * the generic pciide_dma_init().
 */
static int
acer_dma_init(void *v, int channel, int drive, void *databuf,
    size_t datalen, int flags)
{

	/* use PIO for LBA48 transfer */
	if (flags & WDC_DMA_LBA48)
		return EINVAL;

	return pciide_dma_init(v, channel, drive, databuf, datalen, flags);
}