1/* 2 * sata_mv.c - Marvell SATA support 3 * 4 * Copyright 2008-2009: Marvell Corporation, all rights reserved. 5 * Copyright 2005: EMC Corporation, all rights reserved. 6 * Copyright 2005 Red Hat, Inc. All rights reserved. 7 * 8 * Originally written by Brett Russ. 9 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>. 10 * 11 * Please ALWAYS copy linux-ide@vger.kernel.org on emails. 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; version 2 of the License. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; if not, write to the Free Software 24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 * 26 */ 27 28/* 29 * sata_mv TODO list: 30 * 31 * --> Develop a low-power-consumption strategy, and implement it. 32 * 33 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds. 34 * 35 * --> [Experiment, Marvell value added] Is it possible to use target 36 * mode to cross-connect two Linux boxes with Marvell cards? If so, 37 * creating LibATA target mode support would be very interesting. 38 * 39 * Target mode, for those without docs, is the ability to directly 40 * connect two SATA ports. 41 */ 42 43/* 44 * 80x1-B2 errata PCI#11: 45 * 46 * Users of the 6041/6081 Rev.B2 chips (current is C0) 47 * should be careful to insert those cards only onto PCI-X bus #0, 48 * and only in device slots 0..7, not higher. The chips may not 49 * work correctly otherwise (note: this is a pretty rare condition). 50 */ 51 52#include <linux/kernel.h> 53#include <linux/module.h> 54#include <linux/pci.h> 55#include <linux/init.h> 56#include <linux/blkdev.h> 57#include <linux/delay.h> 58#include <linux/interrupt.h> 59#include <linux/dmapool.h> 60#include <linux/dma-mapping.h> 61#include <linux/device.h> 62#include <linux/clk.h> 63#include <linux/platform_device.h> 64#include <linux/ata_platform.h> 65#include <linux/mbus.h> 66#include <linux/bitops.h> 67#include <linux/gfp.h> 68#include <scsi/scsi_host.h> 69#include <scsi/scsi_cmnd.h> 70#include <scsi/scsi_device.h> 71#include <linux/libata.h> 72 73#define DRV_NAME "sata_mv" 74#define DRV_VERSION "1.28" 75 76/* 77 * module options 78 */ 79 80static int msi; 81#ifdef CONFIG_PCI 82module_param(msi, int, S_IRUGO); 83MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); 84#endif 85 86static int irq_coalescing_io_count; 87module_param(irq_coalescing_io_count, int, S_IRUGO); 88MODULE_PARM_DESC(irq_coalescing_io_count, 89 "IRQ coalescing I/O count threshold (0..255)"); 90 91static int irq_coalescing_usecs; 92module_param(irq_coalescing_usecs, int, S_IRUGO); 93MODULE_PARM_DESC(irq_coalescing_usecs, 94 "IRQ coalescing time threshold in usecs"); 95 96enum { 97 /* BAR's are enumerated in terms of pci_resource_start() terms */ 98 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */ 99 MV_IO_BAR = 2, /* offset 0x18: IO space */ 100 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */ 101 102 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */ 103 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */ 104 105 /* For use with both IRQ coalescing methods ("all ports" or "per-HC" */ 106 COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */ 107 MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */ 108 MAX_COAL_IO_COUNT = 255, /* completed I/O count */ 109 110 MV_PCI_REG_BASE = 0, 111 112 /* 113 * Per-chip ("all ports") interrupt coalescing feature. 114 * This is only for GEN_II / GEN_IIE hardware. 115 * 116 * Coalescing defers the interrupt until either the IO_THRESHOLD 117 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. 118 */ 119 COAL_REG_BASE = 0x18000, 120 IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08), 121 ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */ 122 123 IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc), 124 IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0), 125 126 /* 127 * Registers for the (unused here) transaction coalescing feature: 128 */ 129 TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88), 130 TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c), 131 132 SATAHC0_REG_BASE = 0x20000, 133 FLASH_CTL = 0x1046c, 134 GPIO_PORT_CTL = 0x104f0, 135 RESET_CFG = 0x180d8, 136 137 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, 138 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, 139 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ 140 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, 141 142 MV_MAX_Q_DEPTH = 32, 143 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, 144 145 /* CRQB needs alignment on a 1KB boundary. Size == 1KB 146 * CRPB needs alignment on a 256B boundary. Size == 256B 147 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B 148 */ 149 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), 150 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), 151 MV_MAX_SG_CT = 256, 152 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), 153 154 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */ 155 MV_PORT_HC_SHIFT = 2, 156 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */ 157 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */ 158 MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */ 159 160 /* Host Flags */ 161 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 162 163 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 164 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING, 165 166 MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI, 167 168 MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ | 169 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA, 170 171 MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN, 172 173 CRQB_FLAG_READ = (1 << 0), 174 CRQB_TAG_SHIFT = 1, 175 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ 176 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */ 177 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ 178 CRQB_CMD_ADDR_SHIFT = 8, 179 CRQB_CMD_CS = (0x2 << 11), 180 CRQB_CMD_LAST = (1 << 15), 181 182 CRPB_FLAG_STATUS_SHIFT = 8, 183 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */ 184 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */ 185 186 EPRD_FLAG_END_OF_TBL = (1 << 31), 187 188 /* PCI interface registers */ 189 190 MV_PCI_COMMAND = 0xc00, 191 MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */ 192 MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */ 193 194 PCI_MAIN_CMD_STS = 0xd30, 195 STOP_PCI_MASTER = (1 << 2), 196 PCI_MASTER_EMPTY = (1 << 3), 197 GLOB_SFT_RST = (1 << 4), 198 199 MV_PCI_MODE = 0xd00, 200 MV_PCI_MODE_MASK = 0x30, 201 202 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, 203 MV_PCI_DISC_TIMER = 0xd04, 204 MV_PCI_MSI_TRIGGER = 0xc38, 205 MV_PCI_SERR_MASK = 0xc28, 206 MV_PCI_XBAR_TMOUT = 0x1d04, 207 MV_PCI_ERR_LOW_ADDRESS = 0x1d40, 208 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, 209 MV_PCI_ERR_ATTRIBUTE = 0x1d48, 210 MV_PCI_ERR_COMMAND = 0x1d50, 211 212 PCI_IRQ_CAUSE = 0x1d58, 213 PCI_IRQ_MASK = 0x1d5c, 214 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */ 215 216 PCIE_IRQ_CAUSE = 0x1900, 217 PCIE_IRQ_MASK = 0x1910, 218 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */ 219 220 /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */ 221 PCI_HC_MAIN_IRQ_CAUSE = 0x1d60, 222 PCI_HC_MAIN_IRQ_MASK = 0x1d64, 223 SOC_HC_MAIN_IRQ_CAUSE = 0x20020, 224 SOC_HC_MAIN_IRQ_MASK = 0x20024, 225 ERR_IRQ = (1 << 0), /* shift by (2 * port #) */ 226 DONE_IRQ = (1 << 1), /* shift by (2 * port #) */ 227 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */ 228 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */ 229 DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */ 230 DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */ 231 PCI_ERR = (1 << 18), 232 TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */ 233 TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */ 234 PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */ 235 PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */ 236 ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */ 237 GPIO_INT = (1 << 22), 238 SELF_INT = (1 << 23), 239 TWSI_INT = (1 << 24), 240 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ 241 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ 242 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ 243 244 /* SATAHC registers */ 245 HC_CFG = 0x00, 246 247 HC_IRQ_CAUSE = 0x14, 248 DMA_IRQ = (1 << 0), /* shift by port # */ 249 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */ 250 DEV_IRQ = (1 << 8), /* shift by port # */ 251 252 /* 253 * Per-HC (Host-Controller) interrupt coalescing feature. 254 * This is present on all chip generations. 255 * 256 * Coalescing defers the interrupt until either the IO_THRESHOLD 257 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. 258 */ 259 HC_IRQ_COAL_IO_THRESHOLD = 0x000c, 260 HC_IRQ_COAL_TIME_THRESHOLD = 0x0010, 261 262 SOC_LED_CTRL = 0x2c, 263 SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */ 264 SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */ 265 /* with dev activity LED */ 266 267 /* Shadow block registers */ 268 SHD_BLK = 0x100, 269 SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */ 270 271 /* SATA registers */ 272 SATA_STATUS = 0x300, /* ctrl, err regs follow status */ 273 SATA_ACTIVE = 0x350, 274 FIS_IRQ_CAUSE = 0x364, 275 FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */ 276 277 LTMODE = 0x30c, /* requires read-after-write */ 278 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ 279 280 PHY_MODE2 = 0x330, 281 PHY_MODE3 = 0x310, 282 283 PHY_MODE4 = 0x314, /* requires read-after-write */ 284 PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */ 285 PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */ 286 PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */ 287 PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */ 288 289 SATA_IFCTL = 0x344, 290 SATA_TESTCTL = 0x348, 291 SATA_IFSTAT = 0x34c, 292 VENDOR_UNIQUE_FIS = 0x35c, 293 294 FISCFG = 0x360, 295 FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ 296 FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ 297 298 PHY_MODE9_GEN2 = 0x398, 299 PHY_MODE9_GEN1 = 0x39c, 300 PHYCFG_OFS = 0x3a0, /* only in 65n devices */ 301 302 MV5_PHY_MODE = 0x74, 303 MV5_LTMODE = 0x30, 304 MV5_PHY_CTL = 0x0C, 305 SATA_IFCFG = 0x050, 306 307 MV_M2_PREAMP_MASK = 0x7e0, 308 309 /* Port registers */ 310 EDMA_CFG = 0, 311 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ 312 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ 313 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ 314 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ 315 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ 316 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */ 317 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */ 318 319 EDMA_ERR_IRQ_CAUSE = 0x8, 320 EDMA_ERR_IRQ_MASK = 0xc, 321 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */ 322 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */ 323 EDMA_ERR_DEV = (1 << 2), /* device error */ 324 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */ 325 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */ 326 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */ 327 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ 328 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ 329 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */ 330 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */ 331 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */ 332 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ 333 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ 334 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ 335 336 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ 337 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ 338 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ 339 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ 340 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ 341 342 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ 343 344 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ 345 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ 346 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ 347 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ 348 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ 349 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ 350 351 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ 352 353 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ 354 EDMA_ERR_OVERRUN_5 = (1 << 5), 355 EDMA_ERR_UNDERRUN_5 = (1 << 6), 356 357 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | 358 EDMA_ERR_LNK_CTRL_RX_1 | 359 EDMA_ERR_LNK_CTRL_RX_3 | 360 EDMA_ERR_LNK_CTRL_TX, 361 362 EDMA_EH_FREEZE = EDMA_ERR_D_PAR | 363 EDMA_ERR_PRD_PAR | 364 EDMA_ERR_DEV_DCON | 365 EDMA_ERR_DEV_CON | 366 EDMA_ERR_SERR | 367 EDMA_ERR_SELF_DIS | 368 EDMA_ERR_CRQB_PAR | 369 EDMA_ERR_CRPB_PAR | 370 EDMA_ERR_INTRL_PAR | 371 EDMA_ERR_IORDY | 372 EDMA_ERR_LNK_CTRL_RX_2 | 373 EDMA_ERR_LNK_DATA_RX | 374 EDMA_ERR_LNK_DATA_TX | 375 EDMA_ERR_TRANS_PROTO, 376 377 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | 378 EDMA_ERR_PRD_PAR | 379 EDMA_ERR_DEV_DCON | 380 EDMA_ERR_DEV_CON | 381 EDMA_ERR_OVERRUN_5 | 382 EDMA_ERR_UNDERRUN_5 | 383 EDMA_ERR_SELF_DIS_5 | 384 EDMA_ERR_CRQB_PAR | 385 EDMA_ERR_CRPB_PAR | 386 EDMA_ERR_INTRL_PAR | 387 EDMA_ERR_IORDY, 388 389 EDMA_REQ_Q_BASE_HI = 0x10, 390 EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */ 391 392 EDMA_REQ_Q_OUT_PTR = 0x18, 393 EDMA_REQ_Q_PTR_SHIFT = 5, 394 395 EDMA_RSP_Q_BASE_HI = 0x1c, 396 EDMA_RSP_Q_IN_PTR = 0x20, 397 EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */ 398 EDMA_RSP_Q_PTR_SHIFT = 3, 399 400 EDMA_CMD = 0x28, /* EDMA command register */ 401 EDMA_EN = (1 << 0), /* enable EDMA */ 402 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ 403 EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ 404 405 EDMA_STATUS = 0x30, /* EDMA engine status */ 406 EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ 407 EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ 408 409 EDMA_IORDY_TMOUT = 0x34, 410 EDMA_ARB_CFG = 0x38, 411 412 EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */ 413 EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */ 414 415 BMDMA_CMD = 0x224, /* bmdma command register */ 416 BMDMA_STATUS = 0x228, /* bmdma status register */ 417 BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */ 418 BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */ 419 420 /* Host private flags (hp_flags) */ 421 MV_HP_FLAG_MSI = (1 << 0), 422 MV_HP_ERRATA_50XXB0 = (1 << 1), 423 MV_HP_ERRATA_50XXB2 = (1 << 2), 424 MV_HP_ERRATA_60X1B2 = (1 << 3), 425 MV_HP_ERRATA_60X1C0 = (1 << 4), 426 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ 427 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ 428 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ 429 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ 430 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ 431 MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ 432 MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */ 433 434 /* Port private flags (pp_flags) */ 435 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ 436 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ 437 MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ 438 MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ 439 MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */ 440}; 441 442#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) 443#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) 444#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) 445#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) 446#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC) 447 448#define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) 449#define WINDOW_BASE(i) (0x20034 + ((i) << 4)) 450 451enum { 452 /* DMA boundary 0xffff is required by the s/g splitting 453 * we need on /length/ in mv_fill-sg(). 454 */ 455 MV_DMA_BOUNDARY = 0xffffU, 456 457 /* mask of register bits containing lower 32 bits 458 * of EDMA request queue DMA address 459 */ 460 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, 461 462 /* ditto, for response queue */ 463 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, 464}; 465 466enum chip_type { 467 chip_504x, 468 chip_508x, 469 chip_5080, 470 chip_604x, 471 chip_608x, 472 chip_6042, 473 chip_7042, 474 chip_soc, 475}; 476 477/* Command ReQuest Block: 32B */ 478struct mv_crqb { 479 __le32 sg_addr; 480 __le32 sg_addr_hi; 481 __le16 ctrl_flags; 482 __le16 ata_cmd[11]; 483}; 484 485struct mv_crqb_iie { 486 __le32 addr; 487 __le32 addr_hi; 488 __le32 flags; 489 __le32 len; 490 __le32 ata_cmd[4]; 491}; 492 493/* Command ResPonse Block: 8B */ 494struct mv_crpb { 495 __le16 id; 496 __le16 flags; 497 __le32 tmstmp; 498}; 499 500/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ 501struct mv_sg { 502 __le32 addr; 503 __le32 flags_size; 504 __le32 addr_hi; 505 __le32 reserved; 506}; 507 508/* 509 * We keep a local cache of a few frequently accessed port 510 * registers here, to avoid having to read them (very slow) 511 * when switching between EDMA and non-EDMA modes. 512 */ 513struct mv_cached_regs { 514 u32 fiscfg; 515 u32 ltmode; 516 u32 haltcond; 517 u32 unknown_rsvd; 518}; 519 520struct mv_port_priv { 521 struct mv_crqb *crqb; 522 dma_addr_t crqb_dma; 523 struct mv_crpb *crpb; 524 dma_addr_t crpb_dma; 525 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH]; 526 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH]; 527 528 unsigned int req_idx; 529 unsigned int resp_idx; 530 531 u32 pp_flags; 532 struct mv_cached_regs cached; 533 unsigned int delayed_eh_pmp_map; 534}; 535 536struct mv_port_signal { 537 u32 amps; 538 u32 pre; 539}; 540 541struct mv_host_priv { 542 u32 hp_flags; 543 unsigned int board_idx; 544 u32 main_irq_mask; 545 struct mv_port_signal signal[8]; 546 const struct mv_hw_ops *ops; 547 int n_ports; 548 void __iomem *base; 549 void __iomem *main_irq_cause_addr; 550 void __iomem *main_irq_mask_addr; 551 u32 irq_cause_offset; 552 u32 irq_mask_offset; 553 u32 unmask_all_irqs; 554 555#if defined(CONFIG_HAVE_CLK) 556 struct clk *clk; 557#endif 558 /* 559 * These consistent DMA memory pools give us guaranteed 560 * alignment for hardware-accessed data structures, 561 * and less memory waste in accomplishing the alignment. 562 */ 563 struct dma_pool *crqb_pool; 564 struct dma_pool *crpb_pool; 565 struct dma_pool *sg_tbl_pool; 566}; 567 568struct mv_hw_ops { 569 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio, 570 unsigned int port); 571 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio); 572 void (*read_preamp)(struct mv_host_priv *hpriv, int idx, 573 void __iomem *mmio); 574 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio, 575 unsigned int n_hc); 576 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio); 577 void (*reset_bus)(struct ata_host *host, void __iomem *mmio); 578}; 579 580static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); 581static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); 582static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); 583static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); 584static int mv_port_start(struct ata_port *ap); 585static void mv_port_stop(struct ata_port *ap); 586static int mv_qc_defer(struct ata_queued_cmd *qc); 587static void mv_qc_prep(struct ata_queued_cmd *qc); 588static void mv_qc_prep_iie(struct ata_queued_cmd *qc); 589static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); 590static int mv_hardreset(struct ata_link *link, unsigned int *class, 591 unsigned long deadline); 592static void mv_eh_freeze(struct ata_port *ap); 593static void mv_eh_thaw(struct ata_port *ap); 594static void mv6_dev_config(struct ata_device *dev); 595 596static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 597 unsigned int port); 598static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); 599static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 600 void __iomem *mmio); 601static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 602 unsigned int n_hc); 603static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 604static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio); 605 606static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 607 unsigned int port); 608static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); 609static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, 610 void __iomem *mmio); 611static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 612 unsigned int n_hc); 613static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 614static void mv_soc_enable_leds(struct mv_host_priv *hpriv, 615 void __iomem *mmio); 616static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, 617 void __iomem *mmio); 618static int mv_soc_reset_hc(struct mv_host_priv *hpriv, 619 void __iomem *mmio, unsigned int n_hc); 620static void mv_soc_reset_flash(struct mv_host_priv *hpriv, 621 void __iomem *mmio); 622static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); 623static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, 624 void __iomem *mmio, unsigned int port); 625static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); 626static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, 627 unsigned int port_no); 628static int mv_stop_edma(struct ata_port *ap); 629static int mv_stop_edma_engine(void __iomem *port_mmio); 630static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma); 631 632static void mv_pmp_select(struct ata_port *ap, int pmp); 633static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, 634 unsigned long deadline); 635static int mv_softreset(struct ata_link *link, unsigned int *class, 636 unsigned long deadline); 637static void mv_pmp_error_handler(struct ata_port *ap); 638static void mv_process_crpb_entries(struct ata_port *ap, 639 struct mv_port_priv *pp); 640 641static void mv_sff_irq_clear(struct ata_port *ap); 642static int mv_check_atapi_dma(struct ata_queued_cmd *qc); 643static void mv_bmdma_setup(struct ata_queued_cmd *qc); 644static void mv_bmdma_start(struct ata_queued_cmd *qc); 645static void mv_bmdma_stop(struct ata_queued_cmd *qc); 646static u8 mv_bmdma_status(struct ata_port *ap); 647static u8 mv_sff_check_status(struct ata_port *ap); 648 649/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below 650 * because we have to allow room for worst case splitting of 651 * PRDs for 64K boundaries in mv_fill_sg(). 652 */ 653static struct scsi_host_template mv5_sht = { 654 ATA_BASE_SHT(DRV_NAME), 655 .sg_tablesize = MV_MAX_SG_CT / 2, 656 .dma_boundary = MV_DMA_BOUNDARY, 657}; 658 659static struct scsi_host_template mv6_sht = { 660 ATA_NCQ_SHT(DRV_NAME), 661 .can_queue = MV_MAX_Q_DEPTH - 1, 662 .sg_tablesize = MV_MAX_SG_CT / 2, 663 .dma_boundary = MV_DMA_BOUNDARY, 664}; 665 666static struct ata_port_operations mv5_ops = { 667 .inherits = &ata_sff_port_ops, 668 669 .lost_interrupt = ATA_OP_NULL, 670 671 .qc_defer = mv_qc_defer, 672 .qc_prep = mv_qc_prep, 673 .qc_issue = mv_qc_issue, 674 675 .freeze = mv_eh_freeze, 676 .thaw = mv_eh_thaw, 677 .hardreset = mv_hardreset, 678 679 .scr_read = mv5_scr_read, 680 .scr_write = mv5_scr_write, 681 682 .port_start = mv_port_start, 683 .port_stop = mv_port_stop, 684}; 685 686static struct ata_port_operations mv6_ops = { 687 .inherits = &ata_bmdma_port_ops, 688 689 .lost_interrupt = ATA_OP_NULL, 690 691 .qc_defer = mv_qc_defer, 692 .qc_prep = mv_qc_prep, 693 .qc_issue = mv_qc_issue, 694 695 .dev_config = mv6_dev_config, 696 697 .freeze = mv_eh_freeze, 698 .thaw = mv_eh_thaw, 699 .hardreset = mv_hardreset, 700 .softreset = mv_softreset, 701 .pmp_hardreset = mv_pmp_hardreset, 702 .pmp_softreset = mv_softreset, 703 .error_handler = mv_pmp_error_handler, 704 705 .scr_read = mv_scr_read, 706 .scr_write = mv_scr_write, 707 708 .sff_check_status = mv_sff_check_status, 709 .sff_irq_clear = mv_sff_irq_clear, 710 .check_atapi_dma = mv_check_atapi_dma, 711 .bmdma_setup = mv_bmdma_setup, 712 .bmdma_start = mv_bmdma_start, 713 .bmdma_stop = mv_bmdma_stop, 714 .bmdma_status = mv_bmdma_status, 715 716 .port_start = mv_port_start, 717 .port_stop = mv_port_stop, 718}; 719 720static struct ata_port_operations mv_iie_ops = { 721 .inherits = &mv6_ops, 722 .dev_config = ATA_OP_NULL, 723 .qc_prep = mv_qc_prep_iie, 724}; 725 726static const struct ata_port_info mv_port_info[] = { 727 { /* chip_504x */ 728 .flags = MV_GEN_I_FLAGS, 729 .pio_mask = ATA_PIO4, 730 .udma_mask = ATA_UDMA6, 731 .port_ops = &mv5_ops, 732 }, 733 { /* chip_508x */ 734 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, 735 .pio_mask = ATA_PIO4, 736 .udma_mask = ATA_UDMA6, 737 .port_ops = &mv5_ops, 738 }, 739 { /* chip_5080 */ 740 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, 741 .pio_mask = ATA_PIO4, 742 .udma_mask = ATA_UDMA6, 743 .port_ops = &mv5_ops, 744 }, 745 { /* chip_604x */ 746 .flags = MV_GEN_II_FLAGS, 747 .pio_mask = ATA_PIO4, 748 .udma_mask = ATA_UDMA6, 749 .port_ops = &mv6_ops, 750 }, 751 { /* chip_608x */ 752 .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC, 753 .pio_mask = ATA_PIO4, 754 .udma_mask = ATA_UDMA6, 755 .port_ops = &mv6_ops, 756 }, 757 { /* chip_6042 */ 758 .flags = MV_GEN_IIE_FLAGS, 759 .pio_mask = ATA_PIO4, 760 .udma_mask = ATA_UDMA6, 761 .port_ops = &mv_iie_ops, 762 }, 763 { /* chip_7042 */ 764 .flags = MV_GEN_IIE_FLAGS, 765 .pio_mask = ATA_PIO4, 766 .udma_mask = ATA_UDMA6, 767 .port_ops = &mv_iie_ops, 768 }, 769 { /* chip_soc */ 770 .flags = MV_GEN_IIE_FLAGS, 771 .pio_mask = ATA_PIO4, 772 .udma_mask = ATA_UDMA6, 773 .port_ops = &mv_iie_ops, 774 }, 775}; 776 777static const struct pci_device_id mv_pci_tbl[] = { 778 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x }, 779 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, 780 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, 781 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, 782 /* RocketRAID 1720/174x have different identifiers */ 783 { PCI_VDEVICE(TTI, 0x1720), chip_6042 }, 784 { PCI_VDEVICE(TTI, 0x1740), chip_6042 }, 785 { PCI_VDEVICE(TTI, 0x1742), chip_6042 }, 786 787 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x }, 788 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x }, 789 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 }, 790 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x }, 791 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x }, 792 793 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x }, 794 795 /* Adaptec 1430SA */ 796 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 }, 797 798 /* Marvell 7042 support */ 799 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 }, 800 801 /* Highpoint RocketRAID PCIe series */ 802 { PCI_VDEVICE(TTI, 0x2300), chip_7042 }, 803 { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, 804 805 { } /* terminate list */ 806}; 807 808static const struct mv_hw_ops mv5xxx_ops = { 809 .phy_errata = mv5_phy_errata, 810 .enable_leds = mv5_enable_leds, 811 .read_preamp = mv5_read_preamp, 812 .reset_hc = mv5_reset_hc, 813 .reset_flash = mv5_reset_flash, 814 .reset_bus = mv5_reset_bus, 815}; 816 817static const struct mv_hw_ops mv6xxx_ops = { 818 .phy_errata = mv6_phy_errata, 819 .enable_leds = mv6_enable_leds, 820 .read_preamp = mv6_read_preamp, 821 .reset_hc = mv6_reset_hc, 822 .reset_flash = mv6_reset_flash, 823 .reset_bus = mv_reset_pci_bus, 824}; 825 826static const struct mv_hw_ops mv_soc_ops = { 827 .phy_errata = mv6_phy_errata, 828 .enable_leds = mv_soc_enable_leds, 829 .read_preamp = mv_soc_read_preamp, 830 .reset_hc = mv_soc_reset_hc, 831 .reset_flash = mv_soc_reset_flash, 832 .reset_bus = mv_soc_reset_bus, 833}; 834 835static const struct mv_hw_ops mv_soc_65n_ops = { 836 .phy_errata = mv_soc_65n_phy_errata, 837 .enable_leds = mv_soc_enable_leds, 838 .reset_hc = mv_soc_reset_hc, 839 .reset_flash = mv_soc_reset_flash, 840 .reset_bus = mv_soc_reset_bus, 841}; 842 843/* 844 * Functions 845 */ 846 847static inline void writelfl(unsigned long data, void __iomem *addr) 848{ 849 writel(data, addr); 850 (void) readl(addr); /* flush to avoid PCI posted write */ 851} 852 853static inline unsigned int mv_hc_from_port(unsigned int port) 854{ 855 return port >> MV_PORT_HC_SHIFT; 856} 857 858static inline unsigned int mv_hardport_from_port(unsigned int port) 859{ 860 return port & MV_PORT_MASK; 861} 862 863/* 864 * Consolidate some rather tricky bit shift calculations. 865 * This is hot-path stuff, so not a function. 866 * Simple code, with two return values, so macro rather than inline. 867 * 868 * port is the sole input, in range 0..7. 869 * shift is one output, for use with main_irq_cause / main_irq_mask registers. 870 * hardport is the other output, in range 0..3. 871 * 872 * Note that port and hardport may be the same variable in some cases. 873 */ 874#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \ 875{ \ 876 shift = mv_hc_from_port(port) * HC_SHIFT; \ 877 hardport = mv_hardport_from_port(port); \ 878 shift += hardport * 2; \ 879} 880 881static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) 882{ 883 return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); 884} 885 886static inline void __iomem *mv_hc_base_from_port(void __iomem *base, 887 unsigned int port) 888{ 889 return mv_hc_base(base, mv_hc_from_port(port)); 890} 891 892static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port) 893{ 894 return mv_hc_base_from_port(base, port) + 895 MV_SATAHC_ARBTR_REG_SZ + 896 (mv_hardport_from_port(port) * MV_PORT_REG_SZ); 897} 898 899static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) 900{ 901 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port); 902 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; 903 904 return hc_mmio + ofs; 905} 906 907static inline void __iomem *mv_host_base(struct ata_host *host) 908{ 909 struct mv_host_priv *hpriv = host->private_data; 910 return hpriv->base; 911} 912 913static inline void __iomem *mv_ap_base(struct ata_port *ap) 914{ 915 return mv_port_base(mv_host_base(ap->host), ap->port_no); 916} 917 918static inline int mv_get_hc_count(unsigned long port_flags) 919{ 920 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); 921} 922 923/** 924 * mv_save_cached_regs - (re-)initialize cached port registers 925 * @ap: the port whose registers we are caching 926 * 927 * Initialize the local cache of port registers, 928 * so that reading them over and over again can 929 * be avoided on the hotter paths of this driver. 930 * This saves a few microseconds each time we switch 931 * to/from EDMA mode to perform (eg.) a drive cache flush. 932 */ 933static void mv_save_cached_regs(struct ata_port *ap) 934{ 935 void __iomem *port_mmio = mv_ap_base(ap); 936 struct mv_port_priv *pp = ap->private_data; 937 938 pp->cached.fiscfg = readl(port_mmio + FISCFG); 939 pp->cached.ltmode = readl(port_mmio + LTMODE); 940 pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND); 941 pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD); 942} 943 944/** 945 * mv_write_cached_reg - write to a cached port register 946 * @addr: hardware address of the register 947 * @old: pointer to cached value of the register 948 * @new: new value for the register 949 * 950 * Write a new value to a cached register, 951 * but only if the value is different from before. 952 */ 953static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new) 954{ 955 if (new != *old) { 956 unsigned long laddr; 957 *old = new; 958 laddr = (long)addr & 0xffff; 959 if (laddr >= 0x300 && laddr <= 0x33c) { 960 laddr &= 0x000f; 961 if (laddr == 0x4 || laddr == 0xc) { 962 writelfl(new, addr); /* read after write */ 963 return; 964 } 965 } 966 writel(new, addr); /* unaffected by the errata */ 967 } 968} 969 970static void mv_set_edma_ptrs(void __iomem *port_mmio, 971 struct mv_host_priv *hpriv, 972 struct mv_port_priv *pp) 973{ 974 u32 index; 975 976 /* 977 * initialize request queue 978 */ 979 pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ 980 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; 981 982 WARN_ON(pp->crqb_dma & 0x3ff); 983 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI); 984 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, 985 port_mmio + EDMA_REQ_Q_IN_PTR); 986 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR); 987 988 /* 989 * initialize response queue 990 */ 991 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ 992 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT; 993 994 WARN_ON(pp->crpb_dma & 0xff); 995 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI); 996 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR); 997 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, 998 port_mmio + EDMA_RSP_Q_OUT_PTR); 999} 1000 1001static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv) 1002{ 1003 /* 1004 * When writing to the main_irq_mask in hardware, 1005 * we must ensure exclusivity between the interrupt coalescing bits 1006 * and the corresponding individual port DONE_IRQ bits. 1007 * 1008 * Note that this register is really an "IRQ enable" register, 1009 * not an "IRQ mask" register as Marvell's naming might suggest. 1010 */ 1011 if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE)) 1012 mask &= ~DONE_IRQ_0_3; 1013 if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE)) 1014 mask &= ~DONE_IRQ_4_7; 1015 writelfl(mask, hpriv->main_irq_mask_addr); 1016} 1017 1018static void mv_set_main_irq_mask(struct ata_host *host, 1019 u32 disable_bits, u32 enable_bits) 1020{ 1021 struct mv_host_priv *hpriv = host->private_data; 1022 u32 old_mask, new_mask; 1023 1024 old_mask = hpriv->main_irq_mask; 1025 new_mask = (old_mask & ~disable_bits) | enable_bits; 1026 if (new_mask != old_mask) { 1027 hpriv->main_irq_mask = new_mask; 1028 mv_write_main_irq_mask(new_mask, hpriv); 1029 } 1030} 1031 1032static void mv_enable_port_irqs(struct ata_port *ap, 1033 unsigned int port_bits) 1034{ 1035 unsigned int shift, hardport, port = ap->port_no; 1036 u32 disable_bits, enable_bits; 1037 1038 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 1039 1040 disable_bits = (DONE_IRQ | ERR_IRQ) << shift; 1041 enable_bits = port_bits << shift; 1042 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits); 1043} 1044 1045static void mv_clear_and_enable_port_irqs(struct ata_port *ap, 1046 void __iomem *port_mmio, 1047 unsigned int port_irqs) 1048{ 1049 struct mv_host_priv *hpriv = ap->host->private_data; 1050 int hardport = mv_hardport_from_port(ap->port_no); 1051 void __iomem *hc_mmio = mv_hc_base_from_port( 1052 mv_host_base(ap->host), ap->port_no); 1053 u32 hc_irq_cause; 1054 1055 /* clear EDMA event indicators, if any */ 1056 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); 1057 1058 /* clear pending irq events */ 1059 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); 1060 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE); 1061 1062 /* clear FIS IRQ Cause */ 1063 if (IS_GEN_IIE(hpriv)) 1064 writelfl(0, port_mmio + FIS_IRQ_CAUSE); 1065 1066 mv_enable_port_irqs(ap, port_irqs); 1067} 1068 1069static void mv_set_irq_coalescing(struct ata_host *host, 1070 unsigned int count, unsigned int usecs) 1071{ 1072 struct mv_host_priv *hpriv = host->private_data; 1073 void __iomem *mmio = hpriv->base, *hc_mmio; 1074 u32 coal_enable = 0; 1075 unsigned long flags; 1076 unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC; 1077 const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE | 1078 ALL_PORTS_COAL_DONE; 1079 1080 /* Disable IRQ coalescing if either threshold is zero */ 1081 if (!usecs || !count) { 1082 clks = count = 0; 1083 } else { 1084 /* Respect maximum limits of the hardware */ 1085 clks = usecs * COAL_CLOCKS_PER_USEC; 1086 if (clks > MAX_COAL_TIME_THRESHOLD) 1087 clks = MAX_COAL_TIME_THRESHOLD; 1088 if (count > MAX_COAL_IO_COUNT) 1089 count = MAX_COAL_IO_COUNT; 1090 } 1091 1092 spin_lock_irqsave(&host->lock, flags); 1093 mv_set_main_irq_mask(host, coal_disable, 0); 1094 1095 if (is_dual_hc && !IS_GEN_I(hpriv)) { 1096 /* 1097 * GEN_II/GEN_IIE with dual host controllers: 1098 * one set of global thresholds for the entire chip. 1099 */ 1100 writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD); 1101 writel(count, mmio + IRQ_COAL_IO_THRESHOLD); 1102 /* clear leftover coal IRQ bit */ 1103 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE); 1104 if (count) 1105 coal_enable = ALL_PORTS_COAL_DONE; 1106 clks = count = 0; /* force clearing of regular regs below */ 1107 } 1108 1109 /* 1110 * All chips: independent thresholds for each HC on the chip. 1111 */ 1112 hc_mmio = mv_hc_base_from_port(mmio, 0); 1113 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD); 1114 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD); 1115 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE); 1116 if (count) 1117 coal_enable |= PORTS_0_3_COAL_DONE; 1118 if (is_dual_hc) { 1119 hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC); 1120 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD); 1121 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD); 1122 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE); 1123 if (count) 1124 coal_enable |= PORTS_4_7_COAL_DONE; 1125 } 1126 1127 mv_set_main_irq_mask(host, 0, coal_enable); 1128 spin_unlock_irqrestore(&host->lock, flags); 1129} 1130 1131/** 1132 * mv_start_edma - Enable eDMA engine 1133 * @base: port base address 1134 * @pp: port private data 1135 * 1136 * Verify the local cache of the eDMA state is accurate with a 1137 * WARN_ON. 1138 * 1139 * LOCKING: 1140 * Inherited from caller. 1141 */ 1142static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio, 1143 struct mv_port_priv *pp, u8 protocol) 1144{ 1145 int want_ncq = (protocol == ATA_PROT_NCQ); 1146 1147 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1148 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); 1149 if (want_ncq != using_ncq) 1150 mv_stop_edma(ap); 1151 } 1152 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { 1153 struct mv_host_priv *hpriv = ap->host->private_data; 1154 1155 mv_edma_cfg(ap, want_ncq, 1); 1156 1157 mv_set_edma_ptrs(port_mmio, hpriv, pp); 1158 mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ); 1159 1160 writelfl(EDMA_EN, port_mmio + EDMA_CMD); 1161 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 1162 } 1163} 1164 1165static void mv_wait_for_edma_empty_idle(struct ata_port *ap) 1166{ 1167 void __iomem *port_mmio = mv_ap_base(ap); 1168 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE); 1169 const int per_loop = 5, timeout = (15 * 1000 / per_loop); 1170 int i; 1171 1172 /* 1173 * Wait for the EDMA engine to finish transactions in progress. 1174 * No idea what a good "timeout" value might be, but measurements 1175 * indicate that it often requires hundreds of microseconds 1176 * with two drives in-use. So we use the 15msec value above 1177 * as a rough guess at what even more drives might require. 1178 */ 1179 for (i = 0; i < timeout; ++i) { 1180 u32 edma_stat = readl(port_mmio + EDMA_STATUS); 1181 if ((edma_stat & empty_idle) == empty_idle) 1182 break; 1183 udelay(per_loop); 1184 } 1185 /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */ 1186} 1187 1188/** 1189 * mv_stop_edma_engine - Disable eDMA engine 1190 * @port_mmio: io base address 1191 * 1192 * LOCKING: 1193 * Inherited from caller. 1194 */ 1195static int mv_stop_edma_engine(void __iomem *port_mmio) 1196{ 1197 int i; 1198 1199 /* Disable eDMA. The disable bit auto clears. */ 1200 writelfl(EDMA_DS, port_mmio + EDMA_CMD); 1201 1202 /* Wait for the chip to confirm eDMA is off. */ 1203 for (i = 10000; i > 0; i--) { 1204 u32 reg = readl(port_mmio + EDMA_CMD); 1205 if (!(reg & EDMA_EN)) 1206 return 0; 1207 udelay(10); 1208 } 1209 return -EIO; 1210} 1211 1212static int mv_stop_edma(struct ata_port *ap) 1213{ 1214 void __iomem *port_mmio = mv_ap_base(ap); 1215 struct mv_port_priv *pp = ap->private_data; 1216 int err = 0; 1217 1218 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) 1219 return 0; 1220 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1221 mv_wait_for_edma_empty_idle(ap); 1222 if (mv_stop_edma_engine(port_mmio)) { 1223 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); 1224 err = -EIO; 1225 } 1226 mv_edma_cfg(ap, 0, 0); 1227 return err; 1228} 1229 1230#ifdef ATA_DEBUG 1231static void mv_dump_mem(void __iomem *start, unsigned bytes) 1232{ 1233 int b, w; 1234 for (b = 0; b < bytes; ) { 1235 DPRINTK("%p: ", start + b); 1236 for (w = 0; b < bytes && w < 4; w++) { 1237 printk("%08x ", readl(start + b)); 1238 b += sizeof(u32); 1239 } 1240 printk("\n"); 1241 } 1242} 1243#endif 1244 1245static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) 1246{ 1247#ifdef ATA_DEBUG 1248 int b, w; 1249 u32 dw; 1250 for (b = 0; b < bytes; ) { 1251 DPRINTK("%02x: ", b); 1252 for (w = 0; b < bytes && w < 4; w++) { 1253 (void) pci_read_config_dword(pdev, b, &dw); 1254 printk("%08x ", dw); 1255 b += sizeof(u32); 1256 } 1257 printk("\n"); 1258 } 1259#endif 1260} 1261static void mv_dump_all_regs(void __iomem *mmio_base, int port, 1262 struct pci_dev *pdev) 1263{ 1264#ifdef ATA_DEBUG 1265 void __iomem *hc_base = mv_hc_base(mmio_base, 1266 port >> MV_PORT_HC_SHIFT); 1267 void __iomem *port_base; 1268 int start_port, num_ports, p, start_hc, num_hcs, hc; 1269 1270 if (0 > port) { 1271 start_hc = start_port = 0; 1272 num_ports = 8; /* shld be benign for 4 port devs */ 1273 num_hcs = 2; 1274 } else { 1275 start_hc = port >> MV_PORT_HC_SHIFT; 1276 start_port = port; 1277 num_ports = num_hcs = 1; 1278 } 1279 DPRINTK("All registers for port(s) %u-%u:\n", start_port, 1280 num_ports > 1 ? num_ports - 1 : start_port); 1281 1282 if (NULL != pdev) { 1283 DPRINTK("PCI config space regs:\n"); 1284 mv_dump_pci_cfg(pdev, 0x68); 1285 } 1286 DPRINTK("PCI regs:\n"); 1287 mv_dump_mem(mmio_base+0xc00, 0x3c); 1288 mv_dump_mem(mmio_base+0xd00, 0x34); 1289 mv_dump_mem(mmio_base+0xf00, 0x4); 1290 mv_dump_mem(mmio_base+0x1d00, 0x6c); 1291 for (hc = start_hc; hc < start_hc + num_hcs; hc++) { 1292 hc_base = mv_hc_base(mmio_base, hc); 1293 DPRINTK("HC regs (HC %i):\n", hc); 1294 mv_dump_mem(hc_base, 0x1c); 1295 } 1296 for (p = start_port; p < start_port + num_ports; p++) { 1297 port_base = mv_port_base(mmio_base, p); 1298 DPRINTK("EDMA regs (port %i):\n", p); 1299 mv_dump_mem(port_base, 0x54); 1300 DPRINTK("SATA regs (port %i):\n", p); 1301 mv_dump_mem(port_base+0x300, 0x60); 1302 } 1303#endif 1304} 1305 1306static unsigned int mv_scr_offset(unsigned int sc_reg_in) 1307{ 1308 unsigned int ofs; 1309 1310 switch (sc_reg_in) { 1311 case SCR_STATUS: 1312 case SCR_CONTROL: 1313 case SCR_ERROR: 1314 ofs = SATA_STATUS + (sc_reg_in * sizeof(u32)); 1315 break; 1316 case SCR_ACTIVE: 1317 ofs = SATA_ACTIVE; /* active is not with the others */ 1318 break; 1319 default: 1320 ofs = 0xffffffffU; 1321 break; 1322 } 1323 return ofs; 1324} 1325 1326static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) 1327{ 1328 unsigned int ofs = mv_scr_offset(sc_reg_in); 1329 1330 if (ofs != 0xffffffffU) { 1331 *val = readl(mv_ap_base(link->ap) + ofs); 1332 return 0; 1333 } else 1334 return -EINVAL; 1335} 1336 1337static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) 1338{ 1339 unsigned int ofs = mv_scr_offset(sc_reg_in); 1340 1341 if (ofs != 0xffffffffU) { 1342 void __iomem *addr = mv_ap_base(link->ap) + ofs; 1343 if (sc_reg_in == SCR_CONTROL) { 1344 if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1) 1345 val |= 0xf000; 1346 } 1347 writelfl(val, addr); 1348 return 0; 1349 } else 1350 return -EINVAL; 1351} 1352 1353static void mv6_dev_config(struct ata_device *adev) 1354{ 1355 /* 1356 * Deal with Gen-II ("mv6") hardware quirks/restrictions: 1357 * 1358 * Gen-II does not support NCQ over a port multiplier 1359 * (no FIS-based switching). 1360 */ 1361 if (adev->flags & ATA_DFLAG_NCQ) { 1362 if (sata_pmp_attached(adev->link->ap)) { 1363 adev->flags &= ~ATA_DFLAG_NCQ; 1364 ata_dev_printk(adev, KERN_INFO, 1365 "NCQ disabled for command-based switching\n"); 1366 } 1367 } 1368} 1369 1370static int mv_qc_defer(struct ata_queued_cmd *qc) 1371{ 1372 struct ata_link *link = qc->dev->link; 1373 struct ata_port *ap = link->ap; 1374 struct mv_port_priv *pp = ap->private_data; 1375 1376 /* 1377 * Don't allow new commands if we're in a delayed EH state 1378 * for NCQ and/or FIS-based switching. 1379 */ 1380 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) 1381 return ATA_DEFER_PORT; 1382 1383 /* PIO commands need exclusive link: no other commands [DMA or PIO] 1384 * can run concurrently. 1385 * set excl_link when we want to send a PIO command in DMA mode 1386 * or a non-NCQ command in NCQ mode. 1387 * When we receive a command from that link, and there are no 1388 * outstanding commands, mark a flag to clear excl_link and let 1389 * the command go through. 1390 */ 1391 if (unlikely(ap->excl_link)) { 1392 if (link == ap->excl_link) { 1393 if (ap->nr_active_links) 1394 return ATA_DEFER_PORT; 1395 qc->flags |= ATA_QCFLAG_CLEAR_EXCL; 1396 return 0; 1397 } else 1398 return ATA_DEFER_PORT; 1399 } 1400 1401 /* 1402 * If the port is completely idle, then allow the new qc. 1403 */ 1404 if (ap->nr_active_links == 0) 1405 return 0; 1406 1407 /* 1408 * The port is operating in host queuing mode (EDMA) with NCQ 1409 * enabled, allow multiple NCQ commands. EDMA also allows 1410 * queueing multiple DMA commands but libata core currently 1411 * doesn't allow it. 1412 */ 1413 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) && 1414 (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { 1415 if (ata_is_ncq(qc->tf.protocol)) 1416 return 0; 1417 else { 1418 ap->excl_link = link; 1419 return ATA_DEFER_PORT; 1420 } 1421 } 1422 1423 return ATA_DEFER_PORT; 1424} 1425 1426static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs) 1427{ 1428 struct mv_port_priv *pp = ap->private_data; 1429 void __iomem *port_mmio; 1430 1431 u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg; 1432 u32 ltmode, *old_ltmode = &pp->cached.ltmode; 1433 u32 haltcond, *old_haltcond = &pp->cached.haltcond; 1434 1435 ltmode = *old_ltmode & ~LTMODE_BIT8; 1436 haltcond = *old_haltcond | EDMA_ERR_DEV; 1437 1438 if (want_fbs) { 1439 fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC; 1440 ltmode = *old_ltmode | LTMODE_BIT8; 1441 if (want_ncq) 1442 haltcond &= ~EDMA_ERR_DEV; 1443 else 1444 fiscfg |= FISCFG_WAIT_DEV_ERR; 1445 } else { 1446 fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); 1447 } 1448 1449 port_mmio = mv_ap_base(ap); 1450 mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg); 1451 mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode); 1452 mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond); 1453} 1454 1455static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) 1456{ 1457 struct mv_host_priv *hpriv = ap->host->private_data; 1458 u32 old, new; 1459 1460 old = readl(hpriv->base + GPIO_PORT_CTL); 1461 if (want_ncq) 1462 new = old | (1 << 22); 1463 else 1464 new = old & ~(1 << 22); 1465 if (new != old) 1466 writel(new, hpriv->base + GPIO_PORT_CTL); 1467} 1468 1469/** 1470 * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma 1471 * @ap: Port being initialized 1472 * 1473 * There are two DMA modes on these chips: basic DMA, and EDMA. 1474 * 1475 * Bit-0 of the "EDMA RESERVED" register enables/disables use 1476 * of basic DMA on the GEN_IIE versions of the chips. 1477 * 1478 * This bit survives EDMA resets, and must be set for basic DMA 1479 * to function, and should be cleared when EDMA is active. 1480 */ 1481static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma) 1482{ 1483 struct mv_port_priv *pp = ap->private_data; 1484 u32 new, *old = &pp->cached.unknown_rsvd; 1485 1486 if (enable_bmdma) 1487 new = *old | 1; 1488 else 1489 new = *old & ~1; 1490 mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new); 1491} 1492 1493/* 1494 * SOC chips have an issue whereby the HDD LEDs don't always blink 1495 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode 1496 * of the SOC takes care of it, generating a steady blink rate when 1497 * any drive on the chip is active. 1498 * 1499 * Unfortunately, the blink mode is a global hardware setting for the SOC, 1500 * so we must use it whenever at least one port on the SOC has NCQ enabled. 1501 * 1502 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal 1503 * LED operation works then, and provides better (more accurate) feedback. 1504 * 1505 * Note that this code assumes that an SOC never has more than one HC onboard. 1506 */ 1507static void mv_soc_led_blink_enable(struct ata_port *ap) 1508{ 1509 struct ata_host *host = ap->host; 1510 struct mv_host_priv *hpriv = host->private_data; 1511 void __iomem *hc_mmio; 1512 u32 led_ctrl; 1513 1514 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN) 1515 return; 1516 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN; 1517 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); 1518 led_ctrl = readl(hc_mmio + SOC_LED_CTRL); 1519 writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL); 1520} 1521 1522static void mv_soc_led_blink_disable(struct ata_port *ap) 1523{ 1524 struct ata_host *host = ap->host; 1525 struct mv_host_priv *hpriv = host->private_data; 1526 void __iomem *hc_mmio; 1527 u32 led_ctrl; 1528 unsigned int port; 1529 1530 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)) 1531 return; 1532 1533 /* disable led-blink only if no ports are using NCQ */ 1534 for (port = 0; port < hpriv->n_ports; port++) { 1535 struct ata_port *this_ap = host->ports[port]; 1536 struct mv_port_priv *pp = this_ap->private_data; 1537 1538 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) 1539 return; 1540 } 1541 1542 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN; 1543 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); 1544 led_ctrl = readl(hc_mmio + SOC_LED_CTRL); 1545 writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL); 1546} 1547 1548static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma) 1549{ 1550 u32 cfg; 1551 struct mv_port_priv *pp = ap->private_data; 1552 struct mv_host_priv *hpriv = ap->host->private_data; 1553 void __iomem *port_mmio = mv_ap_base(ap); 1554 1555 /* set up non-NCQ EDMA configuration */ 1556 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ 1557 pp->pp_flags &= 1558 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); 1559 1560 if (IS_GEN_I(hpriv)) 1561 cfg |= (1 << 8); /* enab config burst size mask */ 1562 1563 else if (IS_GEN_II(hpriv)) { 1564 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; 1565 mv_60x1_errata_sata25(ap, want_ncq); 1566 1567 } else if (IS_GEN_IIE(hpriv)) { 1568 int want_fbs = sata_pmp_attached(ap); 1569 /* 1570 * Possible future enhancement: 1571 * 1572 * The chip can use FBS with non-NCQ, if we allow it, 1573 * But first we need to have the error handling in place 1574 * for this mode (datasheet section 7.3.15.4.2.3). 1575 * So disallow non-NCQ FBS for now. 1576 */ 1577 want_fbs &= want_ncq; 1578 1579 mv_config_fbs(ap, want_ncq, want_fbs); 1580 1581 if (want_fbs) { 1582 pp->pp_flags |= MV_PP_FLAG_FBS_EN; 1583 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ 1584 } 1585 1586 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ 1587 if (want_edma) { 1588 cfg |= (1 << 22); /* enab 4-entry host queue cache */ 1589 if (!IS_SOC(hpriv)) 1590 cfg |= (1 << 18); /* enab early completion */ 1591 } 1592 if (hpriv->hp_flags & MV_HP_CUT_THROUGH) 1593 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ 1594 mv_bmdma_enable_iie(ap, !want_edma); 1595 1596 if (IS_SOC(hpriv)) { 1597 if (want_ncq) 1598 mv_soc_led_blink_enable(ap); 1599 else 1600 mv_soc_led_blink_disable(ap); 1601 } 1602 } 1603 1604 if (want_ncq) { 1605 cfg |= EDMA_CFG_NCQ; 1606 pp->pp_flags |= MV_PP_FLAG_NCQ_EN; 1607 } 1608 1609 writelfl(cfg, port_mmio + EDMA_CFG); 1610} 1611 1612static void mv_port_free_dma_mem(struct ata_port *ap) 1613{ 1614 struct mv_host_priv *hpriv = ap->host->private_data; 1615 struct mv_port_priv *pp = ap->private_data; 1616 int tag; 1617 1618 if (pp->crqb) { 1619 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma); 1620 pp->crqb = NULL; 1621 } 1622 if (pp->crpb) { 1623 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); 1624 pp->crpb = NULL; 1625 } 1626 /* 1627 * For GEN_I, there's no NCQ, so we have only a single sg_tbl. 1628 * For later hardware, we have one unique sg_tbl per NCQ tag. 1629 */ 1630 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1631 if (pp->sg_tbl[tag]) { 1632 if (tag == 0 || !IS_GEN_I(hpriv)) 1633 dma_pool_free(hpriv->sg_tbl_pool, 1634 pp->sg_tbl[tag], 1635 pp->sg_tbl_dma[tag]); 1636 pp->sg_tbl[tag] = NULL; 1637 } 1638 } 1639} 1640 1641/** 1642 * mv_port_start - Port specific init/start routine. 1643 * @ap: ATA channel to manipulate 1644 * 1645 * Allocate and point to DMA memory, init port private memory, 1646 * zero indices. 1647 * 1648 * LOCKING: 1649 * Inherited from caller. 1650 */ 1651static int mv_port_start(struct ata_port *ap) 1652{ 1653 struct device *dev = ap->host->dev; 1654 struct mv_host_priv *hpriv = ap->host->private_data; 1655 struct mv_port_priv *pp; 1656 unsigned long flags; 1657 int tag; 1658 1659 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1660 if (!pp) 1661 return -ENOMEM; 1662 ap->private_data = pp; 1663 1664 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); 1665 if (!pp->crqb) 1666 return -ENOMEM; 1667 memset(pp->crqb, 0, MV_CRQB_Q_SZ); 1668 1669 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); 1670 if (!pp->crpb) 1671 goto out_port_free_dma_mem; 1672 memset(pp->crpb, 0, MV_CRPB_Q_SZ); 1673 1674 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ 1675 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) 1676 ap->flags |= ATA_FLAG_AN; 1677 /* 1678 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. 1679 * For later hardware, we need one unique sg_tbl per NCQ tag. 1680 */ 1681 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1682 if (tag == 0 || !IS_GEN_I(hpriv)) { 1683 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, 1684 GFP_KERNEL, &pp->sg_tbl_dma[tag]); 1685 if (!pp->sg_tbl[tag]) 1686 goto out_port_free_dma_mem; 1687 } else { 1688 pp->sg_tbl[tag] = pp->sg_tbl[0]; 1689 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; 1690 } 1691 } 1692 1693 spin_lock_irqsave(ap->lock, flags); 1694 mv_save_cached_regs(ap); 1695 mv_edma_cfg(ap, 0, 0); 1696 spin_unlock_irqrestore(ap->lock, flags); 1697 1698 return 0; 1699 1700out_port_free_dma_mem: 1701 mv_port_free_dma_mem(ap); 1702 return -ENOMEM; 1703} 1704 1705/** 1706 * mv_port_stop - Port specific cleanup/stop routine. 1707 * @ap: ATA channel to manipulate 1708 * 1709 * Stop DMA, cleanup port memory. 1710 * 1711 * LOCKING: 1712 * This routine uses the host lock to protect the DMA stop. 1713 */ 1714static void mv_port_stop(struct ata_port *ap) 1715{ 1716 unsigned long flags; 1717 1718 spin_lock_irqsave(ap->lock, flags); 1719 mv_stop_edma(ap); 1720 mv_enable_port_irqs(ap, 0); 1721 spin_unlock_irqrestore(ap->lock, flags); 1722 mv_port_free_dma_mem(ap); 1723} 1724 1725/** 1726 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries 1727 * @qc: queued command whose SG list to source from 1728 * 1729 * Populate the SG list and mark the last entry. 1730 * 1731 * LOCKING: 1732 * Inherited from caller. 1733 */ 1734static void mv_fill_sg(struct ata_queued_cmd *qc) 1735{ 1736 struct mv_port_priv *pp = qc->ap->private_data; 1737 struct scatterlist *sg; 1738 struct mv_sg *mv_sg, *last_sg = NULL; 1739 unsigned int si; 1740 1741 mv_sg = pp->sg_tbl[qc->tag]; 1742 for_each_sg(qc->sg, sg, qc->n_elem, si) { 1743 dma_addr_t addr = sg_dma_address(sg); 1744 u32 sg_len = sg_dma_len(sg); 1745 1746 while (sg_len) { 1747 u32 offset = addr & 0xffff; 1748 u32 len = sg_len; 1749 1750 if (offset + len > 0x10000) 1751 len = 0x10000 - offset; 1752 1753 mv_sg->addr = cpu_to_le32(addr & 0xffffffff); 1754 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); 1755 mv_sg->flags_size = cpu_to_le32(len & 0xffff); 1756 mv_sg->reserved = 0; 1757 1758 sg_len -= len; 1759 addr += len; 1760 1761 last_sg = mv_sg; 1762 mv_sg++; 1763 } 1764 } 1765 1766 if (likely(last_sg)) 1767 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); 1768 mb(); /* ensure data structure is visible to the chipset */ 1769} 1770 1771static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) 1772{ 1773 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | 1774 (last ? CRQB_CMD_LAST : 0); 1775 *cmdw = cpu_to_le16(tmp); 1776} 1777 1778/** 1779 * mv_sff_irq_clear - Clear hardware interrupt after DMA. 1780 * @ap: Port associated with this ATA transaction. 1781 * 1782 * We need this only for ATAPI bmdma transactions, 1783 * as otherwise we experience spurious interrupts 1784 * after libata-sff handles the bmdma interrupts. 1785 */ 1786static void mv_sff_irq_clear(struct ata_port *ap) 1787{ 1788 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ); 1789} 1790 1791/** 1792 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA. 1793 * @qc: queued command to check for chipset/DMA compatibility. 1794 * 1795 * The bmdma engines cannot handle speculative data sizes 1796 * (bytecount under/over flow). So only allow DMA for 1797 * data transfer commands with known data sizes. 1798 * 1799 * LOCKING: 1800 * Inherited from caller. 1801 */ 1802static int mv_check_atapi_dma(struct ata_queued_cmd *qc) 1803{ 1804 struct scsi_cmnd *scmd = qc->scsicmd; 1805 1806 if (scmd) { 1807 switch (scmd->cmnd[0]) { 1808 case READ_6: 1809 case READ_10: 1810 case READ_12: 1811 case WRITE_6: 1812 case WRITE_10: 1813 case WRITE_12: 1814 case GPCMD_READ_CD: 1815 case GPCMD_SEND_DVD_STRUCTURE: 1816 case GPCMD_SEND_CUE_SHEET: 1817 return 0; /* DMA is safe */ 1818 } 1819 } 1820 return -EOPNOTSUPP; /* use PIO instead */ 1821} 1822 1823/** 1824 * mv_bmdma_setup - Set up BMDMA transaction 1825 * @qc: queued command to prepare DMA for. 1826 * 1827 * LOCKING: 1828 * Inherited from caller. 1829 */ 1830static void mv_bmdma_setup(struct ata_queued_cmd *qc) 1831{ 1832 struct ata_port *ap = qc->ap; 1833 void __iomem *port_mmio = mv_ap_base(ap); 1834 struct mv_port_priv *pp = ap->private_data; 1835 1836 mv_fill_sg(qc); 1837 1838 /* clear all DMA cmd bits */ 1839 writel(0, port_mmio + BMDMA_CMD); 1840 1841 /* load PRD table addr. */ 1842 writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16, 1843 port_mmio + BMDMA_PRD_HIGH); 1844 writelfl(pp->sg_tbl_dma[qc->tag], 1845 port_mmio + BMDMA_PRD_LOW); 1846 1847 /* issue r/w command */ 1848 ap->ops->sff_exec_command(ap, &qc->tf); 1849} 1850 1851/** 1852 * mv_bmdma_start - Start a BMDMA transaction 1853 * @qc: queued command to start DMA on. 1854 * 1855 * LOCKING: 1856 * Inherited from caller. 1857 */ 1858static void mv_bmdma_start(struct ata_queued_cmd *qc) 1859{ 1860 struct ata_port *ap = qc->ap; 1861 void __iomem *port_mmio = mv_ap_base(ap); 1862 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); 1863 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START; 1864 1865 /* start host DMA transaction */ 1866 writelfl(cmd, port_mmio + BMDMA_CMD); 1867} 1868 1869/** 1870 * mv_bmdma_stop - Stop BMDMA transfer 1871 * @qc: queued command to stop DMA on. 1872 * 1873 * Clears the ATA_DMA_START flag in the bmdma control register 1874 * 1875 * LOCKING: 1876 * Inherited from caller. 1877 */ 1878static void mv_bmdma_stop_ap(struct ata_port *ap) 1879{ 1880 void __iomem *port_mmio = mv_ap_base(ap); 1881 u32 cmd; 1882 1883 /* clear start/stop bit */ 1884 cmd = readl(port_mmio + BMDMA_CMD); 1885 if (cmd & ATA_DMA_START) { 1886 cmd &= ~ATA_DMA_START; 1887 writelfl(cmd, port_mmio + BMDMA_CMD); 1888 1889 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ 1890 ata_sff_dma_pause(ap); 1891 } 1892} 1893 1894static void mv_bmdma_stop(struct ata_queued_cmd *qc) 1895{ 1896 mv_bmdma_stop_ap(qc->ap); 1897} 1898 1899/** 1900 * mv_bmdma_status - Read BMDMA status 1901 * @ap: port for which to retrieve DMA status. 1902 * 1903 * Read and return equivalent of the sff BMDMA status register. 1904 * 1905 * LOCKING: 1906 * Inherited from caller. 1907 */ 1908static u8 mv_bmdma_status(struct ata_port *ap) 1909{ 1910 void __iomem *port_mmio = mv_ap_base(ap); 1911 u32 reg, status; 1912 1913 /* 1914 * Other bits are valid only if ATA_DMA_ACTIVE==0, 1915 * and the ATA_DMA_INTR bit doesn't exist. 1916 */ 1917 reg = readl(port_mmio + BMDMA_STATUS); 1918 if (reg & ATA_DMA_ACTIVE) 1919 status = ATA_DMA_ACTIVE; 1920 else if (reg & ATA_DMA_ERR) 1921 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; 1922 else { 1923 /* 1924 * Just because DMA_ACTIVE is 0 (DMA completed), 1925 * this does _not_ mean the device is "done". 1926 * So we should not yet be signalling ATA_DMA_INTR 1927 * in some cases. Eg. DSM/TRIM, and perhaps others. 1928 */ 1929 mv_bmdma_stop_ap(ap); 1930 if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY) 1931 status = 0; 1932 else 1933 status = ATA_DMA_INTR; 1934 } 1935 return status; 1936} 1937 1938static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) 1939{ 1940 struct ata_taskfile *tf = &qc->tf; 1941 if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) { 1942 if (qc->dev->multi_count > 7) { 1943 switch (tf->command) { 1944 case ATA_CMD_WRITE_MULTI: 1945 tf->command = ATA_CMD_PIO_WRITE; 1946 break; 1947 case ATA_CMD_WRITE_MULTI_FUA_EXT: 1948 tf->flags &= ~ATA_TFLAG_FUA; /* ugh */ 1949 /* fall through */ 1950 case ATA_CMD_WRITE_MULTI_EXT: 1951 tf->command = ATA_CMD_PIO_WRITE_EXT; 1952 break; 1953 } 1954 } 1955 } 1956} 1957 1958/** 1959 * mv_qc_prep - Host specific command preparation. 1960 * @qc: queued command to prepare 1961 * 1962 * This routine simply redirects to the general purpose routine 1963 * if command is not DMA. Else, it handles prep of the CRQB 1964 * (command request block), does some sanity checking, and calls 1965 * the SG load routine. 1966 * 1967 * LOCKING: 1968 * Inherited from caller. 1969 */ 1970static void mv_qc_prep(struct ata_queued_cmd *qc) 1971{ 1972 struct ata_port *ap = qc->ap; 1973 struct mv_port_priv *pp = ap->private_data; 1974 __le16 *cw; 1975 struct ata_taskfile *tf = &qc->tf; 1976 u16 flags = 0; 1977 unsigned in_index; 1978 1979 switch (tf->protocol) { 1980 case ATA_PROT_DMA: 1981 if (tf->command == ATA_CMD_DSM) 1982 return; 1983 /* fall-thru */ 1984 case ATA_PROT_NCQ: 1985 break; /* continue below */ 1986 case ATA_PROT_PIO: 1987 mv_rw_multi_errata_sata24(qc); 1988 return; 1989 default: 1990 return; 1991 } 1992 1993 /* Fill in command request block 1994 */ 1995 if (!(tf->flags & ATA_TFLAG_WRITE)) 1996 flags |= CRQB_FLAG_READ; 1997 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1998 flags |= qc->tag << CRQB_TAG_SHIFT; 1999 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; 2000 2001 /* get current queue index from software */ 2002 in_index = pp->req_idx; 2003 2004 pp->crqb[in_index].sg_addr = 2005 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); 2006 pp->crqb[in_index].sg_addr_hi = 2007 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); 2008 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); 2009 2010 cw = &pp->crqb[in_index].ata_cmd[0]; 2011 2012 /* Sadly, the CRQB cannot accomodate all registers--there are 2013 * only 11 bytes...so we must pick and choose required 2014 * registers based on the command. So, we drop feature and 2015 * hob_feature for [RW] DMA commands, but they are needed for 2016 * NCQ. NCQ will drop hob_nsect, which is not needed there 2017 * (nsect is used only for the tag; feat/hob_feat hold true nsect). 2018 */ 2019 switch (tf->command) { 2020 case ATA_CMD_READ: 2021 case ATA_CMD_READ_EXT: 2022 case ATA_CMD_WRITE: 2023 case ATA_CMD_WRITE_EXT: 2024 case ATA_CMD_WRITE_FUA_EXT: 2025 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); 2026 break; 2027 case ATA_CMD_FPDMA_READ: 2028 case ATA_CMD_FPDMA_WRITE: 2029 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); 2030 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); 2031 break; 2032 default: 2033 BUG_ON(tf->command); 2034 break; 2035 } 2036 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); 2037 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); 2038 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); 2039 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); 2040 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); 2041 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); 2042 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); 2043 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); 2044 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 2045 2046 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2047 return; 2048 mv_fill_sg(qc); 2049} 2050 2051/** 2052 * mv_qc_prep_iie - Host specific command preparation. 2053 * @qc: queued command to prepare 2054 * 2055 * This routine simply redirects to the general purpose routine 2056 * if command is not DMA. Else, it handles prep of the CRQB 2057 * (command request block), does some sanity checking, and calls 2058 * the SG load routine. 2059 * 2060 * LOCKING: 2061 * Inherited from caller. 2062 */ 2063static void mv_qc_prep_iie(struct ata_queued_cmd *qc) 2064{ 2065 struct ata_port *ap = qc->ap; 2066 struct mv_port_priv *pp = ap->private_data; 2067 struct mv_crqb_iie *crqb; 2068 struct ata_taskfile *tf = &qc->tf; 2069 unsigned in_index; 2070 u32 flags = 0; 2071 2072 if ((tf->protocol != ATA_PROT_DMA) && 2073 (tf->protocol != ATA_PROT_NCQ)) 2074 return; 2075 if (tf->command == ATA_CMD_DSM) 2076 return; /* use bmdma for this */ 2077 2078 /* Fill in Gen IIE command request block */ 2079 if (!(tf->flags & ATA_TFLAG_WRITE)) 2080 flags |= CRQB_FLAG_READ; 2081 2082 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 2083 flags |= qc->tag << CRQB_TAG_SHIFT; 2084 flags |= qc->tag << CRQB_HOSTQ_SHIFT; 2085 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; 2086 2087 /* get current queue index from software */ 2088 in_index = pp->req_idx; 2089 2090 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; 2091 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); 2092 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); 2093 crqb->flags = cpu_to_le32(flags); 2094 2095 crqb->ata_cmd[0] = cpu_to_le32( 2096 (tf->command << 16) | 2097 (tf->feature << 24) 2098 ); 2099 crqb->ata_cmd[1] = cpu_to_le32( 2100 (tf->lbal << 0) | 2101 (tf->lbam << 8) | 2102 (tf->lbah << 16) | 2103 (tf->device << 24) 2104 ); 2105 crqb->ata_cmd[2] = cpu_to_le32( 2106 (tf->hob_lbal << 0) | 2107 (tf->hob_lbam << 8) | 2108 (tf->hob_lbah << 16) | 2109 (tf->hob_feature << 24) 2110 ); 2111 crqb->ata_cmd[3] = cpu_to_le32( 2112 (tf->nsect << 0) | 2113 (tf->hob_nsect << 8) 2114 ); 2115 2116 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2117 return; 2118 mv_fill_sg(qc); 2119} 2120 2121/** 2122 * mv_sff_check_status - fetch device status, if valid 2123 * @ap: ATA port to fetch status from 2124 * 2125 * When using command issue via mv_qc_issue_fis(), 2126 * the initial ATA_BUSY state does not show up in the 2127 * ATA status (shadow) register. This can confuse libata! 2128 * 2129 * So we have a hook here to fake ATA_BUSY for that situation, 2130 * until the first time a BUSY, DRQ, or ERR bit is seen. 2131 * 2132 * The rest of the time, it simply returns the ATA status register. 2133 */ 2134static u8 mv_sff_check_status(struct ata_port *ap) 2135{ 2136 u8 stat = ioread8(ap->ioaddr.status_addr); 2137 struct mv_port_priv *pp = ap->private_data; 2138 2139 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) { 2140 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) 2141 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; 2142 else 2143 stat = ATA_BUSY; 2144 } 2145 return stat; 2146} 2147 2148/** 2149 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register 2150 * @fis: fis to be sent 2151 * @nwords: number of 32-bit words in the fis 2152 */ 2153static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords) 2154{ 2155 void __iomem *port_mmio = mv_ap_base(ap); 2156 u32 ifctl, old_ifctl, ifstat; 2157 int i, timeout = 200, final_word = nwords - 1; 2158 2159 /* Initiate FIS transmission mode */ 2160 old_ifctl = readl(port_mmio + SATA_IFCTL); 2161 ifctl = 0x100 | (old_ifctl & 0xf); 2162 writelfl(ifctl, port_mmio + SATA_IFCTL); 2163 2164 /* Send all words of the FIS except for the final word */ 2165 for (i = 0; i < final_word; ++i) 2166 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS); 2167 2168 /* Flag end-of-transmission, and then send the final word */ 2169 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL); 2170 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS); 2171 2172 /* 2173 * Wait for FIS transmission to complete. 2174 * This typically takes just a single iteration. 2175 */ 2176 do { 2177 ifstat = readl(port_mmio + SATA_IFSTAT); 2178 } while (!(ifstat & 0x1000) && --timeout); 2179 2180 /* Restore original port configuration */ 2181 writelfl(old_ifctl, port_mmio + SATA_IFCTL); 2182 2183 /* See if it worked */ 2184 if ((ifstat & 0x3000) != 0x1000) { 2185 ata_port_printk(ap, KERN_WARNING, 2186 "%s transmission error, ifstat=%08x\n", 2187 __func__, ifstat); 2188 return AC_ERR_OTHER; 2189 } 2190 return 0; 2191} 2192 2193/** 2194 * mv_qc_issue_fis - Issue a command directly as a FIS 2195 * @qc: queued command to start 2196 * 2197 * Note that the ATA shadow registers are not updated 2198 * after command issue, so the device will appear "READY" 2199 * if polled, even while it is BUSY processing the command. 2200 * 2201 * So we use a status hook to fake ATA_BUSY until the drive changes state. 2202 * 2203 * Note: we don't get updated shadow regs on *completion* 2204 * of non-data commands. So avoid sending them via this function, 2205 * as they will appear to have completed immediately. 2206 * 2207 * GEN_IIE has special registers that we could get the result tf from, 2208 * but earlier chipsets do not. For now, we ignore those registers. 2209 */ 2210static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) 2211{ 2212 struct ata_port *ap = qc->ap; 2213 struct mv_port_priv *pp = ap->private_data; 2214 struct ata_link *link = qc->dev->link; 2215 u32 fis[5]; 2216 int err = 0; 2217 2218 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis); 2219 err = mv_send_fis(ap, fis, ARRAY_SIZE(fis)); 2220 if (err) 2221 return err; 2222 2223 switch (qc->tf.protocol) { 2224 case ATAPI_PROT_PIO: 2225 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; 2226 /* fall through */ 2227 case ATAPI_PROT_NODATA: 2228 ap->hsm_task_state = HSM_ST_FIRST; 2229 break; 2230 case ATA_PROT_PIO: 2231 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; 2232 if (qc->tf.flags & ATA_TFLAG_WRITE) 2233 ap->hsm_task_state = HSM_ST_FIRST; 2234 else 2235 ap->hsm_task_state = HSM_ST; 2236 break; 2237 default: 2238 ap->hsm_task_state = HSM_ST_LAST; 2239 break; 2240 } 2241 2242 if (qc->tf.flags & ATA_TFLAG_POLLING) 2243 ata_sff_queue_pio_task(link, 0); 2244 return 0; 2245} 2246 2247/** 2248 * mv_qc_issue - Initiate a command to the host 2249 * @qc: queued command to start 2250 * 2251 * This routine simply redirects to the general purpose routine 2252 * if command is not DMA. Else, it sanity checks our local 2253 * caches of the request producer/consumer indices then enables 2254 * DMA and bumps the request producer index. 2255 * 2256 * LOCKING: 2257 * Inherited from caller. 2258 */ 2259static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) 2260{ 2261 static int limit_warnings = 10; 2262 struct ata_port *ap = qc->ap; 2263 void __iomem *port_mmio = mv_ap_base(ap); 2264 struct mv_port_priv *pp = ap->private_data; 2265 u32 in_index; 2266 unsigned int port_irqs; 2267 2268 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */ 2269 2270 switch (qc->tf.protocol) { 2271 case ATA_PROT_DMA: 2272 if (qc->tf.command == ATA_CMD_DSM) { 2273 if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */ 2274 return AC_ERR_OTHER; 2275 break; /* use bmdma for this */ 2276 } 2277 /* fall thru */ 2278 case ATA_PROT_NCQ: 2279 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); 2280 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; 2281 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; 2282 2283 /* Write the request in pointer to kick the EDMA to life */ 2284 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, 2285 port_mmio + EDMA_REQ_Q_IN_PTR); 2286 return 0; 2287 2288 case ATA_PROT_PIO: 2289 /* 2290 * Errata SATA#16, SATA#24: warn if multiple DRQs expected. 2291 * 2292 * Someday, we might implement special polling workarounds 2293 * for these, but it all seems rather unnecessary since we 2294 * normally use only DMA for commands which transfer more 2295 * than a single block of data. 2296 * 2297 * Much of the time, this could just work regardless. 2298 * So for now, just log the incident, and allow the attempt. 2299 */ 2300 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { 2301 --limit_warnings; 2302 ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME 2303 ": attempting PIO w/multiple DRQ: " 2304 "this may fail due to h/w errata\n"); 2305 } 2306 /* drop through */ 2307 case ATA_PROT_NODATA: 2308 case ATAPI_PROT_PIO: 2309 case ATAPI_PROT_NODATA: 2310 if (ap->flags & ATA_FLAG_PIO_POLLING) 2311 qc->tf.flags |= ATA_TFLAG_POLLING; 2312 break; 2313 } 2314 2315 if (qc->tf.flags & ATA_TFLAG_POLLING) 2316 port_irqs = ERR_IRQ; /* mask device interrupt when polling */ 2317 else 2318 port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */ 2319 2320 /* 2321 * We're about to send a non-EDMA capable command to the 2322 * port. Turn off EDMA so there won't be problems accessing 2323 * shadow block, etc registers. 2324 */ 2325 mv_stop_edma(ap); 2326 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs); 2327 mv_pmp_select(ap, qc->dev->link->pmp); 2328 2329 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) { 2330 struct mv_host_priv *hpriv = ap->host->private_data; 2331 if (IS_GEN_II(hpriv)) 2332 return mv_qc_issue_fis(qc); 2333 } 2334 return ata_bmdma_qc_issue(qc); 2335} 2336 2337static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) 2338{ 2339 struct mv_port_priv *pp = ap->private_data; 2340 struct ata_queued_cmd *qc; 2341 2342 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) 2343 return NULL; 2344 qc = ata_qc_from_tag(ap, ap->link.active_tag); 2345 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) 2346 return qc; 2347 return NULL; 2348} 2349 2350static void mv_pmp_error_handler(struct ata_port *ap) 2351{ 2352 unsigned int pmp, pmp_map; 2353 struct mv_port_priv *pp = ap->private_data; 2354 2355 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { 2356 /* 2357 * Perform NCQ error analysis on failed PMPs 2358 * before we freeze the port entirely. 2359 * 2360 * The failed PMPs are marked earlier by mv_pmp_eh_prep(). 2361 */ 2362 pmp_map = pp->delayed_eh_pmp_map; 2363 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; 2364 for (pmp = 0; pmp_map != 0; pmp++) { 2365 unsigned int this_pmp = (1 << pmp); 2366 if (pmp_map & this_pmp) { 2367 struct ata_link *link = &ap->pmp_link[pmp]; 2368 pmp_map &= ~this_pmp; 2369 ata_eh_analyze_ncq_error(link); 2370 } 2371 } 2372 ata_port_freeze(ap); 2373 } 2374 sata_pmp_error_handler(ap); 2375} 2376 2377static unsigned int mv_get_err_pmp_map(struct ata_port *ap) 2378{ 2379 void __iomem *port_mmio = mv_ap_base(ap); 2380 2381 return readl(port_mmio + SATA_TESTCTL) >> 16; 2382} 2383 2384static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) 2385{ 2386 struct ata_eh_info *ehi; 2387 unsigned int pmp; 2388 2389 /* 2390 * Initialize EH info for PMPs which saw device errors 2391 */ 2392 ehi = &ap->link.eh_info; 2393 for (pmp = 0; pmp_map != 0; pmp++) { 2394 unsigned int this_pmp = (1 << pmp); 2395 if (pmp_map & this_pmp) { 2396 struct ata_link *link = &ap->pmp_link[pmp]; 2397 2398 pmp_map &= ~this_pmp; 2399 ehi = &link->eh_info; 2400 ata_ehi_clear_desc(ehi); 2401 ata_ehi_push_desc(ehi, "dev err"); 2402 ehi->err_mask |= AC_ERR_DEV; 2403 ehi->action |= ATA_EH_RESET; 2404 ata_link_abort(link); 2405 } 2406 } 2407} 2408 2409static int mv_req_q_empty(struct ata_port *ap) 2410{ 2411 void __iomem *port_mmio = mv_ap_base(ap); 2412 u32 in_ptr, out_ptr; 2413 2414 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR) 2415 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 2416 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR) 2417 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 2418 return (in_ptr == out_ptr); /* 1 == queue_is_empty */ 2419} 2420 2421static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) 2422{ 2423 struct mv_port_priv *pp = ap->private_data; 2424 int failed_links; 2425 unsigned int old_map, new_map; 2426 2427 /* 2428 * Device error during FBS+NCQ operation: 2429 * 2430 * Set a port flag to prevent further I/O being enqueued. 2431 * Leave the EDMA running to drain outstanding commands from this port. 2432 * Perform the post-mortem/EH only when all responses are complete. 2433 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). 2434 */ 2435 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { 2436 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; 2437 pp->delayed_eh_pmp_map = 0; 2438 } 2439 old_map = pp->delayed_eh_pmp_map; 2440 new_map = old_map | mv_get_err_pmp_map(ap); 2441 2442 if (old_map != new_map) { 2443 pp->delayed_eh_pmp_map = new_map; 2444 mv_pmp_eh_prep(ap, new_map & ~old_map); 2445 } 2446 failed_links = hweight16(new_map); 2447 2448 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x " 2449 "failed_links=%d nr_active_links=%d\n", 2450 __func__, pp->delayed_eh_pmp_map, 2451 ap->qc_active, failed_links, 2452 ap->nr_active_links); 2453 2454 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) { 2455 mv_process_crpb_entries(ap, pp); 2456 mv_stop_edma(ap); 2457 mv_eh_freeze(ap); 2458 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__); 2459 return 1; /* handled */ 2460 } 2461 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__); 2462 return 1; /* handled */ 2463} 2464 2465static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) 2466{ 2467 /* 2468 * Possible future enhancement: 2469 * 2470 * FBS+non-NCQ operation is not yet implemented. 2471 * See related notes in mv_edma_cfg(). 2472 * 2473 * Device error during FBS+non-NCQ operation: 2474 * 2475 * We need to snapshot the shadow registers for each failed command. 2476 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). 2477 */ 2478 return 0; /* not handled */ 2479} 2480 2481static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) 2482{ 2483 struct mv_port_priv *pp = ap->private_data; 2484 2485 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) 2486 return 0; /* EDMA was not active: not handled */ 2487 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) 2488 return 0; /* FBS was not active: not handled */ 2489 2490 if (!(edma_err_cause & EDMA_ERR_DEV)) 2491 return 0; /* non DEV error: not handled */ 2492 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; 2493 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) 2494 return 0; /* other problems: not handled */ 2495 2496 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { 2497 /* 2498 * EDMA should NOT have self-disabled for this case. 2499 * If it did, then something is wrong elsewhere, 2500 * and we cannot handle it here. 2501 */ 2502 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 2503 ata_port_printk(ap, KERN_WARNING, 2504 "%s: err_cause=0x%x pp_flags=0x%x\n", 2505 __func__, edma_err_cause, pp->pp_flags); 2506 return 0; /* not handled */ 2507 } 2508 return mv_handle_fbs_ncq_dev_err(ap); 2509 } else { 2510 /* 2511 * EDMA should have self-disabled for this case. 2512 * If it did not, then something is wrong elsewhere, 2513 * and we cannot handle it here. 2514 */ 2515 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { 2516 ata_port_printk(ap, KERN_WARNING, 2517 "%s: err_cause=0x%x pp_flags=0x%x\n", 2518 __func__, edma_err_cause, pp->pp_flags); 2519 return 0; /* not handled */ 2520 } 2521 return mv_handle_fbs_non_ncq_dev_err(ap); 2522 } 2523 return 0; /* not handled */ 2524} 2525 2526static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) 2527{ 2528 struct ata_eh_info *ehi = &ap->link.eh_info; 2529 char *when = "idle"; 2530 2531 ata_ehi_clear_desc(ehi); 2532 if (edma_was_enabled) { 2533 when = "EDMA enabled"; 2534 } else { 2535 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); 2536 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) 2537 when = "polling"; 2538 } 2539 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); 2540 ehi->err_mask |= AC_ERR_OTHER; 2541 ehi->action |= ATA_EH_RESET; 2542 ata_port_freeze(ap); 2543} 2544 2545/** 2546 * mv_err_intr - Handle error interrupts on the port 2547 * @ap: ATA channel to manipulate 2548 * 2549 * Most cases require a full reset of the chip's state machine, 2550 * which also performs a COMRESET. 2551 * Also, if the port disabled DMA, update our cached copy to match. 2552 * 2553 * LOCKING: 2554 * Inherited from caller. 2555 */ 2556static void mv_err_intr(struct ata_port *ap) 2557{ 2558 void __iomem *port_mmio = mv_ap_base(ap); 2559 u32 edma_err_cause, eh_freeze_mask, serr = 0; 2560 u32 fis_cause = 0; 2561 struct mv_port_priv *pp = ap->private_data; 2562 struct mv_host_priv *hpriv = ap->host->private_data; 2563 unsigned int action = 0, err_mask = 0; 2564 struct ata_eh_info *ehi = &ap->link.eh_info; 2565 struct ata_queued_cmd *qc; 2566 int abort = 0; 2567 2568 /* 2569 * Read and clear the SError and err_cause bits. 2570 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear 2571 * the FIS_IRQ_CAUSE register before clearing edma_err_cause. 2572 */ 2573 sata_scr_read(&ap->link, SCR_ERROR, &serr); 2574 sata_scr_write_flush(&ap->link, SCR_ERROR, serr); 2575 2576 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE); 2577 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { 2578 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE); 2579 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE); 2580 } 2581 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE); 2582 2583 if (edma_err_cause & EDMA_ERR_DEV) { 2584 /* 2585 * Device errors during FIS-based switching operation 2586 * require special handling. 2587 */ 2588 if (mv_handle_dev_err(ap, edma_err_cause)) 2589 return; 2590 } 2591 2592 qc = mv_get_active_qc(ap); 2593 ata_ehi_clear_desc(ehi); 2594 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", 2595 edma_err_cause, pp->pp_flags); 2596 2597 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { 2598 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); 2599 if (fis_cause & FIS_IRQ_CAUSE_AN) { 2600 u32 ec = edma_err_cause & 2601 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); 2602 sata_async_notification(ap); 2603 if (!ec) 2604 return; /* Just an AN; no need for the nukes */ 2605 ata_ehi_push_desc(ehi, "SDB notify"); 2606 } 2607 } 2608 /* 2609 * All generations share these EDMA error cause bits: 2610 */ 2611 if (edma_err_cause & EDMA_ERR_DEV) { 2612 err_mask |= AC_ERR_DEV; 2613 action |= ATA_EH_RESET; 2614 ata_ehi_push_desc(ehi, "dev error"); 2615 } 2616 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | 2617 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | 2618 EDMA_ERR_INTRL_PAR)) { 2619 err_mask |= AC_ERR_ATA_BUS; 2620 action |= ATA_EH_RESET; 2621 ata_ehi_push_desc(ehi, "parity error"); 2622 } 2623 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { 2624 ata_ehi_hotplugged(ehi); 2625 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? 2626 "dev disconnect" : "dev connect"); 2627 action |= ATA_EH_RESET; 2628 } 2629 2630 /* 2631 * Gen-I has a different SELF_DIS bit, 2632 * different FREEZE bits, and no SERR bit: 2633 */ 2634 if (IS_GEN_I(hpriv)) { 2635 eh_freeze_mask = EDMA_EH_FREEZE_5; 2636 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { 2637 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 2638 ata_ehi_push_desc(ehi, "EDMA self-disable"); 2639 } 2640 } else { 2641 eh_freeze_mask = EDMA_EH_FREEZE; 2642 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 2643 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 2644 ata_ehi_push_desc(ehi, "EDMA self-disable"); 2645 } 2646 if (edma_err_cause & EDMA_ERR_SERR) { 2647 ata_ehi_push_desc(ehi, "SError=%08x", serr); 2648 err_mask |= AC_ERR_ATA_BUS; 2649 action |= ATA_EH_RESET; 2650 } 2651 } 2652 2653 if (!err_mask) { 2654 err_mask = AC_ERR_OTHER; 2655 action |= ATA_EH_RESET; 2656 } 2657 2658 ehi->serror |= serr; 2659 ehi->action |= action; 2660 2661 if (qc) 2662 qc->err_mask |= err_mask; 2663 else 2664 ehi->err_mask |= err_mask; 2665 2666 if (err_mask == AC_ERR_DEV) { 2667 /* 2668 * Cannot do ata_port_freeze() here, 2669 * because it would kill PIO access, 2670 * which is needed for further diagnosis. 2671 */ 2672 mv_eh_freeze(ap); 2673 abort = 1; 2674 } else if (edma_err_cause & eh_freeze_mask) { 2675 /* 2676 * Note to self: ata_port_freeze() calls ata_port_abort() 2677 */ 2678 ata_port_freeze(ap); 2679 } else { 2680 abort = 1; 2681 } 2682 2683 if (abort) { 2684 if (qc) 2685 ata_link_abort(qc->dev->link); 2686 else 2687 ata_port_abort(ap); 2688 } 2689} 2690 2691static void mv_process_crpb_response(struct ata_port *ap, 2692 struct mv_crpb *response, unsigned int tag, int ncq_enabled) 2693{ 2694 u8 ata_status; 2695 u16 edma_status = le16_to_cpu(response->flags); 2696 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 2697 2698 if (unlikely(!qc)) { 2699 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", 2700 __func__, tag); 2701 return; 2702 } 2703 2704 /* 2705 * edma_status from a response queue entry: 2706 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only). 2707 * MSB is saved ATA status from command completion. 2708 */ 2709 if (!ncq_enabled) { 2710 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV; 2711 if (err_cause) { 2712 /* 2713 * Error will be seen/handled by 2714 * mv_err_intr(). So do nothing at all here. 2715 */ 2716 return; 2717 } 2718 } 2719 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; 2720 if (!ac_err_mask(ata_status)) 2721 ata_qc_complete(qc); 2722 /* else: leave it for mv_err_intr() */ 2723} 2724 2725static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) 2726{ 2727 void __iomem *port_mmio = mv_ap_base(ap); 2728 struct mv_host_priv *hpriv = ap->host->private_data; 2729 u32 in_index; 2730 bool work_done = false; 2731 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); 2732 2733 /* Get the hardware queue position index */ 2734 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR) 2735 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 2736 2737 /* Process new responses from since the last time we looked */ 2738 while (in_index != pp->resp_idx) { 2739 unsigned int tag; 2740 struct mv_crpb *response = &pp->crpb[pp->resp_idx]; 2741 2742 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK; 2743 2744 if (IS_GEN_I(hpriv)) { 2745 /* 50xx: no NCQ, only one command active at a time */ 2746 tag = ap->link.active_tag; 2747 } else { 2748 /* Gen II/IIE: get command tag from CRPB entry */ 2749 tag = le16_to_cpu(response->id) & 0x1f; 2750 } 2751 mv_process_crpb_response(ap, response, tag, ncq_enabled); 2752 work_done = true; 2753 } 2754 2755 /* Update the software queue position index in hardware */ 2756 if (work_done) 2757 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | 2758 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), 2759 port_mmio + EDMA_RSP_Q_OUT_PTR); 2760} 2761 2762static void mv_port_intr(struct ata_port *ap, u32 port_cause) 2763{ 2764 struct mv_port_priv *pp; 2765 int edma_was_enabled; 2766 2767 /* 2768 * Grab a snapshot of the EDMA_EN flag setting, 2769 * so that we have a consistent view for this port, 2770 * even if something we call of our routines changes it. 2771 */ 2772 pp = ap->private_data; 2773 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); 2774 /* 2775 * Process completed CRPB response(s) before other events. 2776 */ 2777 if (edma_was_enabled && (port_cause & DONE_IRQ)) { 2778 mv_process_crpb_entries(ap, pp); 2779 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) 2780 mv_handle_fbs_ncq_dev_err(ap); 2781 } 2782 /* 2783 * Handle chip-reported errors, or continue on to handle PIO. 2784 */ 2785 if (unlikely(port_cause & ERR_IRQ)) { 2786 mv_err_intr(ap); 2787 } else if (!edma_was_enabled) { 2788 struct ata_queued_cmd *qc = mv_get_active_qc(ap); 2789 if (qc) 2790 ata_bmdma_port_intr(ap, qc); 2791 else 2792 mv_unexpected_intr(ap, edma_was_enabled); 2793 } 2794} 2795 2796/** 2797 * mv_host_intr - Handle all interrupts on the given host controller 2798 * @host: host specific structure 2799 * @main_irq_cause: Main interrupt cause register for the chip. 2800 * 2801 * LOCKING: 2802 * Inherited from caller. 2803 */ 2804static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) 2805{ 2806 struct mv_host_priv *hpriv = host->private_data; 2807 void __iomem *mmio = hpriv->base, *hc_mmio; 2808 unsigned int handled = 0, port; 2809 2810 /* If asserted, clear the "all ports" IRQ coalescing bit */ 2811 if (main_irq_cause & ALL_PORTS_COAL_DONE) 2812 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE); 2813 2814 for (port = 0; port < hpriv->n_ports; port++) { 2815 struct ata_port *ap = host->ports[port]; 2816 unsigned int p, shift, hardport, port_cause; 2817 2818 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2819 /* 2820 * Each hc within the host has its own hc_irq_cause register, 2821 * where the interrupting ports bits get ack'd. 2822 */ 2823 if (hardport == 0) { /* first port on this hc ? */ 2824 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; 2825 u32 port_mask, ack_irqs; 2826 /* 2827 * Skip this entire hc if nothing pending for any ports 2828 */ 2829 if (!hc_cause) { 2830 port += MV_PORTS_PER_HC - 1; 2831 continue; 2832 } 2833 /* 2834 * We don't need/want to read the hc_irq_cause register, 2835 * because doing so hurts performance, and 2836 * main_irq_cause already gives us everything we need. 2837 * 2838 * But we do have to *write* to the hc_irq_cause to ack 2839 * the ports that we are handling this time through. 2840 * 2841 * This requires that we create a bitmap for those 2842 * ports which interrupted us, and use that bitmap 2843 * to ack (only) those ports via hc_irq_cause. 2844 */ 2845 ack_irqs = 0; 2846 if (hc_cause & PORTS_0_3_COAL_DONE) 2847 ack_irqs = HC_COAL_IRQ; 2848 for (p = 0; p < MV_PORTS_PER_HC; ++p) { 2849 if ((port + p) >= hpriv->n_ports) 2850 break; 2851 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); 2852 if (hc_cause & port_mask) 2853 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; 2854 } 2855 hc_mmio = mv_hc_base_from_port(mmio, port); 2856 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE); 2857 handled = 1; 2858 } 2859 /* 2860 * Handle interrupts signalled for this port: 2861 */ 2862 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); 2863 if (port_cause) 2864 mv_port_intr(ap, port_cause); 2865 } 2866 return handled; 2867} 2868 2869static int mv_pci_error(struct ata_host *host, void __iomem *mmio) 2870{ 2871 struct mv_host_priv *hpriv = host->private_data; 2872 struct ata_port *ap; 2873 struct ata_queued_cmd *qc; 2874 struct ata_eh_info *ehi; 2875 unsigned int i, err_mask, printed = 0; 2876 u32 err_cause; 2877 2878 err_cause = readl(mmio + hpriv->irq_cause_offset); 2879 2880 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", 2881 err_cause); 2882 2883 DPRINTK("All regs @ PCI error\n"); 2884 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); 2885 2886 writelfl(0, mmio + hpriv->irq_cause_offset); 2887 2888 for (i = 0; i < host->n_ports; i++) { 2889 ap = host->ports[i]; 2890 if (!ata_link_offline(&ap->link)) { 2891 ehi = &ap->link.eh_info; 2892 ata_ehi_clear_desc(ehi); 2893 if (!printed++) 2894 ata_ehi_push_desc(ehi, 2895 "PCI err cause 0x%08x", err_cause); 2896 err_mask = AC_ERR_HOST_BUS; 2897 ehi->action = ATA_EH_RESET; 2898 qc = ata_qc_from_tag(ap, ap->link.active_tag); 2899 if (qc) 2900 qc->err_mask |= err_mask; 2901 else 2902 ehi->err_mask |= err_mask; 2903 2904 ata_port_freeze(ap); 2905 } 2906 } 2907 return 1; /* handled */ 2908} 2909 2910/** 2911 * mv_interrupt - Main interrupt event handler 2912 * @irq: unused 2913 * @dev_instance: private data; in this case the host structure 2914 * 2915 * Read the read only register to determine if any host 2916 * controllers have pending interrupts. If so, call lower level 2917 * routine to handle. Also check for PCI errors which are only 2918 * reported here. 2919 * 2920 * LOCKING: 2921 * This routine holds the host lock while processing pending 2922 * interrupts. 2923 */ 2924static irqreturn_t mv_interrupt(int irq, void *dev_instance) 2925{ 2926 struct ata_host *host = dev_instance; 2927 struct mv_host_priv *hpriv = host->private_data; 2928 unsigned int handled = 0; 2929 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI; 2930 u32 main_irq_cause, pending_irqs; 2931 2932 spin_lock(&host->lock); 2933 2934 /* for MSI: block new interrupts while in here */ 2935 if (using_msi) 2936 mv_write_main_irq_mask(0, hpriv); 2937 2938 main_irq_cause = readl(hpriv->main_irq_cause_addr); 2939 pending_irqs = main_irq_cause & hpriv->main_irq_mask; 2940 /* 2941 * Deal with cases where we either have nothing pending, or have read 2942 * a bogus register value which can indicate HW removal or PCI fault. 2943 */ 2944 if (pending_irqs && main_irq_cause != 0xffffffffU) { 2945 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) 2946 handled = mv_pci_error(host, hpriv->base); 2947 else 2948 handled = mv_host_intr(host, pending_irqs); 2949 } 2950 2951 /* for MSI: unmask; interrupt cause bits will retrigger now */ 2952 if (using_msi) 2953 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv); 2954 2955 spin_unlock(&host->lock); 2956 2957 return IRQ_RETVAL(handled); 2958} 2959 2960static unsigned int mv5_scr_offset(unsigned int sc_reg_in) 2961{ 2962 unsigned int ofs; 2963 2964 switch (sc_reg_in) { 2965 case SCR_STATUS: 2966 case SCR_ERROR: 2967 case SCR_CONTROL: 2968 ofs = sc_reg_in * sizeof(u32); 2969 break; 2970 default: 2971 ofs = 0xffffffffU; 2972 break; 2973 } 2974 return ofs; 2975} 2976 2977static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) 2978{ 2979 struct mv_host_priv *hpriv = link->ap->host->private_data; 2980 void __iomem *mmio = hpriv->base; 2981 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); 2982 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2983 2984 if (ofs != 0xffffffffU) { 2985 *val = readl(addr + ofs); 2986 return 0; 2987 } else 2988 return -EINVAL; 2989} 2990 2991static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) 2992{ 2993 struct mv_host_priv *hpriv = link->ap->host->private_data; 2994 void __iomem *mmio = hpriv->base; 2995 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); 2996 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2997 2998 if (ofs != 0xffffffffU) { 2999 writelfl(val, addr + ofs); 3000 return 0; 3001 } else 3002 return -EINVAL; 3003} 3004 3005static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) 3006{ 3007 struct pci_dev *pdev = to_pci_dev(host->dev); 3008 int early_5080; 3009 3010 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); 3011 3012 if (!early_5080) { 3013 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 3014 tmp |= (1 << 0); 3015 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 3016 } 3017 3018 mv_reset_pci_bus(host, mmio); 3019} 3020 3021static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 3022{ 3023 writel(0x0fcfffff, mmio + FLASH_CTL); 3024} 3025 3026static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 3027 void __iomem *mmio) 3028{ 3029 void __iomem *phy_mmio = mv5_phy_base(mmio, idx); 3030 u32 tmp; 3031 3032 tmp = readl(phy_mmio + MV5_PHY_MODE); 3033 3034 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */ 3035 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */ 3036} 3037 3038static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 3039{ 3040 u32 tmp; 3041 3042 writel(0, mmio + GPIO_PORT_CTL); 3043 3044 3045 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 3046 tmp |= ~(1 << 0); 3047 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 3048} 3049 3050static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 3051 unsigned int port) 3052{ 3053 void __iomem *phy_mmio = mv5_phy_base(mmio, port); 3054 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5); 3055 u32 tmp; 3056 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); 3057 3058 if (fix_apm_sq) { 3059 tmp = readl(phy_mmio + MV5_LTMODE); 3060 tmp |= (1 << 19); 3061 writel(tmp, phy_mmio + MV5_LTMODE); 3062 3063 tmp = readl(phy_mmio + MV5_PHY_CTL); 3064 tmp &= ~0x3; 3065 tmp |= 0x1; 3066 writel(tmp, phy_mmio + MV5_PHY_CTL); 3067 } 3068 3069 tmp = readl(phy_mmio + MV5_PHY_MODE); 3070 tmp &= ~mask; 3071 tmp |= hpriv->signal[port].pre; 3072 tmp |= hpriv->signal[port].amps; 3073 writel(tmp, phy_mmio + MV5_PHY_MODE); 3074} 3075 3076 3077#undef ZERO 3078#define ZERO(reg) writel(0, port_mmio + (reg)) 3079static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, 3080 unsigned int port) 3081{ 3082 void __iomem *port_mmio = mv_port_base(mmio, port); 3083 3084 mv_reset_channel(hpriv, mmio, port); 3085 3086 ZERO(0x028); /* command */ 3087 writel(0x11f, port_mmio + EDMA_CFG); 3088 ZERO(0x004); /* timer */ 3089 ZERO(0x008); /* irq err cause */ 3090 ZERO(0x00c); /* irq err mask */ 3091 ZERO(0x010); /* rq bah */ 3092 ZERO(0x014); /* rq inp */ 3093 ZERO(0x018); /* rq outp */ 3094 ZERO(0x01c); /* respq bah */ 3095 ZERO(0x024); /* respq outp */ 3096 ZERO(0x020); /* respq inp */ 3097 ZERO(0x02c); /* test control */ 3098 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); 3099} 3100#undef ZERO 3101 3102#define ZERO(reg) writel(0, hc_mmio + (reg)) 3103static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 3104 unsigned int hc) 3105{ 3106 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 3107 u32 tmp; 3108 3109 ZERO(0x00c); 3110 ZERO(0x010); 3111 ZERO(0x014); 3112 ZERO(0x018); 3113 3114 tmp = readl(hc_mmio + 0x20); 3115 tmp &= 0x1c1c1c1c; 3116 tmp |= 0x03030303; 3117 writel(tmp, hc_mmio + 0x20); 3118} 3119#undef ZERO 3120 3121static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 3122 unsigned int n_hc) 3123{ 3124 unsigned int hc, port; 3125 3126 for (hc = 0; hc < n_hc; hc++) { 3127 for (port = 0; port < MV_PORTS_PER_HC; port++) 3128 mv5_reset_hc_port(hpriv, mmio, 3129 (hc * MV_PORTS_PER_HC) + port); 3130 3131 mv5_reset_one_hc(hpriv, mmio, hc); 3132 } 3133 3134 return 0; 3135} 3136 3137#undef ZERO 3138#define ZERO(reg) writel(0, mmio + (reg)) 3139static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) 3140{ 3141 struct mv_host_priv *hpriv = host->private_data; 3142 u32 tmp; 3143 3144 tmp = readl(mmio + MV_PCI_MODE); 3145 tmp &= 0xff00ffff; 3146 writel(tmp, mmio + MV_PCI_MODE); 3147 3148 ZERO(MV_PCI_DISC_TIMER); 3149 ZERO(MV_PCI_MSI_TRIGGER); 3150 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); 3151 ZERO(MV_PCI_SERR_MASK); 3152 ZERO(hpriv->irq_cause_offset); 3153 ZERO(hpriv->irq_mask_offset); 3154 ZERO(MV_PCI_ERR_LOW_ADDRESS); 3155 ZERO(MV_PCI_ERR_HIGH_ADDRESS); 3156 ZERO(MV_PCI_ERR_ATTRIBUTE); 3157 ZERO(MV_PCI_ERR_COMMAND); 3158} 3159#undef ZERO 3160 3161static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 3162{ 3163 u32 tmp; 3164 3165 mv5_reset_flash(hpriv, mmio); 3166 3167 tmp = readl(mmio + GPIO_PORT_CTL); 3168 tmp &= 0x3; 3169 tmp |= (1 << 5) | (1 << 6); 3170 writel(tmp, mmio + GPIO_PORT_CTL); 3171} 3172 3173/** 3174 * mv6_reset_hc - Perform the 6xxx global soft reset 3175 * @mmio: base address of the HBA 3176 * 3177 * This routine only applies to 6xxx parts. 3178 * 3179 * LOCKING: 3180 * Inherited from caller. 3181 */ 3182static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 3183 unsigned int n_hc) 3184{ 3185 void __iomem *reg = mmio + PCI_MAIN_CMD_STS; 3186 int i, rc = 0; 3187 u32 t; 3188 3189 /* Following procedure defined in PCI "main command and status 3190 * register" table. 3191 */ 3192 t = readl(reg); 3193 writel(t | STOP_PCI_MASTER, reg); 3194 3195 for (i = 0; i < 1000; i++) { 3196 udelay(1); 3197 t = readl(reg); 3198 if (PCI_MASTER_EMPTY & t) 3199 break; 3200 } 3201 if (!(PCI_MASTER_EMPTY & t)) { 3202 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); 3203 rc = 1; 3204 goto done; 3205 } 3206 3207 /* set reset */ 3208 i = 5; 3209 do { 3210 writel(t | GLOB_SFT_RST, reg); 3211 t = readl(reg); 3212 udelay(1); 3213 } while (!(GLOB_SFT_RST & t) && (i-- > 0)); 3214 3215 if (!(GLOB_SFT_RST & t)) { 3216 printk(KERN_ERR DRV_NAME ": can't set global reset\n"); 3217 rc = 1; 3218 goto done; 3219 } 3220 3221 /* clear reset and *reenable the PCI master* (not mentioned in spec) */ 3222 i = 5; 3223 do { 3224 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); 3225 t = readl(reg); 3226 udelay(1); 3227 } while ((GLOB_SFT_RST & t) && (i-- > 0)); 3228 3229 if (GLOB_SFT_RST & t) { 3230 printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); 3231 rc = 1; 3232 } 3233done: 3234 return rc; 3235} 3236 3237static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, 3238 void __iomem *mmio) 3239{ 3240 void __iomem *port_mmio; 3241 u32 tmp; 3242 3243 tmp = readl(mmio + RESET_CFG); 3244 if ((tmp & (1 << 0)) == 0) { 3245 hpriv->signal[idx].amps = 0x7 << 8; 3246 hpriv->signal[idx].pre = 0x1 << 5; 3247 return; 3248 } 3249 3250 port_mmio = mv_port_base(mmio, idx); 3251 tmp = readl(port_mmio + PHY_MODE2); 3252 3253 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ 3254 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ 3255} 3256 3257static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 3258{ 3259 writel(0x00000060, mmio + GPIO_PORT_CTL); 3260} 3261 3262static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 3263 unsigned int port) 3264{ 3265 void __iomem *port_mmio = mv_port_base(mmio, port); 3266 3267 u32 hp_flags = hpriv->hp_flags; 3268 int fix_phy_mode2 = 3269 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 3270 int fix_phy_mode4 = 3271 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 3272 u32 m2, m3; 3273 3274 if (fix_phy_mode2) { 3275 m2 = readl(port_mmio + PHY_MODE2); 3276 m2 &= ~(1 << 16); 3277 m2 |= (1 << 31); 3278 writel(m2, port_mmio + PHY_MODE2); 3279 3280 udelay(200); 3281 3282 m2 = readl(port_mmio + PHY_MODE2); 3283 m2 &= ~((1 << 16) | (1 << 31)); 3284 writel(m2, port_mmio + PHY_MODE2); 3285 3286 udelay(200); 3287 } 3288 3289 /* 3290 * Gen-II/IIe PHY_MODE3 errata RM#2: 3291 * Achieves better receiver noise performance than the h/w default: 3292 */ 3293 m3 = readl(port_mmio + PHY_MODE3); 3294 m3 = (m3 & 0x1f) | (0x5555601 << 5); 3295 3296 /* Guideline 88F5182 (GL# SATA-S11) */ 3297 if (IS_SOC(hpriv)) 3298 m3 &= ~0x1c; 3299 3300 if (fix_phy_mode4) { 3301 u32 m4 = readl(port_mmio + PHY_MODE4); 3302 if (IS_GEN_IIE(hpriv)) 3303 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; 3304 else 3305 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; 3306 writel(m4, port_mmio + PHY_MODE4); 3307 } 3308 writel(m3, port_mmio + PHY_MODE3); 3309 3310 /* Revert values of pre-emphasis and signal amps to the saved ones */ 3311 m2 = readl(port_mmio + PHY_MODE2); 3312 3313 m2 &= ~MV_M2_PREAMP_MASK; 3314 m2 |= hpriv->signal[port].amps; 3315 m2 |= hpriv->signal[port].pre; 3316 m2 &= ~(1 << 16); 3317 3318 /* according to mvSata 3.6.1, some IIE values are fixed */ 3319 if (IS_GEN_IIE(hpriv)) { 3320 m2 &= ~0xC30FF01F; 3321 m2 |= 0x0000900F; 3322 } 3323 3324 writel(m2, port_mmio + PHY_MODE2); 3325} 3326 3327/* TODO: use the generic LED interface to configure the SATA Presence */ 3328/* & Acitivy LEDs on the board */ 3329static void mv_soc_enable_leds(struct mv_host_priv *hpriv, 3330 void __iomem *mmio) 3331{ 3332 return; 3333} 3334 3335static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, 3336 void __iomem *mmio) 3337{ 3338 void __iomem *port_mmio; 3339 u32 tmp; 3340 3341 port_mmio = mv_port_base(mmio, idx); 3342 tmp = readl(port_mmio + PHY_MODE2); 3343 3344 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ 3345 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ 3346} 3347 3348#undef ZERO 3349#define ZERO(reg) writel(0, port_mmio + (reg)) 3350static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, 3351 void __iomem *mmio, unsigned int port) 3352{ 3353 void __iomem *port_mmio = mv_port_base(mmio, port); 3354 3355 mv_reset_channel(hpriv, mmio, port); 3356 3357 ZERO(0x028); /* command */ 3358 writel(0x101f, port_mmio + EDMA_CFG); 3359 ZERO(0x004); /* timer */ 3360 ZERO(0x008); /* irq err cause */ 3361 ZERO(0x00c); /* irq err mask */ 3362 ZERO(0x010); /* rq bah */ 3363 ZERO(0x014); /* rq inp */ 3364 ZERO(0x018); /* rq outp */ 3365 ZERO(0x01c); /* respq bah */ 3366 ZERO(0x024); /* respq outp */ 3367 ZERO(0x020); /* respq inp */ 3368 ZERO(0x02c); /* test control */ 3369 writel(0x800, port_mmio + EDMA_IORDY_TMOUT); 3370} 3371 3372#undef ZERO 3373 3374#define ZERO(reg) writel(0, hc_mmio + (reg)) 3375static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv, 3376 void __iomem *mmio) 3377{ 3378 void __iomem *hc_mmio = mv_hc_base(mmio, 0); 3379 3380 ZERO(0x00c); 3381 ZERO(0x010); 3382 ZERO(0x014); 3383 3384} 3385 3386#undef ZERO 3387 3388static int mv_soc_reset_hc(struct mv_host_priv *hpriv, 3389 void __iomem *mmio, unsigned int n_hc) 3390{ 3391 unsigned int port; 3392 3393 for (port = 0; port < hpriv->n_ports; port++) 3394 mv_soc_reset_hc_port(hpriv, mmio, port); 3395 3396 mv_soc_reset_one_hc(hpriv, mmio); 3397 3398 return 0; 3399} 3400 3401static void mv_soc_reset_flash(struct mv_host_priv *hpriv, 3402 void __iomem *mmio) 3403{ 3404 return; 3405} 3406 3407static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) 3408{ 3409 return; 3410} 3411 3412static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, 3413 void __iomem *mmio, unsigned int port) 3414{ 3415 void __iomem *port_mmio = mv_port_base(mmio, port); 3416 u32 reg; 3417 3418 reg = readl(port_mmio + PHY_MODE3); 3419 reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */ 3420 reg |= (0x1 << 27); 3421 reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */ 3422 reg |= (0x1 << 29); 3423 writel(reg, port_mmio + PHY_MODE3); 3424 3425 reg = readl(port_mmio + PHY_MODE4); 3426 reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */ 3427 reg |= (0x1 << 16); 3428 writel(reg, port_mmio + PHY_MODE4); 3429 3430 reg = readl(port_mmio + PHY_MODE9_GEN2); 3431 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ 3432 reg |= 0x8; 3433 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ 3434 writel(reg, port_mmio + PHY_MODE9_GEN2); 3435 3436 reg = readl(port_mmio + PHY_MODE9_GEN1); 3437 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ 3438 reg |= 0x8; 3439 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ 3440 writel(reg, port_mmio + PHY_MODE9_GEN1); 3441} 3442 3443/** 3444 * soc_is_65 - check if the soc is 65 nano device 3445 * 3446 * Detect the type of the SoC, this is done by reading the PHYCFG_OFS 3447 * register, this register should contain non-zero value and it exists only 3448 * in the 65 nano devices, when reading it from older devices we get 0. 3449 */ 3450static bool soc_is_65n(struct mv_host_priv *hpriv) 3451{ 3452 void __iomem *port0_mmio = mv_port_base(hpriv->base, 0); 3453 3454 if (readl(port0_mmio + PHYCFG_OFS)) 3455 return true; 3456 return false; 3457} 3458 3459static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) 3460{ 3461 u32 ifcfg = readl(port_mmio + SATA_IFCFG); 3462 3463 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ 3464 if (want_gen2i) 3465 ifcfg |= (1 << 7); /* enable gen2i speed */ 3466 writelfl(ifcfg, port_mmio + SATA_IFCFG); 3467} 3468 3469static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, 3470 unsigned int port_no) 3471{ 3472 void __iomem *port_mmio = mv_port_base(mmio, port_no); 3473 3474 /* 3475 * The datasheet warns against setting EDMA_RESET when EDMA is active 3476 * (but doesn't say what the problem might be). So we first try 3477 * to disable the EDMA engine before doing the EDMA_RESET operation. 3478 */ 3479 mv_stop_edma_engine(port_mmio); 3480 writelfl(EDMA_RESET, port_mmio + EDMA_CMD); 3481 3482 if (!IS_GEN_I(hpriv)) { 3483 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ 3484 mv_setup_ifcfg(port_mmio, 1); 3485 } 3486 /* 3487 * Strobing EDMA_RESET here causes a hard reset of the SATA transport, 3488 * link, and physical layers. It resets all SATA interface registers 3489 * (except for SATA_IFCFG), and issues a COMRESET to the dev. 3490 */ 3491 writelfl(EDMA_RESET, port_mmio + EDMA_CMD); 3492 udelay(25); /* allow reset propagation */ 3493 writelfl(0, port_mmio + EDMA_CMD); 3494 3495 hpriv->ops->phy_errata(hpriv, mmio, port_no); 3496 3497 if (IS_GEN_I(hpriv)) 3498 mdelay(1); 3499} 3500 3501static void mv_pmp_select(struct ata_port *ap, int pmp) 3502{ 3503 if (sata_pmp_supported(ap)) { 3504 void __iomem *port_mmio = mv_ap_base(ap); 3505 u32 reg = readl(port_mmio + SATA_IFCTL); 3506 int old = reg & 0xf; 3507 3508 if (old != pmp) { 3509 reg = (reg & ~0xf) | pmp; 3510 writelfl(reg, port_mmio + SATA_IFCTL); 3511 } 3512 } 3513} 3514 3515static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, 3516 unsigned long deadline) 3517{ 3518 mv_pmp_select(link->ap, sata_srst_pmp(link)); 3519 return sata_std_hardreset(link, class, deadline); 3520} 3521 3522static int mv_softreset(struct ata_link *link, unsigned int *class, 3523 unsigned long deadline) 3524{ 3525 mv_pmp_select(link->ap, sata_srst_pmp(link)); 3526 return ata_sff_softreset(link, class, deadline); 3527} 3528 3529static int mv_hardreset(struct ata_link *link, unsigned int *class, 3530 unsigned long deadline) 3531{ 3532 struct ata_port *ap = link->ap; 3533 struct mv_host_priv *hpriv = ap->host->private_data; 3534 struct mv_port_priv *pp = ap->private_data; 3535 void __iomem *mmio = hpriv->base; 3536 int rc, attempts = 0, extra = 0; 3537 u32 sstatus; 3538 bool online; 3539 3540 mv_reset_channel(hpriv, mmio, ap->port_no); 3541 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 3542 pp->pp_flags &= 3543 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); 3544 3545 do { 3546 const unsigned long *timing = 3547 sata_ehc_deb_timing(&link->eh_context); 3548 3549 rc = sata_link_hardreset(link, timing, deadline + extra, 3550 &online, NULL); 3551 rc = online ? -EAGAIN : rc; 3552 if (rc) 3553 return rc; 3554 sata_scr_read(link, SCR_STATUS, &sstatus); 3555 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { 3556 /* Force 1.5gb/s link speed and try again */ 3557 mv_setup_ifcfg(mv_ap_base(ap), 0); 3558 if (time_after(jiffies + HZ, deadline)) 3559 extra = HZ; /* only extend it once, max */ 3560 } 3561 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); 3562 mv_save_cached_regs(ap); 3563 mv_edma_cfg(ap, 0, 0); 3564 3565 return rc; 3566} 3567 3568static void mv_eh_freeze(struct ata_port *ap) 3569{ 3570 mv_stop_edma(ap); 3571 mv_enable_port_irqs(ap, 0); 3572} 3573 3574static void mv_eh_thaw(struct ata_port *ap) 3575{ 3576 struct mv_host_priv *hpriv = ap->host->private_data; 3577 unsigned int port = ap->port_no; 3578 unsigned int hardport = mv_hardport_from_port(port); 3579 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); 3580 void __iomem *port_mmio = mv_ap_base(ap); 3581 u32 hc_irq_cause; 3582 3583 /* clear EDMA errors on this port */ 3584 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE); 3585 3586 /* clear pending irq events */ 3587 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); 3588 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE); 3589 3590 mv_enable_port_irqs(ap, ERR_IRQ); 3591} 3592 3593/** 3594 * mv_port_init - Perform some early initialization on a single port. 3595 * @port: libata data structure storing shadow register addresses 3596 * @port_mmio: base address of the port 3597 * 3598 * Initialize shadow register mmio addresses, clear outstanding 3599 * interrupts on the port, and unmask interrupts for the future 3600 * start of the port. 3601 * 3602 * LOCKING: 3603 * Inherited from caller. 3604 */ 3605static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) 3606{ 3607 void __iomem *serr, *shd_base = port_mmio + SHD_BLK; 3608 3609 /* PIO related setup 3610 */ 3611 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); 3612 port->error_addr = 3613 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); 3614 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); 3615 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); 3616 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); 3617 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); 3618 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); 3619 port->status_addr = 3620 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); 3621 /* special case: control/altstatus doesn't have ATA_REG_ address */ 3622 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST; 3623 3624 /* Clear any currently outstanding port interrupt conditions */ 3625 serr = port_mmio + mv_scr_offset(SCR_ERROR); 3626 writelfl(readl(serr), serr); 3627 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); 3628 3629 /* unmask all non-transient EDMA error interrupts */ 3630 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK); 3631 3632 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 3633 readl(port_mmio + EDMA_CFG), 3634 readl(port_mmio + EDMA_ERR_IRQ_CAUSE), 3635 readl(port_mmio + EDMA_ERR_IRQ_MASK)); 3636} 3637 3638static unsigned int mv_in_pcix_mode(struct ata_host *host) 3639{ 3640 struct mv_host_priv *hpriv = host->private_data; 3641 void __iomem *mmio = hpriv->base; 3642 u32 reg; 3643 3644 if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) 3645 return 0; /* not PCI-X capable */ 3646 reg = readl(mmio + MV_PCI_MODE); 3647 if ((reg & MV_PCI_MODE_MASK) == 0) 3648 return 0; /* conventional PCI mode */ 3649 return 1; /* chip is in PCI-X mode */ 3650} 3651 3652static int mv_pci_cut_through_okay(struct ata_host *host) 3653{ 3654 struct mv_host_priv *hpriv = host->private_data; 3655 void __iomem *mmio = hpriv->base; 3656 u32 reg; 3657 3658 if (!mv_in_pcix_mode(host)) { 3659 reg = readl(mmio + MV_PCI_COMMAND); 3660 if (reg & MV_PCI_COMMAND_MRDTRIG) 3661 return 0; /* not okay */ 3662 } 3663 return 1; /* okay */ 3664} 3665 3666static void mv_60x1b2_errata_pci7(struct ata_host *host) 3667{ 3668 struct mv_host_priv *hpriv = host->private_data; 3669 void __iomem *mmio = hpriv->base; 3670 3671 if (mv_in_pcix_mode(host)) { 3672 u32 reg = readl(mmio + MV_PCI_COMMAND); 3673 writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND); 3674 } 3675} 3676 3677static int mv_chip_id(struct ata_host *host, unsigned int board_idx) 3678{ 3679 struct pci_dev *pdev = to_pci_dev(host->dev); 3680 struct mv_host_priv *hpriv = host->private_data; 3681 u32 hp_flags = hpriv->hp_flags; 3682 3683 switch (board_idx) { 3684 case chip_5080: 3685 hpriv->ops = &mv5xxx_ops; 3686 hp_flags |= MV_HP_GEN_I; 3687 3688 switch (pdev->revision) { 3689 case 0x1: 3690 hp_flags |= MV_HP_ERRATA_50XXB0; 3691 break; 3692 case 0x3: 3693 hp_flags |= MV_HP_ERRATA_50XXB2; 3694 break; 3695 default: 3696 dev_printk(KERN_WARNING, &pdev->dev, 3697 "Applying 50XXB2 workarounds to unknown rev\n"); 3698 hp_flags |= MV_HP_ERRATA_50XXB2; 3699 break; 3700 } 3701 break; 3702 3703 case chip_504x: 3704 case chip_508x: 3705 hpriv->ops = &mv5xxx_ops; 3706 hp_flags |= MV_HP_GEN_I; 3707 3708 switch (pdev->revision) { 3709 case 0x0: 3710 hp_flags |= MV_HP_ERRATA_50XXB0; 3711 break; 3712 case 0x3: 3713 hp_flags |= MV_HP_ERRATA_50XXB2; 3714 break; 3715 default: 3716 dev_printk(KERN_WARNING, &pdev->dev, 3717 "Applying B2 workarounds to unknown rev\n"); 3718 hp_flags |= MV_HP_ERRATA_50XXB2; 3719 break; 3720 } 3721 break; 3722 3723 case chip_604x: 3724 case chip_608x: 3725 hpriv->ops = &mv6xxx_ops; 3726 hp_flags |= MV_HP_GEN_II; 3727 3728 switch (pdev->revision) { 3729 case 0x7: 3730 mv_60x1b2_errata_pci7(host); 3731 hp_flags |= MV_HP_ERRATA_60X1B2; 3732 break; 3733 case 0x9: 3734 hp_flags |= MV_HP_ERRATA_60X1C0; 3735 break; 3736 default: 3737 dev_printk(KERN_WARNING, &pdev->dev, 3738 "Applying B2 workarounds to unknown rev\n"); 3739 hp_flags |= MV_HP_ERRATA_60X1B2; 3740 break; 3741 } 3742 break; 3743 3744 case chip_7042: 3745 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; 3746 if (pdev->vendor == PCI_VENDOR_ID_TTI && 3747 (pdev->device == 0x2300 || pdev->device == 0x2310)) 3748 { 3749 /* 3750 * Highpoint RocketRAID PCIe 23xx series cards: 3751 * 3752 * Unconfigured drives are treated as "Legacy" 3753 * by the BIOS, and it overwrites sector 8 with 3754 * a "Lgcy" metadata block prior to Linux boot. 3755 * 3756 * Configured drives (RAID or JBOD) leave sector 8 3757 * alone, but instead overwrite a high numbered 3758 * sector for the RAID metadata. This sector can 3759 * be determined exactly, by truncating the physical 3760 * drive capacity to a nice even GB value. 3761 * 3762 * RAID metadata is at: (dev->n_sectors & ~0xfffff) 3763 * 3764 * Warn the user, lest they think we're just buggy. 3765 */ 3766 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" 3767 " BIOS CORRUPTS DATA on all attached drives," 3768 " regardless of if/how they are configured." 3769 " BEWARE!\n"); 3770 printk(KERN_WARNING DRV_NAME ": For data safety, do not" 3771 " use sectors 8-9 on \"Legacy\" drives," 3772 " and avoid the final two gigabytes on" 3773 " all RocketRAID BIOS initialized drives.\n"); 3774 } 3775 /* drop through */ 3776 case chip_6042: 3777 hpriv->ops = &mv6xxx_ops; 3778 hp_flags |= MV_HP_GEN_IIE; 3779 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) 3780 hp_flags |= MV_HP_CUT_THROUGH; 3781 3782 switch (pdev->revision) { 3783 case 0x2: /* Rev.B0: the first/only public release */ 3784 hp_flags |= MV_HP_ERRATA_60X1C0; 3785 break; 3786 default: 3787 dev_printk(KERN_WARNING, &pdev->dev, 3788 "Applying 60X1C0 workarounds to unknown rev\n"); 3789 hp_flags |= MV_HP_ERRATA_60X1C0; 3790 break; 3791 } 3792 break; 3793 case chip_soc: 3794 if (soc_is_65n(hpriv)) 3795 hpriv->ops = &mv_soc_65n_ops; 3796 else 3797 hpriv->ops = &mv_soc_ops; 3798 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE | 3799 MV_HP_ERRATA_60X1C0; 3800 break; 3801 3802 default: 3803 dev_printk(KERN_ERR, host->dev, 3804 "BUG: invalid board index %u\n", board_idx); 3805 return 1; 3806 } 3807 3808 hpriv->hp_flags = hp_flags; 3809 if (hp_flags & MV_HP_PCIE) { 3810 hpriv->irq_cause_offset = PCIE_IRQ_CAUSE; 3811 hpriv->irq_mask_offset = PCIE_IRQ_MASK; 3812 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; 3813 } else { 3814 hpriv->irq_cause_offset = PCI_IRQ_CAUSE; 3815 hpriv->irq_mask_offset = PCI_IRQ_MASK; 3816 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; 3817 } 3818 3819 return 0; 3820} 3821 3822/** 3823 * mv_init_host - Perform some early initialization of the host. 3824 * @host: ATA host to initialize 3825 * 3826 * If possible, do an early global reset of the host. Then do 3827 * our port init and clear/unmask all/relevant host interrupts. 3828 * 3829 * LOCKING: 3830 * Inherited from caller. 3831 */ 3832static int mv_init_host(struct ata_host *host) 3833{ 3834 int rc = 0, n_hc, port, hc; 3835 struct mv_host_priv *hpriv = host->private_data; 3836 void __iomem *mmio = hpriv->base; 3837 3838 rc = mv_chip_id(host, hpriv->board_idx); 3839 if (rc) 3840 goto done; 3841 3842 if (IS_SOC(hpriv)) { 3843 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE; 3844 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK; 3845 } else { 3846 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE; 3847 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK; 3848 } 3849 3850 /* initialize shadow irq mask with register's value */ 3851 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr); 3852 3853 /* global interrupt mask: 0 == mask everything */ 3854 mv_set_main_irq_mask(host, ~0, 0); 3855 3856 n_hc = mv_get_hc_count(host->ports[0]->flags); 3857 3858 for (port = 0; port < host->n_ports; port++) 3859 if (hpriv->ops->read_preamp) 3860 hpriv->ops->read_preamp(hpriv, port, mmio); 3861 3862 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); 3863 if (rc) 3864 goto done; 3865 3866 hpriv->ops->reset_flash(hpriv, mmio); 3867 hpriv->ops->reset_bus(host, mmio); 3868 hpriv->ops->enable_leds(hpriv, mmio); 3869 3870 for (port = 0; port < host->n_ports; port++) { 3871 struct ata_port *ap = host->ports[port]; 3872 void __iomem *port_mmio = mv_port_base(mmio, port); 3873 3874 mv_port_init(&ap->ioaddr, port_mmio); 3875 } 3876 3877 for (hc = 0; hc < n_hc; hc++) { 3878 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 3879 3880 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " 3881 "(before clear)=0x%08x\n", hc, 3882 readl(hc_mmio + HC_CFG), 3883 readl(hc_mmio + HC_IRQ_CAUSE)); 3884 3885 /* Clear any currently outstanding hc interrupt conditions */ 3886 writelfl(0, hc_mmio + HC_IRQ_CAUSE); 3887 } 3888 3889 if (!IS_SOC(hpriv)) { 3890 /* Clear any currently outstanding host interrupt conditions */ 3891 writelfl(0, mmio + hpriv->irq_cause_offset); 3892 3893 /* and unmask interrupt generation for host regs */ 3894 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset); 3895 } 3896 3897 /* 3898 * enable only global host interrupts for now. 3899 * The per-port interrupts get done later as ports are set up. 3900 */ 3901 mv_set_main_irq_mask(host, 0, PCI_ERR); 3902 mv_set_irq_coalescing(host, irq_coalescing_io_count, 3903 irq_coalescing_usecs); 3904done: 3905 return rc; 3906} 3907 3908static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) 3909{ 3910 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, 3911 MV_CRQB_Q_SZ, 0); 3912 if (!hpriv->crqb_pool) 3913 return -ENOMEM; 3914 3915 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, 3916 MV_CRPB_Q_SZ, 0); 3917 if (!hpriv->crpb_pool) 3918 return -ENOMEM; 3919 3920 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, 3921 MV_SG_TBL_SZ, 0); 3922 if (!hpriv->sg_tbl_pool) 3923 return -ENOMEM; 3924 3925 return 0; 3926} 3927 3928static void mv_conf_mbus_windows(struct mv_host_priv *hpriv, 3929 struct mbus_dram_target_info *dram) 3930{ 3931 int i; 3932 3933 for (i = 0; i < 4; i++) { 3934 writel(0, hpriv->base + WINDOW_CTRL(i)); 3935 writel(0, hpriv->base + WINDOW_BASE(i)); 3936 } 3937 3938 for (i = 0; i < dram->num_cs; i++) { 3939 struct mbus_dram_window *cs = dram->cs + i; 3940 3941 writel(((cs->size - 1) & 0xffff0000) | 3942 (cs->mbus_attr << 8) | 3943 (dram->mbus_dram_target_id << 4) | 1, 3944 hpriv->base + WINDOW_CTRL(i)); 3945 writel(cs->base, hpriv->base + WINDOW_BASE(i)); 3946 } 3947} 3948 3949/** 3950 * mv_platform_probe - handle a positive probe of an soc Marvell 3951 * host 3952 * @pdev: platform device found 3953 * 3954 * LOCKING: 3955 * Inherited from caller. 3956 */ 3957static int mv_platform_probe(struct platform_device *pdev) 3958{ 3959 static int printed_version; 3960 const struct mv_sata_platform_data *mv_platform_data; 3961 const struct ata_port_info *ppi[] = 3962 { &mv_port_info[chip_soc], NULL }; 3963 struct ata_host *host; 3964 struct mv_host_priv *hpriv; 3965 struct resource *res; 3966 int n_ports, rc; 3967 3968 if (!printed_version++) 3969 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 3970 3971 /* 3972 * Simple resource validation .. 3973 */ 3974 if (unlikely(pdev->num_resources != 2)) { 3975 dev_err(&pdev->dev, "invalid number of resources\n"); 3976 return -EINVAL; 3977 } 3978 3979 /* 3980 * Get the register base first 3981 */ 3982 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3983 if (res == NULL) 3984 return -EINVAL; 3985 3986 /* allocate host */ 3987 mv_platform_data = pdev->dev.platform_data; 3988 n_ports = mv_platform_data->n_ports; 3989 3990 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 3991 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 3992 3993 if (!host || !hpriv) 3994 return -ENOMEM; 3995 host->private_data = hpriv; 3996 hpriv->n_ports = n_ports; 3997 hpriv->board_idx = chip_soc; 3998 3999 host->iomap = NULL; 4000 hpriv->base = devm_ioremap(&pdev->dev, res->start, 4001 resource_size(res)); 4002 hpriv->base -= SATAHC0_REG_BASE; 4003 4004#if defined(CONFIG_HAVE_CLK) 4005 hpriv->clk = clk_get(&pdev->dev, NULL); 4006 if (IS_ERR(hpriv->clk)) 4007 dev_notice(&pdev->dev, "cannot get clkdev\n"); 4008 else 4009 clk_enable(hpriv->clk); 4010#endif 4011 4012 /* 4013 * (Re-)program MBUS remapping windows if we are asked to. 4014 */ 4015 if (mv_platform_data->dram != NULL) 4016 mv_conf_mbus_windows(hpriv, mv_platform_data->dram); 4017 4018 rc = mv_create_dma_pools(hpriv, &pdev->dev); 4019 if (rc) 4020 goto err; 4021 4022 /* initialize adapter */ 4023 rc = mv_init_host(host); 4024 if (rc) 4025 goto err; 4026 4027 dev_printk(KERN_INFO, &pdev->dev, 4028 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, 4029 host->n_ports); 4030 4031 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt, 4032 IRQF_SHARED, &mv6_sht); 4033err: 4034#if defined(CONFIG_HAVE_CLK) 4035 if (!IS_ERR(hpriv->clk)) { 4036 clk_disable(hpriv->clk); 4037 clk_put(hpriv->clk); 4038 } 4039#endif 4040 4041 return rc; 4042} 4043 4044/* 4045 * 4046 * mv_platform_remove - unplug a platform interface 4047 * @pdev: platform device 4048 * 4049 * A platform bus SATA device has been unplugged. Perform the needed 4050 * cleanup. Also called on module unload for any active devices. 4051 */ 4052static int __devexit mv_platform_remove(struct platform_device *pdev) 4053{ 4054 struct device *dev = &pdev->dev; 4055 struct ata_host *host = dev_get_drvdata(dev); 4056#if defined(CONFIG_HAVE_CLK) 4057 struct mv_host_priv *hpriv = host->private_data; 4058#endif 4059 ata_host_detach(host); 4060 4061#if defined(CONFIG_HAVE_CLK) 4062 if (!IS_ERR(hpriv->clk)) { 4063 clk_disable(hpriv->clk); 4064 clk_put(hpriv->clk); 4065 } 4066#endif 4067 return 0; 4068} 4069 4070#ifdef CONFIG_PM 4071static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state) 4072{ 4073 struct ata_host *host = dev_get_drvdata(&pdev->dev); 4074 if (host) 4075 return ata_host_suspend(host, state); 4076 else 4077 return 0; 4078} 4079 4080static int mv_platform_resume(struct platform_device *pdev) 4081{ 4082 struct ata_host *host = dev_get_drvdata(&pdev->dev); 4083 int ret; 4084 4085 if (host) { 4086 struct mv_host_priv *hpriv = host->private_data; 4087 const struct mv_sata_platform_data *mv_platform_data = \ 4088 pdev->dev.platform_data; 4089 /* 4090 * (Re-)program MBUS remapping windows if we are asked to. 4091 */ 4092 if (mv_platform_data->dram != NULL) 4093 mv_conf_mbus_windows(hpriv, mv_platform_data->dram); 4094 4095 /* initialize adapter */ 4096 ret = mv_init_host(host); 4097 if (ret) { 4098 printk(KERN_ERR DRV_NAME ": Error during HW init\n"); 4099 return ret; 4100 } 4101 ata_host_resume(host); 4102 } 4103 4104 return 0; 4105} 4106#else 4107#define mv_platform_suspend NULL 4108#define mv_platform_resume NULL 4109#endif 4110 4111static struct platform_driver mv_platform_driver = { 4112 .probe = mv_platform_probe, 4113 .remove = __devexit_p(mv_platform_remove), 4114 .suspend = mv_platform_suspend, 4115 .resume = mv_platform_resume, 4116 .driver = { 4117 .name = DRV_NAME, 4118 .owner = THIS_MODULE, 4119 }, 4120}; 4121 4122 4123#ifdef CONFIG_PCI 4124static int mv_pci_init_one(struct pci_dev *pdev, 4125 const struct pci_device_id *ent); 4126#ifdef CONFIG_PM 4127static int mv_pci_device_resume(struct pci_dev *pdev); 4128#endif 4129 4130 4131static struct pci_driver mv_pci_driver = { 4132 .name = DRV_NAME, 4133 .id_table = mv_pci_tbl, 4134 .probe = mv_pci_init_one, 4135 .remove = ata_pci_remove_one, 4136#ifdef CONFIG_PM 4137 .suspend = ata_pci_device_suspend, 4138 .resume = mv_pci_device_resume, 4139#endif 4140 4141}; 4142 4143/* move to PCI layer or libata core? */ 4144static int pci_go_64(struct pci_dev *pdev) 4145{ 4146 int rc; 4147 4148 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 4149 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 4150 if (rc) { 4151 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 4152 if (rc) { 4153 dev_printk(KERN_ERR, &pdev->dev, 4154 "64-bit DMA enable failed\n"); 4155 return rc; 4156 } 4157 } 4158 } else { 4159 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 4160 if (rc) { 4161 dev_printk(KERN_ERR, &pdev->dev, 4162 "32-bit DMA enable failed\n"); 4163 return rc; 4164 } 4165 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 4166 if (rc) { 4167 dev_printk(KERN_ERR, &pdev->dev, 4168 "32-bit consistent DMA enable failed\n"); 4169 return rc; 4170 } 4171 } 4172 4173 return rc; 4174} 4175 4176static void mv_print_info(struct ata_host *host) 4177{ 4178 struct pci_dev *pdev = to_pci_dev(host->dev); 4179 struct mv_host_priv *hpriv = host->private_data; 4180 u8 scc; 4181 const char *scc_s, *gen; 4182 4183 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); 4184 if (scc == 0) 4185 scc_s = "SCSI"; 4186 else if (scc == 0x01) 4187 scc_s = "RAID"; 4188 else 4189 scc_s = "?"; 4190 4191 if (IS_GEN_I(hpriv)) 4192 gen = "I"; 4193 else if (IS_GEN_II(hpriv)) 4194 gen = "II"; 4195 else if (IS_GEN_IIE(hpriv)) 4196 gen = "IIE"; 4197 else 4198 gen = "?"; 4199 4200 dev_printk(KERN_INFO, &pdev->dev, 4201 "Gen-%s %u slots %u ports %s mode IRQ via %s\n", 4202 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, 4203 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); 4204} 4205 4206/** 4207 * mv_pci_init_one - handle a positive probe of a PCI Marvell host 4208 * @pdev: PCI device found 4209 * @ent: PCI device ID entry for the matched host 4210 * 4211 * LOCKING: 4212 * Inherited from caller. 4213 */ 4214static int mv_pci_init_one(struct pci_dev *pdev, 4215 const struct pci_device_id *ent) 4216{ 4217 static int printed_version; 4218 unsigned int board_idx = (unsigned int)ent->driver_data; 4219 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; 4220 struct ata_host *host; 4221 struct mv_host_priv *hpriv; 4222 int n_ports, port, rc; 4223 4224 if (!printed_version++) 4225 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 4226 4227 /* allocate host */ 4228 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC; 4229 4230 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 4231 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 4232 if (!host || !hpriv) 4233 return -ENOMEM; 4234 host->private_data = hpriv; 4235 hpriv->n_ports = n_ports; 4236 hpriv->board_idx = board_idx; 4237 4238 /* acquire resources */ 4239 rc = pcim_enable_device(pdev); 4240 if (rc) 4241 return rc; 4242 4243 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME); 4244 if (rc == -EBUSY) 4245 pcim_pin_device(pdev); 4246 if (rc) 4247 return rc; 4248 host->iomap = pcim_iomap_table(pdev); 4249 hpriv->base = host->iomap[MV_PRIMARY_BAR]; 4250 4251 rc = pci_go_64(pdev); 4252 if (rc) 4253 return rc; 4254 4255 rc = mv_create_dma_pools(hpriv, &pdev->dev); 4256 if (rc) 4257 return rc; 4258 4259 for (port = 0; port < host->n_ports; port++) { 4260 struct ata_port *ap = host->ports[port]; 4261 void __iomem *port_mmio = mv_port_base(hpriv->base, port); 4262 unsigned int offset = port_mmio - hpriv->base; 4263 4264 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); 4265 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); 4266 } 4267 4268 /* initialize adapter */ 4269 rc = mv_init_host(host); 4270 if (rc) 4271 return rc; 4272 4273 /* Enable message-switched interrupts, if requested */ 4274 if (msi && pci_enable_msi(pdev) == 0) 4275 hpriv->hp_flags |= MV_HP_FLAG_MSI; 4276 4277 mv_dump_pci_cfg(pdev, 0x68); 4278 mv_print_info(host); 4279 4280 pci_set_master(pdev); 4281 pci_try_set_mwi(pdev); 4282 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, 4283 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht); 4284} 4285 4286#ifdef CONFIG_PM 4287static int mv_pci_device_resume(struct pci_dev *pdev) 4288{ 4289 struct ata_host *host = dev_get_drvdata(&pdev->dev); 4290 int rc; 4291 4292 rc = ata_pci_device_do_resume(pdev); 4293 if (rc) 4294 return rc; 4295 4296 /* initialize adapter */ 4297 rc = mv_init_host(host); 4298 if (rc) 4299 return rc; 4300 4301 ata_host_resume(host); 4302 4303 return 0; 4304} 4305#endif 4306#endif 4307 4308static int mv_platform_probe(struct platform_device *pdev); 4309static int __devexit mv_platform_remove(struct platform_device *pdev); 4310 4311static int __init mv_init(void) 4312{ 4313 int rc = -ENODEV; 4314#ifdef CONFIG_PCI 4315 rc = pci_register_driver(&mv_pci_driver); 4316 if (rc < 0) 4317 return rc; 4318#endif 4319 rc = platform_driver_register(&mv_platform_driver); 4320 4321#ifdef CONFIG_PCI 4322 if (rc < 0) 4323 pci_unregister_driver(&mv_pci_driver); 4324#endif 4325 return rc; 4326} 4327 4328static void __exit mv_exit(void) 4329{ 4330#ifdef CONFIG_PCI 4331 pci_unregister_driver(&mv_pci_driver); 4332#endif 4333 platform_driver_unregister(&mv_platform_driver); 4334} 4335 4336MODULE_AUTHOR("Brett Russ"); 4337MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); 4338MODULE_LICENSE("GPL"); 4339MODULE_DEVICE_TABLE(pci, mv_pci_tbl); 4340MODULE_VERSION(DRV_VERSION); 4341MODULE_ALIAS("platform:" DRV_NAME); 4342 4343module_init(mv_init); 4344module_exit(mv_exit); 4345