aic7xxx_inline.h revision 70204
/*
 * Inline routines shareable across OS platforms.
 *
 * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
30321369Sdim * 31321369Sdim * $Id: //depot/src/aic7xxx/aic7xxx_inline.h#12 $ 32321369Sdim * 33321369Sdim * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx_inline.h 70204 2000-12-20 01:11:37Z gibbs $ 34321369Sdim */ 35321369Sdim 36321369Sdim#ifndef _AIC7XXX_INLINE_H_ 37353358Sdim#define _AIC7XXX_INLINE_H_ 38353358Sdim 39353358Sdim/************************* Sequencer Execution Control ************************/ 40353358Sdimstatic __inline int sequencer_paused(struct ahc_softc *ahc); 41353358Sdimstatic __inline void ahc_pause_bug_fix(struct ahc_softc *ahc); 42353358Sdimstatic __inline void pause_sequencer(struct ahc_softc *ahc); 43353358Sdimstatic __inline void unpause_sequencer(struct ahc_softc *ahc); 44353358Sdim 45353358Sdim/* 46353358Sdim * Work around any chip bugs related to halting sequencer execution. 47353358Sdim * On Ultra2 controllers, we must clear the CIOBUS stretch signal by 48353358Sdim * reading a register that will set this signal and deassert it. 49353358Sdim * Without this workaround, if the chip is paused, by an interrupt or 50353358Sdim * manual pause while accessing scb ram, accesses to certain registers 51353358Sdim * will hang the system (infinite pci retries). 52360784Sdim */ 53353358Sdimstatic __inline void 54311116Sdimahc_pause_bug_fix(struct ahc_softc *ahc) 55311116Sdim{ 56311116Sdim if ((ahc->features & AHC_ULTRA2) != 0) 57311116Sdim (void)ahc_inb(ahc, CCSCBCTL); 58353358Sdim} 59353358Sdim 60353358Sdim/* 61311116Sdim * Determine whether the sequencer has halted code execution. 62311116Sdim * Returns non-zero status if the sequencer is stopped. 63311116Sdim */ 64311116Sdimstatic __inline int 65311116Sdimsequencer_paused(struct ahc_softc *ahc) 66311116Sdim{ 67311116Sdim return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0); 68311116Sdim} 69311116Sdim 70311116Sdim/* 71311116Sdim * Request that the sequencer stop and wait, indefinitely, for it 72311116Sdim * to stop. 
The sequencer will only acknowledge that it is paused 73311116Sdim * once it has reached an instruction boundary and PAUSEDIS is 74311116Sdim * cleared in the SEQCTL register. The sequencer may use PAUSEDIS 75311116Sdim * for critical sections. 76311116Sdim */ 77311116Sdimstatic __inline void 78311116Sdimpause_sequencer(struct ahc_softc *ahc) 79321369Sdim{ 80321369Sdim ahc_outb(ahc, HCNTRL, ahc->pause); 81321369Sdim 82321369Sdim /* 83321369Sdim * Since the sequencer can disable pausing in a critical section, we 84321369Sdim * must loop until it actually stops. 85311116Sdim */ 86311116Sdim while (sequencer_paused(ahc) == 0) 87311116Sdim ; 88311116Sdim 89311116Sdim ahc_pause_bug_fix(ahc); 90311116Sdim} 91311116Sdim 92311116Sdim/* 93311116Sdim * Allow the sequencer to continue program execution. 94311116Sdim * We check here to ensure that no additional interrupt 95311116Sdim * sources that would cause the sequencer to halt have been 96311116Sdim * asserted. If, for example, a SCSI bus reset is detected 97311116Sdim * while we are fielding a different, pausing, interrupt type, 98311116Sdim * we don't want to release the sequencer before going back 99311116Sdim * into our interrupt handler and dealing with this new 100353358Sdim * condition. 101353358Sdim */ 102353358Sdimstatic __inline void 103353358Sdimunpause_sequencer(struct ahc_softc *ahc) 104353358Sdim{ 105353358Sdim if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0) 106353358Sdim ahc_outb(ahc, HCNTRL, ahc->unpause); 107311116Sdim} 108311116Sdim 109311116Sdim/*********************** Untagged Transaction Routines ************************/ 110311116Sdimstatic __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc); 111311116Sdimstatic __inline void ahc_release_untagged_queues(struct ahc_softc *ahc); 112311116Sdim 113311116Sdim/* 114311116Sdim * Block our completion routine from starting the next untagged 115311116Sdim * transaction for this target or target lun. 
116353358Sdim */ 117353358Sdimstatic __inline void 118311116Sdimahc_freeze_untagged_queues(struct ahc_softc *ahc) 119353358Sdim{ 120311116Sdim if ((ahc->features & AHC_SCB_BTT) == 0) 121311116Sdim ahc->untagged_queue_lock++; 122311116Sdim} 123311116Sdim 124311116Sdim/* 125311116Sdim * Allow the next untagged transaction for this target or target lun 126311116Sdim * to be executed. We use a counting semaphore to allow the lock 127353358Sdim * to be acquired recursively. Once the count drops to zero, the 128353358Sdim * transaction queues will be run. 129360784Sdim */ 130360784Sdimstatic __inline void 131360784Sdimahc_release_untagged_queues(struct ahc_softc *ahc) 132360784Sdim{ 133360784Sdim if ((ahc->features & AHC_SCB_BTT) == 0) { 134353358Sdim ahc->untagged_queue_lock--; 135353358Sdim if (ahc->untagged_queue_lock == 0) 136353358Sdim ahc_run_untagged_queues(ahc); 137311116Sdim } 138311116Sdim} 139353358Sdim 140353358Sdim/************************** Memory mapping routines ***************************/ 141353358Sdimstatic __inline struct ahc_dma_seg * 142353358Sdim ahc_sg_bus_to_virt(struct scb *scb, 143353358Sdim uint32_t sg_busaddr); 144311116Sdimstatic __inline uint32_t 145311116Sdim ahc_sg_virt_to_bus(struct scb *scb, 146311116Sdim struct ahc_dma_seg *sg); 147311116Sdimstatic __inline uint32_t 148353358Sdim ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index); 149353358Sdim 150353358Sdimstatic __inline struct ahc_dma_seg * 151353358Sdimahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr) 152353358Sdim{ 153353358Sdim int sg_index; 154353358Sdim 155353358Sdim sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg); 156311116Sdim /* sg_list_phys points to entry 1, not 0 */ 157311116Sdim sg_index++; 158311116Sdim 159311116Sdim return (&scb->sg_list[sg_index]); 160311116Sdim} 161311116Sdim 162311116Sdimstatic __inline uint32_t 163311116Sdimahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg) 164311116Sdim{ 165311116Sdim int sg_index; 
166311116Sdim 167311116Sdim /* sg_list_phys points to entry 1, not 0 */ 168311116Sdim sg_index = sg - &scb->sg_list[1]; 169311116Sdim 170311116Sdim return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list))); 171311116Sdim} 172311116Sdim 173353358Sdimstatic __inline uint32_t 174353358Sdimahc_hscb_busaddr(struct ahc_softc *ahc, u_int index) 175353358Sdim{ 176353358Sdim return (ahc->scb_data->hscb_busaddr 177353358Sdim + (sizeof(struct hardware_scb) * index)); 178311116Sdim} 179311116Sdim 180311116Sdim/******************************** Debugging ***********************************/ 181311116Sdimstatic __inline char *ahc_name(struct ahc_softc *ahc); 182311116Sdim 183311116Sdimstatic __inline char * 184311116Sdimahc_name(struct ahc_softc *ahc) 185311116Sdim{ 186311116Sdim return (ahc->name); 187311116Sdim} 188311116Sdim 189353358Sdim/*********************** Miscelaneous Support Functions ***********************/ 190353358Sdim 191311116Sdimstatic __inline int ahc_check_residual(struct scb *scb); 192353358Sdimstatic __inline struct ahc_initiator_tinfo * 193353358Sdim ahc_fetch_transinfo(struct ahc_softc *ahc, 194353358Sdim char channel, u_int our_id, 195353358Sdim u_int remote_id, 196353358Sdim struct tmode_tstate **tstate); 197311116Sdimstatic __inline struct scb* 198311116Sdim ahc_get_scb(struct ahc_softc *ahc); 199353358Sdimstatic __inline void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb); 200353358Sdimstatic __inline void ahc_swap_with_next_hscb(struct ahc_softc *ahc, 201353358Sdim struct scb *scb); 202311116Sdimstatic __inline void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb); 203311116Sdim 204353358Sdim/* 205353358Sdim * Determine whether the sequencer reported a residual 206353358Sdim * for this SCB/transaction. 
207353358Sdim */ 208353358Sdimstatic __inline int 209353358Sdimahc_check_residual(struct scb *scb) 210353358Sdim{ 211311116Sdim struct status_pkt *sp; 212311116Sdim 213321369Sdim sp = &scb->hscb->shared_data.status; 214311116Sdim if ((scb->hscb->sgptr & SG_RESID_VALID) != 0) 215311116Sdim return (1); 216321369Sdim return (0); 217311116Sdim} 218311116Sdim 219311116Sdim/* 220311116Sdim * Return pointers to the transfer negotiation information 221311116Sdim * for the specified our_id/remote_id pair. 222353358Sdim */ 223311116Sdimstatic __inline struct ahc_initiator_tinfo * 224353358Sdimahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id, 225353358Sdim u_int remote_id, struct tmode_tstate **tstate) 226353358Sdim{ 227353358Sdim /* 228353358Sdim * Transfer data structures are stored from the perspective 229353358Sdim * of the target role. Since the parameters for a connection 230353358Sdim * in the initiator role to a given target are the same as 231353358Sdim * when the roles are reversed, we pretend we are the target. 232353358Sdim */ 233353358Sdim if (channel == 'B') 234311116Sdim our_id += 8; 235311116Sdim *tstate = ahc->enabled_targets[our_id]; 236311116Sdim return (&(*tstate)->transinfo[remote_id]); 237311116Sdim} 238321369Sdim 239311116Sdim/* 240311116Sdim * Get a free scb. If there are none, see if we can allocate a new SCB. 241321369Sdim */ 242321369Sdimstatic __inline struct scb * 243311116Sdimahc_get_scb(struct ahc_softc *ahc) 244321369Sdim{ 245311116Sdim struct scb *scb; 246311116Sdim 247311116Sdim if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) { 248311116Sdim ahc_alloc_scbs(ahc); 249353358Sdim scb = SLIST_FIRST(&ahc->scb_data->free_scbs); 250353358Sdim if (scb == NULL) 251353358Sdim return (NULL); 252353358Sdim } 253353358Sdim SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle); 254311116Sdim return (scb); 255311116Sdim} 256311116Sdim 257311116Sdim/* 258353358Sdim * Return an SCB resource to the free list. 
259353358Sdim */ 260311116Sdimstatic __inline void 261311116Sdimahc_free_scb(struct ahc_softc *ahc, struct scb *scb) 262311116Sdim{ 263311116Sdim struct hardware_scb *hscb; 264311116Sdim 265311116Sdim hscb = scb->hscb; 266311116Sdim /* Clean up for the next user */ 267311116Sdim ahc->scb_data->scbindex[hscb->tag] = NULL; 268311116Sdim scb->flags = SCB_FREE; 269311116Sdim hscb->control = 0; 270311116Sdim 271311116Sdim SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle); 272311116Sdim 273311116Sdim /* Notify the OSM that a resource is now available. */ 274353358Sdim ahc_platform_scb_free(ahc, scb); 275353358Sdim} 276353358Sdim 277353358Sdimstatic __inline struct scb * 278353358Sdimahc_lookup_scb(struct ahc_softc *ahc, u_int tag) 279353358Sdim{ 280353358Sdim return (ahc->scb_data->scbindex[tag]); 281353358Sdim 282353358Sdim} 283353358Sdim 284353358Sdimstatic __inline void 285353358Sdimahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb) 286353358Sdim{ 287353358Sdim struct hardware_scb *q_hscb; 288353358Sdim u_int saved_tag; 289353358Sdim 290353358Sdim /* 291353358Sdim * Our queuing method is a bit tricky. The card 292353358Sdim * knows in advance which HSCB to download, and we 293353358Sdim * can't disappoint it. To achieve this, the next 294353358Sdim * SCB to download is saved off in ahc->next_queued_scb. 295353358Sdim * When we are called to queue "an arbitrary scb", 296353358Sdim * we copy the contents of the incoming HSCB to the one 297353358Sdim * the sequencer knows about, swap HSCB pointers and 298353358Sdim * finally assign the SCB to the tag indexed location 299353358Sdim * in the scb_array. This makes sure that we can still 300353358Sdim * locate the correct SCB by SCB_TAG. 
301353358Sdim */ 302353358Sdim q_hscb = ahc->next_queued_scb->hscb; 303353358Sdim saved_tag = q_hscb->tag; 304353358Sdim memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); 305353358Sdim if ((scb->flags & SCB_CDB32_PTR) != 0) { 306353358Sdim q_hscb->shared_data.cdb_ptr = 307353358Sdim ahc_hscb_busaddr(ahc, q_hscb->tag) 308353358Sdim + offsetof(struct hardware_scb, cdb32); 309353358Sdim } 310353358Sdim q_hscb->tag = saved_tag; 311353358Sdim q_hscb->next = scb->hscb->tag; 312353358Sdim 313353358Sdim /* Now swap HSCB pointers. */ 314353358Sdim ahc->next_queued_scb->hscb = scb->hscb; 315353358Sdim scb->hscb = q_hscb; 316353358Sdim 317353358Sdim /* Now define the mapping from tag to SCB in the scbindex */ 318353358Sdim ahc->scb_data->scbindex[scb->hscb->tag] = scb; 319353358Sdim} 320353358Sdim 321353358Sdim/* 322353358Sdim * Tell the sequencer about a new transaction to execute. 323353358Sdim */ 324311116Sdimstatic __inline void 325311116Sdimahc_queue_scb(struct ahc_softc *ahc, struct scb *scb) 326311116Sdim{ 327321369Sdim ahc_swap_with_next_hscb(ahc, scb); 328311116Sdim 329321369Sdim if (scb->hscb->tag == SCB_LIST_NULL 330311116Sdim || scb->hscb->next == SCB_LIST_NULL) 331311116Sdim panic("Attempt to queue invalid SCB tag %x:%x\n", 332321369Sdim scb->hscb->tag, scb->hscb->next); 333311116Sdim 334321369Sdim /* 335311116Sdim * Keep a history of SCBs we've downloaded in the qinfifo. 
336311116Sdim */ 337321369Sdim ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; 338311116Sdim if ((ahc->features & AHC_QUEUE_REGS) != 0) { 339321369Sdim ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 340311116Sdim } else { 341311116Sdim if ((ahc->features & AHC_AUTOPAUSE) == 0) 342321369Sdim pause_sequencer(ahc); 343311116Sdim ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 344311116Sdim if ((ahc->features & AHC_AUTOPAUSE) == 0) 345311116Sdim unpause_sequencer(ahc); 346311116Sdim } 347311116Sdim} 348311116Sdim 349311116Sdim/************************** Interrupt Processing ******************************/ 350311116Sdimstatic __inline u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc); 351311116Sdimstatic __inline void ahc_intr(struct ahc_softc *ahc); 352321369Sdim 353311116Sdim/* 354311116Sdim * See if the firmware has posted any completed commands 355321369Sdim * into our in-core command complete fifos. 356311116Sdim */ 357321369Sdim#define AHC_RUN_QOUTFIFO 0x1 358311116Sdim#define AHC_RUN_TQINFIFO 0x2 359311116Sdimstatic __inline u_int 360321369Sdimahc_check_cmdcmpltqueues(struct ahc_softc *ahc) 361311116Sdim{ 362321369Sdim u_int retval; 363311116Sdim 364311116Sdim retval = 0; 365321369Sdim if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) 366311116Sdim retval |= AHC_RUN_QOUTFIFO; 367311116Sdim#ifdef AHC_TARGET_MODE 368311116Sdim if ((ahc->flags & AHC_TARGETROLE) != 0 369311116Sdim && ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0) 370311116Sdim retval |= AHC_RUN_TQINFIFO; 371311116Sdim#endif 372311116Sdim return (retval); 373} 374 375/* 376 * Catch an interrupt from the adapter 377 */ 378static __inline void 379ahc_intr(struct ahc_softc *ahc) 380{ 381 u_int intstat; 382 u_int queuestat; 383 384 /* 385 * Instead of directly reading the interrupt status register, 386 * infer the cause of the interrupt by checking our in-core 387 * completion queues. This avoids a costly PCI bus read in 388 * most cases. 
389 */ 390 intstat = 0; 391 if ((queuestat = ahc_check_cmdcmpltqueues(ahc)) != 0) 392 intstat = CMDCMPLT; 393 394 if ((intstat & INT_PEND) == 0 395 || (ahc->flags & AHC_ALL_INTERRUPTS) != 0) { 396 397 intstat = ahc_inb(ahc, INTSTAT); 398#if AHC_PCI_CONFIG > 0 399 if (ahc->unsolicited_ints > 500 400 && (ahc->chip & AHC_PCI) != 0 401 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0) 402 ahc_pci_intr(ahc); 403#endif 404 } 405 406 if (intstat == 0xFF) 407 /* Hot eject */ 408 return; 409 410 if ((intstat & INT_PEND) == 0) { 411 ahc->unsolicited_ints++; 412 return; 413 } 414 ahc->unsolicited_ints = 0; 415 416 if (intstat & CMDCMPLT) { 417 ahc_outb(ahc, CLRINT, CLRCMDINT); 418 419 /* 420 * Ensure that the chip sees that we've cleared 421 * this interrupt before we walk the output fifo. 422 * Otherwise, we may, due to posted bus writes, 423 * clear the interrupt after we finish the scan, 424 * and after the sequencer has added new entries 425 * and asserted the interrupt again. 426 */ 427 ahc_flush_device_writes(ahc); 428#ifdef AHC_TARGET_MODE 429 if ((queuestat & AHC_RUN_QOUTFIFO) != 0) 430#endif 431 ahc_run_qoutfifo(ahc); 432#ifdef AHC_TARGET_MODE 433 if ((queuestat & AHC_RUN_TQINFIFO) != 0) 434 ahc_run_tqinfifo(ahc, /*paused*/FALSE); 435#endif 436 } 437 if (intstat & BRKADRINT) { 438 ahc_handle_brkadrint(ahc); 439 /* Fatal error, no more interrupts to handle. */ 440 return; 441 } 442 443 if ((intstat & (SEQINT|SCSIINT)) != 0) 444 ahc_pause_bug_fix(ahc); 445 446 if ((intstat & SEQINT) != 0) 447 ahc_handle_seqint(ahc, intstat); 448 449 if ((intstat & SCSIINT) != 0) 450 ahc_handle_scsiint(ahc, intstat); 451} 452 453#endif /* _AIC7XXX_INLINE_H_ */ 454