asr.c revision 145658
1/*- 2 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation 3 * Copyright (c) 2000-2001 Adaptec Corporation 4 * All rights reserved. 5 * 6 * TERMS AND CONDITIONS OF USE 7 * 8 * Redistribution and use in source form, with or without modification, are 9 * permitted provided that redistributions of source code must retain the 10 * above copyright notice, this list of conditions and the following disclaimer. 11 * 12 * This software is provided `as is' by Adaptec and any express or implied 13 * warranties, including, but not limited to, the implied warranties of 14 * merchantability and fitness for a particular purpose, are disclaimed. In no 15 * event shall Adaptec be liable for any direct, indirect, incidental, special, 16 * exemplary or consequential damages (including, but not limited to, 17 * procurement of substitute goods or services; loss of use, data, or profits; 18 * or business interruptions) however caused and on any theory of liability, 19 * whether in contract, strict liability, or tort (including negligence or 20 * otherwise) arising in any way out of the use of this driver software, even 21 * if advised of the possibility of such damage. 22 * 23 * SCSI I2O host adapter driver 24 * 25 * V1.10 2004/05/05 scottl@freebsd.org 26 * - Massive cleanup of the driver to remove dead code and 27 * non-conformant style. 28 * - Removed most i386-specific code to make it more portable. 29 * - Converted to the bus_space API. 30 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com 31 * - The 2000S and 2005S do not initialize on some machines, 32 * increased timeout to 255ms from 50ms for the StatusGet 33 * command. 34 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com 35 * - I knew this one was too good to be true. The error return 36 * on ioctl commands needs to be compared to CAM_REQ_CMP, not 37 * to the bit masked status. 38 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com 39 * - The 2005S that was supported is affectionately called the 40 * Conjoined BAR Firmware. 
In order to support RAID-5 in a 41 * 16MB low-cost configuration, Firmware was forced to go 42 * to a Split BAR Firmware. This requires a separate IOP and 43 * Messaging base address. 44 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com 45 * - Handle support for 2005S Zero Channel RAID solution. 46 * - System locked up if the Adapter locked up. Do not try 47 * to send other commands if the resetIOP command fails. The 48 * fail outstanding command discovery loop was flawed as the 49 * removal of the command from the list prevented discovering 50 * all the commands. 51 * - Comment changes to clarify driver. 52 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM. 53 * - We do not use the AC_FOUND_DEV event because of I2O. 54 * Removed asr_async. 55 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org, 56 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com. 57 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0 58 * mode as this is confused with competitor adapters in run 59 * mode. 60 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove 61 * to prevent operating system panic. 62 * - moved default major number to 154 from 97. 63 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com 64 * - The controller is not actually an ASR (Adaptec SCSI RAID) 65 * series that is visible, it's more of an internal code name. 66 * remove any visible references within reason for now. 67 * - bus_ptr->LUN was not correctly zeroed when initially 68 * allocated causing a possible panic of the operating system 69 * during boot. 70 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com 71 * - Code always fails for ASR_getTid affecting performance. 72 * - initiated a set of changes that resulted from a formal 73 * code inspection by Mark_Salyzyn@adaptec.com, 74 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com, 75 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com. 
76 * Their findings were focussed on the LCT & TID handler, and 77 * all resulting changes were to improve code readability, 78 * consistency or have a positive effect on performance. 79 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com 80 * - Passthrough returned an incorrect error. 81 * - Passthrough did not migrate the intrinsic scsi layer wakeup 82 * on command completion. 83 * - generate control device nodes using make_dev and delete_dev. 84 * - Performance affected by TID caching reallocing. 85 * - Made suggested changes by Justin_Gibbs@adaptec.com 86 * - use splcam instead of splbio. 87 * - use cam_imask instead of bio_imask. 88 * - use u_int8_t instead of u_char. 89 * - use u_int16_t instead of u_short. 90 * - use u_int32_t instead of u_long where appropriate. 91 * - use 64 bit context handler instead of 32 bit. 92 * - create_ccb should only allocate the worst case 93 * requirements for the driver since CAM may evolve 94 * making union ccb much larger than needed here. 95 * renamed create_ccb to asr_alloc_ccb. 96 * - go nutz justifying all debug prints as macros 97 * defined at the top and remove unsightly ifdefs. 98 * - INLINE STATIC viewed as confusing. Historically 99 * utilized to affect code performance and debug 100 * issues in OS, Compiler or OEM specific situations. 101 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com 102 * - Ported from FreeBSD 2.2.X DPT I2O driver. 103 * changed struct scsi_xfer to union ccb/struct ccb_hdr 104 * changed variable name xs to ccb 105 * changed struct scsi_link to struct cam_path 106 * changed struct scsibus_data to struct cam_sim 107 * stopped using fordriver for holding on to the TID 108 * use proprietary packet creation instead of scsi_inquire 109 * CAM layer sends synchronize commands. 
110 */ 111 112#include <sys/cdefs.h> 113#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */ 114#include <sys/kernel.h> 115#include <sys/module.h> 116#include <sys/systm.h> 117#include <sys/malloc.h> 118#include <sys/conf.h> 119#include <sys/ioccom.h> 120#include <sys/proc.h> 121#include <sys/bus.h> 122#include <machine/resource.h> 123#include <machine/bus.h> 124#include <sys/rman.h> 125#include <sys/stat.h> 126 127#include <cam/cam.h> 128#include <cam/cam_ccb.h> 129#include <cam/cam_sim.h> 130#include <cam/cam_xpt_sim.h> 131#include <cam/cam_xpt_periph.h> 132 133#include <cam/scsi/scsi_all.h> 134#include <cam/scsi/scsi_message.h> 135 136#include <vm/vm.h> 137#include <vm/pmap.h> 138 139#if defined(__i386__) 140#include "opt_asr.h" 141#include <i386/include/cputypes.h> 142 143#ifndef BURN_BRIDGES 144#if defined(ASR_COMPAT) 145#define ASR_IOCTL_COMPAT 146#endif /* ASR_COMPAT */ 147#endif /* !BURN_BRIDGES */ 148 149#elif defined(__alpha__) 150#include <alpha/include/pmap.h> 151#endif 152#include <machine/vmparam.h> 153 154#include <dev/pci/pcivar.h> 155#include <dev/pci/pcireg.h> 156 157#define osdSwap4(x) ((u_long)ntohl((u_long)(x))) 158#define KVTOPHYS(x) vtophys(x) 159#include "dev/asr/dptalign.h" 160#include "dev/asr/i2oexec.h" 161#include "dev/asr/i2obscsi.h" 162#include "dev/asr/i2odpt.h" 163#include "dev/asr/i2oadptr.h" 164 165#include "dev/asr/sys_info.h" 166 167__FBSDID("$FreeBSD: head/sys/dev/asr/asr.c 145658 2005-04-29 04:47:11Z scottl $"); 168 169#define ASR_VERSION 1 170#define ASR_REVISION '1' 171#define ASR_SUBREVISION '0' 172#define ASR_MONTH 5 173#define ASR_DAY 5 174#define ASR_YEAR (2004 - 1980) 175 176/* 177 * Debug macros to reduce the unsightly ifdefs 178 */ 179#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD)) 180static __inline void 181debug_asr_message(PI2O_MESSAGE_FRAME message) 182{ 183 u_int32_t * pointer = (u_int32_t *)message; 184 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message); 185 
u_int32_t counter = 0; 186 187 while (length--) { 188 printf("%08lx%c", (u_long)*(pointer++), 189 (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' '); 190 } 191} 192#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */ 193 194#ifdef DEBUG_ASR 195 /* Breaks on none STDC based compilers :-( */ 196#define debug_asr_printf(fmt,args...) printf(fmt, ##args) 197#define debug_asr_dump_message(message) debug_asr_message(message) 198#define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path); 199#else /* DEBUG_ASR */ 200#define debug_asr_printf(fmt,args...) 201#define debug_asr_dump_message(message) 202#define debug_asr_print_path(ccb) 203#endif /* DEBUG_ASR */ 204 205/* 206 * If DEBUG_ASR_CMD is defined: 207 * 0 - Display incoming SCSI commands 208 * 1 - add in a quick character before queueing. 209 * 2 - add in outgoing message frames. 210 */ 211#if (defined(DEBUG_ASR_CMD)) 212#define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args) 213static __inline void 214debug_asr_dump_ccb(union ccb *ccb) 215{ 216 u_int8_t *cp = (unsigned char *)&(ccb->csio.cdb_io); 217 int len = ccb->csio.cdb_len; 218 219 while (len) { 220 debug_asr_cmd_printf (" %02x", *(cp++)); 221 --len; 222 } 223} 224#if (DEBUG_ASR_CMD > 0) 225#define debug_asr_cmd1_printf debug_asr_cmd_printf 226#else 227#define debug_asr_cmd1_printf(fmt,args...) 228#endif 229#if (DEBUG_ASR_CMD > 1) 230#define debug_asr_cmd2_printf debug_asr_cmd_printf 231#define debug_asr_cmd2_dump_message(message) debug_asr_message(message) 232#else 233#define debug_asr_cmd2_printf(fmt,args...) 234#define debug_asr_cmd2_dump_message(message) 235#endif 236#else /* DEBUG_ASR_CMD */ 237#define debug_asr_cmd_printf(fmt,args...) 238#define debug_asr_dump_ccb(ccb) 239#define debug_asr_cmd1_printf(fmt,args...) 240#define debug_asr_cmd2_printf(fmt,args...) 241#define debug_asr_cmd2_dump_message(message) 242#endif /* DEBUG_ASR_CMD */ 243 244#if (defined(DEBUG_ASR_USR_CMD)) 245#define debug_usr_cmd_printf(fmt,args...) 
printf(fmt,##args) 246#define debug_usr_cmd_dump_message(message) debug_usr_message(message) 247#else /* DEBUG_ASR_USR_CMD */ 248#define debug_usr_cmd_printf(fmt,args...) 249#define debug_usr_cmd_dump_message(message) 250#endif /* DEBUG_ASR_USR_CMD */ 251 252#ifdef ASR_IOCTL_COMPAT 253#define dsDescription_size 46 /* Snug as a bug in a rug */ 254#endif /* ASR_IOCTL_COMPAT */ 255 256#include "dev/asr/dptsig.h" 257 258static dpt_sig_S ASR_sig = { 259 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL, 260 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0, 261 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5, 262 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION, 263 ASR_MONTH, ASR_DAY, ASR_YEAR, 264/* 01234567890123456789012345678901234567890123456789 < 50 chars */ 265 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver" 266 /* ^^^^^ asr_attach alters these to match OS */ 267}; 268 269/* Configuration Definitions */ 270 271#define SG_SIZE 58 /* Scatter Gather list Size */ 272#define MAX_TARGET_ID 126 /* Maximum Target ID supported */ 273#define MAX_LUN 255 /* Maximum LUN Supported */ 274#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */ 275#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */ 276#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */ 277#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */ 278#define MAX_MAP 4194304L /* Maximum mapping size of IOP */ 279 /* Also serves as the minimum map for */ 280 /* the 2005S zero channel RAID product */ 281 282/* I2O register set */ 283#define I2O_REG_STATUS 0x30 284#define I2O_REG_MASK 0x34 285#define I2O_REG_TOFIFO 0x40 286#define I2O_REG_FROMFIFO 0x44 287 288#define Mask_InterruptsDisabled 0x08 289 290/* 291 * A MIX of performance and space considerations for TID lookups 292 */ 293typedef u_int16_t tid_t; 294 295typedef struct { 296 u_int32_t size; /* up to MAX_LUN */ 297 tid_t TID[1]; 298} lun2tid_t; 299 300typedef struct { 301 u_int32_t size; /* up 
to MAX_TARGET */ 302 lun2tid_t * LUN[1]; 303} target2lun_t; 304 305/* 306 * To ensure that we only allocate and use the worst case ccb here, lets 307 * make our own local ccb union. If asr_alloc_ccb is utilized for another 308 * ccb type, ensure that you add the additional structures into our local 309 * ccb union. To ensure strict type checking, we will utilize the local 310 * ccb definition wherever possible. 311 */ 312union asr_ccb { 313 struct ccb_hdr ccb_h; /* For convenience */ 314 struct ccb_scsiio csio; 315 struct ccb_setasync csa; 316}; 317 318/************************************************************************** 319** ASR Host Adapter structure - One Structure For Each Host Adapter That ** 320** Is Configured Into The System. The Structure Supplies Configuration ** 321** Information, Status Info, Queue Info And An Active CCB List Pointer. ** 322***************************************************************************/ 323 324typedef struct Asr_softc { 325 u_int16_t ha_irq; 326 u_long ha_Base; /* base port for each board */ 327 bus_size_t ha_blinkLED; 328 bus_space_handle_t ha_i2o_bhandle; 329 bus_space_tag_t ha_i2o_btag; 330 bus_space_handle_t ha_frame_bhandle; 331 bus_space_tag_t ha_frame_btag; 332 I2O_IOP_ENTRY ha_SystemTable; 333 LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */ 334 struct cam_path * ha_path[MAX_CHANNEL+1]; 335 struct cam_sim * ha_sim[MAX_CHANNEL+1]; 336 struct resource * ha_mem_res; 337 struct resource * ha_mes_res; 338 struct resource * ha_irq_res; 339 void * ha_intr; 340 PI2O_LCT ha_LCT; /* Complete list of devices */ 341#define le_type IdentityTag[0] 342#define I2O_BSA 0x20 343#define I2O_FCA 0x40 344#define I2O_SCSI 0x00 345#define I2O_PORT 0x80 346#define I2O_UNKNOWN 0x7F 347#define le_bus IdentityTag[1] 348#define le_target IdentityTag[2] 349#define le_lun IdentityTag[3] 350 target2lun_t * ha_targets[MAX_CHANNEL+1]; 351 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs; 352 u_long ha_Msgs_Phys; 353 354 u_int8_t ha_in_reset; 
355#define HA_OPERATIONAL 0 356#define HA_IN_RESET 1 357#define HA_OFF_LINE 2 358#define HA_OFF_LINE_RECOVERY 3 359 /* Configuration information */ 360 /* The target id maximums we take */ 361 u_int8_t ha_MaxBus; /* Maximum bus */ 362 u_int8_t ha_MaxId; /* Maximum target ID */ 363 u_int8_t ha_MaxLun; /* Maximum target LUN */ 364 u_int8_t ha_SgSize; /* Max SG elements */ 365 u_int8_t ha_pciBusNum; 366 u_int8_t ha_pciDeviceNum; 367 u_int8_t ha_adapter_target[MAX_CHANNEL+1]; 368 u_int16_t ha_QueueSize; /* Max outstanding commands */ 369 u_int16_t ha_Msgs_Count; 370 371 /* Links into other parents and HBAs */ 372 struct Asr_softc * ha_next; /* HBA list */ 373 struct cdev *ha_devt; 374} Asr_softc_t; 375 376static Asr_softc_t * Asr_softc; 377 378/* 379 * Prototypes of the routines we have in this object. 380 */ 381 382/* I2O HDM interface */ 383static int asr_probe(device_t tag); 384static int asr_attach(device_t tag); 385 386static int asr_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, 387 struct thread *td); 388static int asr_open(struct cdev *dev, int32_t flags, int32_t ifmt, 389 struct thread *td); 390static int asr_close(struct cdev *dev, int flags, int ifmt, struct thread *td); 391static int asr_intr(Asr_softc_t *sc); 392static void asr_timeout(void *arg); 393static int ASR_init(Asr_softc_t *sc); 394static int ASR_acquireLct(Asr_softc_t *sc); 395static int ASR_acquireHrt(Asr_softc_t *sc); 396static void asr_action(struct cam_sim *sim, union ccb *ccb); 397static void asr_poll(struct cam_sim *sim); 398static int ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message); 399 400/* 401 * Here is the auto-probe structure used to nest our tests appropriately 402 * during the startup phase of the operating system. 
403 */ 404static device_method_t asr_methods[] = { 405 DEVMETHOD(device_probe, asr_probe), 406 DEVMETHOD(device_attach, asr_attach), 407 { 0, 0 } 408}; 409 410static driver_t asr_driver = { 411 "asr", 412 asr_methods, 413 sizeof(Asr_softc_t) 414}; 415 416static devclass_t asr_devclass; 417DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0); 418 419/* 420 * devsw for asr hba driver 421 * 422 * only ioctl is used. the sd driver provides all other access. 423 */ 424static struct cdevsw asr_cdevsw = { 425 .d_version = D_VERSION, 426 .d_flags = D_NEEDGIANT, 427 .d_open = asr_open, 428 .d_close = asr_close, 429 .d_ioctl = asr_ioctl, 430 .d_name = "asr", 431}; 432 433/* I2O support routines */ 434 435static __inline u_int32_t 436asr_get_FromFIFO(Asr_softc_t *sc) 437{ 438 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, 439 I2O_REG_FROMFIFO)); 440} 441 442static __inline u_int32_t 443asr_get_ToFIFO(Asr_softc_t *sc) 444{ 445 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, 446 I2O_REG_TOFIFO)); 447} 448 449static __inline u_int32_t 450asr_get_intr(Asr_softc_t *sc) 451{ 452 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, 453 I2O_REG_MASK)); 454} 455 456static __inline u_int32_t 457asr_get_status(Asr_softc_t *sc) 458{ 459 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, 460 I2O_REG_STATUS)); 461} 462 463static __inline void 464asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val) 465{ 466 bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO, 467 val); 468} 469 470static __inline void 471asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val) 472{ 473 bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO, 474 val); 475} 476 477static __inline void 478asr_set_intr(Asr_softc_t *sc, u_int32_t val) 479{ 480 bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK, 481 val); 482} 483 484static __inline void 485asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len) 486{ 487 
bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle, 488 offset, (u_int32_t *)frame, len); 489} 490 491/* 492 * Fill message with default. 493 */ 494static PI2O_MESSAGE_FRAME 495ASR_fillMessage(void *Message, u_int16_t size) 496{ 497 PI2O_MESSAGE_FRAME Message_Ptr; 498 499 Message_Ptr = (I2O_MESSAGE_FRAME *)Message; 500 bzero(Message_Ptr, size); 501 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11); 502 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr, 503 (size + sizeof(U32) - 1) >> 2); 504 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1); 505 KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL")); 506 return (Message_Ptr); 507} /* ASR_fillMessage */ 508 509#define EMPTY_QUEUE (-1L) 510 511static __inline U32 512ASR_getMessage(Asr_softc_t *sc) 513{ 514 U32 MessageOffset; 515 516 MessageOffset = asr_get_ToFIFO(sc); 517 if (MessageOffset == EMPTY_QUEUE) 518 MessageOffset = asr_get_ToFIFO(sc); 519 520 return (MessageOffset); 521} /* ASR_getMessage */ 522 523/* Issue a polled command */ 524static U32 525ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message) 526{ 527 U32 Mask = -1L; 528 U32 MessageOffset; 529 u_int Delay = 1500; 530 531 /* 532 * ASR_initiateCp is only used for synchronous commands and will 533 * be made more resiliant to adapter delays since commands like 534 * resetIOP can cause the adapter to be deaf for a little time. 535 */ 536 while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE) 537 && (--Delay != 0)) { 538 DELAY (10000); 539 } 540 if (MessageOffset != EMPTY_QUEUE) { 541 asr_set_frame(sc, Message, MessageOffset, 542 I2O_MESSAGE_FRAME_getMessageSize(Message)); 543 /* 544 * Disable the Interrupts 545 */ 546 Mask = asr_get_intr(sc); 547 asr_set_intr(sc, Mask | Mask_InterruptsDisabled); 548 asr_set_ToFIFO(sc, MessageOffset); 549 } 550 return (Mask); 551} /* ASR_initiateCp */ 552 553/* 554 * Reset the adapter. 
 */
static U32
ASR_resetIOP(Asr_softc_t *sc)
{
	/*
	 * The reset message is built in a local struct that reserves one
	 * extra U32 (R) immediately after the frame; the adapter writes
	 * its completion status word there (we hand it the physical
	 * address below).
	 */
	struct resetMessage {
		I2O_EXEC_IOP_RESET_MESSAGE	M;
		U32				R;
	} Message;
	PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
	U32 * volatile Reply_Ptr;	/* volatile: DMA target, polled */
	U32 Old;			/* saved interrupt mask */

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 * Reset the Reply Status word (the U32 directly after the frame)
	 * before the adapter can write to it.
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	    + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	    KVTOPHYS((void *)Reply_Ptr));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc,
	    (PI2O_MESSAGE_FRAME)Message_Ptr)) != -1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s
		 * (200 * 10ms).
		 */
		u_int8_t Delay = 200;

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 * Re-enable the interrupts (mask saved by ASR_initiateCp).
		 */
		asr_set_intr(sc, Old);
		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
		/* Non-zero status word from the adapter, 0 on timeout. */
		return(*Reply_Ptr);
	}
	KASSERT(Old != -1L, ("Old == -1"));
	return (0);
} /* ASR_resetIOP */

/*
 * Get the current state of the adapter.
 *
 * Issues an EXEC_STATUS_GET and polls `buffer' (a DMA target the
 * adapter fills in) until its SyncByte goes non-zero.  Returns
 * `buffer' on success, NULL on timeout or if no message frame was
 * available.
 */
static PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus(Asr_softc_t *sc, PI2O_EXEC_STATUS_GET_REPLY buffer)
{
	I2O_EXEC_STATUS_GET_MESSAGE	Message;
	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
	U32				Old;	/* saved interrupt mask */

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	    I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	    KVTOPHYS((void *)buffer));
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Reset the Reply Status
	 */
	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc,
	    (PI2O_MESSAGE_FRAME)Message_Ptr)) != -1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in
		 * 255ms (raised from 50ms in V1.08 for the 2000S/2005S).
		 */
		u_int8_t Delay = 255;

		/* volatile access: SyncByte is DMA'd in by the adapter */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = NULL;	/* timed out */
				break;
			}
			DELAY (1000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		return (buffer);
	}
	return (NULL);
} /* ASR_getStatus */

/*
 * Check if the device is a SCSI I2O HBA, and add it to the list.
 */

/*
 * Probe for ASR controller.  If we find it, we will use it.
 * virtual adapters.
 */
static int
asr_probe(device_t tag)
{
	u_int32_t id;

	/* Match on combined PCI device/vendor id. */
	id = (pci_get_device(tag) << 16) | pci_get_vendor(tag);
	if ((id == 0xA5011044) || (id == 0xA5111044)) {
		device_set_desc(tag, "Adaptec Caching SCSI RAID");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
} /* asr_probe */

/*
 * Allocate and initialize a driver-private ccb, recording the owning
 * softc in spriv_ptr0 so completion paths can find it.
 */
static __inline union asr_ccb *
asr_alloc_ccb(Asr_softc_t *sc)
{
	union asr_ccb *new_ccb;

	if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
	    M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
		new_ccb->ccb_h.pinfo.priority = 1;
		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
		new_ccb->ccb_h.spriv_ptr0 = sc;
	}
	return (new_ccb);
} /* asr_alloc_ccb */

/* Release a ccb obtained from asr_alloc_ccb(). */
static __inline void
asr_free_ccb(union asr_ccb *free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */

/*
 * Print inquiry data `carefully': stop at the length limit, at a NUL,
 * or at the first space or '-'.
 */
static void
ASR_prstring(u_int8_t *s, int len)
{
	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
		printf ("%c", *(s++));
	}
} /* ASR_prstring */

/*
 * Send a message synchronously and without Interrupt to a ccb.
 *
 * Posts `Message' with interrupts masked and spins, calling asr_intr()
 * by hand, until the ccb leaves CAM_REQ_INPROG.  Returns the final ccb
 * status.
 */
static int
ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
{
	int		s;	/* saved spl */
	U32		Mask;	/* saved interrupt mask */
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	s = splcam ();
	Mask = asr_get_intr(sc);
	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);

	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction,
	 * polling the interrupt handler directly.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	asr_set_intr(sc, Mask);
	splx(s);

	return (ccb->ccb_h.status);
} /* ASR_queue_s */

/*
 * Send a message synchronously to an Asr_softc_t, using a throw-away
 * ccb for the completion handshake.
 */
static int
ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	union asr_ccb *ccb;
	int status;

	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
		return (CAM_REQUEUE_REQ);
	}

	status = ASR_queue_s (ccb, Message);

	asr_free_ccb(ccb);

	return (status);
} /* ASR_queue_c */

/*
 * Add the specified ccb to the active queue and arm its timeout.
 * Must exclude interrupts (splcam) while the list is manipulated.
 */
static __inline void
ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
{
	int s;

	s = splcam();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 * Default to 6 minutes.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;
		}
		ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
		    (ccb->ccb_h.timeout * hz) / 1000);
	}
	splx(s);
} /* ASR_ccbAdd */

/*
 * Remove the specified ccb from the active queue, cancelling its
 * timeout.  Must exclude interrupts (splcam).
 */
static __inline void
ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
{
	int s;

	s = splcam();
	untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	splx(s);
} /* ASR_ccbRemove */

/*
 * Fail all the active commands, so they get re-issued by the operating
 * system.
 */
static void
ASR_failActiveCommands(Asr_softc_t *sc)
{
	struct ccb_hdr	*ccb;
	int		s;

	s = splcam();
	/*
	 * We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		/* Mark for requeue by the upper layers. */
		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transferred */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		/*
		 * ccbs with a path came from CAM; pathless ones belong
		 * to a synchronous ASR_queue_s() sleeper.
		 */
		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup (ccb);
		}
	}
	splx(s);
} /* ASR_failActiveCommands */

/*
 * The following command causes the HBA to reset the specific bus.
 * Walks the LCT for the bus-port entry matching `bus' and fires an
 * asynchronous I2O_HBA_BUS_RESET at its TID.
 */
static void
ASR_resetBus(Asr_softc_t *sc, int bus)
{
	I2O_HBA_BUS_RESET_MESSAGE	Message;
	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
	PI2O_LCT_ENTRY			Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
	    sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	    I2O_HBA_BUS_RESET);
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	    ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			    &Message_Ptr->StdMessageFrame,
			    I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */

/*
 * Read the blink-LED diagnostic code from the frame window.
 * Returns 0 if the adapter is healthy (signature byte != 0xBC) or if
 * `sc' is NULL; otherwise the non-zero blink code.
 */
static __inline int
ASR_getBlinkLedCode(Asr_softc_t *sc)
{
	U8 blink;

	if (sc == NULL)
		return (0);

	/* Signature byte at ha_blinkLED+1 must read 0xBC. */
	blink = bus_space_read_1(sc->ha_frame_btag,
	    sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
	if (blink != 0xBC)
		return (0);

	blink = bus_space_read_1(sc->ha_frame_btag,
	    sc->ha_frame_bhandle, sc->ha_blinkLED);
	return (blink);
} /* ASR_getBlinkCode */

/*
 * Determine the address of a TID lookup. Must be done at high priority
 * since the address can be changed by other threads of execution.
 *
 * Returns NULL pointer if not indexable (but will attempt to generate
 * an index if `new_entry' flag is set to TRUE).
 *
 * All addressable entries are to be guaranteed zero if never initialized.
 */
static tid_t *
ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
{
	target2lun_t	*bus_ptr;	/* per-bus target table */
	lun2tid_t	*target_ptr;	/* per-target LUN table */
	unsigned	new_size;

	/*
	 * Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 * sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		    (u_long)sc, bus, target, lun);
		return (NULL);
	}
	/*
	 * See if there is an associated bus list.
	 *
	 * for performance, allocate in size of BUS_CHUNK chunks.
	 * BUS_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#define BUS_CHUNK 8
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO))
		   == NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return (NULL);
		}
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return (NULL);
		}
		/*
		 * Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		free(bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 * We now have the bus list, lets get to the target list.
	 * Since most systems have only *one* lun, we do not allocate
	 * in chunks as above, here we allow one, then in chunk sizes.
	 * TARGET_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return (NULL);
		}
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return (NULL);
		}
		/*
		 * Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		free(target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 * Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */

/*
 * Get a pre-existing TID relationship.
 *
 * If the TID was never set, return (tid_t)-1.
 *
 * should use mutex rather than spl.
 */
static __inline tid_t
ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
{
	tid_t	*tid_ptr;
	int	s;
	tid_t	retval;

	s = splcam();
	/* Read-only lookup: FALSE means do not allocate table levels */
	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
	/* (tid_t)0 or (tid_t)-1 indicate no TID */
	 || (*tid_ptr == (tid_t)0)) {
		splx(s);
		return ((tid_t)-1);
	}
	retval = *tid_ptr;
	splx(s);
	return (retval);
} /* ASR_getTid */

/*
 * Set a TID relationship.
 *
 * If the TID was not set, return (tid_t)-1.
 *
 * should use mutex rather than spl.
 */
static __inline tid_t
ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t TID)
{
	tid_t	*tid_ptr;
	int	s;

	if (TID != (tid_t)-1) {
		/* TID 0 is reserved as the "no TID" marker and cannot be cached */
		if (TID == 0) {
			return ((tid_t)-1);
		}
		s = splcam();
		/* TRUE requests allocation of the bus/target tables on demand */
		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
		  == NULL) {
			splx(s);
			return ((tid_t)-1);
		}
		*tid_ptr = TID;
		splx(s);
	}
	return (TID);
} /* ASR_setTid */

/*-------------------------------------------------------------------------*/
/* Function ASR_rescan                                                     */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :                            */
/*     Asr_softc_t * : HBA miniport driver's adapter data storage.         */
/*                                                                         */
/* This Function Will rescan the adapter and resynchronize any data        */
/*                                                                         */
/* Return : 0 For OK, Error Code Otherwise                                 */
/*-------------------------------------------------------------------------*/

static int
ASR_rescan(Asr_softc_t *sc)
{
	int bus;
	int error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups (walk busses MaxBus..0) */
	do {
		int target, event = 0;

		/*
		 * Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			/* Stay away from the controller ID */
			if (target == sc->ha_adapter_target[bus]) {
				continue;
			}
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t TID = (tid_t)-1;
				tid_t LastTID;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT.
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				    Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				     + I2O_LCT_getTableSize(sc->ha_LCT));
				    ++Device) {
					/* UserTID 0xFFF marks an unclaimed device entry */
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					  == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						    Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 */
				LastTID = ASR_getTid (sc, bus, target, lun);
				if (LastTID != TID) {
					struct cam_path * path;

					/*
					 * If no per-device path can be built, fall back
					 * to batching a per-bus event (flushed below).
					 */
					if (xpt_create_path(&path,
					    /*periph*/NULL,
					    cam_sim_path(sc->ha_sim[bus]),
					    target, lun) != CAM_REQ_CMP) {
						if (TID == (tid_t)-1) {
							event |= AC_LOST_DEVICE;
						} else {
							event |= AC_INQ_CHANGED
							       | AC_GETDEV_CHANGED;
						}
					} else {
						if (TID == (tid_t)-1) {
							/* Device disappeared from the LCT */
							xpt_async(
							    AC_LOST_DEVICE,
							    path, NULL);
						} else if (LastTID == (tid_t)-1) {
							/* Device is new to us */
							struct ccb_getdev ccb;

							xpt_setup_ccb(
							    &(ccb.ccb_h),
							    path, /*priority*/5);
							xpt_async(
							    AC_FOUND_DEVICE,
							    path,
							    &ccb);
						} else {
							/* Same slot, different TID */
							xpt_async(
							    AC_INQ_CHANGED,
							    path, NULL);
							xpt_async(
							    AC_GETDEV_CHANGED,
							    path, NULL);
						}
					}
				}
				/*
				 * We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
		/*
		 * The xpt layer can not handle multiple events at the
		 * same call.
		 */
		if (event & AC_LOST_DEVICE) {
			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
		}
		if (event & AC_INQ_CHANGED) {
			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
		}
		if (event & AC_GETDEV_CHANGED) {
			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */

/*-------------------------------------------------------------------------*/
/* Function ASR_reset                                                      */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :                            */
/*     Asr_softc_t * : HBA miniport driver's adapter data storage.
*/ 1235/* */ 1236/* This Function Will reset the adapter and resynchronize any data */ 1237/* */ 1238/* Return : None */ 1239/*-------------------------------------------------------------------------*/ 1240 1241static int 1242ASR_reset(Asr_softc_t *sc) 1243{ 1244 int s, retVal; 1245 1246 s = splcam(); 1247 if ((sc->ha_in_reset == HA_IN_RESET) 1248 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) { 1249 splx (s); 1250 return (EBUSY); 1251 } 1252 /* 1253 * Promotes HA_OPERATIONAL to HA_IN_RESET, 1254 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY. 1255 */ 1256 ++(sc->ha_in_reset); 1257 if (ASR_resetIOP(sc) == 0) { 1258 debug_asr_printf ("ASR_resetIOP failed\n"); 1259 /* 1260 * We really need to take this card off-line, easier said 1261 * than make sense. Better to keep retrying for now since if a 1262 * UART cable is connected the blinkLEDs the adapter is now in 1263 * a hard state requiring action from the monitor commands to 1264 * the HBA to continue. For debugging waiting forever is a 1265 * good thing. In a production system, however, one may wish 1266 * to instead take the card off-line ... 1267 */ 1268 /* Wait Forever */ 1269 while (ASR_resetIOP(sc) == 0); 1270 } 1271 retVal = ASR_init (sc); 1272 splx (s); 1273 if (retVal != 0) { 1274 debug_asr_printf ("ASR_init failed\n"); 1275 sc->ha_in_reset = HA_OFF_LINE; 1276 return (ENXIO); 1277 } 1278 if (ASR_rescan (sc) != 0) { 1279 debug_asr_printf ("ASR_rescan failed\n"); 1280 } 1281 ASR_failActiveCommands (sc); 1282 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) { 1283 printf ("asr%d: Brining adapter back on-line\n", 1284 sc->ha_path[0] 1285 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0])) 1286 : 0); 1287 } 1288 sc->ha_in_reset = HA_OPERATIONAL; 1289 return (0); 1290} /* ASR_reset */ 1291 1292/* 1293 * Device timeout handler. 
 */
static void
asr_timeout(void *arg)
{
	union asr_ccb *ccb = (union asr_ccb *)arg;
	Asr_softc_t *sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int s;

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 * Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		printf ("asr%d: Blink LED 0x%x resetting adapter\n",
		    cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
			    (caddr_t)ccb,
			    (ccb->ccb_h.timeout * hz) / 1000);
		}
		return;
	}
	/*
	 * Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	s = splcam();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		/* Second timeout on the same ccb: escalate to adapter reset */
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
			    (caddr_t)ccb,
			    (ccb->ccb_h.timeout * hz) / 1000);
		}
		splx(s);
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	/* Mark the ccb so a second timeout escalates (see check above) */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
	    (ccb->ccb_h.timeout * hz) / 1000);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	splx(s);
} /* asr_timeout */

/*
 * send a message asynchronously
 */
static int
ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	U32 MessageOffset;
	union asr_ccb *ccb;

	debug_asr_printf("Host Command Dump:\n");
	debug_asr_dump_message(Message);

	/* Recover the ccb pointer stashed in the initiator context field */
	ccb = (union asr_ccb *)(long)
	    I2O_MESSAGE_FRAME_getInitiatorContext64(Message);

	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
		asr_set_frame(sc, Message, MessageOffset,
		    I2O_MESSAGE_FRAME_getMessageSize(Message));
		if (ccb) {
			/* Track the ccb so it can be completed or failed later */
			ASR_ccbAdd (sc, ccb);
		}
		/* Post the command */
		asr_set_ToFIFO(sc, MessageOffset);
	} else {
		if (ASR_getBlinkLedCode(sc)) {
			/*
			 * Unlikely we can do anything if we can't grab a
			 * message frame :-(, but lets give it a try.
			 */
			(void)ASR_reset(sc);
		}
	}
	/* EMPTY_QUEUE here tells the caller the command was not posted */
	return (MessageOffset);
} /* ASR_queue */


/* Simple Scatter Gather elements */
/*
 * SG(SGL, Index, Flags, Buffer, Size): fill simple SG element `Index' of the
 * list `SGL' with the physical address of `Buffer' (0 for NULL) and `Size'
 * bytes, OR-ing `Flags' into the mandatory SIMPLE_ADDRESS_ELEMENT flag.
 * NOTE: multi-statement macro without do/while(0) -- safe only as a full
 * statement, which is how every call site below uses it.
 */
#define SG(SGL,Index,Flags,Buffer,Size)				\
	I2O_FLAGS_COUNT_setCount(				\
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount),	\
	  Size);						\
	I2O_FLAGS_COUNT_setFlags(				\
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount),	\
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	\
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		\
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		\
	  (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))

/*
 * Retrieve Parameter Group.
 */
/*
 * Issue an I2O UTIL_PARAMS_GET for parameter group `Group' of device `TID'.
 * On success returns a pointer INTO the caller-supplied `Buffer' (just past
 * the result headers); returns NULL on failure.  Buffer is zeroed first.
 */
static void *
ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
	      unsigned BufferSize)
{
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		char
		   F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		} O;
	} Message;
	struct Operations *Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE *Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER Header;
		I2O_PARAM_READ_OPERATION_RESULT Read;
		char Info[1];
	} *Buffer_Ptr;

	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
	    sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	     + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	/* The operations list lives in the F/O tail of the local Message */
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	    + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero(Operations_Ptr, sizeof(struct Operations));
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	    &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	    &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	/* 0xFFFF field count == request all fields of the group */
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	    &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	    &(Operations_Ptr->Template[0]), Group);
	Buffer_Ptr = (struct ParamBuffer *)Buffer;
	bzero(Buffer_Ptr, BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    I2O_VERSION_11
	    + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	    TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	    I2O_UTIL_PARAMS_GET);
	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	    I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	    Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	    I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	    Buffer_Ptr, BufferSize);

	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return (NULL);
} /* ASR_getParams */

/*
 * Acquire the LCT information.
 */
static int
ASR_acquireLct(Asr_softc_t *sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	int MessageSizeInBytes;
	caddr_t v;
	int len;
	I2O_LCT Table;
	PI2O_LCT_ENTRY Entry;

	/*
	 * sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc(
	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	    I2O_CLASS_MATCH_ANYCLASS);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	    I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	    sizeof(I2O_LCT));
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	    (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/* First query: adapter fills in the real table size in Table */
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 * Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		free(sc->ha_LCT, M_TEMP);
	}
	/*
	 * malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	    (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free(Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	    (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 * Convert the access to the LCT table into a SG list, one simple
	 * element per physically-contiguous run; the message frame is
	 * reallocated and grown each time another element is needed.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		    &(Message_Ptr->StdMessageFrame),
		    I2O_MESSAGE_FRAME_getMessageSize(
			&(Message_Ptr->StdMessageFrame))
		    + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    malloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == NULL) {
				free(sc->ha_LCT, M_TEMP);
				sc->ha_LCT = NULL;
				free(Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			/* Copy the message built so far and re-aim sg into it */
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy(Message_Ptr, NewMessage_Ptr, span);
			free(Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			    (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		/* Second query: this one fills sc->ha_LCT via the SG list */
		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free(Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/* Classify every entry and fill in le_type/le_bus/le_target/le_lun */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	    ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			    I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
			{	struct ControllerInfo {
					I2O_PARAM_RESULTS_LIST_HEADER Header;
					I2O_PARAM_READ_OPERATION_RESULT Read;
					I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
				} Buffer;
				PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

				Entry->le_bus = 0xff;
				Entry->le_target = 0xff;
				Entry->le_lun = 0xff;

				if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
				    ASR_getParams(sc,
				      I2O_LCT_ENTRY_getLocalTID(Entry),
				      I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
				      &Buffer, sizeof(struct ControllerInfo))) == NULL) {
					continue;
				}
				Entry->le_target
				  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
				      Info);
				Entry->le_lun = 0;
			}	/* FALLTHRU */
		default:
			/*
			 * NOTE: port classes above deliberately fall through
			 * here and skip the per-device info query below.
			 */
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR Info;
			} Buffer;
			PI2O_DPT_DEVICE_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			    ASR_getParams(sc,
			      I2O_LCT_ENTRY_getLocalTID(Entry),
			      I2O_DPT_DEVICE_INFO_GROUP_NO,
			      &Buffer, sizeof(struct DeviceInfo))) == NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			/* Track the highest bus number seen, capped at MAX_CHANNEL */
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 * A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */

/*
 * Initialize a message frame.
 * We assume that the CDB has already been set up, so all we do here is
 * generate the Scatter Gather list.
 */
static PI2O_MESSAGE_FRAME
ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
{
	PI2O_MESSAGE_FRAME Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	Asr_softc_t *sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	vm_size_t size, len;
	caddr_t v;
	U32 MessageSize;
	int next, span, base, rw;
	int target = ccb->ccb_h.target_id;
	int lun = ccb->ccb_h.target_lun;
	int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
	tid_t TID;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT)));

	/* Cache miss: resolve the TID by scanning the LCT and cache it */
	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
		PI2O_LCT_ENTRY Device;

		TID = 0;
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
		    ++Device) {
			if ((Device->le_type != I2O_UNKNOWN)
			 && (Device->le_bus == bus)
			 && (Device->le_target == target)
			 && (Device->le_lun == lun)
			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
				TID = I2O_LCT_ENTRY_getLocalTID(Device);
				ASR_setTid(sc, Device->le_bus,
				    Device->le_target, Device->le_lun,
				    TID);
				break;
			}
		}
	}
	/* No such device: caller treats NULL as "cannot build the message" */
	if (TID == (tid_t)0) {
		return (NULL);
	}
	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy(&(ccb->csio.cdb_io),
	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
	    ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	/* Re-set the SCB flags now that the transfer direction is known */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    (ccb->csio.dxfer_len)
	      ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		       | I2O_SCB_FLAG_ENABLE_DISCONNECT
		       | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		       | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
		      : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		       | I2O_SCB_FLAG_ENABLE_DISCONNECT
		       | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		       | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	      : (I2O_SCB_FLAG_ENABLE_DISCONNECT
	       | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	       | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	/* One simple SG element per physically-contiguous run, up to SG_SIZE */
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	    Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		    I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	    &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	    MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */

/*
 * Reset the adapter.
 */
static U32
ASR_initOutBound(Asr_softc_t *sc)
{
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32 R;			/* reply-status word, polled below */
	} Message;
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
	U32 *volatile Reply_Ptr;	/* volatile: written by the adapter */
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	    sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 * Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	    + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	    sizeof(U32));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc,
	    (PI2O_MESSAGE_FRAME)Message_Ptr)) != -1L) {
		u_long size, addr;

		/*
		 * Wait for a response (Poll).  Busy-wait with no timeout;
		 * relies on the adapter eventually updating *Reply_Ptr.
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 * Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		/*
		 * Populate the outbound table.
		 */
		if (sc->ha_Msgs == NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			    * sc->ha_Msgs_Count;

			/*
			 * contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			    contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			      0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
				bzero(sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		if (sc->ha_Msgs != NULL)
			for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
			    size; --size) {
				asr_set_FromFIFO(sc, addr);
				addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
			}
		return (*Reply_Ptr);
	}
	/* ASR_initiateCp failed: 0 signals failure to the caller */
	return (0);
} /* ASR_initOutBound */

/*
 * Set the system table
 */
static int
ASR_setSysTab(Asr_softc_t *sc)
{
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER SystemTable;
	Asr_softc_t * ha;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	int retVal;

	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
	    sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
		return (ENOMEM);
	}
	/* One system-table entry per attached asr adapter */
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		++SystemTable->NumberEntries;
	}
	/* +3: header element plus the two NULL terminator elements below */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
	    sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	     + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	    M_TEMP, M_WAITOK)) == NULL) {
		free(SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr,
	    sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	     + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11 +
	    (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_SYS_TAB_SET);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	    + ((I2O_MESSAGE_FRAME_getVersionOffset(
		&(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		SG(sg, 0,
		    ((ha->ha_next)
		      ? (I2O_SGL_FLAGS_DIR)
		      : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		    &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Two zero-length elements terminate the list per the I2O spec usage here */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	free(Message_Ptr, M_TEMP);
	free(SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */

/*
 * Fetch the Hardware Resource Table and propagate each entry's bus number
 * into the matching LCT entries (by local TID), tracking ha_MaxBus.
 */
static int
ASR_acquireHrt(Asr_softc_t *sc)
{
	I2O_EXEC_HRT_GET_MESSAGE Message;
	I2O_EXEC_HRT_GET_MESSAGE *Message_Ptr;
	struct {
		I2O_HRT Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	} Hrt;
	u_int8_t NumberOfEntries;
	PI2O_HRT_ENTRY Entry;

	bzero(&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11
	    + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_HRT_GET);

	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	    I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	    &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp to what our local Hrt buffer can actually hold */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	    NumberOfEntries != 0;
	    ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		    ++Device) {
			/* AdapterID: low 12 bits TID, bits 16+ bus number */
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				    Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */

/*
 * Enable the adapter.
 */
static int
ASR_enableSys(Asr_softc_t *sc)
{
	I2O_EXEC_SYS_ENABLE_MESSAGE Message;
	PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;

	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_SYS_ENABLE);
	/* Nonzero return means the enable command did not complete cleanly */
	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
} /* ASR_enableSys */

/*
 * Perform the stages necessary to initialize the adapter
 */
static int
ASR_init(Asr_softc_t *sc)
{
	/* Returns nonzero (true) if any stage fails; 0 on full success */
	return ((ASR_initOutBound(sc) == 0)
	     || (ASR_setSysTab(sc) != CAM_REQ_CMP)
	     || (ASR_enableSys(sc) != CAM_REQ_CMP));
} /* ASR_init */

/*
 * Send a Synchronize Cache command to the target device.
2095 */ 2096static void 2097ASR_sync(Asr_softc_t *sc, int bus, int target, int lun) 2098{ 2099 tid_t TID; 2100 2101 /* 2102 * We will not synchronize the device when there are outstanding 2103 * commands issued by the OS (this is due to a locked up device, 2104 * as the OS normally would flush all outstanding commands before 2105 * issuing a shutdown or an adapter reset). 2106 */ 2107 if ((sc != NULL) 2108 && (LIST_FIRST(&(sc->ha_ccb)) != NULL) 2109 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1) 2110 && (TID != (tid_t)0)) { 2111 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message; 2112 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 2113 2114 Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message; 2115 bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2116 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2117 2118 I2O_MESSAGE_FRAME_setVersionOffset( 2119 (PI2O_MESSAGE_FRAME)Message_Ptr, 2120 I2O_VERSION_11 2121 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2122 - sizeof(I2O_SG_ELEMENT)) 2123 / sizeof(U32)) << 4)); 2124 I2O_MESSAGE_FRAME_setMessageSize( 2125 (PI2O_MESSAGE_FRAME)Message_Ptr, 2126 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2127 - sizeof(I2O_SG_ELEMENT)) 2128 / sizeof(U32)); 2129 I2O_MESSAGE_FRAME_setInitiatorAddress ( 2130 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 2131 I2O_MESSAGE_FRAME_setFunction( 2132 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 2133 I2O_MESSAGE_FRAME_setTargetAddress( 2134 (PI2O_MESSAGE_FRAME)Message_Ptr, TID); 2135 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 2136 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2137 I2O_SCSI_SCB_EXEC); 2138 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID); 2139 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2140 I2O_SCB_FLAG_ENABLE_DISCONNECT 2141 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2142 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2143 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2144 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2145 DPT_ORGANIZATION_ID); 2146 
PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 2147 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE; 2148 Message_Ptr->CDB[1] = (lun << 5); 2149 2150 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2151 (I2O_SCB_FLAG_XFER_FROM_DEVICE 2152 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2153 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2154 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2155 2156 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2157 2158 } 2159} 2160 2161static void 2162ASR_synchronize(Asr_softc_t *sc) 2163{ 2164 int bus, target, lun; 2165 2166 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) { 2167 for (target = 0; target <= sc->ha_MaxId; ++target) { 2168 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) { 2169 ASR_sync(sc,bus,target,lun); 2170 } 2171 } 2172 } 2173} 2174 2175/* 2176 * Reset the HBA, targets and BUS. 2177 * Currently this resets *all* the SCSI busses. 2178 */ 2179static __inline void 2180asr_hbareset(Asr_softc_t *sc) 2181{ 2182 ASR_synchronize(sc); 2183 (void)ASR_reset(sc); 2184} /* asr_hbareset */ 2185 2186/* 2187 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP 2188 * limit and a reduction in error checking (in the pre 4.0 case). 2189 */ 2190static int 2191asr_pci_map_mem(device_t tag, Asr_softc_t *sc) 2192{ 2193 int rid; 2194 u_int32_t p, l, s; 2195 2196 /* 2197 * I2O specification says we must find first *memory* mapped BAR 2198 */ 2199 for (rid = 0; rid < 4; rid++) { 2200 p = pci_read_config(tag, PCIR_BAR(rid), sizeof(p)); 2201 if ((p & 1) == 0) { 2202 break; 2203 } 2204 } 2205 /* 2206 * Give up? 2207 */ 2208 if (rid >= 4) { 2209 rid = 0; 2210 } 2211 rid = PCIR_BAR(rid); 2212 p = pci_read_config(tag, rid, sizeof(p)); 2213 pci_write_config(tag, rid, -1, sizeof(p)); 2214 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15); 2215 pci_write_config(tag, rid, p, sizeof(p)); 2216 if (l > MAX_MAP) { 2217 l = MAX_MAP; 2218 } 2219 /* 2220 * The 2005S Zero Channel RAID solution is not a perfect PCI 2221 * citizen. 
It asks for 4MB on BAR0, and 0MB on BAR1, once 2222 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to 2223 * BAR0+2MB and sets it's size to 2MB. The IOP registers are 2224 * accessible via BAR0, the messaging registers are accessible 2225 * via BAR1. If the subdevice code is 50 to 59 decimal. 2226 */ 2227 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s)); 2228 if (s != 0xA5111044) { 2229 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s)); 2230 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0) 2231 && (ADPTDOMINATOR_SUB_ID_START <= s) 2232 && (s <= ADPTDOMINATOR_SUB_ID_END)) { 2233 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */ 2234 } 2235 } 2236 p &= ~15; 2237 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid, 2238 p, p + l, l, RF_ACTIVE); 2239 if (sc->ha_mem_res == NULL) { 2240 return (0); 2241 } 2242 sc->ha_Base = rman_get_start(sc->ha_mem_res); 2243 sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res); 2244 sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res); 2245 2246 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */ 2247 if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) { 2248 return (0); 2249 } 2250 p = pci_read_config(tag, rid, sizeof(p)); 2251 pci_write_config(tag, rid, -1, sizeof(p)); 2252 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15); 2253 pci_write_config(tag, rid, p, sizeof(p)); 2254 if (l > MAX_MAP) { 2255 l = MAX_MAP; 2256 } 2257 p &= ~15; 2258 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid, 2259 p, p + l, l, RF_ACTIVE); 2260 if (sc->ha_mes_res == NULL) { 2261 return (0); 2262 } 2263 sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res); 2264 sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res); 2265 } else { 2266 sc->ha_frame_bhandle = sc->ha_i2o_bhandle; 2267 sc->ha_frame_btag = sc->ha_i2o_btag; 2268 } 2269 return (1); 2270} /* asr_pci_map_mem */ 2271 2272/* 2273 * A simplified copy of the real pci_map_int with additional 2274 * registration requirements. 
2275 */ 2276static int 2277asr_pci_map_int(device_t tag, Asr_softc_t *sc) 2278{ 2279 int rid = 0; 2280 2281 sc->ha_irq_res = bus_alloc_resource_any(tag, SYS_RES_IRQ, &rid, 2282 RF_ACTIVE | RF_SHAREABLE); 2283 if (sc->ha_irq_res == NULL) { 2284 return (0); 2285 } 2286 if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM | INTR_ENTROPY, 2287 (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) { 2288 return (0); 2289 } 2290 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char)); 2291 return (1); 2292} /* asr_pci_map_int */ 2293 2294/* 2295 * Attach the devices, and virtual devices to the driver list. 2296 */ 2297static int 2298asr_attach(device_t tag) 2299{ 2300 PI2O_EXEC_STATUS_GET_REPLY status; 2301 PI2O_LCT_ENTRY Device; 2302 Asr_softc_t *sc, **ha; 2303 struct scsi_inquiry_data *iq; 2304 union asr_ccb *ccb; 2305 int bus, size, unit = device_get_unit(tag); 2306 2307 if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { 2308 return(ENOMEM); 2309 } 2310 if (Asr_softc == NULL) { 2311 /* 2312 * Fixup the OS revision as saved in the dptsig for the 2313 * engine (dptioctl.h) to pick up. 2314 */ 2315 bcopy(osrelease, &ASR_sig.dsDescription[16], 5); 2316 } 2317 /* 2318 * Initialize the software structure 2319 */ 2320 LIST_INIT(&(sc->ha_ccb)); 2321 /* Link us into the HA list */ 2322 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next)); 2323 *(ha) = sc; 2324 2325 /* 2326 * This is the real McCoy! 
2327 */ 2328 if (!asr_pci_map_mem(tag, sc)) { 2329 printf ("asr%d: could not map memory\n", unit); 2330 return(ENXIO); 2331 } 2332 /* Enable if not formerly enabled */ 2333 pci_write_config(tag, PCIR_COMMAND, 2334 pci_read_config(tag, PCIR_COMMAND, sizeof(char)) | 2335 PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char)); 2336 /* Knowledge is power, responsibility is direct */ 2337 { 2338 struct pci_devinfo { 2339 STAILQ_ENTRY(pci_devinfo) pci_links; 2340 struct resource_list resources; 2341 pcicfgregs cfg; 2342 } * dinfo = device_get_ivars(tag); 2343 sc->ha_pciBusNum = dinfo->cfg.bus; 2344 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3) | dinfo->cfg.func; 2345 } 2346 /* Check if the device is there? */ 2347 if ((ASR_resetIOP(sc) == 0) || 2348 ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc( 2349 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK)) == NULL) || 2350 (ASR_getStatus(sc, status) == NULL)) { 2351 printf ("asr%d: could not initialize hardware\n", unit); 2352 return(ENODEV); /* Get next, maybe better luck */ 2353 } 2354 sc->ha_SystemTable.OrganizationID = status->OrganizationID; 2355 sc->ha_SystemTable.IOP_ID = status->IOP_ID; 2356 sc->ha_SystemTable.I2oVersion = status->I2oVersion; 2357 sc->ha_SystemTable.IopState = status->IopState; 2358 sc->ha_SystemTable.MessengerType = status->MessengerType; 2359 sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize; 2360 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow = 2361 (U32)(sc->ha_Base + I2O_REG_TOFIFO); /* XXX 64-bit */ 2362 2363 if (!asr_pci_map_int(tag, (void *)sc)) { 2364 printf ("asr%d: could not map interrupt\n", unit); 2365 return(ENXIO); 2366 } 2367 2368 /* Adjust the maximim inbound count */ 2369 if (((sc->ha_QueueSize = 2370 I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) > 2371 MAX_INBOUND) || (sc->ha_QueueSize == 0)) { 2372 sc->ha_QueueSize = MAX_INBOUND; 2373 } 2374 2375 /* Adjust the maximum outbound count */ 2376 if (((sc->ha_Msgs_Count = 2377 
I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) > 2378 MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) { 2379 sc->ha_Msgs_Count = MAX_OUTBOUND; 2380 } 2381 if (sc->ha_Msgs_Count > sc->ha_QueueSize) { 2382 sc->ha_Msgs_Count = sc->ha_QueueSize; 2383 } 2384 2385 /* Adjust the maximum SG size to adapter */ 2386 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) << 2387 2)) > MAX_INBOUND_SIZE) { 2388 size = MAX_INBOUND_SIZE; 2389 } 2390 free(status, M_TEMP); 2391 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2392 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT); 2393 2394 /* 2395 * Only do a bus/HBA reset on the first time through. On this 2396 * first time through, we do not send a flush to the devices. 2397 */ 2398 if (ASR_init(sc) == 0) { 2399 struct BufferInfo { 2400 I2O_PARAM_RESULTS_LIST_HEADER Header; 2401 I2O_PARAM_READ_OPERATION_RESULT Read; 2402 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2403 } Buffer; 2404 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2405#define FW_DEBUG_BLED_OFFSET 8 2406 2407 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR) 2408 ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO, 2409 &Buffer, sizeof(struct BufferInfo))) != NULL) { 2410 sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET + 2411 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info); 2412 } 2413 if (ASR_acquireLct(sc) == 0) { 2414 (void)ASR_acquireHrt(sc); 2415 } 2416 } else { 2417 printf ("asr%d: failed to initialize\n", unit); 2418 return(ENXIO); 2419 } 2420 /* 2421 * Add in additional probe responses for more channels. We 2422 * are reusing the variable `target' for a channel loop counter. 2423 * Done here because of we need both the acquireLct and 2424 * acquireHrt data. 
2425 */ 2426 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 2427 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) { 2428 if (Device->le_type == I2O_UNKNOWN) { 2429 continue; 2430 } 2431 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) { 2432 if (Device->le_target > sc->ha_MaxId) { 2433 sc->ha_MaxId = Device->le_target; 2434 } 2435 if (Device->le_lun > sc->ha_MaxLun) { 2436 sc->ha_MaxLun = Device->le_lun; 2437 } 2438 } 2439 if (((Device->le_type & I2O_PORT) != 0) 2440 && (Device->le_bus <= MAX_CHANNEL)) { 2441 /* Do not increase MaxId for efficiency */ 2442 sc->ha_adapter_target[Device->le_bus] = 2443 Device->le_target; 2444 } 2445 } 2446 2447 /* 2448 * Print the HBA model number as inquired from the card. 2449 */ 2450 2451 printf("asr%d:", unit); 2452 2453 if ((iq = (struct scsi_inquiry_data *)malloc( 2454 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) != 2455 NULL) { 2456 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message; 2457 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 2458 int posted = 0; 2459 2460 Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message; 2461 bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - 2462 sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2463 2464 I2O_MESSAGE_FRAME_setVersionOffset( 2465 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 | 2466 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2467 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)); 2468 I2O_MESSAGE_FRAME_setMessageSize( 2469 (PI2O_MESSAGE_FRAME)Message_Ptr, 2470 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - 2471 sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) / 2472 sizeof(U32)); 2473 I2O_MESSAGE_FRAME_setInitiatorAddress( 2474 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 2475 I2O_MESSAGE_FRAME_setFunction( 2476 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 2477 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode( 2478 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC); 2479 
PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2480 I2O_SCB_FLAG_ENABLE_DISCONNECT 2481 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2482 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2483 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1); 2484 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2485 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2486 DPT_ORGANIZATION_ID); 2487 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 2488 Message_Ptr->CDB[0] = INQUIRY; 2489 Message_Ptr->CDB[4] = 2490 (unsigned char)sizeof(struct scsi_inquiry_data); 2491 if (Message_Ptr->CDB[4] == 0) { 2492 Message_Ptr->CDB[4] = 255; 2493 } 2494 2495 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2496 (I2O_SCB_FLAG_XFER_FROM_DEVICE 2497 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2498 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2499 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2500 2501 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount( 2502 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 2503 sizeof(struct scsi_inquiry_data)); 2504 SG(&(Message_Ptr->SGL), 0, 2505 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 2506 iq, sizeof(struct scsi_inquiry_data)); 2507 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2508 2509 if (iq->vendor[0] && (iq->vendor[0] != ' ')) { 2510 printf (" "); 2511 ASR_prstring (iq->vendor, 8); 2512 ++posted; 2513 } 2514 if (iq->product[0] && (iq->product[0] != ' ')) { 2515 printf (" "); 2516 ASR_prstring (iq->product, 16); 2517 ++posted; 2518 } 2519 if (iq->revision[0] && (iq->revision[0] != ' ')) { 2520 printf (" FW Rev. "); 2521 ASR_prstring (iq->revision, 4); 2522 ++posted; 2523 } 2524 free(iq, M_TEMP); 2525 if (posted) { 2526 printf (","); 2527 } 2528 } 2529 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1, 2530 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize); 2531 2532 /* 2533 * fill in the prototype cam_path. 
2534 */ 2535 if ((ccb = asr_alloc_ccb(sc)) == NULL) { 2536 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit); 2537 return(ENOMEM); 2538 } 2539 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) { 2540 struct cam_devq * devq; 2541 int QueueSize = sc->ha_QueueSize; 2542 2543 if (QueueSize > MAX_INBOUND) { 2544 QueueSize = MAX_INBOUND; 2545 } 2546 2547 /* 2548 * Create the device queue for our SIM(s). 2549 */ 2550 if ((devq = cam_simq_alloc(QueueSize)) == NULL) { 2551 continue; 2552 } 2553 2554 /* 2555 * Construct our first channel SIM entry 2556 */ 2557 sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc, 2558 unit, 1, QueueSize, devq); 2559 if (sc->ha_sim[bus] == NULL) { 2560 continue; 2561 } 2562 2563 if (xpt_bus_register(sc->ha_sim[bus], bus) != CAM_SUCCESS) { 2564 cam_sim_free(sc->ha_sim[bus], 2565 /*free_devq*/TRUE); 2566 sc->ha_sim[bus] = NULL; 2567 continue; 2568 } 2569 2570 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL, 2571 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD, 2572 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2573 xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus])); 2574 cam_sim_free(sc->ha_sim[bus], /*free_devq*/TRUE); 2575 sc->ha_sim[bus] = NULL; 2576 continue; 2577 } 2578 } 2579 asr_free_ccb(ccb); 2580 /* 2581 * Generate the device node information 2582 */ 2583 sc->ha_devt = make_dev(&asr_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, 2584 "asr%d", unit); 2585 if (sc->ha_devt != NULL) 2586 (void)make_dev_alias(sc->ha_devt, "rdpti%d", unit); 2587 sc->ha_devt->si_drv1 = sc; 2588 return(0); 2589} /* asr_attach */ 2590 2591static void 2592asr_poll(struct cam_sim *sim) 2593{ 2594 asr_intr(cam_sim_softc(sim)); 2595} /* asr_poll */ 2596 2597static void 2598asr_action(struct cam_sim *sim, union ccb *ccb) 2599{ 2600 struct Asr_softc *sc; 2601 2602 debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb, 2603 ccb->ccb_h.func_code); 2604 2605 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 
("asr_action\n")); 2606 2607 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim); 2608 2609 switch (ccb->ccb_h.func_code) { 2610 2611 /* Common cases first */ 2612 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2613 { 2614 struct Message { 2615 char M[MAX_INBOUND_SIZE]; 2616 } Message; 2617 PI2O_MESSAGE_FRAME Message_Ptr; 2618 2619 /* Reject incoming commands while we are resetting the card */ 2620 if (sc->ha_in_reset != HA_OPERATIONAL) { 2621 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2622 if (sc->ha_in_reset >= HA_OFF_LINE) { 2623 /* HBA is now off-line */ 2624 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR; 2625 } else { 2626 /* HBA currently resetting, try again later. */ 2627 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 2628 } 2629 debug_asr_cmd_printf (" e\n"); 2630 xpt_done(ccb); 2631 debug_asr_cmd_printf (" q\n"); 2632 break; 2633 } 2634 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2635 printf( 2636 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n", 2637 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), 2638 ccb->csio.cdb_io.cdb_bytes[0], 2639 cam_sim_bus(sim), 2640 ccb->ccb_h.target_id, 2641 ccb->ccb_h.target_lun); 2642 } 2643 debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim), 2644 cam_sim_bus(sim), ccb->ccb_h.target_id, 2645 ccb->ccb_h.target_lun); 2646 debug_asr_dump_ccb(ccb); 2647 2648 if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb, 2649 (PI2O_MESSAGE_FRAME)&Message)) != NULL) { 2650 debug_asr_cmd2_printf ("TID=%x:\n", 2651 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID( 2652 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)); 2653 debug_asr_cmd2_dump_message(Message_Ptr); 2654 debug_asr_cmd1_printf (" q"); 2655 2656 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) { 2657 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2658 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 2659 debug_asr_cmd_printf (" E\n"); 2660 xpt_done(ccb); 2661 } 2662 debug_asr_cmd_printf(" Q\n"); 2663 break; 2664 } 2665 /* 2666 * We will get here if there is no 
valid TID for the device 2667 * referenced in the scsi command packet. 2668 */ 2669 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2670 ccb->ccb_h.status |= CAM_SEL_TIMEOUT; 2671 debug_asr_cmd_printf (" B\n"); 2672 xpt_done(ccb); 2673 break; 2674 } 2675 2676 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 2677 /* Rese HBA device ... */ 2678 asr_hbareset (sc); 2679 ccb->ccb_h.status = CAM_REQ_CMP; 2680 xpt_done(ccb); 2681 break; 2682 2683#if (defined(REPORT_LUNS)) 2684 case REPORT_LUNS: 2685#endif 2686 case XPT_ABORT: /* Abort the specified CCB */ 2687 /* XXX Implement */ 2688 ccb->ccb_h.status = CAM_REQ_INVALID; 2689 xpt_done(ccb); 2690 break; 2691 2692 case XPT_SET_TRAN_SETTINGS: 2693 /* XXX Implement */ 2694 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 2695 xpt_done(ccb); 2696 break; 2697 2698 case XPT_GET_TRAN_SETTINGS: 2699 /* Get default/user set transfer settings for the target */ 2700 { 2701 struct ccb_trans_settings *cts; 2702 u_int target_mask; 2703 2704 cts = &(ccb->cts); 2705 target_mask = 0x01 << ccb->ccb_h.target_id; 2706 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 2707 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB; 2708 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2709 cts->sync_period = 6; /* 40MHz */ 2710 cts->sync_offset = 15; 2711 2712 cts->valid = CCB_TRANS_SYNC_RATE_VALID 2713 | CCB_TRANS_SYNC_OFFSET_VALID 2714 | CCB_TRANS_BUS_WIDTH_VALID 2715 | CCB_TRANS_DISC_VALID 2716 | CCB_TRANS_TQ_VALID; 2717 ccb->ccb_h.status = CAM_REQ_CMP; 2718 } else { 2719 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 2720 } 2721 xpt_done(ccb); 2722 break; 2723 } 2724 2725 case XPT_CALC_GEOMETRY: 2726 { 2727 struct ccb_calc_geometry *ccg; 2728 u_int32_t size_mb; 2729 u_int32_t secs_per_cylinder; 2730 2731 ccg = &(ccb->ccg); 2732 size_mb = ccg->volume_size 2733 / ((1024L * 1024L) / ccg->block_size); 2734 2735 if (size_mb > 4096) { 2736 ccg->heads = 255; 2737 ccg->secs_per_track = 63; 2738 } else if (size_mb > 2048) { 2739 ccg->heads = 128; 2740 
ccg->secs_per_track = 63; 2741 } else if (size_mb > 1024) { 2742 ccg->heads = 65; 2743 ccg->secs_per_track = 63; 2744 } else { 2745 ccg->heads = 64; 2746 ccg->secs_per_track = 32; 2747 } 2748 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 2749 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 2750 ccb->ccb_h.status = CAM_REQ_CMP; 2751 xpt_done(ccb); 2752 break; 2753 } 2754 2755 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 2756 ASR_resetBus (sc, cam_sim_bus(sim)); 2757 ccb->ccb_h.status = CAM_REQ_CMP; 2758 xpt_done(ccb); 2759 break; 2760 2761 case XPT_TERM_IO: /* Terminate the I/O process */ 2762 /* XXX Implement */ 2763 ccb->ccb_h.status = CAM_REQ_INVALID; 2764 xpt_done(ccb); 2765 break; 2766 2767 case XPT_PATH_INQ: /* Path routing inquiry */ 2768 { 2769 struct ccb_pathinq *cpi = &(ccb->cpi); 2770 2771 cpi->version_num = 1; /* XXX??? */ 2772 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2773 cpi->target_sprt = 0; 2774 /* Not necessary to reset bus, done by HDM initialization */ 2775 cpi->hba_misc = PIM_NOBUSRESET; 2776 cpi->hba_eng_cnt = 0; 2777 cpi->max_target = sc->ha_MaxId; 2778 cpi->max_lun = sc->ha_MaxLun; 2779 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)]; 2780 cpi->bus_id = cam_sim_bus(sim); 2781 cpi->base_transfer_speed = 3300; 2782 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2783 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); 2784 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2785 cpi->unit_number = cam_sim_unit(sim); 2786 cpi->ccb_h.status = CAM_REQ_CMP; 2787 xpt_done(ccb); 2788 break; 2789 } 2790 default: 2791 ccb->ccb_h.status = CAM_REQ_INVALID; 2792 xpt_done(ccb); 2793 break; 2794 } 2795} /* asr_action */ 2796 2797/* 2798 * Handle processing of current CCB as pointed to by the Status. 
 */
static int
asr_intr(Asr_softc_t *sc)
{
	int processed;

	/*
	 * Drain the reply FIFO while the status register shows work
	 * pending; `processed' records whether we handled anything.
	 * NOTE(review): the bit tested is named Mask_InterruptsDisabled —
	 * presumably it doubles as the interrupt-pending indication here;
	 * confirm against the register definitions.
	 */
	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
	    processed = 1) {
		union asr_ccb *ccb;
		u_int dsc;
		U32 ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;

		/* Read the FIFO twice before concluding it is empty. */
		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
			break;
		}
		/* Translate the bus (physical) offset to our kernel VA. */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		    - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		    I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		/* Failed-message frames require the original MFA recycled. */
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		    &(Reply->StdReplyFrame.StdMessageFrame))
		    & I2O_MESSAGE_FLAGS_FAIL) {
			I2O_UTIL_NOP_MESSAGE Message;
			PI2O_UTIL_NOP_MESSAGE Message_Ptr;
			U32 MessageOffset;

			MessageOffset = (u_long)
			    I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 * Get the Original Message Frame's address, and get
			 * it's Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext =
			    bus_space_read_4(sc->ha_frame_btag,
			    sc->ha_frame_bhandle, MessageOffset +
			    offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
			    TransactionContext));
			/*
			 * For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			    I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			    &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 * Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			    &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
#if (I2O_UTIL_NOP != 0)
			I2O_MESSAGE_FRAME_setFunction (
			    &(Message_Ptr->StdMessageFrame),
			    I2O_UTIL_NOP);
#endif
			/*
			 * Copy the packet out to the Original Message
			 */
			asr_set_frame(sc, Message_Ptr, MessageOffset,
			    sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 * Issue the NOP
			 */
			asr_set_ToFIFO(sc, MessageOffset);
		}

		/*
		 * Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			asr_set_FromFIFO(sc, ReplyOffset);
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/* Map the I2O detailed status onto a CAM status. */
		dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame));
		ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		switch (dsc) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/* Residual = requested length minus what actually moved. */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			    I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/* Clamp to every applicable limit. */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				bcopy(Reply->SenseData,
				    &(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		asr_set_FromFIFO(sc, ReplyOffset);

		/* Path-less CCBs are internal waiters, not CAM requests. */
		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup (ccb);
		}
	}
	return (processed);
} /* asr_intr */

#undef QueueSize /* Grrrr */
#undef SG_Size /* Grrrr */

/*
 * Meant to be included at the bottom of asr.c !!!
 */

/*
 * Included here as hard coded. Done because other necessary include
 * files utilize C++ comment structures which make them a nuisance to
 * include here just to pick up these three typedefs.
 */
typedef U32 DPT_TAG_T;
typedef U32 DPT_MSG_T;
typedef U32 DPT_RTN_T;

#undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" definition */
#include "dev/asr/osd_unix.h"

#define asr_unit(dev) minor(dev)

/* Non-zero while the control device is held open (single opener). */
static u_int8_t ASR_ctlr_held;

/*
 * Control-device open: only one opener at a time (ASR_ctlr_held),
 * and only for a privileged caller (suser).
 */
static int
asr_open(struct cdev *dev, int32_t flags, int32_t ifmt, struct thread *td)
{
	int s;
	int error;

	if (dev->si_drv1 == NULL) {
		return (ENODEV);
	}
	s = splcam ();
	if (ASR_ctlr_held) {
		error = EBUSY;
	} else if ((error = suser(td)) == 0) {
		++ASR_ctlr_held;
	}
	splx(s);
	return (error);
} /* asr_open */

/*
 * Control-device close: release the single-opener lock.
 */
static int
asr_close(struct cdev *dev, int flags, int ifmt, struct thread *td)
{

	ASR_ctlr_held = 0;
	return (0);
} /* asr_close */


/*-------------------------------------------------------------------------*/
/*                      Function ASR_queue_i                               */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :                            */
/* Asr_softc_t * : HBA miniport driver's
   adapter data storage. */
/* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command          */
/* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure       */
/*                                                                      */
/* This Function Will Take The User Request Packet And Convert It To An */
/* I2O MSG And Send It Off To The Adapter.                              */
/*                                                                      */
/* Return : 0 For OK, Error Code Otherwise                              */
/*-------------------------------------------------------------------------*/
static int
ASR_queue_i(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Packet)
{
	union asr_ccb * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
	PI2O_MESSAGE_FRAME Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
	int MessageSizeInBytes;
	int ReplySizeInBytes;
	int error;
	int s;
	/*
	 * Scatter Gather buffer list: one in-kernel bounce buffer per
	 * user SG element.  KernelSpace is a flexible-array-style tail
	 * (allocated as sizeof(*elm) - sizeof(KernelSpace) + len below).
	 */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t UserSpace;	/* original user-space address for copyout */
		I2O_FLAGS_COUNT FlagsCount; /* saved flags before SG splitting */
		char KernelSpace[sizeof(long)];
	} * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	/* Refuse new work while the adapter is reporting a fault code */
	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		    ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
	    sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf (
		    "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	    sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	    + MessageSizeInBytes);

	/*
	 * Check if the message is a synchronous initialization command.
	 * These are executed inline and reply directly to user space;
	 * the scratch frame is no longer needed past this point.
	 */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	free(Message_Ptr, M_TEMP);
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		    ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	I2O_EXEC_STATUS_GET_REPLY status;

		if (ASR_getStatus(sc, &status) == NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		    ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		    ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		    MessageSizeInBytes);
		return (EINVAL);
	}

	/* Re-fetch the full message now that the real size is known */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
	    M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		    MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	    MessageSizeInBytes)) != 0) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		    MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	    sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		    "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	    sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free(Reply_Ptr, M_TEMP);
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		    "Failed to copy in reply frame, errno=%d\n",
		    error);
		return (error);
	}
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	free(Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		free(Message_Ptr, M_TEMP);
		/*
		 * NOTE(review): `error' is 0 here (the copyin above
		 * succeeded), so the errno printed below is stale.
		 */
		debug_usr_cmd_printf (
		    "Failed to copy in reply frame[%d], errno=%d\n",
		    ReplySizeInBytes, error);
		return (EINVAL);
	}

	/* Allocate at least a full error-reply frame's worth of space */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	    ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	    M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		    ReplySizeInBytes);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
	/* Mirror the caller's context fields into the reply skeleton */
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	    = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	    = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	    I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	    | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		    Message_Ptr) & 0xF0) >> 2)) {
			free(Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			    &(Reply_Ptr->StdReplyFrame),
			    (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			    &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			    sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			    ReplySizeInBytes);
			free(Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 * since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		    + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		    + MessageSizeInBytes)) {
			caddr_t v;
			int len;

			/* Only simple-address SG elements are supported */
			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			    sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			    + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			    I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			/* Bounce buffer: header + len bytes of payload */
			if ((elm = (struct ioctlSgList_S *)malloc (
			    sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			    M_TEMP, M_WAITOK)) == NULL) {
				debug_usr_cmd_printf (
				    "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			elm->FlagsCount = sg->FlagsCount;
			/*
			 * The "PhysicalAddress" in a user-built frame is
			 * actually the user-space virtual address.
			 */
			elm->UserSpace = (caddr_t)
			    (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			    != 0) {
				break;
			}
			/*
			 * If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			    (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				    base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				    span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					    &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
						/* More pieces follow; clear
						 * the terminator bits. */
						flags &=
						    ~(I2O_SGL_FLAGS_END_OF_BUFFER
						     | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					    &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				    sg - (PI2O_SGE_SIMPLE_ELEMENT)
					((char *)Message_Ptr
				    + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				    I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				    span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				    I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				    + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					if ((NewMessage_Ptr
					    = (PI2O_MESSAGE_FRAME)
						malloc (MessageSizeInBytes,
						    M_TEMP, M_WAITOK)) == NULL) {
						debug_usr_cmd_printf (
						    "Failed to acquire frame[%d] memory\n",
						    MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					/* Grow the frame: copy the head, then
					 * re-copy the tail shifted down by one
					 * SG element (duplicating sg-1). */
					span = ((caddr_t)sg)
					    - (caddr_t)Message_Ptr;
					bcopy(Message_Ptr,NewMessage_Ptr, span);
					bcopy((caddr_t)(sg-1),
					    ((caddr_t)NewMessage_Ptr) + span,
					    MessageSizeInBytes - span);
					free(Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					    (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Tear down everything built so far */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free(elm, M_TEMP);
			}
			free(Reply_Ptr, M_TEMP);
			free(Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			free(elm, M_TEMP);
		}
		free(Reply_Ptr, M_TEMP);
		free(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	    (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	free(Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	s = splcam();
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			printf ("asr%d: Blink LED 0x%x resetting adapter\n",
			    cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			    ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			splx(s);
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free(elm, M_TEMP);
			}
			free(Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		/* There is no PRICAM, but outwardly PRIBIO is functional */
		tsleep(ccb, PRIBIO, "asr", hz);
	}
	splx(s);

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	    &(Reply_Ptr->StdReplyFrame),
	    (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	    - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		    ccb->csio.dxfer_len - ccb->csio.resid);
	}
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	    > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	    - I2O_SCSI_SENSE_DATA_SZ))) {
		/*
		 * NOTE(review): the guard above tests against
		 * (sizeof(frame) - SZ) but the expression below subtracts
		 * sizeof(frame) AND SZ, so `size' can go negative; the
		 * signed/unsigned comparison that follows then clamps it
		 * to sizeof(sense_data).  Looks like missing parentheses —
		 * TODO confirm against the I2O reply-frame layout.
		 */
		int size = ReplySizeInBytes
		    - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		    - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		    Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout((caddr_t)(elm->KernelSpace),
			    elm->UserSpace,
			    I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		free(elm, M_TEMP);
	}
	if (error == 0) {
		/* Copy reply frame to user space */
		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
		    ReplySizeInBytes);
	}
	free(Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */

/*----------------------------------------------------------------------*/
/*                       Function asr_ioctl                             */
/*----------------------------------------------------------------------*/
/* The parameters passed to this function are :                         */
/*     dev  : Device number.                                            */
/*     cmd  : Ioctl Command                                             */
/*     data : User Argument Passed In.
*/ 3500/* flag : Mode Parameter */ 3501/* proc : Process Parameter */ 3502/* */ 3503/* This function is the user interface into this adapter driver */ 3504/* */ 3505/* Return : zero if OK, error code if not */ 3506/*----------------------------------------------------------------------*/ 3507 3508static int 3509asr_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) 3510{ 3511 Asr_softc_t *sc = dev->si_drv1; 3512 int i, error = 0; 3513#ifdef ASR_IOCTL_COMPAT 3514 int j; 3515#endif /* ASR_IOCTL_COMPAT */ 3516 3517 if (sc != NULL) 3518 switch(cmd) { 3519 3520 case DPT_SIGNATURE: 3521#ifdef ASR_IOCTL_COMPAT 3522#if (dsDescription_size != 50) 3523 case DPT_SIGNATURE + ((50 - dsDescription_size) << 16): 3524#endif 3525 if (cmd & 0xFFFF0000) { 3526 bcopy(&ASR_sig, data, sizeof(dpt_sig_S)); 3527 return (0); 3528 } 3529 /* Traditional version of the ioctl interface */ 3530 case DPT_SIGNATURE & 0x0000FFFF: 3531#endif 3532 return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data), 3533 sizeof(dpt_sig_S))); 3534 3535 /* Traditional version of the ioctl interface */ 3536 case DPT_CTRLINFO & 0x0000FFFF: 3537 case DPT_CTRLINFO: { 3538 struct { 3539 u_int16_t length; 3540 u_int16_t drvrHBAnum; 3541 u_int32_t baseAddr; 3542 u_int16_t blinkState; 3543 u_int8_t pciBusNum; 3544 u_int8_t pciDeviceNum; 3545 u_int16_t hbaFlags; 3546 u_int16_t Interrupt; 3547 u_int32_t reserved1; 3548 u_int32_t reserved2; 3549 u_int32_t reserved3; 3550 } CtlrInfo; 3551 3552 bzero(&CtlrInfo, sizeof(CtlrInfo)); 3553 CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t); 3554 CtlrInfo.drvrHBAnum = asr_unit(dev); 3555 CtlrInfo.baseAddr = sc->ha_Base; 3556 i = ASR_getBlinkLedCode (sc); 3557 if (i == -1) 3558 i = 0; 3559 3560 CtlrInfo.blinkState = i; 3561 CtlrInfo.pciBusNum = sc->ha_pciBusNum; 3562 CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum; 3563#define FLG_OSD_PCI_VALID 0x0001 3564#define FLG_OSD_DMA 0x0002 3565#define FLG_OSD_I2O 0x0004 3566 CtlrInfo.hbaFlags = 
FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O; 3567 CtlrInfo.Interrupt = sc->ha_irq; 3568#ifdef ASR_IOCTL_COMPAT 3569 if (cmd & 0xffff0000) 3570 bcopy(&CtlrInfo, data, sizeof(CtlrInfo)); 3571 else 3572#endif /* ASR_IOCTL_COMPAT */ 3573 error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo)); 3574 } return (error); 3575 3576 /* Traditional version of the ioctl interface */ 3577 case DPT_SYSINFO & 0x0000FFFF: 3578 case DPT_SYSINFO: { 3579 sysInfo_S Info; 3580#ifdef ASR_IOCTL_COMPAT 3581 char * cp; 3582 /* Kernel Specific ptok `hack' */ 3583#define ptok(a) ((char *)(uintptr_t)(a) + KERNBASE) 3584 3585 bzero(&Info, sizeof(Info)); 3586 3587 /* Appears I am the only person in the Kernel doing this */ 3588 outb (0x70, 0x12); 3589 i = inb(0x71); 3590 j = i >> 4; 3591 if (i == 0x0f) { 3592 outb (0x70, 0x19); 3593 j = inb (0x71); 3594 } 3595 Info.drive0CMOS = j; 3596 3597 j = i & 0x0f; 3598 if (i == 0x0f) { 3599 outb (0x70, 0x1a); 3600 j = inb (0x71); 3601 } 3602 Info.drive1CMOS = j; 3603 3604 Info.numDrives = *((char *)ptok(0x475)); 3605#endif /* ASR_IOCTL_COMPAT */ 3606 3607 bzero(&Info, sizeof(Info)); 3608 3609 Info.processorFamily = ASR_sig.dsProcessorFamily; 3610#if defined(__i386__) 3611 switch (cpu) { 3612 case CPU_386SX: case CPU_386: 3613 Info.processorType = PROC_386; break; 3614 case CPU_486SX: case CPU_486: 3615 Info.processorType = PROC_486; break; 3616 case CPU_586: 3617 Info.processorType = PROC_PENTIUM; break; 3618 case CPU_686: 3619 Info.processorType = PROC_SEXIUM; break; 3620 } 3621#elif defined(__alpha__) 3622 Info.processorType = PROC_ALPHA; 3623#endif 3624 3625 Info.osType = OS_BSDI_UNIX; 3626 Info.osMajorVersion = osrelease[0] - '0'; 3627 Info.osMinorVersion = osrelease[2] - '0'; 3628 /* Info.osRevision = 0; */ 3629 /* Info.osSubRevision = 0; */ 3630 Info.busType = SI_PCI_BUS; 3631 Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM; 3632 3633#ifdef ASR_IOCTL_COMPAT 3634 Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid; 3635 /* Go Out And 
Look For I2O SmartROM */ 3636 for(j = 0xC8000; j < 0xE0000; j += 2048) { 3637 int k; 3638 3639 cp = ptok(j); 3640 if (*((unsigned short *)cp) != 0xAA55) { 3641 continue; 3642 } 3643 j += (cp[2] * 512) - 2048; 3644 if ((*((u_long *)(cp + 6)) 3645 != ('S' + (' ' * 256) + (' ' * 65536L))) 3646 || (*((u_long *)(cp + 10)) 3647 != ('I' + ('2' * 256) + ('0' * 65536L)))) { 3648 continue; 3649 } 3650 cp += 0x24; 3651 for (k = 0; k < 64; ++k) { 3652 if (*((unsigned short *)cp) 3653 == (' ' + ('v' * 256))) { 3654 break; 3655 } 3656 } 3657 if (k < 64) { 3658 Info.smartROMMajorVersion 3659 = *((unsigned char *)(cp += 4)) - '0'; 3660 Info.smartROMMinorVersion 3661 = *((unsigned char *)(cp += 2)); 3662 Info.smartROMRevision 3663 = *((unsigned char *)(++cp)); 3664 Info.flags |= SI_SmartROMverValid; 3665 Info.flags &= ~SI_NO_SmartROM; 3666 break; 3667 } 3668 } 3669 /* Get The Conventional Memory Size From CMOS */ 3670 outb (0x70, 0x16); 3671 j = inb (0x71); 3672 j <<= 8; 3673 outb (0x70, 0x15); 3674 j |= inb(0x71); 3675 Info.conventionalMemSize = j; 3676 3677 /* Get The Extended Memory Found At Power On From CMOS */ 3678 outb (0x70, 0x31); 3679 j = inb (0x71); 3680 j <<= 8; 3681 outb (0x70, 0x30); 3682 j |= inb(0x71); 3683 Info.extendedMemSize = j; 3684 Info.flags |= SI_MemorySizeValid; 3685 3686 /* Copy Out The Info Structure To The User */ 3687 if (cmd & 0xFFFF0000) 3688 bcopy(&Info, data, sizeof(Info)); 3689 else 3690#endif /* ASR_IOCTL_COMPAT */ 3691 error = copyout(&Info, *(caddr_t *)data, sizeof(Info)); 3692 return (error); } 3693 3694 /* Get The BlinkLED State */ 3695 case DPT_BLINKLED: 3696 i = ASR_getBlinkLedCode (sc); 3697 if (i == -1) 3698 i = 0; 3699#ifdef ASR_IOCTL_COMPAT 3700 if (cmd & 0xffff0000) 3701 bcopy(&i, data, sizeof(i)); 3702 else 3703#endif /* ASR_IOCTL_COMPAT */ 3704 error = copyout(&i, *(caddr_t *)data, sizeof(i)); 3705 break; 3706 3707 /* Send an I2O command */ 3708 case I2OUSRCMD: 3709 return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data))); 3710 3711 /* 
Reset and re-initialize the adapter */ 3712 case I2ORESETCMD: 3713 return (ASR_reset(sc)); 3714 3715 /* Rescan the LCT table and resynchronize the information */ 3716 case I2ORESCANCMD: 3717 return (ASR_rescan(sc)); 3718 } 3719 return (EINVAL); 3720} /* asr_ioctl */ 3721